Пример #1
0
def main():
    '''Main method. Make the database from the given file and run the problem solutions.'''

    problems = (prob1, prob2, prob3, prob4, prob5, prob6)

    from argparse import ArgumentParser

    argparser = ArgumentParser()
    # BUG FIX: a positional argument only honours `default=` when it is
    # optional (nargs='?'); without it argparse made `fname` required and
    # the default was dead code.
    argparser.add_argument(
        'fname',
        nargs='?',
        default='planets.csv',
        help=
        'Name of the file from which to read the database (default: planets.csv).'
    )
    argparser.add_argument(
        'problems',
        nargs='*',
        type=int,
        help='Which problems to run (1-{0}, default: all).'.format(
            len(problems)),
        default=range(1,
                      len(problems) + 1))

    args = argparser.parse_args()
    db = make_db(args.fname)

    for prob in args.problems:
        # Validate against the default range so out-of-range requests fail
        # fast with the full usage text.
        if prob not in argparser.get_default('problems'):
            raise IndexError('Problems must be in the range ' +
                             str(argparser.get_default('problems')) + '\n' +
                             argparser.format_help())
        problems[prob - 1](db)

    return locals()
Пример #2
0
def get_enum_defaults(parser: argparse.ArgumentParser):
    """Return a mapping of argument dest -> default for every argument of
    ``parser`` whose default value is an Enum member."""
    from enum import Enum
    parsed = parser.parse_args([])
    return {
        name: parser.get_default(name)
        for name in vars(parsed)
        if isinstance(parser.get_default(name), Enum)
    }
Пример #3
0
def _create_subcommands_parser(spec: Subcommands, parent: ArgumentParser):
    """Attach one subparser per subcommand of ``spec``, extending the
    accumulated command path stored in the '_command_parts_' default."""
    sub = parent.add_subparsers()
    base_parts = parent.get_default('_command_parts_')
    for name, child_spec in spec.subcommands.items():
        child_parser = sub.add_parser(name)
        child_parser.set_defaults(_command_parts_=base_parts + [name])
        _dispatch(child_spec, child_parser)
Пример #4
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register TTS-task specific command line arguments on ``parser``:
        a "Task related" group (token list, output dim, model config) and a
        "Preprocess related" group (tokenizer options)."""
        # NOTE(kamo): Use '_' instead of '-' to avoid confusion
        assert check_argument_types()
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        # NOTE(review): this relies on get_default("required") returning a
        # mutable list registered elsewhere, so the in-place += marks
        # --token_list as required -- confirm the parser sets that default.
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--odim",
            type=int_or_none,
            default=None,
            help="The number of dimension of output feature",
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetTTSModel),
            help="The keyword arguments for model class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=False,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word"],
            help="The text will be tokenized "
            "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        # Each class-choices entry contributes its own --<name> and
        # --<name>_conf pair of options.
        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
Пример #5
0
    def add_arguments(self, parser: argparse.ArgumentParser):
        """Wire this command's runner into ``parser`` and recurse into any
        subcommands, accumulating the handler chain in the 'fns' default."""
        if parser.get_default('exec') is None:
            # First command attached to this parser: install the runner and
            # seed the handler chain with our own exec.
            parser.set_defaults(exec=Cmd.runner, fns=[self.exec])
        else:
            self.bind_self(parser)

        chain = parser.get_default('fns')
        subparsers = parser.add_subparsers(title='commands')

        if self._subcommands is None:
            return
        for sub_cmd in self._subcommands:
            sub = subparsers.add_parser(sub_cmd.name(),
                                        help=sub_cmd.description())
            sub_cmd.add_arguments(sub)
            # Each subcommand extends a copy of the parent handler chain.
            sub.set_defaults(fns=list(chain) + [sub_cmd.exec])
Пример #6
0
def get_modified_config(parser: ArgumentParser, args: Namespace) -> DarkerConfig:
    """Return configuration options which are set to non-default values"""
    modified = {}
    for option, value in vars(args).items():
        if value != parser.get_default(option):
            modified[option] = value
    replace_log_level_name(modified)
    return modified
Пример #7
0
def wrap_parser_exit(parser: argparse.ArgumentParser):
    """Monkey-patch ``parser.exit`` so the DLNA server is stopped before the
    parser terminates the process (unless the selected action needs no server)."""
    original_exit = parser.exit
    action = parser.get_default('func')

    def patched_exit(status=0, message=None):
        if action not in NO_SERVER_ACTION:
            _DLNA_SERVER.stop_server()
        original_exit(status, message)

    parser.exit = patched_exit
Пример #8
0
def main():
    '''Main method. Make the database from the given file and run the problem solutions.'''

    problems = (prob1, prob2, prob3, prob4)

    from argparse import ArgumentParser

    argparser = ArgumentParser()
    argparser.add_argument(
        'fname',
        nargs='?',
        default='oecd-gdp-pc-change-1997-2017.csv',
        help='Name of the file from which to read the database (default: oecd-gdp-pc-change-1997-2017.csv).')
    argparser.add_argument(
        'problems',
        nargs='*',
        type=int,
        default=range(1, len(problems) + 1),
        help='Which problems to run (1-{0}, default: all).'.format(len(problems)))

    args = argparser.parse_args()

    # Build the database from the CSV file (first row treated as header).
    db = Database()
    with open(args.fname) as fin:
        db.read_from_csv(fin, True)

    valid = argparser.get_default('problems')
    for prob in args.problems:
        if prob not in valid:
            raise IndexError('Problems must be in the range ' + str(valid) +
                             '\n' + argparser.format_help())
        problems[prob - 1](db)

    return locals()
Пример #9
0
 def __init__(self, parser: argparse.ArgumentParser) -> None:
     """Create using default argument values."""
     # NOTE(review): 'default-branch-from-remote' is looked up verbatim;
     # argparse dests normally use underscores -- confirm a default is
     # actually registered under the dashed key (e.g. via set_defaults).
     default_of = parser.get_default
     self.check_merge_commits = default_of('m')
     self.default_branch = default_of('b')
     self.default_branch_from_remote = default_of(
         'default-branch-from-remote')
     self.default_remote = default_of('r')
     self.quiet = default_of('q')
     self.verbose = default_of('v')
Пример #10
0
def handle_enums(args: Dict, parser: argparse.ArgumentParser) -> Dict:
    """ Since REST relies on json, reverse conversion of integers to enums is needed """
    default_enums = get_enum_defaults(parser=parser)
    _args = args.copy()
    if 'log_config' in _args:
        # BUG FIX: ArgumentParser.get_default() takes the *dest* name
        # ('log_config'), not the option string '--log-config'; the old
        # lookup always returned None.
        _args['log_config'] = parser.get_default('log_config')

    for key, value in args.items():
        if key in default_enums:
            _enum_type = type(default_enums[key])
            # JSON payloads may carry enums as raw ints or as their names;
            # rebuild the proper Enum member either way.
            if isinstance(value, int):
                _args[key] = _enum_type(value)
            elif isinstance(value, str):
                _args[key] = _enum_type.from_string(value)
    return _args
Пример #11
0
def send_params_info(argv: argparse.Namespace,
                     cli_parser: argparse.ArgumentParser):
    """
    This function sends information about used command line parameters.
    :param argv: command line parameters.
    :param cli_parser: command line parameters parser.
    """
    telemetry = tm.Telemetry()
    path_params = get_params_with_paths_list()
    for name in vars(argv):
        value = getattr(argv, name)
        # Only report parameters that differ from the parser defaults.
        if value == cli_parser.get_default(name):
            continue
        if name in path_params:
            # Directory/file values may contain confidential information,
            # so the placeholder "1" is sent instead.
            payload = name + ":" + str(1)
        else:
            payload = name + ":" + str(value)
        telemetry.send_event('mo', 'cli_parameters', payload)
Пример #12
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register task-specific command line arguments on ``parser``
        (initialization method, input size, model config, plus the options
        contributed by ``cls.class_choices_list``)."""
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        # NOTE(review): unlike the sibling tasks, nothing is appended to
        # ``required`` here, so this lookup currently has no visible effect.
        required = parser.get_default("required")

        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )

        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )

        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(HynetImgrModel),
            help="The keyword arguments for model class.",
        )

        # Each class-choices entry contributes its own --<name> and
        # --<name>_conf pair of options.
        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
Пример #13
0
def process_config_values(parser: argparse.ArgumentParser,
                          args: argparse.Namespace):
    """
    Bring in provided config values to the args parser, and import entries to the config
    from all arguments that were actually passed on the command line

    :param parser: The arg parser
    :param args: The value that parser.parse_args returned
    """
    # Load a local config file if one was passed (or discovered in pwd).
    if hasattr(args, "config"):
        load_overrides(args.config)

    # Keys defined by the config get updated in their native group rather
    # than in the catch-all "cli" group.
    defined_vars = list(get_config_keys())
    config_cli_args = get_group("cli")

    for k in vars(args):
        set_val = getattr(args, k)
        # NOTE: identity comparison ('is') deliberately mirrors how argparse
        # shares the default object, detecting "was this passed on the CLI".
        if parser.get_default(k) is set_val:
            # Not set on the command line: fall back to the config file's
            # cli group, if it has a value for this key.
            if k in config_cli_args:
                setattr(args, k, getattr(config_cli_args, k))
        elif k in defined_vars:
            # Update the variable in its native group.
            group_name, key = k.split(".")
            setattr(get_group(group_name), key, set_val)
        else:
            # Plain command line option: record it in the cli group
            # (for saving in the workspace).
            config_cli_args.update(k, value=set_val)
from platon.constants import METRES_TO_UM
from platon.transit_depth_calculator import TransitDepthCalculator
from platon.errors import AtmosphereError

from sklearn.externals import joblib

from argparse import ArgumentParser

ap = ArgumentParser()
# NOTE: help typo fixed ("mutlinest" -> "multinest").
ap.add_argument('-bm', '--bayesianmodel', required=False, default='emcee', type=str,
                help="type of bayesian model: 'multinest' or 'emcee'")
ap.add_argument('-nw', '--num_walkers', required=False, default=50, type=int,
                help='number of walkers in emcee')
ap.add_argument('-ns', '--num_steps', required=False, default=50, type=int,
                help='number of steps in emcee')

args = vars(ap.parse_args())

# parse_args() always fills optional arguments with their defaults, but keep
# an explicit fallback to the parser defaults via dict.get (replaces the
# redundant `'key' in args.keys()` membership checks).
bayesian_model = args.get('bayesianmodel', ap.get_default('bayesianmodel'))
nwalkers = args.get('num_walkers', ap.get_default('num_walkers'))
nsteps = args.get('num_steps', ap.get_default('num_steps'))

planet_name = 'HD 209458 b'
# Fetch the observed transmission spectrum for the planet from exoMAST.
hd209458b = exoMAST_API(planet_name)
hd209458b.get_spectra()

wavelengths, wave_errs, depths, errors = hd209458b.planetary_spectra_table.values.T

# NOTE(review): `micron`/`meter` (and `exoMAST_API`) are not defined by the
# visible imports -- presumably astropy.units; verify before running.
wavelengths = (wavelengths*micron).to(meter).value
wave_errs = (wave_errs*micron).to(meter).value

# Build [lower, upper] wavelength bin edges from centre +/- half-width.
wave_bins = np.transpose([wavelengths - wave_errs, wavelengths + wave_errs])
#create a Retriever object
def add_args(parser: ArgumentParser,
             description: List[Any],
             depth: int = 0) -> None:
    """
    Populate the given parser with arguments, as specified by the
    description. The description is a list of Arg, Cmd, and Group objects.

    :param parser: the argparse parser (or subparser) being populated.
    :param description: Arg/Cmd/Group/ArgGroup/BoolOptArg descriptions.
    :param depth: nesting level, used to build the "subcommand",
        "subsubcommand", ... metavar for nested subparsers.
    """
    subparsers = None
    help_parser = None

    def description_sort_key(desc: Any) -> str:
        if isinstance(desc, Cmd):
            return desc.name

        # `sorted` is stable, so we shouldn't change the relative
        # positioning of non-Cmd arg descriptions.
        return ""

    # Sort descriptions alphabetically by name before passing them to
    # argparse. This ensures that `help` output is sorted
    # alphabetically.
    description = sorted(description, key=description_sort_key)

    for thing in description:
        if isinstance(thing, Cmd):
            # Subparsers are created lazily, on the first Cmd encountered.
            if subparsers is None:
                metavar = "sub" * depth + "command"
                subparsers = parser.add_subparsers(metavar=metavar)

                # If there are any subcommands at all, also add a `help`
                # subcommand.
                help_parser = subparsers.add_parser(
                    "help", help="show help for this command")
                help_parser.set_defaults(func=help_func(parser))

            main_name, aliases = generate_aliases(thing.name)

            subparser_kwargs = {
                "aliases": aliases,
                "formatter_class": ArgumentDefaultsHelpFormatter,
            }
            if thing.help_str != SUPPRESS:
                subparser_kwargs["help"] = thing.help_str
            subparser = subparsers.add_parser(main_name, **subparser_kwargs)

            # Record which handler to run and which (nested) subcommand name
            # was chosen, keyed by depth (_command, _subcommand, ...).
            subparser.set_defaults(func=thing.func)
            subparser.set_defaults(
                **{("_" + "sub" * depth + "command"): thing.name})

            # If this is the default subcommand, make calling the parent with
            # no subcommand behave the same as calling this subcommand with no
            # arguments.
            if thing.is_default:
                thing.func = cast(Callable, thing.func)
                parser.set_defaults(func=wrap_func(subparser, thing.func))

            # Recurse into the subcommand's own description one level deeper.
            add_args(subparser, thing.subs, depth + 1)

        elif isinstance(thing, Arg):
            arg = parser.add_argument(*thing.args, **thing.kwargs)
            arg.completer = thing.completer  # type: ignore

        elif isinstance(thing, Group):
            group = parser.add_mutually_exclusive_group(**thing.kwargs)
            for option in thing.options:
                group.add_argument(*option.args, **option.kwargs)

        elif isinstance(thing, ArgGroup):
            arg_group = parser.add_argument_group(thing.title,
                                                  thing.description)
            for child_arg in thing.child_args:
                arg_group.add_argument(*child_arg.args, **child_arg.kwargs)

        elif isinstance(thing, BoolOptArg):
            # Paired flags sharing one dest: the false-side default is
            # SUPPRESS so only the true-side default seeds the namespace.
            parser.add_argument(
                thing.true_name,
                dest=thing.dest,
                action="store_true",
                help=thing.true_help,
                default=thing.default,
            )
            parser.add_argument(
                thing.false_name,
                dest=thing.dest,
                action="store_false",
                help=thing.false_help,
                default=SUPPRESS,
            )

    # If there are any subcommands but none claimed the default action, make
    # the default print help.
    if subparsers is not None and parser.get_default("func") is None:
        parser.set_defaults(func=help_func(parser))
Пример #16
0
    def parse_options(self):
        """Build the command line parser, parse argv, merge in config-file
        globals (if a config file was given) and store the results on
        ``self`` (dict-style), honouring environment-variable overrides."""
        # NOTE(review): ArgumentParser(version=...) only exists in the
        # legacy (Python 2 era) argparse API; Python 3 removed it in favour
        # of add_argument(..., action='version'). Confirm the target
        # runtime before porting.
        parser = ArgumentParser(
            formatter_class=ArgumentDefaultsHelpFormatter,
            version=version,
        )

        add = parser.add_argument

        add(
            "-c", "--config", action="store", default=None,
            dest="config", metavar="FILE", type=str,
            help="Read configuration from FILE"
        )

        add(
            "-D", "--debug", action="store_true", default=False,
            dest="debug",
            help="Enable debug mode"
        )

        add(
            "-V", "--verbose", action="store_true", default=False,
            dest="verbose",
            help="Enable verbose logging"
        )

        add(
            "-l", "--logfile", action="store", default="-",
            dest="logfile", metavar="FILE", type=FileType(mode="w"),
            help="Write logs to FILE"
        )

        add(
            "-P", "--plugin",
            action="append", default=plugins.DEFAULTS, dest="plugins",
            help="Plugins to load (multiple allowed)"
        )

        add(
            "-b", "--bind",
            action="store", type=str,
            default="0.0.0.0:70", metavar="INT", dest="bind",
            help="Bind to interface INT"
        )

        # BUG FIX: a missing comma made "-6" "--ipv6" concatenate into the
        # single, unusable option string "-6--ipv6".
        add(
            "-6", "--ipv6",
            action="store_true",
            default=False, dest="ipv6",
            help="Enable IPv6 support"
        )

        add(
            "-e", "--encoding",
            action="store", type=str,
            default="UTF-8", dest="encoding",
            help="Set default encoding"
        )

        # NOTE: help typo fixed ("Sel" -> "Set").
        add(
            "-w", "--width",
            action="store", type=int,
            default=70, dest="width",
            help="Set default page width"
        )

        add(
            "-r", "--rootdir",
            action="store", type=str,
            default=getcwd(), dest="rootdir",
            help="Set root directory"
        )

        add(
            "-u", "--user",
            action="store", type=str,
            default="nobody", dest="user",
            help="Set user to drop privileges to"
        )

        add(
            "-g", "--group",
            action="store", type=str,
            default="nobody", dest="group",
            help="Set group to drop privileges to"
        )

        add(
            "-U", "--userdir",
            action="store", type=str,
            default="gopher", dest="userdir",
            help="Set user directory"
        )

        add(
            "-H", "--host",
            action="store", type=str,
            default="localhost", dest="host",
            help="Set hostname"
        )

        namespace = parser.parse_args()

        # Merge the "globals" section of the config file into self, warning
        # about keys that do not correspond to any known option.
        if namespace.config is not None:
            filename = namespace.config
            if exists(filename):
                config = reprconf.as_dict(str(filename))
                for option, value in config.pop("globals", {}).items():
                    if option in namespace:
                        self[option] = value
                    else:
                        warn("Ignoring unknown option %r" % option)
                self.update(config)

        for option, value in namespace.__dict__.items():
            # An environment variable that differs from the parser default
            # takes precedence over the command line value.
            key = "{}{}".format(self.prefix, option.upper())
            if key in environ and environ[key] != parser.get_default(option):
                continue

            if option not in self and value is not None:
                self[option] = value
Пример #17
0
def main(argv=None):
    '''Command line options.

    :param argv: command line arguments (defaults to sys.argv[1:]).
    '''

    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = "%s" % __updated__

    program_version_string = '{prog} {:s} {:s}'.format(program_version, program_build_date, prog=program_name)
    program_longdesc = '''
gptwosample is a program to perform GPTwoSample on given treatment and control timeseries.
It defines several different classes to perform GPTwoSample tasks,
including accounting for timeshifts between timeseries,
accounting confounding variation using GPLVM
and simple GPTwoSample.

The FILE format has to fullfill following formation:

    ============ =============== ==== ===============
    *arbitrary*  x1              ...  xl
    ============ =============== ==== ===============
    Gene ID 1    y1 replicate 1  ...  yl replicate 1
    ...          ...             ...  ...
    Gene ID 1    y1 replicate k1 ...  yl replicate k1

    ...

    Gene ID n    y1 replicate 1  ...  yl replicate 1
    ...          ...             ...  ...
    Gene ID n    y1 replicate kn ...  yl replicate kn
    ============ =============== ==== ===============

Where all entries not convertable by float() will be treated as missing
'''  # optional - give further explanation about what the program does
    program_license = "Copyright 2013 Max Zwiessele\n\
    Licensed under the Apache License 2.0\n\
    http://www.apache.org/licenses/LICENSE-2.0"

    if argv is None:
        argv = sys.argv[1:]

    # BUG FIX: a commented-out ``try:`` had left the whole body below
    # indented inside the ``if argv is None`` block, so calling
    # main(argv=[...]) silently did nothing. The body now runs
    # unconditionally.

    # setup option parser
    parser = ArgumentParser(version=program_version_string,
                            epilog=program_longdesc,
                            description=program_license,
                            formatter_class=RawDescriptionHelpFormatter,
                            conflict_handler='resolve')
    parser.add_argument("-o", "--out",
                        dest="outdir",
                        help="set output dir [default: %(default)s]",
                        metavar="DIR",
                        default="./twosample_out/")
    parser.add_argument("-t",
                        "--timeshift",
                        dest="timeshift",
                        action="store_true",
                        help="account for timeshifts in data [default: %(default)s]")
    parser.add_argument("-c",
                        "--confounder",
                        dest="confounder",
                        type=int,
                        default=0,
                        metavar="N",
                        help="account for N confounders in data [default: %(default)s]")
    parser.add_argument("-p",
                        "--plot",
                        dest="plot",
                        action="store_true",
                        default=False,
                        help="plot data into outdir/plots? [default: %(default)s]")
    parser.add_argument("-v", "--verbose",
                        dest="verbose",
                        action="count",
                        help="set verbosity level [default: %(default)s]",
                        default=0)
    parser.add_argument('--version', action='version',
                        version='%(prog)s {}'.format(__version__))
    parser.add_argument('--filter', dest="filter", metavar="FILE", default=None,
                        help="file containing gene names to use")
    parser.add_argument("infiles", nargs=2, metavar="FILE",
                        help="treatment/control files to compare against each other")
    parser.add_argument("--backend", dest="backend", metavar="[PDF,...]", default="System default",
                        help="matplotlib backend - see matplotlib.use(backend)")

    # process options
    opts = parser.parse_args(argv)

    if opts.verbose > 0:
        # Echo every parsed option, aligned on the longest option name.
        arg_strings = []
        for name, value in opts._get_kwargs():
            arg_strings.append((name, value))
        m = reduce(max, map(lambda a: len(a[0]), arg_strings))
        arg_strings = [("{0:%is} = {1!s}" % (m)).format(arg, val) for arg, val in arg_strings]
        print(message("\n".join(arg_strings)))

    # Only switch the matplotlib backend when the user overrode the default.
    if opts.backend != parser.get_default("backend"):
        import matplotlib
        matplotlib.use(opts.backend)

    # MAIN BODY #
    import numpy

    if opts.filter:
        with open(opts.filter, 'r') as f:
            fil = numpy.array(f.read().strip(os.linesep).split(os.linesep))
    else:
        fil = None

    T, Y, gene_names, Ynorm = loaddata(*opts.infiles, verbose=opts.verbose, fil=fil)
    n, r, t, d = Y.shape

    assert n == 2, "Only comparison of two samples implemented"

    if opts.timeshift:
        repindices = (numpy.arange(n * r)[:, None] * numpy.ones((n * r, t))).flatten()

    from pygp.covar.combinators import ShiftCF, SumCF
    from pygp.covar.se import SqexpCFARD
    from pygp.covar.bias import BiasCF

    # Build the covariance functions; with confounders they are factories
    # (callables taking the confounder kernel), otherwise plain kernels.
    if opts.confounder and opts.timeshift:
        covarc = lambda x: ShiftCF(SumCF((SqexpCFARD(1), x, BiasCF())), replicate_indices=repindices)
        covar1 = lambda x: ShiftCF(SumCF((SqexpCFARD(1), x, BiasCF())), replicate_indices=repindices[:r * t])
        covar2 = lambda x: ShiftCF(SumCF((SqexpCFARD(1), x, BiasCF())), replicate_indices=repindices[r * t:])
    elif opts.timeshift:
        covarc = ShiftCF(SumCF((SqexpCFARD(1), BiasCF())), replicate_indices=repindices)
        covar1 = ShiftCF(SumCF((SqexpCFARD(1), BiasCF())), replicate_indices=repindices[:r * t])
        covar2 = ShiftCF(SumCF((SqexpCFARD(1), BiasCF())), replicate_indices=repindices[r * t:])
    elif opts.confounder:
        covarc = lambda x: SumCF((SqexpCFARD(1), x, BiasCF()))
        covar1 = lambda x: SumCF((SqexpCFARD(1), x, BiasCF()))
        covar2 = lambda x: SumCF((SqexpCFARD(1), x, BiasCF()))
    else:
        covarc = SumCF((SqexpCFARD(1), BiasCF()))
        covar1 = SumCF((SqexpCFARD(1), BiasCF()))
        covar2 = SumCF((SqexpCFARD(1), BiasCF()))

    if opts.confounder:
        from gptwosample.confounder.confounder import TwoSampleConfounder
        twosample = TwoSampleConfounder(T, Y, q=opts.confounder)
        from gptwosample.run.confounder import run_confounder_twosample
        twosample = run_confounder_twosample(twosample)
        twosample.initialize_twosample_covariance(covar_common=covarc,
                                                  covar_individual_1=covar1,
                                                  covar_individual_2=covar2)
    else:
        from gptwosample.twosample.twosample import TwoSample
        twosample = TwoSample(T, Y, covarc, covar1, covar2)

    from gptwosample.run.twosample import run_twosample
    twosample = run_twosample(twosample, gene_names, opts.outdir)

    if opts.plot:
        mi = twosample.T.min()
        ma = twosample.T.max()
        s = "predicting means and variances"
        started(s)
        twosample.predict_means_variances(numpy.linspace(mi, ma, 100), message=message(s))

        s = "plotting..."
        started(s)
        import pylab
        pylab.ion()
        pylab.figure()
        plotdir = os.path.join(opts.outdir, "plots")
        if not os.path.exists(plotdir):
            os.makedirs(plotdir)
        for i, name, _ in itertools.izip(itertools.count(), gene_names, twosample.plot(timeshift=opts.timeshift)):
            started("{2:s} {0:s} {1:.3%}".format(name, float(i + 1) / len(gene_names), s))
            try:
                pylab.savefig(os.path.join(plotdir, "{}.pdf".format(name)))
            except Exception:
                # Fall back to the default image format when the PDF save
                # fails (e.g. unusual characters in the gene name).
                pylab.savefig(os.path.join(plotdir, "{}".format(name)))
        finished(s)
Пример #18
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register ST-task specific command line arguments on ``parser``:
        a "Task related" group (source/target token lists, init, CTC/model
        config), a "Preprocess related" group (tokenizer options) plus
        augmentation options (RIR/noise) and the options contributed by
        ``cls.class_choices_list``."""
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        # NOTE(review): this relies on get_default("required") returning a
        # mutable list registered elsewhere, so the in-place += marks the
        # two token lists as required -- confirm the parser sets that default.
        required = parser.get_default("required")
        required += ["src_token_list", "token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token (for target language)",
        )
        group.add_argument(
            "--src_token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token (for source language)",
        )
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )

        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )

        group.add_argument(
            "--ctc_conf",
            action=NestedDictAction,
            default=get_default_kwargs(CTC),
            help="The keyword arguments for CTC class.",
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetSTModel),
            help="The keyword arguments for model class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The target text will be tokenized "
            "in the specified level token",
        )
        group.add_argument(
            "--src_token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The source text will be tokenized "
            "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece (for target language)",
        )
        group.add_argument(
            "--src_bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece (for source language)",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        parser.add_argument(
            "--speech_volume_normalize",
            type=float_or_none,
            default=None,
            help="Scale the maximum amplitude to the given value.",
        )
        # Data augmentation: optional RIR convolution and noise mixing.
        parser.add_argument(
            "--rir_scp",
            type=str_or_none,
            default=None,
            help="The file path of rir scp file.",
        )
        parser.add_argument(
            "--rir_apply_prob",
            type=float,
            default=1.0,
            help="THe probability for applying RIR convolution.",
        )
        parser.add_argument(
            "--noise_scp",
            type=str_or_none,
            default=None,
            help="The file path of noise scp file.",
        )
        parser.add_argument(
            "--noise_apply_prob",
            type=float,
            default=1.0,
            help="The probability applying Noise adding.",
        )
        parser.add_argument(
            "--noise_db_range",
            type=str,
            default="13_15",
            help="The range of noise decibel level.",
        )

        # Each class-choices entry contributes its own --<name> and
        # --<name>_conf pair of options.
        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
Пример #19
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register joint enhancement + ASR task options on ``parser``.

        Adds a "Task related" group (token list, weight init, CTC config and
        the ASR / enhancement model configs) and a "Preprocess related" group
        (tokenization options), then lets every entry of
        ``cls.class_choices_list`` append its own ``--<name>`` /
        ``--<name>_conf`` arguments.
        """
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        # below: the list stored under the "required" default is mutated
        # in place, presumably consumed later by the task runner to
        # enforce that --token_list is supplied — confirm against caller.
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--init",
            # Lower-cased before lookup so e.g. "Chainer" matches a choice.
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )

        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )

        group.add_argument(
            "--ctc_conf",
            action=NestedDictAction,
            default=get_default_kwargs(CTC),
            help="The keyword arguments for CTC class.",
        )
        group.add_argument(
            "--asr_model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetASRModel),
            help="The keyword arguments for model class.",
        )

        group.add_argument(
            "--enh_model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetEnhancementModel),
            help="The keyword arguments for model class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            # NOTE(review): sibling tasks in this file default this to True;
            # confirm False is intentional for the enh+ASR task.
            default=False,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The text will be tokenized " "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=[None, "g2p_en", "pyopenjtalk", "pyopenjtalk_kana"],
            default=None,
            help="Specify g2p method if --token_type=phn",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
Пример #20
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register CepNet task options on ``parser``.

        Adds a "Task related" group (model config, weight init, input size)
        and a "Preprocess related" group (preprocessing plus RIR/noise
        augmentation options), then lets every entry of
        ``cls.class_choices_list`` append its own ``--<name>`` /
        ``--<name>_conf`` arguments.
        """
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode; tasks with mandatory options
        # append their names to parser.get_default("required") instead.
        # This task has none, so the former dead
        # `required = parser.get_default("required")` lookup was removed.

        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(CepNet),
            help="The keyword arguments for model class.",
        )

        group.add_argument(
            "--init",
            # Lower-cased before lookup so e.g. "Chainer" matches a choice.
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )

        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        parser.add_argument(
            "--speech_volume_normalize",
            type=float_or_none,
            default=None,
            help="Scale the maximum amplitude to the given value.",
        )
        parser.add_argument(
            "--rir_scp",
            type=str_or_none,
            default=None,
            help="The file path of rir scp file.",
        )
        parser.add_argument(
            "--rir_apply_prob",
            type=float,
            default=1.0,
            # Fixed help-text typo ("THe" -> "The").
            help="The probability for applying RIR convolution.",
        )
        parser.add_argument(
            "--noise_scp",
            type=str_or_none,
            default=None,
            help="The file path of noise scp file.",
        )
        parser.add_argument(
            "--noise_apply_prob",
            type=float,
            default=1.0,
            help="The probability applying Noise adding.",
        )
        parser.add_argument(
            "--noise_db_range",
            type=str,
            default="13_15",
            help="The range of noise decibel level.",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
Пример #21
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register semi-supervised (pseudo-label) ASR task options.

        Adds a "Task related" group (pseudo-label data files, token list,
        SSL ratio, init, CTC/model configs) and a "Preprocess related" group
        (tokenization plus RIR/noise augmentation options), then lets every
        entry of ``cls.class_choices_list`` append its own ``--<name>`` /
        ``--<name>_conf`` arguments.
        """
        group = parser.add_argument_group(description="Task related")

        # Mutating the list stored under the "required" default marks
        # --token_list as mandatory; presumably the task runner checks
        # this list (--print_config mode forbids required=True here).
        required = parser.get_default("required")
        required += ["token_list"]

        # Repeatable: each occurrence appends one shape file.
        group.add_argument("--train_pseudo_shape_file",
                           type=str,
                           action="append",
                           default=[])

        group.add_argument(
            "--train_pseudo_data_path_and_name_and_type",
            type=str2triple_str,
            action="append",
            default=[],
            # NOTE(review): help text typo "splitted" (should be "split");
            # runtime string left untouched here.
            help=
            "Give three words splitted by comma. It's used for the training data.",
        )
        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--pis_ratio",
            type=float,
            default=1,
            help="The multi-iteration ratio for SSL",
        )
        group.add_argument(
            "--init",
            # Lower-cased before lookup so e.g. "Chainer" matches a choice.
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )

        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )

        group.add_argument(
            "--ctc_conf",
            action=NestedDictAction,
            default=get_default_kwargs(CTC),
            help="The keyword arguments for CTC class.",
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetASRModel),
            help="The keyword arguments for model class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The text will be tokenized "
            "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=[None, "g2p_en", "pyopenjtalk", "pyopenjtalk_kana"],
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        parser.add_argument(
            "--speech_volume_normalize",
            type=float_or_none,
            default=None,
            help="Scale the maximum amplitude to the given value.",
        )
        parser.add_argument(
            "--rir_scp",
            type=str_or_none,
            default=None,
            help="The file path of rir scp file.",
        )
        parser.add_argument(
            "--rir_apply_prob",
            type=float,
            default=1.0,
            # NOTE(review): help text typo "THe"; runtime string untouched.
            help="THe probability for applying RIR convolution.",
        )
        parser.add_argument(
            "--noise_scp",
            type=str_or_none,
            default=None,
            help="The file path of noise scp file.",
        )
        parser.add_argument(
            "--noise_apply_prob",
            type=float,
            default=1.0,
            help="The probability applying Noise adding.",
        )
        parser.add_argument(
            "--noise_db_range",
            type=str,
            default="13_15",
            help="The range of noise decibel level.",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
Пример #22
0
 def __init__(self, parser: argparse.ArgumentParser) -> None:
     """Initialize the quiet/verbose flags from the parser's declared defaults."""
     lookup = parser.get_default
     self.__quiet = lookup('q')
     self.__verbose = lookup('v')
Пример #23
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Add Transducer task arguments.

        Registers a "Task related" group (token list, input size, init,
        model/encoder/joint-network configs) and a "Preprocess related"
        group (tokenization plus RIR/noise augmentation options), then lets
        every entry of ``cls.class_choices_list`` append its own
        ``--<name>`` / ``--<name>_conf`` arguments.

        Args:
            cls: ASRTransducerTask object.
            parser: Transducer arguments parser.

        """
        group = parser.add_argument_group(description="Task related")

        # Mutating the list stored under the "required" default marks
        # --token_list as mandatory; presumably the task runner checks
        # this list (required=True would break --print_config mode).
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--input_size",
            type=int_or_none,
            default=None,
            help="The number of input dimension of the feature",
        )
        group.add_argument(
            "--init",
            # Lower-cased before lookup so e.g. "Chainer" matches a choice.
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "chainer_espnet1",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetASRTransducerModel),
            help="The keyword arguments for model class.",
        )
        group.add_argument(
            "--encoder_conf",
            action=NestedDictAction,
            default={},
            help="The keyword arguments for encoder class.",
        )
        group.add_argument(
            "--joint_network_conf",
            action=NestedDictAction,
            default={},
            help="The keyword arguments for joint network class.",
        )
        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word", "phn"],
            help="The text will be tokenized "
            "in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=g2p_choices,
            default=None,
            help="Specify g2p method if --token_type=phn",
        )
        parser.add_argument(
            "--speech_volume_normalize",
            type=float_or_none,
            default=None,
            help="Scale the maximum amplitude to the given value.",
        )
        parser.add_argument(
            "--rir_scp",
            type=str_or_none,
            default=None,
            help="The file path of rir scp file.",
        )
        parser.add_argument(
            "--rir_apply_prob",
            type=float,
            default=1.0,
            help="The probability for applying RIR convolution.",
        )
        parser.add_argument(
            "--noise_scp",
            type=str_or_none,
            default=None,
            help="The file path of noise scp file.",
        )
        parser.add_argument(
            "--noise_apply_prob",
            type=float,
            default=1.0,
            help="The probability applying Noise adding.",
        )
        parser.add_argument(
            "--noise_db_range",
            type=str,
            default="13_15",
            help="The range of noise decibel level.",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --decoder and --decoder_conf
            class_choices.add_arguments(group)
Пример #24
0
                        dest="in_raster",
                        default='',
                        required=True,
                        help="Path to input raster.")
    parser.add_argument("-o",
                        dest="out_dir",
                        default='',
                        required=True,
                        help="Output directory.")

    # If no arguments are supplied, print help message
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()
    all_defaults = {key: parser.get_default(key) for key in vars(args)}

    if args.in_raster == all_defaults["in_raster"]:
        print('Using input raster location of: {0}'.format(
            all_defaults["in_raster"]))

    if args.out_dir == all_defaults["out_dir"]:
        print('Using output location of: {0}'.format(all_defaults["out_dir"]))

    outLat = os.path.join(args.out_dir, 'LATITUDE.tif')
    outLon = os.path.join(args.out_dir, 'LONGITUDE.tif')

    # Open (read-only) input raster
    in_raster = gdal.Open(args.in_raster, 0)  # Open with read-only mode

    # Gather information from input raster projection
ap.add_argument('-lr', '--learning_rate', type=float, default=1e-3, 
	help='Learning rate: how fast the optimizer moves up/down the gradient.')
ap.add_argument('-ts', '--test_size', type=float, default=0.75,
	help='How much to split the train / test ratio')
ap.add_argument('-rs', '--random_state', type=int, default=42, 
	help='Integer value to initialize train/test splitting randomization')
ap.add_argument('-v', '--verbose', action="store_true",
	help='Whether to set verbosity = True or False (default)')
ap.add_argument('-ds', '--data_set', type=str, default='', 
	help='The csv file containing the data to predict with')

# Parse CLI args; if parsing fails (e.g. foreign sys.argv inside a
# notebook), fall back to the parser's declared defaults.
try:
    args = vars(ap.parse_args())
except SystemExit:
    # argparse reports errors by raising SystemExit; the previous bare
    # `except:` also swallowed KeyboardInterrupt and hid real bugs.
    # Fixed: `arts['random_state']` was a NameError typo for `args[...]`.
    keys = ('directory', 'n_nalu_layers', 'n_nalu_neurons', 'n_epochs',
            'n_classes', 'batch_size', 'learning_rate', 'test_size',
            'random_state', 'verbose', 'data_set')
    args = {key: ap.get_default(key) for key in keys}

verbose = args['verbose']
data_set_fname = args['data_set']

import pandas as pd
Пример #26
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register TTS task options on ``parser``.

        Adds a "Task related" group (token list, output dimension, model
        config) and a "Preprocess related" group (tokenization options),
        then lets every entry of ``cls.class_choices_list`` append its own
        ``--<name>`` / ``--<name>_conf`` arguments.
        """
        # NOTE(kamo): Use '_' instead of '-' to avoid confusion
        # NOTE(review): `assert` is stripped under -O; runtime type check
        # disappears in optimized mode (project-wide convention, kept).
        assert check_argument_types()
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        # below: the in-place += marks --token_list as mandatory,
        # presumably enforced later by the task runner — confirm.
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A train mapping int-id to token",
        )
        group.add_argument(
            "--odim",
            type=int_or_none,
            default=None,
            help="The number of dimension of output feature",
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetTTSModel),
            help="The keyword arguments for model class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="phn",
            choices=["bpe", "char", "word", "phn"],
            help="The train will be tokenized in the specified level token",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file of sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply train cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=[
                None,
                "g2p_en",
                "g2p_en_no_space",
                "pyopenjtalk",
                "pyopenjtalk_kana",
                "pyopenjtalk_accent",
                "pyopenjtalk_accent_with_pause",
                "pypinyin_g2p",
                "pypinyin_g2p_phone",
                "espeak_ng_arabic",
            ],
            default=None,
            help="Specify g2p method if --token_type=phn",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)
Пример #27
0
class Config:
    """Command-line configuration for a diffusion-model experiment.

    Builds an ArgumentParser with all common experiment options and provides
    helpers to parse it (with optional YAML-file override), pretty-print the
    resulting options, and save them back to YAML.
    """

    def __init__(self):
        """Declare every common option on ``self.parser``."""
        self.parser = ArgumentParser()
        # Common options
        self.parser.add_argument(
            '--name',
            type=str,
            default='experiment_name',
            help=
            'name of the experiment. It decides where to store samples and models'
        )
        self.parser.add_argument(
            '--gpu_ids',
            type=str,
            default='0',
            help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
        self.parser.add_argument('--config_file',
                                 type=str,
                                 default='',
                                 help='config default file')
        self.parser.add_argument('--checkpoints_dir',
                                 type=str,
                                 default='outputs/checkpoints',
                                 help='Checkpoints Directory')
        self.parser.add_argument(
            '--input_nc',
            type=int,
            default=3,
            help='# of input image channels: 3 for RGB and 1 for grayscale')
        self.parser.add_argument('--ngf',
                                 type=int,
                                 default=64,
                                 help='# of filters in the first conv layer')
        self.parser.add_argument(
            '--norm',
            type=str,
            default='batch',
            help='type of normalization layer [batch | groupN]')
        self.parser.add_argument(
            '--activation',
            type=str,
            default='relu',
            help='type of activation layer [relu | swish]')
        self.parser.add_argument(
            '--init_type',
            type=str,
            default='normal',
            help=
            'network initialization [normal | xavier | kaiming | orthogonal]')
        self.parser.add_argument(
            '--init_gain',
            type=float,
            default=0.02,
            help='scaling factor for normal, xavier and orthogonal.')
        self.parser.add_argument('--dropout',
                                 action='store_true',
                                 help='use dropout')
        self.parser.add_argument('--attention',
                                 action='store_true',
                                 help='use attention')
        self.parser.add_argument(
            '--serial_batches',
            action='store_true',
            help=
            'if true, takes images in order to make batches, otherwise takes them randomly'
        )
        self.parser.add_argument('--num_threads',
                                 default=4,
                                 type=int,
                                 help='# threads for loading data')
        self.parser.add_argument('--batch_size',
                                 type=int,
                                 default=1,
                                 help='input batch size')
        self.parser.add_argument('--block_size',
                                 type=int,
                                 default=1,
                                 help='input block size')
        self.parser.add_argument('--load_size',
                                 type=int,
                                 default=286,
                                 help='scale images to this size')
        self.parser.add_argument('--crop_size',
                                 type=int,
                                 default=256,
                                 help='then crop to this size')
        # NOTE(review): type=int with default float("inf") — an explicit
        # CLI value is coerced to int while the default stays float; the
        # default alone works, but confirm this mix is intended.
        self.parser.add_argument(
            '--max_dataset_size',
            type=int,
            default=float("inf"),
            help=
            'Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.'
        )
        self.parser.add_argument(
            '--preprocess',
            type=str,
            default='resize_and_crop',
            help=
            'scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]'
        )
        self.parser.add_argument(
            '--no_flip',
            action='store_true',
            help='if specified, do not flip the images for data augmentation')
        self.parser.add_argument(
            '--epoch',
            type=str,
            default='latest',
            help='which epoch to load? set to latest to use latest cached model'
        )
        self.parser.add_argument(
            '--load_iter',
            type=int,
            default=0,
            help=
            'which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]'
        )
        self.parser.add_argument('--beta_schedule',
                                 type=str,
                                 default='linear',
                                 help='beta interpolation method')
        # Fixed: these two were declared type=int although their defaults
        # are floats (0.0001 and 0.02); any value passed on the CLI either
        # raised (int('0.0001')) or silently truncated. Betas are floats.
        self.parser.add_argument('--beta_start',
                                 type=float,
                                 default=0.0001,
                                 help='start beta value')
        self.parser.add_argument('--beta_end',
                                 type=float,
                                 default=0.02,
                                 help='end beta value')
        self.parser.add_argument('--num_timesteps',
                                 type=int,
                                 default=1000,
                                 help='# of timesteps')
        self.parser.add_argument(
            '--loss_type',
            type=str,
            default='noisepred_mse',
            help='loss prediction policy. [noisepred_mse | noisepred_l1]')
        self.parser.add_argument(
            '--print_freq',
            type=int,
            default=100,
            help='frequency of showing training results on screen')
        self.parser.add_argument('--verbose',
                                 action='store_true',
                                 help='verbose mode')

    def parse(self):
        """Parse sys.argv, normalize gpu_ids and apply YAML overrides.

        Returns:
            argparse.Namespace: the parsed (and possibly overridden) options.
        """
        args = self.parser.parse_args()

        # set gpu ids: "0,1" -> [0, 1]; negative ids mean CPU and are dropped
        str_ids = args.gpu_ids.split(',')
        args.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                args.gpu_ids.append(id)
        if len(args.gpu_ids) > 0:
            torch.cuda.set_device(args.gpu_ids[0])

        # load from config file: only keys that differ from the parser
        # default AND already exist on the namespace are applied
        if args.config_file != '':
            print('update default options by a config file: %s' %
                  args.config_file)
            with open(args.config_file) as file:
                yaml_args = yaml.load(file, Loader=yaml.FullLoader)
            for k, v in yaml_args.items():
                default = self.parser.get_default(k)
                if v != default:
                    if hasattr(args, k):
                        setattr(args, k, v)

        assert isinstance(args.gpu_ids, list)

        self.print(args)

        self.args = args
        return args

    def print(self, opt):
        """Print options

        It will print both current options and default values(if different).
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)

    def save(self):
        """Save options

        It will save both current options and default values(if different) on CHECKPOINTS_DIR/NAME/PHASE+'_options.yaml'.
        """
        # NOTE(review): relies on self.args.phase, which this parser never
        # defines — presumably set by a subclass or caller; confirm.
        save_filename = os.path.join(self.args.checkpoints_dir, self.args.name,
                                     self.args.phase + '_options.yaml')
        print('save the config to %s' % save_filename)
        with open(save_filename, 'w') as file:
            yaml.dump(vars(self.args), file)
Пример #28
0
def disable_argument(parser : argparse.ArgumentParser, arg, prefix = '--'):
    """Re-register *arg* so it is hidden from help output.

    The option keeps its current default value but is attached to a
    DisableAction and its help string is suppressed.
    """
    # TODO: check that parser contains arg
    dest = arg.replace('-', '_')
    previous_default = parser.get_default(dest)
    parser.add_argument(prefix + arg,
                        help=argparse.SUPPRESS,
                        action=DisableAction,
                        default=previous_default)
Пример #29
0
# ---- argument groups (arg_parser and the -C/-G/-L/-S ops are defined earlier in the file) ----
group = arg_parser.add_argument_group('FILTER OPTIONS (APPLIES TO -[CGLS])')
group.add_argument('-d',           dest='date_range',  action='store',                                          help='only use entries in range')
group.add_argument('-i',           dest='icase',       action='store_false',                                    help='ignore case-insensitivity')
group.add_argument('-n',           dest='num_results', action='store',       type=int,                          help='limit number of results')
group = arg_parser.add_argument_group('OUTPUT OPTIONS')
group.add_argument('-r',           dest='reverse',     action='store_true',                                     help='reverse chronological order')
group = arg_parser.add_argument_group('OPERATION-SPECIFIC OPTIONS')
group.add_argument('--no-log',     dest='log',         action='store_false',                                    help='[S] do not log search')
group.add_argument('--no-headers', dest='headers',     action='store_false',                                    help='[C] do not print headers')
group.add_argument('--unit',       dest='unit',        action='store',       choices=('year', 'month', 'day'), help='[C] set tabulation unit')
args = arg_parser.parse_args()

# Maintenance operations ignore search/filter options: reset those dests back
# to their parser defaults so later code can treat them uniformly.
is_maintenance_op = args.op in ('archive', 'update', 'verify')
if is_maintenance_op:
    for option_dest in ('date_range', 'icase', 'terms'):
        setattr(args, option_dest, arg_parser.get_default(option_dest))

# Validate -d: a comma-separated list of [YYYY[-MM[-DD]]][:][YYYY[-MM[-DD]]] ranges
# (RANGE_REGEX is defined elsewhere in the file); validate -n is positive.
if args.date_range and not all(dr and RANGE_REGEX.match(dr) for dr in args.date_range.split(',')):
    arg_parser.error("argument -d: '{}' should be in format [YYYY[-MM[-DD]]][:][YYYY[-MM[-DD]]][,...]".format(args.date_range))
if args.num_results is not None and args.num_results < 1:
    arg_parser.error("argument -n: '{}' should be a positive integer".format(args.num_results))
# Normalize paths and de-duplicate the ignore list / search terms.
args.directory = realpath(expanduser(args.directory))
args.ignores = set(realpath(expanduser(path.strip())) for arg in args.ignores for path in arg.split(','))
args.terms = set(args.terms)

# 'archive' op: pack the journal directory plus the running script (argv[0])
# into a timestamped .txz; the filter drops files whose basename starts with
# '.' or '_'.
if args.op == 'archive':
    filename = 'jrnl' + datetime.now().strftime('%Y%m%d%H%M%S')
    with tarfile.open('{}.txz'.format(filename), 'w:xz') as tar:
        tar.add(args.directory, arcname=filename, filter=(lambda tarinfo: None if basename(tarinfo.name)[0] in '._' else tarinfo))
        tar.add(argv[0], arcname=join_path(filename, basename(argv[0])))
    exit()
Пример #30
0
class Config:
    # pylint: disable=attribute-defined-outside-init,no-member,too-many-instance-attributes
    """Run configuration assembled from CLI arguments and an optional JSON file.

    CLI arguments are the primary source; config-file values are applied
    only where the corresponding CLI argument was left at its default.
    After ``setup()`` every option is a plain attribute of the instance.
    """

    def __init__(self, config_file, start_time):
        """Remember the (optional) JSON config file path and the run start time."""
        self.start_time = start_time
        assert config_file is None or os.path.exists(
            config_file), "If specified, the config file must exist"
        self._config_file = config_file
        self._parser = ArgumentParser()

    def _load_config_file(self):
        """Return the parsed JSON config file, or an empty dict when none was given."""
        if not self._config_file:
            return {}

        with open(self._config_file, "r") as fh:
            return json.load(fh)

    def _load_args(self):
        """Register all CLI arguments and return the parsed values as a dict."""

        # BUG FIX: argparse's ``type=bool`` is a trap -- bool("False") is True,
        # so any explicit "--flag False" on the command line silently enabled
        # the flag.  Parse textual booleans explicitly instead; the defaults
        # (plain ``False``) are passed through unchanged.
        def parse_bool(value):
            if isinstance(value, bool):
                return value
            return value.strip().lower() in ("true", "1", "yes", "y")

        self._parser.add_argument(
            "--config-file",
            type=str,
            help="Name of the json file containing the config.")

        # main global parameters
        self._parser.add_argument("--eid",
                                  type=str,
                                  required=True,
                                  help="ID of the evaluation")
        self._parser.add_argument("--model-name",
                                  type=str,
                                  help="Name of the model to use")
        self._parser.add_argument(
            "--alternate-eid",
            type=str,
            help="ID of the alternate (second) evaluation")
        self._parser.add_argument(
            "--alternate-model-name",
            type=str,
            help="Name of an alternate (second) model to use")
        self._parser.add_argument("--batch-size",
                                  type=int,
                                  default=128,
                                  help="The number of samples in one batch")
        self._parser.add_argument("--epochs",
                                  type=int,
                                  default=5000,
                                  help="The number of epochs to train for")
        self._parser.add_argument(
            "--cycle",
            type=parse_bool,
            default=False,
            help="Specify as true if this should be a CycleGAN")
        self._parser.add_argument(
            "--noise-dimensions",
            type=int,
            default=100,
            help="The number of dimensions of the input noise")
        self._parser.add_argument(
            "--extractor-name",
            type=str,
            default="VGG19",
            choices=PerceptualScores.EXTRACTOR_NAMES,
            help="The name of the feature extractor to use")

        # extra training parameters
        self._parser.add_argument(
            "--train-disc-on-previous-images",
            type=parse_bool,
            default=False,
            help=
            "Specify if the discriminator should also be trained on the previous generator images"
        )
        self._parser.add_argument(
            "--train-disc-on-extra-targets",
            type=parse_bool,
            default=False,
            help=
            "Specify if the discriminator should be trained on extra target samples"
        )
        self._parser.add_argument(
            "--extra-disc-step-real",
            type=parse_bool,
            default=False,
            help=
            "Specify if the discriminator should be trained twice on real data per generator epoch"
        )
        self._parser.add_argument(
            "--extra-disc-step-both",
            type=parse_bool,
            default=False,
            help=
            "Specify if the discriminator should be trained twice on real and generated data per generator epoch"
        )
        self._parser.add_argument(
            "--use-extra-first-inputs",
            type=parse_bool,
            default=False,
            help=
            "Specify if the generator's second inputs should be used multiple times with different first inputs. Note: this doubles data set size!"
        )

        # task modifications
        self._parser.add_argument(
            "--conditioned-discriminator",
            type=parse_bool,
            default=False,
            help=
            "Specify if the discriminator should discriminate the combination of generation input+output"
        )
        self._parser.add_argument(
            "--inpainting",
            type=parse_bool,
            default=False,
            help="Specify if the generator should perform image inpainting")
        self._parser.add_argument(
            "--real-image-noise-stdev",
            type=float,
            default=0.0,
            help=
            "The stdev of the noise that should be added to the real images for the discriminator"
        )

        # augmentation, test data
        self._parser.add_argument(
            "--augmentation-flip-lr",
            type=parse_bool,
            default=False,
            help=
            "Specify as true if the data should be augmented by L-R-flipped images"
        )
        self._parser.add_argument(
            "--augmentation-flip-ud",
            type=parse_bool,
            default=False,
            help=
            "Specify as true if the data should be augmented by U-D-flipped images"
        )
        self._parser.add_argument(
            "--test-data-percentage",
            type=float,
            default=0.0,
            help=
            "If no test data is available, use the specified percentage of training data as a test set"
        )
        self._parser.add_argument(
            "--online-augmentation",
            type=parse_bool,
            default=False,
            help=
            "Specify as true if the data should be augmented on the fly - slower and less shuffled"
        )

        # final experiments
        self._parser.add_argument(
            "--final-experiment",
            type=parse_bool,
            default=False,
            help="Specify as true if this is a final experiment")
        self._parser.add_argument(
            "--keep-all-checkpoints",
            type=parse_bool,
            default=False,
            help="Specify as true if all checkpoints should be kept")
        self._parser.add_argument(
            "--keep-final-checkpoints",
            type=parse_bool,
            default=False,
            help=
            "Specify as true if checkpoints of the last 5 epochs should be kept"
        )
        self._parser.add_argument(
            "--scores-every-epoch",
            type=parse_bool,
            default=False,
            help=
            "Specify as true if the full scores should be calculated every epoch"
        )

        directories = self._parser.add_argument_group("Directories")
        directories.add_argument("--data-dir",
                                 type=str,
                                 help="Directory containing the data set")
        directories.add_argument(
            "--second-data-dir",
            type=str,
            help=
            "Directory containing the second input data set, if different from the main input data dir"
        )
        directories.add_argument(
            "--target-data-dir",
            type=str,
            help=
            "Directory containing the targer data set, if different from the input data dir"
        )

        types = self._parser.add_argument_group("Input/output types")
        types.add_argument("--input-type",
                           type=str,
                           default="noise",
                           choices=list(data_subdirs.keys()) + ["noise"],
                           help="The type of the input for the generation")
        types.add_argument(
            "--second-input-type",
            type=str,
            choices=list(data_subdirs.keys()) + ["noise"],
            help="The type of the secondary input for the generation")
        types.add_argument("--target-type",
                           type=str,
                           default="image",
                           choices=data_subdirs.keys(),
                           help="The type of the target of the generation")
        types.add_argument("--match-pattern",
                           type=str,
                           default=None,
                           help="Pattern for files to match")

        # one weight and one power argument per individual generator loss
        losses = self._parser.add_argument_group("Loss weights")
        for loss in Model.all_individual_losses:
            losses.add_argument(
                "--loss-{}".format(loss.replace("_", "-")),
                type=int,
                default=0,
                help="The weight of the {} loss for the generator.".format(
                    loss))
            losses.add_argument(
                "--loss-{}-power".format(loss.replace("_", "-")),
                type=int,
                default=1,
                help="The power of the {} loss for the generator.".format(
                    loss))

        return vars(self._parser.parse_args())

    def _add_file_config_to_args(self, config_args, config_file):
        """Merge *config_file* values into *config_args* (parsed CLI args).

        The parsed args are the primary container: a file value is applied
        only when the corresponding CLI argument is still at its default.
        Unknown file keys are logged and skipped.
        """
        for file_key, file_value in config_file.items():
            key = file_key.replace("-", "_")
            if key not in config_args:
                tf.logging.error(
                    "Option '{}' ({}) from config file isn't a valid argument!"
                    .format(file_key, key))
                continue

            value = config_args[key]
            if self._parser.get_default(key) != value:
                # CLI explicitly overrode this option; the file loses.
                tf.logging.warning(
                    "Ignoring option '{}={}' from config file as it's configured from the args as '{}'"
                    .format(key, file_value, value))
                continue

            tf.logging.debug("Setting '{}={}' from config file".format(
                key, file_value))
            config_args[key] = file_value

    def _add_derived_entries(self):
        """Compute attributes derived from the raw options (flags, loss totals, paths)."""
        if self.final_experiment:
            # final experiments force checkpoint retention and per-epoch scoring
            tf.logging.error(
                "This is a final experiment, setting flags accordingly!")
            for arg, value in zip([
                    "keep_all_checkpoints", "keep_final_checkpoints",
                    "scores_every_epoch"
            ], [True, True, True]):
                tf.logging.warning("Setting {}={} (was: {})".format(
                    arg, value, self.__dict__[arg]))
                self.__dict__[arg] = value

        self.has_colored_input = self.input_type == "image"
        self.has_colored_second_input = self.second_input_type == "image"
        self.has_colored_target = self.target_type == "image"
        self.has_noise_input = self.input_type == "noise"

        # sum of absolute loss weights; if no loss was configured, fall back
        # to a pure adversarial loss with weight 1
        total_loss_weight = np.sum(
            np.abs([
                self.__dict__["loss_{}".format(loss)]
                for loss in Model.all_individual_losses
            ]))
        if total_loss_weight == 0:
            self.loss_adversarial = 1
            self.total_loss_weight = 1
        else:
            self.total_loss_weight = float(total_loss_weight)
        if self.loss_identity:
            assert self.has_colored_input and self.has_colored_target, "both domains need same dimensions"
        assert self.loss_adversarial > 0, "you probably didn't want to disable the adversarial loss"

        # by default, the disc has one output class
        self.discriminator_classes = 1

        # set up files and directories; "-TEST" siblings are used when present
        test_data_dir = self.data_dir + "-TEST"
        self.test_data_dir = test_data_dir if os.path.exists(
            os.path.join("data", test_data_dir)) else None
        test_second_data_dir = (self.second_data_dir or "/dev/null") + "-TEST"
        self.test_second_data_dir = test_second_data_dir if os.path.exists(
            os.path.join("data", test_second_data_dir)) else None
        test_target_data_dir = (self.target_data_dir or "/dev/null") + "-TEST"
        self.test_target_data_dir = test_target_data_dir if os.path.exists(
            os.path.join("data", test_target_data_dir)) else None
        assert not (bool(self.test_data_dir) and bool(self.test_data_percentage)), \
            "Shouldn't use training data for testing when test data is available"
        self.output_dir = os.path.join("output", self.eid)
        self.checkpoint_dir = os.path.join(self.output_dir, "checkpoints")
        if self.keep_final_checkpoints:
            self.final_checkpoint_dir = os.path.join(self.output_dir,
                                                     "final-checkpoints")
        self.samples_dir = os.path.join(self.output_dir, "samples")
        self.tensorboard_dir = os.path.join(self.output_dir, "tensorboard")
        self.figures_dir = os.path.join(self.output_dir, "figures")
        self.hq_figures_dir = os.path.join(self.output_dir, "figures-hq")
        self.gradients_dir = os.path.join(self.output_dir, "gradients")
        # os.makedirs(..., exist_ok=True) would be equivalent; kept explicit
        if not os.path.exists(self.figures_dir):
            os.makedirs(self.figures_dir)
        if not os.path.exists(self.hq_figures_dir):
            os.makedirs(self.hq_figures_dir)
        if not os.path.exists(self.gradients_dir):
            os.makedirs(self.gradients_dir)

    def _cleanup(self):
        """Archive the config file into the output dir and drop helper attributes."""
        if self._config_file:
            if self.eid == "foo":
                # "foo" appears to be a scratch/debug eid -- keep the original file
                tf.logging.warning("Only copying the config file")
                shutil.copy(self._config_file,
                            os.path.join(self.output_dir, self._config_file))
            else:
                shutil.move(self._config_file,
                            os.path.join(self.output_dir, self._config_file))
        del self._config_file
        del self._parser

    def setup(self):
        """Load, merge and finalize the configuration (call exactly once)."""
        # load config from args and file
        config_args = self._load_args()
        config_file = self._load_config_file()

        # combine and apply config
        self._add_file_config_to_args(config_args, config_file)
        self.__dict__.update(config_args)

        # prepare additional stuff
        self._add_derived_entries()
        self._cleanup()
Пример #31
0
def prompt_for_new_plot_args(
    *,
    old_argv: List[str],
    old_args: argparse.Namespace,
    parser: argparse.ArgumentParser,
) -> Tuple[List[str], argparse.Namespace]:
    """Given some old arguments, prompts for new ones and returns a new
    list of argument values and the parsed argparse namespace result.

    The previous (editable) arguments are pre-inserted into the prompt via
    a readline startup hook so the user can amend rather than retype them.

    Args:
        old_argv (List[str]): The old argument value list
        old_args (argparse.Namespace): The old set of parsed arguments
        parser (argparse.ArgumentParser): The parser to use for evaluating the
            newly specified argument value list

    Returns:
        Tuple[List[str], argparse.Namespace]: The new argument value list and
            the parsed argument namespace.

    Raises:
        ValueError: If any disallowed (meta-configuration) argument was given.
    """
    # Specify those arguments that may not be given in the prompt
    DISALLOWED_ARGS = ('run_cfg_path', 'run_dir_path', 'set_cfg',
                       'cluster_mode', 'suppress_data_tree', 'full_data_tree')

    # Create a new argument list for querying the user. For that, remove
    # those entries from the argvs that are meant to be in the query.
    prefix_argv = ('--interactive', old_args.model_name)
    to_query = [arg for arg in old_argv if arg not in prefix_argv]
    to_query_str = " ".join(to_query) + (" " if to_query else "")

    # Now, setup the startup hook with a callable that inserts those
    # arguments that shall be editable by the user. Configure readline to
    # allow tab completion for file paths after certain delimiters.
    readline.set_startup_hook(lambda: readline.insert_text(to_query_str))
    readline.parse_and_bind("tab: complete")
    readline.set_completer_delims(' \t\n=')

    # Generate the prompt and store the result, stripping whitespace
    prompt_str = ("\n{ansi.CYAN}${ansi.MAGENTA} "
                  "utopia eval -i {}"
                  "{ansi.RESET} ".format(old_args.model_name, ansi=ANSIesc))
    input_res = input(prompt_str).strip()
    print("")

    # Reset the startup hook to do nothing
    readline.set_startup_hook()

    # Prepare the new list of argument values.
    add_argv = input_res.split(' ') if input_res else []
    new_argv = list(prefix_argv) + add_argv

    # ... and parse it to the eval subparser.
    new_args = parser.parse_args(new_argv)
    # NOTE This may raise SystemExit upon the --help argument or other
    #      arguments that are not properly parsable.

    # Check that bad arguments were not used: any disallowed dest whose
    # parsed value deviates from the parser default must have been given.
    bad_args = [
        arg for arg in DISALLOWED_ARGS
        if getattr(new_args, arg) != parser.get_default(arg)
    ]
    if bad_args:
        print("{ansi.RED}During interactive plotting, arguments that are used "
              "to update the Multiverse meta-configuration cannot be used!"
              "{ansi.RESET}".format(ansi=ANSIesc))
        print("{ansi.DIM}Remove the offending argument{} ({}) and try again. "
              "Consult --help to find out the available plotting-related "
              "arguments."
              "{ansi.RESET}".format("s" if len(bad_args) != 1 else "",
                                    ", ".join(bad_args),
                                    ansi=ANSIesc))
        raise ValueError("Cannot specify arguments that are used for updating "
                         "the (already-in-use) meta-configuration of the "
                         "current Multiverse instance. Disallowed arguments: "
                         "{}".format(", ".join(DISALLOWED_ARGS)))

    return new_argv, new_args
Пример #32
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register the task- and preprocessing-related CLI arguments."""
        # NOTE(kamo): Use '_' instead of '-' to avoid confusion
        assert check_argument_types()

        task_group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        # (mutates the parser's "required" default list in place)
        parser.get_default("required").append("token_list")

        task_group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        task_group.add_argument(
            "--init",
            type=lambda s: str_or_none(s.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        task_group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetLanguageModel),
            help="The keyword arguments for model class.",
        )

        preprocess_group = parser.add_argument_group(
            description="Preprocess related")
        preprocess_group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=False,
            help="Apply preprocessing to data or not",
        )
        preprocess_group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word"],
            help="",
        )
        preprocess_group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file fo sentencepiece",
        )
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )

        # Append --<name> and --<name>_conf for each class choice,
        # e.g. --encoder and --encoder_conf
        for class_choices in cls.class_choices_list:
            class_choices.add_arguments(preprocess_group)

        assert check_return_type(parser)
        return parser
Пример #33
0
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        """Register task-, preprocessing- and text-cleaning-related CLI
        arguments on *parser* and return it.

        Also marks --token_list as required by appending to the parser's
        "required" default list.
        """
        # NOTE(kamo): Use '_' instead of '-' to avoid confusion
        assert check_argument_types()
        group = parser.add_argument_group(description="Task related")

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        # (the returned default list is mutated in place)
        required = parser.get_default("required")
        required += ["token_list"]

        group.add_argument(
            "--token_list",
            type=str_or_none,
            default=None,
            help="A text mapping int-id to token",
        )
        group.add_argument(
            "--init",
            type=lambda x: str_or_none(x.lower()),
            default=None,
            help="The initialization method",
            choices=[
                "chainer",
                "xavier_uniform",
                "xavier_normal",
                "kaiming_uniform",
                "kaiming_normal",
                None,
            ],
        )
        group.add_argument(
            "--model_conf",
            action=NestedDictAction,
            default=get_default_kwargs(ESPnetLanguageModel),
            help="The keyword arguments for model class.",
        )

        group = parser.add_argument_group(description="Preprocess related")
        group.add_argument(
            "--use_preprocessor",
            type=str2bool,
            default=True,
            help="Apply preprocessing to data or not",
        )
        group.add_argument(
            "--token_type",
            type=str,
            default="bpe",
            choices=["bpe", "char", "word"],
            help="",
        )
        group.add_argument(
            "--bpemodel",
            type=str_or_none,
            default=None,
            help="The model file fo sentencepiece",
        )
        # NOTE(review): the following three are added to the top-level parser
        # rather than the "Preprocess related" group -- presumably only the
        # help layout differs; confirm before regrouping.
        parser.add_argument(
            "--non_linguistic_symbols",
            type=str_or_none,
            help="non_linguistic_symbols file path",
        )
        parser.add_argument(
            "--cleaner",
            type=str_or_none,
            choices=[None, "tacotron", "jaconv", "vietnamese"],
            default=None,
            help="Apply text cleaning",
        )
        parser.add_argument(
            "--g2p",
            type=str_or_none,
            choices=[
                None,
                "g2p_en",
                "g2p_en_no_space",
                "pyopenjtalk",
                "pyopenjtalk_kana",
                "pyopenjtalk_accent",
                "pyopenjtalk_accent_with_pause",
                "pypinyin_g2p",
                "pypinyin_g2p_phone",
                "espeak_ng_arabic",
                "espeak_ng_german",
                "espeak_ng_french",
                "espeak_ng_spanish",
                "espeak_ng_russian",
                "g2pk",
                "g2pk_no_space",
            ],
            default=None,
            help="Specify g2p method if --token_type=phn",
        )

        for class_choices in cls.class_choices_list:
            # Append --<name> and --<name>_conf.
            # e.g. --encoder and --encoder_conf
            class_choices.add_arguments(group)

        assert check_return_type(parser)
        return parser