Example #1
def main():
    """
    The main entry point, as specified in the ``setup.py`` file. Adds commands
    from other subsidiary entry points (specified in the ``commands`` variable
    above) and then uses ``argh.dispatch()`` to start the process.

    The ``RuntimeStateConfig()`` object is created here and handed to the parser
    as the object that will receive all command line data, rather than using a
    standard argparse namespace object. This allows all runtime argument parsing
    to happen inside of these config objects rather than spread among all of the
    entry points.

    This function catches and recovers from :exc:`KeyboardInterrupt`, which means
    that it doesn't dump a stack trace following a Control-C.
    """

    parser = get_base_parser()

    for namespace, entry_points in commands.items():
        if namespace == 'main':
            argh.add_commands(parser, entry_points)
        else:
            argh.add_commands(parser, entry_points, namespace=namespace)

    args = RuntimeStateConfig()
    try:
        argh.dispatch(parser, namespace=args)
    except KeyboardInterrupt:
        logger.error('operation interrupted by user.')
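The custom-namespace pattern the docstring describes is easy to reproduce in isolation. A minimal sketch, assuming the pre-0.30 argh API used throughout these examples (where ``dispatch()`` still accepts a ``namespace`` argument); the ``RuntimeConfig`` class and ``greet`` command are invented for illustration:

import argh

class RuntimeConfig(object):
    """Stands in for the argparse namespace; argparse assigns every parsed
    value as an attribute here, so properties and setters can validate."""

def greet(name='world'):
    return 'hello ' + name

parser = argh.ArghParser()
argh.add_commands(parser, [greet])

# dispatch() parses into the config object and prints the command's output
argh.dispatch(parser, argv=['greet', '--name', 'argh'],
              namespace=RuntimeConfig())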
Example #3
def main():
    signal.signal(signal.SIGINT, exit_gracefully)
    parser = argh.ArghParser()
    parser.add_commands(one_level_commands)
    for command in two_level_commands:
        argh.add_commands(parser, command.subcommands(), namespace=command.namespace(), help=command.help_message())
    parser.dispatch()
Example #4
def main():
    parser = argh.ArghParser()
    parser.add_commands(one_level_commands)
    for command in two_level_commands:
        argh.add_commands(parser, command.subcommands(),
                          namespace=command.namespace(), help=command.help_message())
    parser.dispatch()
Example #5
def main():
    log.debug(' '.join(sys.argv))
    parser = argh.helpers.ArghParser()
    argh.add_commands(parser, [print_installation_commands, install, install_full, 
                               list_data_sources, list_output, log_info, copy_files, 
                               version])
    argh.dispatch(parser)
Example #6
def add_commands(arg_parser, functions, config, section='DEFAULT',
                 prefix=False, **kwargs):
    """
    Adds given functions as commands to given parser and also
    sets default values from given config.

    Parameters
    ----------
    arg_parser : ArghParser or ArgumentParser
        argument parser
    functions : list
        list of functions
    config : ConfigParser
        configuration, as read from config file(s)
    section : str
        section in configuration
    prefix : bool
        prefix each argument with the function name to create a unique namespace

    Notes
    -----
    The current version fails with a variable number of parameters (*args)
    or keyword arguments (**kwargs).
    """
    _inject_defaults(functions, config, section, prefix)
    argh.add_commands(arg_parser, functions, **kwargs)
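``_inject_defaults`` is defined elsewhere; a minimal sketch of what such a helper could look like, assuming config values simply overwrite the functions' default argument values (so argh later surfaces them as CLI defaults). This is a guess at the mechanism, not the original code:

def _inject_defaults(functions, config, section='DEFAULT', prefix=False):
    for func in functions:
        code = func.__code__
        arg_names = code.co_varnames[:code.co_argcount]
        defaults = list(func.__defaults__ or ())
        # defaults are right-aligned against the argument names
        named = arg_names[len(arg_names) - len(defaults):]
        for i, name in enumerate(named):
            key = '%s_%s' % (func.__name__, name) if prefix else name
            if config.has_option(section, key):
                defaults[i] = config.get(section, key)
        func.__defaults__ = tuple(defaults)

Because it inspects ``co_argcount``, an approach like this indeed breaks down for *args and **kwargs, which matches the caveat in the docstring above.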
Example #7
def main():
    parser = argparse.ArgumentParser(
        description='Tools for %s.' % ssh.HOSTNAME,
    )
    argh.add_commands(parser, [update, proxy])
    git.namespace.add_subcommands(parser)
    argh.dispatch(parser)
Example #8
    def add_subcommands(self, parser):
        argh.add_commands(
            parser,
            self.commands,
            namespace=self.name,
            namespace_kwargs=self.parser_kwargs
        )
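A note on ``namespace_kwargs``: in the argh releases these examples appear to target, that dict is forwarded to the ``add_parser()`` call that creates the namespace's own sub-parser, making it the place to attach things like the namespace's help text. (This description is based on argh's documented behavior, not on the snippet above.)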
Example #9
def main():
    """
    Set up the context and connectors
    """
    try:
        init()
    except custom_exceptions.NotConfigured:
        configure()
        init()
    # Adding this in case users are trying to run without adding a jira url.
    # I would like to take this out in a release or two.
    # TODO: REMOVE
    except (AttributeError, ConfigParser.NoOptionError):
        logging.error('It appears that your configuration is invalid, please reconfigure the app and try again.')
        configure()
        init()

    parser = argparse.ArgumentParser()

    # Now simply auto-discovering the methods listed in this module
    current_module = sys.modules[__name__]
    module_methods = [getattr(current_module, a, None) for a in dir(current_module)
                      if isinstance(getattr(current_module, a, None), types.FunctionType)
                      and a != 'main']
    argh.add_commands(parser, module_methods)

    # Putting the error logging after the app is initialized because
    # we want to adhere to the user's preferences
    try:
        argh.dispatch(parser)
    # We don't want to report keyboard interrupts to rollbar
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as e:
        if isinstance(e, jira.exceptions.JIRAError) and "HTTP 400" in str(e):
            logging.warning('It appears that your authentication with {0} is invalid. Please re-configure jtime: `jtime configure` with the correct credentials'.format(configuration.load_config()['jira'].get('url')))
        elif configured.get('jira').get('error_reporting', True):
            # Configure rollbar so that we report errors
            import rollbar
            from . import __version__ as version
            root_path = os.path.dirname(os.path.realpath(__file__))
            rollbar.init('7541b8e188044831b6728fa8475eab9f', 'v%s' % version, root=root_path)
            logging.error('Sorry. It appears that there was an error when handling your command. '
                          'This error has been reported to our error tracking system. To disable '
                          'this reporting, please re-configure the app: `jtime config`.')
            extra_data = {
                # grab the command that we're running
                'cmd': sys.argv[1],
                # we really don't want to see jtime in the args
                'args': sys.argv[2:],
                # let's grab anything useful; Python version?
                'python': str(sys.version),
            }
            # We really shouldn't hit this line of code when running tests, so let's not cover it.
            rollbar.report_exc_info(extra_data=extra_data)  # pragma: no cover
        else:
            logging.error('It appears that there was an error when handling your command.')
            raise
Example #11
def main():
    parser = get_base_parser()

    commands = [
        mongo_to_po,
        po_to_mongo,
        verifier,
    ]
    argh.add_commands(parser, commands)
    args = RuntimeStateConfig()

    argh.dispatch(parser, namespace=args)
Example #12
    def add_to(cls, manager):
        '''
        Add all public instance methods of this class to the given Manager
        as commands.
        '''

        commands = cls(manager)
        argh.add_commands(manager.parser, [
            getattr(commands, name) for name in dir(commands)
            if not name.startswith('_')
            and is_instance_method(getattr(commands, name))
        ])
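``is_instance_method`` is a helper defined elsewhere; a plausible minimal version, assuming bound methods are what should qualify:

import inspect

def is_instance_method(obj):
    # bound methods only; plain functions and other attributes are filtered out
    return inspect.ismethod(obj)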
Example #15
def main():
    """
    The main entry point, as specified in the ``setup.py`` file. Adds commands
    from other subsidiary entry points (specified in the ``commands`` variable
    above) and then uses ``argh.dispatch()`` to start the process.

    The ``DaggerConfig`` object allows the application to delegate all
    argument-parsing validation to setters and getters on the object where
    arg(h)parse stores the configuration data.

    This function catches and recovers from :exc:`KeyboardInterrupt`, which means
    that it doesn't dump a stack trace following a Control-C.
    """

    parser = get_base_parser()

    for namespace, entry_points in commands.items():
        if namespace == 'main':
            argh.add_commands(parser, entry_points)
        else:
            argh.add_commands(parser, entry_points, namespace=namespace)

    args = dagger.config.cli.DaggerCliConfig()

    # set the logging level early to ensure logging is configured during
    # argument parsing.
    args.level = "info"

    # run the command, catching user-interruption and common error types directly.
    try:
        argh.dispatch(parser, namespace=args)
    except KeyboardInterrupt:
        logger.error('operation interrupted by user.')
        exit(1)
    except RuntimeError:
        logger.error("exiting due to a runtime error")
        exit(1)
    except (ValueError, TypeError, AttributeError) as e:
        # catch data access and validation errors, and, in common operation,
        # suppress the traceback, unless logging at debug level.

        logger.info("error: {0}, type: {1}".format(e, type(e)))
        tb = traceback.format_exc()
        err = libgiza.error.Error(
            message=("encountered data validation or access "
                     "error during normal operation."),
            fatal=True,
            include_trace=True)
        err.payload = {"type": type(e), "error": e, "trace": tb}
        logger.debug(err.render_output())
        logger.debug("exception traceback: \n" + tb)
        exit(1)
Example #16
def main():
    parser = get_base_parser()

    commands = [mine, stats, actions, setup]

    argh.add_commands(parser, commands)

    args = GithubRuntimeConfig()

    if args.level == 'info':
        args.level = 'warning'

    argh.dispatch(parser, namespace=args)
Example #18
def main():
    parser = get_base_parser()

    commands = [setup, setup_credential_file, config, progress, triage, make_versions, mirror_version, release]

    argh.add_commands(parser, commands)

    args = JeerahRuntimeStateConfig()

    if args.level == "info":
        args.level = "warning"

    argh.dispatch(parser, namespace=args)
Example #20
def main():
    parser = get_base_parser()

    for namespace, entry_points in commands.items():
        if namespace == 'main':
            argh.add_commands(parser, entry_points)
        else:
            argh.add_commands(parser, entry_points, namespace=namespace)

    args = RuntimeStateConfig()
    try:
        argh.dispatch(parser, namespace=args)
    except KeyboardInterrupt:
        logger.error('operation interrupted by user.')
Example #23
def main():
    parser = get_base_parser()

    commands = [mine, stats, actions, setup]

    argh.add_commands(parser, commands)

    args = GithubRuntimeConfig()

    if args.level == 'info':
        args.level = 'warning'

    if args.runner == 'process':
        logger.warning('this operation does not support multiprocessing, falling back to threads')
        args.runner = 'thread'
    argh.dispatch(parser, namespace=args)
Example #25
def main(root_pkg, argv=None):
    """Invokes module functions in :mod:`pykern.pkcli`

    Looks in ``<root_pkg>.pkcli`` for the ``argv[1]`` module. It then
    invokes the ``argv[2]`` method of that module.

    Args:
        root_pkg (str): top level package name
        argv (list of str): Defaults to `sys.argv`. Only used for testing.

    Returns:
        int: 0 if ok. 1 if error (missing command, etc.)
    """
    pkconfig.append_load_path(root_pkg)
    if not argv:
        argv = list(sys.argv)
    prog = os.path.basename(argv.pop(0))
    if _is_help(argv):
        return _list_all(root_pkg, prog)
    module_name = argv.pop(0)
    cli = _module(root_pkg, module_name)
    if not cli:
        return 1
    prog = prog + ' ' + module_name
    parser = argparse.ArgumentParser(
        prog=prog, formatter_class=argh.PARSER_FORMATTER)
    cmds = _commands(cli)
    dc = _default_command(cmds, argv)
    if dc:
        argh.set_default_command(parser, dc)
    else:
        argh.add_commands(parser, cmds)
        if len(argv) < 1:
            # Python 3: parser doesn't exit if not enough commands
            parser.error('too few arguments')
        if argv[0][0] != '-':
            argv[0] = _module_to_cmd(argv[0])
    from pykern.pkdebug import pkdp
    try:
        res = argh.dispatch(parser, argv=argv)
    except CommandError as e:
        sys.stderr.write('error: {}\n'.format(e))
        return 1
    return 0
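A console-script entry point then only needs to supply the root package name. A hypothetical wiring, assuming the function above is importable as ``pykern.pkcli.main`` (the ``mytool`` name and ``projex`` module are invented for illustration):

import sys
from pykern import pkcli

if __name__ == '__main__':
    # e.g. ``mytool projex init`` resolves <root_pkg>.pkcli.projex and
    # dispatches its ``init`` command; main() returns 0 or 1
    sys.exit(pkcli.main('mytool'))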
Example #26
def main():
    errors = StringIO()
    parser = argh.ArghParser()
    argh.add_commands(parser, [
        source,
        run,
        run_piped,
        list_registered,
        source_registered,
        source_named,
        source_def,
        source_inline,
        update_env
    ])
    argh.add_commands(parser,
                      functions=DaemonCommands.commands(),
                      namespace=DaemonCommands.namespace,
                      title=DaemonCommands.__doc__)
    argh.dispatch(parser, completion=False, errors_file=errors)
    if errors.getvalue():
        sys.exit(errors.getvalue().strip())
Example #27
def main():
    github_subcommands = [
        migration.create_repositories,
        migration.delete_repositories,
        migration.edit_repositories,
        migration.import_repositories,
        migration.import_attachments,
        migration.import_issues,
        migration.import_issues_for_project
    ]

    ow2_subcommands = [
        migration.clone_repositories,
        migration.gc_repositories,
        migration.prune_repositories
    ]

    parser = argparse.ArgumentParser()
    argh.add_commands(parser, github_subcommands, namespace='github')
    argh.add_commands(parser, ow2_subcommands, namespace='ow2')
    argh.dispatch(parser)
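The two ``namespace`` arguments produce a one-level-deep command tree: assuming the script is installed as ``migrate``, invocations would look like ``migrate github import-issues`` or ``migrate ow2 clone-repositories`` (argh converts the underscores in function names to hyphens).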
Example #28
def main(root_pkg, argv=None):
    """Invokes module functions in :mod:`pykern.pykern_cli`

    Looks in ``<root_pkg>.pykern_cli`` for the ``argv[1]`` module. It then
    invokes the ``argv[2]`` method of that module.

    Args:
        root_pkg (str): top level package name
        argv (list of str): Defaults to `sys.argv`. Only used for testing.

    Returns:
        int: 0 if ok. 1 if error (missing command, etc.)
    """
    pkconfig.append_load_path(root_pkg)
    if not argv:
        argv = list(sys.argv)
    prog = os.path.basename(argv.pop(0))
    if _is_help(argv):
        return _list_all(root_pkg, prog)
    module_name = argv.pop(0)
    cli = _module(root_pkg, module_name)
    if not cli:
        return 1
    prog = prog + ' ' + module_name
    parser = argparse.ArgumentParser(
        prog=prog, formatter_class=argh.PARSER_FORMATTER)
    cmds = _commands(cli)
    dc = _default_command(cmds, argv)
    if dc:
        argh.set_default_command(parser, dc)
    else:
        argh.add_commands(parser, cmds)
        if len(argv) < 1:
            # Python 3: parser doesn't exit if not enough commands
            parser.error('too few arguments')
        if argv[0][0] != '-':
            argv[0] = argv[0].replace('_', '-')
    argh.dispatch(parser, argv=argv)
    return 0
Example #30
    ei = np.array(ei) - e_min

    #e_min = min(ei_)
    #ei_ = np.array(ei_) - e_min

    plt.figure()   
    plt.ylabel(r'$Total$ $Energy$ ($eV$)')
    plt.xlabel(r'$MD$ $Step$')
    # plt.xlim(0,i)
    # plt.ylim(0,np.max(hist)+0.01)

    plt.plot(ei,alpha=0.9,
             linestyle='-',# marker='o',markerfacecolor='k',markersize=5,
             color='k',label='ReaxFF-MPNN')

    # plt.plot(v,ei,alpha=0.9,
    #          linestyle='-',marker='^',markerfacecolor='none',
    #          markeredgewidth=1,markeredgecolor='blue',markersize=5,
    #          color='blue',label='IRFF')
    # plt.text( 0.0, e_max, '%.3f' %e_min, fontdict={'size':10.5, 'color': 'k'})
    plt.legend(loc='best',edgecolor='yellowgreen') # lower left upper right
    plt.savefig('Energy.pdf',transparent=True) 
    plt.close() 


if __name__ == '__main__':
    ''' use a command like ./cp.py scale-md --T=2800 to run it '''
    parser = argparse.ArgumentParser()
    argh.add_commands(parser, [e])
    argh.dispatch(parser)
Example #31
    batch_size = 8
    x, y = tf.train.shuffle_batch([image, label], batch_size=batch_size, capacity=batch_size * 64,
                                  min_after_dequeue=batch_size * 32, allow_smaller_final_batch=False)
    logger.debug('x shape: {}, y shape: {}'.format(x.get_shape(), y.get_shape()))

    # initialize all ops
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(init)
        # start the queue runners
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(2):
            val, l = sess.run([x, y])
            # l = to_categorical(l, 12)
            print(val, l)
        coord.join()

    logger.debug('Test read tf record Succeed')


parser = argparse.ArgumentParser()
argh.add_commands(parser, [tfrecord, test])

if __name__ == "__main__":
    argh.dispatch(parser)
Example #32
    buf = ExampleBuffer(positions, samples_per_game=samples_per_game)
    files = []
    for _, model in sorted(models, reverse=True):
        local_model_dir = os.path.join(local_dir, model)
        if not tf.gfile.Exists(local_model_dir):
            print("Rsyncing", model)
            _rsync_dir(os.path.join(game_dir, model), local_model_dir)
        files.extend(tf.gfile.Glob(os.path.join(local_model_dir, '*.zz')))
        print("{}: {} games".format(model, len(files)))
        if len(files) * samples_per_game > positions:
            break

    print("Filling from {} files".format(len(files)))

    buf.parallel_fill(files, threads=threads)
    print(buf)
    output = os.path.join(output_dir, str(model_num) + '.tfrecord.zz')
    print("Writing to", output)
    buf.flush(output)


parser = argparse.ArgumentParser()
argh.add_commands(
    parser,
    [fill_and_wait_models, fill_and_wait_time, smart_rsync, make_chunk_for])

if __name__ == "__main__":
    import sys
    remaining_argv = flags.FLAGS(sys.argv, known_only=True)
    argh.dispatch(parser, argv=remaining_argv[1:])
Example #33
        if TRAINING_CHUNK_RE.match(fname)]
    if read_file is not None:
        read_file = os.path.join(os.getcwd(), read_file)
    n = PolicyNetwork()
    n.initialize_variables(read_file)
    if logdir is not None:
        n.initialize_logging(logdir)
    last_save_checkpoint = 0
    for i in range(epochs):
        random.shuffle(train_chunk_files)
        for file in train_chunk_files:
            print("Using %s" % file)
            with timer("load dataset"):
                train_dataset = DataSet.read(file)
            with timer("training"):
                n.train(train_dataset)
            with timer("save model"):
                n.save_variables(save_file)
            if n.get_global_step() > last_save_checkpoint + checkpoint_freq:
                with timer("test set evaluation"):
                    n.check_accuracy(test_dataset)
                last_save_checkpoint = n.get_global_step()



parser = argparse.ArgumentParser()
argh.add_commands(parser, [gtp, preprocess, train])

if __name__ == '__main__':
    argh.dispatch(parser)
Example #34
        already_processed = set()

    num_already_processed = len(already_processed)

    for model_name, record_files in sorted(model_gamedata.items()):
        if set(record_files) <= already_processed:
            continue
        print("Gathering files for %s:" % model_name)
        for i, example_batch in enumerate(
                tqdm(preprocessing.shuffle_tf_examples(examples_per_record, record_files))):
            output_record = os.path.join(output_directory,
                                         '{}-{}.tfrecord.zz'.format(model_name, str(i)))
            preprocessing.write_tf_examples(
                output_record, example_batch, serialize=False)
        already_processed.update(record_files)

    print("Processed %s new files" %
          (len(already_processed) - num_already_processed))
    with gfile.GFile(meta_file, 'w') as f:
        f.write('\n'.join(sorted(already_processed)))
    qmeas.stop_time('gather')


parser = argparse.ArgumentParser()
argh.add_commands(parser, [gtp, bootstrap, train,
                           selfplay, gather, evaluate, validate])

if __name__ == '__main__':
    cloud_logging.configure()
    argh.dispatch(parser)
Example #35
def main():
    parser = get_base_parser()

    commands = [
        render_config,
        clean,
        sphinx,
        deploy, push
    ]
    argh.add_commands(parser, commands)

    git_commands = [
        giza.operations.git.apply_patch,
        giza.operations.git.pull_rebase,
        giza.operations.git.cherry_pick,
    ]
    argh.add_commands(parser, git_commands, namespace='git')

    generate_commands = [
        giza.operations.generate.api,
        giza.operations.generate.assets,
        giza.operations.generate.images,
        giza.operations.generate.intersphinx,
        giza.operations.generate.options,
        giza.operations.generate.primer,
        giza.operations.generate.steps,
        giza.operations.generate.tables,
        giza.operations.generate.toc,
    ]
    argh.add_commands(parser, generate_commands, namespace='generate')

    include_commands = [
        giza.operations.includes.recursive,
        giza.operations.includes.changed,
        giza.operations.includes.once,
        giza.operations.includes.unused,
        giza.operations.includes.list,
        giza.operations.includes.graph,
        giza.operations.includes.clean,
    ]
    argh.add_commands(parser, include_commands, namespace='includes')

    packaging_commands = [
        giza.operations.packaging.fetch,
        giza.operations.packaging.unwind,
        giza.operations.packaging.create,
        giza.operations.packaging.deploy,
    ]
    argh.add_commands(parser, packaging_commands, namespace='package')

    translation_commands = [
        giza.operations.tx.check_orphaned,
        giza.operations.tx.update_translations,
        giza.operations.tx.pull_translations,
        giza.operations.tx.push_translations,
    ]
    argh.add_commands(parser, translation_commands, namespace='tx')

    args = RuntimeStateConfig()
    argh.dispatch(parser, namespace=args)
Example #36
    write_gulp_in(A,
                  runword='md qiterative conv',
                  T=T,
                  time_step=time_step,
                  tot_step=tot_step,
                  lib='reax')
    print('\n-  running gulp nvt ...')
    system('gulp<inp-gulp>gulp.out')
    xyztotraj('his.xyz', mode=mode)


def x(mode='w'):
    xyztotraj('his.xyz', mode=mode)


def pm(gen='md.traj', index=-1):
    ''' pressMol '''
    A = read(gen, index=index)
    cell = A.get_cell()
    print(cell)
    A = press_mol(A)
    A.write('poscar.gen')
    del A


if __name__ == '__main__':
    ''' use a command like ./cp.py scale-md --T=2800 to run it '''
    parser = argparse.ArgumentParser()
    argh.add_commands(parser, [md, mmd, cmd, opt, traj, nvt, pm, x])
    argh.dispatch(parser)
Example #37

site = make_site(
    filters=filters,
    outpath=outputpath,
    contexts=[
        (r'.*.html', loadAcademyData),
        (r'.*.custom', loadAcademyData),
    ],
    rules=[
        (r'coaching-detail-pages.custom', render_coaching_detail_pages),
        (r'project-detail-pages.custom', render_project_detail_pages),
    ],
    searchpath=searchpath,
    staticpaths=['static', '../data'],
)

manager = Manager(
    site_name='govlabacademy.org',
    site=site,
    sass_src_path=path.join(ROOT_DIR, 'sass', 'styles.scss'),
    sass_dest_path=path.join(searchpath, 'static', 'styles',
                             'styles.css')
)

argh.add_commands(manager.parser, [deploy])


if __name__ == '__main__':
    manager.run()
Example #38
commands.append(pip)


def alembic(*args):
    prepare()

    if len(args) == 1:
        args = shlex.split(args[0])

    binargs = ['alembic'] + list(args)
    os.execvp(binargs[0], binargs)
commands.append(alembic)


def runserver(*args):
    prepare()

    if len(args) == 1:
        args = shlex.split(args[0])

    os.chdir(os.path.join(ROOT_DIR, 'server'))
    binargs = ['python', 'main.py'] + list(args)
    os.execvp(binargs[0], binargs)
commands.append(runserver)


if __name__ == '__main__':
    parser = ArgumentParser()
    add_commands(parser, commands)
    dispatch(parser)
Example #39
        rpcsqa.build_swift_rings(False, management_node, proxy_nodes, storage_nodes, 3)

        print '#' * 60
        print "## Then run chef-client on all nodes in the following order: "
        print "## Management Node: {0}".format(rpcsqa.print_server_info(swift_management))

        for proxy in swift_proxy:
            print "## Swift Proxy Server: {0} ##".format(rpcsqa.print_server_info(proxy))

        for storage in swift_storage:
            print "## Swift Storage Server: {0} ##".format(rpcsqa.print_server_info(storage))
    
    #####################################################################
    # Successful Setup, exit
    #####################################################################

    print '#' * 60
    print "############# Swift Cluster Build Successful ###############"
    print '#' * 60

def test(name='autotest'):
    raise NotImplementedError

def teardown(name='autotest'):
    raise NotImplementedError

if __name__ == "__main__":
    parser = argh.ArghParser()
    argh.add_commands(parser, [build, test, teardown])
    parser.dispatch()
Example #40
    for i in range(start, end):
        atoms = images[i]
        his.write(atoms=atoms)
    his.close()


def collect(traj='siesta.traj', start=0, end=20):
    newt = traj[:-5] + '_.traj'
    # images = Trajectory(traj)

    his = TrajectoryWriter(newt, mode='w')

    cdir = getcwd()
    trajs = listdir(cdir)

    for traj in trajs:
        if traj.find('.traj') > 0 and traj != 'siesta_.traj':
            print('- reading file %s ...' % traj)
            images = Trajectory(traj)
            for i in range(start, end):
                atoms = images[i]
                his.write(atoms=atoms)

    his.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    argh.add_commands(parser, [merge, collect, col])
    argh.dispatch(parser)
Example #41
    )

if __name__ == '__main__':
    context = ReloadingContext()

    site = staticjinja.make_site(
        filters=filters(),
        outpath=_OUTPUTPATH,
        contexts=[
            (r'.*.html', context.get),
            (r'project-detail-pages.custom', context.get),
        ],
        rules=[
            (r'project-detail-pages.custom', render_project_detail_pages)
        ],
        searchpath=_SEARCHPATH,
        staticpaths=['static']
    )

    manager = Manager(
        sass_src_path=path.join(_SASSPATH, 'styles.scss'),
        sass_dest_path=path.join(_SEARCHPATH, 'static', 'styles',
                                 'styles.css'),
        site=site,
        site_name='www.thegovlab.org',
    )
    context.add_to(manager)
    argh.add_commands(manager.parser, [deploy, clean])

    manager.run()
Example #42
    return resize(
        filename,
        dimensions=dimensions,
        format=format,
        quality=quality,
        fill=fill,
        bgcolor=bgcolor,
        upscale=upscale,
        progressive=progressive,
        placeholder=placeholder,
    )


parser = argh.ArghParser()

argh.add_commands(parser, [generate])

argh.add_commands(
    parser,
    [list_cache, list_images],
    namespace='list',
    title="Commands for listing images and cache",
)
argh.add_commands(
    parser,
    [sync_cache],
    namespace='sync',
    title="Commands for syncing data",
)
argh.add_commands(
    parser,
Example #43
    subprocess.check_call('git subtree push --prefix site origin gh-pages',
                          shell=True)


if __name__ == '__main__':
    context = ReloadingContext()

    site = staticjinja.make_site(filters=filters(),
                                 outpath=_OUTPUTPATH,
                                 contexts=[
                                     (r'.*.html', context.get),
                                     (r'project-detail-pages.custom',
                                      context.get),
                                 ],
                                 rules=[(r'project-detail-pages.custom',
                                         render_project_detail_pages)],
                                 searchpath=_SEARCHPATH,
                                 staticpaths=['static'])

    manager = Manager(
        sass_src_path=path.join(_SASSPATH, 'styles.scss'),
        sass_dest_path=path.join(_SEARCHPATH, 'static', 'styles',
                                 'styles.css'),
        site=site,
        site_name='www.thegovlab.org',
    )
    context.add_to(manager)
    argh.add_commands(manager.parser, [deploy, clean])

    manager.run()
Example #44
    # restore all saved weights, except global_step
    meta_graph_def = meta_graph.read_meta_graph_file(load_file + '.meta')
    stored_var_names = set([
        n.name for n in meta_graph_def.graph_def.node if n.op == 'VariableV2'
    ])
    stored_var_names.remove('global_step')
    var_list = [
        v for v in tf.global_variables() if v.op.name in stored_var_names
    ]
    tf.train.Saver(var_list=var_list).restore(sess, load_file)

    # manually set the global step
    global_step_tensor = tf.train.get_or_create_global_step()
    assign_op = tf.assign(global_step_tensor, global_step_value)
    sess.run(assign_op)

    # export a new savedmodel that has the right global step type
    tf.train.Saver().save(sess, dest_file)
    sess.close()
    tf.reset_default_graph()


parser = argparse.ArgumentParser()
argh.add_commands(
    parser,
    [gtp, bootstrap, train, selfplay, gather, evaluate, validate, convert])

if __name__ == '__main__':
    cloud_logging.configure()
    argh.dispatch(parser)
Example #45
    # Get module for reference version
    refVersMod = importlib.import_module(
            f'.v{referenceVersion}', 'CureCompanion.mariadb')

    # Make sure it has a trim function
    if not hasattr(refVersMod, 'trim'):
        raise CommandError('Version ' + referenceVersion + ' does not ' +
            'support the trim operation.')

    # Set default trim version
    if trimVersion is None:
        trimVersion = refVersMod.previousVersion

    # Confirm with user
    response = askYesOrNoQuestion('Trim is a destructive and ' +
        'irreversible operation. Are you sure you want to proceed?!')

    if response == 'y':
        # Delegate to appropriate trim function
        refVersMod.trim(trimVersion)
    else:
        _logger.info('Trim not performed (whew!)')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='MariaDB database operations')
    argh.add_commands(parser, [execute, installSchema, importData,
        getSchemaVersion, upgrade, overlay, trim])
    argh.dispatch(parser)
Example #46
    assign_op = tf.assign(global_step_tensor, global_step_value)
    sess.run(assign_op)

    # export a new savedmodel that has the right global step type
    tf.train.Saver().save(sess, dest_file)
    sess.close()
    tf.reset_default_graph()


def freeze_graph(load_file):
    """ Loads a network and serializes just the inference parts for use by e.g. the C++ binary """
    n = dual_net.DualNetwork(load_file)
    out_graph = tf.graph_util.convert_variables_to_constants(
        n.sess, n.sess.graph.as_graph_def(), ["policy_output", "value_output"])
    with gfile.GFile(os.path.join(load_file + '.pb'), 'wb') as f:
        f.write(out_graph.SerializeToString())


parser = argparse.ArgumentParser()
argh.add_commands(parser, [
    gtp, bootstrap, train, train_dir, freeze_graph, selfplay, evaluate,
    validate, convert
])

if __name__ == '__main__':
    cloud_logging.configure()
    # Let absl.flags parse known flags from argv, then pass the remaining flags
    # into argh for dispatching.
    remaining_argv = flags.FLAGS(sys.argv, known_only=True)
    argh.dispatch(parser, argv=remaining_argv[1:])
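The hand-off in the ifmain block recurs across these examples; a minimal self-contained sketch, assuming absl-py is installed (the ``num_readouts`` flag and the ``selfplay`` body are invented for illustration):

import argparse
import sys

import argh
from absl import flags

flags.DEFINE_integer('num_readouts', 800, 'MCTS readouts per move')

def selfplay():
    print('readouts:', flags.FLAGS.num_readouts)

parser = argparse.ArgumentParser()
argh.add_commands(parser, [selfplay])

if __name__ == '__main__':
    # FLAGS(...) consumes the flags absl knows about and returns the
    # remaining argv (argv[0] included), which argh then dispatches
    remaining_argv = flags.FLAGS(sys.argv, known_only=True)
    argh.dispatch(parser, argv=remaining_argv[1:])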
Example #47
    ensure_dir_exists(output_dir)
    models = [model for model in fsdb.get_models() if model[0] < model_num]
    buf = ExampleBuffer(positions, samples_per_game=samples_per_game)
    files = []
    for _, model in sorted(models, reverse=True):
        local_model_dir = os.path.join(local_dir, model)
        if not tf.gfile.Exists(local_model_dir):
            print("Rsyncing", model)
            _rsync_dir(os.path.join(game_dir, model), local_model_dir)
        files.extend(tf.gfile.Glob(os.path.join(local_model_dir, '*.zz')))
        print("{}: {} games".format(model, len(files)))
        if len(files) * samples_per_game > positions:
            break

    print("Filling from {} files".format(len(files)))

    buf.parallel_fill(files, threads=threads)
    print(buf)
    output = os.path.join(output_dir, str(model_num) + '.tfrecord.zz')
    print("Writing to", output)
    buf.flush(output)


parser = argparse.ArgumentParser()
argh.add_commands(parser, [fill_and_wait, smart_rsync, make_chunk_for])

if __name__ == "__main__":
    import sys
    remaining_argv = flags.FLAGS(sys.argv, known_only=True)
    argh.dispatch(parser, argv=remaining_argv[1:])
Example #48
def main():
    parser = get_base_parser()

    commands = [
        make_project,
        render_config,
        clean,
        sphinx,
        deploy, push
    ]
    argh.add_commands(parser, commands)

    git_commands = [
        giza.operations.git.apply_patch,
        giza.operations.git.pull_rebase,
        giza.operations.git.cherry_pick,
        giza.operations.git.merge,
    ]
    argh.add_commands(parser, git_commands, namespace='git')

    generate_commands = [
        giza.operations.generate.api,
        giza.operations.generate.assets,
        giza.operations.generate.images,
        giza.operations.generate.intersphinx,
        giza.operations.generate.options,
        giza.operations.generate.primer,
        giza.operations.generate.steps,
        giza.operations.generate.tables,
        giza.operations.generate.toc,
        giza.operations.generate.examples,
        giza.operations.generate.redirects
    ]
    argh.add_commands(parser, generate_commands, namespace='generate')

    include_commands = [
        giza.operations.includes.recursive,
        giza.operations.includes.changed,
        giza.operations.includes.once,
        giza.operations.includes.unused,
        giza.operations.includes.list,
        giza.operations.includes.graph,
        giza.operations.includes.clean,
    ]
    argh.add_commands(parser, include_commands, namespace='includes')

    packaging_commands = [
        giza.operations.packaging.fetch,
        giza.operations.packaging.unwind,
        giza.operations.packaging.create,
        giza.operations.packaging.deploy,
    ]
    argh.add_commands(parser, packaging_commands, namespace='package')

    translate_commands = [
        giza.operations.translate.create_corpora,
        giza.operations.translate.build_translation_model,
        giza.operations.translate.model_results,
        giza.operations.translate.merge_translations,
        giza.operations.translate.po_to_corpus,
        giza.operations.translate.dict_to_corpus,
        giza.operations.translate.translate_po,
        giza.operations.translate.translate_text_doc,
        giza.operations.translate.flip_text,
        giza.operations.translate.auto_approve_obvious_po,
    ]
    argh.add_commands(parser, translate_commands, namespace='translate')

    translation_commands = [
        giza.operations.tx.check_orphaned,
        giza.operations.tx.update_translations,
        giza.operations.tx.pull_translations,
        giza.operations.tx.push_translations,
    ]
    argh.add_commands(parser, translation_commands, namespace='tx')

    args = RuntimeStateConfig()
    argh.dispatch(parser, namespace=args)
Example #49
    features, labels = dual_net.get_inference_input()
    dual_net.model_fn(features, labels, tf.estimator.ModeKeys.PREDICT,
                      dual_net.get_default_hyperparams())

    for model_name in tqdm(models):
        if model_name.endswith('-upgrade'):
            continue
        try:
            load_file = os.path.join(MODELS_DIR, model_name)
            dest_file = os.path.join(MODELS_DIR, model_name)
            main.convert(load_file, dest_file)
        except:
            print('failed on', model_name)
            continue


def echo():
    pass  # Flags are echo'd in the ifmain block below.


parser = argparse.ArgumentParser()

argh.add_commands(parser, [train, selfplay, gather, echo, backfill,
                           bootstrap, game_counts, validate])

if __name__ == '__main__':
    print_flags()
    cloud_logging.configure()
    remaining_argv = flags.FLAGS(sys.argv, known_only=True)
    argh.dispatch(parser, argv=remaining_argv[1:])
Example #50

def gather():
    print("Gathering game output...")
    main.gather(input_directory=SELFPLAY_DIR,
                output_directory=TRAINING_CHUNK_DIR)


def train(logdir=None):
    model_num, model_name = get_latest_model()
    print("Training on gathered game data, initializing from {}".format(model_name))
    new_model_name = shipname.generate(model_num + 1)
    print("New model will be {}".format(new_model_name))
    load_file = os.path.join(MODELS_DIR, model_name)
    save_file = os.path.join(MODELS_DIR, new_model_name)
    try:
        main.train(TRAINING_CHUNK_DIR, save_file=save_file, load_file=load_file,
                   generation_num=model_num, logdir=logdir)
    except:
        print("Got an error training, muddling on...")
        logging.exception("Train error")


parser = argparse.ArgumentParser()
argh.add_commands(parser, [train, selfplay, gather, bootstrap, convert_all])

if __name__ == '__main__':
    print_flags()
    cloud_logging.configure()
    argh.dispatch(parser)
Example #51
        # will get called within a graph context containing our model graph.

        self.summary_writer = SummaryWriterCache.get(self.working_dir)
        self.weight_tensors = tf.trainable_variables()
        self.global_step = tf.train.get_or_create_global_step()

    def before_run(self, run_context):
        global_step = run_context.session.run(self.global_step)
        if global_step % self.every_n_steps == 0:
            self.before_weights = run_context.session.run(self.weight_tensors)

    def after_run(self, run_context, run_values):
        global_step = run_context.session.run(self.global_step)
        if self.before_weights is not None:
            after_weights = run_context.session.run(self.weight_tensors)
            weight_update_summaries = compute_update_ratio(
                self.weight_tensors, self.before_weights, after_weights)
            self.summary_writer.add_summary(weight_update_summaries,
                                            global_step)
            self.before_weights = None


parser = argparse.ArgumentParser()
argh.add_commands(parser, [train, export_model, validate])

if __name__ == '__main__':
    # Let absl.flags parse known flags from argv, then pass the remaining flags
    # into argh for dispatching.
    remaining_argv = flags.FLAGS(sys.argv, known_only=True)
    argh.dispatch(parser, argv=remaining_argv[1:])
Example #52
    for model_name, record_files in sorted(model_gamedata.items()):
        with timer("Processing %s" % model_name):
            if set(record_files) <= already_processed:
                print("%s is already fully processed" % model_name)
                continue
            for i, example_batch in enumerate(
                    tqdm(
                        preprocessing.shuffle_tf_examples(
                            examples_per_record, record_files))):
                output_record = os.path.join(
                    output_directory,
                    '{}-{}.tfrecord.zz'.format(model_name, str(i)))
                preprocessing.write_tf_examples(output_record,
                                                example_batch,
                                                serialize=False)
            already_processed.update(record_files)

    print("Processed %s new files" %
          (len(already_processed) - num_already_processed))
    with gfile.GFile(meta_file, 'w') as f:
        f.write('\n'.join(sorted(already_processed)))


parser = argparse.ArgumentParser()
argh.add_commands(parser, [gtp, bootstrap, train, selfplay, gather, evaluate])

if __name__ == '__main__':
    cloud_logging.configure()
    argh.dispatch(parser)
Example #53
    js.dump(j, fj, sort_keys=True, indent=2)
    fj.close()


def init_bonds(p_):
    spec, bonds, offd, angs, torp, hbs = [], [], [], [], [], []
    for key in p_:
        k = key.split('_')
        if k[0] == 'bo1':
            bonds.append(k[1])
        elif k[0] == 'rosi':
            kk = k[1].split('-')
            if len(kk) == 2:
                offd.append(k[1])
            elif len(kk) == 1:
                spec.append(k[1])
        elif k[0] == 'theta0':
            angs.append(k[1])
        elif k[0] == 'tor1':
            torp.append(k[1])
        elif k[0] == 'rohb':
            hbs.append(k[1])
    return spec, bonds, offd, angs, torp, hbs


if __name__ == '__main__':
    ''' use a command like ./gmd.py nvt --T=2800 to run it '''
    parser = argparse.ArgumentParser()
    argh.add_commands(parser, [q, i, ii, j, jj])
    argh.dispatch(parser)
Example #54
    models = [m[1] for m in fsdb.get_models()]

    import dual_net
    import tensorflow as tf
    from tqdm import tqdm
    features, labels = dual_net.get_inference_input()
    dual_net.model_fn(features, labels, tf.estimator.ModeKeys.PREDICT,
                      dual_net.get_default_hyperparams())

    for model_name in tqdm(models):
        if model_name.endswith('-upgrade'):
            continue
        try:
            load_file = os.path.join(fsdb.models_dir(), model_name)
            dest_file = os.path.join(fsdb.models_dir(), model_name)
            main.convert(load_file, dest_file)
        except:
            print('failed on', model_name)
            continue


parser = argparse.ArgumentParser()

argh.add_commands(
    parser, [train, selfplay, backfill, bootstrap, fsdb.game_counts, validate])

if __name__ == '__main__':
    cloud_logging.configure()
    remaining_argv = flags.FLAGS(sys.argv, known_only=True)
    argh.dispatch(parser, argv=remaining_argv[1:])
Example #55
    if model_num is None:
        model_num, model_name = get_latest_model()
    else:
        model_num = int(model_num)
        model_name = get_model(model_num)

    # Model N was trained on games up through model N-2, so the validation set
    # should only be for models through N-2 as well, thus the (model_num - 1)
    # term.
    models = list(
        filter(lambda num_name: num_name[0] < (model_num - 1), get_models()))
    # Run on the most recent 50 generations,
    # TODO(brianklee): make this hyperparameter dependency explicit/not hardcoded
    holdout_dirs = [os.path.join(HOLDOUT_DIR, pair[1])
                    for pair in models[-50:]]

    main.validate(ESTIMATOR_WORKING_DIR, *holdout_dirs,
                  checkpoint_name=os.path.join(MODELS_DIR, model_name),
                  validate_name=validate_name)


parser = argparse.ArgumentParser()

argh.add_commands(parser, [train, selfplay, gather,
                           bootstrap, game_counts, validate])

if __name__ == '__main__':
    print_flags()
    cloud_logging.configure()
    argh.dispatch(parser)
Example #56
    ]
    save_file = os.path.join(os.getcwd(), save_file)
    n = PolicyNetwork()
    try:
        n.initialize_variables(save_file)
    except:
        n.initialize_variables(None)
    if logdir is not None:
        n.initialize_logging(logdir)
    last_save_checkpoint = 0
    for i in range(epochs):
        random.shuffle(train_chunk_files)
        for file in train_chunk_files:
            print("Using %s" % file)
            train_dataset = DataSet.read(file)
            train_dataset.shuffle()
            with timer("training"):
                n.train(train_dataset)
            n.save_variables(save_file)
            if n.get_global_step() > last_save_checkpoint + checkpoint_freq:
                with timer("test set evaluation"):
                    n.check_accuracy(test_dataset)
                last_save_checkpoint = n.get_global_step()


parser = argparse.ArgumentParser()
argh.add_commands(parser, [gtp, preprocess, train])

if __name__ == '__main__':
    argh.dispatch(parser)
Example #57
def upload(vaultname, passphrase=None, key=AWS_ACCESS_KEY_ID, secret=AWS_SECRET_ACCESS_KEY, shelf_name=os.path.expanduser("~/glaciervault.db"), *filename):
    glacier = GlacierStore(key, secret, vaultname, shelf_name)
    glacier.upload(passphrase, *filename)


def delete(vaultname, key=AWS_ACCESS_KEY_ID, secret=AWS_SECRET_ACCESS_KEY, shelf_name=os.path.expanduser("~/glaciervault.db"), *filename):
    glacier = GlacierStore(key, secret, vaultname, shelf_name)
    glacier.delete(*filename)


def retrieve(vaultname, key=AWS_ACCESS_KEY_ID, secret=AWS_SECRET_ACCESS_KEY, topic='glacier_alerts', shelf_name=os.path.expanduser("~/glaciervault.db"), *filename):
    glacier = GlacierStore(key, secret, vaultname, shelf_name)
    glacier.retrieve(topic, *filename)


def monitor(vaultname, key=AWS_ACCESS_KEY_ID, secret=AWS_SECRET_ACCESS_KEY, passphrase=None, shelf_name=os.path.expanduser("~/glaciervault.db"), *dirs):
    glacier = GlacierStore(key, secret, vaultname, shelf_name)
    glacier.monitor(passphrase, *dirs)


def download(vaultname, key=AWS_ACCESS_KEY_ID, secret=AWS_SECRET_ACCESS_KEY, passphrase=None, shelf_name=os.path.expanduser("~/glaciervault.db")):
    glacier = GlacierStore(key, secret, vaultname, shelf_name)
    glacier.download(passphrase)

parser = argparse.ArgumentParser()
argh.add_commands(
    parser, [create, upload, delete, retrieve, monitor, download])

if __name__ == '__main__':
    argh.dispatch(parser)