Example 1
def dump_config(config):
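    # operate on a deep copy so the caller's config is not mutated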
    cfg = copy.deepcopy(config)
    alloc = cfg.get('eth', {}).get('block', {}).get('GENESIS_INITIAL_ALLOC', {})
    if len(alloc) > 100:
        log.info('omitting reporting of %d accounts in genesis' % len(alloc))
        del cfg['eth']['block']['GENESIS_INITIAL_ALLOC']
    konfig.dump_config(cfg)
Example 2
def run(ctx, dev, nodial, fake):
    """Start the client ( --dev to stop on error)"""
    config = ctx.obj['config']
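    # --nodial: don't dial out; clear bootstrap nodes and require zero peers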
    if nodial:
        # config['deactivated_services'].append(PeerManager.name)
        # config['deactivated_services'].append(NodeDiscovery.name)
        config['discovery']['bootstrap_nodes'] = []
        config['discovery']['listen_port'] = 29873
        config['p2p']['listen_port'] = 29873
        config['p2p']['min_peers'] = 0

    if fake:
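        # --fake: lower the difficulty constants so blocks are trivially mineable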
        from ethereum import blocks
        blocks.GENESIS_DIFFICULTY = 1024
        blocks.BLOCK_DIFF_FACTOR = 16
        blocks.MIN_GAS_LIMIT = blocks.GENESIS_GAS_LIMIT // 2  # floor division keeps this an int on Python 3
        # workaround for genesis.json hack
        blocks.GENESIS_JSON["difficulty"] = blocks.int_to_hex(blocks.GENESIS_DIFFICULTY)

    # create app
    app = EthApp(config)

    # development mode
    if dev:
        gevent.get_hub().SYSTEM_ERROR = BaseException
        try:
            config['client_version'] += '/' + os.getlogin()
        except OSError:  # os.getlogin() fails when there is no controlling terminal
            log.warning("can't get and add login name to client_version")

    # dump config
    konfig.dump_config(config)

    # register services
    for service in services:
        assert issubclass(service, BaseService)
        if service.name not in app.config['deactivated_services']:
            assert service.name not in app.services
            service.register_with_app(app)
            assert hasattr(app.services, service.name)

    unlock_accounts(ctx.obj['unlock'], app.services.accounts, password=ctx.obj['password'])

    # start app
    log.info('starting')
    app.start()

    if config['post_app_start_callback'] is not None:
        config['post_app_start_callback'](app)

    # wait for interrupt
    evt = Event()
    gevent.signal(signal.SIGQUIT, evt.set)
    gevent.signal(signal.SIGTERM, evt.set)
    gevent.signal(signal.SIGINT, evt.set)
    evt.wait()

    # finally stop
    app.stop()
Example 3
def logger_init():
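    # always log to the console; optionally also to a file, per the config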
    logging.basicConfig(level=logging.DEBUG, format='%(module)15s %(asctime)s %(message)s', datefmt='%H:%M:%S')
    if config().log.to_file:
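        # suffix the file name with a MMDDHHMMSS timestamp so runs don't overwrite each other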
        log_filename = os.path.join(config().log.dir,
                                    config().log.prefix + datetime.datetime.now().strftime("%m%d%H%M%S"))
        logging.getLogger().addHandler(logging.FileHandler(log_filename))
    if config().log.dump_config:
        dump_config()
Example 4
def run(ctx, dev):
    """Start the client ( --dev to stop on error)"""
    # create app
    app = EthApp(ctx.obj['config'])

    # development mode
    if dev:
        gevent.get_hub().SYSTEM_ERROR = BaseException
        try:
            ctx.obj['config']['client_version'] += '/' + os.getlogin()
        except OSError:  # os.getlogin() fails when there is no controlling terminal
            log.warning("can't get and add login name to client_version")

    # dump config
    konfig.dump_config(ctx.obj['config'])

    # register services
    for service in services:
        assert issubclass(service, BaseService)
        if service.name not in app.config['deactivated_services']:
            assert service.name not in app.services
            service.register_with_app(app)
            assert hasattr(app.services, service.name)

    # start app
    app.start()

    # wait for interrupt
    evt = Event()
    gevent.signal(signal.SIGQUIT, evt.set)
    gevent.signal(signal.SIGTERM, evt.set)
    gevent.signal(signal.SIGINT, evt.set)
    evt.wait()

    # finally stop
    app.stop()
Example 5
def main():
    config = c.get_default_config()
    parser = ArgumentParser()
    # argparse has no 'version' keyword (removed after Python 2.7); register it as an action
    parser.add_argument('--version', action='version', version=__version__)

    config = create_config(parser)

    # Logging
    configure_logging(config.get('misc', 'logging') or '', verbosity=config.getint('misc', 'verbosity'))
    logger.info('PyEPM %s', __version__)
    logger.info('=====')

    logger.debug(c.dump_config(config))

    args = parser.parse_args()

    for filename in args.filename:
        if not os.path.exists(filename):
            logger.warning("File does not exist: %s", filename)
        else:
            logger.info("Deploying %s..." % filename)
            deployment = deploy.Deploy(filename, config)
            deployment.deploy()
Example 6
def main():
    config = c.get_default_config()
    parser = ArgumentParser()
    # argparse has no 'version' keyword (removed after Python 2.7); register it as an action
    parser.add_argument('--version', action='version', version=__version__)

    config = create_config(parser)

    # Logging
    configure_logging(config.get('misc', 'logging') or '', verbosity=config.getint('misc', 'verbosity'))
    logger.info(colors.HEADER + '=====' + colors.ENDC)
    logger.info(colors.OKGREEN + 'PyEPM ' + colors.ENDC + '%s', __version__)
    logger.info(colors.HEADER + '=====' + colors.ENDC)

    logger.debug(c.dump_config(config))

    args = parser.parse_args()

    for filename in args.filename:
        if not os.path.exists(filename):
            logger.warning("\nFile does not exist: %s", filename)
        else:
            logger.info("\nDeploying " + colors.BOLD + "%s" % filename + colors.ENDC + "...")
            deployment = deploy.Deploy(filename, config)
            deployment.deploy()
Example 7
def config(ctx):
    """Show the config"""
    konfig.dump_config(ctx.obj['config'])
Example 8
def run_training(cfg):
    """
    run the training loops
    """

    # torch gpu setup
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu_idx)
    if cfg.model_zoo is not None:
        os.environ["TORCH_MODEL_ZOO"] = cfg.model_zoo

    # make the exp dir
    os.makedirs(cfg.exp_dir, exist_ok=True)

    # set the seeds
    np.random.seed(cfg.seed)
    torch.manual_seed(cfg.seed)

    # set cudnn to reproducibility mode
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # dump the exp config to the exp dir
    dump_config(cfg)

    # setup datasets
    dset_train, dset_val, dset_test = dataset_zoo(**cfg.DATASET)

    # init loaders
    trainloader = torch.utils.data.DataLoader(dset_train,
                                              num_workers=cfg.num_workers,
                                              pin_memory=True,
                                              batch_size=cfg.batch_size,
                                              shuffle=True)

    if dset_val is not None:
        valloader = torch.utils.data.DataLoader(dset_val,
                                                num_workers=cfg.num_workers,
                                                pin_memory=True,
                                                batch_size=cfg.batch_size,
                                                shuffle=False)
    else:
        valloader = None

    # test loaders
    if dset_test is not None:
        testloader = torch.utils.data.DataLoader(dset_test,
                                                 num_workers=cfg.num_workers,
                                                 pin_memory=True,
                                                 batch_size=cfg.batch_size,
                                                 shuffle=False)
        _, _, eval_vars = eval_zoo(cfg.DATASET.dataset_name)
    else:
        testloader = None
        eval_vars = None

    # init the model
    model, stats, optimizer_state = init_model(cfg, add_log_vars=eval_vars)
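    # resume from the epoch after the last one recorded in stats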
    start_epoch = stats.epoch + 1

    # move model to gpu
    if torch.cuda.is_available():
        model.cuda()

    # init the optimizer
    optimizer, scheduler = init_optimizer(model,
                                          optimizer_state=optimizer_state,
                                          **cfg.SOLVER)

    # loop through epochs
    scheduler.last_epoch = start_epoch
    for epoch in range(start_epoch, cfg.SOLVER.max_epochs):
        with stats:  # automatic new_epoch and plotting at every epoch start

            print("scheduler lr = %1.2e" % float(scheduler.get_lr()[-1]))

            # train loop
            trainvalidate(model,
                          stats,
                          epoch,
                          trainloader,
                          optimizer,
                          False,
                          visdom_env_root=get_visdom_env(cfg),
                          **cfg)

            # val loop
            if valloader is not None:
                trainvalidate(model,
                              stats,
                              epoch,
                              valloader,
                              optimizer,
                              True,
                              visdom_env_root=get_visdom_env(cfg),
                              **cfg)

            # eval loop (optional)
            if testloader is not None:
                eval_result = run_eval(cfg, model, testloader, stats=stats)
                dump_eval_result(cfg, eval_result)

            assert stats.epoch == epoch, "inconsistent stats!"

            # delete previous models if required
            if cfg.store_checkpoints_purge > 0 and cfg.store_checkpoints:
                for prev_epoch in range(epoch - cfg.store_checkpoints_purge):
                    purge_epoch(cfg.exp_dir, prev_epoch)

            # save model
            if cfg.store_checkpoints:
                outfile = get_checkpoint(cfg.exp_dir, epoch)
                save_model(model, stats, outfile, optimizer=optimizer)

            scheduler.step()

    # the final eval
    if testloader is not None:
        eval_result = run_eval(cfg, model, testloader, stats=None)
        dump_eval_result(cfg, eval_result)
        return eval_result
    else:
        return None
Example 9
def run_training(cfg):
    # run the training loops

    # make the exp dir
    os.makedirs(cfg.exp_dir, exist_ok=True)

    # set the seed
    np.random.seed(cfg.seed)

    # dump the exp config to the exp dir
    dump_config(cfg)

    # setup datasets
    dset_train, dset_val, dset_test = dataset_zoo(**cfg.DATASET)

    # init loaders
    if cfg.batch_sampler == 'default':
        trainloader = torch.utils.data.DataLoader(dset_train,
                                                  num_workers=cfg.num_workers,
                                                  pin_memory=True,
                                                  batch_size=cfg.batch_size,
                                                  shuffle=False)
    elif cfg.batch_sampler == 'sequence':
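        # build batches with SceneBatchSampler over a sequential pass through the dataset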
        trainloader = torch.utils.data.DataLoader(
            dset_train,
            num_workers=cfg.num_workers,
            pin_memory=True,
            batch_sampler=SceneBatchSampler(
                torch.utils.data.SequentialSampler(dset_train),
                cfg.batch_size,
                True,
            ))
    else:
        raise ValueError('unknown batch_sampler: %s' % cfg.batch_sampler)

    if dset_val is not None:
        if cfg.batch_sampler == 'default':
            valloader = torch.utils.data.DataLoader(
                dset_val,
                num_workers=cfg.num_workers,
                pin_memory=True,
                batch_size=cfg.batch_size,
                shuffle=False)
        elif cfg.batch_sampler == 'sequence':
            valloader = torch.utils.data.DataLoader(
                dset_val,
                num_workers=cfg.num_workers,
                pin_memory=True,
                batch_sampler=SceneBatchSampler(
                    torch.utils.data.SequentialSampler(dset_val),
                    cfg.batch_size,
                    True,
                ))
        else:
            raise ValueError('unknown batch_sampler: %s' % cfg.batch_sampler)
    else:
        valloader = None

    # test loaders
    if dset_test is not None:
        testloader = torch.utils.data.DataLoader(
            dset_test,
            num_workers=cfg.num_workers,
            pin_memory=True,
            batch_size=cfg.batch_size,
            shuffle=False,
        )
        _, _, eval_vars = eval_zoo(cfg.DATASET.dataset_name)
    else:
        testloader = None
        eval_vars = None

    # init the model
    model, stats, optimizer_state = init_model(cfg, add_log_vars=eval_vars)
    start_epoch = stats.epoch + 1

    # annotate dataset with c3dpo outputs
    if cfg.annotate_with_c3dpo_outputs:
        for dset in dset_train, dset_val, dset_test:
            if dset is not None:
                run_c3dpo_model_on_dset(dset, cfg.MODEL.nrsfm_exp_path)

    # move model to gpu
    model.cuda(0)

    # init the optimizer
    optimizer, scheduler = init_optimizer(model,
                                          optimizer_state=optimizer_state,
                                          **cfg.SOLVER)

    # loop through epochs
    scheduler.last_epoch = start_epoch
    for epoch in range(start_epoch, cfg.SOLVER.max_epochs):
        with stats:  # automatic new_epoch and plotting of stats at every epoch start

            print("scheduler lr = %1.2e" % float(scheduler.get_lr()[-1]))

            # train loop
            trainvalidate(model, stats, epoch, trainloader, optimizer, False,
                          visdom_env_root=get_visdom_env(cfg), **cfg)

            # val loop
            if valloader is not None:
                trainvalidate(model, stats, epoch, valloader, optimizer, True,
                              visdom_env_root=get_visdom_env(cfg), **cfg)

            # eval loop (optional)
            if testloader is not None and cfg.eval_interval >= 0:
                if cfg.eval_interval == 0 or (epoch % cfg.eval_interval == 0 and epoch > 0):
                    torch.cuda.empty_cache()  # the eval pass is memory heavy
                    with torch.no_grad():
                        run_eval(cfg, model, stats, testloader)

            assert stats.epoch == epoch, "inconsistent stats!"

            # delete previous models if required
            if cfg.store_checkpoints_purge > 0 and cfg.store_checkpoints:
                for prev_epoch in range(epoch - cfg.store_checkpoints_purge):
                    period = cfg.store_checkpoints_purge_except_every
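                    # spare every period-th checkpoint from being purged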
                    if (period > 0 and prev_epoch % period == period - 1):
                        continue
                    purge_epoch(cfg.exp_dir, prev_epoch)

            # save model
            if cfg.store_checkpoints:
                outfile = get_checkpoint(cfg.exp_dir, epoch)
                save_model(model, stats, outfile, optimizer=optimizer)

            scheduler.step()
Example 10
def set_repo_version(config, version):
    config['REPO_VERSION'] = version
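    # persist the change; config_path is a global defined elsewhere in the source module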
    dump_config(config, config_path)
Example 11
import os

import torch

from config import config, overwrite_config_with_args, dump_config
from read_data import index_ent_rel, graph_size, read_data
from data_utils import heads_tails, inplace_shuffle, batch_by_num
from trans_e import TransE
from trans_d import TransD
from distmult import DistMult
from compl_ex import ComplEx
from logger_init import logger_init
from select_gpu import select_gpu
from corrupter import BernCorrupterMulti


logger_init()
torch.cuda.set_device(select_gpu())
overwrite_config_with_args()
dump_config()

task_dir = config().task.dir
kb_index = index_ent_rel(os.path.join(task_dir, 'train.txt'),
                         os.path.join(task_dir, 'valid.txt'),
                         os.path.join(task_dir, 'test.txt'))
n_ent, n_rel = graph_size(kb_index)

models = {'TransE': TransE, 'TransD': TransD, 'DistMult': DistMult, 'ComplEx': ComplEx}
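# config() returns the global config; its g_config / d_config entries name the generator and discriminator models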
gen_config = config()[config().g_config]
dis_config = config()[config().d_config]
gen = models[config().g_config](n_ent, n_rel, gen_config)
dis = models[config().d_config](n_ent, n_rel, dis_config)
gen.load(os.path.join(task_dir, gen_config.model_file))
dis.load(os.path.join(task_dir, dis_config.model_file))
Example 12
def dump_config(self, conf):
    config.dump_config(conf, os.path.join(self.data_dir, 'config.cnf'))
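
All of these snippets delegate to a project-specific dump_config helper (konfig.dump_config, c.dump_config, config.dump_config). As a rough illustration only, and not the implementation of any library above, such a helper often amounts to serializing the config mapping to YAML. A minimal sketch, assuming the config is a plain dict and PyYAML is available:

import sys

import yaml  # PyYAML; an assumption, not a stated dependency of the projects above


def dump_config(config, out=sys.stdout):
    # one "key: value" per line instead of inline flow style
    yaml.safe_dump(config, out, default_flow_style=False)


dump_config({'p2p': {'listen_port': 29873, 'min_peers': 0}})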