Ejemplo n.º 1
0
def test(args):
    """Load the trained model and log a translation for every test pair."""
    trainer = Trainer(args)
    trainer.load_model()
    for sample in trainer.test_data:
        src_text, truth_text, pred_text = trainer.translate(sample)
        logger.info('--' * 10)
        logger.info(f'input: {src_text}')
        logger.info(f'truth: {truth_text}')
        logger.info(f'predict: {pred_text}')
Ejemplo n.º 2
0
    def train(self, model, device):
        """Build the train/validate data loaders and run a full training job."""
        from libs.trainer import Trainer
        from utils.data_loader import CreateDataLoader
        from utils.common import CommonUtils

        loaders = CreateDataLoader.build_for_train(self.exec_type, self.config)
        train_loader, validate_loader = loaders

        # Trainer drives the actual epoch loop; we only hand over the loaders.
        Trainer(model, device, self.config, self.save_dir).run(
            train_loader, validate_loader)
def main(args):
    """Run the unsupervised-NMT loop: per epoch, train the autoencoders,
    the cross-domain translators and the adversarial pair, then log results."""
    print(args)
    trainer = Trainer(args)
    evaluator = Evaluator(trainer)

    for i_epoch in range(1, args.epoch + 1):
        print('%dth epoch' % i_epoch)

        is_first = i_epoch == 1

        # Denoising autoencoder pass for each domain.
        log_dict = {
            'autoencoder': {
                'tgt': trainer.train_one_epoch_autoencoder('tgt'),
                'src': trainer.train_one_epoch_autoencoder('src'),
            },
        }

        # Back-translation / cross-domain pass; the first epoch is flagged
        # so the trainer can bootstrap differently.
        log_dict['cross_domain'] = {
            'tgt': trainer.train_one_epoch_cross_domain('tgt',
                                                        first_iter=is_first),
            'src': trainer.train_one_epoch_cross_domain('src',
                                                        first_iter=is_first),
        }

        # Adversarial pass returns discriminator and generator losses.
        disc_loss, gen_loss = trainer.train_one_epoch_adversarial()
        log_dict['adversarial'] = {'disc': disc_loss, 'gen': gen_loss}

        trainer.clip_current_model()

        # Sampling needs a model that has trained at least one full epoch.
        if i_epoch > 1:
            log_dict['samples'] = evaluator.sample_translation()

        pprint(log_dict)
Ejemplo n.º 4
0
def interactive(args):
    """Read one sentence from stdin, translate it, and log the prediction."""
    trainer = Trainer(args)
    trainer.load_model()

    # Source side: user input, tokenised and terminated with </s>.
    tokens = input('input a sentence: ').split() + ['</s>']
    src = utils.prepare_sequence(tokens, trainer.s_w2i, False).view(1, -1)

    # Target side: a dummy sequence — translate() needs a pair, but the
    # truth is unused here.
    tgt = utils.prepare_sequence(['fake', '</s>'],
                                 trainer.s_w2i, False).view(1, -1)

    i, t, p = trainer.translate((src, tgt))
    logger.info('--' * 10)
    logger.info('input: {}'.format(i))
    logger.info('predict: {}'.format(p))
Ejemplo n.º 5
0
def main(args):
    """Train with per-epoch test evaluation, checkpointing only when the
    test loss improves on the best seen so far."""
    print(args)
    trainer = Trainer(args)
    evaluator = Evaluator(trainer)

    best_val_loss = 1e+10

    for i_epoch in range(0, args.epoch + 1):
        # Per-epoch log; the trainer and evaluator fill it in place.
        log_dict = {'epoch': i_epoch}
        trainer.train_one_epoch(log_dict)

        # Evaluation populates log_dict['test_loss'].
        evaluator.calc_test_loss(log_dict)

        # Keep only the best-so-far model on disk.
        if log_dict['test_loss'] < best_val_loss:
            best_val_loss = log_dict['test_loss']
            trainer.dump_model(args.output_dir)

        pprint(log_dict)
Ejemplo n.º 6
0
def train_parallel(rank, world_size, seed, config):
    """Train PointDSC on one GPU as part of a multi-process (DDP) run.

    This function is spawned once per available GPU. It joins the NCCL
    process group, seeds the RNGs, then populates ``config`` in place with
    the model, optimizer, scheduler, data loaders, feature extractors and
    evaluation metrics before handing everything to ``Trainer``.

    Args:
        rank: index of this worker; also used as the CUDA device id and
            for sharding the data loaders.
        world_size: total number of worker processes.
        seed: RNG seed applied to torch and numpy in every worker.
        config: attribute-style config object; mutated in place.

    Raises:
        ValueError: if ``config.optimizer`` is not ``'SGD'`` or ``'ADAM'``.
    """
    # Rendezvous point for torch.distributed — every worker must use the
    # same address/port pair.
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '8882'
    dist.init_process_group(backend='nccl', world_size=world_size, rank=rank)
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.set_device(rank)
    device = 'cuda:%d' % torch.cuda.current_device()
    print("process %d, GPU: %s" % (rank, device))

    # create model
    config.model = PointDSC(
        in_dim=config.in_dim,
        num_layers=config.num_layers,
        num_channels=config.num_channels,
        num_iterations=config.num_iterations,
        inlier_threshold=config.inlier_threshold,
        sigma_d=config.sigma_d,
        ratio=config.ratio,
        k=config.k,
    )

    # create optimizer — note that config.optimizer (a string up to this
    # point) is replaced by the optimizer object itself.
    if config.optimizer == 'SGD':
        config.optimizer = optim.SGD(
            config.model.parameters(),
            lr=config.lr,
            momentum=config.momentum,
            weight_decay=config.weight_decay,
        )
    elif config.optimizer == 'ADAM':
        config.optimizer = optim.Adam(
            config.model.parameters(),
            lr=config.lr,
            betas=(0.9, 0.999),
            weight_decay=config.weight_decay,
        )
    else:
        # Previously an unknown name fell through silently and the
        # scheduler below crashed on the raw string — fail fast instead.
        raise ValueError('unsupported optimizer: %r' % (config.optimizer,))
    config.scheduler = optim.lr_scheduler.ExponentialLR(
        config.optimizer,
        gamma=config.scheduler_gamma,
    )

    # create dataset and dataloader; rank/world_size are forwarded so each
    # worker iterates its own shard of the data.
    DL_config = edict({
        'voxel_size': 0.3,
        'positive_pair_search_voxel_size_multiplier': 4,
        'use_random_rotation': False,
        'use_random_scale': False
    })
    config.train_loader = make_data_loader(config.dataset,
                                           DL_config,
                                           'train',
                                           config.batch_size,
                                           rank,
                                           world_size,
                                           seed,
                                           config.num_workers,
                                           shuffle=True)
    config.val_loader = make_data_loader(config.dataset,
                                         DL_config,
                                         'val',
                                         config.batch_size,
                                         rank,
                                         world_size,
                                         seed,
                                         config.num_workers,
                                         shuffle=False)

    # Feature extractors: augmentation is enabled for training only; the
    # validation extractor uses fixed zero augmentation for reproducibility.
    config.train_feature_extractor = LidarFeatureExtractor(
        split='train',
        in_dim=config.in_dim,
        inlier_threshold=config.inlier_threshold,
        num_node=config.num_node,
        use_mutual=config.use_mutual,
        augment_axis=config.augment_axis,
        augment_rotation=config.augment_rotation,
        augment_translation=config.augment_translation,
        fcgf_weights_file=config.fcgf_weights_file)

    config.val_feature_extractor = LidarFeatureExtractor(
        split='val',
        in_dim=config.in_dim,
        inlier_threshold=config.inlier_threshold,
        num_node=config.num_node,
        use_mutual=config.use_mutual,
        augment_axis=0,
        augment_rotation=0.0,
        augment_translation=0.0,
        fcgf_weights_file=config.fcgf_weights_file)

    # create evaluation metrics; config.metric_weight gives each loss its
    # weight in the combined training objective.
    config.evaluate_metric = {
        "ClassificationLoss":
        ClassificationLoss(balanced=config.balanced),
        "SpectralMatchingLoss":
        SpectralMatchingLoss(balanced=config.balanced),
        "TransformationLoss":
        TransformationLoss(re_thre=config.re_thre, te_thre=config.te_thre),
    }
    config.metric_weight = {
        "ClassificationLoss": config.weight_classification,
        "SpectralMatchingLoss": config.weight_spectralmatching,
        "TransformationLoss": config.weight_transformation,
    }

    trainer = Trainer(config, rank)
    trainer.train()
Ejemplo n.º 7
0
                            # NOTE(review): fragment — the constructor call these
                            # keyword arguments belong to (and the enclosing def)
                            # starts before this chunk; verify against the full file.
                            num_node=config.num_node,
                            use_mutual=config.use_mutual,
                            downsample=config.downsample,
                            augment_axis=config.augment_axis,
                            augment_rotation=config.augment_rotation,
                            augment_translation=config.augment_translation,
                            )
    # Wrap both datasets in loaders that share batch-size/worker settings.
    config.train_loader = get_dataloader(dataset=train_set, 
                                        batch_size=config.batch_size,
                                        num_workers=config.num_workers,
                                        )
    config.val_loader = get_dataloader(dataset=val_set,
                                        batch_size=config.batch_size,
                                        num_workers=config.num_workers,
                                        )
    
    # create evaluation
    # Metrics computed during training; config.metric_weight below gives each
    # loss its weight in the combined objective.
    config.evaluate_metric = {
        "ClassificationLoss": ClassificationLoss(balanced=config.balanced),
        "SpectralMatchingLoss": SpectralMatchingLoss(balanced=config.balanced),
        "TransformationLoss": TransformationLoss(re_thre=config.re_thre, te_thre=config.te_thre),
    }
    config.metric_weight = {
        "ClassificationLoss": config.weight_classification,
        "SpectralMatchingLoss": config.weight_spectralmatching,
        "TransformationLoss": config.weight_transformation,
    }


    # Hand the fully-populated config to the trainer and run.
    trainer = Trainer(config)
    trainer.train()
Ejemplo n.º 8
0
        # NOTE(review): fragment — the enclosing function and the loop header
        # (presumably one iteration per random seed; confirm against the full
        # file) start before this chunk.
        # Build the MatrixLSTM+ResNet network from the hyper-parameters.
        net = MatrixLSTMResNet(
            (params.input_height, params.input_width),
            train_loader.dataset.num_classes, params.embedding_size,
            params.hidden_size, params.region_shape, params.region_stride,
            params.add_coords_feature, params.add_time_feature_mode,
            params.normalize_relative, params.lstm_type,
            params.keep_most_recent, params.frame_intervals,
            params.frame_intervals_mode, params.resnet_type,
            params.resnet_pretrain, params.resnet_freeze, params.eventdrop,
            params.framedrop, params.fcdrop, params.frame_actfn,
            params.resnet_meanstd_norm, params.resnet_add_last_fc,
            params.lstm_num_layers, params.resnet_replace_first,
            params.resnet_replace_first_bn, params.add_se_layer)

        # Train this run and collect its accuracy for the aggregate below.
        trainer = Trainer(net,
                          torch.optim.Adam,
                          train_loader,
                          val_loader,
                          test_loader,
                          params,
                          lr_scheduler=lr_scheduler)
        acc = trainer.train_network()
        acc_results.append(acc)

    # Report mean +/- std accuracy over all runs.
    print("\n-------------------------")
    print("Multiple seed evaluation:")
    print("Results:", acc_results)
    print("Aggregate result: {} +/- {}".format(np.mean(acc_results),
                                               np.std(acc_results)))
Ejemplo n.º 9
0
def main():
    """CLI entry point: build a LatentODE model, synthesize function
    datasets, and train with extrapolation targets on t in (5, 10]."""
    warnings.filterwarnings('ignore', category=UserWarning)

    parser = argparse.ArgumentParser()

    parser.add_argument('--model', default='latent_ode')
    parser.add_argument('--batch_size', default=256, type=int)
    parser.add_argument('--n_epoch', default=100, type=int)
    parser.add_argument('--version', default=1, type=int)
    parser.add_argument('--train_dataset_size', default=1000, type=int)
    parser.add_argument('--val_dataset_size', type=int)
    parser.add_argument('--time_size', default=30, type=int)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--device', default='cpu', type=str)
    parser.add_argument('--log_dir', default='./logs', type=str)

    parser.add_argument('--save', dest='save', action='store_true')
    parser.set_defaults(save=False)
    parser.add_argument('--const_samples',
                        dest='const_samples',
                        action='store_true')
    parser.set_defaults(const_samples=False)

    args = parser.parse_args()

    device = torch.device(args.device)
    model = LatentODE(
        x_size=1, z_size=64, hid_size=128, noise_sigma=0.01,
        device=device, reverse=False, method='dopri5')

    # Training split: observe on [0, 5], predict on (5, 10].
    train_functions = FunctionsDataset(
        args.train_dataset_size, np.linspace(0, 5, 100), args.time_size,
        t_predict_points=np.linspace(5, 10, 100)[1:],
        const_samples=args.const_samples)
    train_dataloader = FunctionsDataLoader(
        train_functions, batch_size=args.batch_size)

    # Validation falls back to the training size when none was given.
    if args.val_dataset_size is None:
        val_size = args.train_dataset_size
    else:
        val_size = args.val_dataset_size
    val_functions = FunctionsDataset(
        val_size, np.linspace(0, 5, 100), args.time_size,
        t_predict_points=np.linspace(5, 10, 100)[1:],
        const_samples=args.const_samples)
    val_dataloader = FunctionsDataLoader(
        val_functions, batch_size=args.batch_size, shuffle=False)

    trainer = Trainer(
        model, args.model, train_dataloader, val_dataloader,
        device=device, version=args.version, log_dir=args.log_dir,
        learning_rate=args.learning_rate,
        custom_time=np.linspace(5, 30, 600))
    trainer.train(args.n_epoch, args.save)
Ejemplo n.º 10
0
def train(args):
    """Construct a Trainer from ``args`` and launch training."""
    Trainer(args).train()
Ejemplo n.º 11
0
import argparse

from libs.trainer import Trainer


def _strtobool(value):
    """Parse a truthy/falsy CLI string to 1 or 0.

    Drop-in replacement for ``distutils.util.strtobool``: the whole
    distutils package was removed in Python 3.12 (PEP 632), so importing
    it breaks this script on modern interpreters. Returns 1/0 (not
    True/False) to match the original's parsed values exactly.

    Raises:
        ValueError: if *value* is not a recognised truth string.
    """
    value = value.lower()
    if value in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    if value in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    raise ValueError('invalid truth value %r' % (value,))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-dir', type=str, default='../DATA/giga-fren')
    parser.add_argument('--src-lang', type=str, default='fr')
    parser.add_argument('--tgt-lang', type=str, default='en')
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--src-vocab-size', type=int, default=30000)
    parser.add_argument('--tgt-vocab-size', type=int, default=30000)
    parser.add_argument('--src-embedding-size', type=int, default=256)
    parser.add_argument('--encoder-dropout-p', type=float, default=0.1)
    parser.add_argument('--encoder-hidden-n', type=int, default=256)
    parser.add_argument('--encoder-num-layers', type=int, default=1)
    parser.add_argument('--tgt-embedding-size', type=int, default=256)
    parser.add_argument('--decoder-dropout-p', type=float, default=0.1)
    parser.add_argument('--decoder-hidden-n', type=int, default=256)
    parser.add_argument('--decoder-num-layers', type=int, default=1)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--use-cuda', type=_strtobool, default='1')
    parser.add_argument('--encoder-bidirectional', type=_strtobool, default='0')
    parser.add_argument('--decoder-bidirectional', type=_strtobool, default='0')
    args = parser.parse_args()
    print(args)

    trainer = Trainer(args)
    for _ in range(50):
        trainer.train_one_epoch()