Example #1
import argparse
import os


def main():
    """
    Description: main entry point to run the code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=24)
    parser.add_argument('--epochs', type=int, default=150)
    parser.add_argument('--N', type=int, default=10)
    parser.add_argument('--K', type=int, default=5)
    parser.add_argument('--iterations', type=int, default=1000)
    parser.add_argument('--input_dims', type=int, default=64)
    # type=bool is an argparse pitfall: bool('False') is True, so any
    # non-empty value would parse as True. Plain flags are safer here.
    parser.add_argument('--download', action='store_true')
    parser.add_argument('--GPU_COUNT', type=int, default=1)
    parser.add_argument('--logdir', type=str, default='./log')
    parser.add_argument('--modeldir', type=str, default='./models')
    # --generation is read below but was never defined in the original snippet
    parser.add_argument('--generation', action='store_true')
    config = parser.parse_args()

    # Create the output dirs; exist_ok avoids the bare try/except OSError,
    # which would skip creating modeldir whenever logdir already existed.
    os.makedirs(config.logdir, exist_ok=True)
    os.makedirs(config.modeldir, exist_ok=True)

    trainer = Train(config)

    trainer.train()
    if config.generation:
        trainer.generation()
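
Note: several examples on this page declare boolean CLI options with type=bool, which argparse mishandles: bool('False') is True, so any non-empty value parses as True. A minimal workaround sketch; the str2bool helper and the --download flag are illustrative, not from the original:

import argparse

def str2bool(value):
    """Parse common textual booleans for argparse."""
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError(f'boolean value expected, got {value!r}')

parser = argparse.ArgumentParser()
parser.add_argument('--download', type=str2bool, default=False)
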
Example #2
def start_train(train_iter, dev_iter, test_iter, model, config):
    """
    :param train_iter:  train batch data iterator
    :param dev_iter:  dev batch data iterator
    :param test_iter:  test batch data iterator
    :param model:  nn model
    :param config:  config
    :return:  None
    """
    t = Train(train_iter=train_iter, dev_iter=dev_iter, test_iter=test_iter, model=model, config=config)
    t.train()
    print("Finish Train.")
Example #3
import argparse


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--epoches', type=int, default=50)
    parser.add_argument('--GPU_COUNT', type=int, default=2)
    # type=bool would parse any non-empty string (even "False") as True
    parser.add_argument('--show_status', type=lambda s: s.lower() in ('true', '1'), default=True)
    config = parser.parse_args()

    trainer = Train(config)

    trainer.train()
Example #4
import pandas as pd
import torch
from torch import optim
from torch.utils.data import DataLoader


def main(config):
    """Main entry point of the train module."""
    # Initialize the dataset
    # Full dataset
    # dataset = ICDARDataset('/content/ch4_training_images', '/content/ch4_training_localization_transcription_gt')
    data_df = pd.read_csv(f"{config['data_base_dir']}/train.csv")
    dataset = Synth800kPreprocessedDataset(config["data_base_dir"], data_df)

    # Train test split
    val_size = config["val_fraction"]
    val_len = int(val_size * len(dataset))
    train_len = len(dataset) - val_len
    icdar_train_dataset, icdar_val_dataset = torch.utils.data.random_split(
        dataset, [train_len, val_len])

    icdar_train_data_loader = DataLoader(icdar_train_dataset,
                                         pin_memory=True,
                                         **config["dataset_config"],
                                         worker_init_fn=seed_worker
                                         # collate_fn=icdar_collate
                                         )

    icdar_val_data_loader = DataLoader(icdar_val_dataset,
                                       **config["dataset_config"],
                                       pin_memory=True,
                                       worker_init_fn=seed_worker
                                       # collate_fn=icdar_collate
                                       )

    # Initialize the model
    model = FOTSModel()

    # Count trainable parameters
    print(f'The model has {count_parameters(model):,} trainable parameters.')

    loss = FOTSLoss(config)
    optimizer = model.get_optimizer(config["optimizer"],
                                    config["optimizer_config"])

    # The config key name "lr_schedular" is kept as-is. Note the getattr
    # fallback must be the scheduler class itself: the original passed the
    # string "ReduceLROnPlateau", which would raise TypeError when called.
    lr_scheduler = getattr(optim.lr_scheduler, config["lr_schedular"],
                           optim.lr_scheduler.ReduceLROnPlateau)(
                               optimizer, **config["lr_scheduler_config"])

    trainer = Train(model, icdar_train_data_loader, icdar_val_data_loader,
                    loss, fots_metric, optimizer, lr_scheduler, config)

    trainer.train()
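
Example #4 passes seed_worker to both loaders but never defines it. A minimal sketch of the usual per-worker seeding hook, following PyTorch's reproducibility guidance; this definition is an assumption, not taken from the project:

import random

import numpy as np
import torch

def seed_worker(worker_id):
    """Seed each DataLoader worker from the main process's torch seed."""
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)
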
Example #5
import argparse


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--epoches', type=int, default=10)
    parser.add_argument('--mu', type=int, default=128)
    parser.add_argument('--n_residue', type=int, default=24)
    parser.add_argument('--n_skip', type=int, default=128)
    parser.add_argument('--dilation_depth', type=int, default=10)
    parser.add_argument('--n_repeat', type=int, default=2)
    parser.add_argument('--seq_size', type=int, default=20000)
    # See the str2bool note after Example #1: type=bool mis-parses "False"
    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument('--generation', type=lambda s: s.lower() in ('true', '1'), default=True)
    config = parser.parse_args()

    trainer = Train(config)

    trainer.train()
    if config.generation:
        trainer.generation()
Example #6
import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision import transforms


def main(config):
    # benchmark autotunes cuDNN kernels for fixed-size inputs (non-deterministic)
    cudnn.benchmark = True
    torch.manual_seed(1593665876)
    torch.cuda.manual_seed_all(4099049913103886)

    transform = transforms.Compose([
        transforms.Resize((config.input_height, config.input_width)),
        transforms.ToTensor(),
    ])

    ThreeD_dataloader = None
    kitti_dataloader = None

    if config.ThreeD:
        ThreeD_loader = ThreeD60.get_datasets(
            config.train_path,
            datasets=["suncg", "m3d", "s2d3d"],
            placements=[ThreeD60.Placements.CENTER, ThreeD60.Placements.RIGHT, ThreeD60.Placements.UP],
            image_types=[ThreeD60.ImageTypes.COLOR, ThreeD60.ImageTypes.DEPTH, ThreeD60.ImageTypes.NORMAL],
            longitudinal_rotation=True)
        ThreeD_dataloader = DataLoader(ThreeD_loader,
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=config.num_workers)
    if config.KITTI:
        kitti_loader = KITTI_loader(config.kitti_train_path, transform)
        kitti_dataloader = DataLoader(kitti_loader,
                                      batch_size=config.batch_size,
                                      shuffle=True,
                                      num_workers=config.num_workers)

    if config.mode == 'train':
        train = Train(config, ThreeD_dataloader, kitti_dataloader)
        train.train()

    elif config.mode == 'sample':
        train = Train(config, ThreeD_dataloader, kitti_dataloader)
        eval_name = 'evaluation'
        train.evaluate(config.val_path, config.checkpoint_path, eval_name)

    elif config.mode == 'make':
        train = Train(config, ThreeD_dataloader, kitti_dataloader)
        train.make_checkpoints()
Example #7
import nltk
from gensim.models import Word2Vec
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.svm import LinearSVC

# TextPreproc, Dataset, CharCountVectorizer, TfIdf and Train come from the
# project itself and are not shown here.


def main():
    nltk.download('wordnet')
    nltk.download('stopwords')

    tp = TextPreproc(WordNetLemmatizer(), stopwords.words('english'))
    dataset = Dataset(tp)
    train_X, train_Y = dataset.readdataTrain('data/train/*.txt')
    test_X = dataset.readdataTest('data/test.txt')
    classifier = LinearSVC(dual=False)
    params = {'C': [10**p for p in range(-2, 5)]}
    # charCount, tfidf and word2 are module-level switches defined elsewhere
    # in the original project; each selects a featurization to run.
    if charCount:
        ngram = 3
        vectorizer = CharCountVectorizer(train_X, ngram)
        trainer = Train(classifier, params, -1, vectorizer)
        trainer.train(train_X, train_Y)
        trainer.predict(test_X, f'data/prediction/ngrams/ngrams_{ngram}')
    if tfidf:
        vectorizer = TfIdf(train_X)
        trainer = Train(classifier, params, -1, vectorizer)
        trainer.train(train_X, train_Y)
        trainer.predict(test_X, 'data/prediction/tfidf/tfidf')
    if word2:
        size = 300
        # gensim < 4.0 signature; gensim >= 4.0 renamed size= to vector_size=
        vectorizer = Word2Vec(train_X, size=size, window=3, workers=4)
        trainer = Train(classifier, params, -1, vectorizer)
        trainer.train(train_X, train_Y)
        trainer.predict(test_X, 'data/prediction/word2vec/word2vec')
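
Example #7 hands the Word2Vec model to Train as a "vectorizer", but how the project turns a document into a single vector is not shown. A common choice is mean-pooling the word vectors of a tokenized document; the helper below is a hypothetical sketch using the gensim >= 4.0 API, not the project's actual wrapper:

import numpy as np
from gensim.models import Word2Vec

def mean_word_vector(model, tokens):
    """Average the vectors of in-vocabulary tokens; zeros if none are known."""
    vectors = [model.wv[t] for t in tokens if t in model.wv]
    if not vectors:
        return np.zeros(model.vector_size)
    return np.mean(vectors, axis=0)
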
Example #8
    # The original snippet starts mid-statement: the opening of this list and
    # the decayed parameter group are truncated. A typical reconstruction,
    # assuming the usual no_decay convention:
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},  # assumed typical value; truncated in the original
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]

    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=1e-8)

    trainer = Train(
        model_name=args.model_name,
        train_loader=train_loader,
        test_loader=test_loader,
        device=args.device,
        model=model,
        optimizer=optimizer,
        epochs=12,
        print_step=1,
        early_stop_patience=3,
        save_model_path=f"./experiments/save_model/{args.model_name}",
        save_model_every_epoch=False,
        metric=accuracy_score,
        num_class=2,
        tensorboard_path=f'./experiments/tensorboard_log/{args.model_name}')
    # model_cache='./experiments/save_model/mr-gcn-layer-3/best-validate-model.pt')
    # print(trainer.eval())
    print(trainer.train())
    # print(trainer.test('./data/test.csv', './data/test_res.csv'))
    # print(trainer.test(test_loader, './data/test_new.csv', './data/test_new_res.csv'))
Example #9
    # no_decay is defined earlier in the original function (typically
    # ['bias', 'LayerNorm.weight']). Note both groups use weight_decay 0.0
    # here, which makes the split a no-op; the first group is normally given
    # a nonzero decay.
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]

    optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5, eps=1e-8)
    crit = torch.nn.CrossEntropyLoss()

    trainer = Train(model_name='weibo_bert_cat',
                    train_loader=train_loader,
                    val_loader=val_loader,
                    test_loader=test_loader,
                    model=model,
                    optimizer=optimizer,
                    loss_fn=crit,
                    epochs=10,
                    print_step=1,
                    early_stop_patience=3,
                    # save_model_path=f"./save_model/{params['model_name']}",
                    save_model_path=f"/sdd/yujunshuai/save_model/weibo_bert_cat",
                    save_model_every_epoch=False,
                    metric=accuracy_score,
                    num_class=2,
                    # tensorboard_path='./tensorboard_log')
                    tensorboard_path='/sdd/yujunshuai/tensorboard_log')
    trainer.train()
    trainer.test()


Example #10
def start_train(train_iter, test_iter, model, config):
    T = Train(train_iter=train_iter,
              test_iter=test_iter,
              model=model,
              config=config)
    T.train()