Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--epoches', type=int, default=50)
    parser.add_argument('--GPU_COUNT', type=int, default=2)
    parser.add_argument('--show_status', type=bool, default=True)
    config = parser.parse_args()

    trainer = Train(config)

    trainer.train()
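A note on the `type=bool` arguments used in this and several later examples: argparse applies `bool()` to the raw command-line string, so any non-empty value, including the literal `False`, parses as True. A minimal sketch of the safer `store_true` idiom, reusing the `--show_status` flag from above:

import argparse

parser = argparse.ArgumentParser()
# store_true defaults to False and becomes True only when the flag is given.
parser.add_argument('--show_status', action='store_true')
config = parser.parse_args(['--show_status'])
assert config.show_status is True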
Example #2
def start_train(train_iter, dev_iter, test_iter, model, config):
    """
    :param train_iter:  train batch data iterator
    :param dev_iter:  dev batch data iterator
    :param test_iter:  test batch data iterator
    :param model:  nn model
    :param config:  config
    :return:  None
    """
    t = Train(train_iter=train_iter, dev_iter=dev_iter, test_iter=test_iter, model=model, config=config)
    t.train()
    print("Finish Train.")
Example #3
    def __init__(self, **kwargs):
        self.__name__ = kwargs.get('name', 'NSITBot')
        self.__owner__ = kwargs.get('owner', 'NSIT 2020')
        self.__version__ = kwargs.get('version', 'Test')

        # Create an instance of the trainer inside NSITBot.
        self.trainer = Train(name='TrainerBot')

        self.hello_bot = [
            'bot_action', 'bot_author', 'bot_name', 'bot_version', 'hello',
            'endings'
        ]

        # Fetch the classifier and vectorizer that NSITBot uses to predict
        # the classes of messages received from the user.
        self.classifier, self.vectorizer = self.trainer.getClassifier()
Example #4
def main(config):
    """Main entry point of train module."""
    # Initialize the dataset
    # Full dataset
    # dataset = ICDARDataset('/content/ch4_training_images', '/content/ch4_training_localization_transcription_gt')
    data_df = pd.read_csv(f"{config['data_base_dir']}/train.csv")
    dataset = Synth800kPreprocessedDataset(config["data_base_dir"], data_df)

    # Train test split
    val_size = config["val_fraction"]
    val_len = int(val_size * len(dataset))
    train_len = len(dataset) - val_len
    icdar_train_dataset, icdar_val_dataset = torch.utils.data.random_split(
        dataset, [train_len, val_len])

    icdar_train_data_loader = DataLoader(icdar_train_dataset,
                                         pin_memory=True,
                                         **config["dataset_config"],
                                         worker_init_fn=seed_worker
                                         # collate_fn=icdar_collate
                                         )

    icdar_val_data_loader = DataLoader(icdar_val_dataset,
                                       **config["dataset_config"],
                                       pin_memory=True,
                                       worker_init_fn=seed_worker
                                       # collate_fn=icdar_collate
                                       )

    # Initialize the model
    model = FOTSModel()

    # Count trainable parameters
    print(f'The model has {count_parameters(model):,} trainable parameters.')

    loss = FOTSLoss(config)
    optimizer = model.get_optimizer(config["optimizer"],
                                    config["optimizer_config"])

    # getattr's fallback must be the scheduler class itself, not the string
    # "ReduceLROnPlateau", or instantiating the fallback would fail.
    scheduler_cls = getattr(optim.lr_scheduler, config["lr_schedular"],
                            optim.lr_scheduler.ReduceLROnPlateau)
    lr_scheduler = scheduler_cls(optimizer, **config["lr_scheduler_config"])

    trainer = Train(model, icdar_train_data_loader, icdar_val_data_loader,
                    loss, fots_metric, optimizer, lr_scheduler, config)

    trainer.train()
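One caveat in this example: `random_split` draws a fresh permutation on every run, so the train/val split changes between runs. If the split must be reproducible, PyTorch (1.6+) accepts an explicit generator; a minimal sketch reusing `dataset`, `train_len`, and `val_len` from above, where the seed 42 is an arbitrary choice and not from the original:

# Fixing the generator makes the train/val split identical across runs.
generator = torch.Generator().manual_seed(42)
icdar_train_dataset, icdar_val_dataset = torch.utils.data.random_split(
    dataset, [train_len, val_len], generator=generator)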
Example #5
    def get_trainer(self):
        if self._trainer is None:
            self._trainer = Train(
                model_dir=self.model_dir,
                label_mapper=self.get_label_mapper(),
                epochs=self.epochs,
                early_stopping_patience=self.early_stopping_patience,
                checkpoint_frequency=self.checkpoint_frequency,
                checkpoint_dir=self.checkpoint_dir,
                accumulation_steps=self.grad_accumulation_steps)

        return self._trainer
Example #6
def main():
    """
    Description: main entry point for training.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=24)
    parser.add_argument('--epochs', type=int, default=150)
    parser.add_argument('--N', type=int, default=10)
    parser.add_argument('--K', type=int, default=5)
    parser.add_argument('--iterations', type=int, default=1000)
    parser.add_argument('--input_dims', type=int, default=64)
    parser.add_argument('--download', type=bool, default=False)
    parser.add_argument('--GPU_COUNT', type=int, default=1)
    parser.add_argument('--logdir', type=str, default='./log')
    parser.add_argument('--modeldir', type=str, default='./models')
    # config.generation is read below; without this flag the attribute
    # lookup would raise AttributeError.
    parser.add_argument('--generation', type=bool, default=False)
    config = parser.parse_args()

    # Create output dirs; exist_ok avoids the original bug where an existing
    # logdir raised OSError and modeldir was never created.
    os.makedirs(config.logdir, exist_ok=True)
    os.makedirs(config.modeldir, exist_ok=True)

    trainer = Train(config)

    trainer.train()
    if config.generation:
        trainer.generation()
Example #7
def main():
    """
    Description : run lipnet training code using argument info
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--image_path', type=str, default='./data/datasets/')
    parser.add_argument('--align_path', type=str, default='./data/align/')
    parser.add_argument('--dr_rate', type=float, default=0.5)
    parser.add_argument('--num_gpus', type=int, default=1)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--model_path', type=str, default=None)
    config = parser.parse_args()
    trainer = Train(config)
    trainer.build_model(dr_rate=config.dr_rate, path=config.model_path)
    trainer.load_dataloader()
    trainer.run(epochs=config.epochs)
Example #8
def main():
    """
    Description : run lipnet training code using argument info
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--image_path', type=str, default='./data/datasets/')
    parser.add_argument('--align_path', type=str, default='./data/align/')
    parser.add_argument('--num_gpus', type=int, default=1)
    parser.add_argument('--num_workers', type=int, default=0)
    # Restrict to the two values handled below so data_loader is always bound.
    parser.add_argument('--data_type', type=str, default='valid',
                        choices=['train', 'valid'])
    parser.add_argument('--model_path', type=str, default=None)
    config = parser.parse_args()
    trainer = Train(config)
    trainer.build_model(path=config.model_path)
    trainer.load_dataloader()

    if config.data_type == 'train':
        data_loader = trainer.train_dataloader
    elif config.data_type == 'valid':
        data_loader = trainer.valid_dataloader

    trainer.infer_batch(data_loader)
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--epoches', type=int, default=10)
    parser.add_argument('--mu', type=int, default=128)
    parser.add_argument('--n_residue', type=int, default=24)
    parser.add_argument('--n_skip', type=int, default=128)
    parser.add_argument('--dilation_depth', type=int, default=10)
    parser.add_argument('--n_repeat', type=int, default=2)
    parser.add_argument('--seq_size', type=int, default=20000)
    parser.add_argument('--use_gpu', type=bool, default=False)
    parser.add_argument('--generation', type=bool, default=True)
    config = parser.parse_args()

    trainer = Train(config)

    trainer.train()
    if config.generation:
        trainer.generation()
Example #10
        ],
        'weight_decay': 0.0,
    }]

    optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5, eps=1e-8)
    # scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=len(train_loader))
    crit = torch.nn.CrossEntropyLoss()

    trainer = Train(model_name='pure-bert',
                    train_loader=train_loader,
                    val_loader=val_loader,
                    test_loader=test_loader,
                    model=model,
                    optimizer=optimizer,
                    loss_fn=crit,
                    epochs=10,
                    print_step=10,
                    early_stop_patience=3,
                    save_model_path='/sdd/yujunshuai/save_model/pure_bert',
                    save_model_every_epoch=True,
                    metric=accuracy_score,
                    num_class=2,
                    tensorboard_path='/sdd/yujunshuai/tensorboard_log')

    trainer.train()
    print(f"Testing result :{trainer.test()}")

# pure bert
# Testing result :{'accuracy': 0.880089848977251, 'recall': 0.8658997206940136, 'f1': 0.8698654132943923}
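This example and several later ones (#11, #12, #16, #17) are truncated and open mid-way through the optimizer_grouped_parameters list; the full idiom they share appears complete in Example #13. For reference, a self-contained sketch of that pattern, assuming torch.optim.AdamW (the snippets' own AdamW import is not shown), with `model` standing in for the surrounding example's model and 0.01 as a common decay default that these snippets do not themselves specify:

from torch.optim import AdamW

# Standard transformer fine-tuning idiom: biases and LayerNorm weights are
# excluded from weight decay.
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},  # common default; these snippets use 0.0
    {'params': [p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5, eps=1e-8)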
Example #11
        'params': [
            p for n, p in model.named_parameters()
            if any(nd in n for nd in no_decay)
        ],
        'weight_decay': 0.0,
    }]

    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=1e-8)

    trainer = Train(
        model_name=args.model_name,
        train_loader=train_loader,
        test_loader=test_loader,
        device=args.device,
        model=model,
        optimizer=optimizer,
        epochs=12,
        print_step=1,
        early_stop_patience=3,
        save_model_path=f"./experiments/save_model/{args.model_name}",
        save_model_every_epoch=False,
        metric=accuracy_score,
        num_class=2,
        tensorboard_path=f'./experiments/tensorboard_log/{args.model_name}')
    # model_cache='./experiments/save_model/mr-gcn-layer-3/best-validate-model.pt')
    # print(trainer.eval())
    print(trainer.train())
    # print(trainer.test('./data/test.csv', './data/test_res.csv'))
    # print(trainer.test(test_loader, './data/test_new.csv', './data/test_new_res.csv'))
Example #12
            if any(nd in n for nd in no_decay)
        ],
        'weight_decay': 0.0,
    }]

    optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=1e-8)

    trainer = Train(
        model_name=args.model_name,
        train_loader=train_loader,
        test_loader=test_loader,
        device=args.device,
        model=model,
        optimizer=optimizer,
        epochs=args.epochs,
        print_step=1,
        early_stop_patience=args.early_stop_patience,
        save_model_path=f"./experiments/save_model/{args.model_name}",
        save_model_every_epoch=False,
        metric=accuracy_score,
        num_class=2,
        tensorboard_path=f'./experiments/tensorboard_log/{args.model_name}')

    if args.test and args.best_model_path:
        print(
            trainer.test(test_loader,
                         './data/processed_modified_test_data.json',
                         './data/processed_modified_test_data_result.json',
                         args.best_model_path))
    else:
Example #13
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                # Note: both groups use weight_decay=0.0 here; the usual form
                # of this pattern gives the first (decayed) group a non-zero value.
                {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0},
                {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0}
            ]

            optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5, eps=1e-8)
            crit = torch.nn.CrossEntropyLoss()

            trainer = Train(model_name=model_name,
                            train_loader=train_loader,
                            val_loader=val_loader,
                            test_loader=test_loader,
                            model=model,
                            optimizer=optimizer,
                            loss_fn=crit,
                            epochs=12,
                            print_step=1,
                            early_stop_patience=3,
                            # save_model_path=f"./save_model/{params['model_name']}",
                            save_model_path=f"/sdd/yujunshuai/save_model/{model_name}",
                            save_model_every_epoch=False,
                            metric=accuracy_score,
                            num_class=2,
                            # tensorboard_path='./tensorboard_log')
                            tensorboard_path='/sdd/yujunshuai/tensorboard_log')
            print(trainer.train())
            print(trainer.test())
Example #14
def main(config):
    cudnn.benchmark = True
    torch.manual_seed(1593665876)
    torch.cuda.manual_seed_all(4099049913103886)

    transform = transforms.Compose([
        transforms.Resize((config.input_height, config.input_width)),
        transforms.ToTensor(),
    ])

    ThreeD_dataloader = None
    kitti_dataloader = None

    if config.ThreeD:
        ThreeD_loader = ThreeD60.get_datasets(
            config.train_path,
            datasets=["suncg", "m3d", "s2d3d"],
            placements=[ThreeD60.Placements.CENTER, ThreeD60.Placements.RIGHT,
                        ThreeD60.Placements.UP],
            image_types=[ThreeD60.ImageTypes.COLOR, ThreeD60.ImageTypes.DEPTH,
                         ThreeD60.ImageTypes.NORMAL],
            longitudinal_rotation=True)
        ThreeD_dataloader = DataLoader(ThreeD_loader,
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=config.num_workers)
    if config.KITTI:
        kitti_loader = KITTI_loader(config.kitti_train_path, transform)
        kitti_dataloader = DataLoader(kitti_loader,
                                      batch_size=config.batch_size,
                                      shuffle=True,
                                      num_workers=config.num_workers)

    if config.mode == 'train':
        train = Train(config, ThreeD_dataloader, kitti_dataloader)
        train.train()

    elif config.mode == 'sample':
        train = Train(config, ThreeD_dataloader, kitti_dataloader)
        eval_name = 'evaluation'
        train.evaluate(config.val_path, config.checkpoint_path, eval_name)

    elif config.mode == 'make':
        train = Train(config, ThreeD_dataloader, kitti_dataloader)
        train.make_checkpoints()
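Example #14 seeds PyTorch manually but also sets cudnn.benchmark = True, which lets cuDNN autotune kernels nondeterministically, so runs can still diverge. When exact reproducibility matters, the usual counterpart is a sketch like the following (the seed value 0 is arbitrary, not from the original):

import torch
import torch.backends.cudnn as cudnn

torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
cudnn.benchmark = False      # disable nondeterministic autotuning
cudnn.deterministic = True   # prefer deterministic kernels where available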
Example #15
def start_train(train_iter, test_iter, model, config):
    T = Train(train_iter=train_iter,
              test_iter=test_iter,
              model=model,
              config=config)
    T.train()
Example #16
                'params': [
                    p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                'weight_decay': 0.0,
            }]

            optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5, eps=1e-8)
            crit = torch.nn.CrossEntropyLoss()

            trainer = Train(
                model_name=model_name,
                train_loader=train_loader,
                val_loader=val_loader,
                test_loader=test_loader,
                model=model,
                optimizer=optimizer,
                loss_fn=crit,
                epochs=10,
                print_step=10,
                early_stop_patience=3,
                save_model_path=f'/sdd/yujunshuai/save_model/{model_name}',
                save_model_every_epoch=False,
                metric=accuracy_score,
                num_class=2,
                tensorboard_path='/sdd/yujunshuai/tensorboard_log/fakeddit/')

            print(trainer.train())
            print(f"Testing result :{trainer.test()}")
Example #17
    'weight_decay': 0.0,
}, {
    'params': [
        p for n, p in model.named_parameters()
        if any(nd in n for nd in no_decay)
    ],
    'weight_decay': 0.0,
}]

optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, eps=1e-8)
crit = torch.nn.CrossEntropyLoss()  # defined but never passed to Train below; other examples pass it as loss_fn

trainer = Train(model_name=args.model_name,
                train_loader=train_loader,
                val_loader=test_loader,
                test_loader=None,
                model=model,
                optimizer=optimizer,
                epochs=args.epochs,
                print_step=args.log_interval,
                early_stop_patience=args.early_stop_patience,
                save_model_path=args.save_dir,
                save_model_every_epoch=False,
                metric=accuracy_score,
                num_class=args.output_size,
                tensorboard_path=args.save_dir,
                device=args.device)

print(trainer.train())
Example #18
def main():
    nltk.download('wordnet')
    nltk.download('stopwords')

    tp = TextPreproc(WordNetLemmatizer(), stopwords.words('english'))
    dataset = Dataset(tp)
    train_X, train_Y = dataset.readdataTrain('data/train/*.txt')
    test_X = dataset.readdataTest('data/test.txt')
    classifier = LinearSVC(dual=False)
    params = {'C': [10**p for p in range(-2, 5)]}
    # charCount, tfidf and word2 are flags assumed to be defined at module level.
    if charCount:
        ngram = 3
        vectorizer = CharCountVectorizer(train_X, ngram)
        trainer = Train(classifier, params, -1, vectorizer)
        trainer.train(train_X, train_Y)
        trainer.predict(test_X, f'data/prediction/ngrams/ngrams_{ngram}')
    if tfidf:
        vectorizer = TfIdf(train_X)
        trainer = Train(classifier, params, -1, vectorizer)
        trainer.train(train_X, train_Y)
        trainer.predict(test_X, 'data/prediction/tfidf/tfidf')
    if word2:
        size = 300
        vectorizer = Word2Vec(train_X, size=size, window=3, workers=4)
        trainer = Train(classifier, params, -1, vectorizer)
        trainer.train(train_X, train_Y)
        trainer.predict(test_X, 'data/prediction/word2vec/word2vec')
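One portability note on the Word2Vec call above: gensim 4.0 renamed the size parameter to vector_size (and iter to epochs), so under current gensim the equivalent call is the following sketch, with train_X the token lists from the example:

from gensim.models import Word2Vec

# Same configuration as above, using the gensim >= 4.0 parameter name.
vectorizer = Word2Vec(train_X, vector_size=300, window=3, workers=4)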