Example #1
0
def parse_args():
    """Parse command-line arguments for the image-classification tool.

    Returns:
        argparse.Namespace: parsed options, including any extra options
        registered by Trainer.add_cmdline_argument / Infer.add_cmdline_argument.
    """
    parser = argparse.ArgumentParser("image_classification")
    parser.add_argument('-t',
                        '--train',
                        action='store_true',
                        help='Whether to run training.')
    parser.add_argument("-i",
                        '--infer',
                        action='store_true',
                        help="Whether to run inference on the test dataset.")
    # required=True makes a default value pointless; argparse ignores it.
    parser.add_argument('--model_path',
                        type=str,
                        required=True,
                        help="Model storage path.")
    parser.add_argument(
        '-g',
        '--use_cuda',
        action='store_true',
        help='Choose, if you want to run training with GPU performance.')
    Trainer.add_cmdline_argument(parser)
    Infer.add_cmdline_argument(parser)

    # Print full help when invoked with no arguments. This must run BEFORE
    # parse_args(): the required --model_path would otherwise make argparse
    # exit with its own terse error first, leaving this branch unreachable.
    if len(sys.argv) <= 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    return parser.parse_args()
Example #2
0
def main():
    """CLI entry point: parse GAN training options, prepare the checkpoint
    directory, and launch a DC- or AC-GAN trainer."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--tasktype', type=str, default='dc', help='ac or dc')
    parser.add_argument('--taskname',
                        type=str,
                        default='DCGAN',
                        help='taskname for model saving etc')
    # NOTE: type=bool is an argparse trap -- bool("False") is True because
    # every non-empty string is truthy. Parse the text explicitly so that
    # "--resume False" / "--resume 0" actually mean False.
    parser.add_argument('--resume',
                        type=lambda s: s.lower() in ('1', 'true', 'yes'),
                        default=None,
                        help='resume or not')
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--batch_size', type=int, default=128)

    parser.add_argument('--img_channel_num', type=int, default=3)
    parser.add_argument('--z_dim', type=int, default=100)
    parser.add_argument('--n_classes', type=int, default=2)
    parser.add_argument('--img_size', type=int, default=64)
    args = parser.parse_args()
    print('tasktype', args.tasktype, 'taskname', args.taskname, '# epochs',
          args.epochs, 'batch_size', args.batch_size)

    # makedirs also creates the parent 'ckpt' directory when missing, where
    # the original os.mkdir would raise FileNotFoundError on a fresh checkout.
    if not os.path.exists('ckpt/%s' % args.taskname):
        os.makedirs('ckpt/%s' % args.taskname)
        print('created', 'ckpt/%s' % args.taskname)

    if args.tasktype == 'dc':
        trainer = Trainer(args)
    else:
        trainer = ACTrainer(args)

    trainer.train()
Example #3
0
def main(config):
    """Train a DisasterClassifier on the configured data and persist the
    trained weights together with config and vocabularies."""
    dataloader = DataLoader()
    train_loader, valid_loader = dataloader.get_loaders(
        config, config.file_path)

    vocab = dataloader.text.vocab
    label = dataloader.label.vocab

    # Model dimensions are derived from the vocabularies built above.
    model = DisasterClassifier(
        input_size=len(vocab),
        embedding_dim=config.embedding_dim,
        num_layers=config.num_layers,
        hidden_size=config.hidden_size,
        dropout=config.dropout,
        n_classes=len(label),
    )

    crit = nn.NLLLoss()
    adam = optim.Adam(model.parameters())

    Trainer(config).train(model, adam, crit, train_loader, valid_loader)

    # Save everything needed to reload and run the model later.
    checkpoint = {
        'model': model.state_dict(),
        'config': config,
        'vocab': vocab,
        'label': label,
    }
    torch.save(checkpoint, config.model_fn)
Example #4
0
def main():
    """Run LSUN training with the basic configuration and model saving on."""
    config, args, opt = configurations('LSUN_basic')
    check_directories(opt.dir_list)

    # Override defaults for this experiment.
    config.datatype = 'lsun'
    opt.save_model = True

    Trainer(config, args, opt).train()
Example #5
0
def main(use_cuda):
    """Dispatch to training or inference based on the module-level CLI args."""
    # Bail out early if CUDA was requested but this PaddlePaddle build
    # was compiled without it.
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        logging.warning('Your PC is not support CUDA!')
        return

    # print_paddle_envs()

    trainer = Trainer(args)
    infer = Infer(args)

    # `args` is expected at module scope (set by an argument parser elsewhere).
    if args.train:
        trainer.train(use_cuda, args.model_path)
    elif args.infer:
        infer.infer(use_cuda, args.model_path)
def main():
    """Run two ablation experiments back to back: one with ReLU disabled,
    one with batch normalization disabled."""
    # Ablation 1: no ReLU.
    config, args, opt = configurations('MODLE_MODIFY_relu')
    check_directories(opt.dir_list)
    relu_trainer = Trainer(config, args, opt)
    # NOTE(review): the flag is flipped after Trainer construction --
    # presumably Trainer reads args lazily; confirm against Trainer.__init__.
    args.use_relu = False
    relu_trainer.train()

    # Ablation 2: no batch norm.
    config, args, opt = configurations('MODLE_MODIFY_batchnorm')
    check_directories(opt.dir_list)
    bn_trainer = Trainer(config, args, opt)
    args.use_batchnorm = False
    bn_trainer.train()
def main(args):
    """Fine-tune a Chinese BERT token classifier: load pickled dataset
    splits, build dataloaders, evaluate before and after training."""
    current_path = os.getcwd()
    logging.info(f'current python path {current_path}...')
    logging.info('Load data...')

    def _load_split(split):
        # Unpickle one dataset split from the dataset directory.
        with open(f"{args.dataset}/{split}_dataset.pkl", "rb") as f:
            return pickle.load(f)

    train_dataset = _load_split("train")
    valid_dataset = _load_split("valid")
    test_dataset = _load_split("test")

    logging.info('Making dataloader...')

    def _make_loader(dataset, shuffle=False):
        # collate_fn needs the owning dataset, hence the closure over it.
        return DataLoader(
            dataset=dataset,
            batch_size=args.batch_size,
            shuffle=shuffle,
            collate_fn=lambda batch: Bert_dataset.collate_fn(dataset, batch),
        )

    train_loader = _make_loader(train_dataset, shuffle=True)
    valid_loader = _make_loader(valid_dataset)
    test_loader = _make_loader(test_dataset)

    logging.info('Load model and parameters...')
    model = BertForTokenClassification.from_pretrained(
        "bert-base-chinese",
        num_labels=3,
        output_attentions=False,
        output_hidden_states=False,
    )

    trainer = Trainer(model, train_loader, valid_loader)

    def _report_validation():
        # Evaluate on the validation split and print accuracy/loss.
        acc, total_loss = trainer.evaluation(test=False)
        print(f"device: {trainer.device} classification acc: {acc: .4f} validation loss: {total_loss:.4f}")

    logging.info('Test validation dataset...')
    _report_validation()

    logging.info('Start training...')
    trainer.training_process(
        early_stopping=True,
        n_iter_no_change=5,
        max_epoch=args.max_epoch,
        save_params=True,
        verbose=True,
        learning_rate=args.learning_rate,
        save_paths=args.save_paths,
    )

    logging.info('Training ends!')
    logging.info('Test validation dataset...')
    _report_validation()
    logging.info('Finish!')
Example #8
0
def main():
    """Train the basic GAN configuration on the CelebA dataset."""
    config, args, opt = configurations('BASIC_CELEBA', 'celeba')
    check_directories(opt.dir_list)
    Trainer(config, args, opt).train()
Example #9
0
def main():
    """Train the basic MNIST configuration."""
    config, args, opt = configurations('BASIC_MNIST')
    check_directories(opt.dir_list)
    Trainer(config, args, opt).train()
Example #10
0
 def predict(self, model_restored, data_loader):
     """Return predictions for *data_loader* using a restored model.

     Builds a Trainer around the restored model and this object's checkpoint
     directory, then runs batched test prediction with self.batch_size.
     """
     runner = Trainer(model_restored, data_loader, self.ckpt_dir)
     return runner.test_prediction(self.batch_size)
Example #11
0
if __name__ == "__main__":
    # Configure root logging before anything else emits records.
    logging.basicConfig(
        format="%(asctime)s | %(levelname)s | %(message)s",
        level=logging.INFO,
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logging.info(f"device use to train {device}")

    # Arguments
    parser = ArgParser()
    args = parser.parse()

    # Dataset -- a data directory is mandatory.
    if args.data_dir is None:
        raise Exception("Data directory is not given.")
    data = create_train_valid_dataset(args)

    # Model, sized to the training set's class count.
    model = create_model(args, data["trainset"].num_classes)

    # Trainer
    trainer = Trainer(
        model=model,
        args=args,
        train_dataloader=data["train_dataloader"],
        valid_dataloader=data["valid_dataloader"],
    )
    trainer.fit(args.epochs)
Example #12
0
def main():
    """Build the BASIC_MNIST configuration and run training."""
    # NOTE(review): unlike the other BASIC_* entry points this one never
    # calls check_directories -- confirm the output directories exist.
    settings = configurations('BASIC_MNIST')
    Trainer(*settings).train()
Example #13
0
def main(config):
    """Train an RNN and/or CNN text classifier and save the results.

    At least one of config.rnn / config.cnn must be set; each selected
    architecture is trained with Adam + NLLLoss, and both state dicts
    (None for an unselected one) are written to config.model_fn together
    with config and vocabularies.

    Raises:
        Exception: if neither architecture was requested.
    """
    if config.gpu_id < 0:
        print("Device: CPU")
    else:
        print("Device:", torch.cuda.get_device_name(config.gpu_id))

    loaders = DataLoader(
        train_fn=config.train_fn,
        batch_size=config.batch_size,
        min_freq=config.min_vocab_freq,
        max_vocab=config.max_vocab_size,
        device=config.gpu_id)

    print('|train| =', len(loaders.train_loader.dataset),
          '|valid| =', len(loaders.valid_loader.dataset))

    vocab_size = len(loaders.text.vocab)
    n_classes = len(loaders.label.vocab)
    print('|vocab| =', vocab_size, '|classes| =', n_classes)

    # `not x` rather than `x is False`: a None/unset flag also means
    # "architecture not selected" and must trigger the same error.
    if not config.rnn and not config.cnn:
        raise Exception('You need to specify an architecture to train. (--rnn or --cnn)')

    rnn_model = None
    cnn_model = None

    if config.rnn:
        model = RNNClassifier(
            input_size=vocab_size,
            word_vec_size=config.word_vec_size,
            hidden_size=config.hidden_size,
            n_classes=n_classes,
            n_layers=config.n_layers,
            dropout_p=config.dropout)
        rnn_model = _train_classifier(config, model, loaders)

    if config.cnn:
        model = CNNClassifier(
            input_size=vocab_size,
            word_vec_size=config.word_vec_size,
            n_classes=n_classes,
            use_batch_norm=config.use_batch_norm,
            dropout_p=config.dropout,
            window_sizes=config.window_sizes,
            n_filters=config.n_filters)
        cnn_model = _train_classifier(config, model, loaders)

    torch.save({
        'rnn': rnn_model.state_dict() if config.rnn else None,
        'cnn': cnn_model.state_dict() if config.cnn else None,
        'config': config,
        'vocab': loaders.text.vocab,
        'classes': loaders.label.vocab,
    }, config.model_fn)


def _train_classifier(config, model, loaders):
    """Train one classifier (shared RNN/CNN path); return the trained model."""
    optimizer = optim.Adam(model.parameters())
    crit = nn.NLLLoss()
    print(model)

    # Move model and loss to the requested GPU before training.
    if config.gpu_id >= 0:
        model.cuda(config.gpu_id)
        crit.cuda(config.gpu_id)

    trainer = Trainer(config)
    return trainer.train(
        model,
        crit,
        optimizer,
        loaders.train_loader,
        loaders.valid_loader)
def subtask(config, args, opt):
    """Train once via a (config, args, opt) Trainer, then fit a second,
    metric-driven Trainer.

    NOTE(review): the two Trainer constructions below use completely
    different signatures, and most names in the second half (model,
    optimizer, criterion, train_dataloader, val_dataloader, gallery_images,
    scheduler, writer, val_dataset, device) are not defined in this
    function -- they must be module-level globals, or this is two code
    fragments pasted together. Confirm before relying on this function.
    """
    trainer = Trainer(config, args, opt)
    trainer.train()
    # metric
    metric = MulticlassAccuracy()
    # Choose the validation metric: optional k-reciprocal re-ranking,
    # parameterized by the CLI's k1/k2/lambda options.
    if args.re_ranking:
        val_metric = ReRankingAccuracy(
            num_query=len(val_dataset),
            max_rank=gallery_images["labels"].shape[0],
            k1=args.k1,
            k2=args.k2,
            lambda_value=args.lambda_value,
        )
    else:
        val_metric = Accuracy()

    # Rebinds `trainer` -- the first Trainer instance is discarded here.
    trainer = Trainer(
        model,
        optimizer,
        criterion,
        train_dataloader,
        val_dataloader,
        gallery_images,
        scheduler,
        writer,
        metric,
        val_metric,
        args.save_dir,
        device,
        accumulate_gradient=args.ag,
    )

    trainer.fit(args.epochs)