Example no. 1
def dataloader_factory(args):
    dataset = dataset_factory(args)
    dataloader = DATALOADERS[args.dataloader_code]
    dataloader = dataloader(args, dataset)
    meta = dataloader.get_meta()
    train, val, test = dataloader.get_pytorch_dataloaders()
    return meta, train, val, test
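
Several of these examples resolve a loader class through a DATALOADERS registry keyed by args.dataloader_code. A minimal sketch of how such a registry might be wired up; the class name, the 'bert' code, and the get_splits method are hypothetical, while get_meta and get_pytorch_dataloaders mirror the calls in the example above:

from torch.utils.data import DataLoader

class BertTrainDataloader:
    """Hypothetical wrapper; only the contract (code, get_meta,
    get_pytorch_dataloaders) is taken from the example above."""
    code = 'bert'

    def __init__(self, args, dataset):
        self.args = args
        self.dataset = dataset

    def get_meta(self):
        # e.g. item counts or vocabulary sizes the model needs at build time
        return {'num_items': len(self.dataset)}

    def get_pytorch_dataloaders(self):
        # one PyTorch DataLoader per split; the split logic itself is elided
        train, val, test = self.dataset.get_splits()  # hypothetical method
        make = lambda d, shuffle: DataLoader(
            d, batch_size=self.args.batch_size, shuffle=shuffle)
        return make(train, True), make(val, False), make(test, False)

# registry that dataloader_factory indexes with args.dataloader_code
DATALOADERS = {BertTrainDataloader.code: BertTrainDataloader}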
Example no. 2
def test_with_dataset_factory(self):
    train_dataset = dataset_factory('none')
    # random.randint is inclusive on both ends, so the upper bound must be
    # len(train_dataset) - 1 to avoid indexing past the end of the dataset
    idx = random.randint(a=0, b=len(train_dataset) - 1)
    img, label = train_dataset[idx]
    self.assertEqual(2867, len(train_dataset))
    self.assertTupleEqual((3, 513, 513), np.shape(img))
    self.assertTupleEqual((513, 513), np.shape(label))
Example no. 3
def main():
    import argparse

    parser = argparse.ArgumentParser(description="Evaluate model on dataset")
    parser.add_argument("dataset", choices=["casia-b"])
    parser.add_argument("weights_path")
    parser.add_argument("data_path")
    parser.add_argument("--network_name", default="resgcn-n39-r8")
    parser.add_argument("--sequence_length", type=int, default=60)
    parser.add_argument("--batch_size", type=int, default=256)
    parser.add_argument("--embedding_layer_size", type=int, default=128)
    parser.add_argument("--use_multi_branch", action="store_true")
    parser.add_argument("--shuffle", action="store_true")

    opt = parser.parse_args()

    # Config for dataset
    graph = Graph("coco")
    dataset_class = dataset_factory(opt.dataset)
    evaluation_fn = None
    if opt.dataset == "casia-b":
        evaluation_fn = _evaluate_casia_b

    # Load data
    dataset = dataset_class(
        opt.data_path,
        train=False,
        sequence_length=opt.sequence_length,
        transform=transforms.Compose([
            SelectSequenceCenter(opt.sequence_length),
            ShuffleSequence(opt.shuffle),
            MultiInput(graph.connect_joint, opt.use_multi_branch),
            ToTensor()
        ]),
    )
    data_loader = DataLoader(dataset, batch_size=opt.batch_size)
    print(f"Data loaded: {len(data_loader)} batches")

    # Init model
    model, model_args = get_model_resgcn(graph, opt)

    if torch.cuda.is_available():
        model.cuda()

    # Load weights (map to CPU when CUDA is unavailable)
    map_location = None if torch.cuda.is_available() else "cpu"
    checkpoint = torch.load(opt.weights_path, map_location=map_location)
    model.load_state_dict(checkpoint["model"])

    result, accuracy_avg, sub_accuracies, dataframe = evaluate(data_loader,
                                                               model,
                                                               evaluation_fn,
                                                               use_flip=True)

    print("\n")
    print((dataframe * 100).round(2))
    print(f"AVG: {accuracy_avg*100} %")
    print("=================================")
    print((dataframe * 100).round(1).to_latex())
    print((dataframe * 100).round(1).to_markdown())
Example no. 4
def test(args):

    model = BertForQuestionAnswering.from_pretrained(PRETRAINED_MODEL_NAME)
    model.load_state_dict(
        torch.load('{}/model/best_model.bin'.format(args.load_model_path)))

    test_set = dataset_factory(args, tokenizer)

    BertQA = BertQATrainer(args, model, None, None, test_set)
    BertQA.evaluate(tokenizer, _write=True)
Example no. 5
def dataloader_factory(args):
    """
    This method loads the specified dataset using the dataset factory and returns the three data loaders
    :param args: system wide arguments from options.py
    :return: train, validation, test data loaders
    """
    dataset = dataset_factory(args)
    dataloader = DATALOADERS[args.dataloader_code]
    dataloader = dataloader(args, dataset)
    train, val, test = dataloader.get_pytorch_dataloaders()
    return train, val, test
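
For context, a hedged usage sketch of the factory above; the 'bert' code and the batch_size value are illustrative placeholders, not values taken from options.py:

from argparse import Namespace

# hypothetical stand-in for the parsed options.py arguments
args = Namespace(dataloader_code='bert', batch_size=64)
train, val, test = dataloader_factory(args)
for batch in train:
    ...  # one training step per batch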
Example no. 6
def train(args):

    # Load BERT QA pre-trained model
    model = BertForQuestionAnswering.from_pretrained(PRETRAINED_MODEL_NAME)
    # show a high-level view of this model's modules
    displayBertModules(model)

    train_loader, val_loader = dataset_factory(args, tokenizer)

    BertQA = BertQATrainer(args, model, train_loader, val_loader, None)
    BertQA.train()
Example no. 7
def test(args):

    test_loader = dataset_factory(args, tokenizer)

    # Load BERT QA pre-trained model
    model = BertForTokenClassification.from_pretrained(
        PRETRAINED_MODEL_NAME,
        return_dict=True,
        num_labels=args.tag_nums  # set the number of output labels for the model
    )
    model.load_state_dict(
        torch.load('{}/model/best_model.bin'.format(args.load_model_path)))
    # show a high-level view of this model's modules
    displayBertModules(model)

    BertTC = BertTCTrainer(args, model, None, None, test_loader)
    BertTC.evaluate(tokenizer)
Example no. 8
def train(args):

    train_loader, val_loader = dataset_factory(args, tokenizer)

    print(len(train_loader), len(val_loader))
    print(args)

    # Load BERT QA pre-trained model
    model = BertForTokenClassification.from_pretrained(
        PRETRAINED_MODEL_NAME,
        return_dict=True,
        num_labels=args.tag_nums  # set the number of output labels for the model
    )
    # show a high-level view of this model's modules
    displayBertModules(model)

    BertTC = BertTCTrainer(args, model, train_loader, val_loader, None)
    BertTC.train()
Example no. 9
def main(args):
    export_root, args = setup_experiments(args)
    device = args.device
    model_checkpoint_path = os.path.join(export_root, 'models')

    train_dataset = dataset_factory(args.train_transform_type, is_train=True)
    val_dataset = dataset_factory(args.val_transform_type, is_train=False)

    dataloaders = dataloaders_factory(train_dataset, val_dataset,
                                      args.batch_size, args.test)
    model = model_factory(args)

    writer = SummaryWriter(os.path.join(export_root, 'logs'))

    train_loggers = [
        MetricGraphPrinter(writer,
                           key='loss',
                           graph_name='loss',
                           group_name='Train'),
        MetricGraphPrinter(writer,
                           key='epoch',
                           graph_name='Epoch',
                           group_name='Train'),
    ]
    val_loggers = [
        MetricGraphPrinter(writer,
                           key='mean_iou',
                           graph_name='mIOU',
                           group_name='Validation'),
        MetricGraphPrinter(writer,
                           key='acc',
                           graph_name='Accuracy',
                           group_name='Validation'),
        RecentModelLogger(model_checkpoint_path),
        BestModelLogger(model_checkpoint_path, metric_key='mean_iou'),
        ImagePrinter(writer, train_dataset, log_prefix='train'),
        ImagePrinter(writer, val_dataset, log_prefix='val')
    ]

    criterion = create_criterion(args)
    optimizer = create_optimizer(model, args)

    if args.pretrained_weights:
        load_pretrained_weights(args, model)

    if args.resume_training:
        setup_to_resume(args, model, optimizer)

    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=args.decay_step,
                                          gamma=args.gamma)
    trainer = Trainer(model,
                      dataloaders,
                      optimizer,
                      criterion,
                      args.epoch,
                      args,
                      num_classes=args.classes,
                      log_period_as_iter=args.log_period_as_iter,
                      train_loggers=train_loggers,
                      val_loggers=val_loggers,
                      lr_scheduler=scheduler,
                      device=device)
    trainer.train()
    writer.close()
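
The two logger lists above wrap a single SummaryWriter and route individual metric keys to named TensorBoard graphs. A guess at the shape of such a logger, assuming a log(log_data, step) hook; only the constructor arguments (writer, key, graph_name, group_name) come from the example itself:

from torch.utils.tensorboard import SummaryWriter

class MetricGraphPrinter:
    """Hypothetical sketch: forwards one key from a metrics dict to TensorBoard."""

    def __init__(self, writer: SummaryWriter, key, graph_name, group_name):
        self.writer = writer
        self.key = key
        self.tag = f'{group_name}/{graph_name}'

    def log(self, log_data: dict, step: int):
        # silently skip metrics the trainer did not report this step
        if self.key in log_data:
            self.writer.add_scalar(self.tag, log_data[self.key], step)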
Example no. 10
def main(opt):
    opt = setup_environment(opt)
    graph = Graph("coco")

    # Dataset
    transform = transforms.Compose([
        MirrorPoses(opt.mirror_probability),
        FlipSequence(opt.flip_probability),
        RandomSelectSequence(opt.sequence_length),
        ShuffleSequence(opt.shuffle),
        PointNoise(std=opt.point_noise_std),
        JointNoise(std=opt.joint_noise_std),
        MultiInput(graph.connect_joint, opt.use_multi_branch),
        ToTensor()
    ])

    dataset_class = dataset_factory(opt.dataset)
    dataset = dataset_class(
        opt.train_data_path,
        train=True,
        sequence_length=opt.sequence_length,
        transform=TwoNoiseTransform(transform),
    )

    dataset_valid = dataset_class(
        opt.valid_data_path,
        sequence_length=opt.sequence_length,
        transform=transforms.Compose([
            SelectSequenceCenter(opt.sequence_length),
            MultiInput(graph.connect_joint, opt.use_multi_branch),
            ToTensor()
        ]),
    )

    train_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        num_workers=opt.num_workers,
        pin_memory=True,
        shuffle=True,
    )

    val_loader = torch.utils.data.DataLoader(
        dataset_valid,
        batch_size=opt.batch_size_validation,
        num_workers=opt.num_workers,
        pin_memory=True,
    )

    # Model & criterion
    model, model_args = get_model_resgcn(graph, opt)
    criterion = SupConLoss(temperature=opt.temp)

    print("# parameters: ", count_parameters(model))

    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model, opt.gpus)

    if opt.cuda:
        model.cuda()
        criterion.cuda()

    # Trainer
    optimizer, scheduler, scaler = get_trainer(model, opt, len(train_loader))

    # Load checkpoint or weights
    load_checkpoint(model, optimizer, scheduler, scaler, opt)

    # Tensorboard
    writer = SummaryWriter(log_dir=opt.tb_path)

    sample_input = torch.zeros(opt.batch_size, model_args["num_input"],
                               model_args["num_channel"], opt.sequence_length,
                               graph.num_node)
    if opt.cuda:
        # keep the graph-trace input on the same device as the model
        sample_input = sample_input.cuda()
    writer.add_graph(model, input_to_model=sample_input)

    best_acc = 0
    loss = 0
    for epoch in range(opt.start_epoch, opt.epochs + 1):
        # train for one epoch
        time1 = time.time()
        loss = train(train_loader, model, criterion, optimizer, scheduler,
                     scaler, epoch, opt)

        time2 = time.time()
        print(f"epoch {epoch}, total time {time2 - time1:.2f}")

        # tensorboard logger
        writer.add_scalar("loss/train", loss, epoch)
        writer.add_scalar("learning_rate", optimizer.param_groups[0]["lr"],
                          epoch)

        # evaluation
        result, accuracy_avg, sub_accuracies, dataframe = evaluate(
            val_loader, model, opt.evaluation_fn, use_flip=True)
        writer.add_text("accuracy/validation", dataframe.to_markdown(), epoch)
        writer.add_scalar("accuracy/validation", accuracy_avg, epoch)
        for key, sub_accuracy in sub_accuracies.items():
            writer.add_scalar(f"accuracy/validation/{key}", sub_accuracy,
                              epoch)

        print(f"epoch {epoch}, avg accuracy {accuracy_avg:.4f}")
        is_best = accuracy_avg > best_acc
        if is_best:
            best_acc = accuracy_avg

        if opt.tune:
            tune.report(accuracy=accuracy_avg)

        if epoch % opt.save_interval == 0 or (
                is_best and epoch > opt.save_best_start * opt.epochs):
            save_file = os.path.join(
                opt.save_folder,
                f"ckpt_epoch_{'best' if is_best else epoch}.pth")
            save_model(model, optimizer, scheduler, scaler, opt, opt.epochs,
                       save_file)

    # save the last model
    save_file = os.path.join(opt.save_folder, "last.pth")
    save_model(model, optimizer, scheduler, scaler, opt, opt.epochs, save_file)

    log_hyperparameter(writer, opt, best_acc, loss)

    print(f"best accuracy: {best_acc*100:.2f}")
Example no. 11
# assumed device-selection head, inferred from the `opt.device > -1` checks below
if opt.device > -1:
    device = torch.device('cuda:{}'.format(opt.device))
else:
    device = torch.device('cpu')
# seed
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.device > -1:
    torch.cuda.manual_seed_all(opt.manualSeed)


#######################################################################################################################
# Data
#######################################################################################################################
# -- load data
setup, (train_data, test_data), relations = dataset_factory(opt.datadir, opt.dataset, opt.nt_train, opt.khop)
train_data = train_data.to(device)
test_data = test_data.to(device)
relations = relations.to(device)
for k, v in setup.items():
    opt[k] = v

# -- train inputs
t_idx = torch.arange(opt.nt_train, out=torch.LongTensor()).unsqueeze(1).expand(opt.nt_train, opt.nx).contiguous()
x_idx = torch.arange(opt.nx, out=torch.LongTensor()).expand_as(t_idx).contiguous()
# dynamic
idx_dyn = torch.stack((t_idx[1:], x_idx[1:])).view(2, -1).to(device)
nex_dyn = idx_dyn.size(1)
# decoder
idx_dec = torch.stack((t_idx, x_idx)).view(2, -1).to(device)
nex_dec = idx_dec.size(1)
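
The t_idx/x_idx construction above materializes the full (time, node) index grid by broadcasting two arange tensors; idx_dyn then drops the first time step before flattening. The same grids can be obtained with torch.meshgrid, shown here purely as an equivalence check (the sizes are made up):

import torch

nt, nx = 5, 3  # illustrative sizes
t_idx = torch.arange(nt).unsqueeze(1).expand(nt, nx).contiguous()
x_idx = torch.arange(nx).expand_as(t_idx).contiguous()

# 'ij' indexing keeps time on the first axis, matching the expand() version
t_ref, x_ref = torch.meshgrid(torch.arange(nt), torch.arange(nx), indexing='ij')
assert torch.equal(t_idx, t_ref) and torch.equal(x_idx, x_ref)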
Example no. 12
# assumed device-selection head, inferred from the `opt.device > -1` checks below
if opt.device > -1:
    device = torch.device('cuda:{}'.format(opt.device))
else:
    device = torch.device('cpu')
# seed
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.device > -1:
    torch.cuda.manual_seed_all(opt.manualSeed)

#######################################################################################################################
# Data
#######################################################################################################################
# -- load data
setup, (train_data, test_data), relations = dataset_factory(
    opt.datadir, opt.dataset, opt.khop)
train_data = train_data.to(device)
test_data = test_data.to(device)
relations = relations.to(device)
for k, v in setup.items():
    opt[k] = v

# -- train inputs
t_idx = torch.arange(opt.nt_train, out=torch.LongTensor()).unsqueeze(1).expand(
    opt.nt_train, opt.nx).contiguous()
x_idx = torch.arange(opt.nx,
                     out=torch.LongTensor()).expand_as(t_idx).contiguous()
# dynamic
idx_dyn = torch.stack((t_idx[1:], x_idx[1:])).view(2, -1).to(device)
nex_dyn = idx_dyn.size(1)
# decoder
Example no. 13
# assumed device-selection head, inferred from the `opt.device > -1` checks below
if opt.device > -1:
    device = torch.device('cuda:{}'.format(opt.device))
else:
    device = torch.device('cpu')
# seed
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.device > -1:
    torch.cuda.manual_seed_all(opt.manualSeed)

#######################################################################################################################
# Data
#######################################################################################################################
# -- load data
setup, (train_data, test_data), relations, (exogenous_train, exogenous_test) = \
    dataset_factory(opt.datadir, opt.dataset, opt.khop)
train_data = train_data.to(device)
test_data = test_data.to(device)
exogenous_train = exogenous_train.to(device)
exogenous_test = exogenous_test.to(device)
relations = relations.to(device)
for k, v in setup.items():
    opt[k] = v

# -- train inputs
t_idx = torch.arange(opt.nt_train, out=torch.LongTensor()).unsqueeze(1).expand(
    opt.nt_train, opt.nx).contiguous()
x_idx = torch.arange(opt.nx,
                     out=torch.LongTensor()).expand_as(t_idx).contiguous()
# dynamic
Example no. 14
def get_dataloader(args):
    dataset = dataset_factory(args)
    dataloader = DATALOADERS[args.dataloader_code]
    dataloader = dataloader(args, dataset)
    return dataloader
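
Unlike Examples 1 and 5, this variant hands back the wrapper itself instead of unpacking the PyTorch loaders. Assuming the wrapper keeps the get_pytorch_dataloaders method seen in those examples, usage would look like:

wrapper = get_dataloader(args)
train, val, test = wrapper.get_pytorch_dataloaders()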