Example #1
def train(model_path: str, num_epochs: int, seed: int, split: int, end_idx, seq_length=32):
    # Train set
    sampler = MySampler(end_idx, seq_length)
    transform = transforms.Compose([
        transforms.Resize((1280, 720)),
        transforms.ToTensor()
    ])

    dataset = MyDataset(
        image_paths=class_image_paths,
        seq_length=seq_length,
        transform=transform,
        length=len(sampler))

    train_loader = DataLoader(
        dataset,
        batch_size=64,
        sampler=sampler
    )

    # config (lr and weight_decay below are module-level constants in the original script)
    train_config = TrainingConfigBase("ehpi_jhmdb_{}_split_{}".format(seed, split), model_path)
    train_config.learning_rate = lr
    train_config.learning_rate_scheduler = LearningRateSchedulerStepwise(lr_decay=0.1, lr_decay_epoch=50)
    train_config.weight_decay = weight_decay
    train_config.num_epochs = num_epochs
    train_config.checkpoint_epoch = num_epochs

    trainer = TrainerEhpi()
    trainer.train(train_loader, train_config, model=EHPISmallNet(21))
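MySampler and MyDataset are not shown in this example. Below is a minimal sketch of what they might look like, assuming end_idx holds the cumulative frame offsets of each video (so sampled windows never cross a clip boundary) and class_image_paths is a list of (path, label) tuples; all names and details are reconstructions, not the original implementation:

import torch
from PIL import Image
from torch.utils.data import Dataset, Sampler


class MySampler(Sampler):
    """Hypothetical sketch: yields start indices of length-`seq_length`
    windows that stay inside a single video."""

    def __init__(self, end_idx, seq_length):
        indices = []
        for i in range(len(end_idx) - 1):
            start = end_idx[i]
            end = end_idx[i + 1] - seq_length
            if end > start:
                indices.append(torch.arange(start, end))
        self.indices = torch.cat(indices)

    def __iter__(self):
        # Shuffle the window start positions each epoch
        return iter(self.indices[torch.randperm(len(self.indices))].tolist())

    def __len__(self):
        return len(self.indices)


class MyDataset(Dataset):
    """Hypothetical sketch: returns a (seq_length, C, H, W) frame stack
    under the {"x": ..., "y": ...} contract the trainer below expects."""

    def __init__(self, image_paths, seq_length, transform, length):
        self.image_paths = image_paths
        self.seq_length = seq_length
        self.transform = transform
        self.length = length

    def __getitem__(self, index):
        frames = []
        for path, _ in self.image_paths[index:index + self.seq_length]:
            frame = Image.open(path)
            if self.transform is not None:
                frame = self.transform(frame)
            frames.append(frame)
        label = self.image_paths[index][1]
        return {"x": torch.stack(frames), "y": label}

    def __len__(self):
        return self.length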
Example #2
    def train(self,
              train_loader: DataLoader,
              train_config: TrainingConfigBase,
              model,
              test_loader: DataLoader = None):
        print("Train model: {}".format(train_config.model_name))

        model.to(device)

        loss_func = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.parameters(),
                              lr=train_config.learning_rate,
                              momentum=train_config.momentum,
                              weight_decay=train_config.weight_decay)

        losses_out = []
        accuracies_out = []
        for epoch in range(train_config.num_epochs):
            model.train()
            train_config.learning_rate_scheduler(optimizer, epoch)
            losses = []
            for i, data in enumerate(train_loader):
                # torch.autograd.Variable is deprecated; plain tensors suffice
                x = data["x"].to(device)
                y = data["y"].to(device, dtype=torch.long)

                optimizer.zero_grad()
                outputs = model(x)
                loss = loss_func(outputs, y)
                loss.backward()
                losses.append(loss.item())
                optimizer.step()

            loss_total = sum(losses) / len(losses)
            losses_out.append(loss_total)
            print("{}: {}".format(epoch, loss_total))
            if epoch != 0 and epoch % train_config.checkpoint_epoch == 0:
                if test_loader is not None:
                    accuracy = self.test_by_seq(model, test_loader=test_loader)
                    accuracies_out.append(accuracy)
        if test_loader is not None:
            self.test_by_seq(model, test_loader=test_loader)
        torch.save(model.state_dict(),
                   train_config.get_output_path(train_config.num_epochs))
        return losses_out, accuracies_out
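Note that the scheduler is a plain callable invoked as learning_rate_scheduler(optimizer, epoch) once per epoch. A minimal sketch of what LearningRateSchedulerStepwise could look like under that contract, assuming it multiplies the current learning rate by lr_decay every lr_decay_epoch epochs; the original implementation may differ:

class LearningRateSchedulerStepwise(object):
    """Hypothetical sketch matching the call site above."""

    def __init__(self, lr_decay: float, lr_decay_epoch: int):
        self.lr_decay = lr_decay
        self.lr_decay_epoch = lr_decay_epoch

    def __call__(self, optimizer, epoch: int):
        # Called once at the start of every epoch; decay only on boundaries
        if epoch != 0 and epoch % self.lr_decay_epoch == 0:
            for param_group in optimizer.param_groups:
                param_group["lr"] *= self.lr_decay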
Example #3
def train(training_set_path: str, model_path: str, num_epochs: int, seed: int,
          split: int):
    # Train set
    train_set = get_training_set(training_set_path, image_size)
    train_set.print_label_statistics()
    sampler = ImbalancedDatasetSampler(train_set, dataset_type=EhpiDataset)
    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              sampler=sampler)

    # config
    train_config = TrainingConfigBase(
        "ehpi_jhmdb_{}_split_{}".format(seed, split), model_path)
    train_config.learning_rate = lr
    train_config.learning_rate_scheduler = LearningRateSchedulerStepwise(
        lr_decay=0.1, lr_decay_epoch=50)
    train_config.weight_decay = weight_decay
    train_config.num_epochs = num_epochs
    train_config.checkpoint_epoch = num_epochs

    trainer = TrainerEhpi()
    trainer.train(train_loader, train_config, model=EHPISmallNet(21))
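ImbalancedDatasetSampler rebalances the classes by drawing samples with replacement, weighting each sample inversely to its label frequency. The idea can be approximated with torch.utils.data.WeightedRandomSampler; the helper below is illustrative, not the sampler's actual API:

from collections import Counter

import torch
from torch.utils.data import WeightedRandomSampler


def make_balanced_sampler(labels):
    # Weight each sample by the inverse frequency of its label so that
    # rare classes are drawn about as often as common ones
    counts = Counter(labels)
    weights = torch.tensor([1.0 / counts[label] for label in labels],
                           dtype=torch.double)
    return WeightedRandomSampler(weights, num_samples=len(labels),
                                 replacement=True)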
Example #4
                    val_loader = DataLoader(val_set,
                                            batch_size=1,
                                            shuffle=False)

                    set_seed(seed)

                    # Dataset Sampler
                    sampler = ImbalancedDatasetSampler(
                        train_set, dataset_type=EhpiDataset)
                    train_loader = DataLoader(train_set,
                                              batch_size=batch_size,
                                              sampler=sampler)

                    # config
                    train_config = TrainingConfigBase(
                        "ehpi_jhmdb_{}".format(seed),
                        os.path.join(models_dir, "val_jhmdb"))
                    train_config.learning_rate = lr
                    train_config.learning_rate_scheduler = LearningRateSchedulerStepwise(
                        lr_decay=0.1, lr_decay_epoch=50)
                    train_config.weight_decay = weight_decay
                    train_config.num_epochs = 350
                    train_config.checkpoint_epoch = 10

                    trainer = TrainerEhpi()
                    losses, accuracies = trainer.train(train_loader,
                                                       train_config,
                                                       test_loader=val_loader,
                                                       model=EHPISmallNet(21))

                    with open("losses_seed_{}.txt".format(seed),
                        FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
                        NormalizeEhpi(image_size)
                    ]), num_joints=num_joints, dataset_part=DatasetPart.TRAIN),
    ]
    for dataset in datasets:
        dataset.print_label_statistics()

    return ConcatDataset(datasets)


if __name__ == '__main__':
    batch_size = 128
    seed = 0
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Train set
    train_set = get_train_set(ehpi_dataset_path, image_size=ImageSize(1280, 720))
    sampler = ImbalancedDatasetSampler(train_set, dataset_type=EhpiDataset)
    train_loader = DataLoader(train_set, batch_size=batch_size, sampler=sampler, num_workers=1)

    # config
    train_config = TrainingConfigBase("ehpi_model", "models")
    train_config.weight_decay = 0
    train_config.num_epochs = 140

    trainer = TrainerEhpi()

    trainer.train(train_loader, train_config, model=ShuffleNetV2(3))
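The 3 passed to ShuffleNetV2 is the number of input channels: an EHPI (Encoded Human Pose Image) turns a pose sequence into an image whose columns are frames, rows are joints, and channels carry the normalized x and y joint coordinates. A rough, purely illustrative sketch of that encoding:

import numpy as np


def encode_ehpi(joint_sequence, width, height):
    """joint_sequence: (num_frames, num_joints, 2) array of pixel coords.
    Returns a (3, num_joints, num_frames) image: x in channel 0, y in
    channel 1, normalized to [0, 1]; channel 2 is left empty here."""
    num_frames, num_joints, _ = joint_sequence.shape
    ehpi = np.zeros((3, num_joints, num_frames), dtype=np.float32)
    ehpi[0] = joint_sequence[..., 0].T / width
    ehpi[1] = joint_sequence[..., 1].T / height
    return ehpi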
        "sim": get_sim,
        "full": get_full
    }
    for seed in seeds:
        use_case_dataset_path = os.path.join(ehpi_dataset_path, "use_case")
        for dataset_name, get_dataset in datasets.items():
            # Train set
            set_seed(seed)
            train_set = get_dataset(use_case_dataset_path,
                                    image_size=ImageSize(1280, 720))
            sampler = ImbalancedDatasetSampler(train_set,
                                               dataset_type=EhpiDataset)
            train_loader = DataLoader(train_set,
                                      batch_size=batch_size,
                                      sampler=sampler,
                                      num_workers=8)

            # config
            train_config = TrainingConfigBase(
                "itsc2019_{}_seed_{}".format(dataset_name, seed),
                os.path.join(models_dir, "train_use_case"))
            train_config.learning_rate_scheduler = LearningRateSchedulerStepwise(
                lr_decay=0.1, lr_decay_epoch=50)
            train_config.learning_rate = 0.05
            train_config.weight_decay = 5e-4
            train_config.num_epochs = 140

            trainer = TrainerEhpi()

            trainer.train(train_loader, train_config, model=ShuffleNetV2(3))
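All of these examples drive training through attributes on TrainingConfigBase, which TrainerEhpi reads back in Example #2 (model_name, momentum, get_output_path, and so on). Below is a minimal sketch of such a config object as implied by that usage; the field defaults and the checkpoint filename format are assumptions:

import os


class TrainingConfigBase(object):
    """Sketch reconstructed from usage; not the original class."""

    def __init__(self, model_name: str, model_dir: str):
        self.model_name = model_name
        self.model_dir = model_dir
        # Defaults; the examples override these per experiment
        self.learning_rate = 0.05
        self.momentum = 0.9
        self.weight_decay = 5e-4
        self.num_epochs = 140
        self.checkpoint_epoch = 10
        self.learning_rate_scheduler = lambda optimizer, epoch: None

    def get_output_path(self, epoch: int) -> str:
        # Where the trainer saves the model state_dict (format assumed)
        return os.path.join(self.model_dir,
                            "{}_cp{:04d}.pth".format(self.model_name, epoch))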
Example #7
def set_seed(seed: int):
    # Reconstructed header: the source snippet starts mid-function here
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(0)


if __name__ == '__main__':
    batch_size = 256
    seeds = [0, 104, 123, 142, 200]
    datasets = {
        "gt": get_training_set_gt,
        "pose": get_training_posealgo,
        "both": get_training_set_both
    }
    for seed in seeds:
        for dataset_name, get_dataset in datasets.items():
            set_seed(seed)
            train_set = get_dataset(ehpi_dataset_path, ImageSize(1280, 720))
            train_loader = DataLoader(train_set,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=1)

            # config
            train_config = TrainingConfigBase(
                "ehpi_journal_2019_03_{}_seed_{}".format(dataset_name, seed),
                os.path.join(models_dir, "train_its_journal"))
            train_config.weight_decay = 0
            train_config.num_epochs = 200

            trainer = TrainerEhpi()
            trainer.train(train_loader, train_config, model=EhpiLSTM(15, 5))
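EhpiLSTM(15, 5) points at a recurrent baseline over 15 joints with 5 action classes. The sketch below shows one plausible shape for such a model, assuming a (batch, frames, 15 * 2) input of normalized joint coordinates; the hidden size and layout are invented for illustration:

import torch.nn as nn


class EhpiLSTM(nn.Module):
    """Illustrative only; not the repo's implementation."""

    def __init__(self, num_joints: int, num_classes: int,
                 hidden_size: int = 128):
        super(EhpiLSTM, self).__init__()
        self.lstm = nn.LSTM(input_size=num_joints * 2,
                            hidden_size=hidden_size,
                            batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # x: (batch, num_frames, num_joints * 2)
        _, (h_n, _) = self.lstm(x)
        # Classify from the last layer's final hidden state
        return self.fc(h_n[-1])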