Example #1
# build the action-name -> class-id mapping from the mapping file
# (one "<id> <action_name>" pair per line)
with open(mapping_file, 'r') as file_ptr:
    actions = file_ptr.read().split('\n')[:-1]
actions_dict = dict()
for a in actions:
    actions_dict[a.split()[1]] = int(a.split()[0])

num_classes = len(actions_dict)

trainer = Trainer(num_stages,
                  num_layers,
                  num_f_maps,
                  features_dim,
                  num_classes,
                  pooling_type=pooling_type,
                  dropout=dropout)
if args.action == "train":
    batch_gen = BatchGenerator(num_classes, actions_dict, gt_path,
                               features_path, sample_rate)
    batch_gen.read_data(vid_list_file)
    trainer.train(model_dir,
                  batch_gen,
                  num_epochs=num_epochs,
                  batch_size=bz,
                  learning_rate=lr,
                  device=device)

if args.action == "predict":
    trainer.predict(model_dir, results_dir, features_path, vid_list_file_tst,
                    num_epochs, actions_dict, device, sample_rate)
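
The parsing loop above assumes a mapping file with one "<id> <action_name>" pair per line. A minimal, self-contained sketch of the expected format and the resulting dictionary (the file contents here are made up):

# hypothetical mapping-file contents; the real file ships with the dataset
sample = "0 background\n1 pour_milk\n2 stir\n"
actions = sample.split('\n')[:-1]  # drop the trailing empty string
actions_dict = {a.split()[1]: int(a.split()[0]) for a in actions}
print(actions_dict)  # {'background': 0, 'pour_milk': 1, 'stir': 2}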
Example #2
for a in actions:
    actions_dict[a.split()[1]] = int(a.split()[0])

num_classes = len(actions_dict)

# initialize model & trainer
model = MultiStageModel(args, num_classes)
trainer = Trainer(num_classes)

# ====== Main Program ====== #
start_time = time.time()
if args.action == "train":
    batch_gen_source = BatchGenerator(num_classes, actions_dict, gt_path,
                                      features_path, sample_rate)
    batch_gen_target = BatchGenerator(num_classes, actions_dict, gt_path,
                                      features_path, sample_rate)
    batch_gen_source.read_data(vid_list_file)  # read & shuffle the source training list
    batch_gen_target.read_data(vid_list_file_target)  # read & shuffle the target training list
    trainer.train(model, model_dir, results_dir, batch_gen_source,
                  batch_gen_target, device, args)

if args.action == "predict":
    predict(model, model_dir, results_dir, features_path, vid_list_file_test,
            args.num_epochs, actions_dict, device, sample_rate, args)

end_time = time.time()

if args.verbose:
    print('')
    print('total running time:', end_time - start_time)
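
The two generators feed a source/target (domain-adaptation style) training run from separate video lists. How Trainer.train consumes them is not shown in this snippet; a sketch of joint iteration, assuming the has_next()/next_batch() interface visible in the commented-out loop in Example #5 below:

# sketch only: draw aligned source and target batches each step
while batch_gen_source.has_next() and batch_gen_target.has_next():
    src_input, src_target = batch_gen_source.next_batch(batch_size)
    tgt_input, _ = batch_gen_target.next_batch(batch_size)  # target labels unused
    # forward both domains through the model and combine the losses here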
Example #3
def main():
    config = get_arg()
    config.save_folder = os.path.join(config.save_folder, config.model)
    if not os.path.exists(config.save_folder):
        os.makedirs(config.save_folder)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if device.type == "cuda":  # device is a torch.device; compare its .type, not the object, to a string
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    else:
        torch.set_default_tensor_type("torch.FloatTensor")

    # Traindataset = featDataset(mode="train", feat_model=config.model)
    num_classes = 11
    actions_dict = {
        "opening": 0,
        "moving": 1,
        "hidden": 2,
        "painting": 3,
        "battle": 4,
        "respawn": 5,
        "superjump": 6,
        "object": 7,
        "special": 8,
        "map": 9,
        "ending": 10,
    }
    actions_dict = utils.label_to_id  # supersedes the hard-coded mapping above
    gt_path = "../../../data/training/feature_ext/vgg"
    features_path = "../../../data/training/feature_ext/vgg"
    Traindataset = BatchGenerator(num_classes, actions_dict, gt_path,
                                  features_path)
    Traindataset.read_data()
    Testdataset = BatchGenerator(num_classes, actions_dict, gt_path,
                                 features_path)
    Testdataset.read_data(mode="test")

    num_stages = 2
    num_layers = 2
    num_f_maps = 64
    features_dim = 512 * 8 * 8
    # num_f_maps = 512 * 8 * 8
    # features_dim = 2048

    model = LSTMclassifier(1, 1, 256)

    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
    criterion = nn.CrossEntropyLoss()

    best_eval = 0
    for epoch in range(1, 1 + config.epochs):
        print("epoch:", epoch)
        t0 = time.time()
        train(
            model=model,
            optimizer=optimizer,
            criterion=criterion,
            dataset=Traindataset,
            config=config,
            device=device,
            # dataset_perm=dataset_perm,
        )
        scheduler.step()
        print(f"\nlr: {scheduler.get_last_lr()}")
        t1 = time.time()
        print(f"\ntraining time: {round(t1 - t0)} sec")

        best_eval = test(
            model=model,
            dataset=Testdataset,
            config=config,
            device=device,
            best_eval=best_eval,
        )
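
With step_size=1 and gamma=0.9, the StepLR scheduler above multiplies the learning rate by 0.9 after every epoch. A small self-contained check (the single dummy parameter only exists to satisfy the optimizer):

import torch
from torch import optim

params = [torch.nn.Parameter(torch.zeros(1))]  # dummy parameter
optimizer = optim.Adam(params, lr=0.1)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
for _ in range(3):
    optimizer.step()
    scheduler.step()
    print(scheduler.get_last_lr())  # ~[0.09], [0.081], [0.0729] (float rounding aside)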
Example #4
def main(args, device, model_load_dir, model_save_dir, results_save_dir):

    if args.action == 'train' and args.extract_save_pseudo_labels == 0:
        # load train dataset and test dataset
        print(f'Load train data: {args.train_data}')
        train_loader = DataLoader(args, args.train_data, 'train')
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, 'test')

        print('Start training.')
        trainer = Trainer(
            args.num_stages,
            args.num_layers,
            args.num_f_maps,
            args.features_dim,
            train_loader.num_classes,
            device,
            train_loader.weights,
            model_save_dir,
        )

        eval_args = [
            args,
            model_save_dir,
            results_save_dir,
            test_loader.features_dict,
            test_loader.gt_dict,
            test_loader.eval_gt_dict,
            test_loader.vid_list,
            args.num_epochs,
            device,
            'eval',
            args.classification_threshold,
        ]

        batch_gen = BatchGenerator(
            train_loader.num_classes,
            train_loader.gt_dict,
            train_loader.features_dict,
            train_loader.eval_gt_dict,
        )

        batch_gen.read_data(train_loader.vid_list)
        trainer.train(
            model_save_dir,
            batch_gen,
            args.num_epochs,
            args.bz,
            args.lr,
            device,
            eval_args,
            pretrained=model_load_dir)

    elif args.extract_save_pseudo_labels and args.pseudo_label_type != 'PL':
        # extract / generate pseudo labels and save them in "data/pseudo_labels"
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, args.extract_set, results_dir=results_save_dir)
        print(f'Extract {args.pseudo_label_type}')
        
        if args.pseudo_label_type == 'local':
            get_save_local_fusion(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'merge':
            merge_PL_CP(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'CMPL':
            CMPL(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'CP':
            extract_CP(args, test_loader.features_dict)
        
        print('Self-labelling process finished')


    else:
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, args.extract_set, results_dir=results_save_dir)

        if args.extract_save_pseudo_labels and args.pseudo_label_type == 'PL':
            print(f'Extract {args.pseudo_label_type}')
            extract_save_PL = 1
        else:
            print('Start inference.')
            extract_save_PL = 0

        trainer = Trainer(
            args.num_stages,
            args.num_layers,
            args.num_f_maps,
            args.features_dim,
            test_loader.num_classes,
            device,
            test_loader.weights,
            results_save_dir)

        trainer.predict(
            args,
            model_load_dir,
            results_save_dir,
            test_loader.features_dict,
            test_loader.gt_dict,
            test_loader.eval_gt_dict,
            test_loader.vid_list,
            args.num_epochs,
            device,
            'test',
            args.classification_threshold,
            uniform=args.uniform,
            save_pslabels=extract_save_PL,
            CP_dict=test_loader.CP_dict,
            )
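
Example #4 branches on args.action, args.extract_save_pseudo_labels, and args.pseudo_label_type. The repository's real argument parser is not part of this snippet; a hypothetical argparse setup that would yield compatible flags (names mirror the attributes read above, defaults are guesses):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--action', choices=['train', 'predict'], default='train')
parser.add_argument('--extract_save_pseudo_labels', type=int, default=0)
parser.add_argument('--pseudo_label_type',
                    choices=['PL', 'local', 'merge', 'CMPL', 'CP'], default='PL')
parser.add_argument('--extract_set', default='test')
parser.add_argument('--classification_threshold', type=float, default=0.5)
args = parser.parse_args([])
print(args.action, args.pseudo_label_type)  # train PL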
Example #5
def main():
    config = get_arg()
    config.save_folder = os.path.join(config.save_folder, config.model)
    if not os.path.exists(config.save_folder):
        os.makedirs(config.save_folder)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if device.type == "cuda":  # device is a torch.device; compare its .type, not the object, to a string
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    else:
        torch.set_default_tensor_type("torch.FloatTensor")

    # Traindataset = featDataset(mode="train", feat_model=config.model)
    num_classes = 11
    actions_dict = {
        "opening": 0,
        "moving": 1,
        "hidden": 2,
        "painting": 3,
        "battle": 4,
        "respawn": 5,
        "superjump": 6,
        "object": 7,
        "special": 8,
        "map": 9,
        "ending": 10,
    }
    gt_path = "../../../data/training/feature_ext/vgg"
    features_path = "../../../data/training/feature_ext/vgg"
    Traindataset = BatchGenerator(num_classes, actions_dict, gt_path,
                                  features_path)
    Traindataset.read_data()
    Testdataset = BatchGenerator(num_classes, actions_dict, gt_path,
                                 features_path)
    Testdataset.read_data(mode="test")

    # while Traindataset.has_next():
    #     batch_input, batch_target = Traindataset.next_batch(config.batch_size)

    # Testdataset = featDataset(mode="test", feat_model=config.model)
    # model = featModel(input_channel=1280)
    num_stages = 2
    num_layers = 2
    num_f_maps = 64
    features_dim = 512 * 8 * 8
    # num_f_maps = 512 * 8 * 8
    # features_dim = 2048

    # model = MultiStageModel(
    #     num_stages, num_layers, num_f_maps, features_dim, num_classes
    # )
    model = TCN(features_dim, 11, [20])

    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    criterion = nn.CrossEntropyLoss()

    best_eval = 0
    for epoch in range(1, 1 + config.epochs):
        print("epoch:", epoch)
        t0 = time.time()
        train(
            model=model,
            optimizer=optimizer,
            criterion=criterion,
            dataset=Traindataset,
            config=config,
            device=device,
            # dataset_perm=dataset_perm,
        )
        t1 = time.time()
        print(f"\ntraining time: {round(t1 - t0)} sec")

        best_eval = test(
            model=model,
            dataset=Testdataset,
            config=config,
            device=device,
            best_eval=best_eval,
        )
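
Examples #3 and #5 both thread best_eval through test() every epoch, which implies that test() returns the running best score and checkpoints on improvement. A sketch of that contract; evaluate() and the checkpoint filename are assumptions, not the repository's actual code:

def test(model, dataset, config, device, best_eval):
    score = evaluate(model, dataset, device)  # hypothetical evaluation helper
    if score > best_eval:
        # keep the best checkpoint seen so far
        torch.save(model.state_dict(),
                   os.path.join(config.save_folder, "best.pth"))
        best_eval = score
    return best_eval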