Example #1
0
# Load the action-name -> class-id mapping file.
# Each non-empty line is expected to look like: "<id> <action_name>".
# Use a context manager so the file handle is closed even on error.
with open(mapping_file, 'r') as file_ptr:
    actions = file_ptr.read().split('\n')[:-1]

actions_dict = dict()
for a in actions:
    # Split once per line: first token is the integer class id,
    # second token is the action name.
    tokens = a.split()
    actions_dict[tokens[1]] = int(tokens[0])

# Number of target classes is the size of the action-name -> id mapping.
num_classes = len(actions_dict)

# Build the multi-stage temporal model; all hyper-parameters
# (num_stages, num_layers, ...) are defined earlier in the script.
trainer = Trainer(num_stages,
                  num_layers,
                  num_f_maps,
                  features_dim,
                  num_classes,
                  pooling_type=pooling_type,
                  dropout=dropout)
if args.action == "train":
    # Batch generator streams (features, ground-truth) pairs for the
    # videos listed in vid_list_file.
    batch_gen = BatchGenerator(num_classes, actions_dict, gt_path,
                               features_path, sample_rate)
    batch_gen.read_data(vid_list_file)
    trainer.train(model_dir,
                  batch_gen,
                  num_epochs=num_epochs,
                  batch_size=bz,
                  learning_rate=lr,
                  device=device)

if args.action == "predict":
    # Inference on the test split; results are written to results_dir.
    trainer.predict(model_dir, results_dir, features_path, vid_list_file_tst,
                    num_epochs, actions_dict, device, sample_rate)
Example #2
0
def main(args, device, model_load_dir, model_save_dir, results_save_dir):
    """Entry point dispatching between training, pseudo-label extraction,
    and inference/prediction, depending on ``args``.

    Branch selection:
      * train:     args.action == 'train' and extract_save_pseudo_labels == 0
      * extract:   extract_save_pseudo_labels truthy, pseudo_label_type != 'PL'
      * otherwise: inference (optionally saving 'PL' pseudo labels)
    """

    if args.action == 'train' and args.extract_save_pseudo_labels == 0:
        # load train dataset and test dataset
        print(f'Load train data: {args.train_data}')
        train_loader = DataLoader(args, args.train_data, 'train')
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, 'test')

        print(f'Start training.')
        # Model is built from the train split's class count and weights;
        # checkpoints go to model_save_dir.
        trainer = Trainer(
                    args.num_stages,
                    args.num_layers,
                    args.num_f_maps,
                    args.features_dim,
                    train_loader.num_classes,
                    device,
                    train_loader.weights,
                    model_save_dir
                    )

        # Positional argument bundle consumed by the evaluation hook
        # inside trainer.train (order matters — do not reorder).
        eval_args = [
            args,
            model_save_dir,
            results_save_dir,
            test_loader.features_dict,
            test_loader.gt_dict,
            test_loader.eval_gt_dict,
            test_loader.vid_list,
            args.num_epochs,
            device,
            'eval',
            args.classification_threshold,
        ]

        batch_gen = BatchGenerator(
            train_loader.num_classes,
            train_loader.gt_dict,
            train_loader.features_dict,
            train_loader.eval_gt_dict
            )

        batch_gen.read_data(train_loader.vid_list)
        trainer.train(
            model_save_dir,
            batch_gen,
            args.num_epochs,
            args.bz,
            args.lr,
            device,
            eval_args,
            pretrained=model_load_dir)

    elif args.extract_save_pseudo_labels and args.pseudo_label_type != 'PL':
        # extract/ generate pseudo labels and save in "data/pseudo_labels"
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, args.extract_set, results_dir=results_save_dir)
        print(f'Extract {args.pseudo_label_type}')

        # Dispatch on the requested pseudo-label strategy.
        if args.pseudo_label_type == 'local':
            get_save_local_fusion(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'merge':
            merge_PL_CP(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'CMPL':
            CMPL(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'CP':
            extract_CP(args, test_loader.features_dict)

        print('Self labelling process finished')


    else:
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, args.extract_set, results_dir=results_save_dir)

        # 'PL' pseudo labels are produced by the predict pass itself,
        # so it is handled here rather than in the extraction branch.
        if args.extract_save_pseudo_labels and args.pseudo_label_type == 'PL':
            print(f'Extract {args.pseudo_label_type}')
            extract_save_PL = 1
        else:
            print(f'Start inference.')
            extract_save_PL = 0

        # NOTE(review): model is constructed with results_save_dir where the
        # training branch passes model_save_dir — presumably the Trainer uses
        # this path only for outputs here; confirm against Trainer.__init__.
        trainer = Trainer(
            args.num_stages,
            args.num_layers,
            args.num_f_maps,
            args.features_dim,
            test_loader.num_classes,
            device,
            test_loader.weights,
            results_save_dir)

        trainer.predict(
            args,
            model_load_dir,
            results_save_dir,
            test_loader.features_dict,
            test_loader.gt_dict,
            test_loader.eval_gt_dict,
            test_loader.vid_list,
            args.num_epochs,
            device,
            'test',
            args.classification_threshold,
            uniform=args.uniform,
            save_pslabels=extract_save_PL,
            CP_dict=test_loader.CP_dict,
            )
Example #3
0

separator = ','

# Open the CSV-like stream once and keep it open for the lifetime of the
# tail-follow loop below; the context manager guarantees cleanup.
with open(target_file, 'r') as reader:
    header = reader.readline().split(separator)
    count = 0

    print('Number of columns: %d' % len(header))
    print('Reading %s\n' % target_file)

    model = Trainer()
    model.load_model('./SVM_classifier.sav')

    # Tail the file: poll for newly appended rows and classify each one.
    while True:
        row = reader.readline()
        if not row:
            # No new data yet -- back off briefly instead of busy-spinning.
            time.sleep(0.1)
            continue
        count += 1

        # Preprocess: drop the trailing empty field left by the final
        # separator, remove the non-feature columns, cast to float32.
        fields = row.split(separator)[:-1]
        features = np.delete(np.array(fields), [0, 1, 2, 3, 5, 6], 0)
        features = features.astype(np.float32)

        # Classify the single sample (predict expects a 2-D batch).
        label = model.predict([features])
        print(label)
Example #4
0
    # Load the training CSV; fea_sel=0 -> no feature selection applied.
    dataset, target = readTrainCSV(filecsv, fea_sel=0)

    # Split into 80% train / 20% test with a fixed seed for reproducibility.
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(
        dataset, target, test_size=0.2, random_state=0)
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float (alias of float64) is the drop-in replacement.
    X_train = np.asarray(X_train).astype(float)
    X_test = np.asarray(X_test).astype(float)
    y_train = np.asarray(y_train).astype(float)
    y_test = np.asarray(y_test).astype(float)

    model_name = 'SVM'

    # RBF-kernel SVM wrapped by the project's Trainer helper.
    classifier = Trainer(model_name, kernel='rbf', C=1, gamma='scale')

    # Train, evaluate on the held-out split, and persist the fitted model.
    classifier.fit(X_train, y_train)
    classifier.predict(X_test)
    classifier.report(y_test)
    print(classifier.model)
    classifier.save_model('SVM_classifier.sav')