예제 #1
0
                    action='store_true',
                    default=True,
                    help='save log')

# Working directory - model_path. Currently disabled
# parser.add_argument('--work-dir', type=str, default=model_path, metavar='WD',
#                     help='path to save')
# TO ADD: save_result

args = parser.parse_args()
device = 'cuda:0'

# pr = processor1.Processor(args, None, device)
# data, labels, data_train, labels_train, data_test, labels_test = \
#     loader.load_data(data_path)
# Pull a single (data, label) sample from the test split of the dataset.
a = loader.TrainTestLoader(False)
d, l = a.__getitem__(0)
print(type(d))
print(type(l))
print(l.shape)
print(d.shape)
# `d` is channel-first (C, H, W); rearrange the first three channels into an
# (H, W, 3) RGB image. This replaces the previous per-channel copy into a
# zeros array and drops a dead `img__ = d.numpy()` assignment that was
# unconditionally overwritten below.
img_ = np.transpose(d[:3].numpy(), (1, 2, 0))
# skimage.io expects float images in float32 range for display.
img__ = img_.astype('float32')
io.imshow(img__)
io.show()
예제 #2
0
# Output directory in which the processor saves models/results.
parser.add_argument('--work-dir',
                    type=str,
                    default=model_path,
                    metavar='WD',
                    help='path to save')
# TO ADD: save_result

args = parser.parse_args()
device = 'cuda:0'

# Load the dataset and its train/test split.
data, labels, data_train, labels_train, data_test, labels_test = \
    loader.load_data(data_path, ftype_real, ftype_synth, coords, joints, cycles=cycles)
num_classes = np.unique(labels_train).shape[0]

# Wrap each split in a shuffling DataLoader; incomplete final batches are dropped.
train_set = loader.TrainTestLoader(data_train, labels_train, joints, coords,
                                   num_classes)
test_set = loader.TrainTestLoader(data_test, labels_test, joints, coords,
                                  num_classes)
data_loader = {
    'train': torch.utils.data.DataLoader(dataset=train_set,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         drop_last=True),
    'test': torch.utils.data.DataLoader(dataset=test_set,
                                        batch_size=args.batch_size,
                                        shuffle=True,
                                        drop_last=True),
}
graph_dict = {'strategy': 'spatial'}
print('Train set size: {:d}'.format(len(data_train)))
print('Test set size: {:d}'.format(len(data_test)))
print('Number of classes: {:d}'.format(num_classes))
pr = processor.Processor(args,
예제 #3
0
# Train one classifier per (random init, CV fold) pair; skipped entirely if a
# metrics file already exists (i.e. a previous run completed).
metrics_file_full_path = 'metrics.txt'
if not os.path.exists(metrics_file_full_path):
    for init_idx in range(num_inits):
        # Iterate the pre-computed per-fold train/test splits in lockstep.
        for fold_idx, (data_train, labels_train, data_test, labels_test) in enumerate(zip(data_train_all_folds, labels_train_all_folds,
                                                                                        data_test_all_folds, labels_test_all_folds)):
            print('Running init {:02d}, fold {:02d}'.format(init_idx, fold_idx))
            # saving trained models for each init and split in separate folders
            model_path = os.path.join(base_path, 'model_classifier_combined_lstm_init_{:02d}_fold_{:02d}/features'.format(init_idx, fold_idx) + ftype)
            args.work_dir = model_path
            os.makedirs(model_path, exist_ok=True)
            # Number of affective features per sample — taken from the first
            # element of the first training sample (assumes a nested
            # samples[i][0] layout; TODO confirm against loader output).
            aff_features = len(data_train[0][0])
            num_classes = np.unique(labels_train).shape[0]
            # Build shuffling train/test loaders; partial batches are dropped.
            data_loader = list()
            data_loader.append(torch.utils.data.DataLoader(
                dataset=loader.TrainTestLoader(data_train, labels_train, joints, coords),
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.num_worker * ngpu(device),
                drop_last=True))
            data_loader.append(torch.utils.data.DataLoader(
                dataset=loader.TrainTestLoader(data_test, labels_test, joints, coords),
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.num_worker * ngpu(device),
                drop_last=True))
            data_loader = dict(train=data_loader[0], test=data_loader[1])
            graph_dict = {'strategy': 'spatial'}
            # Fresh processor per init/fold so each model trains from scratch.
            pr = processor.Processor(args, data_loader, coords*joints, aff_features, num_classes, graph_dict, device=device)
            if args.train:
                pr.train()
예제 #4
0
# Working directory - model_path. Currently disabled
# parser.add_argument('--work-dir', type=str, default=model_path, metavar='WD',
#                     help='path to save')
# TO ADD: save_result

args = parser.parse_args()
device = 'cuda:0'

#%% TBD: Load the dataset
# Idiomatic truth test instead of `DEBUG == False` (PEP 8); assumes DEBUG is a
# plain boolean flag.
if not DEBUG:
    # data, labels, data_train, labels_train, data_test, labels_test = \
    # loader.load_data(data_path, ftype, coords, joints, cycles=cycles)
    # num_classes = np.unique(labels_train).shape[0]
    # Build shuffling train/test loaders; incomplete final batches are
    # dropped. Worker count scales with the number of visible GPUs.
    data_loader_train_test = list()
    data_loader_train_test.append(
        torch.utils.data.DataLoader(dataset=loader.TrainTestLoader(train=True),
                                    batch_size=args.batch_size,
                                    shuffle=True,
                                    num_workers=args.num_worker *
                                    torchlight.ngpu(device),
                                    drop_last=True))
    data_loader_train_test.append(
        torch.utils.data.DataLoader(
            dataset=loader.TrainTestLoader(train=False),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_worker * torchlight.ngpu(device),
            drop_last=True))
    data_loader_train_test = dict(train=data_loader_train_test[0],
                                  test=data_loader_train_test[1])
else:
예제 #5
0
# Total affective-feature dimensionality: handcrafted features + deep features.
affs_dim = affective_features_train.shape[-1] + deep_dim

# Fit the scaling range on train and test combined, then rescale each split
# with that shared (max, min) pair so both live in the same range.
affective_features = np.concatenate(
    (affective_features_train, affective_features_test), axis=0)
affective_features, affs_max, affs_min = loader.scale_data(affective_features)
affective_features_train, _, _ = loader.scale_data(affective_features_train,
                                                   affs_max, affs_min)
affective_features_test, _, _ = loader.scale_data(affective_features_test,
                                                  affs_max, affs_min)

# Normalize per-sample frame counts by the longest rotation sequence.
num_frames_max = rotations_train.shape[1]
num_frames_out = num_frames_max - 1
num_frames_train_norm = num_frames_train / num_frames_max
num_frames_test_norm = num_frames_test / num_frames_max

# Build shuffling train/test loaders; incomplete final batches are dropped.
train_dataset = loader.TrainTestLoader(
    data_train, poses_train, rotations_train, translations_train,
    affective_features_train, num_frames_train_norm, labels_train, coords,
    num_labels)
test_dataset = loader.TrainTestLoader(
    data_test, poses_test, rotations_test, translations_test,
    affective_features_test, num_frames_test_norm, labels_test, coords,
    num_labels)
loader_kwargs = dict(batch_size=args.batch_size,
                     shuffle=True,
                     num_workers=args.num_worker * ngpu(device),
                     drop_last=True)
data_loader = {
    'train': torch.utils.data.DataLoader(dataset=train_dataset,
                                         **loader_kwargs),
    'test': torch.utils.data.DataLoader(dataset=test_dataset,
                                        **loader_kwargs),
}