Example #1
0
            # Build the train and test DataLoaders as a two-element list,
            # then repackage them into a dict keyed by phase name.
            data_loader = list()
            data_loader.append(torch.utils.data.DataLoader(
                dataset=loader.TrainTestLoader(data_train, labels_train, joints, coords),
                batch_size=args.batch_size,
                shuffle=True,
                # worker count is scaled by the number of visible GPUs
                num_workers=args.num_worker * ngpu(device),
                drop_last=True))
            # NOTE(review): shuffle=True and drop_last=True on the *test*
            # loader randomize order and drop the last partial batch —
            # confirm this is intended for evaluation.
            data_loader.append(torch.utils.data.DataLoader(
                dataset=loader.TrainTestLoader(data_test, labels_test, joints, coords),
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.num_worker * ngpu(device),
                drop_last=True))
            data_loader = dict(train=data_loader[0], test=data_loader[1])
            # Graph configuration forwarded verbatim to the Processor/model.
            graph_dict = {'strategy': 'spatial'}
            # Input size is coords * joints (flattened per-joint coordinates)
            # — presumably channels x joints; TODO confirm against Processor.
            pr = processor.Processor(args, data_loader, coords*joints, aff_features, num_classes, graph_dict, device=device)
            if args.train:
                pr.train()

            # Extract the best learned features and per-sample predictions
            # for the held-out test split.
            best_features, label_preds = pr.extract_best_feature(data_test, joints, coords)
            # print('{:.4f}'.format(sum(labels == label_preds)/labels.shape[0]))
            # common.plot_features(best_features, labels)

            # Weighted-average precision/recall/F-score across classes.
            precision, recall, fscore, _ = precision_recall_fscore_support(labels_test, label_preds, average='weighted')
            accuracy = sum(labels_test == label_preds) / np.shape(labels_test)[0]
            # accuracy = '{:.4f}'.format(sum(labels_test == label_preds)/np.shape(labels_test)[0])
            print(precision, recall, fscore, accuracy)
            # Append this run's metrics to the already-open results file
            # (metrics_file_full_path is a file handle despite its name).
            metrics_file_full_path.write('Running init {:02d}, fold {:02d} ... \n'.format(init_idx, fold_idx))
            # metrics_file_full_path.write('Running init {:02d}, dataset {} ... \n'.format(init_idx, datasets[dataset_idx]))
            metrics_file_full_path.write('precision= {:.4f}, recall= {:.4f}, f-score= {:.4f}, accuracy= {:.4f} \n\n'.format(precision, recall, fscore, accuracy))
            # print('{:.4f}'.format(sum(labels == label_preds)/labels.shape[0]))
Example #2
0
    loader.load_data(data_path, ftype_real, ftype_synth, coords, joints, cycles=cycles)
# Number of classes is inferred from the distinct training labels.
num_classes = np.unique(labels_train).shape[0]
# Train/test DataLoaders built directly as a dict literal.
# NOTE(review): shuffle=True + drop_last=True on the test loader drops the
# final partial batch at evaluation time — confirm intended.
data_loader = {
    'train':
    torch.utils.data.DataLoader(dataset=loader.TrainTestLoader(
        data_train, labels_train, joints, coords, num_classes),
                                batch_size=args.batch_size,
                                shuffle=True,
                                drop_last=True),
    'test':
    torch.utils.data.DataLoader(dataset=loader.TrainTestLoader(
        data_test, labels_test, joints, coords, num_classes),
                                batch_size=args.batch_size,
                                shuffle=True,
                                drop_last=True)
}
# Graph configuration forwarded verbatim to the Processor/model.
graph_dict = {'strategy': 'spatial'}
print('Train set size: {:d}'.format(len(data_train)))
print('Test set size: {:d}'.format(len(data_test)))
print('Number of classes: {:d}'.format(num_classes))
pr = processor.Processor(args,
                         data_loader,
                         coords,
                         num_classes,
                         graph_dict,
                         device=device)
if args.train:
    pr.train()
# Optionally persist the best learned features for the full dataset.
if args.save_features:
    f = pr.save_best_feature(ftype_real, ftype_synth, data, joints, coords)
Example #3
0
File: main.py  Project: emotionwalk/taew
        labels_test).cuda()

    # Wrap the CUDA tensors in TensorDatasets; these DataLoaders use the
    # defaults (no shuffling, no worker processes).
    train_set = TensorDataset(X_train, Y_train)
    val_set = TensorDataset(X_val, Y_val)

    train_loader = DataLoader(train_set, batch_size=128)
    val_loader = DataLoader(val_set, batch_size=128)

    # The Processor expects a dict keyed 'train'/'test'; the validation
    # loader is passed as the test split here.
    data_loader_train_test = dict(train=train_loader, test=val_loader)
    print('Train set size: {:d}'.format(len(data_train)))
    print('Test set size: {:d}'.format(len(data_test)))
    print('Number of classes: {:d}'.format(num_classes))
    pr = processor.Processor(args,
                             data_loader_train_test,
                             coords,
                             num_classes,
                             graph_dict,
                             device=device,
                             verbose=True)
    pr.train()
# Inference-only path: no data loaders are needed, so None is passed.
else:
    pr = processor.Processor(args,
                             None,
                             coords,
                             num_classes,
                             graph_dict,
                             device=device,
                             verbose=False)
    preds = pr.generate_predictions(data, num_labels[0], joints, coords)
    # Convert raw predictions and ground-truth labels to multi-hot vectors
    # so they can be compared elementwise downstream.
    labels_pred = to_multi_hot(preds)
    labels_true = to_multi_hot(labels)
Example #4
0
    # Repackage the two loaders built above (unseen branch) into a dict.
    data_loader_train_test = dict(train=data_loader_train_test[0],
                                  test=data_loader_train_test[1])
# Fallback branch: build NYU Depth V2 train/test loaders from loader_test.
else:
    data_loader_train_test = list()
    data_loader_train_test.append(
        torch.utils.data.DataLoader(
            dataset=loader_test.NYU_Depth_V2(train=True),
            batch_size=args.batch_size,
            shuffle=True,
            # worker count scaled by the number of visible GPUs
            num_workers=args.num_worker * torchlight.ngpu(device),
            drop_last=False))
    # NOTE(review): shuffle=True on the test loader randomizes evaluation
    # order — confirm downstream code does not rely on sample order.
    data_loader_train_test.append(
        torch.utils.data.DataLoader(
            dataset=loader_test.NYU_Depth_V2(train=False),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_worker * torchlight.ngpu(device),
            drop_last=False))
    data_loader_train_test = dict(train=data_loader_train_test[0],
                                  test=data_loader_train_test[1])

pr = processor.Processor(args, data_loader_train_test, device=device)
if args.train:
    pr.train()
# TBD: Saving the best features to be done later
# if args.save_features:
#     f = pr.save_best_feature(ftype, data, joints, coords)

# TBD: The test function has to be implemented, instead of the per_test function.
pr.per_test()
Example #5
0
                    '<td><img src = "perceived/{}_{:02d}.png"></td>'.format(
                        tag_categories[0][emo_idx], aff_idx))
            inf.write('</tr>')
        inf.write('</table>')

# Construct the Processor with the full set of dataset dimensions and
# category metadata. Positional argument meanings (names taken from the
# variables passed) — presumably matching the Processor signature; verify:
#   text_length, num_frames + 2 (padded frame count? TODO confirm),
#   affs_dim, num_joints, coords, rots_dim, tag_categories, and the
#   per-annotation dimensions (emotion/polarity/acting task/gender/age/
#   handedness/native tongue), plus skeleton topology (names, parents).
pr = processor.Processor(args,
                         data_path,
                         data_loader,
                         text_length,
                         num_frames + 2,
                         affs_dim,
                         num_joints,
                         coords,
                         rots_dim,
                         tag_categories,
                         intended_emotion_dim,
                         intended_polarity_dim,
                         acting_task_dim,
                         gender_dim,
                         age_dim,
                         handedness_dim,
                         native_tongue_dim,
                         joint_names,
                         joint_parents,
                         # also render sample outputs during training
                         generate_while_train=True,
                         save_path=base_path)

if args.train:
    pr.train()
# Zero-padded 6-digit index string (e.g. '000000') used downstream.
k = 0
index = str(k).zfill(6)
Example #6
0
# Append the test-split DataLoader (the train loader was appended in an
# unseen earlier statement), then repackage the list into a phase dict.
# NOTE(review): shuffle=True + drop_last=True on the test loader — confirm
# intended for evaluation.
data_loader.append(
    torch.utils.data.DataLoader(dataset=loader.TrainTestLoader(
        data_test, poses_test, rotations_test, translations_test,
        affective_features_test, num_frames_test_norm, labels_test, coords,
        num_labels),
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=args.num_worker * ngpu(device),
                                drop_last=True))
data_loader = dict(train=data_loader[0], test=data_loader[1])
# First Processor: sequence model over motion + affective features with
# per-label class weights; no sample generation during training.
pr = processor.Processor(args,
                         dataset,
                         data_loader,
                         num_frames_max,
                         num_joints,
                         coords,
                         diffs_dim,
                         affs_dim,
                         num_frames_out,
                         joint_parents,
                         num_labels,
                         affs_max,
                         affs_min,
                         label_weights=label_weights,
                         generate_while_train=False,
                         save_path=base_path,
                         device=device)
if args.train:
    pr.train()
# Evaluate using the best checkpoint saved during training.
pr.evaluate_model(load_saved_model=True)
#     data_loader = dict(train=data_dict, test=data_dict)
# Split each sequence 30%/70% into a conditioning prefix and a prediction
# target (lengths in frames).
prefix_length = int(0.3 * num_frames)
target_length = int(num_frames - prefix_length)
# Feature dimensions read off a sample entry of the data dict.
rots_dim = data_dict['0']['rotations'].shape[-1]
affs_dim = data_dict['0']['affective_features'].shape[-1]

# Second Processor (different fragment/configuration): prediction setup
# with rotations, splines, and full skeleton description.
pr = processor.Processor(args,
                         dataset,
                         data_loader,
                         num_frames,
                         num_joints,
                         coords,
                         rots_dim,
                         affs_dim,
                         spline_dim,
                         joints_dict,
                         joint_names,
                         joint_offsets,
                         joint_parents,
                         num_labels,
                         prefix_length,
                         target_length,
                         generate_while_train=False,
                         save_path=base_path,
                         device=device)

# idx = 1302
# display_animations(np.swapaxes(np.reshape(
#     np.expand_dims(data_dict[str(idx)]['positions_world'], axis=0),
#     (1, num_frames, -1)), 2, 1), num_joints, coords, joint_parents,
#     save=True,