Example #1
        # generator
        # (fragment) this code sits inside the NTU branch of a dataset-selection if/elif in a
        # larger training script; it assumes torch, torch.nn as nn, torch.optim as optim,
        # torch.backends.cudnn as cudnn, FullNetwork and NTUDataset are imported above
        model = FullNetwork(vp_value_count=VP_VALUE_COUNT, stdev=STDEV,
                            output_shape=(BATCH_SIZE, CHANNELS, FRAMES, HEIGHT, WIDTH))
        model = model.to(device)

        if device == 'cuda':
            # wrap the generator for multi-GPU use and let cuDNN autotune convolution kernels
            model = torch.nn.DataParallel(model)
            cudnn.benchmark = True

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=LR)

        # data
        trainset = NTUDataset(root_dir=data_root_dir, data_file=train_split, param_file=param_file,
                              resize_height=HEIGHT, resize_width=WIDTH,
                              clip_len=FRAMES, skip_len=SKIP_LEN,
                              random_all=RANDOM_ALL, precrop=PRECROP)
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)

        testset = NTUDataset(root_dir=data_root_dir, data_file=test_split, param_file=param_file,
                             resize_height=HEIGHT, resize_width=WIDTH,
                             clip_len=FRAMES, skip_len=SKIP_LEN,
                             random_all=RANDOM_ALL, precrop=PRECROP)
        testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)

    elif DATASET.lower() == 'panoptic':
        data_root_dir, train_split, test_split, close_cams_file, weight_file = panoptic_config()

        # generator
        model = FullNetwork(vp_value_count=VP_VALUE_COUNT, stdev=STDEV,
                            output_shape=(BATCH_SIZE, CHANNELS, FRAMES, HEIGHT, WIDTH), use_est_vp=False)
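
The example above only builds the generator, the MSE loss, the Adam optimizer, and the data loaders; the loop that drives them is not shown. Below is a minimal, self-contained sketch of the standard PyTorch training pattern those objects would plug into. The Sequential stand-in model, the random tensors, and the 1e-4 learning rate are placeholders for illustration, not part of the original script.

import torch
import torch.nn as nn
import torch.optim as optim

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# stand-ins for FullNetwork and the NTUDataset loaders (placeholders, not the real pipeline)
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16)).to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-4)

inputs = torch.randn(8, 16, device=device)   # fake batch of inputs
targets = torch.randn(8, 16, device=device)  # fake regression targets

model.train()
for epoch in range(2):
    optimizer.zero_grad()               # clear gradients from the previous step
    outputs = model(inputs)             # forward pass
    loss = criterion(outputs, targets)  # MSE between prediction and target
    loss.backward()                     # backpropagate
    optimizer.step()                    # Adam parameter update
    print(f'epoch {epoch}: loss {loss.item():.4f}')
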
Example #2
    # (scratch) collect the actor ids (pid) that appear for action class 18 in the split file:
    #
    # for sample in data_file:
    #     sample = sample.split(' ')
    #     sample_id = sample[0][sample[0].index('/') + 1:]
    #     scene, pid, rid, action = decrypt_vid_name(sample_id)
    #     print(action)
    #     if action == 18:
    #         if pid not in actors:
    #             actors.append(pid)
    #
    # actors.sort()
    #
    # print(actors)
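    #
    # The scratch above relies on a decrypt_vid_name helper whose implementation is not shown.
    # NTU RGB+D sample ids follow the SsssCcccPpppRrrrAaaa naming convention (setup/scene,
    # camera, performer, replication, action), so one plausible sketch of the helper, purely
    # an assumption rather than the repository's actual code, would be:
    #
    # def decrypt_vid_name(vid_name):
    #     scene = int(vid_name[1:4])       # Ssss -> setup/scene id
    #     pid = int(vid_name[9:12])        # Pppp -> performer (actor) id
    #     rid = int(vid_name[13:16])       # Rrrr -> replication id
    #     action = int(vid_name[17:20])    # Aaaa -> action class id
    #     return scene, pid, rid, action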

    trainset = NTUDataset(root_dir=data_root_dir, data_file=train_split, param_file=param_file,
                          resize_height=HEIGHT, resize_width=WIDTH,
                          clip_len=FRAMES, skip_len=SKIP_LEN,
                          random_all=RANDOM_ALL, precrop=PRECROP, diff_actors=DIFF_ACTORS, diff_scenes=DIFF_SCENES)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)

    # (scratch) sanity check that the first two items returned for each batch agree
    for batch_idx, info in enumerate(trainloader):
        if info[0] != info[1]:
            print('cry')
        # print(info)



# import torch

# x = torch.tensor([2.0,2.0])
# print(x.requires_grad_())
# x = x.requires_grad_()
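
The commented-out scratch at the end toggles requires_grad_ on a tensor. A short, runnable illustration of what that enables (a generic autograd example, unrelated to the networks above):

import torch

x = torch.tensor([2.0, 2.0])
print(x.requires_grad)   # False: plain tensors do not track gradients

x.requires_grad_()       # in-place switch; returns the same tensor
y = (x * x).sum()        # y = x1^2 + x2^2
y.backward()             # populate x.grad via autograd
print(x.grad)            # tensor([4., 4.]), i.e. dy/dx = 2*x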