# Augmentation pipeline supplied by the model (not used in the evaluation
# transform below, but kept for parity with the training setup).
train_augmentation = net.tsn.get_augmentation()

# Normalisation statistics come from the backbone. RGBDiff inputs skip
# normalisation (presumably the frame differencing already centres them
# -- confirm against the TSN reference implementation).
input_mean = net.tsn.input_mean
input_std = net.tsn.input_std
normalize = (IdentityTransform() if modality == 'RGBDiff'
             else GroupNormalize(input_mean, input_std))

# Evaluation loader: deterministic order (shuffle=False), keep the final
# partial batch (drop_last=False), deterministic center crop only.
# NOTE(review): the name `train_loader` is misleading for a test-mode loader,
# but later code references it, so the name is kept.
train_loader = torch.utils.data.DataLoader(
    TSNDataSet("", test_list, num_segments=num_segments,
                new_length=data_length,
                modality=modality,
                # RGB/RGBDiff share one frame template; optical flow uses a
                # per-direction prefix filled into "{}" by the dataset.
                image_tmpl="img_{:05d}.jpg" if modality in ["RGB", "RGBDiff"] else flow_prefix+"{}_{:05d}.jpg",
                test_mode=True,
                transform=torchvision.transforms.Compose([
                    GroupCenterCrop([224, 224]),
                    # roll/div are flipped for BNInception -- presumably it
                    # expects BGR, 0-255 inputs; confirm against backbone docs.
                    Stack(roll=arch == 'BNInception'),
                    ToTorchFormatTensor(div=arch != 'BNInception'),
                    normalize,
                ])
    ),
    batch_size=batch_size, shuffle=False,
    num_workers=workers, pin_memory=True, drop_last=False)
# Bug fix: len(DataLoader) is the number of *batches*; the message promises
# the dataset (sample) count, so query the underlying dataset instead.
print("Length of dataset is {}".format(len(train_loader.dataset)))

''' Start Testing Process '''
# Accumulators filled during evaluation: per-batch accuracies and
# ground-truth labels (the loop body continues beyond this chunk).
accur = []
gt = []
# Single pass over the evaluation data (range(1) == exactly one epoch).
for epoch in range(1):
    # NOTE(review): `input` shadows the builtin input(); renaming is deferred
    # because the loop body is outside this view.
    for idx, (input, target, indice) in enumerate(train_loader):
# Random augmentation pipeline provided by the model; applied in the
# training transform chain below.
train_augmentation = net.tsn.get_augmentation()

# Per-channel statistics from the backbone.
input_mean = net.tsn.input_mean
input_std = net.tsn.input_std
if modality == 'RGBDiff':
    # Differenced frames bypass normalisation (presumably already centred
    # by the subtraction -- confirm against the TSN reference code).
    normalize = IdentityTransform()
else:
    normalize = GroupNormalize(input_mean, input_std)

# Training loader: shuffled each epoch, incomplete final batch dropped so
# every step sees a full batch, random augmentation applied before tensor
# conversion.
train_loader = torch.utils.data.DataLoader(TSNDataSet(
    "",
    train_list,
    num_segments=num_segments,
    new_length=data_length,
    modality=modality,
    # RGB/RGBDiff share one frame template; optical flow uses a
    # per-direction prefix filled into "{}" by the dataset.
    image_tmpl="img_{:05d}.jpg"
    if modality in ["RGB", "RGBDiff"] else flow_prefix + "{}_{:05d}.jpg",
    transform=torchvision.transforms.Compose([
        train_augmentation,
        # roll/div are flipped for BNInception -- presumably it expects
        # BGR, 0-255 inputs; confirm against backbone docs.
        Stack(roll=arch == 'BNInception'),
        ToTorchFormatTensor(div=arch != 'BNInception'),
        normalize,
    ])),
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=workers,
                                           pin_memory=True,
                                           drop_last=True)
# Bug fix: len(DataLoader) counts *batches*; the message promises the dataset
# (sample) count, so query the underlying dataset instead.
print("Length of dataset is {}".format(len(train_loader.dataset)))
''' Start Training Process '''
# 400 training epochs; each epoch iterates every shuffled batch (the loop
# body continues beyond this chunk).
for epoch in range(400):
    # NOTE(review): `input` shadows the builtin input(); renaming is deferred
    # because the loop body is outside this view.
    for idx, (input, target, indice) in enumerate(train_loader):