def __init__(self, dataset_folder, spt_path, boxes_file, vid2idx, mode='train',
             get_loader=get_default_video_loader, sample_size=112, classes_idx=None):
    self.dataset_folder = dataset_folder
    self.sample_size = sample_size
    self.boxes_file = boxes_file
    self.vid2idx = vid2idx
    self.mode = mode
    self.data, self.max_frames, self.max_actions = make_dataset_names(
        dataset_folder, spt_path, boxes_file, mode)
    self.loader = get_loader()
    self.classes_idx = classes_idx

    # Per-channel RGB means for normalization; std is left at 1.
    # mean = [112.07945832, 112.87372333, 106.90993363]  # UCF-101, 24 classes
    mean = [103.29825354, 104.63845484, 90.79830328]  # JHMDB, computed from .png frames
    self.spatial_transform = Compose([
        Scale(sample_size),   # Scale is the old torchvision name for Resize
        ToTensor(),
        Normalize(mean, [1, 1, 1]),
    ])
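# --- Hedged usage sketch (illustration only, not from this repo) ---
# Shows how a dataset built by the __init__ above might be instantiated and
# wrapped in a PyTorch DataLoader. The class name Video_Dataset_small_clip is
# taken from the training script below; the paths and the vid2idx mapping here
# are hypothetical placeholders.
from torch.utils.data import DataLoader

vid2idx = {'brush_hair/clip_0001': 0}  # hypothetical video-id -> index mapping
dataset = Video_Dataset_small_clip(
    dataset_folder='/data/JHMDB/frames',  # hypothetical path
    spt_path='/data/JHMDB/splits',        # hypothetical path
    boxes_file='/data/JHMDB/boxes.json',  # hypothetical path
    vid2idx=vid2idx,
    mode='train',
    sample_size=112,
)
train_loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)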
                         os.path.join(root_path, dataset_cfg.dataset.split_txt_path))

    # Get video ids and the class-name -> index mapping
    actions = dataset_cfg.dataset.classes
    cls2idx = {actions[i]: i for i in range(len(actions))}
    vid2idx, vid_names = get_vid_dict(dataset_frames)

    # Per-channel mean/std for normalization
    # mean = [112.07945832, 112.87372333, 106.90993363]  # UCF-101, 24 classes
    mean = [0.5, 0.5, 0.5]
    std = [0.5, 0.5, 0.5]
    spatial_transform = Compose([
        Scale(sample_size),   # Scale is the old torchvision name for Resize
        ToTensor(),
        Normalize(mean, std),
    ])
    temporal_transform = LoopPadding(sample_duration)
    n_classes = len(actions)

    #######################################################
    #          Part 1-1 - train TPN - without reg         #
    #######################################################
    print(' -----------------------------------------------------')
    print('|        Part 1-1 - train TPN - without reg           |')
    print(' -----------------------------------------------------')

    # Define dataloaders
    train_data = Video_Dataset_small_clip(
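# --- Hedged sketch of LoopPadding (assumed semantics, following the common
# 3D-ResNets-PyTorch-style implementation; not copied from this repo) ---
# LoopPadding is the temporal transform used above: given a clip's list of
# frame indices, it cycles through the list, appending indices until the clip
# reaches sample_duration frames, so short clips still fill a fixed-length
# input. Assumes frame_indices is non-empty.
class LoopPadding(object):
    def __init__(self, size):
        self.size = size  # target clip length, e.g. sample_duration

    def __call__(self, frame_indices):
        out = list(frame_indices)
        i = 0
        while len(out) < self.size:
            out.append(frame_indices[i % len(frame_indices)])
            i += 1
        return out

# Example: a 5-frame clip padded to 8 frames.
# LoopPadding(8)([0, 1, 2, 3, 4]) -> [0, 1, 2, 3, 4, 0, 1, 2]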
    '__background__', 'Basketball', 'BasketballDunk', 'Biking', 'CliffDiving',
    'CricketBowling', 'Diving', 'Fencing', 'FloorGymnastics', 'GolfSwing',
    'HorseRiding', 'IceDancing', 'LongJump', 'PoleVault', 'RopeClimbing',
    'SalsaSpin', 'SkateBoarding', 'Skiing', 'Skijet', 'SoccerJuggling',
    'Surfing', 'TennisSwing', 'TrampolineJumping', 'VolleyballSpiking',
    'WalkingWithDog',
]
cls2idx = {actions[i]: i for i in range(len(actions))}

spatial_transform = Compose([
    Scale(sample_size),   # Scale is the old torchvision name for Resize
    ToTensor(),
    Normalize(mean, [1, 1, 1]),
])
temporal_transform = LoopPadding(sample_duration)
n_classes = len(actions)

# Init action_net
model = ACT_net(actions, sample_duration)
model.create_architecture()
model = nn.DataParallel(model)
model.to(device)

model_data = torch.load('./action_net_model_both_without_avg.pwf')
# model.load_state_dict(model_data)
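# --- Hedged sketch (illustration, not the repo's code): loading the checkpoint
# into the DataParallel-wrapped model above. A state dict saved from a bare
# (unwrapped) model lacks the 'module.' prefix that DataParallel expects, so
# remap the keys defensively before calling load_state_dict. Assumes the .pwf
# file holds a plain state dict, as the commented-out load above suggests.
state_dict = torch.load('./action_net_model_both_without_avg.pwf',
                        map_location=device)
if not any(key.startswith('module.') for key in state_dict):
    state_dict = {'module.' + key: value for key, value in state_dict.items()}
model.load_state_dict(state_dict)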