Example #1
    def __init__(self, root, split, clip_len):
        """Load (or build) the Kinetics annotation cache and set up split data.

        Args:
            root: dataset root directory; frames are loaded relative to it,
                and bounding boxes live under ``root/bbox``.
            split: ``'train'`` selects the train clips; any other value
                selects the val clips.
            clip_len: number of frames per sampled clip, forwarded to
                ``util.clip_transform``.
        """
        super(Kinetics, self).__init__()

        # self.root = os.path.join(root, 'frames')
        self.root = root
        self.bbox = os.path.join(root, 'bbox')
        self.split = split
        self.clip_len = clip_len

        # Frame offsets sampled within a clip: [0, 8, 16, 24].
        self.sample_frame = [i * 8 for i in range(4)]

        # Build the annotation cache on first use, then load it.
        if not os.path.exists('data/kinetics_data.pth'):
            parse_annotations(self.root)
            print('Annotations created!')
        annotations = torch.load('data/kinetics_data.pth')

        self.labels = annotations['labels']
        self.train_data = annotations['train_data']
        self.val_data = annotations['val_data']
        self.data = self.train_data if self.split == 'train' else self.val_data
        print('%d train clips | %d val clips' %
              (len(self.train_data), len(self.val_data)))

        self.clip_transform = util.clip_transform(self.split, self.clip_len)

        # PEP 8 (E731): bind a named function instead of a lambda so
        # tracebacks through the loader carry a meaningful name.
        def _load_frame(fl):
            return Image.open(os.path.join(self.root, fl)).convert('RGB')
        self.loader = _load_frame
    def __init__(self, root, split, max_len, sample_rate):
        """Record the dataset configuration and build split-specific transforms.

        A ``max_len`` of -1 means "not chosen" and falls back to 32 frames.
        """
        self.root, self.split = root, split
        self.max_len, self.sample_rate = max_len, sample_rate

        # No explicit clip length requested -> use the default of 32.
        if self.max_len == -1:
            self.max_len = 32
            print ('Max length not chosen. Setting max length to:', self.max_len)

        # Transforms for clips, single images, and paired images.
        self.clip_transform = util.clip_transform(self.split, self.max_len)
        self.img_transform = util.default_transform(self.split)
        self.pair_transform = util.PairedTransform(self.split)
Example #3
 def __init__(self, root, split, clip_len):
     """Initialize the base Kinetics dataset, then swap in the multi-crop transform.

     The parent constructor installs a split-based clip transform; this
     override replaces it with the '3crop' variant used for evaluation.
     """
     super(KineticsMultiCrop, self).__init__(root, split, clip_len)
     self.clip_transform = util.clip_transform('3crop', self.clip_len)