Example #1
    def load_data(self, n_similar=3, is_training=True):

        if is_training:
            features = []
            labels = []
            idx = []

            # Load similar pairs
            rand_classid = np.random.choice(len(self.classwiseidx), size=n_similar)
            for rid in rand_classid:
                rand_sampleid = np.random.choice(len(self.classwiseidx[rid]), size=2)
                idx.append(self.classwiseidx[rid][rand_sampleid[0]])
                idx.append(self.classwiseidx[rid][rand_sampleid[1]])

            # Load rest pairs
            rand_sampleid = np.random.choice(len(self.trainidx), size=self.batch_size-2*n_similar)
            for r in rand_sampleid:
                idx.append(self.trainidx[r])
          
            return np.array([
                utils.process_feat(self.features[i], self.t_max) for i in idx
            ]), np.array([self.labels_multihot[i] for i in idx])
        else:
            labs = self.labels_multihot[self.testidx[self.currenttestidx]]
            feat = self.features[self.testidx[self.currenttestidx]]
            vid_name = self.videoname[self.testidx[self.currenttestidx]]

            if self.currenttestidx == len(self.testidx)-1:
                done = True
                self.currenttestidx = 0
            else:
                done = False
                self.currenttestidx += 1

            return np.array(feat), np.array(labs), vid_name, done
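A hypothetical consumption sketch for the load_data interface above, assuming a dataset object named dataset that exposes this method (the class name and constructor are assumptions; only the return signatures come from the example):

    dataset = Dataset(args)  # hypothetical constructor

    # Training: each call draws one random batch of (features, multi-hot labels).
    features, labels = dataset.load_data(n_similar=3, is_training=True)

    # Testing: iterate one video per call until the done flag flips to True.
    done = False
    while not done:
        feat, labs, vid_name, done = dataset.load_data(is_training=False)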
Example #2
    def load_data(self, is_training=True):

        if is_training:
            features = []
            labels = []
            idx = []

            # random sampling
            rand_sampleid = np.random.choice(len(self.trainidx),
                                             size=self.batch_size)
            for r in rand_sampleid:
                idx.append(self.trainidx[r])

            count_labels = np.array([self.count_labels[i] for i in idx])
            if self.labels101to20 is not None:
                count_labels = count_labels[:, self.labels101to20]

            return np.array([
                utils.process_feat(self.features[i], self.t_max) for i in idx
            ]), np.array([self.labels_multihot[i] for i in idx]), count_labels

        else:
            labs = self.labels_multihot[self.testidx[self.currenttestidx]]
            feat = self.features[self.testidx[self.currenttestidx]]

            if self.currenttestidx == len(self.testidx) - 1:
                done = True
                self.currenttestidx = 0
            else:
                done = False
                self.currenttestidx += 1

            return np.array([feat]), np.array(labs), done
Example #3
    def __getitem__(self, index):

        features = np.array(np.load(self.list[index].strip('\n')), dtype=np.float32)

        if self.tranform is not None:
            features = self.tranform(features)
        if self.test_mode:
            # name = os.path.basename(self.list[index].strip('\n'))
            return features
        else:
            features = process_feat(features, 32)
            return features
Example #4
    def load_test_data(self):
        # if train_test ==True:
        features = []
        labels = []
        idx = []

        # Load similar pairs
        # rand_classid = np.random.choice(len(self.classwiseidx), size=n_similar)
        # for rid in rand_classid:
        #     rand_sampleid = np.random.choice(len(self.classwiseidx[rid]), size=2)
        #     idx.append(self.classwiseidx[rid][rand_sampleid[0]])
        #     idx.append(self.classwiseidx[rid][rand_sampleid[1]])

        # Load rest pairs
        rand_sampleid = np.random.choice(len(self.testidx), size=self.batch_size)
        for r in rand_sampleid:
            idx.append(self.testidx[r])
      
        return np.array([
            utils.process_feat(self.features[i], self.t_max) for i in idx
        ]), np.array([self.labels_multihot[i] for i in idx])
Example #5
    def __getitem__(self, index):

        label = self.get_label(index)  # get video level label 0/1
        features = np.load(self.list[index].strip('\n'), allow_pickle=True)
        features = np.array(features, dtype=np.float32)

        if self.tranform is not None:
            features = self.tranform(features)
        if self.test_mode:
            return features
        else:
            features = features.transpose(1, 0, 2)  # move the crop axis first: (ncrops, T, F)
            divided_features = []
            for feature in features:
                feature = process_feat(feature, 32)
                divided_features.append(feature)
            divided_features = np.array(divided_features, dtype=np.float32)

            return divided_features, label
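Examples #3 and #5 both pass features through process_feat(feature, 32). A minimal sketch of what such a helper commonly does in these pipelines, assuming it resamples a (T, F) snippet sequence to a fixed number of segments by mean-pooling; the actual utils.process_feat / process_feat in the referenced repositories may differ:

    import numpy as np

    def process_feat(feat, length):
        # Resample (T, F) features to exactly `length` segments: average the
        # snippets that fall into each segment, or copy a single snippet when
        # the segment boundaries coincide.
        new_feat = np.zeros((length, feat.shape[1]), dtype=np.float32)
        r = np.linspace(0, len(feat), length + 1, dtype=int)
        for i in range(length):
            if r[i] != r[i + 1]:
                new_feat[i, :] = np.mean(feat[r[i]:r[i + 1], :], axis=0)
            else:
                new_feat[i, :] = feat[r[i], :]
        return new_feat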
Example #6
    def load_data(self, n_similar=3, is_training=True):
        if is_training:
            features = []
            labels = []
            idx = []  # e.g. [10, 16, 111, 115, 117, 112]: 3 classes x 2 videos each

            # Load similar pairs
            rand_classid = np.random.choice(len(self.classwiseidx),
                                            size=n_similar)  # randomly pick n_similar class ids
            for rid in rand_classid:
                rand_sampleid = np.random.choice(len(self.classwiseidx[rid]),
                                                 size=2)  # then pick a pair of videos from that class
                idx.append(self.classwiseidx[rid][rand_sampleid[0]])
                idx.append(self.classwiseidx[rid][rand_sampleid[1]])

            # Load rest pairs
            rand_sampleid = np.random.choice(len(self.trainidx),
                                             size=self.batch_size -
                                             2 * n_similar)
            for r in rand_sampleid:
                idx.append(self.trainidx[r])

            return np.array([
                utils.process_feat(self.features[i], self.t_max) for i in idx
            ]), np.array([self.labels_multihot[i] for i in idx])

        else:
            labs = self.labels_multihot[self.testidx[self.currenttestidx]]
            feat = self.features[self.testidx[self.currenttestidx]]

            if self.currenttestidx == len(self.testidx) - 1:
                done = True
                self.currenttestidx = 0
            else:
                done = False
                self.currenttestidx += 1

            return np.array(feat), np.array(labs), done
Example #7
    def load_data(self, n_similar=0, is_training=True, similar_size=2):
        if is_training:
            labels = []
            idx = []

            # Load similar pairs
            if n_similar != 0:
                rand_classid = np.random.choice(len(self.classwiseidx),
                                                size=n_similar)
                for rid in rand_classid:
                    rand_sampleid = np.random.choice(
                        len(self.classwiseidx[rid]),
                        size=similar_size,
                        replace=False,
                    )

                    for k in rand_sampleid:
                        idx.append(self.classwiseidx[rid][k])

            # Load rest pairs
            if self.batch_size - similar_size * n_similar < 0:
                self.batch_size = similar_size * n_similar

            rand_sampleid = np.random.choice(
                len(self.trainidx),
                size=self.batch_size - similar_size * n_similar,
            )

            for r in rand_sampleid:
                idx.append(self.trainidx[r])

            feat = np.array([
                utils.process_feat(self.features[i], self.t_max,
                                   self.normalize) for i in idx
            ])
            labels = np.array([self.labels_multihot[i] for i in idx])

            if self.mode == "rgb":
                feat = feat[..., :self.feature_size]
            elif self.mode == "flow":
                feat = feat[..., self.feature_size:]
            return feat, labels

        else:
            labs = self.labels_multihot[self.testidx[self.currenttestidx]]
            feat = self.features[self.testidx[self.currenttestidx]]
            feat = utils.process_feat(feat, normalize=self.normalize)

            if self.currenttestidx == len(self.testidx) - 1:
                done = True
                self.currenttestidx = 0
            else:
                done = False
                self.currenttestidx += 1

            feat = np.array(feat)
            if self.mode == "rgb":
                feat = feat[..., :self.feature_size]
            elif self.mode == "flow":
                feat = feat[..., self.feature_size:]
            return feat, np.array(labs), done
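The "rgb"/"flow" slicing in Example #7 suggests the stored vectors are two-stream features concatenated along the last axis, each self.feature_size wide. A small sketch of that assumed layout (all sizes below are hypothetical):

    import numpy as np

    feature_size = 1024                                       # hypothetical per-stream width
    feat = np.zeros((16, 320, 2 * feature_size), np.float32)  # (videos, snippets, rgb + flow)
    rgb_feat = feat[..., :feature_size]    # mode == "rgb": keep the first half
    flow_feat = feat[..., feature_size:]   # mode == "flow": keep the second half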
Example #8
    def __getitem__(self, index):
        if self.normal_flag in self.list[index]:
            label = 0.0
        else:
            label = 1.0

        if self.modality == 'AUDIO':
            features = np.array(np.load(self.list[index].strip('\n')),
                                dtype=np.float32)
        elif self.modality == 'RGB':
            features = np.array(np.load(self.list[index].strip('\n')),
                                dtype=np.float32)
        elif self.modality == 'FLOW':
            features = np.array(np.load(self.list[index].strip('\n')),
                                dtype=np.float32)
        elif self.modality == 'MIX':
            features1 = np.array(np.load(self.list[index].strip('\n')),
                                 dtype=np.float32)
            features2 = np.array(np.load(self.flow_list[index].strip('\n')),
                                 dtype=np.float32)
            if features1.shape[0] == features2.shape[0]:
                features = np.concatenate((features1, features2), axis=1)
            else:  # because flow has one fewer frame than rgb
                features = np.concatenate((features1[:-1], features2), axis=1)
        elif self.modality == 'MIX2':
            features1 = np.array(np.load(self.list[index].strip('\n')),
                                 dtype=np.float32)
            features2 = np.array(np.load(self.audio_list[index //
                                                         5].strip('\n')),
                                 dtype=np.float32)
            if features1.shape[0] == features2.shape[0]:
                features = np.concatenate((features1, features2), axis=1)
            else:  # because flow has one fewer frame than rgb
                features = np.concatenate((features1[:-1], features2), axis=1)
        elif self.modality == 'MIX3':
            features1 = np.array(np.load(self.list[index].strip('\n')),
                                 dtype=np.float32)
            features2 = np.array(np.load(self.audio_list[index //
                                                         5].strip('\n')),
                                 dtype=np.float32)
            if features1.shape[0] == features2.shape[0]:
                features = np.concatenate((features1, features2), axis=1)
            else:  # because flow has one fewer frame than rgb
                features = np.concatenate((features1[:-1], features2), axis=1)
        elif self.modality == 'MIX_ALL':
            features1 = np.array(np.load(self.list[index].strip('\n')),
                                 dtype=np.float32)
            features2 = np.array(np.load(self.flow_list[index].strip('\n')),
                                 dtype=np.float32)
            features3 = np.array(np.load(self.audio_list[index //
                                                         5].strip('\n')),
                                 dtype=np.float32)
            if features1.shape[0] == features2.shape[0]:
                features = np.concatenate((features1, features2, features3),
                                          axis=1)
            else:  # because flow has one fewer frame than rgb
                features = np.concatenate(
                    (features1[:-1], features2, features3[:-1]), axis=1)
        else:
            raise ValueError('Modality is wrong!')
        if self.tranform is not None:
            features = self.tranform(features)
        if self.test_mode:
            return features

        else:
            features = process_feat(features, self.max_seqlen, is_random=False)
            return features, label
Example #9
    def __getitem__(self, index):

        if self.args.larger_mem:
            if self.train:
                train_video_name = []
                start_index = []
                anomaly_indexs = random.sample(self.anomaly_video_train,
                                               self.args.sample_size)
                normaly_indexs = random.sample(self.normal_video_train,
                                               self.args.sample_size)
                anomaly_features = torch.zeros(0)
                normaly_features = torch.zeros(0)
                for a_i, n_i in zip(anomaly_indexs, normaly_indexs):
                    anomaly_data_video_name = a_i.replace('\n', '').replace(
                        'Ped', 'ped')
                    normaly_data_video_name = n_i.replace('\n', '').replace(
                        'Ped', 'ped')
                    train_video_name.append(anomaly_data_video_name)
                    train_video_name.append(normaly_data_video_name)
                    anomaly_feature = self.data_dict[anomaly_data_video_name]
                    anomaly_feature, r = utils.process_feat_sample(
                        anomaly_feature, self.t_max)
                    start_index += r
                    anomaly_feature = torch.from_numpy(
                        anomaly_feature).unsqueeze(0)
                    # shape = (1, seq_len, feature_dim )
                    normaly_feature = self.data_dict[normaly_data_video_name]
                    normaly_feature, r = utils.process_feat(
                        normaly_feature, self.t_max, self.args.sample_step)
                    start_index += r
                    normaly_feature = torch.from_numpy(
                        normaly_feature).unsqueeze(0)
                    anomaly_features = torch.cat(
                        (anomaly_features, anomaly_feature),
                        dim=0)  # combine anomaly_feature of different a_i
                    normaly_features = torch.cat(
                        (normaly_features, normaly_feature),
                        dim=0)  # combine normaly_feature of different n_i
                # normaly_label = torch.zeros((self.args.sample_size, 1))
                # anomaly_label = torch.ones((self.args.sample_size, 1))
                if self.args.label_type == 'binary':
                    normaly_label = torch.cat(
                        (torch.ones((self.args.sample_size, 1)),
                         torch.zeros((self.args.sample_size, 1))),
                        dim=1)
                    anomaly_label = torch.cat(
                        (torch.ones((self.args.sample_size, 1)),
                         torch.ones((self.args.sample_size, 1))),
                        dim=1)
                else:
                    normaly_label = torch.cat(
                        (torch.ones((self.args.sample_size, 1)),
                         torch.zeros((self.args.sample_size, 1))),
                        dim=1)
                    anomaly_label = torch.cat(
                        (torch.zeros((self.args.sample_size, 1)),
                         torch.ones((self.args.sample_size, 1))),
                        dim=1)

                return [anomaly_features,
                        normaly_features], [anomaly_label, normaly_label
                                            ], [train_video_name, start_index]
            else:
                data_video_name = self.testlist[index].replace(
                    '\n', '').replace('Ped', 'ped')
                self.feature = self.data_dict[data_video_name]
                return self.feature, data_video_name

        else:
            if self.train:
                anomaly_train_video_name = []
                normaly_train_video_name = []
                anomaly_start_index = []
                anomaly_len_index = []
                normaly_start_index = []
                normaly_len_index = []
                anomaly_indexs = random.sample(self.anomaly_video_train,
                                               self.args.sample_size)
                normaly_indexs = random.sample(self.normal_video_train,
                                               self.args.sample_size)
                anomaly_features = torch.zeros(0)
                normaly_features = torch.zeros(0)
                for a_i, n_i in zip(anomaly_indexs, normaly_indexs):
                    anomaly_data_video_name = a_i.replace('\n', '').replace(
                        'Ped', 'ped')
                    normaly_data_video_name = n_i.replace('\n', '').replace(
                        'Ped', 'ped')
                    anomaly_train_video_name.append(anomaly_data_video_name)
                    normaly_train_video_name.append(normaly_data_video_name)
                    anomaly_feature = np.load(file=os.path.join(
                        self.feature_path, anomaly_data_video_name,
                        'feature.npy'))
                    anomaly_len_index.append(anomaly_feature.shape[0])
                    anomaly_feature, r = utils.process_feat_sample(
                        anomaly_feature, self.t_max)
                    anomaly_start_index.append(r)
                    anomaly_feature = torch.from_numpy(
                        anomaly_feature).unsqueeze(0)
                    normaly_feature = np.load(file=os.path.join(
                        self.feature_path, normaly_data_video_name,
                        'feature.npy'))
                    normaly_len_index.append(normaly_feature.shape[0])
                    normaly_feature, r = utils.process_feat(
                        normaly_feature, self.t_max, self.args.sample_step)
                    normaly_feature = torch.from_numpy(
                        normaly_feature).unsqueeze(0)
                    normaly_start_index.append(r)
                    anomaly_features = torch.cat(
                        (anomaly_features, anomaly_feature),
                        dim=0)  # combine anomaly_feature of different a_i
                    normaly_features = torch.cat(
                        (normaly_features, normaly_feature),
                        dim=0)  # combine normaly_feature of different n_i
                if self.args.label_type == 'binary':
                    normaly_label = torch.cat(
                        (torch.ones((self.args.sample_size, 1)),
                         torch.zeros((self.args.sample_size, 1))),
                        dim=1)
                    anomaly_label = torch.cat(
                        (torch.ones((self.args.sample_size, 1)),
                         torch.ones((self.args.sample_size, 1))),
                        dim=1)
                elif self.args.label_type == 'unary':
                    normaly_label = torch.zeros((self.args.sample_size, 1))
                    anomaly_label = torch.ones((self.args.sample_size, 1))
                else:
                    normaly_label = torch.cat(
                        (torch.ones((self.args.sample_size, 1)),
                         torch.zeros((self.args.sample_size, 1))),
                        dim=1)
                    anomaly_label = torch.cat(
                        (torch.zeros((self.args.sample_size, 1)),
                         torch.ones((self.args.sample_size, 1))),
                        dim=1)

                train_video_name = anomaly_train_video_name + normaly_train_video_name
                start_index = anomaly_start_index + normaly_start_index
                len_index = anomaly_len_index + normaly_len_index

                return [anomaly_features, normaly_features
                        ], [anomaly_label, normaly_label
                            ], [train_video_name, start_index, len_index]
            else:
                data_video_name = self.testlist[index].replace(
                    '\n', '').replace('Ped', 'ped')
                self.feature = np.load(file=os.path.join(
                    self.feature_path, data_video_name, 'feature.npy'))
                return self.feature, data_video_name
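A hypothetical usage sketch for the training branch of this __getitem__ (the dataset name and constructor are assumptions; the return structure is taken from the example). Because the method samples args.sample_size anomalous and normal videos internally, a single indexing call already yields a full training batch:

    train_dataset = VideoDataset(args, train=True)  # hypothetical constructor
    (anomaly_feat, normal_feat), (anomaly_lab, normal_lab), meta = train_dataset[0]
    # anomaly_feat / normal_feat: (sample_size, t_max, feature_dim) tensors
    # meta: the sampled video names plus start (and length) indices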