Example No. 1
    def next_batch(self, batch_size):
        # Take the next batch_size video names from the example list.
        batch = self.list_of_examples[self.index:self.index + batch_size]
        self.index += batch_size

        batch_input = []
        batch_target = []
        for vid in batch:
            # Patch one malformed entry whose extension lost its dot.
            if vid == "r23mp4":
                vid = "r23.mp4"
            # Pre-extracted features are stored as <video>.pth.
            features = torch.load(
                os.path.join(self.features_path,
                             vid.split(".")[0] + ".pth"))
            # Frame-level labels sit beside the features as <video>.txt,
            # one class name per line.
            labelpath = ("../../../data/training/feature_ext/" +
                         self.feat_model + "/" + vid[:-4] + ".txt")
            with open(labelpath, mode="r") as f:
                lines = f.read().splitlines()
            labels = torch.tensor([utils.label_to_id(i) for i in lines])

            batch_input.append(features.unsqueeze(0))
            batch_target.append(labels.unsqueeze(0))
        return batch_input, batch_target
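All of these loaders assume a utils module providing label_to_id (and, later, id_to_label). Those helpers are not shown in the source; a minimal sketch of what such a mapping might look like follows. The class list is illustrative only, borrowed from the pair names that appear in the F1 snippet further down.

# Sketch only: the project's real label list and helper signatures may differ.
_LABELS = ["painting", "battle", "moving", "hidden", "background"]

def label_to_id(label: str) -> int:
    """Map a class-name string (one line of a .txt label file) to an int id."""
    return _LABELS.index(label)

def id_to_label(idx: int) -> str:
    """Inverse mapping, used when printing per-class metrics."""
    return _LABELS[idx]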
    def __getitem__(self, idx):
        # Directory of resized frames and per-frame label files for one video.
        path = os.path.join(
            "../../../data/training/tmp_images/",
            self.files[idx][:-4] + "_resized",
        )
        files = os.listdir(path)
        images = sorted(f for f in files if f.endswith("jpg"))
        texts = sorted(f for f in files if f.endswith("txt"))
        # Trim both lists to the shorter one so frames and labels stay aligned.
        num = min(len(images), len(texts))
        images = images[:num]
        texts = texts[:num]
        assert len(images) == len(texts), "image count and label count differ"

        # One class name per .txt file, converted to integer ids.
        labels = []
        for i in texts:
            with open(os.path.join(path, i), mode="r") as f:
                labels.append(f.read())
        labels = torch.tensor([utils.label_to_id(i) for i in labels])

        # Load, transform, and stack every frame: (num_frames, C, H, W).
        imgs = []
        for i in images:
            im = Image.open(os.path.join(path, i))
            imgs.append(self.transform(im).unsqueeze(0))
        imgs = torch.cat(imgs, dim=0)
        return imgs, labels
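A dataset exposing this __getitem__ can be driven by a standard torch.utils.data.DataLoader. A runnable toy sketch (the dataset class below is a stand-in, not the project's class):

import torch
from torch.utils.data import DataLoader, Dataset

class _ToyVideoDataset(Dataset):
    """Stand-in with the same item shapes as the __getitem__ above:
    a (num_frames, C, H, W) image stack and a (num_frames,) label vector."""

    def __len__(self):
        return 4

    def __getitem__(self, idx):
        n = 3 + idx  # videos differ in frame count
        return torch.zeros(n, 3, 224, 224), torch.zeros(n, dtype=torch.long)

# batch_size=1 because each item already stacks every frame of one video,
# and videos of different lengths cannot be collated into a single tensor
# by the default collate_fn.
loader = DataLoader(_ToyVideoDataset(), batch_size=1, shuffle=True)
for imgs, labels in loader:
    pass  # imgs: (1, n, 3, 224, 224), labels: (1, n)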
    def __getitem__(self, idx):
        # Features and frame-level labels share a basename under the
        # feature-extraction directory for the chosen backbone.
        path = ("./data/training/feature_ext/" + self.feat_model + "/" +
                self.files[idx][:-4] + ".pth")
        feature = torch.load(path)
        labelpath = ("./data/training/feature_ext/" + self.feat_model + "/" +
                     self.files[idx][:-4] + ".txt")
        with open(labelpath, mode="r") as f:
            lines = f.read().splitlines()
        labels = torch.tensor([utils.label_to_id(i) for i in lines])

        if self._mode == "test":
            # At test time, also return the video name for bookkeeping.
            return (feature.unsqueeze(0), labels.unsqueeze(0),
                    self.files[idx][:-4])
        if self._config.head in ("lstm", "tcn"):
            # Sequence heads are fed through lstm_slice_dataset.
            return lstm_slice_dataset(feature, labels, self._config.batch_size)
        # Frame-wise heads get mini-batches built by batch_maker.
        bd, bl = batch_maker(
            feature,
            labels,
            shuffle=self._config.shuffle,
            drop_last=True,
            batch_size=self._config.batch_size,
            n_sample=self._config.n_sample,
        )
        return bd, bl
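lstm_slice_dataset and batch_maker are project-specific helpers whose implementations are not shown. Purely as a sketch, assuming lstm_slice_dataset chops one long (C, T) feature sequence into fixed-length windows grouped into batches, it might look like this; the real helper's semantics may differ:

import torch

def lstm_slice_dataset_sketch(feature, labels, batch_size, window=64):
    """Hypothetical: slice a (C, T) feature tensor and a (T,) label tensor
    into non-overlapping windows, then group windows into batches."""
    C, T = feature.shape
    n_windows = T // window
    feats = (feature[:, :n_windows * window]
             .reshape(C, n_windows, window)
             .permute(1, 0, 2))                      # (n_windows, C, window)
    labs = labels[:n_windows * window].reshape(n_windows, window)
    n_batches = n_windows // batch_size              # drop_last behaviour
    feats = feats[:n_batches * batch_size].reshape(
        n_batches, batch_size, C, window)
    labs = labs[:n_batches * batch_size].reshape(
        n_batches, batch_size, window)
    return feats, labs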
Example No. 4
    def return_F1_purpose_score(self):
        # Pairs of classes that are easy to confuse; each pair is scored
        # as a binary F1 problem over the positions belonging to the pair.
        purpose_classes = [["painting", "battle"], ["moving", "hidden"]]
        scores = []
        for cls in purpose_classes:
            cls_index = np.array([utils.label_to_id(i) for i in cls])
            c = (self.x == cls_index[0]) | (self.x == cls_index[1])
            a = self.x[c]
            b = self.y[c]
            # Remap the pair's class ids onto {0, 1} for binary F1.
            a, b = self.reset_label(a, b, cls_index)
            score = f1_score(a, b)
            print("f1_score", score)
            scores.append(score)
        return scores
    def __getitem__(self, idx):
        # Same layout as above: features in <video>.pth, labels in <video>.txt.
        path = ("../../../data/training/feature_ext/" + self.feat_model + "/" +
                self.files[idx][:-4] + ".pth")
        feature = torch.load(path)
        labelpath = ("../../../data/training/feature_ext/" + self.feat_model +
                     "/" + self.files[idx][:-4] + ".txt")
        with open(labelpath, mode="r") as f:
            lines = f.read().splitlines()
        labels = torch.tensor([utils.label_to_id(i) for i in lines])
        return feature, labels, self.files[idx]
Example No. 6
    def _print_acc(self, r=5, ignore_classes=None):
        x_total, y_total = self.x, self.y
        if ignore_classes:
            # Drop every position whose label is in ignore_classes.
            index_total = np.zeros(x_total.shape[0], dtype=bool)
            for ic in ignore_classes:
                index_total |= x_total == utils.label_to_id(ic)
            x_total = x_total[~index_total]
            y_total = y_total[~index_total]
        self.accuracy = len(x_total[x_total == y_total]) / len(x_total)
        print("total_accuracy:", round(self.accuracy, r))
        # Per-class accuracy over the remaining positions.
        for c in range(self.classes):
            x = x_total[x_total == c]
            y = y_total[x_total == c]
            if len(x) != 0:
                accuracy = len(x[x == y]) / len(x)
                print(utils.id_to_label(c), ":", round(accuracy, r))
        return self.accuracy
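The per-class loop above can also be written as a small standalone helper; an equivalent formulation, treating x_total as the reference labels:

import numpy as np

def per_class_accuracy(x_total, y_total, n_classes):
    """For each class c, the fraction of positions labelled c in x_total
    where y_total agrees; classes absent from x_total are skipped."""
    accs = {}
    for c in range(n_classes):
        mask = x_total == c
        if mask.any():
            accs[c] = float((y_total[mask] == c).mean())
    return accs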
Example No. 7
    def forward(self, outputs, labels):
        # Standard cross-entropy over all classes.
        total_loss = self._ce(outputs, labels)
        outputs = self._softmax(outputs)

        # For every confusable pair, penalise probability mass assigned to
        # the wrong member of the pair, in both directions.
        for pair in self._pairs:
            label1, label2 = [torch.tensor(label_to_id(i)) for i in pair]
            for l_true, l_wrong in ((label1, label2), (label2, label1)):
                index = labels == l_true
                wrong_out = outputs[index, l_wrong]
                if len(wrong_out) > 0:
                    # Push the wrong class's probability toward zero.
                    zeros_label = torch.zeros_like(wrong_out)
                    loss = self._bce_loss(wrong_out, zeros_label)
                    total_loss += loss * self._bce_weight
        return total_loss
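The constructor for this loss is not shown, so the snippet below only illustrates the penalty term itself as a self-contained toy: the probability assigned to one member of a pair on frames whose true class is the other member, pushed toward zero with BCE. The class ids used (0 and 1) are assumptions.

import torch
import torch.nn.functional as F

outputs = torch.softmax(torch.randn(8, 5), dim=1)  # post-softmax scores
labels = torch.randint(0, 5, (8,))                 # true class ids
wrong_out = outputs[labels == 0, 1]  # P(class 1) where the truth is class 0
if len(wrong_out) > 0:
    penalty = F.binary_cross_entropy(wrong_out, torch.zeros_like(wrong_out))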