Example #1
0
    def test(self):
        """Evaluate the model over self.test_data.

        Prints the mean evaluation loss, writes the raw (un-thresholded)
        prediction scores to predict_score.pkl, and delegates metric
        reporting to self._print_out_result.
        """
        targets = []
        preds = []
        raw_scores = []

        self.model.eval()
        total_loss = 0
        for in_data, out_target in tqdm(self.test_data):
            # Move the paired inputs and the label onto torch variables
            # (CUDA when self.is_cuda is set).
            left = transform_torch_vars(in_data[0], self.is_cuda)
            right = transform_torch_vars(in_data[1], self.is_cuda)
            label = transform_torch_vars(out_target, self.is_cuda)

            # Forward pass and loss accumulation; the label gets an extra
            # dim so it matches the model output for the metric.
            output = self.model.forward_encoder_vector(left, right)
            label = torch.unsqueeze(label, 1)
            total_loss += self.evaluate_metric(output, label).item()

            scores = self._postprocess_output(output)
            preds.extend(self._postprocess_label(scores))
            raw_scores.extend(self._postprocess_label_without_threshold(scores))
            targets.extend(self._postprocess_label_without_threshold(out_target))

        print(total_loss / len(self.test_data))

        FileTool.writePickle("predict_score.pkl", raw_scores)
        self._print_out_result(targets, preds)
Example #2
0
 def __init__(self, img_dir, file_info, transform):
     """Build the dataset index from pickled metadata.

     :param img_dir: directory holding the images
     :param file_info: dict of pickle paths under the keys "ids",
                       "file_info" and "annotations"
     :param transform: callable applied to samples at load time
     """
     print("read pickle files")
     self.img_dir = img_dir
     self.transform = transform
     # Sort the ids so iteration order is deterministic across runs.
     self.imgIds = sorted(FileTool.readPickle(file_info["ids"]))
     self.file_info = FileTool.readPickle(file_info["file_info"])
     self.annotations = FileTool.readPickle(file_info["annotations"])
Example #3
0
    def _print_out_result(self, target, pred):
        """Persist labels and predictions, then print evaluation metrics.

        Writes target.pkl / predict.pkl and prints precision/recall/F-score
        support followed by accuracy.

        :param target: ground-truth labels
        :param pred: predicted labels
        :return: None
        """
        for path, payload in (("target.pkl", target), ("predict.pkl", pred)):
            FileTool.writePickle(path, payload)

        print(precision_recall_fscore_support(target, pred))
        print(accuracy_score(target, pred))
Example #4
0
    def encode(self):
        """Run the encoder over self.test_data and dump feature/label pairs.

        For each batch, encodes the first element of the input pair,
        pairs every resulting feature vector with its post-processed
        label, and finally writes all pairs to features_siamese.pkl.

        Side effect: writes features_siamese.pkl in the working directory.
        """
        encoder_file = []

        self.model.eval()
        for in_data, out_target in tqdm(self.test_data):
            ### transform data ###
            in_data = transform_torch_vars(in_data, self.is_cuda)
            out_data = transform_torch_vars(out_target, self.is_cuda)

            # Encode only the first input of the pair.
            output = self.model.forward_encode(in_data[0])
            predict = list(output.detach().cpu().numpy())
            target = self._postprocess_label_without_threshold(out_data)
            # Fix: the original enumerated `predict` with an index named
            # `id` (shadowing the builtin) and indexed `target` manually;
            # zip pairs them directly.
            encoder_file.extend([[feat, label] for feat, label in zip(predict, target)])

        FileTool.writePickle("features_siamese.pkl", encoder_file)
Example #5
0
    def get_img_annotation(self, ind):
        """Return the image at index *ind*, cropped around the hand, plus
        its keypoints shifted into the crop's coordinate frame.

        The crop is a square of side `size` (3x the larger keypoint span)
        centred on the annotated hand-box centre; the image is first
        replicate-padded by size//2 so the crop never leaves the canvas.

        :param ind: index into self.imgIds
        :return: (cropped image, keypoints array)
        """
        img_path = self.imgIds[ind]
        image = cv2.imread(img_path)
        # The annotation lives next to the image as a one-line .json file.
        annos = json.loads(
            FileTool.read_text_file(img_path.replace(".jpg", ".json"))[0])

        handpoints = np.array(annos["hand_pts"])
        hand_center = np.array(annos["hand_box_center"])
        span_x = np.max(handpoints[:, 0]) - np.min(handpoints[:, 0])
        span_y = np.max(handpoints[:, 1]) - np.min(handpoints[:, 1])
        size = int(max(span_x, span_y)) * 3

        # Pad on all sides, then shift points/centre into padded coords.
        pad = size // 2
        image = cv2.copyMakeBorder(image, pad, pad, pad, pad,
                                   cv2.BORDER_REPLICATE)
        handpoints[:, 0:2] = handpoints[:, 0:2] + pad
        hand_center = hand_center + pad

        # Square crop of side `size` centred on the hand-box centre.
        x0 = int(hand_center[0] - size / 2)
        x1 = int(hand_center[0] + size / 2)
        y0 = int(hand_center[1] - size / 2)
        y1 = int(hand_center[1] + size / 2)
        image = image[y0:y1, x0:x1, :]

        # Express keypoints relative to the crop origin.
        handpoints[:, 0] = handpoints[:, 0] - x0
        handpoints[:, 1] = handpoints[:, 1] - y0

        return image, handpoints
Example #6
0
        # Record each image id once, preserving first-seen order.
        # NOTE(review): list membership is O(n) per check — a companion set
        # would be faster for large datasets; left as-is (doc-only edit).
        if ele["image_id"] not in ids_add:
            ids_add.append(ele["image_id"])
        # Group annotation records by image_id (keys look COCO-style —
        # presumably "iscrowd"/"keypoints" come from a COCO dump; verify
        # against the loader). Field "iscrowd" is renamed to "is_crowd".
        if ele["image_id"] in annotations:
            annotations[ele["image_id"]].append({
                "image_id": ele["image_id"],
                "is_crowd": ele["iscrowd"],
                "area": ele["area"],
                "keypoints": ele["keypoints"],
                "num_keypoints": ele["num_keypoints"]
            })
        else:
            # First annotation seen for this image: start a new list.
            annotations[ele["image_id"]] = [{
                "image_id": ele["image_id"],
                "is_crowd": ele["iscrowd"],
                "area": ele["area"],
                "keypoints": ele["keypoints"],
                "num_keypoints": ele["num_keypoints"]
            }]

# NOTE(review): mid-file import — moving it to the top of the file would
# match convention; left in place for this doc-only edit.
import os

print("write output")
# Commented-out alternates that wrote validation-split ("val_*") filenames:
# FileTool.writePickle(os.path.join(output_file_path, "val_ids.pkl"), ids_add)
# FileTool.writePickle(os.path.join(output_file_path, "val_file_infos.pkl"), file_infos)
# FileTool.writePickle(os.path.join(output_file_path, "val_annotation_ids.pkl"), annotations)

# Persist the collected image ids, file infos, and grouped annotations
# under output_file_path (defined earlier in the script, outside this view).
FileTool.writePickle(os.path.join(output_file_path, "ids.pkl"), ids_add)
FileTool.writePickle(os.path.join(output_file_path, "file_infos.pkl"), file_infos)
FileTool.writePickle(os.path.join(output_file_path, "annotation_ids.pkl"), annotations)