Example #1
    def eval_images(
        self, imgdir: str, coco_gt: CocoManager,
        classes: List[str] = None, thre_iou: float = 0.5,
        keypoints: List[str] = None, skeleton: List[List[str]] = None
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        imgdir = correct_dirpath(imgdir)
        list_df = []
        if classes is None:
            classes   = MetadataCatalog.get(self.dataset_name).thing_classes
        if keypoints is None:
            keypoints = MetadataCatalog.get(self.dataset_name).get("keypoint_names") # None if the dataset defines no keypoint names
        if skeleton is None:
            skeleton  = MetadataCatalog.get(self.dataset_name).get("keypoint_flip_map") if keypoints is not None else None
        for y in [imgdir + x for x in coco_gt.df_json["images_file_name"].unique()]:
            print(f"eval image: {y}")
            df = self.eval_a_image(y, coco_gt, classes, thre_iou=thre_iou, keypoints=keypoints, skeleton=skeleton)
            list_df.append(df)
        df_org = pd.concat(list_df, axis=0, ignore_index=True, sort=False)

        # analyze df_org at thresholds from 0.0 to 0.9
        list_df = []
        for thre in np.arange(0, 1, 0.1):
            list_df.append(self.eval_with_custom_dataframe(df_org, coco_gt, thre, classes=classes, keypoints=keypoints, skeleton=skeleton))
        df_ana = pd.concat(list_df, axis=0, sort=False, ignore_index=True)
        df_ana = df_ana[list_df[0].columns]
        return df_ana, df_org
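
A minimal sketch of the IoU test implied by thre_iou, assuming [x1, y1, x2, y2] boxes; iou_xyxy is a hypothetical helper, not part of the original class, and eval_a_image may compute the overlap differently.

# Hypothetical helper illustrating the IoU >= thre_iou matching criterion.
import numpy as np

def iou_xyxy(box_a: np.ndarray, box_b: np.ndarray) -> float:
    x1, y1 = np.maximum(box_a[:2], box_b[:2])  # top-left of the intersection
    x2, y2 = np.minimum(box_a[2:], box_b[2:])  # bottom-right of the intersection
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return float(inter / (area_a + area_b - inter + 1e-12))

# A prediction matches a ground-truth box when iou_xyxy(pred, gt) >= thre_iou (0.5 by default).
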
Example #2
def same_images_concat(dir_paths: List[str],
                       save_images_path: str = None,
                       str_regex: List[str] = [r"png$", r"jpg$"]):
    dict_ret = {}
    # use the first directory as the base for the file list
    list_images = get_file_list(dir_paths[0], str_regex)
    for x in [os.path.basename(y) for y in list_images]:
        dict_ret[x] = concats([correct_dirpath(_x) + x for _x in dir_paths])

    if save_images_path is not None:
        makedirs(save_images_path, exist_ok=True, remake=False)
        for x in dict_ret.keys():
            cv2.imwrite(correct_dirpath(save_images_path) + x, dict_ret[x])

    return dict_ret
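
A minimal sketch of the kind of concatenation concats() is assumed to perform: resize each image to a common height, then stack them horizontally. This is an assumption for illustration; the real concats() is not shown here.

# Hypothetical stand-in for concats(): resize to a common height, then hconcat.
import cv2
import numpy as np

def hconcat_resized(paths, height: int = 480) -> np.ndarray:
    imgs = []
    for p in paths:
        img = cv2.imread(p)
        scale = height / img.shape[0]
        imgs.append(cv2.resize(img, (int(img.shape[1] * scale), height)))
    return cv2.hconcat(imgs)
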
Example #3
 def __init__(
     self,
     root_dirpath: str,
     json_label: str,
     transforms: List[object] = None,
 ):
     """
     Load label information.
     Format::
         {
             "test0.png": 1,  # or [1,2,3]; values may be int, str, or List
             "test1.png": 1,
             "test2.png": 1,
             ...
         }
     """
     super().__init__()
     self.json_label = json.load(
         open(json_label)) if type(json_label) == str else json_label
     self.json_label = {
         x: tuple(y) if type(y) in [list, tuple] else (y, )
         for x, y in self.json_label.items()
     }  # normalize every label to a tuple
     self.root_dirpath = correct_dirpath(root_dirpath)
     self.image_names = list(self.json_label.keys())
     self.transforms = transforms
     self.len = len(self.image_names)
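
A standalone sketch of the label normalization performed above; the labels dict here is a hypothetical example.

# Every value becomes a tuple, so single labels and multi-labels are handled uniformly.
labels = {"test0.png": 1, "test1.png": [1, 2, 3], "test2.png": "cat"}
labels = {k: tuple(v) if isinstance(v, (list, tuple)) else (v,) for k, v in labels.items()}
# -> {"test0.png": (1,), "test1.png": (1, 2, 3), "test2.png": ("cat",)}
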
Example #4
    def preview_augmentation(self,
                             src,
                             outdir: str = "./preview_augmentation",
                             n_output: int = 100):
        """
        面倒なのでcocoを作り直してからpreviewさせる
        Params::
            src: str, List[str], index, List[index]
        """
        outdir = correct_dirpath(outdir)
        coco = CocoManager()
        coco.add_json(self.coco_json_path)
        # filter by src
        if type(src) == str:
            coco.df_json = coco.df_json.loc[coco.df_json["images_file_name"] ==
                                            src]
        elif type(src) == int:
            coco.df_json = coco.df_json.iloc[src:src + 1]
        elif type(src) == list or type(src) == tuple:
            if type(src[0]) == str:
                coco.df_json = coco.df_json.loc[
                    coco.df_json["images_file_name"].isin(src)]
            elif type(src[0]) == int:
                coco.df_json = coco.df_json.iloc[src, :]
        else:
            raise TypeError(f"unsupported src type: {type(src)}")
        coco.save(self.coco_json_path + ".cocomanager.json")

        # reload using the rebuilt coco json
        self.coco_json_path = self.coco_json_path + ".cocomanager.json"
        DatasetCatalog.remove(self.dataset_name)  # the key must be removed before re-registering
        MetadataCatalog.remove(self.dataset_name)  # the key must be removed before re-registering
        self.__register_coco_instances(self.dataset_name, self.coco_json_path,
                                       self.image_root)
        super().__init__(self.cfg)
        makedirs(outdir, exist_ok=True, remake=True)
        count = 0
        for i, x in enumerate(self.data_loader):
            # x holds one batch (e.g. 2 samples), so iterate over it
            for j, data in enumerate(x):
                if j > 0: continue
                ## copy gt_*** -> pred_*** so the Visualizer can be used the same way as for predictor output
                img = self.img_conv_dataloader(data)
                ins = data["instances"].to("cpu")
                if ins.has("gt_boxes"): ins.set("pred_boxes", ins.gt_boxes)
                if ins.has("gt_classes"):
                    ins.set("pred_classes", ins.gt_classes)
                if ins.has("gt_keypoints"):
                    ins.set("pred_keypoints", ins.gt_keypoints)
                if ins.has("gt_masks"):
                    ## gt_masks are polygons [x1, y1, x2, y2, ...]; convert them to the boolean [False, True, True, ...] format used for pred_masks
                    segs = ins.get("gt_masks").polygons
                    list_ndf = []
                    for seg_a_class in segs:
                        ndf = convert_seg_point_to_bool(
                            img.shape[0], img.shape[1], seg_a_class)
                        list_ndf.append(ndf)
                    ndf = np.concatenate([[ndfwk] for ndfwk in list_ndf],
                                         axis=0)
                    ins.set("pred_masks",
                            torch.from_numpy(ndf))  # Tensor 形式に変換
                data["instances"] = ins
                img = self.draw_annoetation(img, data)
                cv2.imwrite(
                    outdir + "preview_augmentation." + str(i) + "." + str(j) +
                    ".png", img)
            count += 1
            if count > n_output: break

        DatasetCatalog.remove(self.dataset_name)  # the key must be removed before re-registering
        MetadataCatalog.remove(self.dataset_name)  # the key must be removed before re-registering
        self.coco_json_path = self.coco_json_path_org
        self.__register_coco_instances(self.dataset_name, self.coco_json_path,
                                       self.image_root)
        super().__init__(self.cfg)
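
A minimal sketch of a polygon-to-boolean-mask conversion in the spirit of convert_seg_point_to_bool, assuming flat [x1, y1, x2, y2, ...] COCO polygons. It is an illustration only, not the original implementation.

# Rasterize each polygon onto a zero mask, then cast to bool.
import cv2
import numpy as np

def seg_to_bool_mask(height: int, width: int, polygons) -> np.ndarray:
    mask = np.zeros((height, width), dtype=np.uint8)
    for poly in polygons:
        pts = np.asarray(poly, dtype=np.int32).reshape(-1, 2)
        cv2.fillPoly(mask, [pts], 1)
    return mask.astype(bool)
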
Example #5
        weight_path=weight_path,
        resume=False,
        is_keyseg=True,
        classes=['hook', "pole"],
        keypoint_names=[
            'kpt_a', 'kpt_cb', 'kpt_c', 'kpt_cd', 'kpt_e', 'kpt_b', 'kpt_d'
        ],
        keypoint_flip_map=[['kpt_a', 'kpt_cb'], ['kpt_cb', 'kpt_c'],
                           ['kpt_c', 'kpt_cd'], ['kpt_cd', 'kpt_e'],
                           ['kpt_b', 'kpt_d']],
        threshold=0.5,
        is_train=False,
    )

    # predict train data
    makedirs(correct_dirpath(outdir) + "train/", exist_ok=True, remake=True)
    for x in get_file_list(imgdir_train,
                           regex_list=[r"jpg$", r"png$", r"JPG$"])[:100]:
        img = cv2.imread(x)
        output = det2.show(img, only_best=False)
        cv2.imwrite(
            correct_dirpath(outdir) + "train/" + os.path.basename(x), output)

    # predict test data
    makedirs(correct_dirpath(outdir) + "test/", exist_ok=True, remake=True)
    for x in get_file_list(imgdir_test, regex_list=[r"jpg$", r"png$",
                                                    r"JPG$"]):
        img = cv2.imread(x)
        output = det2.show(img, only_best=False)
        cv2.imwrite(
            correct_dirpath(outdir) + "test/" + os.path.basename(x), output)
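
The train and test loops above differ only in the input directory and the file limit; a small helper such as the hypothetical predict_dir below captures the shared pattern, reusing the functions already used in this script.

# Hypothetical helper: predict every image in a directory and save the visualizations.
# outdir is expected to end with "/" (e.g. correct_dirpath(outdir) + "train/").
def predict_dir(det2, imgdir: str, outdir: str, limit: int = None):
    makedirs(outdir, exist_ok=True, remake=True)
    files = get_file_list(imgdir, regex_list=[r"jpg$", r"png$", r"JPG$"])
    for x in (files[:limit] if limit is not None else files):
        img = cv2.imread(x)
        cv2.imwrite(outdir + os.path.basename(x), det2.show(img, only_best=False))
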
Example #6
 def __init__(
         self,
         # network
         mynn: nn.Module,
         # train dataset
         dataset_train: torch.utils.data.Dataset,
         # train dataloader
         num_workers: int = 1,
         batch_size: int = 2,
         # validation dataset
         dataset_valids: List[torch.utils.data.Dataset] = [],
         # validation dataloader
         valid_step: int = None,
         batch_size_valid: int = 2,
         # optimizer
         lr: float = 0.001,
         epoch: int = 100,
         # output
         outdir: str = "./output_" +
     datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
         save_step: int = 50):
     # NN
     self.mynn = mynn
     # optimizer
     self.optimizer = optim.RAdam(self.mynn.parameters(),
                                  lr=lr,
                                  weight_decay=0)
     # DataLoader
     self.dataloader_train = torch.utils.data.DataLoader(
         dataset_train,
         batch_size=batch_size,
         shuffle=True,
         num_workers=num_workers,
         drop_last=True,
         collate_fn=self.collate_fn)
     self.dataloader_valids = []
     for dataset_valid in dataset_valids:
         self.dataloader_valids.append(
             torch.utils.data.DataLoader(dataset_valid,
                                         batch_size=batch_size_valid,
                                         shuffle=True,
                                         num_workers=num_workers,
                                         drop_last=True,
                                         collate_fn=partial(
                                             self.collate_fn,
                                             is_train=False)))
     # Process
     self.process_data_train_pre = []
     self.process_data_train_aft = []
     self.process_data_valid_pre = []
     self.process_data_valid_aft = []
     self.process_label = []
     # Loss
     self.loss_funcs = []
     # validation
     self.valid_step = valid_step
     # Config
     self.is_cuda = False
     self.epoch = epoch
     # Other
     self.iter = 0
     self.min_loss = float("inf")
     self.best_params = {}
     self.outdir = correct_dirpath(outdir)
     makedirs(self.outdir, exist_ok=True, remake=True)
     self.save_step = save_step
     # TensorBoard
     self.writer = SummaryWriter(log_dir=self.outdir + "logs")
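
A minimal sketch of a collate_fn compatible with the DataLoader calls above, assuming each dataset item is an (image_tensor, label) pair; the real self.collate_fn is not shown here, and is_train is accepted only to mirror the partial(self.collate_fn, is_train=False) binding used for the validation loaders.

# Hypothetical collate_fn: stack images into a batch tensor and gather labels.
import torch

def collate_fn(batch, is_train: bool = True):
    images = torch.stack([item[0] for item in batch], dim=0)
    labels = torch.as_tensor([item[1] for item in batch])
    return images, labels
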
Example #7
    def __init__(
            self,
            # network
            mynn: nn.Module,
            # dataset
            root_dirpath: str,
            json_path: str,
            # optimizer, batch
            lr: float = 0.001,
            batch_size: int = 2,
            num_workers: int = 1,
            epoch: int = 100,
            # validation
            validation_samples: float = -1,
            json_valid_paths: dict = {},
            batch_size_valid: int = 1,
            valid_step: int = 10,
            # output
            outdir: str = "./output_" +
        datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
            save_step: int = 50):
        # NN
        self.mynn = mynn
        # optimizer
        self.optimizer = optim.RAdam(self.mynn.parameters(),
                                     lr=lr,
                                     weight_decay=0)
        # Loss Function
        self.loss_funcs = [
            nn.BCELoss(),
            #nn.CrossEntropyLoss(),
            #nn.SmoothL1Loss(),
        ]
        self.loss_preprocs = [
            lambda x: x.to(torch.float32),
            #lambda x: x.to(torch.long),
        ]
        # Transform
        self.preprocess_img = transforms.Compose([
            MyResize(224),
            transforms.CenterCrop(224),
        ])
        self.preprocess = transforms.Compose([
            transforms.ToTensor(),
            lambda x: x[:3],
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        # default augmentations
        self.augmentations = transforms.Compose([
            MyRandAugment(1, 1),
            pil2cv,
            RandomRotation90(),
            RandomFliplr(),
            cv2pil,
        ])

        # DataSet
        self.mydataset = MyDataset(root_dirpath,
                                   json_path,
                                   transforms=self.transform)
        # Validation
        self.valid_step = valid_step
        self.is_validation = True if (
            type(validation_samples) in [int, float]
            and validation_samples > 0) or len(json_valid_paths) > 0 else False
        self.dataloaders_valid: OrderedDict = OrderedDict()
        dataset_train, dataset_valid = self.mydataset, None
        if self.is_validation:
            transform_valid = partial(self.transform, is_train=False)
            if (type(validation_samples) in [int, float]
                    and validation_samples > 0):
                listwk = list(self.mydataset.json_label.keys())
                # validation_samples is the validation fraction: the first n_valid
                # permuted keys go to validation, the rest to training
                n_valid = int(len(listwk) * validation_samples)
                listwk = np.random.permutation(listwk)
                samples_valid = listwk[:n_valid]
                samples_train = listwk[n_valid:]
                dataset_train = MyDataset(
                    root_dirpath,
                    {x: self.mydataset.json_label[x]
                     for x in samples_train},
                    transforms=self.transform)
                dataset_valid = MyDataset(
                    root_dirpath,
                    {x: self.mydataset.json_label[x]
                     for x in samples_valid},
                    transforms=transform_valid)
                # DataLoader for the split-off validation samples
                self.dataloaders_valid[
                    "normal_validation"] = torch.utils.data.DataLoader(
                        dataset_valid,
                        batch_size=batch_size_valid,
                        shuffle=True,
                        num_workers=num_workers,
                        drop_last=True,
                        collate_fn=self.collate_fn)
            # Custom validation dataset
            for i_valid, (
                    valid_dirpath,
                    json_valid_path,
            ) in enumerate(json_valid_paths.items()):
                dataset_valid = MyDataset(correct_dirpath(valid_dirpath),
                                          json_valid_path,
                                          transforms=transform_valid)
                self.dataloaders_valid[
                    "custom_validation_" +
                    str(i_valid)] = torch.utils.data.DataLoader(
                        dataset_valid,
                        batch_size=batch_size_valid,
                        shuffle=True,
                        num_workers=num_workers,
                        drop_last=True,
                        collate_fn=self.collate_fn)
        # Train DataLoader
        self.dataloader_train = torch.utils.data.DataLoader(
            dataset_train,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            drop_last=True,
            collate_fn=self.collate_fn)
        # Config
        self.is_cuda = False
        self.epoch = epoch
        # Other
        self.iter = 0
        self.min_loss = np.inf
        self.best_params = {}
        self.outdir = correct_dirpath(outdir)
        makedirs(self.outdir, exist_ok=True, remake=True)
        self.save_step = save_step
        # TensorBoard
        self.writer = SummaryWriter(log_dir=self.outdir + "logs")
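
A standalone sketch of the key-based train/validation split performed above, with validation_samples read as the fraction of keys held out for validation; split_keys is a hypothetical helper.

# Permute the label keys once, hold out the first n_valid for validation.
import numpy as np

def split_keys(labels: dict, validation_fraction: float, seed: int = 0):
    keys = np.random.default_rng(seed).permutation(list(labels.keys()))
    n_valid = int(len(keys) * validation_fraction)
    return list(keys[n_valid:]), list(keys[:n_valid])  # (train_keys, valid_keys)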