Code Example #1
File: isi.py  Project: jgsimard/video_sem_seg
    def __init__(self, root_dir, split="train", hd=False):
        self.root_dir = root_dir
        self.split = split
        self.imgs_dir = os.path.join(root_dir, "RGB_Images")
        self.masks_dir = os.path.join(root_dir, "Masks")
        base_size = 1080 if hd else 513
        self.transform_train = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=base_size, crop_size=513, fill=0),
            tr.RandomRotate(15),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
            tr.ToTensor()
        ])

        self.transform_validation = transforms.Compose([
            tr.FixScaleCrop(crop_size=base_size),
            tr.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
            tr.ToTensor()
        ])

        with open(os.path.join(root_dir, "Sets", f"{split}.txt"), 'r') as f:
            basenames, imgs, masks = [], [], []
            for basename in f:
                basename = basename.strip("\n")  # drop the trailing newline, as the temporal loader below does
                img, mask = self.get_filenames(basename)
                basenames.append(basename)
                imgs.append(img)
                masks.append(mask)
            self.files = pd.DataFrame(
                list(zip(basenames, imgs, masks)),
                columns=['basename', 'img_filename', 'label_filename'])
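The class wrapping this constructor is not shown in the snippet; a minimal usage sketch, assuming a hypothetical ISIDataset name for the enclosing torch Dataset and the {'image', 'label'} sample-dict convention these custom tr.* transforms use:

# Hypothetical usage; ISIDataset stands in for the (unnamed) enclosing Dataset class.
from torch.utils.data import DataLoader

train_set = ISIDataset(root_dir="/path/to/dataset", split="train", hd=False)
train_loader = DataLoader(train_set, batch_size=4, shuffle=True, num_workers=2)
for sample in train_loader:
    images, masks = sample['image'], sample['label']  # assumed keys, per the dict-based transforms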
Code Example #2
    def transform_val(self, sample):
        # Validation pipeline: fixed-scale crop, ImageNet mean/std normalization, tensor conversion
        composed_transforms = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)
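A sketch of a call site, assuming the sample is the {'image', 'label'} dict of PIL images these custom transforms conventionally expect (the snippet itself does not show the sample structure):

# Hypothetical usage of transform_val on a dict-style sample.
from PIL import Image

sample = {'image': Image.open('frame.png').convert('RGB'),
          'label': Image.open('frame_mask.png')}
sample = dataset.transform_val(sample)  # `dataset` is an instance of the enclosing class
image_tensor, label_tensor = sample['image'], sample['label']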
Code Example #3
File: isi.py  Project: jgsimard/video_sem_seg
    def __init__(self,
                 root_dir,
                 split="train",
                 sampeled_images_path="/home/deepsight/DeepSightData",
                 train_range=2000,
                 eval_distance=5):
        self.root_dir = root_dir
        self.split = split
        self.imgs_dir = os.path.join(root_dir, "RGB_Images")
        self.masks_dir = os.path.join(root_dir, "Masks")
        self.unlabeled_dir = os.path.join(root_dir, "unlabeled_rgb_images")
        self.train_range = train_range
        self.eval_distance = eval_distance

        self.datasets = ['Feb_11_CDE', 'Feb_8_CDE', 'March_1_CDE']
        self.mapping = video_scene_to_name(self.datasets, sampeled_images_path)

        self.transform_train = transforms.Compose([
            tr.RandomHorizontalFlip(temporal=True),
            tr.RandomScaleCrop(base_size=513,
                               crop_size=513,
                               fill=0,
                               temporal=True),
            tr.RandomRotate(15, temporal=True),
            tr.RandomGaussianBlur(temporal=True),
            tr.Normalize(mean=(0.5, 0.5, 0.5),
                         std=(0.5, 0.5, 0.5),
                         temporal=True),
            tr.ToTensor(temporal=True)
        ])

        self.transform_validation = transforms.Compose([
            tr.FixScaleCrop(crop_size=513, temporal=True),
            tr.Normalize(mean=(0.5, 0.5, 0.5),
                         std=(0.5, 0.5, 0.5),
                         temporal=True),
            tr.ToTensor(temporal=True)
        ])

        with open(os.path.join(root_dir, "Sets", f"{split}.txt"), 'r') as f:
            scenes, ids = [], []
            for basename in f:
                # basenames have the form "<scene>scene<id>"
                scene, frame_id = basename.strip("\n").split("scene")
                scenes.append(scene)
                ids.append(frame_id)
            self.files = pd.DataFrame(list(zip(scenes, ids)),
                                      columns=['scene', 'id'])
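Again the enclosing class is not named in the snippet; a hedged sketch of driving this temporal loader, with ISITemporalDataset as a placeholder:

# Hypothetical usage; ISITemporalDataset is a stand-in for the enclosing class.
from torch.utils.data import DataLoader

temporal_set = ISITemporalDataset(root_dir="/path/to/dataset", split="train")
loader = DataLoader(temporal_set, batch_size=2, shuffle=True)
for sample in loader:
    clips, masks = sample['image'], sample['label']  # temporal=True transforms act on frame sequences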
Code Example #4
        temporal_model = torch.nn.DataParallel(temporal_model, device_ids=args.gpu_ids)
        patch_replication_callback(temporal_model)
        temporal_model = temporal_model.cuda()

        # Load the checkpoint and restore both sub-models' weights
        checkpoint = torch.load(args.resume)
        spatial_model.module.load_state_dict(checkpoint['spatial_model_state_dict'])
        temporal_model.module.load_state_dict(checkpoint['temporal_model_state_dict'])

        # Switch to evaluation mode for inference
        temporal_model.eval()

        return temporal_model
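The fragment above is the tail of a builder function whose name and signature the snippet omits; a call-site sketch, with build_temporal_model as a hypothetical name:

# Hypothetical call site; the real function name is not in the snippet.
temporal_model = build_temporal_model(args)  # DataParallel-wrapped, on GPU, in eval mode
with torch.no_grad():
    output = temporal_model(batch)  # batch already moved to CUDA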


rgb_transform = transforms.Compose([tr.FixScaleCrop(crop_size=513),
                                    tr.Normalize(mean=(0.5, 0.5, 0.5),
                                                 std=(0.5, 0.5, 0.5)),
                                    tr.ToTensor()])

class DeepSightDemoRGB(Dataset):
    NUM_CLASSES = 11
    CLASSES = ['background', 'ortable', 'psc', 'vsc', 'human', 'cielinglight',
               'mayostand', 'table', 'anesthesiacart', 'cannula', 'instrument']

    def __init__(self, root_dir):
        self.root_dir = root_dir
        self.transform = rgb_transform
        self.images = [f for f in os.listdir(self.root_dir) if os.path.isfile(os.path.join(self.root_dir, f))]

    def __len__(self):
        return len(self.images)
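The snippet cuts off after __len__; a __getitem__ consistent with the rgb_transform defined above might look like the following (a sketch, not the project's actual code; it assumes os and PIL.Image are imported at module level):

    # Hypothetical __getitem__; the project's real implementation is not shown.
    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.images[index])
        image = Image.open(img_path).convert('RGB')
        # Demo data has no ground-truth masks, so reuse the image as a placeholder label.
        sample = {'image': image, 'label': image}
        return self.transform(sample), self.images[index]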