Example #1
    def __init__(self, root_dir, split="train", hd=False):
        self.root_dir = root_dir
        self.split = split
        self.imgs_dir = os.path.join(root_dir, "RGB_Images")
        self.masks_dir = os.path.join(root_dir, "Masks")
        base_size = 1080 if hd else 513
        self.transform_train = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=base_size, crop_size=513, fill=0),
            tr.RandomRotate(15),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
            tr.ToTensor()
        ])

        self.transform_validation = transforms.Compose([
            tr.FixScaleCrop(crop_size=base_size),
            tr.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
            tr.ToTensor()
        ])

        with open(os.path.join(root_dir, "Sets", f"{split}.txt"), 'r') as f:
            basenames, imgs, masks = [], [], []
            for basename in f:
                basename = basename.strip()  # drop the trailing newline before building paths
                img, mask = self.get_filenames(basename)
                basenames.append(basename)
                imgs.append(img)
                masks.append(mask)
            self.files = pd.DataFrame(
                list(zip(basenames, imgs, masks)),
                columns=['basename', 'img_filename', 'label_filename'])
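
A minimal sketch (not shown in the original snippet) of how a __getitem__ could consume self.files and route a sample through the matching pipeline; the PIL Image import and the {'image': ..., 'label': ...} sample-dict convention are assumptions about what the tr.* transforms expect:

    def __getitem__(self, index):
        # look up the filenames recorded by __init__
        row = self.files.iloc[index]
        img = Image.open(os.path.join(self.imgs_dir, row['img_filename'])).convert('RGB')
        mask = Image.open(os.path.join(self.masks_dir, row['label_filename']))
        sample = {'image': img, 'label': mask}
        # pick the augmentation pipeline that matches the split
        if self.split == "train":
            return self.transform_train(sample)
        return self.transform_validation(sample)

    def __len__(self):
        return len(self.files)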
Example #2
    def __init__(self,
                 root_dir,
                 split="train",
                 train_ratio=0.9,
                 transform=None,
                 seed=1234):
        self.root_dir = root_dir
        self.depths_dir = os.path.join(root_dir, "Depths")
        self.images_dir = os.path.join(root_dir, "Images")
        self.masks_dir = os.path.join(root_dir, "Masks")
        self.overlays_dir = os.path.join(root_dir, "Overlays")
        self.plys_dir = os.path.join(root_dir, "PLYs")
        self.xmls_dir = os.path.join(root_dir, "XMLs")
        self.split = split
        self.weights = [1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1]

        self.transform_train = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=(287, 352),
                               crop_size=(287, 352),
                               fill=0),
            tr.RandomRotate(15),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
            tr.ToTensor()
        ])

        self.transform_validation = transforms.Compose([
            tr.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
            tr.ToTensor()
        ])

        xml_files = []
        for dir_path, dir_names, file_names in os.walk(self.xmls_dir):
            xml_files += [os.path.join(dir_path, file) for file in file_names]
        # sequence = parent directory name, frame_id = file name without its extension
        sequences = [file.split("/")[-2] for file in xml_files]
        frame_ids = [file.split("/")[-1].split(".")[0] for file in xml_files]
        self.files = pd.DataFrame(list(zip(sequences, frame_ids)),
                                  columns=['sequence', 'frame_id'])

        # self.files = self.files[:100]
        np.random.seed(seed)
        mask = np.random.rand(len(self.files)) < train_ratio
        if split == "train":
            self.files = self.files[mask]
        else:
            self.files = self.files[~mask]
        self.files = self.files.reset_index()
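
The seeded boolean mask at the end is what keeps the two splits disjoint: with the same seed, the "train" and the "validation" instance draw the same random vector, so one keeps the rows below train_ratio and the other keeps the complement. A stand-alone sketch of that trick (function and variable names here are illustrative, not from the original code):

import numpy as np
import pandas as pd

def split_frames(files, split="train", train_ratio=0.9, seed=1234):
    # same seed -> same mask, so the two calls partition the rows without overlap
    np.random.seed(seed)
    mask = np.random.rand(len(files)) < train_ratio
    selected = files[mask] if split == "train" else files[~mask]
    return selected.reset_index(drop=True)

frames_df = pd.DataFrame({'sequence': ['seq0'] * 10,
                          'frame_id': [str(i) for i in range(10)]})
train_df = split_frames(frames_df, "train")       # ~90% of the rows
val_df = split_frames(frames_df, "validation")    # the remaining ~10%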
Example #3
    def __init__(self, root_dir, split="train"):
        self.root_dir = root_dir
        self.transform = transforms.Compose([tr.Normalize(mean=(0.5, 0.5, 0.5),
                                                          std=(0.5, 0.5, 0.5)),
                                             tr.ToTensor()])

        self.images = [f for f in os.listdir(self.root_dir) if os.path.isfile(os.path.join(self.root_dir, f))]
Example #4
    def __init__(self,
                 root_dir,
                 split="train",
                 sampeled_images_path="/home/deepsight/DeepSightData",
                 train_range=2000,
                 eval_distance=5):
        self.root_dir = root_dir
        self.split = split
        self.imgs_dir = os.path.join(root_dir, "RGB_Images")
        self.masks_dir = os.path.join(root_dir, "Masks")
        self.unlabeled_dir = os.path.join(root_dir, "unlabeled_rgb_images")
        self.train_range = train_range
        self.eval_distance = eval_distance

        self.datasets = ['Feb_11_CDE', 'Feb_8_CDE', 'March_1_CDE']
        self.mapping = video_scene_to_name(self.datasets, sampeled_images_path)

        self.transform_train = transforms.Compose([
            tr.RandomHorizontalFlip(temporal=True),
            tr.RandomScaleCrop(base_size=513,
                               crop_size=513,
                               fill=0,
                               temporal=True),
            tr.RandomRotate(15, temporal=True),
            tr.RandomGaussianBlur(temporal=True),
            tr.Normalize(mean=(0.5, 0.5, 0.5),
                         std=(0.5, 0.5, 0.5),
                         temporal=True),
            tr.ToTensor(temporal=True)
        ])

        self.transform_validation = transforms.Compose([
            tr.FixScaleCrop(crop_size=513, temporal=True),
            tr.Normalize(mean=(0.5, 0.5, 0.5),
                         std=(0.5, 0.5, 0.5),
                         temporal=True),
            tr.ToTensor(temporal=True)
        ])

        with open(os.path.join(root_dir, "Sets", f"{split}.txt"), 'r') as f:
            scenes, ids = [], []
            for basename in f:
                scene, frame_id = basename.strip("\n").split("scene")
                scenes.append(scene)
                ids.append(frame_id)
            self.files = pd.DataFrame(list(zip(scenes, ids)),
                                      columns=['scene', 'id'])
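
Whatever the concrete class is called, a dataset built this way drops straight into a DataLoader. A minimal usage sketch; the class name, data root, and sample-dict keys below are placeholders, not taken from the snippet:

from torch.utils.data import DataLoader

dataset = TemporalSegmentationDataset(root_dir="/data/deepsight", split="train")  # hypothetical name
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=4)

for sample in loader:
    images, labels = sample['image'], sample['label']  # assumed keys produced by the tr.* pipeline
    ...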
Example #5
    def transform_val(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)
Example #6
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomRotateB(),
            tr.RandomScaleCrop(base_size=self.base_size, crop_size=self.crop_size, fill=255),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)
Example #7
    def __init__(self, root_dir, split="train", transform=None, seed=1234):
        self.root_dir = root_dir
        self.depths_dir = os.path.join(root_dir, "Depths")
        self.images_dir = os.path.join(root_dir, "Images")
        self.masks_dir = os.path.join(root_dir, "Masks")
        self.overlays_dir = os.path.join(root_dir, "Overlays")
        self.plys_dir = os.path.join(root_dir, "PLYs")
        self.xmls_dir = os.path.join(root_dir, "XMLs")
        self.split = split
        # self.weights = [True, True, True, True, True, False, False, True, False, False, False, False, False]

        np.random.seed(seed)

        # RandomNoise must come after RandomBrightnessContrast, which requires uint8 input
        self.augmentation = aug.Compose([
            aug.RandomBrightnessContrast(
                p=0.5),  # , brightness_limit=0.5, contrast_limit=0.5),
            RandomNoise(p=0.5),
        ])

        self.normalize = tr.Normalize(mean=(0.5, 0.5, 0.5),
                                      std=(0.5, 0.5, 0.5),
                                      multiview=True)
        self.to_tensor = tr.ToTensor(multiview=True)

        # find the sequences
        self.sequences = os.listdir(self.xmls_dir)
        self.sequences.sort()

        # read the frames
        self.files = []
        for seq in self.sequences:
            # one sorted list of frame ids (file names without extension) per sequence
            frames = sorted(os.listdir(os.path.join(self.xmls_dir, seq)))
            self.files.append([file.split('.')[0] for file in frames])

        # read data split index
        if self.split == "train":
            filename = "train_data.txt"
        elif self.split == "validation":
            filename = "validation_data.txt"
        else:
            raise ValueError(f"unknown split: {self.split}")
        with open(os.path.join(self.root_dir, filename), 'r') as f:
            lines = f.readlines()

        # get indexes (one entry per line of the split file)
        self.index = list(range(len(lines)))
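
RandomNoise in the pipeline above is a project-specific transform that is not shown. A minimal guess at what it might look like, assuming albumentations' ImageOnlyTransform interface; the noise strength and the clipping range are invented here:

import numpy as np
from albumentations.core.transforms_interface import ImageOnlyTransform

class RandomNoise(ImageOnlyTransform):
    """Additive Gaussian noise. Placed after RandomBrightnessContrast so that the
    float output produced here never reaches a transform that expects uint8."""

    def __init__(self, std=10.0, p=0.5):
        super().__init__(p=p)
        self.std = std

    def apply(self, img, **params):
        noise = np.random.normal(0.0, self.std, img.shape).astype(np.float32)
        return np.clip(img.astype(np.float32) + noise, 0.0, 255.0)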
Example #8
        # LOAD
        checkpoint = torch.load(args.resume)
        spatial_model.module.load_state_dict(checkpoint['spatial_model_state_dict'])
        temporal_model.module.load_state_dict(checkpoint['temporal_model_state_dict'])

        #EVAL
        temporal_model.eval()

        return temporal_model


rgb_transform = transforms.Compose([tr.FixScaleCrop(crop_size=513),
                                    tr.Normalize(mean=(0.5, 0.5, 0.5),
                                                 std=(0.5, 0.5, 0.5)),
                                    tr.ToTensor()])

class DeepSightDemoRGB(Dataset):
    NUM_CLASSES = 11
    CLASSES = ['background', 'ortable', 'psc', 'vsc', 'human', 'cielinglight', 'mayostand', 'table', 'anesthesiacart', 'cannula', 'instrument']

    def __init__(self, root_dir):
        self.root_dir = root_dir
        self.transform = rgb_transform
        self.images = [f for f in os.listdir(self.root_dir) if os.path.isfile(os.path.join(self.root_dir, f))]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, item):
        path = os.path.join(self.root_dir, self.images[item])