Example 1
def get(self, i):
    # Resolve the object-mask path and the (optional) parts-mask path for item i.
    object_fn, parts_fn = super().get(i)
    obj = fv.open_mask(object_fn, convert_mode='I')
    if parts_fn:
        parts = fv.open_mask(parts_fn, convert_mode='L')
    else:
        # No parts annotation: fall back to an all-zero mask of the same size.
        parts = fv.ImageSegment(torch.zeros_like(obj.px))
    return ObjectAndParts(obj, parts)
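For context, this get() is a method of a custom item-list class: fv presumably aliases fastai.vision, and ObjectAndParts is a small container defined elsewhere in that codebase. A minimal sketch of such a container (an assumption, not fastai API):

import torch
import fastai.vision as fv
from typing import NamedTuple

# Hypothetical container pairing the object mask with its parts mask.
class ObjectAndParts(NamedTuple):
    objects: fv.ImageSegment
    parts: fv.ImageSegment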
Example 2
from pathlib import Path
from typing import Union

import numpy as np
import fastai.vision.image
from fastai.vision import open_mask


def load_mask(
    mask_or_path: Union[np.ndarray, str, Path]
) -> fastai.vision.image.ImageSegment:
    """Load a mask using the "open_mask" function from fast.ai.

    Args:
        mask_or_path: mask object or mask location to be loaded

    Return:
        Mask
    """
    # Paths are opened with fastai's open_mask; anything else is assumed
    # to already be a mask object and is returned unchanged.
    if isinstance(mask_or_path, (str, Path)):
        mask = open_mask(mask_or_path)
    else:
        mask = mask_or_path
    return mask
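A small usage sketch for load_mask, assuming a CamVid-style label file at a placeholder path:

from pathlib import Path

mask = load_mask(Path('labels/0006R0_f02910_P.png'))  # placeholder path
print(mask.shape)          # e.g. torch.Size([1, 720, 960]) for CamVid-sized labels
mask.show(figsize=(4, 4))  # ImageSegment renders like any other fastai image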
Example 3
def open(self, fn):
    # Keep the raw integer class ids (div=False) and load as single-channel ('L').
    return open_mask(fn, div=False, convert_mode='L')
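The two keyword arguments are worth spelling out: convert_mode is passed to PIL when the file is opened, and div controls whether the resulting tensor is divided by 255 (Example 5 below relies on div=True for 0/255 binary masks). A quick comparison sketch with a placeholder file name:

from fastai.vision import open_mask

raw = open_mask('label.png', div=False, convert_mode='L')  # placeholder path; keeps stored class ids
scaled = open_mask('label.png', div=True)                  # same file, pixel values divided by 255
print(raw.data.unique(), scaled.data.unique())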
Example 4
label_paths = vision.get_image_files(LABEL_PATH)

# Load some samples to see what's inside
rand_indx = np.random.randint(0, len(image_paths))
sample_image_path = image_paths[rand_indx]
sample_image = vision.open_image(sample_image_path)
sample_image.show(figsize=(6, 6))
# Map an image path to its label path, e.g.
#   image: /root/.fastai/data/camvid/images/0006R0_f02910.png
#   label: /root/.fastai/data/camvid/labels/0006R0_f02910_P.png
segment_name_fn = lambda image_path: path.sep.join(
    [LABEL_PATH, f'{image_path.stem}_P{image_path.suffix}'])
# Load the label twice: first as a plain image (as it is stored in the dataset), then via vision.open_mask()
sample_label_path = segment_name_fn(sample_image_path)
sample_label = vision.open_image(sample_label_path)
sample_label.show(figsize=(6, 6))
# Note: after preprocessing with vision.open_mask() the label has a single channel instead of the original 3 channels
sample_label_preprocessed = vision.open_mask(sample_label_path)
sample_label_preprocessed.show(figsize=(6, 6))
print(sample_label_preprocessed.data)  # .data exposes the underlying torch tensor

# get image dimension (height and width)
image_size = np.array(sample_label_preprocessed.shape[1:])
data_size = image_size // 2  # train at half resolution
objects_in_image = np.loadtxt(path.sep.join([PATH, 'codes.txt']), dtype=str)  # class names, one per line

# Determine the batch size from free GPU memory to avoid CUDA out-of-memory errors
if torch.cuda.is_available():
    free = mem.gpu_mem_get_free_no_cache()
    if free > 8200:
        BATCH_SIZE = 8
    else:
        BATCH_SIZE = 4  # assumed value for GPUs with less free memory
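From here the walkthrough would normally assemble a DataBunch. A hedged sketch using the standard fastai v1 data-block API, reusing the variables above; IMAGE_PATH (the images folder) is an assumption, not defined in the snippet:

from fastai import vision

# Build a segmentation DataBunch, mapping each image to its label via segment_name_fn.
data = (vision.SegmentationItemList.from_folder(IMAGE_PATH)  # IMAGE_PATH is assumed
        .split_by_rand_pct(valid_pct=0.2)
        .label_from_func(segment_name_fn, classes=objects_in_image)
        .transform(vision.get_transforms(), size=tuple(data_size), tfm_y=True)
        .databunch(bs=BATCH_SIZE)
        .normalize(vision.imagenet_stats))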
Example 5
def open(self, fn):
    # div=True rescales the stored 0/255 mask values to 0/1.
    return open_mask(fn, div=True)
Example 6
def open(self, fn):
    # Read the class-id mask, then turn classes 1-4 into four stacked
    # binary channels so the target becomes a multi-channel float mask.
    mask = open_mask(fn).data
    mask = torch.cat([mask == 1, mask == 2, mask == 3, mask == 4], dim=0).float()
    return ImageSegmentFloat(mask)
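ImageSegmentFloat is not a fastai class; it is presumably a tiny ImageSegment subclass that keeps the mask as floats so the four stacked boolean channels survive as a multi-channel target. One possible minimal definition:

import fastai.vision as fv

class ImageSegmentFloat(fv.ImageSegment):
    """Hypothetical mask image whose data stays float instead of long."""
    @property
    def data(self):
        return self.px.float()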