Beispiel #1
0
 def test_gets_elements_generator(self):
     """Feeding a recurse_get_elements generator back in flattens it in order."""
     nested = [
         0,
         {
             'a': 1,
             'b': 2,
             'c': (3, 4, 5),
             'd': 6
         },
         (7, {
             'e': 8,
             'f': 9
         }),
     ]
     expected = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
     inner = bd.recurse_get_elements(nested)
     # The generator itself must also be a valid recursion input.
     for position, element in enumerate(bd.recurse_get_elements(inner)):
         assert element == expected[position]
Beispiel #2
0
def make_grid(
    images,
    view=None,
    size=None,
    inter_pad=0,
    fill_value=0,
    scale_each=False,
):
    """Creates a single image grid from a set of images.

    Args:
        images (Tensor, Array, list or tuple): Torch Tensor(s) and/or Numpy
            Array(s). Nested lists/tuples/dicts are flattened recursively;
            non-Tensor/Array elements are ignored.
        view (str, optional): The image view e.g. 'hwc-bgr' or 'torch'
            (default ``bd.default_view``).
        size (list or tuple, optional): Grid dimensions, rows x columns. (default None).
        inter_pad (int or list/tuple, optional): Padding separating the images (default 0).
        fill_value (int, optional): Fill value for inter-padding (default 0).
        scale_each (bool, optional): Scale each image to [0-1] (default False).

    Returns:
        Tensor or Array: The resulting grid, or None if no usable images were
        found. If any of the inputs is an Array then the result is an Array,
        otherwise a Tensor.

    Notes:
        - Images of **different sizes are padded** to match the largest.
        - Images must have the same view (e.g. chw-rgb (torch)).
        - Each element is normalized to a 4-D, 3-channel Tensor by
          ``_make_tensor_4d_with_3_channels`` before stacking.
        - The image list can contain both **Torch Tensors and Numpy Arrays**
          at the same time as long as they have the same view.
        - If size is not given, the resulting grid will be the smallest square
          in which all the images fit. If the images are more than the given
          size then the default smallest square is used.

    Raises:
        TypeError: If images are not Arrays, Tensors, a list or a tuple
        ValueError: If channels or dimensions are wrong.

    """

    view = view or bd.default_view
    # Normalize the requested view to its canonical form.
    orig_view = bd.determine_view(view)

    # Flatten arbitrarily nested containers and keep only Arrays/Tensors.
    images = list(bd.recurse_get_elements(images))
    images = [x for x in images if bd.is_array(x) or bd.is_tensor(x)]
    # If any input was a Numpy Array, convert the finished grid back to one.
    should_convert_to_array = any(bd.is_array(x) for x in images)
    images = [_make_tensor_4d_with_3_channels(x, orig_view) for x in images]
    if not images:
        return None

    # Center-pad every image to the largest height/width in the set.
    if len(images) > 1:
        maxh = max(x.size(-2) for x in images)
        maxw = max(x.size(-1) for x in images)
        for i, img in enumerate(images):
            imgh, imgw = img.size(-2), img.size(-1)
            if imgh < maxh or imgw < maxw:
                padhl = (maxh - imgh) // 2
                padhr = maxh - imgh - padhl
                padwl = (maxw - imgw) // 2
                padwr = maxw - imgw - padwl
                images[i] = torch.nn.functional.pad(
                    img, (padwl, padwr, padhl, padhr), value=fill_value)
    images = torch.cat(images, 0)

    # Scale each image independently to [0-1] if requested.
    if scale_each:
        for i in range(images.size(0)):
            images[i] = bd.map_range(images[i])

    # Grid geometry. NOTE(review): im_w/im_h name dims 2 and 3 of the
    # (b, c, dim2, dim3) batch; the placement below uses them consistently,
    # so the result is unchanged regardless of which is height vs width.
    b, c, im_w, im_h = images.size()
    # Use the requested grid size if it can hold all images, otherwise fall
    # back to the smallest square that fits.
    if size is None or b > size[0] * size[1]:
        n_row = int(np.ceil(np.sqrt(b)))
        n_col = int(np.ceil(b / n_row))
    else:
        n_col = size[0]
        n_row = size[1]

    if isinstance(inter_pad, int):
        inter_pad = (inter_pad, inter_pad)

    w_pad, h_pad = inter_pad[1], inter_pad[0]
    # Negative padding adds no extra grid size (images are allowed to overlap).
    total_w_padding = max(w_pad, 0) * (n_col - 1)
    total_h_padding = max(h_pad, 0) * (n_row - 1)

    w = int(im_w * n_col) + total_w_padding
    h = int(im_h * n_row) + total_h_padding
    # Allocate on the same dtype/device as the images, pre-filled.
    grid = images.new_full((c, w, h), fill_value)
    for i in range(b):
        # Fill column-by-column: consecutive images go down a column first.
        i_row = i % n_row
        i_col = i // n_row
        grid[:, i_col * (im_w + w_pad):i_col * (im_w + w_pad) + im_w,
             i_row * (im_h + h_pad):i_row * (im_h + h_pad) + im_h].copy_(images[i])

    if should_convert_to_array:
        grid = bd.to_array(grid)
    if orig_view != 'torch':
        grid = bd.change_view(grid, 'torch', orig_view)
    return grid
Beispiel #3
0
 def test_gets_element_single(self):
     """A non-container value is yielded exactly once."""
     yielded = []
     for element in bd.recurse_get_elements(1):
         assert element == 1
         yielded.append(element)
     assert len(yielded) == 1
Beispiel #4
0
 def test_gets_elements_dict(self):
     """Dict inputs yield their values in insertion order."""
     data = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
     expected = list(data.values())
     for position, element in enumerate(bd.recurse_get_elements(data)):
         assert element == expected[position]
Beispiel #5
0
 def test_gets_elements_list(self):
     """A flat list yields its own elements in order."""
     source = [1, 2, 3, 4]
     for position, element in enumerate(bd.recurse_get_elements(source)):
         assert element == source[position]
Beispiel #6
0
 def test_gets_elements_tuple(self):
     """A flat tuple yields its own elements in order."""
     source = (1, 2, 3, 4)
     for position, element in enumerate(bd.recurse_get_elements(source)):
         assert element == source[position]