Example #1
def create_uniform_pixel_coords_image(image_dims, batch_shape=None, normalized=False, dev_str=None):
    """
    Create image of homogeneous integer :math:`xy` pixel co-ordinates :math:`\mathbf{X}\in\mathbb{Z}^{h×w×3}`, stored
    as floating point values. The origin is at the top-left corner of the image, with :math:`+x` rightwards, and
    :math:`+y` downwards. The entries of the final homogeneous dimension are all ones. In subsequent use of this
    image, the depth of each pixel can be represented using this same homogeneous representation, by simply scaling
    each 3-vector by the depth value. The final dimension therefore always holds the depth value, while the first
    two dimensions hold depth-scaled pixel co-ordinates.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=172>`_
    deduction from top of page 154, section 6.1, equation 6.1

    :param image_dims: Image dimensions.
    :type image_dims: sequence of ints.
    :param batch_shape: Shape of batch. Assumed no batch dimensions if None.
    :type batch_shape: sequence of ints, optional
    :param normalized: Whether to normalize x-y pixel co-ordinates to the range 0-1.
    :type normalized: bool
    :param dev_str: Device on which to create the array, e.g. 'cuda:0', 'cuda:1', 'cpu' etc.
    :type dev_str: str, optional
    :return: Image of homogeneous pixel co-ordinates *[batch_shape,height,width,3]*
    """

    # shapes as lists
    batch_shape = [] if batch_shape is None else batch_shape
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # other shape specs
    num_batch_dims = len(batch_shape)
    flat_shape = [1] * num_batch_dims + image_dims + [3]
    tile_shape = batch_shape + [1] * 3

    # H x W x 1
    pixel_x_coords = _ivy.cast(_ivy.reshape(_ivy.tile(_ivy.arange(image_dims[1], dev_str=dev_str), [image_dims[0]]),
                                            (image_dims[0], image_dims[1], 1)), 'float32')
    if normalized:
        pixel_x_coords = pixel_x_coords / (float(image_dims[1]) + MIN_DENOMINATOR)

    # W x H x 1
    pixel_y_coords_ = _ivy.cast(_ivy.reshape(_ivy.tile(_ivy.arange(image_dims[0], dev_str=dev_str), [image_dims[1]]),
                                             (image_dims[1], image_dims[0], 1)), 'float32')

    # H x W x 1
    pixel_y_coords = _ivy.transpose(pixel_y_coords_, (1, 0, 2))
    if normalized:
        pixel_y_coords = pixel_y_coords / (float(image_dims[0]) + MIN_DENOMINATOR)

    # H x W x 1
    ones = _ivy.ones_like(pixel_x_coords, dev_str=dev_str)

    # BS x H x W x 3
    return _ivy.tile(_ivy.reshape(_ivy.concatenate((pixel_x_coords, pixel_y_coords, ones), -1),
                                  flat_shape), tile_shape)
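A minimal usage sketch (assuming an ivy backend has been set, and that MIN_DENOMINATOR is the module's small epsilon constant used above); for a 2x3 image with no batch dimensions, x increases along each row, y down each column, and the final channel is all ones:

# hypothetical usage, not part of the source above
coords = create_uniform_pixel_coords_image([2, 3])
# coords has shape (2, 3, 3):
# [[[0., 0., 1.], [1., 0., 1.], [2., 0., 1.]],
#  [[0., 1., 1.], [1., 1., 1.], [2., 1., 1.]]]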
Example #2
def create_trimesh_indices_for_image(batch_shape, image_dims, dev_str='cpu:0'):
    """
    Create triangle mesh indices for an image with the given image dimensions.

    :param batch_shape: Shape of batch.
    :type batch_shape: sequence of ints
    :param image_dims: Image dimensions.
    :type image_dims: sequence of ints
    :param dev_str: Device on which to create the array, e.g. 'cuda:0', 'cuda:1', 'cpu' etc.
    :type dev_str: str, optional
    :return: Triangle mesh indices for image *[batch_shape,2x(h-1)x(w-1),3]*
    """

    # shapes as lists
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # other shape specs
    num_batch_dims = len(batch_shape)
    tri_dim = 2 * (image_dims[0] - 1) * (image_dims[1] - 1)
    flat_shape = [1] * num_batch_dims + [tri_dim] + [3]
    tile_shape = batch_shape + [1] * 2

    # 1 x W-1
    t00_ = _ivy.reshape(_ivy.arange(image_dims[1] - 1, dtype_str='float32', dev_str=dev_str), (1, -1))

    # H-1 x 1
    k_ = _ivy.reshape(_ivy.arange(image_dims[0] - 1, dtype_str='float32', dev_str=dev_str), (-1, 1)) * image_dims[1]

    # H-1 x W-1
    t00_ = _ivy.matmul(_ivy.ones((image_dims[0] - 1, 1), dev_str=dev_str), t00_)
    k_ = _ivy.matmul(k_, _ivy.ones((1, image_dims[1] - 1), dev_str=dev_str))

    # (H-1xW-1) x 1
    t00 = _ivy.expand_dims(t00_ + k_, -1)
    t01 = t00 + 1
    t02 = t00 + image_dims[1]
    t10 = t00 + image_dims[1] + 1
    t11 = t01
    t12 = t02

    # (H-1xW-1) x 3
    t0 = _ivy.concatenate((t00, t01, t02), -1)
    t1 = _ivy.concatenate((t10, t11, t12), -1)

    # BS x 2x(H-1xW-1) x 3
    return _ivy.tile(_ivy.reshape(_ivy.concatenate((t0, t1), 0),
                                  flat_shape), tile_shape)
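A minimal usage sketch (assuming an ivy backend has been set): for a 3x3 image with no batch dimensions, tri_dim = 2 x (3-1) x (3-1) = 8 triangles, each row holding the three flat pixel indices of one triangle:

# hypothetical usage, not part of the source above
indices = create_trimesh_indices_for_image([], [3, 3])
# indices has shape (8, 3); first the four 'upper-left' triangles
# [[0., 1., 3.], [1., 2., 4.], [3., 4., 6.], [4., 5., 7.]]
# then the four 'lower-right' triangles
# [[4., 1., 3.], [5., 2., 4.], [7., 4., 6.], [8., 5., 7.]]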
Example #3
    def _group_tensor_into_windowed_tensor(self, x, valid_first_frame):
        if self._window_size == 1:
            valid_first_frame_pruned = ivy.cast(valid_first_frame[:, 0],
                                                'bool')
        else:
            valid_first_frame_pruned = ivy.cast(
                valid_first_frame[:1 - self._window_size, 0], 'bool')
        if ivy.reduce_sum(ivy.cast(valid_first_frame_pruned, 'int32'))[0] == 0:
            valid_first_frame_pruned =\
                ivy.cast(ivy.one_hot(0, self._sequence_lengths[0] - self._window_size + 1), 'bool')
        window_idxs_single = ivy.indices_where(valid_first_frame_pruned)

        gather_idxs_list = list()
        for w_idx in window_idxs_single:
            gather_idxs_list.append(
                ivy.expand_dims(
                    ivy.arange(w_idx[0] + self._window_size, w_idx[0], 1), 0))
        gather_idxs = ivy.concatenate(gather_idxs_list, 0)
        gather_idxs = ivy.reshape(gather_idxs, (-1, 1))
        num_valid_windows_for_seq = ivy.shape(window_idxs_single)[0:1]
        return ivy.reshape(
            ivy.gather_nd(x, gather_idxs),
            ivy.concatenate(
                (num_valid_windows_for_seq, ivy.array(
                    [self._window_size]), ivy.shape(x)[1:]), 0))
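The gather-based windowing above can be illustrated with plain numpy (a sketch of the same index arithmetic, not library code): each valid first-frame index w spawns the window [w, ..., w + window_size - 1], and the flat gather indices are reshaped back into (num_windows, window_size, ...):

import numpy as np

x = np.arange(5 * 2).reshape(5, 2)   # a sequence of 5 frames, feature size 2
window_size = 3
valid_starts = np.array([0, 2])      # hypothetical valid first-frame indices
gather = np.stack([np.arange(w, w + window_size) for w in valid_starts])
windows = x[gather.reshape(-1)].reshape(len(valid_starts), window_size, 2)
# windows.shape == (2, 3, 2)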
Example #4
def _se_to_mask(se: ivy.Array) -> ivy.Array:
    se_h, se_w = se.shape
    se_flat = ivy.reshape(se, (-1,))
    num_feats = se_h * se_w
    i_s = ivy.expand_dims(ivy.arange(num_feats, dev_str=ivy.dev_str(se)), -1)
    # recover 2D co-ordinates from the row-major flat index: i = row * se_w + col
    y_s = i_s % se_w
    x_s = i_s // se_w
    indices = ivy.concatenate((i_s, ivy.zeros_like(i_s, dtype_str='int32'), x_s, y_s), -1)
    out = ivy.scatter_nd(
        indices, ivy.cast(se_flat >= 0, ivy.dtype_str(se)), (num_feats, 1, se_h, se_w), dev_str=ivy.dev_str(se))
    return out
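A usage sketch (hypothetical, assuming an ivy backend has been set): a 3x3 structuring element becomes a (9, 1, 3, 3) stack of one-hot convolution kernels, one kernel per element position:

se = ivy.ones((3, 3))   # hypothetical all-ones structuring element
mask = _se_to_mask(se)
# mask.shape == (9, 1, 3, 3); mask[i, 0] is one-hot at the i-th position,
# since every entry of se is >= 0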
Example #5
    def _group_tensor_into_windowed_tensor_simple(self, x, seq_info):
        seq_info = self._update_seq_info_for_window(seq_info)
        if self._fixed_sequence_length:
            return ivy.reshape(ivy.gather_nd(x, ivy.array(self._gather_idxs)),
                               (self._windows_per_seq, self._window_size) +
                               x.shape[1:])
        else:
            num_windows_in_seq = int(
                ivy.to_numpy(
                    ivy.maximum(seq_info.length[0] - self._window_size + 1,
                                1)))
            window_idxs_in_seq = ivy.arange(num_windows_in_seq, 0, 1)
            gather_idxs = ivy.tile(
                ivy.reshape(ivy.arange(self._window_size, 0, 1),
                            (1, self._window_size)),
                (num_windows_in_seq, 1)) + ivy.expand_dims(
                    window_idxs_in_seq, -1)
            gather_idxs_flat = ivy.reshape(
                gather_idxs, (self._window_size * num_windows_in_seq, 1))
            return ivy.reshape(ivy.gather_nd(x, gather_idxs_flat),
                               (num_windows_in_seq, self._window_size) +
                               x.shape[1:])
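The broadcast index construction in the else-branch can be sketched in plain numpy (illustration only, not library code): tiling arange(window_size) across rows and adding the per-window start offsets yields all window indices at once:

import numpy as np

seq_len, window_size = 5, 3
num_windows = max(seq_len - window_size + 1, 1)   # 3
gather_idxs = (np.tile(np.arange(window_size)[None, :], (num_windows, 1))
               + np.arange(num_windows)[:, None])
# gather_idxs == [[0, 1, 2], [1, 2, 3], [2, 3, 4]]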
Example #6
    def _forward(self, x, prev_state):
        prev_read_vector_list = prev_state[1]

        controller_input = ivy.concatenate([x] + prev_read_vector_list, axis=1)
        controller_output, controller_state = self._controller(ivy.expand_dims(controller_input, -2),
                                                               initial_state=prev_state[0])
        controller_output = controller_output[..., -1, :]

        parameters = self._controller_proj(controller_output)
        parameters = ivy.clip(parameters, -self._clip_value, self._clip_value)
        head_parameter_list = \
            ivy.split(parameters[:, :self._num_parameters_per_head * self._num_heads], self._num_heads,
                      axis=1)
        erase_add_list = ivy.split(parameters[:, self._num_parameters_per_head * self._num_heads:],
                                   2 * self._write_head_num, axis=1)

        prev_w_list = prev_state[2]
        prev_M = prev_state[4]
        w_list = []
        for i, head_parameter in enumerate(head_parameter_list):
            k = ivy.tanh(head_parameter[:, 0:self._memory_vector_dim])
            beta = ivy.softplus(head_parameter[:, self._memory_vector_dim])
            g = ivy.sigmoid(head_parameter[:, self._memory_vector_dim + 1])
            s = ivy.softmax(
                head_parameter[:, self._memory_vector_dim + 2:self._memory_vector_dim +
                                                              2 + (self._shift_range * 2 + 1)])
            gamma = ivy.softplus(head_parameter[:, -1]) + 1
            w = self._addressing(k, beta, g, s, gamma, prev_M, prev_w_list[i])
            w_list.append(w)

        # Reading (Sec 3.1)

        read_w_list = w_list[:self._read_head_num]
        if self._step == 0:
            usage_indicator = ivy.zeros_like(w_list[0])
        else:
            usage_indicator = prev_state[3] + ivy.reduce_sum(ivy.concatenate(read_w_list, 0))
        read_vector_list = []
        for i in range(self._read_head_num):
            read_vector = ivy.reduce_sum(ivy.expand_dims(read_w_list[i], axis=2) * prev_M, axis=1)
            read_vector_list.append(read_vector)

        # Writing (Sec 3.2)

        prev_write_w_list = prev_w_list[self._read_head_num:]
        w_wr_size = math.ceil(self._memory_size / 2) if self._retroactive_updates else self._memory_size
        if self._sequential_writing:
            batch_size = ivy.shape(x)[0]
            if self._step < w_wr_size:
                w_wr_list = [ivy.tile(ivy.cast(ivy.one_hot(
                    ivy.array([self._step]), w_wr_size), 'float32'),
                    (batch_size, 1))] * self._write_head_num
            else:
                batch_idxs = ivy.expand_dims(ivy.arange(batch_size, 0), -1)
                mem_idxs = ivy.expand_dims(ivy.argmax(usage_indicator[..., :w_wr_size], -1), -1)
                total_idxs = ivy.concatenate((batch_idxs, mem_idxs), -1)
                w_wr_list = [ivy.scatter_nd(total_idxs, ivy.ones((batch_size,)),
                                            (batch_size, w_wr_size))] * self._write_head_num
        else:
            w_wr_list = w_list[self._read_head_num:]
        if self._retroactive_updates:
            w_ret_list = [self._retroactive_discount * prev_write_w[..., w_wr_size:] +
                          (1 - self._retroactive_discount) * prev_write_w[..., :w_wr_size]
                          for prev_write_w in prev_write_w_list]
            w_write_list = [ivy.concatenate((w_wr, w_ret), -1) for w_wr, w_ret in zip(w_wr_list, w_ret_list)]
        else:
            w_write_list = w_wr_list
        M = prev_M
        for i in range(self._write_head_num):
            w = ivy.expand_dims(w_write_list[i], axis=2)
            if self._with_erase:
                erase_vector = ivy.expand_dims(ivy.sigmoid(erase_add_list[i * 2]), axis=1)
                M = M * ivy.ones(ivy.shape(M)) - ivy.matmul(w, erase_vector)
            add_vector = ivy.expand_dims(ivy.tanh(erase_add_list[i * 2 + 1]), axis=1)
            M = M + ivy.matmul(w, add_vector)

        NTM_output = self._output_proj(ivy.concatenate([controller_output] + read_vector_list, axis=1))
        NTM_output = ivy.clip(NTM_output, -self._clip_value, self._clip_value)

        self._step += 1
        return NTM_output, NTMControllerState(
            controller_state=controller_state, read_vector_list=read_vector_list, w_list=w_list,
            usage_indicator=usage_indicator, M=M)
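Consistent with the slicing over head_parameter above, each head packs k, beta, g, s and gamma in that order, so the per-head parameter count works out to memory_vector_dim + 2 * shift_range + 4 (an identity inferred from the code, not stated in the source):

# per-head layout inferred from the slicing in _forward:
#   k     : [0, memory_vector_dim)
#   beta  : memory_vector_dim
#   g     : memory_vector_dim + 1
#   s     : [memory_vector_dim + 2, memory_vector_dim + 2 + (2 * shift_range + 1))
#   gamma : the final entry
memory_vector_dim, shift_range = 20, 1   # illustrative values
num_parameters_per_head = memory_vector_dim + 2 * shift_range + 4   # 26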
Example #7
    def _get_dataset(self, starting_example, ending_example):
        class ContainerIdxMap:
            def __init__(self,
                         sizes,
                         fpath_template=None,
                         seq_idxs=None,
                         start=None,
                         end=None,
                         max_seq_len=None,
                         conts_to_skip=None,
                         pruned_sizes=None):
                if isinstance(sizes, (tuple, list)):
                    pruned_sizes = ivy.default(pruned_sizes, [
                        SeqDataLoader._compute_seq_len(i, sl, conts_to_skip)
                        for i, sl in enumerate(sizes)
                    ])
                    num_empty = sum([ps == 0 for ps in pruned_sizes])
                    self._raw_sizes = dict(
                        zip(range(start, end + 1 + num_empty),
                            sizes[start:end + 1 + num_empty]))
                    self._pruned_sizes = dict(
                        zip(range(start, end + 1 + num_empty),
                            pruned_sizes[start:end + 1 + num_empty]))
                elif isinstance(sizes, (int, dict)):
                    self._raw_sizes = sizes
                    self._pruned_sizes = ivy.default(pruned_sizes, sizes)
                    if isinstance(self._pruned_sizes, int):
                        pruned_dict = dict()
                        for seq_idx, win_idx in conts_to_skip:
                            if seq_idx not in pruned_dict:
                                pruned_dict[seq_idx] = list()
                            pruned_dict[seq_idx].append(win_idx)
                        pruned_dict = {
                            k: len(set(v))
                            for k, v in pruned_dict.items()
                        }
                        pruned_sizes_dict = {
                            k: self._pruned_sizes - num_pruned
                            for k, num_pruned in pruned_dict.items()
                        }
                        num_empty = sum(
                            [size == 0 for size in pruned_sizes_dict.values()])
                        pruned_sizes = collections.defaultdict(
                            lambda: self._pruned_sizes, pruned_sizes_dict)
                    else:
                        num_empty = sum([ps == 0 for ps in self._pruned_sizes])
                else:
                    raise Exception(
                        'Invalid type for sizes, expected one of int, dict, tuple or list, '
                        'but found {} of type {}'.format(sizes, type(sizes)))
                self._constant_size = isinstance(self._raw_sizes, int)
                if max_seq_len:
                    self._max_seq_len = max_seq_len
                else:
                    self._max_seq_len = self._pruned_sizes if self._constant_size else max(
                        self._pruned_sizes.values())
                self._fpath_template = fpath_template
                self._conts_to_skip = conts_to_skip
                if seq_idxs:
                    self._seq_idxs = seq_idxs
                else:
                    vals = [
                        v
                        for i, v in enumerate(range(start, end + 1 +
                                                    num_empty))
                        if pruned_sizes[i] > 0
                    ]
                    keys = range(0, min(end - start + 1 + num_empty,
                                        len(vals)))
                    self._seq_idxs = dict(zip(keys, vals))

            def __getitem__(self, slice_obj):
                if isinstance(slice_obj, slice):
                    seq_idxs = collections.OrderedDict([
                        (i, self._seq_idxs[idx]) for i, idx in enumerate(
                            range(slice_obj.start, slice_obj.stop,
                                  ivy.default(slice_obj.step, 1)))
                    ])
                elif isinstance(slice_obj, int):
                    seq_idxs = collections.OrderedDict(
                        {0: self._seq_idxs[slice_obj]})
                else:
                    raise Exception(
                        'Invalid type for slice_obj, expected either slice or int, '
                        'but found {} of type {}'.format(
                            slice_obj, type(slice_obj)))
                if self._constant_size:
                    sizes = self._raw_sizes
                else:
                    sizes = collections.OrderedDict({
                        seq_idx: self._raw_sizes[seq_idx]
                        for seq_idx in seq_idxs.values()
                    })
                return ContainerIdxMap(sizes,
                                       self._fpath_template,
                                       seq_idxs,
                                       max_seq_len=self._max_seq_len,
                                       conts_to_skip=self._conts_to_skip,
                                       pruned_sizes=self._pruned_sizes)

            def __len__(self):
                return len(self._seq_idxs)

            def shuffle(self):
                mapped_idxs = list(self._seq_idxs.values())
                np.random.shuffle(mapped_idxs)
                self._seq_idxs = collections.OrderedDict(
                    zip(self._seq_idxs.keys(), mapped_idxs))

            def to_idxs(self):
                seq_idxs = self._seq_idxs.values()
                sizes = [
                    self._raw_sizes
                    if self._constant_size else self._raw_sizes[seq_idx]
                    for seq_idx in seq_idxs
                ]
                rets = [[(seq_idx, win_idx) for win_idx in range(size)
                         if not SeqDataLoader._skip_cont(
                             seq_idx, win_idx, self._conts_to_skip)]
                        for seq_idx, size in zip(seq_idxs, sizes)]
                return [
                    r + [(None, None)] * (self._max_seq_len - len(r))
                    for r in rets if list(set(r)) != [None]
                ]

            def to_filepaths(self):
                if not ivy.exists(self._fpath_template):
                    raise Exception(
                        'to_filepaths method is not valid if fpath_template has not been specified '
                        'in the constructor.')
                seq_idxs = self._seq_idxs.values()
                sizes = [
                    self._raw_sizes
                    if self._constant_size else self._raw_sizes[seq_idx]
                    for seq_idx in seq_idxs
                ]
                rets = [[
                    self._fpath_template % (seq_idx, win_idx)
                    for win_idx in range(size) if not SeqDataLoader._skip_cont(
                        seq_idx, win_idx, self._conts_to_skip)
                ] for seq_idx, size in zip(seq_idxs, sizes)]
                return [
                    r + [''] * (self._max_seq_len - len(r)) for r in rets
                    if ''.join(r) != ''
                ]

            @property
            def sizes(self):
                return self._pruned_sizes

        # container filepaths
        if self._spec.container_load_mode in ['preload', 'dynamic']:
            fpath_template = os.path.join(
                self._container_data_dir,
                self._spec.dataset_spec.cont_fname_template)
        else:
            fpath_template = None
        container_idx_map = ContainerIdxMap(
            self._spec.dataset_spec.unpruned_sequence_lengths,
            fpath_template,
            start=starting_example,
            end=ending_example,
            conts_to_skip=self._spec.containers_to_skip)

        if self._spec.num_sequences != -1:
            container_idx_map = container_idx_map[0:self._spec.num_sequences]

        # shuffle sequences
        if self._spec.preshuffle_data:
            container_idx_map.shuffle()

        # extract sequence lengths
        if self._fixed_sequence_length:
            self._sequence_lengths =\
                collections.OrderedDict(zip(range(len(container_idx_map)),
                                            [self._spec.dataset_spec.sequence_lengths] * len(container_idx_map)))
            self._windows_per_seq = self._sequence_lengths[
                0] - self._window_size + 1
            # windowing values
            window_idxs_per_seq = ivy.reshape(
                ivy.arange(self._windows_per_seq, 0, 1),
                (self._windows_per_seq, 1))
            gather_idxs_list = list()
            for x in window_idxs_per_seq:
                gather_idxs_list.append(
                    ivy.expand_dims(
                        ivy.arange(x[0] + self._window_size, x[0], 1), 0))
            gather_idxs = ivy.concatenate(gather_idxs_list, 0)
            self._gather_idxs = \
                ivy.to_numpy(ivy.reshape(gather_idxs, (self._windows_per_seq * self._window_size, 1))).tolist()
        else:
            self._sequence_lengths = container_idx_map.sizes

        # maybe pre-load containers
        if self._spec.container_load_mode == 'preload':
            # load containers with vector data and image filepath entries
            container_slices = self._get_containers_w_filepath_img_entries_as_tensor_slices(
                container_idx_map.to_filepaths())
            if self._first_frame_validity_fn is not None:
                container_slices =\
                    self._first_frame_validity_fn(container_slices, [ending_example - starting_example + 1])

            # prune unwanted chains of keys
            if 'unused_key_chains' in self._spec:
                container_slices = self._prune_unused_key_chains(
                    container_slices)

            dataset = Dataset(ivy.Container.list_stack([
                c[0]
                for c in container_slices.unstack(0, container_slices.shape[0])
            ], 0),
                              'base',
                              container_slices.shape[0],
                              numpy_loading=True,
                              cache_size=self._base_cache_size,
                              queue_timeout=self._spec.queue_timeout)
        else:
            if self._spec.container_load_mode == 'dynamic':
                # load containers with filepath entries
                dataset = Dataset(ivy.Container({'fpaths': container_idx_map}),
                                  'base',
                                  len(container_idx_map),
                                  trans_fn=lambda cont: cont.map(
                                      lambda x_, kc: x_.to_filepaths()),
                                  elementwise_query_fn=False,
                                  numpy_loading=True,
                                  cache_size=self._base_cache_size,
                                  queue_timeout=self._spec.queue_timeout)
                dataset = dataset.map('loaded_json', self._load_json_files,
                                      self._num_workers.loaded_json)
                dataset = dataset.map('parsed_json', self._parse_json_strings,
                                      self._num_workers.parsed_json)
            else:
                dataset = Dataset(ivy.Container({'idx_map':
                                                 container_idx_map}),
                                  'base',
                                  len(container_idx_map),
                                  trans_fn=lambda cont: self._spec.custom_container_load_fn(
                                      self, cont),
                                  elementwise_query_fn=False,
                                  numpy_loading=True,
                                  cache_size=self._base_cache_size,
                                  queue_timeout=self._spec.queue_timeout)
            if 'unused_key_chains' in self._spec:
                dataset = dataset.map('keychain_pruned',
                                      self._prune_unused_key_chains,
                                      self._num_workers.keychain_pruned)
            if self._first_frame_validity_fn is not None:
                dataset = dataset.map(
                    'valid_first_frames',
                    lambda x_: self._first_frame_validity_fn(x_, None),
                    self._num_workers.valid_first_frames)
        if not (self._spec.dataset_spec.sequence_lengths == 1
                and self._window_size == 1):
            # ToDo: add other conditionals which make the loading more efficient if only one of the
            #  above two conditions is True
            dataset = dataset.map(
                'windowed', self._group_container_into_windowed_container,
                self._num_workers.windowed)
            dataset = dataset.unbatch(
                'unbatched',
                self._num_workers.unbatched,
                batch_sizes=[
                    max(seq_len, self._window_size) - self._window_size + 1
                    for seq_len in self._sequence_lengths.values()
                    if seq_len > 0
                ])
        if self._spec.shuffle_buffer_size > 0:
            dataset = dataset.shuffle('shuffled',
                                      self._spec.shuffle_buffer_size,
                                      self._num_workers.shuffled)
        dataset = dataset.map('loaded_data',
                              self._load_data_from_filepath_tensors,
                              self._num_workers.loaded_data)
        dataset = dataset.batch('batched', self._batch_size,
                                self._num_workers.batched)
        dataset = dataset.map(
            'from_np',
            lambda cont: cont.map(lambda x_, kc: ivy.array(x_, dev_str='cpu')),
            self._num_workers.from_np,
            numpy_loading=False)
        if ivy.exists(self._spec.post_proc_fn):
            dataset = dataset.map('post_processed', self._spec.post_proc_fn,
                                  self._num_workers.post_processed)
        if self._spec.with_prefetching:
            dataset = dataset.prefetch('prefetch')
        # ToDo: find way to make pre-fetching to GPU actually pre-fetch, ideally using multi-processing.
        #  For example, swapping prefetch and to_gpu ops around would work if to_gpu could accept self._num_workers.
        if self._spec.prefetch_to_devs:
            if isinstance(self._spec.prefetch_to_devs, str):
                dataset = dataset.to_dev('to_dev', self._spec.prefetch_to_devs)
            elif len(self._spec.prefetch_to_devs) == 1:
                dataset = dataset.to_dev('to_dev',
                                         self._spec.prefetch_to_devs[0])
            else:
                dataset = dataset.to_devs('to_devs',
                                          self._spec.prefetch_to_devs)
        return dataset
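The batch_sizes passed to unbatch above reduce each sequence to its number of valid windows; a plain-Python sketch of that arithmetic with hypothetical lengths:

window_size = 3
sequence_lengths = {0: 5, 1: 3, 2: 0}   # hypothetical per-sequence lengths
batch_sizes = [max(seq_len, window_size) - window_size + 1
               for seq_len in sequence_lengths.values() if seq_len > 0]
# batch_sizes == [3, 1]; empty sequences are skipped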
Example #8
def rasterize_triangles(pixel_coords_triangles, image_dims, batch_shape=None, dev_str=None):
    """
    Rasterize image-projected triangles
    based on: https://www.scratchapixel.com/lessons/3d-basic-rendering/rasterization-practical-implementation/rasterization-stage
    and: https://www.scratchapixel.com/lessons/3d-basic-rendering/rasterization-practical-implementation/rasterization-practical-implementation

    :param pixel_coords_triangles: Projected image-space triangles to be rasterized
                                    *[batch_shape,input_size,3,3]*
    :type pixel_coords_triangles: array
    :param image_dims: Image dimensions.
    :type image_dims: sequence of ints
    :param batch_shape: Shape of batch. Inferred from Inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: Device on which to create the array, e.g. 'cuda:0', 'cuda:1', 'cpu' etc. Same as pixel_coords_triangles if None.
    :type dev_str: str, optional
    :return: Rasterized triangles as a boolean image *[batch_shape,h,w,1]*
    """

    if batch_shape is None:
        batch_shape = []

    if dev_str is None:
        dev_str = _ivy.dev_str(pixel_coords_triangles)

    # shapes as list
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)
    image_dims = list(image_dims)
    input_image_dims = pixel_coords_triangles.shape[num_batch_dims:-2]
    input_image_dims_prod = _reduce(_mul, input_image_dims, 1)

    # BS x 3 x 2
    pixel_xy_coords = pixel_coords_triangles[..., 0:2]

    # BS x 3 x 1
    pixel_x_coords = pixel_coords_triangles[..., 0:1]
    pixel_y_coords = pixel_coords_triangles[..., 1:2]

    # 1
    x_min = _ivy.reshape(_ivy.reduce_min(pixel_x_coords, keepdims=True), (-1,))
    x_max = _ivy.reshape(_ivy.reduce_max(pixel_x_coords, keepdims=True), (-1,))
    x_range = x_max - x_min
    y_min = _ivy.reshape(_ivy.reduce_min(pixel_y_coords, keepdims=True), (-1,))
    y_max = _ivy.reshape(_ivy.reduce_max(pixel_y_coords, keepdims=True), (-1,))
    y_range = y_max - y_min

    # 2
    bbox = _ivy.concatenate((x_range, y_range), 0)
    img_bbox_list = [int(item) for item in _ivy.to_list(_ivy.concatenate((y_range + 1, x_range + 1), 0))]

    # BS x 2
    v0 = pixel_xy_coords[..., 0, :]
    v1 = pixel_xy_coords[..., 1, :]
    v2 = pixel_xy_coords[..., 2, :]
    tri_centres = (v0 + v1 + v2) / 3

    # BS x 1
    v0x = v0[..., 0:1]
    v0y = v0[..., 1:2]
    v1x = v1[..., 0:1]
    v1y = v1[..., 1:2]
    v2x = v2[..., 0:1]
    v2y = v2[..., 1:2]

    # BS x BBX x BBY x 2
    uniform_sample_coords = _ivy_svg.create_uniform_pixel_coords_image(img_bbox_list, batch_shape)[..., 0:2]
    P = _ivy.round(uniform_sample_coords + tri_centres - bbox / 2)

    # BS x BBX x BBY x 1
    Px = P[..., 0:1]
    Py = P[..., 1:2]
    v0v1_edge_func = ((Px - v0x) * (v1y - v0y) - (Py - v0y) * (v1x - v0x)) >= 0
    v1v2_edge_func = ((Px - v1x) * (v2y - v1y) - (Py - v1y) * (v2x - v1x)) >= 0
    v2v0_edge_func = ((Px - v2x) * (v0y - v2y) - (Py - v2y) * (v0x - v2x)) >= 0
    edge_func = _ivy.logical_and(_ivy.logical_and(v0v1_edge_func, v1v2_edge_func), v2v0_edge_func)

    batch_indices_list = list()
    for i, batch_dim in enumerate(batch_shape):
        # get batch shape
        batch_dims_before = batch_shape[:i]
        num_batch_dims_before = len(batch_dims_before)
        batch_dims_after = batch_shape[i + 1:]
        num_batch_dims_after = len(batch_dims_after)

        # [batch_dim]
        batch_indices = _ivy.arange(batch_dim, dtype_str='int32', dev_str=dev_str)

        # [1]*num_batch_dims_before x batch_dim x [1]*num_batch_dims_after x 1 x 1
        reshaped_batch_indices = _ivy.reshape(batch_indices, [1] * num_batch_dims_before + [batch_dim] +
                                              [1] * num_batch_dims_after + [1, 1])

        # BS x N x 1
        tiled_batch_indices = _ivy.tile(reshaped_batch_indices, batch_dims_before + [1] + batch_dims_after +
                                        [input_image_dims_prod * 9, 1])
        batch_indices_list.append(tiled_batch_indices)

    # BS x N x (num_batch_dims + 2)
    all_indices = _ivy.concatenate(
        batch_indices_list + [_ivy.cast(_ivy.flip(_ivy.reshape(P, batch_shape + [-1, 2]), -1),
                                        'int32')], -1)

    # offset uniform images
    return _ivy.cast(_ivy.flip(_ivy.scatter_nd(_ivy.reshape(all_indices, [-1, num_batch_dims + 2]),
                                               _ivy.reshape(_ivy.cast(edge_func, 'int32'), (-1, 1)),
                                               batch_shape + image_dims + [1],
                                               reduction='replace' if _ivy.backend == 'mxnd' else 'sum'), -3), 'bool')
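A minimal usage sketch (hypothetical values, assuming an ivy backend has been set): rasterize a single triangle, given as three per-vertex homogeneous pixel co-ordinates, into a 5x5 boolean image:

# one triangle: *[input_size=1, 3 vertices, (x, y, depth)]*
tri = _ivy.array([[[1., 1., 1.],
                   [3., 1., 1.],
                   [2., 3., 1.]]])
raster = rasterize_triangles(tri, [5, 5])
# raster has shape (5, 5, 1); True where a pixel falls inside the triangle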