Example #1
    def _cdf(self, value, probs=None):
        r"""
        Cumulative distribution function (cdf) of Categorical distributions.

        Args:
            value (Tensor): The value to be evaluated.
            probs (Tensor): Event probabilities. Default: self.probs.
        """
        value = self._check_value(value, 'value')
        probs = self._check_param_type(probs)

        # find the right integer to compute the index
        # here we simulate casting to int while keeping the float dtype
        value = self.cast(value, self.dtypeop(probs))

        zeros = self.fill(self.dtypeop(value), self.shape(value), 0.0)
        between_zero_neone = self.logicand(
            self.less(value, 0.0), self.greater(value, -1.0))
        value = self.select(between_zero_neone, zeros, P.Floor()(value))

        # handle the case when value is of shape () and probs is a scalar batch
        drop_dim = False
        if self.shape(value) == () and self.shape(probs)[:-1] == ():
            drop_dim = True
            # manually add one more dimension: () -> (1,)
            # drop this dimension before return
            value = self.expand_dim(value, -1)

        value = self.expand_dim(value, -1)

        broadcast_shape_tensor = probs * value
        broadcast_shape = self.shape(broadcast_shape_tensor)
        num_classes = broadcast_shape[-1]
        label_shape = broadcast_shape[:-1]

        probs = self.broadcast(probs, broadcast_shape_tensor)
        value = self.broadcast(value, broadcast_shape_tensor)[..., :1]

        # flatten value to shape (number of labels, 1)
        value = self.reshape(value, (-1, 1))

        # drop one dimension to match the cdf shape
        # clip value to the range [0, num_classes - 1] and cast to int32
        less_than_zero = self.squeeze_last_axis(self.less(value, 0.0))
        value_clipped = self.clip_by_value(value, 0.0, num_classes - 1)
        value_clipped = self.cast(value_clipped, self.index_type)

        index = self.reshape(nn.Range(0, self.shape(value)[0], 1)(), (-1, 1))
        index = self.concat((index, value_clipped))

        # reshape probs and fill less_than_zero places with 0
        probs = self.reshape(probs, (-1, num_classes))
        cdf = self.gather(self.cumsum(probs, 1), index)
        zeros = self.fill(self.dtypeop(cdf), self.shape(cdf), 0.0)
        cdf = self.select(less_than_zero, zeros, cdf)
        cdf = self.reshape(cdf, label_shape)

        if drop_dim:
            return self.squeeze(cdf)
        return cdf
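For context, here is a minimal usage sketch of the public cdf entry point that wraps this method (assuming the standard mindspore.nn.probability.distribution.Categorical API; the printed values are illustrative):

import numpy as np
import mindspore as ms
import mindspore.nn.probability.distribution as msd

# One categorical distribution over three classes.
dist = msd.Categorical(probs=[0.2, 0.3, 0.5])

# cdf(k) accumulates probs[0..k], e.g. cdf(1) = 0.2 + 0.3 = 0.5.
value = ms.Tensor(np.array([0, 1, 2]), ms.float32)
print(dist.cdf(value))  # expected: [0.2, 0.5, 1.0]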
Example #2
    def _log_prob(self, value, probs=None):
        r"""
        Evaluate log probability.

        Args:
            value (Tensor): The value to be evaluated.
            probs (Tensor): Event probabilities. Default: self.probs.
        """
        value = self._check_value(value, 'value')

        probs = self._check_param_type(probs)
        logits = self.log(probs)

        # find the right integer to compute the index
        # here we simulate casting to int while keeping the float dtype
        value = self.cast(value, self.dtypeop(probs))

        zeros = self.fill(self.dtypeop(value), self.shape(value), 0.0)
        neg_one = self.fill(self.dtypeop(value), self.shape(value), -1.0)
        value = self.select(self.is_nan(value), neg_one, value)
        between_zero_neone = self.logicand(
            self.less(value, 0.0), self.greater(value, -1.0))
        value = self.select(between_zero_neone, zeros, P.Floor()(value))

        # handle the case when value is of shape () and probs is a scalar batch
        drop_dim = False
        if self.shape(value) == () and self.shape(probs)[:-1] == ():
            drop_dim = True
            # manually add one more dimension: () -> (1,)
            # drop this dimension before return
            value = self.expand_dim(value, -1)

        value = self.expand_dim(value, -1)

        broadcast_shape_tensor = logits * value
        broadcast_shape = self.shape(broadcast_shape_tensor)
        num_classes = broadcast_shape[-1]
        label_shape = broadcast_shape[:-1]

        # broadcasting logits and value
        # logits_pmf will have shape (num of labels, C)
        logits = self.broadcast(logits, broadcast_shape_tensor)
        value = self.broadcast(value, broadcast_shape_tensor)[..., :1]

        # flatten value to shape (number of labels, 1)
        # clip value to the range [0, num_classes - 1] and cast to int32
        value = self.reshape(value, (-1, 1))
        out_of_bound = self.squeeze_last_axis(self.logicor(
            self.less(value, 0.0), self.less(num_classes - 1, value)))
        # deal with the case where there is only one class.
        value_clipped = self.clip_by_value(value, 0.0, num_classes - 1)
        value_clipped = self.cast(value_clipped, self.index_type)
        # create row indices 0 ... num_labels - 1
        index = self.reshape(nn.Range(0, self.shape(value)[0], 1)(), (-1, 1))
        index = self.concat((index, value_clipped))

        # index into logits_pmf and fill out_of_bound places with nan
        # then reshape back into label_shape
        logits_pmf = self.gather(self.reshape(logits, (-1, num_classes)),
                                 index)
        nan = self.fill(self.dtypeop(logits_pmf), self.shape(logits_pmf),
                        self.nan)
        logits_pmf = self.select(out_of_bound, nan, logits_pmf)
        ans = self.reshape(logits_pmf, label_shape)
        if drop_dim:
            return self.squeeze(ans)
        return ans
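A similar sketch for the public log_prob wrapper, showing the batch broadcasting this method implements (same assumed API; outputs are illustrative):

import mindspore as ms
import mindspore.nn.probability.distribution as msd

# A batch of two categorical distributions; a scalar value broadcasts
# against the batch, yielding log(probs[..., 1]) per distribution.
batch = msd.Categorical(probs=[[0.9, 0.1], [0.4, 0.6]])
print(batch.log_prob(ms.Tensor(1.0, ms.float32)))
# expected: [log(0.1), log(0.6)] ~= [-2.3026, -0.5108]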
Example #3
    # input two tensors, their shapes do not match
    ('FloorDiv2', {
        'block': (P.FloorDiv(), {
            'exception': ValueError,
            'error_keywords': ['FloorDiv']
        }),
        'desc_inputs': [
            Tensor(np.ones([3, 5]).astype(np.float32)),
            Tensor(np.ones([3, 4]).astype(np.float32))
        ],
        'skip': ['backward']
    }),

    # input x is Tensor(int32), not Tensor(float)
    ('Floor1', {
        'block': (P.Floor(), {
            'exception': TypeError,
            'error_keywords': ['Floor']
        }),
        'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))],
        'skip': ['backward']
    }),

    # input two tensors, their shapes do not match
    ('FloorMod2', {
        'block': (P.FloorMod(), {
            'exception': ValueError,
            'error_keywords': ['FloorMod']
        }),
        'desc_inputs': [
            Tensor(np.ones([3, 5]).astype(np.float32)),
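These entries configure an exception-checking harness: each block is instantiated and called with desc_inputs, and the harness asserts that the named exception is raised. A standalone reproduction of the Floor1 case might look like this (a sketch; Floor is defined only for float tensors):

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor

# Floor rejects integer tensors with a TypeError.
try:
    P.Floor()(Tensor(np.ones([2, 3]).astype(np.int32)))
except TypeError as err:
    print("Floor raised TypeError as expected:", err)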
Example #4
     'desc_inputs': [[2, 3, 3, 5]],
     'desc_bprop': [[2, 3, 3, 5]]}),
 ('Pow_1', {
     'block': P.Pow(),
     'desc_inputs': [[3, 5], [2, 3, 3, 5]],
     'desc_bprop': [[2, 3, 3, 5]]}),
 ('Exp', {
     'block': P.Exp(),
     'desc_inputs': [[2, 3]],
     'desc_bprop': [[2, 3]]}),
 ('Erf', {
     'block': P.Erf(),
     'desc_inputs': [Tensor(np.array([-2, -1, 0, 1, 2]).astype(np.float16))],
     'desc_bprop': [Tensor(np.array([-2, -1, 0, 1, 2]).astype(np.float16))]}),
 ('Floor', {
     'block': P.Floor(),
     'desc_inputs': [[2, 512, 56, 56]],
     'desc_bprop': [[2, 512, 56, 56]],
     'skip': ['backward']}),
 ('ACos', {
     'block': P.ACos(),
     'desc_inputs': [[2, 3]],
     'desc_bprop': [[2, 3]]}),
 ('Acosh', {
     'block': P.Acosh(),
     'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16))],
     'skip': ['backward']}),
 ('Sin', {
     'block': P.Sin(),
     'desc_inputs': [[2, 3]],
     'desc_bprop': [[2, 3]]}),
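In this table a bare shape such as [2, 3] stands for a tensor of that shape that the harness materializes before running the op forward (and backward, unless 'skip': ['backward'] is set). The Floor case, done by hand, would be roughly (a sketch, not the harness's actual code):

import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor

# Shape [2, 512, 56, 56] becomes a float tensor fed to the op.
x = Tensor(np.ones([2, 512, 56, 56]).astype(np.float32))
print(P.Floor()(x).shape)  # (2, 512, 56, 56)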
Example #5
class FloorNet(nn.Cell):
    def __init__(self):
        super(FloorNet, self).__init__()
        self.floor = P.Floor()

    def construct(self, x):
        return self.floor(x)
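A quick sketch of exercising the cell (input values are arbitrary; the imports mindspore.nn as nn and mindspore.ops.operations as P are assumed from the original file):

import numpy as np
from mindspore import Tensor

net = FloorNet()
print(net(Tensor(np.array([1.1, 2.5, -1.5]).astype(np.float32))))
# expected: [ 1.  2. -2.]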
Example #6
    def bilinear_sampler(self, img, x, y):
        """
        Performs bilinear sampling of the input images according to the
        normalized coordinates provided by the sampling grid. Note that
        the sampling is done identically for each channel of the input.

        To test if the function works properly, the output image should be
        identical to the input image when theta is initialized to the
        identity transform.

        Input
        -----
        - img: batch of images in (B, H, W, C) layout.
        - x, y: coordinates of the sampling grid, as produced by
          affine_grid_generator.

        Returns
        -------
        - out: images interpolated according to the grid; same size as the grid.
        """
        shape = P.Shape()
        H = shape(img)[1]
        W = shape(img)[2]
        cast = P.Cast()
        max_y = cast(H - 1, mindspore.float32)
        max_x = cast(W - 1, mindspore.float32)
        zero = self.zero

        # rescale x and y from [-1, 1] to the image coordinate range
        x = 0.5 * ((x + 1.0) * (max_x - 1))
        y = 0.5 * ((y + 1.0) * (max_y - 1))

        # grab 4 nearest corner points for each (x_i, y_i)
        floor = P.Floor()
        x0 = floor(x)
        x1 = x0 + 1
        y0 = floor(y)
        y1 = y0 + 1

        # clip to [0, W-1] (x) and [0, H-1] (y) to stay inside the image
        x0 = C.clip_by_value(x0, zero, max_x)
        x1 = C.clip_by_value(x1, zero, max_x)
        y0 = C.clip_by_value(y0, zero, max_y)
        y1 = C.clip_by_value(y1, zero, max_y)

        # get pixel value at corner coords
        Ia = self.get_pixel_value(img, x0, y0)
        Ib = self.get_pixel_value(img, x0, y1)
        Ic = self.get_pixel_value(img, x1, y0)
        Id = self.get_pixel_value(img, x1, y1)

        # recast as float for delta calculation
        x0 = cast(x0, mindspore.float32)
        x1 = cast(x1, mindspore.float32)
        y0 = cast(y0, mindspore.float32)
        y1 = cast(y1, mindspore.float32)

        # calculate deltas
        wa = (x1 - x) * (y1 - y)
        wb = (x1 - x) * (y - y0)
        wc = (x - x0) * (y1 - y)
        wd = (x - x0) * (y - y0)

        # add a trailing channel dimension so the weights broadcast over C
        expand_dims = P.ExpandDims()
        wa = expand_dims(wa, 3)
        wb = expand_dims(wb, 3)
        wc = expand_dims(wc, 3)
        wd = expand_dims(wd, 3)

        # compute output
        add_n = P.AddN()
        out = add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])

        return out
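The corner-weight arithmetic above can be checked in isolation: the four weights always sum to 1, which is why an identity transform reproduces the input image exactly, as the docstring suggests. A standalone sketch (bilinear_weights is a hypothetical helper mirroring wa..wd):

import numpy as np

def bilinear_weights(x, y):
    # Weights of the four corners around (x, y), matching wa..wd above.
    x0, y0 = np.floor(x), np.floor(y)
    x1, y1 = x0 + 1, y0 + 1
    wa = (x1 - x) * (y1 - y)  # corner (x0, y0)
    wb = (x1 - x) * (y - y0)  # corner (x0, y1)
    wc = (x - x0) * (y1 - y)  # corner (x1, y0)
    wd = (x - x0) * (y - y0)  # corner (x1, y1)
    return wa, wb, wc, wd

print(bilinear_weights(2.25, 3.5))       # (0.375, 0.375, 0.125, 0.125)
print(sum(bilinear_weights(2.25, 3.5)))  # 1.0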
Example #7
 def __init__(self, strategy1, strategy2):
     super().__init__()
     self.matmul = P.MatMul().shard(strategy1)
     self.floor = P.Floor().shard(strategy2)
     self.matmul2 = P.MatMul().shard(strategy1)
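This excerpt stops at __init__; a runnable completion in the conventional MindSpore parallel-test shape might look as follows (the class name Net, the construct body, and the strategy tuples are all assumptions; shard takes one partition sub-tuple per operator input):

import mindspore.nn as nn
import mindspore.ops.operations as P

class Net(nn.Cell):
    def __init__(self, strategy1, strategy2):
        super().__init__()
        self.matmul = P.MatMul().shard(strategy1)
        self.floor = P.Floor().shard(strategy2)
        self.matmul2 = P.MatMul().shard(strategy1)

    def construct(self, x, y, b):
        # matmul -> elementwise floor -> matmul, under the given shards
        out = self.floor(self.matmul(x, y))
        return self.matmul2(out, b)

# Hypothetical 8-device layout: split the row dimension of the first
# MatMul input; elementwise Floor keeps the same (8, 1) split.
net = Net(((8, 1), (1, 1)), ((8, 1),))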