Example #1
def _euclidean_squared_distance(input1, input2):
    """Computes euclidean squared distance.

    Args:
        input1 : 2-D feature matrix.
        input2 : 2-D feature matrix.

    Returns:
        distance matrix.
    """
    m, n = input1.shape[0], input2.shape[0]
    temp1 = math.reduce_sum(math.pow(
        input1, flow.constant_like(input1, 2, dtype=flow.float32)),
                            axis=1)
    temp2 = math.reduce_sum(math.pow(
        input2, flow.constant_like(input2, 2, dtype=flow.float32)),
                            axis=1)
    shape_tensor1 = flow.constant(value=0.0, dtype=flow.float32, shape=(m, n))
    shape_tensor2 = flow.constant(value=0.0, dtype=flow.float32, shape=(n, m))
    temp1 = flow.broadcast_like(temp1, like=shape_tensor1, broadcast_axes=[1])
    temp2 = flow.transpose(flow.broadcast_like(temp2,
                                               like=shape_tensor2,
                                               broadcast_axes=[1]),
                           perm=(1, 0))

    dismat = math.add(temp1, temp2)

    return math.add(
        dismat,
        math.multiply(-2,
                      flow.matmul(input1, flow.transpose(input2,
                                                         perm=(1, 0)))))
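This snippet relies on the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a·b, with broadcast_like tiling the row-wise squared norms into an (m, n) grid. A minimal NumPy sketch of the same computation, for reference only (function and variable names are illustrative, not from the original code):

import numpy as np

def euclidean_squared_distance_np(x1, x2):
    # row-wise squared norms, shaped for broadcasting to (m, n)
    sq1 = np.sum(x1 ** 2, axis=1, keepdims=True)      # (m, 1)
    sq2 = np.sum(x2 ** 2, axis=1, keepdims=True).T    # (1, n)
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    return sq1 + sq2 - 2.0 * (x1 @ x2.T)

x1 = np.random.rand(4, 8).astype(np.float32)
x2 = np.random.rand(5, 8).astype(np.float32)
ref = ((x1[:, None, :] - x2[None, :, :]) ** 2).sum(-1)
assert np.allclose(euclidean_squared_distance_np(x1, x2), ref, atol=1e-4)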
Example #2
    def build(self, inputs, targets):
        """
        Args:
            inputs (torch.Tensor): feature matrix with shape (batch_size, feat_dim).
            targets (torch.LongTensor): ground truth labels with shape (batch_size).
        """
        n = inputs.shape[0]
        dist = math.reduce_sum(math.pow(
            inputs, flow.constant_like(inputs, 2, dtype=flow.float32)),
                               axis=1)
        shape_tensor = flow.constant(value=0.0,
                                     dtype=flow.float32,
                                     shape=(n, n))
        dist = flow.broadcast_like(dist, like=shape_tensor, broadcast_axes=[1])
        dist = math.add(
            dist, flow.transpose(dist, perm=(1, 0),
                                 batch_axis_non_change=True))
        temp1 = math.multiply(
            -2,
            flow.matmul(
                inputs,
                flow.transpose(inputs, perm=(1, 0),
                               batch_axis_non_change=True)))
        dist = math.add(dist, temp1)
        dist = math.sqrt(flow.clamp(dist, min_value=1e-12))
        mask = math.equal(
            flow.broadcast_like(targets, like=shape_tensor,
                                broadcast_axes=[1]),
            flow.transpose(flow.broadcast_like(targets,
                                               like=shape_tensor,
                                               broadcast_axes=[1]),
                           perm=(1, 0),
                           batch_axis_non_change=True))
        mask_rev = math.not_equal(
            flow.broadcast_like(targets, like=shape_tensor,
                                broadcast_axes=[1]),
            flow.transpose(flow.broadcast_like(targets,
                                               like=shape_tensor,
                                               broadcast_axes=[1]),
                           perm=(1, 0),
                           batch_axis_non_change=True))
        dist_ap, dist_an = [], []
        for i in range(n):
            temp_dist = flow.slice_v2(dist, [(i, i + 1, 1)])
            temp_mask = flow.slice_v2(mask, [(i, i + 1, 1)])
            temp_mask_rev = flow.slice_v2(mask_rev, [(i, i + 1, 1)])
            dist_ap.append(
                math.reduce_max(
                    flow.gather_nd(temp_dist, flow.where(temp_mask))))
            dist_an.append(
                math.reduce_min(
                    flow.gather_nd(temp_dist, flow.where(temp_mask_rev))))
        dist_ap = flow.concat(dist_ap, 0)
        dist_an = flow.concat(dist_an, 0)
        y = flow.ones_like(dist_an)
        # return dist_an, dist_ap, y

        return self._MarginRankingLoss(dist_an, dist_ap, y)
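The build method above performs batch-hard triplet mining: for each anchor it keeps the farthest same-label sample (dist_ap) and the nearest different-label sample (dist_an), then feeds both into a margin ranking loss with target y = 1. A compact NumPy sketch of that mining step, assuming a standard margin ranking loss (the margin value and names are illustrative, not taken from the snippet):

import numpy as np

def batch_hard_margin_loss_np(dist, targets, margin=0.3):
    # dist: (n, n) pairwise distances, targets: (n,) integer labels
    same = targets[:, None] == targets[None, :]
    dist_ap = np.where(same, dist, -np.inf).max(axis=1)   # hardest positive per anchor
    dist_an = np.where(~same, dist, np.inf).min(axis=1)   # hardest negative per anchor
    # margin ranking loss with y = 1: mean(max(0, margin - (dist_an - dist_ap)))
    return np.maximum(0.0, margin + dist_ap - dist_an).mean()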
Example #3
def masked_select_op(input, mask):
    """

    Returns a new 1-D tensor that indexes the input tensor according to the boolean mask ``mask``, which is a BoolTensor (in OneFlow, BoolTensor is replaced by Int8Tensor).

    The shapes of the mask tensor and the input tensor don’t need to match, but they must be broadcastable.

    Args:
        input (Tensor): the input tensor.
        mask (Tensor): the tensor containing the binary mask to index with

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> import numpy as np
        
        >>> input = flow.tensor(np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]), dtype=flow.float32)
        >>> mask = input.gt(0.05)
        >>> out = flow.masked_select(input, mask)
        >>> out
        tensor([0.3139, 0.3898], dtype=oneflow.float32)
    """

    assert len(input.shape) == len(
        mask.shape
    ), "The dimensions of masked_select's inputs do not match, please check!"
    broadcast_like_shape = []
    broadcast_x_axes = []
    broadcast_mask_axes = []
    for i in range(len(input.shape)):
        max_dim = max(input.shape[i], mask.shape[i])
        broadcast_like_shape.append(max_dim)
        if max_dim != input.shape[i]:
            broadcast_x_axes.append(i)
        if max_dim != mask.shape[i]:
            broadcast_mask_axes.append(i)
    broadcast_like_tensor = flow.zeros(tuple(broadcast_like_shape),
                                       dtype=flow.float32,
                                       device=input.device)
    broadcast_like_tensor.requires_grad = input.requires_grad or mask.requires_grad
    if len(broadcast_x_axes) != 0:
        input = flow.broadcast_like(input,
                                    broadcast_like_tensor,
                                    broadcast_axes=tuple(broadcast_x_axes))
    if len(broadcast_mask_axes) != 0:
        mask = flow.broadcast_like(mask,
                                   broadcast_like_tensor,
                                   broadcast_axes=tuple(broadcast_mask_axes))
    mask = mask.to(dtype=input.dtype)
    res = flow._C.mul(input, mask)
    indices = flow.argwhere(res)
    gather_res = flow._C.gather_nd(res, indices)
    return gather_res.flatten()
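The helper broadcasts input and mask to a common shape, multiplies them, and gathers the indices of the nonzero products. A NumPy sketch of that logic (illustrative only, not the OneFlow kernel):

import numpy as np

def masked_select_np(input, mask):
    # broadcast both operands to the common shape, as the helper above does
    input_b, mask_b = np.broadcast_arrays(np.asarray(input), np.asarray(mask))
    res = input_b * mask_b.astype(input_b.dtype)   # zero out unselected entries
    idx = np.argwhere(res)                         # argwhere + gather_nd
    return res[tuple(idx.T)].reshape(-1)

x = np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]], dtype=np.float32)
print(masked_select_np(x, x > 0.05))   # [0.3139 0.3898]

Note that this multiply-then-argwhere approach (in the sketch and in the function above) also drops elements that are exactly zero even when the mask selects them.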
Example #4
def test_broadcast_like_runtime_error(test_case):
    with test_case.assertRaises(Exception) as context:
        x = flow.ones((1, 0), dtype=flow.float32, requires_grad=True)
        like = flow.ones((2, 2, 2), dtype=flow.float32, requires_grad=True)
        y = flow.broadcast_like(x, like)
    test_case.assertTrue(
        "The expanded size of the tensor" in str(context.exception))
Example #5
def SeModule(name, x, channel, reduction=4):
    N, C, H, W = x.shape

    y = flow.nn.avg_pool2d(x, ksize=[H, W], strides=None, padding="SAME")
    y = flow.flatten(y, start_dim=1, end_dim=-1)
    y = flow.layers.dense(
        y,
        units=channel // reduction,
        use_bias=False,
        kernel_initializer=_get_initializer("dense_weight"),
        bias_initializer=_get_initializer("bias"),
        kernel_regularizer=_get_regularizer("dense_weight"),
        bias_regularizer=_get_regularizer("bias"),
        name=name + "dense1a",
    )
    y = flow.math.relu(y)
    y = flow.layers.dense(
        y,
        units=channel,
        use_bias=False,
        kernel_initializer=_get_initializer("dense_weight"),
        bias_initializer=_get_initializer("bias"),
        kernel_regularizer=_get_regularizer("dense_weight"),
        bias_regularizer=_get_regularizer("bias"),
        name=name + "dense2",
    )
    y = hsigmoid(y)
    y = flow.expand_dims(input=y, axis=2)
    y = flow.expand_dims(input=y, axis=3)
    y_expand = flow.broadcast_like(y, x, broadcast_axes=(2, 3))
    out = x * y_expand
    return out
Example #6
def _test_broadcast_like_4dim(test_case, device):
    input = flow.tensor(
        np.ones(shape=(1, 3, 2, 1), dtype=np.float32),
        dtype=flow.float32,
        device=flow.device(device),
    )
    like_tensor = flow.tensor(
        np.ones(shape=(3, 3, 2, 3), dtype=np.float32),
        dtype=flow.float32,
        device=flow.device(device),
    )
    of_out = flow.broadcast_like(input, like_tensor, broadcast_axes=(0, 3))
    np_out = np.ones(shape=(3, 3, 2, 3))
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
Example #7
def watch_matmul_diff_job(
        images: tp.Numpy.Placeholder((3, 3),
                                     dtype=flow.float), ) -> None:
    weight_initializer = flow.constant_initializer(2)
    weight_shape = (3, 1)
    weight = flow.get_variable("three-weight",
                               shape=weight_shape,
                               initializer=weight_initializer)
    weight_broadcast = flow.broadcast_like(weight,
                                           like=images,
                                           broadcast_axes=(1, ))
    lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
    flow.optimizer.SGD(lr_scheduler,
                       momentum=0.9).minimize(weight_broadcast)
    flow.watch_diff(weight, watch_diff_handler)
Example #8
def _test_broadcast_like_backward(test_case, device):
    input = flow.tensor(
        np.ones(shape=(3, 1, 1), dtype=np.float32),
        dtype=flow.float32,
        device=flow.device(device),
        requires_grad=True,
    )
    like_tensor = flow.tensor(
        np.ones(shape=(3, 3, 3), dtype=np.float32),
        dtype=flow.float32,
        device=flow.device(device),
        requires_grad=True,
    )
    of_out = flow.broadcast_like(input, like_tensor, broadcast_axes=(1, 2))
    of_out = of_out.sum()
    of_out.backward()
    np_grad = [[[9.0]], [[9.0]], [[9.0]]]
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-05,
                                     1e-05))
Example #9
def broadcast_like_forward(
        x: tp.Numpy.Placeholder(shape=input_shape, dtype=flow.float),
        y: tp.Numpy.Placeholder(shape=like_shape, dtype=flow.float),
):
    with flow.scope.placement(device_type, "0:0"):
        return flow.broadcast_like(x, y, broadcast_axes=broadcast_axes)
Example #10
    def forward(self, inputs, targets):
        n = inputs.shape[0]
        # Compute pairwise distance, replace by the official when merged
        tempname = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S.%f')
        shape_tensor = flow.constant(value=0.0,
                                     dtype=flow.float32,
                                     shape=(n, n))
        if self.distance == 'euclidean':
            blob_2 = flow.get_variable(
                "blob_2_" + tempname,
                shape=inputs.shape,
                initializer=flow.constant_initializer(2),
                dtype=inputs.dtype)
            dist = flow.math.pow(inputs, blob_2)

            dist = flow.math.reduce_sum(dist, axis=1, keepdims=True)
            dist = flow.broadcast_like(dist, shape_tensor)
            tempdist = flow.transpose(dist)
            dist = dist + tempdist
            inputs_t = flow.transpose(inputs)
            dist = addmm(dist, inputs, inputs_t, beta=1, alpha=-2)
            dist = flow.clamp(dist, min_value=1e-12)
            dist = flow.math.sqrt(dist)
        elif self.distance == 'cosine':
            #fnorm=flow.math.l2_normalize(inputs, axis=1)
            fnorm = flow.math.reduce_mean(flow.math.divide(
                inputs, flow.math.l2_normalize(inputs, axis=1)),
                                          axis=1,
                                          keepdims=True)

            expand_fnorm = flow.broadcast_like(fnorm,
                                               like=inputs,
                                               broadcast_axes=[1])
            l2norm = flow.math.divide(inputs, expand_fnorm)
            l2norm_t = flow.transpose(l2norm, perm=(1, 0))
            dist = flow.math.negative(flow.matmul(l2norm, l2norm_t))
        # For each anchor, find the hardest positive and negative
        mask = math.equal(
            flow.broadcast_like(targets, like=shape_tensor,
                                broadcast_axes=[1]),
            flow.transpose(flow.broadcast_like(targets,
                                               like=shape_tensor,
                                               broadcast_axes=[1]),
                           perm=(1, 0),
                           batch_axis_non_change=True))
        mask_rev = math.not_equal(
            flow.broadcast_like(targets, like=shape_tensor,
                                broadcast_axes=[1]),
            flow.transpose(flow.broadcast_like(targets,
                                               like=shape_tensor,
                                               broadcast_axes=[1]),
                           perm=(1, 0),
                           batch_axis_non_change=True))
        dist_ap, dist_an = [], []
        for i in range(n):
            temp_dist = flow.slice_v2(dist, [(i, i + 1, 1)])
            temp_mask = flow.slice_v2(mask, [(i, i + 1, 1)])
            temp_mask_rev = flow.slice_v2(mask_rev, [(i, i + 1, 1)])
            temp_dist_ap = flow.expand_dims(
                math.reduce_max(
                    flow.gather_nd(temp_dist, flow.where(temp_mask))), 0)
            temp_dist_an = flow.expand_dims(
                math.reduce_min(
                    flow.gather_nd(temp_dist, flow.where(temp_mask_rev))), 0)
            dist_ap.append(temp_dist_ap)
            dist_an.append(temp_dist_an)
        dist_ap = flow.concat(dist_ap, 0)
        dist_an = flow.concat(dist_an, 0)
        y = flow.ones_like(dist_an)
        return self._MarginRankingLoss(dist_an, dist_ap, y)
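For the cosine branch, a common reference formulation is to L2-normalize each row and negate the resulting similarity matrix; the snippet above uses its own normalization via reduce_mean, so the sketch below is the standard version for comparison only (names are illustrative):

import numpy as np

def cosine_distance_np(inputs):
    # L2-normalize each row, then the negated dot products act as a distance surrogate
    l2norm = inputs / np.linalg.norm(inputs, axis=1, keepdims=True)
    return -(l2norm @ l2norm.T)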
Example #11
    def decode(self, feature_map, anchors, stride, prefix='yolo'):
        '''
        Returns a tensor of shape [batch_size, output_size, output_size, anchor_per_scale, 5 + num_classes]
        containing (x, y, w, h, score, probability).
        :param feature_map: [N, H, W, 3 * (5 + num_class)]
        :param anchors: [3, 2]
        :param stride:
        :return: (x, y, w, h, score, probability)
            [pred_xywh, pred_conf, pred_prob]: [N, H, W, 3, 4+1+class_num]
        '''
        # [N, H, W, 3, 5 + num_class]
        feature_map = flow.reshape(
            feature_map,
            shape=(feature_map.shape[0], feature_map.shape[1],
                   feature_map.shape[2], self.anchor_per_scale, -1))

        # shape: [N, H, W, 3, 2]
        box_centers = flow.slice(feature_map,
                                 begin=[None, None, None, None, 0],
                                 size=[None, None, None, None, 2])
        # shape: [N, H, W, 3, 2]
        box_sizes = flow.slice(feature_map,
                               begin=[None, None, None, None, 2],
                               size=[None, None, None, None, 2])
        # shape: [N, H, W, 3, 1]
        conf_logits = flow.slice(feature_map,
                                 begin=[None, None, None, None, 4],
                                 size=[None, None, None, None, 1])
        # shape: [N, H, W, 3, class_num]
        prob_logits = flow.slice(
            feature_map,
            begin=[None, None, None, None, 5],
            size=[None, None, None, None, feature_map.shape[-1] - 5])

        # obtain the x_y_offset
        grid_size = feature_map.shape[1:3]
        grid_x = flow.range(grid_size[1],
                            dtype=flow.float32,
                            name=prefix + '_decode_range1')
        grid_x = flow.expand_dims(grid_x, axis=0)
        like_tensor = flow.constant(value=1.0,
                                    dtype=flow.float32,
                                    shape=(grid_size[0], grid_size[1]))
        grid_x = flow.broadcast_like(grid_x,
                                     like_tensor,
                                     broadcast_axes=(0, ),
                                     name=prefix + 'yolo_grid_x')
        grid_y = flow.range(grid_size[0],
                            dtype=flow.float32,
                            name=prefix + '_yolo_decode_range2')
        grid_y = flow.expand_dims(grid_y, axis=1)
        grid_y = flow.broadcast_like(grid_y,
                                     like_tensor,
                                     broadcast_axes=(1, ),
                                     name=prefix + 'yolo_grid_y')
        x_offset = flow.expand_dims(grid_x, axis=-1)
        y_offset = flow.expand_dims(grid_y, axis=-1)
        #shape: [1, H, W, 1 ,2]
        x_y_offset = flow.concat([x_offset, y_offset], axis=-1)
        x_y_offset = flow.expand_dims(x_y_offset, axis=0)
        x_y_offset = flow.expand_dims(x_y_offset, axis=-2)

        pred_xy = (flow.math.sigmoid(box_centers) + x_y_offset) * stride
        pred_wh = (flow.math.exp(box_sizes) *
                   anchors) * stride  # anchor relative to the feature map
        # shape: [N, H, W, 3, 4]
        pred_xywh = flow.concat([pred_xy, pred_wh], axis=-1)

        pred_conf = flow.math.sigmoid(conf_logits)
        pred_prob = flow.math.sigmoid(prob_logits)

        pred = flow.concat([pred_xywh, pred_conf, pred_prob], axis=-1)
        # shape:
        #  pred: [N, H, W, 3, 4+1+class_num]
        #  x_y_offset: [1, H, W, 1, 2]
        return pred, x_y_offset
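The grid-offset construction in decode (range, expand_dims, broadcast_like, concat) amounts to building a meshgrid of cell indices with shape [1, H, W, 1, 2]. A minimal NumPy sketch of that step (names are illustrative):

import numpy as np

def make_xy_offset(grid_h, grid_w):
    # column indices repeated down the rows, row indices repeated across the columns
    grid_x = np.broadcast_to(np.arange(grid_w, dtype=np.float32)[None, :], (grid_h, grid_w))
    grid_y = np.broadcast_to(np.arange(grid_h, dtype=np.float32)[:, None], (grid_h, grid_w))
    xy = np.stack([grid_x, grid_y], axis=-1)       # (H, W, 2)
    return xy[None, :, :, None, :]                 # (1, H, W, 1, 2)

print(make_xy_offset(2, 3).shape)   # (1, 2, 3, 1, 2)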