Example #1
    def __init__(self, bins=10, momentum=0.0, mu=0.02):
        super(GHMRLoss, self).__init__()
        self.bins = bins
        self.momentum = momentum
        self.mu = mu
        edges_left = np.array([float(x) / bins for x in range(bins)], dtype=np.float32)
        self.edges_left = Tensor(edges_left.reshape((bins, 1, 1, 1, 1)))
        edges_right = np.array([float(x) / bins for x in range(1, bins + 1)], dtype=np.float32)
        edges_right[-1] += 1e-4
        self.edges_right = Tensor(edges_right.reshape((bins, 1, 1, 1, 1)))

        if momentum >= 0:
            # EMA buffer of per-bin sample counts; it is only updated when momentum > 0
            self.acc_sum = Parameter(initializer(0, [bins], mstype.float32))

        self.abs = ops.Abs()
        self.sqrt = ops.Sqrt()
        self.cast = ops.Cast()
        self.select = ops.Select()
        self.reshape = ops.Reshape()
        self.reduce_sum = ops.ReduceSum()
        self.max = ops.Maximum()
        self.less = ops.Less()
        self.equal = ops.Equal()
        self.greater = ops.Greater()
        self.logical_and = ops.LogicalAnd()
        self.greater_equal = ops.GreaterEqual()
        self.zeros_like = ops.ZerosLike()
        self.expand_dims = ops.ExpandDims()
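The edge tensors are reshaped to (bins, 1, 1, 1, 1) so they broadcast against a 4-D error map and assign every element to exactly one gradient-density bin; widening the last right edge by 1e-4 keeps an error of exactly 1.0 inside the final bin. A minimal NumPy sketch of that binning, with illustrative (assumed) map dimensions:

import numpy as np

bins = 10
edges_left = np.array([x / bins for x in range(bins)], np.float32).reshape(bins, 1, 1, 1, 1)
edges_right = np.array([x / bins for x in range(1, bins + 1)], np.float32)
edges_right[-1] += 1e-4  # so a value of exactly 1.0 still lands in the last bin
edges_right = edges_right.reshape(bins, 1, 1, 1, 1)

g = np.random.rand(2, 3, 8, 8).astype(np.float32)  # stand-in per-element gradient norm
inds = (g >= edges_left) & (g < edges_right)       # (bins, 2, 3, 8, 8) one-hot over bins
assert (inds.sum(axis=0) == 1).all()               # every element falls in exactly one bin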
Example #2
 def __init__(self, mixture_size: int, do_layer_norm: bool = False) -> None:
     super(Scalar_mix, self).__init__()
     self.mixture_size = mixture_size
     self.do_layer_norm = do_layer_norm
     self.scalar_parameters = ParameterTuple([Parameter(Tensor(np.array([0.0]), mindspore.float32))
                                              for _ in range(mixture_size)])
     self.gamma = Parameter(Tensor(np.array([0.0]), mindspore.float32))
     self.sum = P.ReduceSum()
     self.sqrt = P.Sqrt()
     self.cat = P.Concat()
     self.unsqueeze = P.ExpandDims()  # axis is supplied at call time, e.g. self.unsqueeze(x, 0)
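The constructor appears to mirror AllenNLP's ScalarMix: one learnable scalar per layer plus a global gamma. Assuming the usual forward pass (softmax-normalize the scalars, take the weighted sum of the layer tensors, scale by gamma), a NumPy stand-in for what the Cell computes:

import numpy as np

def scalar_mix(layers, scalars, gamma):
    # layers: list of equally shaped arrays, one per encoder layer
    w = np.exp(scalars - scalars.max())
    w = w / w.sum()                      # softmax over the mixture weights
    return gamma * sum(wi * t for wi, t in zip(w, layers))

layers = [np.full((2, 4), v) for v in (1.0, 2.0, 3.0)]
out = scalar_mix(layers, np.zeros(3), gamma=1.0)  # equal weights -> elementwise mean, 2.0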
Example #3
    def __init__(self, log_scale_min=-7.0, reduce=True):
        super(mix_gaussian_loss, self).__init__()
        self.log_scale_min = log_scale_min
        self.reduce = reduce
        self.transpose_op = P.Transpose()
        self.maximum = P.Maximum()
        self.tile = P.Tile()
        self.exp = P.Exp()
        self.logsoftmax = P.LogSoftmax(-1)
        self.expand_dims = P.ExpandDims()
        self.sums = P.ReduceSum()
        self.lse = log_sum_exp()

        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.const = P.ScalarToArray()
        self.log = P.Log()
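log_sum_exp() is a helper Cell defined elsewhere in the source repository; presumably it implements the standard numerically stable reduction log(sum(exp(x))) = max(x) + log(sum(exp(x - max(x)))). A hedged NumPy sketch of that trick:

import numpy as np

def log_sum_exp(x, axis=-1):
    m = x.max(axis=axis, keepdims=True)  # subtract the max before exponentiating
    return m.squeeze(axis) + np.log(np.exp(x - m).sum(axis=axis))

print(log_sum_exp(np.array([1000.0, 1000.0])))  # ~1000.6931, where a naive exp() overflows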
Example #4
    def __init__(self, log_scale_min=-7.0, reduce=True):
        super(mix_gaussian_loss, self).__init__()
        self.log_scale_min = log_scale_min
        self.reduce = reduce
        self.transpose_op = P.Transpose()
        self.maximum = P.Maximum()
        self.tile = P.Tile()
        self.exp = P.Exp()
        self.expand_dims = P.ExpandDims()
        self.sums = P.ReduceSum()
        self.lse = log_sum_exp()
        self.sq = P.Square()
        self.sqrt = P.Sqrt()
        self.const = P.ScalarToArray()
        self.log = P.Log()
        self.tensor_one = Tensor(1., ms.float32)

        if context.get_context("device_target") == "CPU":
            self.logsoftmax = log_softmax()
        else:
            self.logsoftmax = P.LogSoftmax(-1)
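The only change from Example #3 is the device check: it suggests P.LogSoftmax lacked a CPU kernel in the MindSpore version this repository targets, so a hand-rolled log_softmax Cell (defined elsewhere in the repo) is substituted. A hedged sketch of what such a fallback might look like, built only from primitives with CPU support; the repo's actual helper may differ:

import mindspore.nn as nn
import mindspore.ops.operations as P

class log_softmax(nn.Cell):
    """log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x)))), the stable form."""
    def __init__(self):
        super(log_softmax, self).__init__()
        self.max = P.ReduceMax(keep_dims=True)
        self.sum = P.ReduceSum(keep_dims=True)
        self.exp = P.Exp()
        self.log = P.Log()

    def construct(self, x):
        shifted = x - self.max(x, -1)
        return shifted - self.log(self.sum(self.exp(shifted), -1))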
Example #5
 def __init__(self, net_config, K=100, enable_nms_fp16=True):
     super(MultiPoseDecode, self).__init__()
     self.K = K
     self.nms = NMS(enable_nms_fp16=enable_nms_fp16)
     self.shape = ops.Shape()
     self.gather_topk = GatherTopK()
     self.gather_topk_channel = GatherTopKChannel()
     self.gather_by_ind = GatherFeatureByInd()
     self.half = ops.Split(axis=-1, output_num=2)
     self.half_first = ops.Split(axis=0, output_num=2)
     self.split = ops.Split(axis=-1, output_num=4)
     self.flip_lr = FlipLR()
     self.flip_lr_off = FlipLROff()
     self.flip_tensor = FlipTensor()
     self.concat = ops.Concat(axis=1)
     self.concat_a2 = ops.Concat(axis=2)
     self.concat_a3 = ops.Concat(axis=3)
     self.trans_gather_feature = TransposeGatherFeature()
     self.expand_dims = ops.ExpandDims()
     self.reshape = ops.Reshape()
     self.add = ops.TensorAdd()  # renamed to ops.Add() in later MindSpore releases
     self.dtype = ops.DType()
     self.cast = ops.Cast()
     self.thresh = 0.1
     self.transpose = ops.Transpose()
     self.perm_list = (0, 2, 1, 3)
     self.tile = ops.Tile()
     self.greater = ops.Greater()
     self.square = ops.Square()
     self.sqrt = ops.Sqrt()
     self.reduce_sum = ops.ReduceSum()
     self.min = ops.ArgMinWithValue(axis=3)
     self.max = ops.Maximum()
     self.hm_hp = net_config.hm_hp
     self.dense_hp = net_config.dense_hp
     self.reg_offset = net_config.reg_offset
     self.reg_hp_offset = net_config.reg_hp_offset
     self.hm_hp_ind = 3 if self.hm_hp else 2
     self.reg_ind = self.hm_hp_ind + 1 if self.reg_offset else self.hm_hp_ind
     self.reg_hp_ind = self.reg_ind + 1 if self.reg_hp_offset else self.reg_ind
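The trailing three assignments map config flags to positions of the optional output heads in the network's output list; each enabled head shifts the indices after it. The same arithmetic in plain Python, evaluated for two assumed flag combinations:

def head_indices(hm_hp, reg_offset, reg_hp_offset):
    hm_hp_ind = 3 if hm_hp else 2
    reg_ind = hm_hp_ind + 1 if reg_offset else hm_hp_ind
    reg_hp_ind = reg_ind + 1 if reg_hp_offset else reg_ind
    return hm_hp_ind, reg_ind, reg_hp_ind

print(head_indices(True, True, True))     # (3, 4, 5): all optional heads present
print(head_indices(False, False, False))  # (2, 2, 2): indices collapse when heads are absent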
Example #6
    def construct(self, inputs, targets):
        """
        Args:
        - inputs: feature matrix with shape (batch_size, feat_dim)
        - targets: ground truth labels with shape (num_classes)
        """
        n = inputs.shape[0]

        # Compute the pairwise distance matrix; replace with the official op
        # once it is merged upstream.
        pow_op = P.Pow()
        reduce_sum = P.ReduceSum(keep_dims=True)
        expand = P.BroadcastTo((n, n))
        transpose = P.Transpose()
        mul = P.Mul()
        add = P.Add()
        sqrt = P.Sqrt()
        equal = P.Equal()
        ones_like = P.OnesLike()

        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b, assembled via broadcasting
        dist = pow_op(inputs, 2)
        dist = reduce_sum(dist, 1)
        dist = expand(dist)
        dist = dist + transpose(dist, (1, 0))

        temp1 = P.matmul(inputs, transpose(inputs, (1, 0)))
        temp1 = mul(-2, temp1)
        dist = add(dist, temp1)
        # Clip before the sqrt for numerical stability: the squared distances can
        # come out slightly negative due to floating-point error. clip_by_value
        # takes both bounds here, hence the large, effectively inert max.
        dist = P.composite.clip_by_value(
            dist, clip_value_min=1e-12, clip_value_max=100000000
        )
        dist = sqrt(dist)

        # For each anchor, find the hardest positive and negative
        targets = expand(targets)
        mask = equal(targets, transpose(targets, (1, 0)))
        dist_ap = []
        dist_an = []

        # only for debugging
        #####################
        # print("dist is")
        # print(dist.shape)
        # print(dist)
        # print("mask is")
        # print(mask.shape)
        # print(mask)
        # print(mask[0])
        #####################

        for i in range(n):
            minval = -1.0
            maxval = -1.0
            for j in range(n):
                if mask[i][j] and dist[i][j] > maxval:
                    maxval = dist[i][j]
                if not mask[i][j] and (dist[i][j] < minval or minval == -1):
                    minval = dist[i][j]

            if (not isinstance(minval, Tensor)
                    or not isinstance(maxval, Tensor) or minval == -1.0
                    or maxval == -1.0):
                if self.error_msg is not None:
                    print("Error Msg", file=self.error_msg)
                    print("mask {} is".format(i), file=self.error_msg)
                    print(mask[i], file=self.error_msg)
                    print("dist is:", file=self.error_msg)
                    print(dist[i], file=self.error_msg)
                    print(maxval, file=self.error_msg)
                    print(minval, file=self.error_msg)
                    print(type(maxval), file=self.error_msg)
                    print(type(minval), file=self.error_msg)
                    self.error_msg.flush()

            # assert minval != -1.0 and isinstance(minval, Tensor)
            # assert maxval != -1.0 and isinstance(maxval, Tensor)
            dist_ap.append(maxval.asnumpy())
            dist_an.append(minval.asnumpy())

        dist_ap = Tensor(dist_ap, ms.float32)
        dist_an = Tensor(dist_an, ms.float32)
        # only for debugging
        #####################
        # print(dist_ap)
        # print(dist_ap.shape)
        # print(dist_an)
        #####################

        # Compute ranking hinge loss
        y = ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)

        # # compute accuracy
        # correct = torch.ge(dist_an, dist_ap).sum().item()
        return loss
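For reference, the distance block above relies on the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b; a small NumPy check of that identity, including the clip-before-sqrt step (array shapes are arbitrary):

import numpy as np

x = np.random.randn(5, 8).astype(np.float32)
sq = (x ** 2).sum(axis=1, keepdims=True)   # (5, 1) squared row norms
dist2 = sq + sq.T - 2.0 * x @ x.T          # broadcasts to the (5, 5) matrix
dist = np.sqrt(np.clip(dist2, 1e-12, None))
ref = np.linalg.norm(x[:, None] - x[None, :], axis=2)
assert np.allclose(dist, ref, atol=1e-3)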


# class GradOriTripletLoss(nn.Cell)
#     def __init__(self, net):
#         super(GradOriTripletLoss, self).__init__()
#         self.net = net
#         self.grad_op = P.GradOperation(get_all=True)
#
#     def construct(self, inputs, targets):
#         gradient_function = self.grad_op(self.net)
#         return gradient_function(inputs, targets)
def global_norm(x):
    # Note: L2Normalize rescales x first, so this returns the norm of the
    # normalized tensor, not sqrt(sum(x ** 2)).
    sqrt = P.Sqrt()
    reduce_sum = P.ReduceSum()
    l2 = P.L2Normalize()
    x = sqrt(reduce_sum(P.functional.square(l2(x))))
    return x
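If the intent was the conventional global norm sqrt(sum(x ** 2)) over the raw values (as in TensorFlow's tf.linalg.global_norm for a single tensor), a hedged sketch without the L2Normalize rescaling:

import numpy as np
import mindspore.ops as P
from mindspore import Tensor

def global_norm_plain(x):
    sqrt = P.Sqrt()
    reduce_sum = P.ReduceSum()
    return sqrt(reduce_sum(P.functional.square(x)))

print(global_norm_plain(Tensor(np.array([3.0, 4.0], np.float32))))  # 5.0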