Example #1
def log_loss(y_true, y_pred):
    # log10 of the RMSE; the 1e-20 term guards against log(0). Assumes an
    # `rmse` helper and `K` (the Keras backend) from the enclosing module.
    return K.log(rmse(y_true, y_pred) + 1e-20) / K.log(10.)
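A minimal usage sketch, assuming the `rmse` helper lives alongside the loss (the helper body below is an illustration, not part of the original source):

from tensorflow.keras import backend as K

def rmse(y_true, y_pred):
    # Hypothetical helper assumed by log_loss above.
    return K.sqrt(K.mean(K.square(y_true - y_pred)))

# model.compile(optimizer='adam', loss=log_loss)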
Example #2
def focal_loss_fixed(y_true, y_pred):
    # Binary focal loss (Lin et al., 2017); `alpha` and `gamma` come from the
    # enclosing scope. K.epsilon() keeps the logs away from log(0).
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1 + K.epsilon())) \
        - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0 + K.epsilon()))
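This inner function is typically returned by a factory that binds `alpha` and `gamma`; a sketch of that wrapper under that assumption:

import tensorflow as tf
from tensorflow.keras import backend as K

def focal_loss(alpha=0.25, gamma=2.0):
    # Binds alpha/gamma and returns the closure Keras expects as a loss.
    def focal_loss_fixed(y_true, y_pred):
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1 + K.epsilon())) \
            - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0 + K.epsilon()))
    return focal_loss_fixed

# model.compile(optimizer='adam', loss=focal_loss(alpha=0.25, gamma=2.0))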
Example #3
    def call(self, inputs, mask=None):
        # Insert a singleton axis at position 1, then take the element-wise log.
        inputs_expand = tf.expand_dims(inputs, axis=1)
        outputs = K.log(inputs_expand)
        return outputs
Example #4
def loss(y_true, y_pred):
    # Cross-entropy after mapping the normalized predictions through a class
    # transition matrix `P` (from the enclosing scope), e.g. for forward
    # label-noise correction.
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    return -K.sum(y_true * K.log(K.dot(y_pred, P)), axis=-1)
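A sketch of how the closure might be produced, with `P` a row-stochastic class-transition matrix (the factory name and toy matrix are assumptions):

import numpy as np
from tensorflow.keras import backend as K

def forward_corrected_loss(P_np):
    P = K.constant(P_np)  # (num_classes, num_classes), rows sum to 1
    def loss(y_true, y_pred):
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        return -K.sum(y_true * K.log(K.dot(y_pred, P)), axis=-1)
    return loss

P_np = np.array([[0.9, 0.1], [0.2, 0.8]], dtype='float32')  # toy 2-class matrix
# model.compile(optimizer='adam', loss=forward_corrected_loss(P_np))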
Example #5
    def call(self, t1, t2, **kwargs):
        # Donsker-Varadhan style objective: mean critic score on joint samples
        # (t1) minus log-mean-exp of scores on marginal samples (t2); the
        # returned value is the negated mutual-information lower bound.
        loss_t = K.mean(t1)
        loss_et = K.log(K.mean(K.exp(t2)))
        return loss_et - loss_t
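A toy illustration of where t1 and t2 come from in this kind of mutual-information estimation, with a stand-in critic (everything here is illustrative):

import tensorflow as tf

x = tf.random.normal((128, 4))
y = x + 0.1 * tf.random.normal((128, 4))  # correlated pair
critic = lambda a, b: tf.reduce_sum(a * b, axis=1, keepdims=True)  # toy critic

t1 = critic(x, y)                     # scores on joint samples
t2 = critic(x, tf.random.shuffle(y))  # scores on shuffled (marginal) samples
mi_lower_bound = tf.reduce_mean(t1) - tf.math.log(tf.reduce_mean(tf.exp(t2)))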
Example #6
def psnr(y_true, y_pred):
    # PSNR = 20 * log10(MAX / sqrt(MSE)) with MAX = 1; K.log is the natural
    # log, so divide by log(10) to get log10.
    mse = K.mean(K.square(y_true - y_pred))
    psnr_score = 20 * K.log(1. / K.sqrt(mse)) / K.log(10.)
    return psnr_score
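Since 20 * log10(1 / sqrt(MSE)) equals -10 * log10(MSE), an equivalent form with an epsilon guard might look like this (our rewrite, not the original author's):

from tensorflow.keras import backend as K

def psnr_stable(y_true, y_pred):
    # Epsilon keeps an exact match from producing inf.
    mse = K.mean(K.square(y_true - y_pred))
    return -10. * K.log(mse + K.epsilon()) / K.log(10.)

# model.compile(optimizer='adam', loss='mse', metrics=[psnr_stable])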
Example #7
def yolo4_loss(args,
               anchors,
               num_classes,
               ignore_thresh=.5,
               label_smoothing=0,
               use_focal_loss=False,
               use_focal_obj_loss=False,
               use_softmax_loss=False,
               use_giou_loss=False,
               use_diou_loss=False):
    '''Return yolo4_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(N, 2), wh
    num_classes: integer
    ignore_thresh: float, IoU threshold below which a predicted box is
        ignored in the object confidence loss
    label_smoothing: float, label smoothing factor applied to class targets
    use_focal_obj_loss / use_focal_loss / use_softmax_loss: bool, switch the
        confidence / classification terms to focal or softmax variants
    use_giou_loss / use_diou_loss: bool, use GIoU / DIoU as the location loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    num_layers = len(anchors) // 3  # default setting
    yolo_outputs = args[:num_layers]
    y_true = args[num_layers:]
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]
                   ] if num_layers == 3 else [[3, 4, 5], [0, 1, 2]]
    input_shape = K.cast(
        K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [
        K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0]))
        for l in range(num_layers)
    ]
    loss = 0
    total_location_loss = 0
    total_confidence_loss = 0
    total_class_loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))

    for l in range(num_layers):
        object_mask = y_true[l][..., 4:5]
        true_class_probs = y_true[l][..., 5:]
        if label_smoothing:
            true_class_probs = _smooth_labels(true_class_probs,
                                              label_smoothing)

        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
                                                     anchors[anchor_mask[l]],
                                                     num_classes,
                                                     input_shape,
                                                     calc_loss=True)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet raw box to calculate loss.
        raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid
        raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] *
                            input_shape[::-1])
        raw_true_wh = K.switch(object_mask, raw_true_wh,
                               K.zeros_like(raw_true_wh))  # avoid log(0)=-inf
        box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]),
                                     size=1,
                                     dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')

        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b, ..., 0:4],
                                       object_mask_bool[b, ..., 0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(
                b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
            return b + 1, ignore_mask

        _, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body,
                                       [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        if use_focal_obj_loss:
            # Focal loss for objectness confidence
            confidence_loss = sigmoid_focal_loss(object_mask, raw_pred[..., 4:5])
        else:
            confidence_loss = object_mask * K.binary_crossentropy(
                object_mask, raw_pred[..., 4:5], from_logits=True) + \
                (1 - object_mask) * K.binary_crossentropy(
                    object_mask, raw_pred[..., 4:5], from_logits=True) * ignore_mask

        if use_focal_loss:
            # Focal loss for classification score
            if use_softmax_loss:
                class_loss = softmax_focal_loss(true_class_probs, raw_pred[..., 5:])
            else:
                class_loss = sigmoid_focal_loss(true_class_probs, raw_pred[..., 5:])
        else:
            if use_softmax_loss:
                # use softmax style classification output
                class_loss = object_mask * K.expand_dims(
                    K.categorical_crossentropy(
                        true_class_probs, raw_pred[..., 5:], from_logits=True),
                    axis=-1)
            else:
                # use sigmoid style classification output
                class_loss = object_mask * K.binary_crossentropy(
                    true_class_probs, raw_pred[..., 5:], from_logits=True)

        if use_giou_loss:
            # Calculate GIoU loss as location loss
            raw_true_box = y_true[l][..., 0:4]
            giou = box_giou(pred_box, raw_true_box)
            giou_loss = object_mask * box_loss_scale * (1 - giou)
            giou_loss = K.sum(giou_loss) / mf
            location_loss = giou_loss
        elif use_diou_loss:
            # Calculate DIoU loss as location loss
            raw_true_box = y_true[l][..., 0:4]
            diou = box_diou(pred_box, raw_true_box)
            diou_loss = object_mask * box_loss_scale * (1 - diou)
            diou_loss = K.sum(diou_loss) / mf
            location_loss = diou_loss
        else:
            # Standard YOLO location loss
            # K.binary_crossentropy is helpful to avoid exp overflow.
            xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(
                raw_true_xy, raw_pred[..., 0:2], from_logits=True)
            wh_loss = object_mask * box_loss_scale * 0.5 * K.square(
                raw_true_wh - raw_pred[..., 2:4])
            xy_loss = K.sum(xy_loss) / mf
            wh_loss = K.sum(wh_loss) / mf
            location_loss = xy_loss + wh_loss

        confidence_loss = K.sum(confidence_loss) / mf
        class_loss = K.sum(class_loss) / mf
        loss += location_loss + confidence_loss + class_loss
        total_location_loss += location_loss
        total_confidence_loss += confidence_loss
        total_class_loss += class_loss

    # Fit for tf 2.0.0 loss shape
    loss = K.expand_dims(loss, axis=-1)

    return loss  #, total_location_loss, total_confidence_loss, total_class_loss
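A sketch of how this loss is usually attached for training, following the keras-yolo3 convention of a Lambda layer over the model outputs plus y_true placeholders (yolo_body, get_anchors, and the shapes are assumed from the surrounding project):

from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model

num_classes, anchors = 80, get_anchors('yolo4_anchors.txt')  # assumed helper
h, w = 416, 416  # illustrative input size; 3 anchors per scale
y_true_inputs = [Input(shape=(h // s, w // s, 3, num_classes + 5))
                 for s in (32, 16, 8)]

model_body = yolo_body(Input(shape=(h, w, 3)), 3, num_classes)  # assumed helper
loss_tensor = Lambda(yolo4_loss, output_shape=(1,), name='yolo_loss',
                     arguments={'anchors': anchors, 'num_classes': num_classes,
                                'ignore_thresh': 0.5})(
                         [*model_body.output, *y_true_inputs])
train_model = Model([model_body.input, *y_true_inputs], loss_tensor)
train_model.compile(optimizer='adam', loss=lambda y_true, y_pred: y_pred)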
Example #8
def weighted_bce(y_true, y_pred):
    # Class-weighted binary cross-entropy; pos_weight > 1 would up-weight the
    # positive term (left at 1 here, i.e. plain BCE). 1e-6 guards the logs.
    pos_weight = 1
    x_1 = y_true * pos_weight * -K.log(y_pred + 1e-6)
    x_2 = (1 - y_true) * -K.log(1 - y_pred + 1e-6)
    return x_1 + x_2
Example #9
def loss(y_true, y_pred):
    # Weighted categorical cross-entropy; `weights` (one entry per class)
    # comes from the enclosing scope.
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    loss = y_true * K.log(y_pred) * weights
    loss = -K.sum(loss, -1)
    return loss
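A sketch of the factory that usually wraps this closure (the wrapper name and example weights are assumptions):

import numpy as np
from tensorflow.keras import backend as K

def weighted_categorical_crossentropy(class_weights):
    weights = K.constant(np.asarray(class_weights, dtype='float32'))
    def loss(y_true, y_pred):
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        return -K.sum(y_true * K.log(y_pred) * weights, -1)
    return loss

# e.g. up-weight two rare classes against a common background class:
# model.compile(optimizer='adam', loss=weighted_categorical_crossentropy([0.5, 2.0, 2.0]))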
Example #10
    def call(self, inputs, mask=None):
        X, A = inputs

        N = K.shape(A)[-1]
        # Check if the layer is operating in mixed or batch mode
        mode = ops.autodetect_mode(X, A)
        self.reduce_loss = mode in (modes.MIXED, modes.BATCH)

        # Get normalized adjacency
        if K.is_sparse(A):
            I_ = tf.sparse.eye(N, dtype=A.dtype)
            A_ = tf.sparse.add(A, I_)
        else:
            I_ = tf.eye(N, dtype=A.dtype)
            A_ = A + I_
        fltr = ops.normalize_A(A_)

        # Node embeddings
        Z = K.dot(X, self.kernel_emb)
        Z = ops.modal_dot(fltr, Z)
        if self.activation is not None:
            Z = self.activation(Z)

        # Compute cluster assignment matrix
        S = K.dot(X, self.kernel_pool)
        S = ops.modal_dot(fltr, S)
        S = activations.softmax(S, axis=-1)  # softmax applied row-wise
        if mask is not None:
            S *= mask[0]

        # Link prediction loss
        S_gram = ops.modal_dot(S, S, transpose_b=True)
        if mode == modes.MIXED:
            A = tf.sparse.to_dense(A)[None, ...]
        if K.is_sparse(A):
            LP_loss = tf.sparse.add(A, -S_gram)  # A/tf.norm(A) - S_gram/tf.norm(S_gram)
        else:
            LP_loss = A - S_gram
        LP_loss = tf.norm(LP_loss, axis=(-1, -2))
        if self.reduce_loss:
            LP_loss = K.mean(LP_loss)
        self.add_loss(LP_loss)

        # Entropy loss
        entr = tf.negative(
            tf.reduce_sum(tf.multiply(S, K.log(S + K.epsilon())), axis=-1)
        )
        entr_loss = K.mean(entr, axis=-1)
        if self.reduce_loss:
            entr_loss = K.mean(entr_loss)
        self.add_loss(entr_loss)

        # Pooling
        X_pooled = ops.modal_dot(S, Z, transpose_a=True)
        A_pooled = ops.matmul_at_b_a(S, A)

        output = [X_pooled, A_pooled]

        if self.return_mask:
            output.append(S)

        return output
Example #11
def custom_loss(y_true, y_pred):
    # Weighted binary cross-entropy (positives weighted 5x), averaged over the
    # m training examples from models_data; Kb is the Keras backend alias.
    loss = -(1 / models_data[selected_model]["m"]) * Kb.sum(
        5 * y_true * Kb.log(Kb.abs(y_pred + 1e-8)) +
        (1 - y_true) * Kb.log(Kb.abs(1 - y_pred + 1e-8)))
    return loss
Example #12
def kl_discrete(dist, y, yk, temperature=0.67):
    # KL divergence between two temperature-relaxed (Gumbel-Softmax style)
    # discrete distributions; EPSILON guards the logs.
    dim = K.shape(dist)[1]
    dim = tf.cast(dim, tf.float32)
    kl_batch1 = K.sum(K.log(dist + EPSILON) - temperature * yk, axis=1) \
        - K.sum(K.log(y + EPSILON) - temperature * yk, axis=1)
    kl_batch2 = -dim * tf.reduce_logsumexp(K.log(dist + EPSILON) - temperature * yk, 1) \
        + dim * tf.reduce_logsumexp(K.log(y + EPSILON) - temperature * yk, 1)
    return tf.reduce_sum(kl_batch1 + kl_batch2)
Example #13
    def call(self, x):
        # x has shape (batch, gs, 4): per-particle (E, px, py, pz).
        E = K.reshape(x[:, :, 0], (-1, self.gs, 1))
        p1 = K.reshape(x[:, :, 1], (-1, self.gs, 1))
        p2 = K.reshape(x[:, :, 2], (-1, self.gs, 1))
        p3 = K.reshape(x[:, :, 3], (-1, self.gs, 1))

        pt = K.sqrt(p1**2 + p2**2)
        p = K.sqrt(pt**2 + p3**2)
        # Soft mask: ~0 for zero-padded particles, ~1 for real ones.
        iszero = p**2 + E**2
        iszero = 1 - K.relu(1 - self.numericC * iszero) + K.relu(
            -self.numericC * iszero)

        # Pseudorapidity eta = 0.5*ln((p + pz)/(p - pz)) and azimuth phi; the
        # small constants guard the log, and `t` is the tensorflow alias.
        eta = iszero * 0.5 * K.log(0.0000000001 + (p + p3) /
                                   (p - p3 + 0.0000000001))
        phi = iszero * t.math.atan2(p2, p1)

        #print("eta",eta.shape,"phi",phi.shape)

        meta = K.mean(eta, axis=-2)
        mphi = K.mean(phi, axis=-2)

        #print("meta",meta.shape,"mphi",mphi.shape)

        #deta=eta#-meta##not sure if abs here
        #dphi=phi#-mphi##not sure here either

        deta = iszero * K.permute_dimensions(
            K.permute_dimensions(eta, (1, 0, 2)) - meta,
            (1, 0, 2))  #not sure if abs here required
        dphi = iszero * K.permute_dimensions(
            K.permute_dimensions(phi, (1, 0, 2)) - mphi,
            (1, 0, 2))  #also not sure here either

        #print("deta",deta.shape,"dphi",dphi.shape)

        lpt = iszero * K.log(pt + 0.0000001)  ##
        lE = iszero * K.log(E + 0.0000001)  ##

        #print("lpt",lpt.shape,"lE",lE.shape)

        #rpt=K.reshape(pt,(-1,self.gs))

        spt = K.sum(pt, axis=-2)
        #print("spt",spt.shape)

        #return K.permute_dimensions(K.log(1.0+K.abs(K.permute_dimensions(pt,(1,0,2))/(K.abs(spt)+1.0))),(1,0,2))
        #ispt=1/(spt+0.000000001)
        #print("ispt",ispt.shape)

        ppt = -iszero * K.permute_dimensions(
            K.log(0.000000001 + K.permute_dimensions(pt, (1, 0, 2)) /
                  (spt + 0.0000001)), (1, 0, 2)
        )  #please note the added sign in comparison to the original paper
        #ppt=K.reshape(K.permute_dimensions(K.log(0.000000001+K.permute_dimensions(rpt,(1,0,2))/(spt+0.0000001)),(1,0)),(-1,self.gs,1))##
        #ppt=K.reshape(K.permute_dimensions(K.log(0.000000001+K.permute_dimensions(rpt,(1,0))/(spt+0.0000001)),(1,0)),(-1,self.gs,1))##
        #ppt=K.reshape(K.log(0.000000001+K.permute_dimensions(rpt,(1,0))/(spt+0.0000001)),(-1,self.gs,1))##
        #ppt=iszero*K.log(pt/(spt+0.00000001))##
        #print("ppt",ppt.shape)

        sE = K.sum(E, axis=-2)
        #print("sE",sE.shape)

        pE = -iszero * K.permute_dimensions(
            K.log(0.000000001 + K.permute_dimensions(E, (1, 0, 2)) /
                  (sE + 0.0000001)), (1, 0, 2))  #here was also a sign added
        #pE=-iszero*K.log(sE/(E+0.00000001))##
        #pE=-iszero
        #print("pE",pE.shape)

        dR = K.sqrt(deta**2 + dphi**2)  ##
        #print("dR",dR.shape)

        ret = K.concatenate((iszero, deta, dphi, lpt, lE, ppt, pE, dR),
                            axis=-1)  #adding iszero for numerical reasons

        #print(ret.shape,x.shape)
        #exit()

        return ret
Example #14
    def call(self, y_true, y_pred):
        """
        @param y_true: Dim(batch, grid, grid, 3,
                                (b_x, b_y, b_w, b_h, conf, prob_0, prob_1, ...))
        @param y_pred: Dim(batch, grid, grid, 3,
                                (b_x, b_y, b_w, b_h, conf, prob_0, prob_1, ...))
        """
        if len(y_pred.shape) == 4:
            _, grid_size, _, box_size = y_pred.shape
            box_size = box_size // 3
        else:
            _, grid_size, _, _, box_size = y_pred.shape

        y_true = tf.reshape(
            y_true, shape=(-1, grid_size * grid_size * 3, box_size)
        )
        y_pred = tf.reshape(
            y_pred, shape=(-1, grid_size * grid_size * 3, box_size)
        )

        truth_xywh = y_true[..., 0:4]
        truth_conf = y_true[..., 4:5]
        truth_prob = y_true[..., 5:]

        num_classes = truth_prob.shape[-1]

        pred_xywh = y_pred[..., 0:4]
        pred_conf = y_pred[..., 4:5]
        pred_prob = y_pred[..., 5:]

        one_obj = truth_conf
        num_obj = tf.reduce_sum(one_obj, axis=[1, 2])
        one_noobj = 1.0 - one_obj
        # Dim(batch, grid * grid * 3, 1)
        one_obj_mask = one_obj > 0.5

        zero = tf.zeros((1, grid_size * grid_size * 3, 1), dtype=tf.float32)

        # IoU Loss
        xiou = self.bbox_xiou(truth_xywh, pred_xywh)
        xiou_scale = 2.0 - truth_xywh[..., 2:3] * truth_xywh[..., 3:4]
        xiou_loss = one_obj * xiou_scale * (1.0 - xiou[..., tf.newaxis])
        xiou_loss = 3 * tf.reduce_mean(tf.reduce_sum(xiou_loss, axis=(1, 2)))

        # Confidence Loss
        i0 = tf.constant(0)

        def body(i, max_iou):
            object_mask = tf.reshape(one_obj_mask[i, ...], shape=(-1,))
            truth_bbox = tf.boolean_mask(truth_xywh[i, ...], mask=object_mask)
            # grid * grid * 3,      1, xywh
            #               1, answer, xywh
            #   => grid * grid * 3, answer
            _max_iou0 = tf.cond(
                tf.equal(num_obj[i], 0),
                lambda: zero,
                lambda: tf.reshape(
                    tf.reduce_max(
                        bbox_iou(
                            pred_xywh[i, :, tf.newaxis, :],
                            truth_bbox[tf.newaxis, ...],
                        ),
                        axis=-1,
                    ),
                    shape=(1, -1, 1),
                ),
            )
            # 1, grid * grid * 3, 1
            _max_iou1 = tf.cond(
                tf.equal(i, 0),
                lambda: _max_iou0,
                lambda: tf.concat([max_iou, _max_iou0], axis=0),
            )
            return tf.add(i, 1), _max_iou1

        _, max_iou = tf.while_loop(
            self.while_cond,
            body,
            [i0, zero],
            shape_invariants=[
                i0.get_shape(),
                tf.TensorShape([None, grid_size * grid_size * 3, 1]),
            ],
        )

        conf_obj_loss = one_obj * (0.0 - backend.log(pred_conf + 1e-9))
        conf_noobj_loss = (
            one_noobj
            * tf.cast(max_iou < 0.5, dtype=tf.float32)
            * (0.0 - backend.log(1.0 - pred_conf + 1e-9))
        )
        conf_loss = tf.reduce_mean(
            tf.reduce_sum(conf_obj_loss + conf_noobj_loss, axis=(1, 2))
        )

        # Probabilities Loss
        prob_loss = self.prob_binaryCrossentropy(truth_prob, pred_prob)
        prob_loss = one_obj * prob_loss[..., tf.newaxis]
        prob_loss = tf.reduce_mean(
            tf.reduce_sum(prob_loss, axis=(1, 2)) * num_classes
        )

        total_loss = xiou_loss + conf_loss + prob_loss

        if self.verbose != 0:
            tf.print(
                "grid:",
                grid_size,
                "iou_loss:",
                xiou_loss,
                "conf_loss:",
                conf_loss,
                "prob_loss:",
                prob_loss,
                "total_loss",
                total_loss,
            )

        return total_loss
Example #15
    def _nnpom(self, projected, thresholds):
        if self.use_tau == 1:
            projected = K.reshape(projected, shape=[-1]) / self.tau
        else:
            projected = K.reshape(projected, shape=[-1])

        # projected = K.Print(projected, data=[K.reduce_min(projected), K.reduce_max(projected), K.reduce_mean(projected)], message='projected min max mean')

        m = K.shape(projected)[0]
        a = K.reshape(K.tile(thresholds, [m]), shape=[m, -1])
        b = K.transpose(
            K.reshape(K.tile(projected, [self.num_classes - 1]), shape=[-1,
                                                                        m]))
        z3 = a - b

        # z3 = K.cond(K.reduce_min(K.abs(z3)) < 0.01, lambda: K.Print(z3, data=[K.reduce_min(K.abs(z3))], message='z3 abs min', summarize=100), lambda: z3)

        if self.link_function == 'probit':
            a3T = self.dist.cdf(z3)
        elif self.link_function == 'cloglog':
            a3T = 1 - K.exp(-K.exp(z3))
        elif self.link_function == 'glogit':
            a3T = 1.0 / K.pow(1.0 + K.exp(-self.lmbd *
                                          (z3 - self.mu)), self.alpha)
        elif self.link_function == 'cauchit':
            # Cauchy CDF: arctan(z)/pi + 1/2 (tf.math.atan; the Keras backend
            # has no atan).
            a3T = tf.math.atan(z3) / math.pi + 0.5
        elif self.link_function == 'lgamma':
            a3T = K.cond(
                self.q < 0, lambda: tf.math.igammac(
                    K.pow(self.q, -2),
                    K.pow(self.q, -2) * K.exp(self.q * z3)), lambda: K.cond(
                        self.q > 0, lambda: tf.math.igamma(
                            K.pow(self.q, -2),
                            K.pow(self.q, -2) * K.exp(self.q * z3)), lambda:
                        self.dist.cdf(z3)))
        elif self.link_function == 'gauss':
            # a3T = 1.0 / 2.0 + K.sign(z3) * K.igamma(1.0 / self.alpha, K.pow(K.abs(z3) / self.r, self.alpha)) / (2 * K.exp(K.lgamma(1.0 / self.alpha)))
            # z3 = K.Print(z3, data=[K.reduce_max(K.abs(z3))], message='z3 abs max')
            # K.sigmoid(z3 - self.p['mu']) - 1)
            a3T = 1.0 / 2.0 + K.tanh(z3 - self.p['mu']) * tf.math.igamma(
                1.0 / self.p['alpha'],
                K.pow(K.pow(
                    (z3 - self.p['mu']) / self.p['r'], 2), self.p['alpha'])
            ) / (2 * K.exp(tf.math.lgamma(1.0 / self.p['alpha'])))
        elif self.link_function == 'expgauss':
            u = self.lmbd * (z3 - self.mu)
            v = self.lmbd * self.sigma
            dist1 = distributions.Normal(loc=0., scale=v)
            dist2 = distributions.Normal(loc=v, scale=K.pow(v, 2))
            a3T = dist1.cdf(u) - K.exp(-u + K.pow(v, 2) / 2 +
                                       K.log(dist2.cdf(u)))
        elif self.link_function == 'ggamma':
            a3T = tf.math.igamma(
                self.p['d'] / self.p['p'],
                K.pow((z3 / self.p['a']), self.p['p'])) / K.exp(
                    tf.math.lgamma(self.p['d'] / self.p['p']))
        else:
            a3T = 1.0 / (1.0 + K.exp(-z3))

        a3 = K.concatenate([a3T, tf.ones([m, 1])], axis=1)
        a3 = K.concatenate(
            [K.reshape(a3[:, 0], shape=[-1, 1]), a3[:, 1:] - a3[:, 0:-1]],
            axis=-1)

        return a3
Example #16
    def call(self, inputs):
        if len(inputs) == 3:
            X, A, I = inputs
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X, A = inputs
            I = None

        N = K.shape(A)[-1]
        # Check if the layer is operating in mixed or batch mode
        mode = ops.autodetect_mode(A, X)
        self.reduce_loss = mode in (modes.MIXED, modes.BATCH)

        # Get normalized adjacency
        if K.is_sparse(A):
            I_ = tf.sparse.eye(N, dtype=A.dtype)
            A_ = tf.sparse.add(A, I_)
        else:
            I_ = tf.eye(N, dtype=A.dtype)
            A_ = A + I_
        fltr = ops.normalize_A(A_)

        # Node embeddings
        Z = K.dot(X, self.kernel_emb)
        Z = ops.filter_dot(fltr, Z)
        if self.activation is not None:
            Z = self.activation(Z)

        # Compute cluster assignment matrix
        S = K.dot(X, self.kernel_pool)
        S = ops.filter_dot(fltr, S)
        S = activations.softmax(S, axis=-1)  # softmax applied row-wise

        # Link prediction loss
        S_gram = ops.matmul_A_BT(S, S)
        if mode == modes.MIXED:
            A = tf.sparse.to_dense(A)[None, ...]
        if K.is_sparse(A):
            LP_loss = tf.sparse.add(
                A, -S_gram)  # A/tf.norm(A) - S_gram/tf.norm(S_gram)
        else:
            LP_loss = A - S_gram
        LP_loss = tf.norm(LP_loss, axis=(-1, -2))
        if self.reduce_loss:
            LP_loss = K.mean(LP_loss)
        self.add_loss(LP_loss)

        # Entropy loss
        entr = tf.negative(
            tf.reduce_sum(tf.multiply(S, K.log(S + K.epsilon())), axis=-1))
        entr_loss = K.mean(entr, axis=-1)
        if self.reduce_loss:
            entr_loss = K.mean(entr_loss)
        self.add_loss(entr_loss)

        # Pooling
        X_pooled = ops.matmul_AT_B(S, Z)
        A_pooled = ops.matmul_AT_B_A(S, A)

        output = [X_pooled, A_pooled]

        if I is not None:
            I_mean = tf.math.segment_mean(I, I)
            I_pooled = ops.repeat(I_mean, tf.ones_like(I_mean) * self.k)
            output.append(I_pooled)

        if self.return_mask:
            output.append(S)

        return output
Example #17
def spp(x):
    # Softplus shifted down by spp_alpha (from the enclosing scope).
    return K.log(1 + K.exp(x)) - spp_alpha
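K.log(1 + K.exp(x)) overflows for large x; the backend's built-in softplus computes the same quantity safely, so an equivalent sketch (our naming) is:

from tensorflow.keras import backend as K

def spp_stable(x):
    # K.softplus(x) == log(1 + exp(x)) without the overflow.
    return K.softplus(x) - spp_alpha  # spp_alpha from the enclosing scope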
Example #18
def _calc_entropy(y_pred):
    # Returns minus the entropy, i.e. sum_i p_i * log(p_i); `_clip_value` is a
    # module helper that keeps the probabilities away from zero.
    logp = K.log(_clip_value(y_pred))
    return K.sum(y_pred * logp, axis=1)
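A sketch of the clipping helper this relies on and of the term's typical use as an entropy bonus (the helper body and the coefficient are assumptions):

from tensorflow.keras import backend as K

def _clip_value(y_pred, eps=1e-8):
    # Assumed helper: keep probabilities strictly above zero before the log.
    return K.clip(y_pred, eps, 1.0)

# Since _calc_entropy returns minus entropy, adding it with a positive
# coefficient rewards high-entropy (exploratory) policies:
# total_loss = policy_loss + 0.01 * _calc_entropy(y_pred)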
Example #19
def loss(y_true, y_pred):
    # Weighted categorical cross-entropy as in Example #9, but without the
    # normalization and clipping steps; `weights` comes from the enclosing scope.
    loss = y_true * K.log(y_pred) * weights
    loss = -K.sum(loss, -1)
    return loss
Example #20
def PSNR(y_true, y_pred):
    # Reference: https://ja.wikipedia.org/wiki/%E3%83%94%E3%83%BC%E3%82%AF%E4%BF%A1%E5%8F%B7%E5%AF%BE%E9%9B%91%E9%9F%B3%E6%AF%94
    # 20*log10(MAX) - 10*log10(MSE) with MAX = 2.0, i.e. pixel values in [-1, 1].
    pic_gt = y_true[:, :, :, :3]
    pic_pred = y_pred[:, :, :, :3]
    return 20 * K.log(2.0) / K.log(10.0) - 10.0 * K.log(
        K.mean(K.square(pic_gt - pic_pred), axis=(1, 2, 3))) / K.log(10.0)
Example #21
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(N, 2), wh
    num_classes: integer
    ignore_thresh: float, IoU threshold below which a predicted box is
        ignored in the object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    num_layers = len(anchors) // 3  # default setting
    yolo_outputs = args[:num_layers]
    y_true = args[num_layers:]
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 \
        else [[3, 4, 5], [0, 1, 2]]  # tiny model: each anchor used exactly once
    input_shape = K.cast(
        K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [
        K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0]))
        for l in range(num_layers)
    ]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))

    for l in range(num_layers):
        object_mask = y_true[l][..., 4:5]
        true_class_probs = y_true[l][..., 5:]

        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
                                                     anchors[anchor_mask[l]],
                                                     num_classes,
                                                     input_shape,
                                                     calc_loss=True)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet raw box to calculate loss.
        raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid
        raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] *
                            input_shape[::-1])
        raw_true_wh = K.switch(object_mask, raw_true_wh,
                               K.zeros_like(raw_true_wh))  # avoid log(0)=-inf
        box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]),
                                     size=1,
                                     dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')

        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b, ..., 0:4],
                                       object_mask_bool[b, ..., 0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(
                b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
            return b + 1, ignore_mask

        _, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body,
                                       [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # K.binary_crossentropy is helpful to avoid exp overflow.
        xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(
            raw_true_xy, raw_pred[..., 0:2], from_logits=True)
        wh_loss = object_mask * box_loss_scale * 0.5 * K.square(
            raw_true_wh - raw_pred[..., 2:4])
        confidence_loss = object_mask * K.binary_crossentropy(
            object_mask, raw_pred[..., 4:5], from_logits=True) + \
            (1 - object_mask) * K.binary_crossentropy(
                object_mask, raw_pred[..., 4:5], from_logits=True) * ignore_mask
        class_loss = object_mask * K.binary_crossentropy(
            true_class_probs, raw_pred[..., 5:], from_logits=True)

        xy_loss = K.sum(xy_loss) / mf
        wh_loss = K.sum(wh_loss) / mf
        confidence_loss = K.sum(confidence_loss) / mf
        class_loss = K.sum(class_loss) / mf
        loss += xy_loss + wh_loss + confidence_loss + class_loss
        if print_loss:
            loss = tf.Print(loss, [
                loss, xy_loss, wh_loss, confidence_loss, class_loss,
                K.sum(ignore_mask)
            ],
                            message='loss: ')
    return loss
Example #22
def focal_loss(y_true, y_pred, gamma=2., alpha=.25):
    # Focal loss with a single alpha applied to both classes (a common
    # simplification; the standard form weights negatives by 1 - alpha).
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    loss_1 = -y_true * (alpha * K.pow((1 - y_pred), gamma) * K.log(y_pred))
    loss_0 = -(1 - y_true) * (alpha * K.pow(y_pred, gamma) * K.log(1 - y_pred))
    return K.mean(loss_0 + loss_1)
Example #23
def crossentropy(y_true, y_pred):
    # this gives the same result as keras.objectives.categorical_crossentropy
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    return -K.sum(y_true * K.log(y_pred), axis=-1)
Example #24
    def call(self, y_true, y_pred, disc_r):
        # Binary cross-entropy weighted by the discounted returns `disc_r`
        # (policy-gradient style); assumes y_pred is already in (0, 1).
        log_lik = -(y_true * K.log(y_pred) + (1 - y_true) * K.log(1 - y_pred))
        loss = K.mean(log_lik * disc_r, keepdims=True)
        return loss
Example #25
def custom_loss(y_true, y_pred):
    # REINFORCE-style loss: log-probability of the taken action scaled by
    # `advantages`, which is captured from the enclosing scope.
    out = K.clip(y_pred, 1e-8, 1 - 1e-8)
    log_lik = y_true * K.log(out)
    return K.sum(-log_lik * advantages)
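This closure typically comes from a model builder that feeds the advantages in as an extra input; a minimal sketch under that assumption (this closure-over-an-Input pattern is the classic TF1-style recipe, and the layer sizes and names are illustrative):

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

def build_policy_trainer(n_states, n_actions):
    states = Input(shape=(n_states,))
    advantages = Input(shape=(1,))
    hidden = Dense(32, activation='relu')(states)
    probs = Dense(n_actions, activation='softmax')(hidden)

    def custom_loss(y_true, y_pred):
        out = K.clip(y_pred, 1e-8, 1 - 1e-8)
        log_lik = y_true * K.log(out)
        return K.sum(-log_lik * advantages)

    trainer = Model([states, advantages], probs)
    trainer.compile(optimizer='adam', loss=custom_loss)
    return trainer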
Example #26
def kl_divergence(kl_term, p, p_hat):
    # Bernoulli KL(p || p_hat) for sparsity penalties; kl_term is the constant
    # p*log(p) + (1-p)*log(1-p) (see Example #28), and 1e-10 guards the logs.
    return (kl_term - p * K.log(1e-10 + p_hat) -
            (1.0 - p) * K.log(1e-10 + 1.0 - p_hat))
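A sketch of how this pairs with the __init__ from Example #28 as an activity regularizer for a sparse autoencoder (the class name and the __call__ body are assumptions):

from tensorflow.keras import backend as K
from tensorflow.keras.regularizers import Regularizer

class SparseActivityRegularizer(Regularizer):
    def __init__(self, p=0.1, sparsityBeta=3.0):
        self.p = K.cast_to_floatx(p)
        self.sparsityBeta = K.cast_to_floatx(sparsityBeta)
        self.kl_term = p * K.log(p) + (1.0 - p) * K.log(1.0 - p)

    def __call__(self, x):
        p_hat = K.mean(x, axis=0)  # mean activation of each hidden unit
        return self.sparsityBeta * K.sum(
            kl_divergence(self.kl_term, self.p, p_hat))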
Example #27
    def reconstruct_loss(self, x, mu_x, sigma_x):
        # Gaussian reconstruction term built from the predicted mean and std.
        var_x = K.square(sigma_x)
        reconst_loss = -0.5 * K.sum(K.log(var_x), axis=2) + K.sum(
            K.square(x - mu_x) / var_x, axis=2)
        reconst_loss = K.reshape(reconst_loss, shape=(x.shape[0], 1))
        return K.mean(reconst_loss, axis=0)
Example #28
    def __init__(self, p=0.1, sparsityBeta=3.0):
        # p is the target mean activation; kl_term caches the constant part of
        # the Bernoulli KL used in Example #26.
        self.p = K.cast_to_floatx(p)
        self.sparsityBeta = K.cast_to_floatx(sparsityBeta)
        self.kl_term = p * K.log(p) + (1.0 - p) * K.log(1.0 - p)
Example #29
def pairwise_cross_entropy(positive_score, negative_score, *args):
    # Pairwise ranking loss: -log sigmoid(positive - negative), written with
    # explicit exponentials.
    positive_exp = K.exp(positive_score)
    return K.mean(-K.log(positive_exp /
                         (positive_exp + K.exp(negative_score))))
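Since -log(e^p / (e^p + e^n)) = log(1 + e^(n-p)) = softplus(n - p), the same loss can be computed without the overflow-prone exponentials (our rewrite, not the original author's):

from tensorflow.keras import backend as K

def pairwise_cross_entropy_stable(positive_score, negative_score, *args):
    # softplus(negative - positive) == -log(sigmoid(positive - negative)),
    # but safe for large scores.
    return K.mean(K.softplus(negative_score - positive_score))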
Example #30
    def call(self, x):
        # x has shape (batch, gs, 4): per-particle (E, px, py, pz).
        E = K.reshape(x[:, :, 0], (-1, self.gs, 1))
        p1 = K.reshape(x[:, :, 1], (-1, self.gs, 1))
        p2 = K.reshape(x[:, :, 2], (-1, self.gs, 1))
        p3 = K.reshape(x[:, :, 3], (-1, self.gs, 1))

        pt = K.sqrt(p1**2 + p2**2)
        p = K.sqrt(pt**2 + p3**2)
        # Soft mask: ~0 for zero-padded particles, ~1 for real ones.
        iszero = p**2 + E**2
        iszero = 1 - K.relu(1 - self.numericC * iszero) + K.relu(
            -self.numericC * iszero)

        # Pseudorapidity and azimuth, with small constants guarding the log;
        # `t` is the tensorflow alias.
        eta = iszero * 0.5 * K.log(0.0000000001 + (p + p3) /
                                   (p - p3 + 0.0000000001))
        phi = iszero * t.math.atan2(p2, p1)

        #print("eta",eta.shape,"phi",phi.shape)

        eta = K.reshape(eta, (-1, self.gs))
        phi = K.reshape(phi, (-1, self.gs))

        meta = K.mean(eta, axis=-1)

        mp1 = K.mean(p1, axis=-2)
        mp2 = K.mean(p2, axis=-2)

        #print(p2.shape,p1.shape)
        #print(mp1.shape,mp2.shape)
        #exit()

        mphi = t.math.atan2(mp2, mp1)
        #print("meta",meta.shape,"mphi",mphi.shape)

        #mphi=K.mean(phi,axis=-1)

        #exit()

        meta = K.reshape(K.repeat_elements(meta, self.gs, 0), (-1, self.gs))
        mphi = K.repeat_elements(mphi, self.gs, 1)

        #print("meta",meta.shape,"mphi",mphi.shape)

        #deta=eta#-meta##not sure if abs here
        #dphi=phi#-mphi##not sure here either

        siszero = K.reshape(iszero, (-1, self.gs))

        deta = K.reshape(siszero * (eta - meta), (-1, self.gs, 1))
        dphi = K.reshape(siszero * (phi - mphi), (-1, self.gs, 1))

        pi = t.constant(math.pi)

        #dphi=K.min([t.math.floormod(dphi,2*pi),t.math.floormod(-dphi,2*pi)],axis=0)

        opta = t.math.floormod(dphi, 2 * pi)
        optb = t.math.floormod(-dphi, 2 * pi)

        dphi = t.where(t.greater(opta, optb), optb, -opta)

        #dphi=K.reshape(dphi,(-1,self.gs,1))#should actually be useless?

        #dphi=K.min(K.concatenate((t.math.floormod(dphi,2*pi),t.math.floormod(-dphi,2*pi)),axis=-1),axis=-1)

        #dphi=K.reshape(dphi,(-1,self.gs,1))

        #deta=iszero*K.permute_dimensions(K.permute_dimensions(eta,(1,0,2))-meta,(1,0,2))#not sure if abs here required
        #dphi=iszero*K.permute_dimensions(K.permute_dimensions(phi,(1,0,2))-mphi,(1,0,2))#also not sure here either

        spt = K.sum(pt, axis=-2)
        ppt = -iszero * K.permute_dimensions(
            K.log(0.000000001 + K.permute_dimensions(pt, (1, 0, 2)) /
                  (spt + 0.0000001)), (1, 0, 2)
        )  #please note the added sign in comparison to the original paper

        #print(iszero.shape,deta.shape,dphi.shape,ppt.shape)
        #print(eta.shape,meta.shape)
        #print(phi.shape,mphi.shape)
        #exit()

        #print(iszero.shape,deta.shape,dphi.shape,pt.shape)
        #exit()

        #meta=K.reshape(meta,(-1,self.gs,1))
        #mphi=K.reshape(mphi,(-1,self.gs,1))
        #phi=K.reshape(phi,(-1,self.gs,1))

        ret = K.concatenate((iszero, deta, dphi, ppt),
                            axis=-1)  #adding iszero for numerical reasons

        #print(ret.shape,x.shape)
        #exit()

        return ret