def correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''

    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
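As a sanity check on the letterbox arithmetic above, here is a minimal NumPy sketch assuming a hypothetical 416x416 model input and a 480x640 (h, w) source image:

import numpy as np

input_shape = np.array([416., 416.])   # model input (h, w)
image_shape = np.array([480., 640.])   # original image (h, w)

# Same arithmetic as correct_boxes: resize by the smaller ratio (416/640 = 0.65),
# giving a 312x416 letterboxed image centered inside the 416x416 input.
new_shape = np.round(image_shape * np.min(input_shape / image_shape))  # [312. 416.]
offset = (input_shape - new_shape) / 2. / input_shape                  # [0.125 0.   ]
scale = input_shape / new_shape                                        # [1.333 1.   ]

# A box center predicted at (0.5, 0.5) in input coordinates maps back to the
# center of the original image once the padding is removed and the scale undone.
box_yx = (np.array([0.5, 0.5]) - offset) * scale   # [0.5 0.5]
print(new_shape, offset, scale, box_yx)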
Example #2
def mask_aware_max(x):
    mask = K.not_equal(K.sum(K.abs(x), axis=2, keepdims=True), 0)
    mask = K.cast(mask, 'float32')
    vecmin = K.min(x, axis=1, keepdims=True)

    xstar = x + (vecmin * (1 - mask))  # setting masked values to the min value

    return K.max(xstar, axis=1, keepdims=False)
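A small sketch of how the masking trick behaves, assuming rows that are all zero along the feature axis are padding (the input values below are purely illustrative):

from keras import backend as K

# One sequence of 3 timesteps with 2 features; the last timestep is all-zero padding.
x = K.constant([[[-1., -5.],
                 [-2., -3.],
                 [ 0.,  0.]]])

# A plain K.max over axis 1 would return [0., 0.] because the padding wins;
# mask_aware_max shifts the padded row to the per-feature minimum first.
print(K.eval(mask_aware_max(x)))  # expected [[-1., -3.]]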
Example #3
 def _batch_hard_triplet_loss(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
     mask_anchor_positive = self._get_anchor_positive_triplet_mask(y_true, pairwise_dist)
     anchor_positive_dist = mask_anchor_positive * pairwise_dist
     hardest_positive_dist = K.max(anchor_positive_dist, axis=1, keepdims=True)
     mask_anchor_negative = self._get_anchor_negative_triplet_mask(y_true, pairwise_dist)
     anchor_negative_dist = mask_anchor_negative * pairwise_dist
     mask_anchor_negative = self._get_semihard_anchor_negative_triplet_mask(anchor_negative_dist,
                                                                       hardest_positive_dist,
                                                                       mask_anchor_negative)
     max_anchor_negative_dist = K.max(pairwise_dist, axis=1, keepdims=True)
     anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
     hardest_negative_dist = K.min(anchor_negative_dist, axis=1, keepdims=True)
     triplet_loss = K.clip(hardest_positive_dist - hardest_negative_dist + self.margin, 0.0, None)
     triplet_loss = K.mean(triplet_loss)
     return triplet_loss
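The `_get_*_triplet_mask` helpers come from the surrounding class; a minimal NumPy sketch of the same batch-hard selection (hypothetical labels and distances, without the semi-hard refinement) looks like this:

import numpy as np

labels = np.array([0, 0, 1, 1])
pairwise_dist = np.array([[0.0, 0.2, 0.9, 0.7],
                          [0.2, 0.0, 0.8, 0.6],
                          [0.9, 0.8, 0.0, 0.3],
                          [0.7, 0.6, 0.3, 0.0]])
margin = 0.5

same = (labels[:, None] == labels[None, :]).astype(float)
pos_mask = same - np.eye(len(labels))   # same label, excluding the anchor itself
neg_mask = 1.0 - same                   # different label

hardest_pos = (pos_mask * pairwise_dist).max(axis=1, keepdims=True)
# Push masked-out negatives up by the row maximum so they can never win the min.
row_max = pairwise_dist.max(axis=1, keepdims=True)
hardest_neg = (pairwise_dist + row_max * (1.0 - neg_mask)).min(axis=1, keepdims=True)

print(np.maximum(hardest_pos - hardest_neg + margin, 0.0).mean())  # 0.075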
Example #4
    def starGAN_train(self, D_lr, G_lr, lamda_gp, lamda_cls, lamda_rec):

        x_real = Input(shape=self.image_size)
        label_real = Input(shape=(self.n_class,))
        label_fake = Input(shape=(self.n_class,))
        label_real_matrix = Input(shape=(self.image_size[0],self.image_size[1],self.n_class))
        label_fake_matrix = Input(shape=(self.image_size[0],self.image_size[1],self.n_class))
        x_fake = self.generator([x_real, label_fake_matrix])

        # loss for discriminator
        d_out_src_real, d_out_cls_real = self.discriminator(x_real)
        d_loss_real = -K.mean(d_out_src_real)
        d_loss_cls = K.mean(K.categorical_crossentropy(label_real, d_out_cls_real))
        # compute classification accuracy
        label_sub = d_out_cls_real - label_real
        c1 = 1 + K.min(label_sub, axis=1)  # minimum confidence among positions whose label is 1
        c2 = K.max(label_sub, axis=1)  # maximum confidence among positions whose label is 0
        d_acc = K.mean(K.cast(K.greater(c1 - c2, 0), K.floatx()))  # correct if the lowest label-1 confidence exceeds the highest label-0 confidence
        # label_pred = K.cast(K.greater(K.clip(d_out_cls_real, 0, 1), 0.5), K.floatx())
        # d_acc = 1 - K.mean(K.clip(K.sum(K.abs(label_real - label_pred), axis=1), 0, 1))
        d_out_src_fake, d_out_cls_fake = self.discriminator(x_fake)
        d_loss_fake = K.mean(d_out_src_fake)

        # gradient penalty
        e = K.placeholder(shape=(None, 1, 1, 1))
        x_mixed = Input(shape=self.image_size, tensor=e * x_real + (1 - e) * x_fake)
        x_mixed_gradient = K.gradients(self.discriminator(x_mixed), [x_mixed])[0]
        x_mixed_gradient_norm = K.sqrt(K.sum(K.square(x_mixed_gradient), axis=[1, 2, 3]))  # norm over all but the batch dimension
        gradient_penalty = K.mean(K.square(x_mixed_gradient_norm - 1))

        d_loss = d_loss_real + d_loss_fake + lamda_gp * gradient_penalty + lamda_cls * d_loss_cls
        d_training_updates = RMSprop(lr=D_lr).get_updates(d_loss, self.discriminator.trainable_weights)
        D_train = K.function([x_real, label_real, label_real_matrix, label_fake, label_fake_matrix, e], [d_loss, d_acc], d_training_updates)

        # loss for generator
        x_rec = self.generator([x_fake, label_real_matrix])
        g_out_src_fake, g_out_cls_fake = self.discriminator(x_fake)
        g_loss_fake = -K.mean(g_out_src_fake)
        g_loss_rec = K.mean(K.abs(x_real - x_rec))
        g_loss_cls = K.mean(K.categorical_crossentropy(label_fake, g_out_cls_fake))

        g_loss = g_loss_fake + lamda_rec * g_loss_rec + lamda_cls * g_loss_cls
        g_training_updates = RMSprop(lr=G_lr).get_updates(g_loss, self.generator.trainable_weights)
        G_train = K.function([x_real, label_real, label_real_matrix, label_fake, label_fake_matrix], [g_loss], g_training_updates)

        return D_train, G_train
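A hedged sketch of how the returned K.function objects might be driven in a training loop; the shapes and the way the spatial label matrices are built below are illustrative assumptions, not taken from the original class:

import numpy as np

batch, h, w, n_class = 16, 64, 64, 5   # hypothetical sizes

x_real = np.random.rand(batch, h, w, 3).astype('float32')
label_real = np.eye(n_class)[np.random.randint(n_class, size=batch)].astype('float32')
label_fake = np.eye(n_class)[np.random.randint(n_class, size=batch)].astype('float32')
# The "matrix" labels are the class vectors tiled over the spatial dimensions.
label_real_matrix = np.tile(label_real[:, None, None, :], (1, h, w, 1))
label_fake_matrix = np.tile(label_fake[:, None, None, :], (1, h, w, 1))
e = np.random.rand(batch, 1, 1, 1).astype('float32')  # gradient-penalty interpolation

d_loss, d_acc = D_train([x_real, label_real, label_real_matrix,
                         label_fake, label_fake_matrix, e])
g_loss, = G_train([x_real, label_real, label_real_matrix,
                   label_fake, label_fake_matrix])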
Example #5
def loss(y_true, y_pred):
        global output_feature
        y_pred_positive=y_pred[:,:output_feature]
        y_pred_label=y_pred[:,output_feature:2*output_feature]
        y_pred_negative1=y_pred[:,2*output_feature:3*output_feature]
        y_pred_negative2=y_pred[:,3*output_feature:4*output_feature]
        y_pred_negative3=y_pred[:,4*output_feature:5*output_feature]
        y_pred_negative4=y_pred[:,5*output_feature:6*output_feature]
        y_pred_negative5=y_pred[:,6*output_feature:7*output_feature]
        y_pred_negative6=y_pred[:,7*output_feature:8*output_feature]
        l = K.min(y_true - y_true, axis=1)  # all-zero tensor; keeps y_true in the computation graph
        return K.maximum(cosine_proximity(y_pred_positive, y_pred_negative1)-cosine_proximity(y_pred_positive, y_pred_label)+0.3, 0.)\
        +K.maximum(cosine_proximity(y_pred_positive, y_pred_negative2)-cosine_proximity(y_pred_positive, y_pred_label)+0.3, 0.)\
        +K.maximum(cosine_proximity(y_pred_positive, y_pred_negative3)-cosine_proximity(y_pred_positive, y_pred_label)+0.3, 0.)\
        +K.maximum(cosine_proximity(y_pred_positive, y_pred_negative4)-cosine_proximity(y_pred_positive, y_pred_label)+0.3, 0.)\
        +K.maximum(cosine_proximity(y_pred_positive, y_pred_negative5)-cosine_proximity(y_pred_positive, y_pred_label)+0.3, 0.)\
        +K.maximum(cosine_proximity(y_pred_positive, y_pred_negative6)-cosine_proximity(y_pred_positive, y_pred_label)+0.3, 0.)\
        +K.maximum(0.98-cosine_proximity(y_pred_positive, y_pred_label), 0.)+l
Example #6
 def step(self, input_energy_t, states, return_logZ=True):
     # Note: in the following, `prev_target_val` has shape = (B, F)
     # where B = batch_size, F = output feature dim
     # Note: `i` is of float32, due to the behavior of `K.rnn`
     prev_target_val, i, chain_energy = states[:3]
     t = K.cast(i[0, 0], dtype='int32')
     if len(states) > 3:
         if K.backend() == 'theano':
             m = states[3][:, t:(t + 2)]
         else:
             m = K.tf.slice(states[3], [0, t], [-1, 2])
         input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
         chain_energy = chain_energy * K.expand_dims(
             K.expand_dims(m[:, 0] * m[:, 1]))  # (1, F, F)*(B, 1, 1) -> (B, F, F)
     if return_logZ:
         energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val,
                                               2)  # shapes: (1, B, F) + (B, F, 1) -> (B, F, F)
         new_target_val = K.logsumexp(-energy, 1)  # shapes: (B, F)
         return new_target_val, [new_target_val, i + 1]
     else:
         energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
         min_energy = K.min(energy, 1)
         argmin_table = K.cast(K.argmin(energy, 1), K.floatx())  # cast for tf-version `K.rnn`
         return argmin_table, [min_energy, i + 1]
Example #7
def triplet_loss(y_true,
                 y_pred,
                 margin=input_margin,
                 P=P_param,
                 K=K_param,
                 output_dim=input_output_dim):
    embeddings = Keras.reshape(y_pred, (-1, output_dim))
    loss = Keras.variable(0, dtype='float32')

    for i in range(P):
        for a in range(K):
            pred_anchor = embeddings[i * K + a]
            hard_pos = Keras.max(
                dist(pred_anchor, embeddings[i * K:(i + 1) * K]))
            hard_neg = Keras.min(
                dist(
                    pred_anchor,
                    Keras.concatenate(
                        [embeddings[0:i * K], embeddings[(i + 1) * K:]], 0)))
            if margin is None:
                loss += log1p(hard_pos - hard_neg)
            else:
                loss += Keras.maximum(margin + hard_pos - hard_neg, 0.0)
    return loss
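`Keras` (the backend alias), `dist`, and `log1p` are assumed to come from the surrounding module. A plausible Euclidean `dist` helper consistent with how it is called above (one anchor embedding against a block of embeddings) might look like:

from keras import backend as Keras  # the snippet aliases the backend as `Keras`

def dist(anchor, embeddings):
    # Euclidean distance between one (D,) anchor and each row of an (M, D) block.
    return Keras.sqrt(Keras.sum(Keras.square(embeddings - anchor), axis=-1) + Keras.epsilon())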
Example #8
def ranking_loss_with_margin(y_pred, y_true):
    """
    Using this loss trains the model to give scores to all correct elements in y_true that are
    higher than all scores it gives to incorrect elements in y_true, plus a margin.

    For example, let ``y_true = [0, 0, 1, 1, 0]``, and let ``y_pred = [-1, 1, 2, 0, -2]``.  We will
    find the lowest score assigned to correct elements in ``y_true`` (``0`` in this case), and the
    highest score assigned to incorrect elements in ``y_true`` (``1`` in this case).  We will then
    compute a hinge loss given these values: ``K.maximum(0.0, 1 + 1 - 0)``.

    Note that the way we do this uses ``K.max()`` and ``K.min()`` over the elements in ``y_true``,
    which means that if you have a lot of values in here, you'll only get gradients backpropping
    through two of them (the ones on the margin).  This could be an inefficient use of your
    computation time.  Think carefully about the data that you're using with this loss function.

    Also, because of the way masking works with Keras loss functions, you need to be sure that any
    masked elements in ``y_pred`` have very negative values before they get passed into this loss
    function.
    """
    correct_elements = y_pred + (1.0 - y_true) * VERY_LARGE_NUMBER
    lowest_scoring_correct = K.min(correct_elements, axis=-1)
    incorrect_elements = y_pred + y_true * VERY_NEGATIVE_NUMBER
    highest_scoring_incorrect = K.max(incorrect_elements, axis=-1)
    return K.mean(K.maximum(0.0, 1.0 + highest_scoring_incorrect - lowest_scoring_correct))
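A quick check against the docstring's worked example, assuming the module-level constants are defined as below:

from keras import backend as K

VERY_LARGE_NUMBER = 1e30       # assumed module-level constants
VERY_NEGATIVE_NUMBER = -1e30

y_true = K.constant([[0., 0., 1., 1., 0.]])
y_pred = K.constant([[-1., 1., 2., 0., -2.]])
# Lowest correct score is 0, highest incorrect score is 1,
# so the hinge loss is max(0, 1 + 1 - 0) = 2.
print(K.eval(ranking_loss_with_margin(y_pred, y_true)))  # expected 2.0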
Example #9
    def call(self, x, mask=None):
        J = self.num_component

        # Initialize loss as zero, we'll add terms to it later
        loss = backend.variable(0.)

        # The mean vector = [mu_0, mu_{1,..., J}]
        means = backend.concatenate([backend.variable([0.]), self.means], axis=0)

        # Convert back to 1 / variance = precision
        precision = backend.exp(self.gammas)

        # Get the mixing proportions back from exp(tau)
        # Employ the log_sum_exp trick to prevent over/underflow
        # log \sum exp(x_i) = a + log \sum exp(x_i - a)
        # a = min x_i
        a = backend.min(self.rhos)
        mix_prop = backend.exp(self.rhos - a)
        mix_prop = (1 - self.pi_0) * mix_prop / backend.sum(mix_prop)
        mix_prop = backend.concatenate([backend.variable([self.pi_0]), mix_prop], axis=0)

        # Compute the negative log likelihood of the weights w.r.t to the mixture model
        loss += sum([self.nll(weights, mix_prop, means, precision) for weights in self.arr_net_weight])

        # Gamma hyper-prior parameters - on the zero mean gaussian
        (alpha_0, beta_0) = (5000.0, 2.0)
        negative_log_prob = (1 - alpha_0) * backend.gather(self.gammas, [0]) + beta_0 * backend.gather(precision, [0])
        loss += backend.sum(negative_log_prob)

        # Gamma hyper-prior parameters - on the rest of the gaussians
        alpha, beta = (250, 0.1)
        index = np.arange(1, J)
        negative_log_prob = (1 - alpha) * backend.gather(self.gammas, index) + beta * backend.gather(precision, index)

        loss += backend.sum(negative_log_prob)
        return loss
Example #10
def msml_loss(y_true, y_pred):
    global SN
    global PN
    feat_num = SN*PN # total number of images in the batch (SN samples x PN identities)
    y_pred = K.l2_normalize(y_pred,axis=1)
    feat1 = K.tile(K.expand_dims(y_pred,axis = 0),[feat_num,1,1])
    feat2 = K.tile(K.expand_dims(y_pred,axis = 1),[1,feat_num,1])
    delta = feat1 - feat2
    dis_mat = K.sum(K.square(delta),axis = 2) + K.epsilon() # avoid NaN gradients from sqrt(0)
    dis_mat = K.sqrt(dis_mat)
    positive = dis_mat[0:SN,0:SN]
    negetive = dis_mat[0:SN,SN:]
    for i in range(1,PN):
        positive = tf.concat([positive,dis_mat[i*SN:(i+1)*SN,i*SN:(i+1)*SN]],axis = 0)
        if i != PN-1:
            negs = tf.concat([dis_mat[i*SN:(i+1)*SN,0:i*SN],dis_mat[i*SN:(i+1)*SN, (i+1)*SN:]],axis = 1)
        else:
            negs = tf.concat(dis_mat[i*SN:(i+1)*SN, 0:i*SN],axis = 0)
        negetive = tf.concat([negetive,negs],axis = 0)
    positive = K.max(positive)
    negetive = K.min(negetive) 
    a1 = 0.6
    loss = K.mean(K.maximum(0.0,positive-negetive+a1))
    return loss 
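The slicing above assumes the batch is laid out as PN identities with SN consecutive images each; a small NumPy sketch (hypothetical SN=2, PN=3) shows which entries of `dis_mat` end up in the positive and negative blocks:

import numpy as np

SN, PN = 2, 3
labels = np.repeat(np.arange(PN), SN)   # assumed batch order: [0 0 1 1 2 2]

same_id = (labels[:, None] == labels[None, :]).astype(int)
# For each row block i*SN:(i+1)*SN, the columns where same_id == 1 are the
# intra-identity (positive) distances; everything else is a negative distance.
print(same_id)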
Example #11
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example #12
def triplet_hard_loss(y_true, y_pred):
    global SN
    global PN
    feat_num = SN*PN # total number of images in the batch (SN samples x PN identities)
    y_pred = K.l2_normalize(y_pred,axis=1)
    feat1 = K.tile(K.expand_dims(y_pred,axis = 0),[feat_num,1,1])
    feat2 = K.tile(K.expand_dims(y_pred,axis = 1),[1,feat_num,1])
    delta = feat1 - feat2
    dis_mat = K.sum(K.square(delta),axis = 2)
    dis_mat = K.sqrt(dis_mat) + 1e-8  # the 1e-8 offset is optional
    positive = dis_mat[0:SN,0:SN]
    negetive = dis_mat[0:SN,SN:]
    for i in range(1,PN):
        positive = tf.concat([positive,dis_mat[i*SN:(i+1)*SN,i*SN:(i+1)*SN]],axis = 0)
        if i != PN-1:
            negs = tf.concat([dis_mat[i*SN:(i+1)*SN,0:i*SN],dis_mat[i*SN:(i+1)*SN, (i+1)*SN:]],axis = 1)
        else:
            negs = tf.concat(dis_mat[i*SN:(i+1)*SN, 0:i*SN],axis = 0)
        negetive = tf.concat([negetive,negs],axis = 0)
    positive = K.max(positive,axis=1)
    negetive = K.min(negetive,axis=1) 
    a1 = 0.6
    loss = K.mean(K.maximum(0.0,positive-negetive+a1))
    return loss 
Example #13
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    '''Restore boxes to the original image's true physical size
    @param box_xy      predictions, offsets relative to the whole image, in [0, 1], shape=>(batch_size,height,width,num_anchors,2)
    @param box_wh      predictions, size ratios relative to the whole image,        shape=>(batch_size,height,width,num_anchors,2)
    @param input_shape model input size, val=>(416,416)
    @param image_shape true image size, shape=>(?,?)
    @return boxes
    '''
    # values are still normalized at this point
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))  # model input size
    image_shape = K.cast(image_shape, K.dtype(box_yx))  # true image size
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))  # shape of the original image after aspect-preserving resize
    offset = (input_shape - new_shape) / 2. / input_shape  # normalized border offset between the resized image and the model input
    scale = input_shape / new_shape  # image scaling factor
    # restore true values; box_yx - offset inverts get_random_data's box[:, [0,2]] = box[:, [0,2]]*scale + dx
    box_yx = (box_yx - offset) * scale
    box_hw *= scale  # restore true values

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape (restore true physical size).
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example #14
    def yolo_trans_boxes(box_xy, box_wh, input_shape, image_shape):
        # get the box x, y, w, h information
        box_yx = box_xy[..., ::-1]
        box_hw = box_wh[..., ::-1]
        # convert the shapes
        input_shape = K.cast(input_shape, K.dtype(box_yx))
        image_shape = K.cast(image_shape, K.dtype(box_yx))
        new_shape = K.round(image_shape * K.min(input_shape / image_shape))

        # compute the box's actual position
        offset = (input_shape - new_shape) / 2. / input_shape
        scale = input_shape / new_shape
        box_yx = (box_yx - offset) * scale
        box_hw *= scale

        # convert to corner-format boxes
        box_mins = box_yx - (box_hw / 2.)
        box_maxes = box_yx + (box_hw / 2.)
        boxes = K.concatenate([
            box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1],
            box_maxes[..., 1:2]
        ])
        boxes *= K.concatenate([image_shape, image_shape])
        return boxes
Example #15
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):

    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes =  K.concatenate([
        box_mins[..., 0:1],
        box_mins[..., 1:2],
        box_maxes[..., 0:1],
        box_maxes[..., 1:2]
    ])


    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example #16
def min(t, axis=None, keepdims=False):
    """
    min Minimum values of a tensor, reduced along an axis

    Parameters
    ----------
    t : backend.Tensor
    axis : int, optional
        The dimension to min over, or if None min over all
        dimensions. By default None
    keepdims : bool, optional
        If `keepdims` is `False`, the rank of the tensor is reduced
        by 1. If `keepdims` is `True`, the reduced dimension is retained
        with length 1. By default False.

    Returns
    -------
    backend.Tensor
        Min of t
    """
    if isinstance(t, np.ndarray):
        return t.min(axis=axis, keepdims=keepdims)

    return K.min(t, axis, keepdims)
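A brief usage sketch covering both branches (note that this wrapper shadows Python's built-in `min` inside its module):

import numpy as np
from keras import backend as K

a = np.array([[3., 1., 2.],
              [0., 5., 4.]])
print(min(a, axis=1))                       # NumPy path   -> [1. 0.]
print(K.eval(min(K.constant(a), axis=1)))   # backend path -> [1. 0.]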
Example #17
 def _batch_hard_triplet_loss(self, y_true: Tensor,
                              pairwise_dist: Tensor) -> Tensor:
     mask_anchor_positive = self._get_anchor_positive_triplet_mask(
         y_true, pairwise_dist)
     anchor_positive_dist = mask_anchor_positive * pairwise_dist
     hardest_positive_dist = K.max(anchor_positive_dist,
                                   axis=1,
                                   keepdims=True)
     mask_anchor_negative = self._get_anchor_negative_triplet_mask(
         y_true, pairwise_dist)
     anchor_negative_dist = mask_anchor_negative * pairwise_dist
     mask_anchor_negative = self._get_semihard_anchor_negative_triplet_mask(
         anchor_negative_dist, hardest_positive_dist, mask_anchor_negative)
     max_anchor_negative_dist = K.max(pairwise_dist, axis=1, keepdims=True)
     anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (
         1.0 - mask_anchor_negative)
     hardest_negative_dist = K.min(anchor_negative_dist,
                                   axis=1,
                                   keepdims=True)
     triplet_loss = K.clip(
         hardest_positive_dist - hardest_negative_dist + self.margin, 0.0,
         None)
     triplet_loss = K.mean(triplet_loss)
     return triplet_loss
Example #18
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''
    Using the coefficients obtained above, compute the true box positions.

    :param box_xy:
    :param box_wh:
    :param input_shape:
    :param image_shape:
    :return:
    '''
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]  # center coordinates; the next line is height/width
    box_hw = box_wh[..., ::-1]  # reverse to yx / hw order first
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape  # new_shape is the size of the resized image, i.e. the image size the labels were built against
    box_yx = (box_yx - offset) * scale  # image_shape corresponds to new_shape with an extra border of zeros around it
    # box_yx is the box center in [0, 1], as a fraction of the whole image.

    # The resulting box_yx is the center as a fraction of the new_shape-sized image.
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([  # concatenates along the last axis by default
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example #19
def huber_loss(y_true, y_pred):
    d = 0.15
    x = K.abs(y_true - y_pred)
    d_t = d * K.ones_like(x)
    quad = K.min(K.stack([x, d_t], axis=-1), axis=-1)
    return (0.5 * K.square(quad) + d * (x - quad))
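This is the standard Huber loss with delta = 0.15: quadratic for errors within delta and linear beyond it. A quick numeric check:

from keras import backend as K

y_true = K.constant([0.0, 0.0])
y_pred = K.constant([0.1, 1.0])
# |error| = 0.1 (inside delta):  0.5 * 0.1**2                        = 0.005
# |error| = 1.0 (outside delta): 0.5 * 0.15**2 + 0.15 * (1.0 - 0.15) = 0.13875
print(K.eval(huber_loss(y_true, y_pred)))  # expected [0.005, 0.13875]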
Example #20
def categorical_accuracy_per_sequence(y_true, y_pred):
    # A sequence counts as correct only if every timestep matches, hence the K.min over the time axis.
    return K.mean(K.min(K.cast(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx()), axis=-1))
Example #21
 def active(y_true, y_pred):
     return K.max(y_pred) - K.min(y_pred)
Example #22
def DirectedSumMin1(distance, x, y, N):
    x2, y2 = _repeat_nxn_matrix(x,y,N)
    d  = K.sum(distance(x2,y2), axis=-1) # [batch, N, N]

    sum_x = K.sum(K.min(d, axis=2), axis=1) # [batch]
    return K.mean(sum_x)
Example #23
def max_min_1d(X):
    max_ = K.max(X, axis=1)
    min_ = K.min(X, axis=1)
    return K.concatenate([max_, min_], axis=1)
Example #24
 def _all_accuracy(y_true, y_pred):
     acc_all = K.cast(K.equal(y_true, K.round(y_pred)), 'int8')
     acc_batch = K.min(acc_all, axis=[1, 2, 3, 4, 5])
     acc = K.mean(K.cast(acc_batch, 'float32'), axis=-1)
     return acc
Example #25
 def call(self, inputs):
     if self.data_format == 'channels_last':
         pooled = K.min(inputs, axis=[1, 2])
     else:
         pooled = K.min(inputs, axis=[2, 3])
     return pooled
Example #26
def metrics_pred_min(y_true, y_pred):
    return K.min(y_pred)
Example #27
def act_min(y_true, y_pred):
    return K.min(y_pred)
Example #28
def exact_accuracy(y_true, y_pred):
    return K.min(K.cast(K.equal(y_true, y_pred), K.floatx()), axis=-1)
Example #29
 def metrics_pred_min(y_true, y_pred):
     return K.min(y_pred)
Example #30
    fcsv = csv.reader(f)
    for i in range(10):
        next(fcsv)
    a = next(fcsv)
    pxl = a[1]

pxl = np.fromstring(pxl, dtype=float, sep=' ').reshape((1, 48, 48, 1))
input_img = model.input

val_proba = model.predict(pxl)
pred = val_proba.argmax(axis=-1)
target = K.mean(model.output[:, pred])
grads = K.gradients(target, input_img)[0]

grads = (grads - K.mean(grads)) / K.std(grads)
grads = (grads - K.min(grads)) / (K.max(grads) - K.min(grads))

fn = K.function([input_img, K.learning_phase()], [grads])

heatmap = fn([pxl, False])
heatmap = np.array(heatmap).reshape(48, 48)

thres = 0.5
see = pxl.reshape(48, 48)
see[np.where(heatmap <= thres)] = np.mean(see)

plt.figure()
plt.imshow(heatmap, cmap=plt.cm.jet)
plt.colorbar()
plt.tight_layout()
fig = plt.gcf()
Example #31
 def min_error(y_true, y_pred):
     return K.min(K.abs(y_true - y_pred))
Example #32
 def __call__(self, w):
     inc = K.minimum(w, K.epsilon()) * self.rate
     pos = w - K.min(inc, axis=self.axis, keepdims=True)
     abs_sum = K.sum(K.abs(pos), axis=self.axis, keepdims=True)
     desired = self.rate + (1 - self.rate) * abs_sum
     return pos * desired / (K.maximum(K.epsilon(), abs_sum))
Example #33
def ctn(X):
    max_ = K.max(X, axis=1)
    min_ = K.min(X, axis=1)
    mean_ = K.mean(X, axis=1)
    return K.concatenate([mean_, max_, min_], axis=1)
Example #34
    def call(self, x):
        """Build the actual logic."""

        # (None, n_features, n_particles) -> (None, n_features, n_particles)

        if self.debug:
            x = K.variable(
                np.array([[[
                    229.46118164, 132.46817017, 26.43243217, 13.2313776,
                    5.75571156
                ],
                           [
                               -195.08522034, -113.19028473, -22.73009872,
                               -10.31623554, -4.25184822
                           ],
                           [
                               -114.19178772, -65.08143616, -12.34527397,
                               -8.04754353, -2.59461427
                           ],
                           [
                               -39.42618179, -22.36474037, -5.44153976,
                               -1.97019398, -2.88409066
                           ]],
                          [[129., 135., 26., 15., 7.],
                           [-105., -114., -20., -10., -6.],
                           [-100., -60., -10., -8., -1.],
                           [-32., -20., -5., -1., -2.]]]))

        weight_index = 0
        metric_index = 0
        out_features = []

        metric3 = K.variable(np.array([0., 1., 1., 1.]))

        # Our input is of the form
        # (b,f,p)
        # -> (batch_size, features, particles)

        # Let's build a few helpful matrices

        # All the individual dimensions
        # bp
        Es = x[:, 0, :]
        Xs = x[:, 1, :]
        Ys = x[:, 2, :]
        Zs = x[:, 3, :]

        #        A1s = x[:,4,:]
        #        A2s = x[:,5,:]

        # Element wise square of x
        # bfp
        x2 = pow(x, 2)

        # Mass^2 and transverse momentum^2
        # bp
        Ms = x2[:, 0, :] - x2[:, 1, :] - x2[:, 2, :] - x2[:, 3, :]
        Pts = K.abs(K.sqrt(x2[:, 1, :] + x2[:, 2, :]))

        #Pts = K.map_fn( lambda x:K.switch(K.less(x,self.t[0,0]),0,1), Es)

        if self.ms:
            out_features.append(Ms)
        if self.es:
            out_features.append(Es)
        if self.pts:
            out_features.append(Pts)

#        out_features.append(A1s)
#        out_features.append(A2s)

        for i in range(self.n_train_es):
            out_features.append(
                theano.tensor.tensordot(Es,
                                        self.w[weight_index, :, :],
                                        axes=[1, 0]))
            weight_index += 1

        for i in range(self.n_train_ms):
            out_features.append(
                theano.tensor.tensordot(Ms,
                                        self.w[weight_index, :, :],
                                        axes=[1, 0]))
            weight_index += 1

        for i in range(self.n_train_pts):
            out_features.append(
                theano.tensor.tensordot(Pts,
                                        self.w[weight_index, :, :],
                                        axes=[1, 0]))
            weight_index += 1

        for i in range(self.n_train_a1s):
            out_features.append(
                theano.tensor.tensordot(A1s,
                                        self.w[weight_index, :, :],
                                        axes=[1, 0]))
            weight_index += 1

        for i in range(self.n_train_a2s):
            out_features.append(
                theano.tensor.tensordot(A2s,
                                        self.w[weight_index, :, :],
                                        axes=[1, 0]))
            weight_index += 1

        # Helper tensor for building sums/differences
        # magic1: 111 000 000
        #         000 111 000
        #         000 000 111
        #
        # magic2: 100 100 100
        #         010 010 010
        #         001 001 001
        magic1 = K.repeat_elements(K.eye(self.n_particles), self.n_particles,
                                   1)
        magic2 = K.tile(K.eye(self.n_particles), [1, self.n_particles])
        magic_diff = magic1 - magic2
        magic_sum = magic1 + magic2  # builds k_i + k_j (magic_diff above builds k_i - k_j)

        # Build d_ij^2 = (k_i - k_j)^mu (k_i - k_j)^nu eta_mu_nu

        # b f p p'
        # x * magic  gives b f p^2, reshape to b f p p'
        d2_ij = K.reshape(K.expand_dims(K.dot(x, magic_diff), -1),
                          (x.shape[0], x.shape[1], x.shape[2], x.shape[2]))

        # elements squared
        d2_ij = K.pow(d2_ij, 2)

        # fold with the metric
        # b f p p' * f  = b p p'

        for i in range(self.n_train_dijs):

            if self.debug:
                print("metric:")
                print(K.eval(self.m[metric_index, :].shape))
                print(K.eval(self.m[metric_index, :]))

            if self.debug:
                print("d2_ij:")
                print(K.eval(d2_ij.shape))
                print(K.eval(d2_ij))

            if self.train_metric:
                metric = self.m[metric_index, :]
                metric_index += 1
            else:
                metric = K.variable(np.array([-1., 1., 1., 1.]))

            m_d2_ij = theano.tensor.tensordot(d2_ij, metric, axes=[1, 0])

            out_features.append(
                K.sum(theano.tensor.tensordot(m_d2_ij,
                                              self.w[weight_index, :, :],
                                              axes=[1, 0]),
                      axis=2))
            weight_index += 1

        if self.debug:
            print("Done with d2_ijs")

        # Build d_ij^2 = (k_i - k_j)^mu (k_i - k_j)^nu eta_mu_nu

        # b f p p'
        # x * magic  gives b f p^2, reshape to b f p p'
        d2_3d_ij = K.reshape(K.expand_dims(K.dot(x, magic_diff), -1),
                             (x.shape[0], x.shape[1], x.shape[2], x.shape[2]))

        # elements squared
        d2_3d_ij = K.pow(d2_3d_ij, 2)

        # fold with the metric
        # b f p p' * f  = b p p'
        d2_3d_ij = theano.tensor.tensordot(d2_3d_ij, metric3, axes=[1, 0])

        for i in range(self.n_train_dijs_3d):
            out_features.append(
                K.sum(theano.tensor.tensordot(d2_3d_ij,
                                              self.w[weight_index, :, :],
                                              axes=[1, 0]),
                      axis=2))
            weight_index += 1

        # Build m_ij^2 = (k_i + k_j)^mu (k_i + k_j)^nu eta_mu_nu

        # b f p p'
        # x * magic  gives b f p^2, reshape to b f p p'
        m2_ij = K.reshape(K.expand_dims(K.dot(x, magic_sum), -1),
                          (x.shape[0], x.shape[1], x.shape[2], x.shape[2]))

        # elements squared
        m2_ij = K.pow(m2_ij, 2)

        # fold with the metric
        # b f p p' * f  = b p p'

        for i in range(self.n_train_mijs):

            if self.train_metric:
                metric = self.m[metric_index, :]
                metric_index += 1
            else:
                metric = K.variable(np.array([-1., 1., 1., 1.]))

            m_m2_ij = theano.tensor.tensordot(m2_ij, metric, axes=[1, 0])

            out_features.append(
                K.min(theano.tensor.tensordot(m_m2_ij,
                                              self.w[weight_index, :, :],
                                              axes=[1, 0]),
                      axis=2))
            weight_index += 1

        if self.debug:
            print("done with m2_ij")

        # Build m_ij^2 = (k_i + k_j)^mu (k_i + k_j)^nu (only 3metric)

        # b f p p'
        # x * magic  gives b f p^2, reshape to b f p p'
        m2_3d_ij = K.reshape(K.expand_dims(K.dot(x, magic_sum), -1),
                             (x.shape[0], x.shape[1], x.shape[2], x.shape[2]))

        # elements squared
        m2_3d_ij = K.pow(m2_3d_ij, 2)

        # fold with the metric
        # b f p p' * f  = b p p'
        m2_3d_ij = theano.tensor.tensordot(m2_3d_ij, metric3, axes=[1, 0])

        for i in range(self.n_train_mijs_3d):
            out_features.append(
                K.sum(theano.tensor.tensordot(m2_3d_ij,
                                              self.w[weight_index, :, :],
                                              axes=[1, 0]),
                      axis=2))
            weight_index += 1

        # Build cos_ij = m_ij^2 / 2 E_i E_j

        Ei = K.expand_dims(Es, -1)  # bpN
        Ej = K.expand_dims(Es, -2)  # bNp
        Eij = Ei * Ej  # bpp
        Eij = K.clip(Eij, 0.0001, 100000000.)

        ratio_m2ij_Eij = m2_ij * pow(Eij, -1)

        onemat = K.ones((self.n_particles, self.n_particles))
        onemat = K.expand_dims(onemat, 0)

        cos_ij = onemat + ratio_m2ij_Eij

        for i in range(self.n_train_cosijs):
            out_features.append(
                K.sum(theano.tensor.tensordot(cos_ij,
                                              self.w[weight_index, :, :],
                                              axes=[1, 0]),
                      axis=2))
            weight_index += 1

#        if self.debug:
#            print ("Eij:")
#            print (K.eval(Eij))
#            print (K.eval(Eij.shape))
#            print ("sum_ij:")
#            print (K.eval(sum_ij))
#            print (K.eval(sum_ij.shape))
#            print ("ratio:")
#            print (K.eval(ratio_dij_Eij))
#            print (K.eval(ratio_dij_Eij.shape))
#            print ("cos:")
#            print (K.eval(cos))
#            print (K.eval(cos.shape))

# TODO: Also enable these..
        Pts_over_lead = Pts / K.repeat_elements(
            K.expand_dims(Pts[:, 0], axis=-1), self.n_particles, -1)
        Es_over_lead = Es / K.repeat_elements(K.expand_dims(Es[:, 0], axis=-1),
                                              self.n_particles, -1)

        results = K.stack(out_features, axis=1)

        if self.debug:
            print("results:")
            print(K.eval(results))
            print(K.eval(results.shape))

        if self.debug:
            sys.exit()

        return results
Example #35
 def call(self, inputs, **kwargs):
     return K.min(inputs, axis=1)
Example #36
def f1_min(y_true, y_pred):
    f1 = base_f1(y_true, y_pred)
    return K.min(f1)
Example #37
def DirectedSumMin2(distance, x, y, N):
    x2, y2 = _repeat_nxn_matrix(x,y,N)
    d  = K.sum(distance(x2,y2), axis=-1) # [batch, N, N]

    sum_y = K.sum(K.min(d, axis=1), axis=1) # [batch] --- mind the axis
    return K.mean(sum_y)
Example #38
def loss_hardnet(x, anchor_swap = False, anchor_ave = False,\
                margin = 100.0, batch_reduce = 'min', loss_type = "triplet_margin"):
    """HardNet margin loss - calculates loss based on distance matrix based on positive distance and closest negative distance.
    """
    anchor, positive = x
    #print("anchor_shape, pos shape")
    #assert anchor.shape == positive.shape, "Input sizes between positive and negative must be equal."
    assert len(anchor.shape) == 2, "Input must be a 2D matrix."
    eps = 1e-8
    dist_matrix = distance_matrix_vector(anchor, positive) + eps
    eye = tf.cast(tf.diag(tf.fill(tf.shape(dist_matrix[0]), 1)),
                  dist_matrix.dtype)

    # steps to filter out same patches that occur in distance matrix as negatives
    pos1 = tf.linalg.tensor_diag_part(dist_matrix)
    dist_without_min_on_diag = dist_matrix + eye * 10
    #con =  tf.constant([0.008], dtype=dist_without_min_on_diag.dtype)
    #mask = (tf.cast(tf.math.greater_equal(dist_without_min_on_diag, con), dist_without_min_on_diag.dtype)-1)*-1
    #mask = (dist_without_min_on_diag.ge(0.008).float()-1)*-1

    #mask = tf.cast(mask, dist_without_min_on_diag.dtype)*10

    #dist_without_min_on_diag = dist_without_min_on_diag+mask
    if batch_reduce == 'min':
        min_neg = K.min(dist_without_min_on_diag, 1)  # closest negative for each anchor
        if anchor_swap:
            min_neg2 = K.min(dist_without_min_on_diag, 0)
            min_neg = K.minimum(min_neg, min_neg2)
        # if False:
        #     dist_matrix_a = distance_matrix_vector(anchor, anchor)+ eps
        #     dist_matrix_p = distance_matrix_vector(positive,positive)+eps
        #     dist_without_min_on_diag_a = dist_matrix_a+eye*10
        #     dist_without_min_on_diag_p = dist_matrix_p+eye*10
        #     min_neg_a = torch.min(dist_without_min_on_diag_a,1)[0]
        #     min_neg_p = torch.t(torch.min(dist_without_min_on_diag_p,0)[0])
        #     min_neg_3 = torch.min(min_neg_p,min_neg_a)
        #     min_neg = torch.min(min_neg,min_neg_3)
        #     print (min_neg_a)
        #     print (min_neg_p)
        #     print (min_neg_3)
        #     print (min_neg)
        pos = pos1
        #print("min_neg, pos", K.eval(min_neg), K.eval(pos))
    # elif batch_reduce == 'average':
    #     pos = pos1.repeat(anchor.size(0)).view(-1,1).squeeze(0)
    #     min_neg = dist_without_min_on_diag.view(-1,1)
    #     if anchor_swap:
    #         min_neg2 = torch.t(dist_without_min_on_diag).contiguous().view(-1,1)
    #         min_neg = torch.min(min_neg,min_neg2)
    #     min_neg = min_neg.squeeze(0)
    # elif batch_reduce == 'random':
    #     idxs = torch.autograd.Variable(torch.randperm(anchor.size()[0]).long()).cuda()
    #     min_neg = dist_without_min_on_diag.gather(1,idxs.view(-1,1))
    #     if anchor_swap:
    #         min_neg2 = torch.t(dist_without_min_on_diag).gather(1,idxs.view(-1,1))
    #         min_neg = torch.min(min_neg,min_neg2)
    #     min_neg = torch.t(min_neg).squeeze(0)
    #     pos = pos1
    # else:
    #     print ('Unknown batch reduce mode. Try min, average or random')
    #     sys.exit(1)
    if loss_type == "triplet_margin":
        loss = K.clip(margin + pos - min_neg, 0.0, 1e10)
    # elif loss_type == 'softmax':
    #     exp_pos = torch.exp(2.0 - pos);
    #     exp_den = exp_pos + torch.exp(2.0 - min_neg) + eps;
    #     loss = - torch.log( exp_pos / exp_den )
    # elif loss_type == 'contrastive':
    #     loss = torch.clamp(margin - min_neg, min=0.0) + pos;
    # else:
    #     print ('Unknown loss type. Try triplet_margin, softmax or contrastive')
    #     sys.exit(1)
    loss = K.expand_dims(loss, axis=1)  #K.mean(loss)
    return loss
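`distance_matrix_vector` is assumed from the surrounding module; a plausible definition consistent with its use above (pairwise L2 distances between the anchor batch and the positive batch) could be:

from keras import backend as K

def distance_matrix_vector(anchor, positive):
    # Pairwise Euclidean distances via ||a - p||^2 = ||a||^2 + ||p||^2 - 2 a.p,
    # clipped at zero to absorb small negative values from floating-point error.
    d1 = K.sum(K.square(anchor), axis=1, keepdims=True)     # (N, 1)
    d2 = K.sum(K.square(positive), axis=1, keepdims=True)   # (N, 1)
    sq = d1 + K.transpose(d2) - 2.0 * K.dot(anchor, K.transpose(positive))
    return K.sqrt(K.maximum(sq, 0.0))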
Example #39
 def min_pred(y_true, y_pred):
     return K.min(y_pred)
Example #40
def content_loss(base, style, combination):
    # Changes from equation 7 (Pg# 5)
    G = style / (base + 1e-04)
    G_clamped = K.maximum(K.minimum(G, g_max), g_min) # Clamping values to [g_min, g_max]
    Fm = base * G_clamped
    return K.sum(K.square(combination - Fm))