import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K


def overlap(x1, w1, x2, w2):
    # 1-D overlap of two boxes given their centers (x1, x2) and widths (w1, w2).
    l1 = x1 - w1 / 2.
    l2 = x2 - w2 / 2.
    left = tf.where(K.greater(l1, l2), l1, l2)    # rightmost left edge
    r1 = x1 + w1 / 2.
    r2 = x2 + w2 / 2.
    right = tf.where(K.greater(r1, r2), r2, r1)   # leftmost right edge
    return right - left
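A quick sanity check, assuming eager TensorFlow 2.x and the imports above (values are made up): two 1-D boxes centered at 2.0 and 3.0, each 2.0 wide, should share exactly one unit of length.

ow = overlap(tf.constant(2.0), tf.constant(2.0), tf.constant(3.0), tf.constant(2.0))
print(float(ow))  # 1.0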
Example No. 2
 def _get_semihard_anchor_negative_triplet_mask(self, negative_dist: Tensor,
                                                hardest_positive_dist: Tensor,
                                                mask_negative: Tensor) -> Tensor:
     # mask max(dist(a,p)) < dist(a,n)
     mask = K.greater(negative_dist, hardest_positive_dist)
     mask = K.cast(mask, K.dtype(negative_dist))
     mask_semihard = K.cast(K.expand_dims(K.greater(K.sum(mask, 1), 0.0), 1), K.dtype(negative_dist))
     mask = mask_negative * (1 - mask_semihard) + mask * mask_semihard
     return mask
Example No. 3
def iou(x_true, y_true, w_true, h_true, x_pred, y_pred, w_pred, h_pred, t, pred_confid_tf):
    x_true = K.expand_dims(x_true, 2)
    y_true = K.expand_dims(y_true, 2)
    w_true = K.expand_dims(w_true, 2)
    h_true = K.expand_dims(h_true, 2)
    x_pred = K.expand_dims(x_pred, 2)
    y_pred = K.expand_dims(y_pred, 2)
    w_pred = K.expand_dims(w_pred, 2)
    h_pred = K.expand_dims(h_pred, 2)

    xoffset = K.expand_dims(tf.convert_to_tensor(np.asarray([0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7], dtype=np.float32)),1)
    yoffset = K.expand_dims(tf.convert_to_tensor(np.asarray([0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4], dtype=np.float32)),1)


    # xoffset = K.cast_to_floatx((np.tile(np.arange(side),side)))
    # yoffset = K.cast_to_floatx((np.repeat(np.arange(side),side)))
    x = tf.where(t, x_pred, K.zeros_like(x_pred))
    y = tf.where(t, y_pred, K.zeros_like(y_pred))
    w = tf.where(t, w_pred, K.zeros_like(w_pred))
    h = tf.where(t, h_pred, K.zeros_like(h_pred))

    ow = overlap(x + xoffset, w * 256., x_true + xoffset, w_true * 256.)
    oh = overlap(y + yoffset, h * 160., y_true + yoffset, h_true * 160.)  # height scale is 160, matching the union term below

    ow = tf.where(K.greater(ow, 0), ow, K.zeros_like(ow))
    oh = tf.where(K.greater(oh, 0), oh, K.zeros_like(oh))
    intersection = ow * oh
    union = w * 256. * h * 160. + w_true * 256. * h_true * 160.  - intersection + K.epsilon()  # prevent div 0

    #
    # find best iou among bboxs
    # iouall shape=(-1, bnum*gridcells)
    iouall = intersection / union
    obj_count = K.sum(tf.where(t, K.ones_like(x_true), K.zeros_like(x_true)))

    ave_iou = K.sum(iouall) / (obj_count + 0.0000001)
    recall_t = K.greater(iouall, 0.5)
    # recall_count = K.sum(tf.select(recall_t, K.ones_like(iouall), K.zeros_like(iouall)))

    fid_t = K.greater(pred_confid_tf, 0.3)
    recall_count_all = K.sum(tf.where(fid_t, K.ones_like(iouall), K.zeros_like(iouall)))

    # count predictions that are confident, fall on an object cell and clear the 0.5 IoU bar
    obj_fid_t = tf.logical_and(tf.logical_and(fid_t, t), recall_t)
    effective_iou_count = K.sum(tf.where(obj_fid_t, K.ones_like(iouall), K.zeros_like(iouall)))

    recall = effective_iou_count / (obj_count + 0.00000001)
    precision = effective_iou_count / (recall_count_all + 0.0000001)
    return ave_iou, recall, precision, obj_count, intersection, union, ow, oh, x, y, w, h
Example No. 4
    def get_split_averages(input_tensor, input_mask, indices):
        # Splits input tensor into three parts based on the indices and
        # returns average of values prior to index, values at the index and
        # average of values after the index.
        # input_tensor: (batch_size, input_length, input_dim)
        # input_mask: (batch_size, input_length)
        # indices: (batch_size, 1)
        # (1, input_length)
        length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), axis=0)
        # (batch_size, input_length)
        batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)
        tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1)  # (batch_size, input_length)
        greater_mask = K.greater(batched_range, tiled_indices)  # (batch_size, input_length)
        lesser_mask = K.less(batched_range, tiled_indices)  # (batch_size, input_length)
        equal_mask = K.equal(batched_range, tiled_indices)  # (batch_size, input_length)

        # We also need to mask these masks using the input mask.
        # (batch_size, input_length)
        if input_mask is not None:
            # `switch` is an elementwise-conditional helper defined elsewhere in the library
            greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))
            lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))

        post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)

        post_normalizer = K.expand_dims(K.sum(greater_mask, axis=1) + K.epsilon(), axis=1)  # (batch_size, 1)
        pre_normalizer = K.expand_dims(K.sum(lesser_mask, axis=1) + K.epsilon(), axis=1)  # (batch_size, 1)

        return K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices, K.cast(post_sum / post_normalizer, 'float32')
Example No. 5
 def call(self, X):
     if type(X) is not list or len(X) != 2:
         raise Exception("GaussianAttention must be called on a list of two tensors. Got: " + str(X))
     
     frame, position  = X[0], X[1]
     
     # Reshaping the input to exclude the time dimension
     frameShape = K.shape(frame)
     positionShape = K.shape(position)
     (chans, height, width) = frameShape[-3:]
     targetDim = positionShape[-1]
     frame = K.reshape(frame, (-1, chans, height, width))
     position = K.reshape(position, (-1, targetDim))
     
     cx = (position[:, 0] + position[:, 2]) / 2.0
     cy = (position[:, 1] + position[:, 3]) / 2.0
     sx = (position[:, 2] - cx) * 0.60
     sy = (position[:, 3] - cy) * 0.60
     rX = Data.linspace(-1.0, 1.0, width)
     rY = Data.linspace(-1.0, 1.0, height)
     FX = K.exp(-(rX - cx.dimshuffle(0, 'x')) ** 2 / (2.0 * (sx.dimshuffle(0, 'x') ** 2 + self.epsilon)))
     FY = K.exp(-(rY - cy.dimshuffle(0, 'x')) ** 2 / (2.0 * (sy.dimshuffle(0, 'x') ** 2 + self.epsilon)))
     m = (FY.dimshuffle(0, 1, 'x') * FX.dimshuffle(0, 'x', 1))
     m = m + self.alpha
     m = m - K.greater(m, 1.0) * (m - 1.0)
     
     frame = frame * m.dimshuffle(0, 'x', 1, 2)
     
     # Reshaping the frame to include time dimension
     output = K.reshape(frame, frameShape)
     
     return output
Example No. 6
def yolo_v1_loss(y_true, y_pred):
    # y_true and y_pred are (batch, 40, 7) tensors


    truth_conf_tensor = K.expand_dims(y_true[:,:,0],2)#tf.slice(y_true, [0, 0, 0], [-1,-1, 0])
    truth_xy_tensor = y_true[:,:,1:3]#tf.slice(y_true, [0, 0, 1], [-1,-1, 2])
    truth_wh_tensor = y_true[:,:,3:5]#tf.slice(y_true, [0, 0, 3], [-1, -1, 4])
    truth_m_tensor = K.expand_dims(y_true[:,:,5],2)#tf.slice(y_true, [0, 0, 5], [-1, -1, 5])
    truth_v_tensor = K.expand_dims(y_true[:,:,6],2)#tf.slice(y_true, [0, 0, 6], [-1, -1, 6])

    pred_conf_tensor = K.expand_dims(y_pred[:,:,0],2)#tf.slice(y_pred, [0, 0, 0], [-1, -1, 0])
    #pred_conf_tensor = K.tanh(pred_conf_tensor)
    pred_xy_tensor = y_pred[:,:,1:3]#tf.slice(y_pred, [0, 0, 1], [-1, -1, 2])
    pred_wh_tensor = y_pred[:,:,3:5]#tf.slice(y_pred, [0, 0, 3], [-1, -1, 4])
    pred_m_tensor = K.expand_dims(y_pred[:,:,5],2)#tf.slice(y_pred, [0, 0, 5], [-1, -1, 5])
    pred_v_tensor = K.expand_dims(y_pred[:,:,6],2)#tf.slice(y_pred, [0, 0, 6], [-1, -1, 6])

    # debug logging (TF1-era tf.Print)
    truth_xy_tensor = tf.Print(truth_xy_tensor, [truth_xy_tensor[:, 14:20, 0]], message='truth_xy', summarize=30)
    pred_xy_tensor = tf.Print(pred_xy_tensor, [pred_xy_tensor[:, 14:20, 0]], message='pred_xy', summarize=30)

    tens = K.greater(K.sigmoid(truth_conf_tensor), 0.5)
    tens_2d = K.concatenate([tens,tens], axis=-1)

    conf_loss = yolo_conf_loss(truth_conf_tensor, pred_conf_tensor,tens)
    xy_loss = yoloxyloss(truth_xy_tensor,pred_xy_tensor,tens_2d)
    wh_loss = yolo_wh_loss(truth_wh_tensor,pred_wh_tensor,tens_2d)
    m_loss = yolo_regressor_loss(truth_m_tensor,pred_m_tensor,tens)
    v_loss = yolo_regressor_loss(truth_v_tensor,pred_v_tensor,tens)

    loss = 2.0 * conf_loss + 0.25 * xy_loss + 0.25 * wh_loss + 1.5 * m_loss + 1.25 * v_loss # loss v1
    #loss = 2.0 * conf_loss + 0.1 * xy_loss + 1.0 * wh_loss + 5.0 * m_loss + 2.5 * v_loss  # loss v2


    return loss
Example No. 7
def true_positive_rate(y_true, y_pred, mode='p'):
    threshold_value = 0.5
    if mode=='n':
        threshold_value=1-threshold_value
    # works as round() with threshold_value
    y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value), K.floatx())
    true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
    real_positives = K.sum(K.clip(y_true, 0, 1))
    return true_positives / (real_positives + K.epsilon())
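A minimal eager check (TensorFlow 2.x assumed, toy labels): with the default 0.5 threshold only one of the two real positives is recovered, so the metric should come out near 0.5.

y_true = K.constant([1., 1., 0., 0.])
y_pred = K.constant([0.9, 0.2, 0.1, 0.4])
print(float(K.get_value(true_positive_rate(y_true, y_pred))))  # ~0.5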
Example No. 8
 def _batch_all_triplet_loss(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
     anchor_positive_dist = K.expand_dims(pairwise_dist, 2)
     anchor_negative_dist = K.expand_dims(pairwise_dist, 1)
     triplet_loss = anchor_positive_dist - anchor_negative_dist + self.margin
     mask = self._get_triplet_mask(y_true, pairwise_dist)
     triplet_loss = mask * triplet_loss
     triplet_loss = K.clip(triplet_loss, 0.0, None)
     valid_triplets = K.cast(K.greater(triplet_loss, 1e-16), K.dtype(triplet_loss))
     num_positive_triplets = K.sum(valid_triplets)
     triplet_loss = K.sum(triplet_loss) / (num_positive_triplets + 1e-16)
     return triplet_loss
Example No. 9
 def loss(y_true, y_pred):
     from plasma.conf import conf
     fac = MaxHingeTarget.fac
     #overall_fac = np.prod(np.array(K.shape(y_pred)[1:]).astype(np.float32))
     overall_fac = K.prod(K.cast(K.shape(y_pred)[1:],K.floatx()))
     max_val = K.max(y_pred,axis=-2) #temporal axis!
     max_val1 = K.repeat(max_val,K.shape(y_pred)[-2])
     mask = K.cast(K.equal(max_val1,y_pred),K.floatx())
     y_pred1 = mask * y_pred + (1-mask) * y_true
     weight_mask = K.mean(y_true,axis=-1)
     weight_mask = K.cast(K.greater(weight_mask,0.0),K.floatx()) #positive label!
     weight_mask = fac*weight_mask + (1 - weight_mask)
     #return weight_mask*squared_hinge(y_true,y_pred1)
     return conf['model']['loss_scale_factor']*overall_fac*weight_mask*hinge(y_true,y_pred1)
Example No. 10
    def kaggle_dice(y_true, y_pred0, pix=pix, dim=dim):
        # PIXEL THRESHOLD
        y_pred = K.cast(K.greater(y_pred0, pix), K.floatx())

        # MIN AREA THRESHOLD
        area = 20000. * dim[0] / 350. * dim[1] / 525.
        s = K.sum(y_pred, axis=(1, 2))
        s = K.cast(K.greater(s, area), K.floatx())

        # REMOVE MIN AREA
        s = K.reshape(s, (-1, 1))
        s = K.repeat(s, dim[0] * dim[1])
        s = K.reshape(s, (-1, 1))
        y_pred = K.permute_dimensions(y_pred, (0, 3, 1, 2))
        y_pred = K.reshape(y_pred, shape=(-1, 1))
        y_pred = s * y_pred
        y_pred = K.reshape(y_pred, (-1, y_pred0.shape[3], dim[0], dim[1]))
        y_pred = K.permute_dimensions(y_pred, (0, 2, 3, 1))

        # COMPUTE KAGGLE DICE
        intersection = K.sum(y_true * y_pred, axis=(1, 2))
        total_y_true = K.sum(y_true, axis=(1, 2))
        total_y_pred = K.sum(y_pred, axis=(1, 2))
        return K.mean((2 * intersection + 1e-9) / (total_y_true + total_y_pred + 1e-9))
Example No. 11
    def recall_m(y_true, y_pred):
        #        true_positives = keras.sum(keras.round(keras.clip(y_true * y_pred, 0, 1)))
        #        possible_positives = keras.sum(keras.round(keras.clip(y_true, 0, 1)))
        #        recall = true_positives / (possible_positives + keras.epsilon())

        # Adaptation of the "round()" used before to get the predictions. Clipping to make sure that the predicted raw values are between 0 and 1.
        y_pred = keras.cast(keras.greater(keras.clip(y_pred, 0, 1), 0.5),
                            keras.floatx())
        # Compute the number of true positives. Round as a precaution so the count is an integer.
        true_positives = keras.round(
            keras.sum(keras.clip(y_true * y_pred, 0, 1)))
        # Compute the number of positive targets.
        possible_positives = keras.sum(keras.clip(y_true, 0, 1))
        recall = true_positives / (possible_positives + keras.epsilon())
        return recall
Example No. 12
def fp_score(y_true, y_pred, threshold):
    # Column 1: target is negative; column 2: prediction exceeds the threshold;
    # column 3 is all ones, so K.all() over the columns marks exactly the false positives.
    fp_3d = K.concatenate([
        K.cast(K.expand_dims(K.flatten(K.abs(y_true - K.ones_like(y_true)))), 'bool'),
        K.cast(K.expand_dims(K.flatten(K.greater(y_pred, K.constant(threshold)))), 'bool'),
        K.cast(K.ones_like(K.expand_dims(K.flatten(y_pred))), 'bool')
    ], axis=-1)

    fp = K.sum(K.cast(K.all(fp_3d, axis=1), 'int32'))

    return fp
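A toy eager check (TensorFlow 2.x assumed): only the first slot is predicted positive while its target is negative, so exactly one false positive should be counted.

y_true = K.constant([0., 1., 0., 1.])
y_pred = K.constant([0.9, 0.4, 0.2, 0.8])
print(int(K.get_value(fp_score(y_true, y_pred, 0.5))))  # 1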
Example No. 13
def KMetric(true, pred):  #any shape can go - can't be a loss function
    thresholds = [0.5 + (i * .05) for i in range(10)]
    #flattened images (batch, pixels)
    true = K.batch_flatten(true)
    pred = K.batch_flatten(pred)
    pred = castF(K.greater(pred, 0.5))

    #total white pixels - (batch,)
    trueSum = K.sum(true, axis=-1)
    predSum = K.sum(pred, axis=-1)

    #has mask or not per image - (batch,)
    true1 = castF(K.greater(trueSum, 1))
    pred1 = castF(K.greater(predSum, 1))

    #to get images that have mask in both true and pred
    truePositiveMask = castB(true1 * pred1)

    #separating only the possible true positives to check iou
    testTrue = tf.boolean_mask(true, truePositiveMask)
    testPred = tf.boolean_mask(pred, truePositiveMask)

    #getting iou and threshold comparisons
    iou = iou_loss_core(testTrue, testPred)
    truePositives = [castF(K.greater(iou, thr)) for thr in thresholds]

    #mean of thresholds for true positives and total sum
    truePositives = K.mean(K.stack(truePositives, axis=-1), axis=-1)
    truePositives = K.sum(truePositives)

    #to get images that don't have mask in both true and pred
    trueNegatives = (1 - true1) * (1 - pred1)  # = 1 - true1 - pred1 + true1*pred1
    trueNegatives = K.sum(trueNegatives)

    return (truePositives + trueNegatives) / castF(K.shape(true)[0])
Example No. 14
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number
    averaged_mask = K.pool2d(
            y_true, pool_size=(11, 11), strides=(1, 1), padding='same', pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = (weighted_bce_loss(y_true, y_pred, weight) +
            weighted_dice_loss(y_true, y_pred, weight))
    return loss
Example No. 15
 def precision(y_true, y_pred):
     """Precision metric.
     Computes the precision over the whole batch using threshold_value.
     """
     threshold_value = threshold
     # Adaptation of the "round()" used before to get the predictions. Clipping to make sure that the predicted raw values are between 0 and 1.
     y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value),
                     K.floatx())
      # Compute the number of true positives. Round as a precaution so the count is an integer.
     true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
     # count the predicted positives
     predicted_positives = K.sum(y_pred)
     # Get the precision ratio
     precision_ratio = true_positives / (predicted_positives + K.epsilon())
     return precision_ratio
Example No. 16
 def recall(y_true, y_pred):
     """Recall metric.
     Computes the recall over the whole batch using threshold_value.
     Taken from: https://stackoverflow.com/questions/42606207
     """
     threshold_value = threshold
     # Adaptation of the "round()" used before to get the predictions. Clipping to make sure that the predicted raw values are between 0 and 1.
     y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value),
                     K.floatx())
      # Compute the number of true positives. Round as a precaution so the count is an integer.
     true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
     # Compute the number of positive targets.
     possible_positives = K.sum(K.clip(y_true, 0, 1))
     recall_ratio = true_positives / (possible_positives + K.epsilon())
     return recall_ratio
Example No. 17
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, adds the start boundary energy b_start
    (resp. the end boundary energy b_end) to the first (resp. last) elements
    and multiplies by the mask.'''
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]],
                                   axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            mask_l = K.concatenate(
                [mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x
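A toy eager run (TensorFlow 2.x assumed, made-up energies): with mask [0, 1, 1, 0] the start energy should land on the first unmasked step and the end energy on the last one.

x = K.constant(np.zeros((1, 4, 3), dtype='float32'))
mask = K.constant([[0., 1., 1., 0.]])
out = add_boundary_energy(x, b_start=K.constant(1.), b_end=K.constant(2.), mask=mask)
print(K.get_value(out)[0, :, 0])  # [0. 1. 2. 0.]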
Example No. 18
def f1_m(y_true, y_pred):
    THRESHOLD = 0.5 # 0.05
    #y_pred = K.round(y_pred)
    y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), THRESHOLD), K.floatx())
    tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
    tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)
    fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)

    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())

    f1 = 2*p*r / (p+r+K.epsilon())
    f1 = tf.where(tf.math.is_nan(f1), tf.zeros_like(f1), f1)
    return K.mean(f1)
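A small eager check (TensorFlow 2.x assumed, toy batch): perfect one-hot predictions should give an F1 of essentially 1.

y_true = K.constant([[1., 0.], [0., 1.]])
y_pred = K.constant([[0.9, 0.1], [0.2, 0.8]])
print(float(K.get_value(f1_m(y_true, y_pred))))  # ~1.0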
Example No. 19
def Kaggle_IoU_Precision(y_true, y_pred, threshold=0.5):
    y_pred = K.squeeze(K.cast(y_pred > threshold, 'int32'), -1)
    y_true = K.cast(y_true[..., 0], K.floatx())
    y_pred = K.cast(y_pred, K.floatx())
    truth_areas = K.sum(y_true, axis=[1, 2])
    pred_areas = K.sum(y_pred, axis=[1, 2])
    intersection = K.sum(y_true * y_pred, axis=[1, 2])
    union = K.clip(truth_areas + pred_areas - intersection, 1e-9, 512 * 512)
    check = K.map_fn(lambda x: K.equal(x, 0), truth_areas + pred_areas, dtype=tf.bool)
    p = intersection / union
    iou = K.switch(check, p + 1., p)

    prec = K.map_fn(lambda x: K.mean(K.greater(x, np.arange(0.5, 1.0, 0.05))), iou, dtype=tf.float32)
    prec_iou = K.mean(prec)
    return prec_iou
Example No. 20
def get_gpt_model(config_path, checkpoint_path):
    model = load_trained_model_from_checkpoint(config_path, checkpoint_path)
    inputs = model.inputs[0]
    mask = Lambda(lambda x: K.reshape(K.sum(K.cast(K.greater(x, 0), 'float32'), axis=-1), [K.shape(x)[0], 1]) - 1,
                  name='Mask')(inputs)
    # mask = Lambda(lambda x: print(K.shape(x)),
    #               name='Mask')(inputs)
    layer = model.get_layer(name='Norm').output
    layer = Lambda(seq_gather, name='Gather')([layer, mask])
    predict = Dense(1, activation='sigmoid', name='Predict-Dense')(layer)
    aux = Dense(6, activation='sigmoid', name='Predict-Aux')(layer)

    model = Model(inputs=inputs, outputs=[predict, aux])
    model.summary()
    return model
Example No. 21
def correct_positive_diagnoses(y_true, y_pred):
    THRESHOLD = 0.5
    p_thr = K.greater(y_pred, THRESHOLD)
    y_true = K.cast(y_true, dtype='bool')

    pos_mask = K.any(y_true, axis=1)  #patients with positive diagnoses
    p_thr = tf.boolean_mask(p_thr, pos_mask)
    y_true = tf.boolean_mask(y_true, pos_mask)

    equals_t = K.equal(p_thr, y_true)
    correct_rows = K.all(equals_t, axis=1)
    correct_rows_float = K.cast(correct_rows, dtype='float32')

    return K.sum(correct_rows_float) / (
        K.cast(K.shape(correct_rows_float)[0], dtype='float32') + K.epsilon())
Example No. 22
    def f1_fix(y_true, y_pred):
        y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold), K.floatx())
        #y_pred = K.round(y_pred)
        tp = K.sum(K.cast(y_true * y_pred, 'float'), axis=0)
        tn = K.sum(K.cast((1 - y_true) * (1 - y_pred), 'float'), axis=0)
        fp = K.sum(K.cast((1 - y_true) * y_pred, 'float'), axis=0)
        fn = K.sum(K.cast(y_true * (1 - y_pred), 'float'), axis=0)

        p = tp / (tp + fp + K.epsilon())
        r = tp / (tp + fn + K.epsilon())

        f1 = 2 * p * r / (p + r + K.epsilon())
        f1 = tf.where(tf.math.is_nan(f1), tf.zeros_like(f1), f1)

        return K.mean(f1)
Example No. 23
def competitionMetric2(true, pred): #any shape can go
    thresholds = [0.5 + (i * .05) for i in range(10)]

    #flattened images (batch, pixels)
    true = K.batch_flatten(true)
    pred = K.batch_flatten(pred)
    pred = castF(K.greater(pred, 0.5))

    #total white pixels - (batch,)
    trueSum = K.sum(true, axis=-1)
    predSum = K.sum(pred, axis=-1)

    #has mask or not per image - (batch,)
    true1 = castF(K.greater(trueSum, 1))
    pred1 = castF(K.greater(predSum, 1))

    #to get images that have mask in both true and pred
    truePositiveMask = castB(true1 * pred1)

    #separating only the possible true positives to check iou
    testTrue = tf.boolean_mask(true, truePositiveMask)
    testPred = tf.boolean_mask(pred, truePositiveMask)

    #getting iou and threshold comparisons
    iou = iou_loss_core(testTrue, testPred)
    truePositives = [castF(K.greater(iou, thr)) for thr in thresholds]

    #mean of thresholds for true positives and total sum
    truePositives = K.mean(K.stack(truePositives, axis=-1), axis=-1)
    truePositives = K.sum(truePositives)

    #to get images that don't have mask in both true and pred
    trueNegatives = (1 - true1) * (1 - pred1)  # = 1 - true1 - pred1 + true1*pred1
    trueNegatives = K.sum(trueNegatives)
    
    return (truePositives + trueNegatives) / castF(K.shape(true)[0])
Example No. 25
def DehazeLoss_std(y_true, y_pred, alpha = 1.0, beta = 1.0, batch_size = batch_size):
    std = 0
    total = 0
    THRESHOLD = K.variable(1.0)
    for i in range(batch_size):
        yt = y_true[i,:,:,:]
        yp = y_pred[i,:,:,:]
        mae = K.abs(yt - yp)
        flag = K.greater(mae, THRESHOLD)
        l1_temp = K.mean(K.switch(flag, (mae - 0.5), K.pow(mae, 2)))
        l2_temp = K.mean(K.square(yt - yp))
        std_temp = K.std(yt)
        total += std_temp*(alpha*l1_temp+beta*l2_temp)
        std += std_temp
    return total/(std+K.epsilon())
Example No. 26
    def loss_tv(self, mask, y_comp):
        """Total variation loss, used for smoothing the hole region, see. eq. 6"""

        # Create dilated hole region using a 3x3 kernel of all 1s.
        kernel = K.ones(shape=(3, 3, mask.shape[3], mask.shape[3]))
        dilated_mask = K.conv2d(1-mask, kernel, data_format='channels_last', padding='same')

        # Cast values to be [0., 1.], and compute dilated hole region of y_comp
        dilated_mask = K.cast(K.greater(dilated_mask, 0), 'float32')
        P = dilated_mask * y_comp

        # Calculate total variation loss
        a = self.l1(P[:,1:,:,:], P[:,:-1,:,:])
        b = self.l1(P[:,:,1:,:], P[:,:,:-1,:])        
        return a+b
Example No. 27
def f1_score(y_true, y_pred):

    # if (K.sum(y_pred > thresh) == 0):
    #     y_pred[np.argmax(y_pred)] = 1

    y_pred = K.cast(K.greater(y_pred, thresh), dtype='float32')
    tp = K.sum(y_true * y_pred, axis=1)

    precision = tp / ((K.sum(y_pred, axis=1)) + K.epsilon())
    recall = tp / ((K.sum(y_true, axis=1)) + K.epsilon())

    # return (K.mean(precision))
    # return (K.mean(recall))
    return (K.mean(2 * ((precision * recall) /
                        ((precision + recall) + K.epsilon()))))
Example No. 28
def compute_objectness_loss(proposal_xyz, center_label, objectness_score):
    '''
    Args:
        proposal_xyz: (B, K1, 3) proposals
        center_label: (B, K2, 3+1+3) GT
        objectness_score: (B, K1, 2)

    Return:
        loss: scalar
        objectness_label: (B, K1) - whether a predicted center is near a ground-truth center or not
        objectness_mask: (B, K1) - care or don't care
        object_assignment: (B, K1) - index of the corresponding GT center for each prediction
    '''
    gt_center = center_label[:, :, :3]
    dist1, ind1, dist2, _ = nn_distance(proposal_xyz, gt_center)
    # distance between nearest predicted center and GT

    sqrt_dist1 = K.sqrt(dist1 + K.epsilon())
    objectness_label = K.greater(
        K.ones_like(sqrt_dist1) * NEAR_THRESHOLD, sqrt_dist1)
    objectness_label = K.cast(objectness_label, 'int32')
    # label = 1, if sqrt_dist < NEAR_THRESHOLD
    objectness_mask_1 = K.cast(
        K.greater(K.ones_like(sqrt_dist1) * NEAR_THRESHOLD, sqrt_dist1),
        'float32')
    objectness_mask_2 = K.cast(
        K.greater(sqrt_dist1,
                  K.ones_like(sqrt_dist1) * FAR_THRESHOLD), 'float32')
    objectness_mask = objectness_mask_1 + objectness_mask_2
    # mask = 1, if sqrt_dist < NEAR_THRESHHOLD or sqrt_dist > FAR_THRESHOLD
    loss = weighted_crossentropy(K.one_hot(objectness_label, 2),
                                 objectness_score, OBJECTNESS_CLS_WEIGHTS)
    loss = K.sum(
        loss * objectness_mask) / (K.sum(objectness_mask) + K.epsilon())
    object_assignment = ind1
    return loss, objectness_label, objectness_mask, object_assignment
Example No. 29
 def binary_dice_coeff(y_true, y_pred):
     """
             DSC = (2 * |X & Y|)/ (|X|+ |Y|)
                 = 2 * sum(|A*B|)/(sum(|A|)+sum(|B|))
                 
     :param y_true: ground truth
     :param y_pred: prediction
     :return:
     """
     y_true_f = K.flatten(y_true)
     y_pred = K.cast(y_pred, 'float32')
     y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32')
     intersection = y_true_f * y_pred_f
      score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon())  # epsilon guards against empty masks
     return score
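Treating the method above as a free function, a minimal eager check (TensorFlow 2.x assumed): one of the two predicted pixels overlaps the two true pixels, so DSC = 2*1/(2+2) = 0.5.

y_true = K.constant([1., 1., 0., 0.])
y_pred = K.constant([0.9, 0.2, 0.6, 0.1])
print(float(K.get_value(binary_dice_coeff(y_true, y_pred))))  # ~0.5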
Example No. 30
def orientation_loss(y_true, y_pred, obj_mask, mf):
    # Find number of anchors
    #print('orient loss ------')
    #print(test.shape)
    y_true = K.reshape(y_true * obj_mask, [-1, BIN, 2])
    y_pred = y_pred * obj_mask
    y_pred = K.l2_normalize(K.reshape(y_pred, [-1, BIN, 2]), 2)
    obj_mask = K.reshape(obj_mask, [-1, 1])
    #K.reshape(y_pred*obj_mask, [-1, BIN, 2])
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, 0.5)  #tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)
    # Define the loss
    # cos^2 + sin ^2 = 1
    # K.abs()
    #loss = K.abs(y_true[:,:,0]*y_pred[:,:,0] + y_true[:,:,1]*y_pred[:,:,1])
    #print(tf.Session().run(y_true))
    #print(tf.Session().run(y_pred))
    loss = (y_true[:, :, 0] * y_pred[:, :, 0] +
            y_true[:, :, 1] * y_pred[:, :, 1])  # -1 - 1
    #loss = K.switch(loss > 0.0, loss, K.zeros_like(loss))
    loss = 1 - loss
    loss = K.reshape(loss, [-1, 2])
    loss = loss * obj_mask
    print(loss.shape)

    #loss = 4.0*K.sum((2 - 2 * K.mean(loss,axis=0)))
    losssum = K.sum(K.sum(loss, axis=0))
    # print(losssum.shape)
    allobj = K.sum(obj_mask)
    #print(allobj.shape)
    #  if K.eval(allobj) == 0:
    #      loss = 0.0
    #  else :
    #      loss = 4.0*(2 - 2 * (K.sum(K.sum(loss, axis=0))/allobj))
    #loss = 4.0*K.sum((2 - 2 * K.mean(loss,axis=0)))
    #loss =  (allobj-K.sum(K.sum(loss, axis=0)))/mf
    #loss = tf.cond(allobj > 0, lambda: 3.0*(1 -  (K.sum(K.sum(loss, axis=0))/allobj)), lambda: 0.0)
    loss = tf.cond(allobj > 0, lambda: losssum / allobj, lambda: 0.0)
    #loss = K.switch(allobj > 0, losssum/allobj, 0.0)

    #loss = 3.0 * K.abs(loss)
    #K.switch
    #loss = tf.cond(allobj > 0, lambda: (allobj-K.sum(K.sum(loss, axis=0)))/mf, lambda: 0.0)

    #loss = K.sum((2 - 2 * K.mean(loss,axis=0))) / anchors
    #print(loss.shape)
    return loss
Example No. 31
    def initModels(self):

        S = Input(shape=self.s_dim)
        A = Input(shape=(1, ), dtype='uint8')
        V = Input(shape=(self.env.N, self.s_dim))
        TARGETS = Input(shape=(self.env.N, 1))
        # NOTE: G, M and MCR below are defined elsewhere in the class this method
        # was extracted from (presumably goal, mask and Monte-Carlo-return inputs).

        qvals = self.create_critic_network(S, V)
        self.model = Model([S, V], qvals)
        self.qvals = K.function(inputs=[S, V], outputs=[qvals], updates=None)

        actionProbs = K.softmax(qvals)
        self.actionProbs = K.function(inputs=[S, V],
                                      outputs=[actionProbs],
                                      updates=None)

        actionFilter = K.squeeze(K.one_hot(A, self.num_actions), axis=1)
        qval = K.sum(actionFilter * qvals, axis=1, keepdims=True)
        actionProb = K.sum(actionFilter * actionProbs, axis=1, keepdims=True)
        loss_dqn = K.mean(K.square(qval - TARGETS), axis=0)
        self.qval = K.function(inputs=[S, G, M, A],
                               outputs=[qval],
                               updates=None)

        val = K.max(qvals, axis=1, keepdims=True)
        self.val = K.function(inputs=[S, G, M], outputs=[val], updates=None)

        qvalWidth = K.max(qvals, axis=1, keepdims=True) - K.min(
            qvals, axis=1, keepdims=True)
        onehot = 1 - K.squeeze(K.one_hot(A, self.num_actions), axis=1)
        onehotMargin = K.repeat_elements(
            self.margin * qvalWidth, self.num_actions, axis=1) * onehot
        imit = (K.max(qvals + onehotMargin, axis=1, keepdims=True) - qval)
        advantage = K.maximum(MCR - val, 0)
        advClip = K.cast(K.greater(MCR, val), dtype='float32')
        goodexp = K.sum(advClip)
        imitFiltered = imit * advClip
        # loss_imit = K.mean(imitFiltered, axis=0)
        loss_imit = K.sum(imitFiltered, axis=0) / (K.sum(advClip) + 0.0001)
        inputs = [S, A, G, M, TARGETS, MCR]
        self.metrics_names = [
            'loss_dqn', 'val', 'qval', 'loss_imit', 'goodexp'
        ]
        metrics = [loss_dqn, val, qval, loss_imit, goodexp]

        updates = self.optimizer.get_updates(loss_dqn + self.w_i * loss_imit,
                                             self.model.trainable_weights)
        self.train = K.function(inputs, metrics, updates)
Example No. 32
    def crossentropy_reed_origin_core(y_true, y_pred):
        # hyper param
        print(_beta)

        # 1) determine the origin of the patch, as a boolean vector in y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true one-hot-vector format
        # attenuating factor for data points that need it (those that came with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01

        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')

        # combine 2 masks
        _mask = _mask_reduce + _mask_keep

        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))

        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # (1) dynamically update the targets based on the current state of the model: bootstrapped target tensor
        # use predicted class proba directly to generate regression targets
        y_true_bootstrapped = _beta * y_true + (1 - _beta) * y_pred

        # at this point we have 2 versions of y_true
        # decide which target label to use for each datapoint
        _mask_noisy = K.cast(_y_true_flag,
                             'float32')  # only allows patches from noisy set
        _mask_clean = K.cast(K.equal(_y_true_flag, False),
                             'float32')  # only allows patches from clean set
        _mask_noisy = K.reshape(_mask_noisy, (_y_true_shape[0], 1))
        _mask_clean = K.reshape(_mask_clean, (_y_true_shape[0], 1))

        # points coming from clean set use the standard true one-hot vector. dim is (batch_size, 1)
        # points coming from noisy set use the Reed bootstrapped target tensor
        y_true_final = y_true * _mask_clean + y_true_bootstrapped * _mask_noisy

        # (2) compute loss as always
        _loss = -K.sum(y_true_final * K.log(y_pred), axis=-1)

        return _loss
Example No. 33
    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        old_grads = [K.zeros(shape) for shape in shapes]
        zetas = [K.ones(shape) for shape in shapes]
        zs = [K.zeros(shape) for shape in shapes]
        thetas = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations]  # + thetas

        # prev_weight_deltas = [K.zeros(shape) for shape in shapes]
        # self.weights = delta_ws + old_grads # TODO: understand self.weights
        self.updates = []

        for param, grad, old_grad, zeta, z, theta in zip(
                params, grads, old_grads, zetas, zs, thetas):
            # Line 4 to 8
            new_zeta = K.switch(
                K.greater(grad * old_grad, 0),
                K.minimum(zeta * self.eta_pos, self.zeta_max),
                K.switch(K.less(grad * old_grad, 0),
                         K.maximum(zeta * self.eta_neg, self.zeta_min), zeta)
            )  # note that I added a 'if gradient = 0 then zeta' condition
            # Line 9
            new_z = self.alpha * z + (1 - self.alpha) * new_zeta
            # Line 10
            new_theta = self.alpha_b * theta + (1 - self.alpha_b) * K.square(grad)
            # Line 11
            weight_delta = -self.lr / new_z * grad  #/ (K.pow(new_theta, self.theta_pow) + 1e-11)  # added epsilon to prevent zero division
            # weight_delta = -self.lr * (new_zeta/new_z) * grad # * (1 / K.sqrt(new_theta + 1e-11))  # added epsilon to prevent zero division
            # TODO: Figure this out! It seems like the theta part in particular seems to be breaking the calculation
            #    Also, it seems like we should be taking the sign of grad rather than multiplying it directly.
            # weight_delta = -new_z * (grad/new_theta)
            # Line 12
            new_param = param + weight_delta

            # Apply constraints
            #if param in constraints:
            #    c = constraints[param]
            #    new_param = c(new_param)

            self.updates.append(K.update(param, new_param))
            self.updates.append(K.update(zeta, new_zeta))
            self.updates.append(K.update(old_grad, grad))
            self.updates.append(K.update(z, new_z))
            self.updates.append(K.update(theta, new_theta))

        return self.updates
Example No. 34
        def recall(y_true, y_pred):
            """Recall metric.

            Only computes a batch-wise average of recall.

            Computes the recall, a metric for multi-label classification of
            how many relevant items are selected.
            """
            threshold_value = threshold
            y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value),
                            K.floatx())
            true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)),
                                   axis=0)
            possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)), axis=0)
            recall = true_positives / (possible_positives + K.epsilon())
            return recall
Example No. 35
        def precision(y_true, y_pred):
            """Precision metric.

            Only computes a batch-wise average of precision.

            Computes the precision, a metric for multi-label classification of
            how many selected items are relevant.
            """
            threshold_value = threshold
            y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value),
                            K.floatx())
            true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)),
                                   axis=0)
            predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)), axis=0)
            precision = true_positives / (predicted_positives + K.epsilon())
            return precision
Example No. 36
def SP_pixelwise_loss(y_true, y_pred):
    y_true_label = y_true[:, :class_number, :, :]
    y_true_SP_weight = y_true[:, class_number:, :, :]

    y_pred = K.clip(y_pred, -50., 50.)  # prevent overflow
    sample_num_per_class = K.sum(y_true_label, axis=[2, 3], keepdims=True)
    class_ind = K.cast(K.greater(sample_num_per_class, 0.), 'float32')
    avg_sample_num_per_class = K.sum(sample_num_per_class, axis=1, keepdims=True) / K.sum(class_ind, axis=1, keepdims=True)
    sample_weight_per_class = avg_sample_num_per_class / (sample_num_per_class + 0.1)
    exp_pred = K.exp(y_pred - K.max(y_pred, axis=1, keepdims=True))
    y_pred_softmax = exp_pred / K.sum(exp_pred, axis=1, keepdims=True)
    pixel_wise_loss = -K.log(y_pred_softmax) * y_true_label
    pixel_wise_loss = pixel_wise_loss * sample_weight_per_class
    weighted_pixel_wise_loss = K.sum(pixel_wise_loss, axis=1, keepdims=True)

    return K.mean(weighted_pixel_wise_loss * y_true_SP_weight)
Example No. 37
def recall(y_true, y_pred):
    """Recall metric.
    Only computes a batch-wise average of recall.
    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """
    arg_y_true = K.cast(K.argmax(y_true), K.floatx())
    arg_y_pred = K.cast(K.argmax(y_pred), K.floatx())
    true_positives = K.sum(K.cast(K.equal(
        arg_y_true, arg_y_pred), K.floatx())) - K.sum(
            K.cast(K.equal(arg_y_true + arg_y_pred, 0), K.floatx()))
    possible_positives = K.sum(K.cast(K.greater(arg_y_true, 0), K.floatx()))
    #    recall = true_positives / (possible_positives+K.constant(0.1,K.floatx()))
    recall = K.switch(K.equal(possible_positives, 0), K.constant(0.0),
                      true_positives / possible_positives)
    return recall
Example No. 38
def FScore2(y_true, y_pred):
    '''
    The F score, beta=2
    '''
    B2 = K.variable(4)
    OnePlusB2 = K.variable(5)
    pred = K.round(y_pred)
    tp = K.sum(
        K.cast(K.less(K.abs(pred - K.clip(y_true, .5, 1.)), 0.01), 'float32'),
        -1)
    fp = K.sum(K.cast(K.greater(pred - y_true, 0.1), 'float32'), -1)
    fn = K.sum(K.cast(K.less(pred - y_true, -0.1), 'float32'), -1)

    f2 = OnePlusB2 * tp / (OnePlusB2 * tp + B2 * fn + fp)

    return K.mean(f2)
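A toy eager check (TensorFlow 2.x assumed): one true positive, one false positive and one false negative give F2 = 5*1/(5*1 + 4*1 + 1) = 0.5.

y_true = K.constant([[1., 0., 1., 0.]])
y_pred = K.constant([[0.9, 0.8, 0.2, 0.1]])
print(float(K.get_value(FScore2(y_true, y_pred))))  # 0.5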
Example No. 39
 def get_psp(self, output_spikes):
     if hasattr(self, 'activation_str') \
             and self.activation_str == 'softmax':
         psp = tf.identity(output_spikes)
     else:
         new_spiketimes = tf.where(k.not_equal(output_spikes, 0),
                                   k.ones_like(output_spikes) * self.time,
                                   self.last_spiketimes)
         assign_new_spiketimes = tf.assign(self.last_spiketimes,
                                           new_spiketimes)
         with tf.control_dependencies([assign_new_spiketimes]):
             last_spiketimes = self.last_spiketimes + 0  # Dummy op
             psp = tf.where(k.greater(last_spiketimes, 0),
                            k.ones_like(output_spikes) * self.dt,
                            k.zeros_like(output_spikes))
     return psp
Example No. 40
def regression_and_classification(y_true, y_pred):

    #if y_true is 0, treated as a negative
    # if y_true > 1, treated as a positive
    # nothing in between 

    y_true_binarized = K.cast(K.greater_equal(y_true, 1.0), K.floatx()) 
    hard_sigmoid_pred = K.hard_sigmoid(y_pred)
    linearized_hard_sigmoid_pred = (0.2*y_pred) + 0.5
    binary_crossentropy_loss = K.mean(K.binary_crossentropy(
                                        output=hard_sigmoid_pred,
                                        target=y_true_binarized), axis=-1)
    mse_loss = K.mean(2.71*K.square(linearized_hard_sigmoid_pred - y_true)
                          *K.cast(K.greater(y_true, 1.0), K.floatx()),
                      axis=-1)
    return binary_crossentropy_loss + mse_loss 
Example No. 41
def reconstruction_loss(bow, p):
    """Computes reconstruction loss between true bow representations and
    softmax predictions."""

    # Flatten the input tensors.
    bow_flat = K.reshape(bow, shape=(-1, ))
    p_flat = K.reshape(p, shape=(-1, ))

    # Gather nonzero indices.
    indices = K.squeeze(tf.where(K.greater(bow_flat, 0.)), axis=1)
    bow_flat = K.gather(bow_flat, indices)
    p_flat = K.gather(p_flat, indices)

    reconstr_loss = -K.sum(K.log(K.maximum(bow_flat * p_flat, 1e-10)))
    reconstr_loss /= K.cast(K.shape(bow)[0], dtype='float32')
    return reconstr_loss
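A quick eager check (TensorFlow 2.x assumed, toy bag-of-words): only the nonzero bow entries contribute, so the loss is -(log(2*0.5) + log(1*0.3)) ≈ 1.2 for a batch of one.

bow = K.constant([[2., 0., 1.]])
p = K.constant([[0.5, 0.2, 0.3]])
print(float(K.get_value(reconstruction_loss(bow, p))))  # ~1.204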
Example No. 42
    def starGAN_train(self, D_lr, G_lr, lamda_gp, lamda_cls, lamda_rec):

        x_real = Input(shape=self.image_size)
        label_real = Input(shape=(self.n_class,))
        label_fake = Input(shape=(self.n_class,))
        label_real_matrix = Input(shape=(self.image_size[0],self.image_size[1],self.n_class))
        label_fake_matrix = Input(shape=(self.image_size[0],self.image_size[1],self.n_class))
        x_fake = self.generator([x_real, label_fake_matrix])

        # loss for discriminator
        d_out_src_real, d_out_cls_real = self.discriminator(x_real)
        d_loss_real = -K.mean(d_out_src_real)
        d_loss_cls = K.mean(K.categorical_crossentropy(label_real, d_out_cls_real))
        # compute accuracy
        label_sub = d_out_cls_real - label_real
        c1 = 1 + K.min(label_sub, axis=1)  # minimum confidence among labels that are 1
        c2 = K.max(label_sub, axis=1)  # maximum confidence among labels that are 0
        d_acc = K.mean(K.cast(K.greater(c1 - c2, 0), K.floatx()))  # correct iff the lowest positive-label confidence exceeds the highest negative-label confidence
        # label_pred = K.cast(K.greater(K.clip(d_out_cls_real, 0, 1), 0.5), K.floatx())
        # d_acc = 1 - K.mean(K.clip(K.sum(K.abs(label_real - label_pred), axis=1), 0, 1))
        d_out_src_fake, d_out_cls_fake = self.discriminator(x_fake)
        d_loss_fake = K.mean(d_out_src_fake)

        # gradient penalty
        e = K.placeholder(shape=(None, 1, 1, 1))
        x_mixed = Input(shape=self.image_size, tensor=e * x_real + (1 - e) * x_fake)
        x_mixed_gradient = K.gradients(self.discriminator(x_mixed), [x_mixed])[0]
        x_mixed_gradient_norm = K.sqrt(K.sum(K.square(x_mixed_gradient), axis=[1, 2, 3]))  # not norm in batch_size
        gradient_penalty = K.mean(K.square(x_mixed_gradient_norm - 1))

        d_loss = d_loss_real + d_loss_fake + lamda_gp * gradient_penalty + lamda_cls * d_loss_cls
        d_training_updates = RMSprop(lr=D_lr).get_updates(d_loss, self.discriminator.trainable_weights)
        D_train = K.function([x_real, label_real, label_real_matrix, label_fake, label_fake_matrix, e], [d_loss, d_acc], d_training_updates)

        # loss for generator
        x_rec = self.generator([x_fake, label_real_matrix])
        g_out_src_fake, g_out_cls_fake = self.discriminator(x_fake)
        g_loss_fake = -K.mean(g_out_src_fake)
        g_loss_rec = K.mean(K.abs(x_real - x_rec))
        g_loss_cls = K.mean(K.categorical_crossentropy(label_fake, g_out_cls_fake))

        g_loss = g_loss_fake + lamda_rec * g_loss_rec + lamda_cls * g_loss_cls
        g_training_updates = RMSprop(lr=G_lr).get_updates(g_loss, self.generator.trainable_weights)
        G_train = K.function([x_real, label_real, label_real_matrix, label_fake, label_fake_matrix], [g_loss], g_training_updates)

        return D_train, G_train
Example No. 43
    def output_sampling(self, output, rand_matrix):
        # Generates a sampled selection based on raw output state vector
        # Creates a cdf vector and compares against a randomly generated vector
        # Requires a pre-generated rand_matrix (i.e. generated outside step function)

        sampled_output = output / K.sum(output, axis=-1, keepdims=True)  # (batch_size, self.units)
        mod_sampled_output = sampled_output / K.exp(self.temperature)
        norm_exp_sampled_output = mod_sampled_output / K.sum(mod_sampled_output, axis=-1, keepdims=True)

        cdf_vector = K.cumsum(norm_exp_sampled_output, axis=-1)
        cdf_minus_vector = cdf_vector - norm_exp_sampled_output

        rand_matrix = K.stack([rand_matrix], axis=0)
        rand_matrix = K.stack([rand_matrix], axis=2)

        compared_greater_output = K.cast(K.greater(cdf_vector, rand_matrix), dtype='float32')
        compared_lesser_output = K.cast(K.less(cdf_minus_vector, rand_matrix), dtype='float32')

        final_output = compared_greater_output * compared_lesser_output
        return final_output
Example No. 44
def iou_metric(y_true, y_pred):
    truth_conf_tensor = K.expand_dims(y_true[:, :, 0], 2)  # tf.slice(y_true, [0, 0, 0], [-1,-1, 0])
    truth_xy_tensor = y_true[:, :, 1:3]  # tf.slice(y_true, [0, 0, 1], [-1,-1, 2])
    truth_wh_tensor = y_true[:, :, 3:5]  # tf.slice(y_true, [0, 0, 3], [-1, -1, 4])

    pred_conf_tensor = K.expand_dims(y_pred[:, :, 0], 2)  # tf.slice(y_pred, [0, 0, 0], [-1, -1, 0])
    # pred_conf_tensor = K.tanh(pred_conf_tensor)
    pred_xy_tensor = y_pred[:, :, 1:3]  # tf.slice(y_pred, [0, 0, 1], [-1, -1, 2])
    pred_wh_tensor = y_pred[:, :, 3:5]  # tf.slice(y_pred, [0, 0, 3], [-1, -1, 4])

    tens = K.greater(truth_conf_tensor, 0.5)

    ave_iou, recall, precision, obj_count, intersection, union, ow, oh, x, y, w, h = iou(truth_xy_tensor[:, :, 0],
                                                                                         truth_xy_tensor[:, :, 1],
                                                                                         truth_wh_tensor[:, :, 0],
                                                                                         truth_wh_tensor[:, :, 1],
                                                                                         pred_xy_tensor[:, :, 0],
                                                                                         pred_xy_tensor[:, :, 1],
                                                                                         pred_wh_tensor[:, :, 0],
                                                                                         pred_wh_tensor[:, :, 1],
                                                                                         tens, pred_conf_tensor)
    return ave_iou
Example No. 45
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)

        self.updates = [K.update_add(self.iterations, 1)]
        t = self.iterations + 1

        loss_prev = K.variable(0)
        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        ch_fact_lbound = K.switch(K.greater(loss, loss_prev), 1+self.thl, 1/(1+self.thu))
        ch_fact_ubound = K.switch(K.greater(loss, loss_prev), 1+self.thu, 1/(1+self.thl))
        loss_ch_fact = loss / loss_prev
        loss_ch_fact = K.switch(K.less(loss_ch_fact, ch_fact_lbound), ch_fact_lbound, loss_ch_fact)
        loss_ch_fact = K.switch(K.greater(loss_ch_fact, ch_fact_ubound), ch_fact_ubound, loss_ch_fact)
        loss_hat = K.switch(K.greater(t, 1), loss_prev * loss_ch_fact, loss)

        d_den = K.switch(K.greater(loss_hat, loss_prev), loss_prev, loss_hat)
        d_t = (self.beta_3 * self.d) + (1. - self.beta_3) * K.abs((loss_hat - loss_prev) / d_den)
        d_t = K.switch(K.greater(t, 1), d_t, 1.)
        self.updates.append(K.update(self.d, d_t))

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            mhat_t = m_t / (1. - K.pow(self.beta_1, t))
            self.updates.append(K.update(m, m_t))

            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            vhat_t = v_t / (1. - K.pow(self.beta_2, t))
            self.updates.append(K.update(v, v_t))

            p_t = p - (self.lr / (1. + (self.iterations * self.decay))) * mhat_t / ((K.sqrt(vhat_t) * d_t) + self.epsilon)
            self.updates.append(K.update(p, p_t))

        self.updates.append(K.update(loss_prev, loss_hat))
        return self.updates
Example No. 46
 def call(self, x, mask=None):
     boolean_mask = K.any(K.greater(x, self.mask_value),
                          axis=-1, keepdims=True)
     return x * K.cast(boolean_mask, K.floatx())
Example No. 47
 def compute_mask(self, x, input_mask=None):
     return K.any(K.greater(x, self.mask_value), axis=-1)
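The two methods above pair naturally as a masking layer; below is a minimal sketch of a hypothetical host layer (GreaterMask is an assumed name, with mask_value as a constructor argument) that zeroes and masks timesteps whose features are all at or below mask_value.

from tensorflow.keras.layers import Layer

class GreaterMask(Layer):  # hypothetical host for the call/compute_mask pair above
    def __init__(self, mask_value=0., **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.mask_value = mask_value

    def call(self, x, mask=None):
        # zero out timesteps with no feature above mask_value
        boolean_mask = K.any(K.greater(x, self.mask_value), axis=-1, keepdims=True)
        return x * K.cast(boolean_mask, K.floatx())

    def compute_mask(self, x, input_mask=None):
        # expose the same condition as the mask for downstream layers
        return K.any(K.greater(x, self.mask_value), axis=-1)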
Example No. 48
def weighted_sum(first, second, sigma, first_threshold=-np.inf, second_threshold=np.inf):
    # kb is the Keras backend alias; INFTY is a module-level constant defined elsewhere
    logit_probs = first * sigma + second * (1.0 - sigma)
    infty_tensor = kb.ones_like(logit_probs) * INFTY
    logit_probs = kb.switch(kb.greater(first, first_threshold), logit_probs, infty_tensor)
    logit_probs = kb.switch(kb.greater(second, second_threshold), logit_probs, infty_tensor)
    return logit_probs