Example #1
def equal(f, other):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A Python number, a tensor, or a Functional object.

    # Returns
        A Functional.
    """
    validate_functional(f)

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(K.equal(x[0], x[1])),
                   name=graph_unique_name("equal")) for X in f.outputs
        ]
    else:
        _warn_for_ndarray(other)
        lmbd = [
            Lambda(lambda x: K.cast_to_floatx(K.equal(x, other)),
                   name=graph_unique_name("equal")) for X in f.outputs
        ]

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=_apply_operation(lmbd, f, other),
                     layers=lmbd)
    return res
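A minimal backend-level sketch (my illustration, not part of the source project) of the comparison these Lambda layers wrap, assuming a TensorFlow-backed Keras:

from tensorflow.keras import backend as K

x = K.constant([0.0, 0.5, 1.0])
y = K.constant([0.0, 0.4, 1.0])
# element-wise equality, cast to 0./1. floats as the Lambda layers above do
mask = K.cast(K.equal(x, y), K.floatx())
print(K.eval(mask))  # [1. 0. 1.]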
Example #2
    def loss_function(target_subtoken, y_pred):
        # prediction is a probability; work in log probabilities for speed and smoothness

        print("Model objective: y_pred.shape: {}".format(y_pred.shape))
        # I_C = indicator of whether the target subtoken exists in the input token - TODO probably not ok, debug using TF eager
        I_C = K.expand_dims(
            K.cast(K.any(K.equal(input_code_subtoken,
                                 K.cast(target_subtoken, 'int32')),
                         axis=-1),
                   dtype='float32'), -1)
        print("Model objective: I_C.shape: {}".format(I_C.shape))
        # I_C shape = [batch_size, token, max_char_len, 1]
        # TODO should I add a penalty if there are no subtokens appearing in the model? Yes
        probability_correct_copy = K.log(copy_probability) + K.log(
            K.sum(I_C * copy_weights) + mu)
        print("Model objective: probability_correct_copy.shape: {}".format(
            probability_correct_copy.shape))

        # penalise the model when cnn-attention predicts unknown
        # but the value can be predicted from the copy mechanism.
        mask_unknown = K.cast(K.equal(target_subtoken, unknown_id),
                              dtype='float32') * mu

        probability_target_token = K.sum(
            K.log(1 - copy_probability) + K.log(y_pred) + mask_unknown, -1,
            True)
        print("Model objective: probability_target_token.shape: {}".format(
            probability_target_token.shape))

        loss = K.logsumexp(
            [probability_correct_copy, probability_target_token])
        return K.mean(loss)
Example #3
def focal(y_true, y_pred, alpha=0.25, gamma=2.0, axis=None):
    """Compute the focal loss given the target tensor and the predicted tensor.

    As defined in https://arxiv.org/abs/1708.02002

    Args:
        y_true: Tensor of target data with shape (B, N, num_classes).
        y_pred: Tensor of predicted data with shape (B, N, num_classes).
        alpha: Scale the focal weight with alpha.
        gamma: Take the power of the focal weight with gamma.
        axis: Axis along which to sum the per-class losses. If None, it is
            inferred from the image data format.

    Returns:
        The focal loss of y_pred w.r.t. y_true.
    """
    if axis is None:
        axis = 1 if K.image_data_format() == 'channels_first' else K.ndim(y_pred) - 1

    # compute the focal loss
    alpha_factor = K.ones_like(y_true) * alpha
    alpha_factor = tf.where(K.equal(y_true, 1), alpha_factor, 1 - alpha_factor)
    focal_weight = tf.where(K.equal(y_true, 1), 1 - y_pred, y_pred)
    focal_weight = alpha_factor * focal_weight**gamma

    cls_loss = focal_weight * K.binary_crossentropy(y_true, y_pred)

    return K.sum(cls_loss, axis=axis)
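A small worked example (illustrative values only, not from the repository) of how the focal weight alpha * (1 - p)^gamma down-weights easy positives relative to hard ones:

import math

alpha, gamma = 0.25, 2.0
for p in (0.9, 0.5, 0.1):  # predicted probability for a positive (y_true = 1)
    focal_weight = alpha * (1.0 - p) ** gamma
    ce = -math.log(p)      # binary cross-entropy for y_true = 1
    print(f"p={p:.1f}  weight={focal_weight:.4f}  weighted loss={focal_weight * ce:.4f}")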
Example #4
    def focal_loss(y_true, y_pred):
        # Define epsilon so that backpropagation will not result in NaN
        # for the zero-divisor case
        epsilon = K.epsilon()
        # Add the epsilon to prediction value
        # y_pred = y_pred + epsilon
        # Clip the prediction value
        y_pred = K.clip(y_pred, epsilon, 1.0 - epsilon)

        alpha_factor = K.ones_like(y_true) * alpha

        # Calculate p_t, the model's estimated probability for the true class
        p_t = tf.where(K.equal(y_true, 1), y_pred, 1 - y_pred)

        # Calculate alpha_t
        alpha_t = tf.where(K.equal(y_true, 1), alpha_factor, 1 - alpha_factor)
        # Calculate cross entropy
        cross_entropy = -K.log(p_t)
        weight = alpha_t * K.pow((1 - p_t), gamma)
        # Calculate focal loss
        loss = weight * cross_entropy
        # Sum the losses in mini_batch
        loss = K.sum(loss, axis=1)

        return loss
Example #5
 def call(self, inputs, reverse=False, ddi=False, **kwargs):
     logscale_factor = 3.
     x = inputs
     reduce_axis = list(range(K.ndim(inputs)))[:-1]
     if not reverse:
         log_scale = self.log_scale
         bias = self.bias
         if ddi:
             x_var = tf.reduce_mean(x**2, reduce_axis, keepdims=True)
             init_scale = tf.log(1. /
                                 (tf.sqrt(x_var) + 1e-6)) / logscale_factor
             init_bias = tf.reduce_mean(x, reduce_axis, keepdims=True)
             log_scale = K.switch(K.all(K.equal(self.log_scale, 0.)),
                                  init_scale, self.log_scale)
             bias = K.switch(K.all(K.equal(self.bias, 0.)), -init_bias,
                             self.bias)
             self.add_update(K.update_add(
                 self.log_scale,
                 K.switch(K.all(K.equal(self.log_scale, 0.)), init_scale,
                          K.zeros_like(init_scale))),
                             inputs=x)
             self.add_update(K.update_add(
                 self.bias,
                 K.switch(K.all(K.equal(self.bias, 0.)), -init_bias,
                          K.zeros_like(init_bias))),
                             inputs=x)
         return (x + bias) * K.exp(log_scale)
     else:
         return x / K.exp(self.log_scale) - self.bias
Example #6
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.
    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))

    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)

    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)

    loss = smooth_l1_loss(target_bbox, rpn_bbox)

    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example #7
def class2_accuracy(y_true, y_pred):
    class_id_true = K.argmax(y_true, axis=-1)
    class_id_preds = K.argmax(y_pred, axis=-1)
    accuracy_mask = K.cast(K.equal(class_id_preds, 2), 'int32')
    class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds),
                              'int32') * accuracy_mask
    class_acc = K.sum(class_acc_tensor) / K.maximum(K.sum(accuracy_mask), 1)
    return class_acc
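A quick sanity check (my sketch, assuming the metric above and a TensorFlow-backed Keras): only rows where the model predicts class 2 count toward the accuracy.

from tensorflow.keras import backend as K

y_true = K.constant([[0., 0., 1.], [0., 0., 1.], [1., 0., 0.]])  # classes 2, 2, 0
y_pred = K.constant([[.1, .1, .8], [.7, .2, .1], [.2, .1, .7]])  # predicts 2, 0, 2
print(K.eval(class2_accuracy(y_true, y_pred)))  # 1 correct of the 2 rows predicted as class 2 -> 0.5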
Example #8
    def loss(y_true, y_pred):

        loss_val = -1 * K.sum(
            K.log(K.softmax(y_pred[:, :-1])) * y_true[:, :-1], axis=-1)

        return K.mean(
            K.switch(
                K.equal(task, 1005), loss_weights[task] * loss_val,
                K.switch(K.equal(y_true[:, -1], task), loss_val,
                         loss_weights[task] * loss_val)))
Example #9
 def f(y_true, y_pred):
     class_id_true = K.argmax(y_true, axis=-1)
     class_id_preds = K.argmax(y_pred, axis=-1)
     # Replace class_id_preds with class_id_true for recall here
     accuracy_mask = K.cast(K.equal(class_id_preds, interested_class_id),
                            'int32')
     class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds),
                               'int32') * accuracy_mask
     class_acc = K.sum(class_acc_tensor) / K.maximum(
         K.sum(accuracy_mask), 1)
     return class_acc
Example #10
    def custom_accuracy(y_true, y_pred):
        y_true_1 = tf.slice(y_true, [0, 0], [batch_size, 6])
        y_true_2 = tf.slice(y_true, [0, 6], [batch_size, 6])
        y_pred_1 = tf.slice(y_pred, [0, 0], [batch_size, 6])
        y_pred_2 = tf.slice(y_pred, [0, 6], [batch_size, 6])

        equal = backend.equal(
            backend.equal(backend.argmax(y_true_1, axis=-1),
                          backend.argmax(y_pred_1, axis=-1)),
            backend.equal(backend.argmax(y_true_2, axis=-1),
                          backend.argmax(y_pred_2, axis=-1)))
        return backend.mean(equal)
Example #11
 def custom_loss(y_true, y_pred, loss_weights=loss_weights):  # Verified
     
     zero_index = K.zeros_like(y_true[:, 0]) 
     ones_index = K.ones_like(y_true[:, 0]) 
     
     # Classifier
     labels = y_true[:, 0] 
     class_preds = y_pred[:, 0] 
     bi_crossentropy_loss = -labels * K.log(class_preds) - (1 - labels) * K.log(1 - class_preds) 
     
     classify_valid_index = tf.where(K.less(y_true[:, 0], 0), zero_index, ones_index) 
     classify_keep_num = K.cast(tf.cast(tf.reduce_sum(classify_valid_index), tf.float32) * SAMPLE_KEEP_RATIO, dtype = tf.int32) 
     # For classification problem, only pick 70% of the valid samples. 
     
     classify_loss_sum = bi_crossentropy_loss * tf.cast(classify_valid_index, bi_crossentropy_loss.dtype) 
     classify_loss_sum_filtered, _ = tf.nn.top_k(classify_loss_sum, k = classify_keep_num) 
     classify_loss = tf.where(K.equal(classify_keep_num, 0), tf.constant(0, dtype = tf.float32), K.mean(classify_loss_sum_filtered)) 
     
     # Bounding box regressor
     rois = y_true[:, 1: 5] 
     roi_preds = y_pred[:, 1: 5] 
     roi_raw_mean_square_error = K.sum(K.square(rois - roi_preds), axis = 1) # mse
     # roi_raw_smooth_l1_loss = K.mean(tf.where(K.abs(rois - roi_preds) < 1, 0.5 * K.square(rois - roi_preds), K.abs(rois - roi_preds) - 0.5)) # L1 Smooth Loss 
     
     roi_valid_index = tf.where(K.equal(K.abs(y_true[:, 0]), 1), ones_index, zero_index) 
     roi_keep_num = K.cast(tf.reduce_sum(roi_valid_index), dtype = tf.int32) 
     
     roi_valid_mean_square_error = roi_raw_mean_square_error * tf.cast(roi_valid_index, roi_raw_mean_square_error.dtype)
     roi_filtered_mean_square_error, _ = tf.nn.top_k(roi_valid_mean_square_error, k = roi_keep_num) 
     roi_loss = tf.where(K.equal(roi_keep_num, 0), tf.constant(0, dtype = tf.float32), K.mean(roi_filtered_mean_square_error)) 
     # roi_valid_smooth_l1_loss = roi_raw_smooth_l1_loss * roi_valid_index
     # roi_filtered_smooth_l1_loss, _ = tf.nn.top_k(roi_valid_smooth_l1_loss, k = roi_keep_num) 
     # roi_loss = K.mean(roi_filtered_smooth_l1_loss) 
     
     # Landmark regressor
     pts = y_true[:, 5: 17] 
     pt_preds = y_pred[:, 5: 17] 
     pts_raw_mean_square_error  = K.sum(K.square(pts - pt_preds), axis = 1) # mse 
     # pts_raw_smooth_l1_loss = K.mean(tf.where(K.abs(pts - pt_preds) < 1, 0.5 * K.square(pts - pt_preds), K.abs(pts - pt_preds) - 0.5)) # L1 Smooth Loss 
     
     pts_valid_index = tf.where(K.equal(y_true[:, 0], -2), ones_index, zero_index) 
     pts_keep_num = K.cast(tf.reduce_sum(pts_valid_index), dtype = tf.int32) 
     
     pts_valid_mean_square_error = pts_raw_mean_square_error * tf.cast(pts_valid_index, tf.float32) 
     pts_filtered_mean_square_error, _ = tf.nn.top_k(pts_valid_mean_square_error, k = pts_keep_num) 
     pts_loss = tf.where(K.equal(pts_keep_num, 0), tf.constant(0, dtype = tf.float32), K.mean(pts_filtered_mean_square_error)) 
     # pts_valid_smooth_l1_loss = pts_raw_smooth_l1_loss * pts_valid_index
     # pts_filtered_smooth_l1_loss, _ = tf.nn.top_k(pts_valid_smooth_l1_loss, k = pts_keep_num) 
     # pts_loss = K.mean(pts_filtered_smooth_l1_loss)
     
     loss = classify_loss * loss_weights[0] + roi_loss * loss_weights[1] + pts_loss * loss_weights[2]
     
     return loss 
Example #12
File: model.py  Project: VolkerH/TRAILMAP
def adjusted_accuracy(y_true, y_pred):
    weights = tf.reduce_sum(y_true, axis=-1, keepdims=True)

    mask = K.equal(weights, 1)

    axons_true = y_true[:, :, :, :, 0]
    axons_true = K.expand_dims(axons_true, -1)

    mask_true = tf.boolean_mask(axons_true, mask)
    mask_pred = tf.boolean_mask(y_pred, mask)

    return K.mean(K.equal(mask_true, K.round(mask_pred)))
Example #13
def mbce(y_true, y_pred):
    """ Balanced sigmoid cross-entropy loss with masking """
    mask = K.not_equal(y_true, -1.0)
    mask = K.cast(mask, dtype=np.float32)
    num_examples = K.sum(mask, axis=1)
    pos = K.cast(K.equal(y_true, 1.0), dtype=np.float32)
    num_pos = K.sum(pos, axis=None)
    neg = K.cast(K.equal(y_true, 0.0), dtype=np.float32)
    num_neg = K.sum(neg, axis=None)
    pos_ratio = 1.0 - num_pos / num_neg
    mbce = mask * tf.nn.weighted_cross_entropy_with_logits(
        targets=y_true, logits=y_pred, pos_weight=pos_ratio)
    mbce = K.sum(mbce, axis=1) / num_examples
    return K.mean(mbce, axis=-1)
Example #14
def create_model(numNodes, embedding_size, lamb_V):

    u = Input(shape=(1, ))
    pos = Input(shape=(1, ))
    neg = Input(shape=(1, ))
    train_type = Input(shape=(1, ))

    # No reg
    vertex_emb = Embedding(numNodes, embedding_size, name='vertex_emb')
    context_emb = Embedding(numNodes, embedding_size, name='context_emb')

    u_emb = vertex_emb(u)
    pos_emb = vertex_emb(pos)
    neg_emb = vertex_emb(neg)

    pos_ctx = context_emb(pos)
    neg_ctx = context_emb(neg)

    # DS pair score
    DS_score = Lambda(lambda x: x[0] * x[1] - x[0] * x[2],
                      name='DS_SCORE')([u_emb, pos_emb, neg_emb])

    # NS pair
    NS_score = Lambda(lambda x: x[0] * x[1] - x[0] * x[2],
                      name='NS_SCORE')([u_emb, pos_ctx, neg_ctx])

    score = Lambda(lambda x: K.switch(
        K.equal(x[2], 1), tf.reduce_sum(x[0], axis=-1, keep_dims=False),
        tf.reduce_sum(x[1], axis=-1, keep_dims=False)),
                   name='switch')([DS_score, NS_score, train_type])

    model = Model(inputs=[u, pos, neg, train_type], outputs=score)

    return model, vertex_emb
Example #15
    def __init__(self, optimizer, steps_per_update=1, **kwargs):
        super(AccumOptimizer, self).__init__(**kwargs)

        self.optimizer = optimizer
        with K.name_scope(self.__class__.__name__):
            self.steps_per_update = steps_per_update
            self.iterations = K.variable(0, "int64", "iteration")
            self.cond = K.equal(self.iterations % steps_per_update, 0)
            self.lr = self.optimizer.lr

            self.accum_grads = None

            self.optimizer.lr = K.switch(self.cond, self.lr, 0)
            for attr in ["momentum", "rho", "beta_1", "beta_2"]:
                if hasattr(self.optimizer, attr):
                    value = getattr(self.optimizer, attr)
                    setattr(self, attr, value)
                    setattr(self.optimizer, attr, 1. - 1e-7)

            for cfg in self.optimizer.get_config():
                if not hasattr(self, cfg):
                    value = getattr(self.optimizer, cfg)
                    setattr(self, cfg, value)

            # Cover the original get_gradients method with accumulative gradients.
            def get_gradients(loss, params):
                return [ag / self.steps_per_update for ag in self.accum_grads]

            self.optimizer.get_gradients = get_gradients
Example #16
    def generate_val_set(self):
        """
        Generates the validation dataset. It uses all the functions defined above to read images from disk and create crops.
        :return: tf.data.Dataset
        """
        parse_path_func = lambda x, y: self.parse_path(x, y)
        process_label_func = lambda x, y: self.process_label(x, y)
        resize_func = lambda x, y: self.resize_and_norm(x, y)
        crops_func = lambda x, y: self.crop_img_and_serve(x, y)
        filter_func = lambda x, y: K.equal(K.any(y), False)

        batch_size = self.batch_size

        n_el = len(list(self.val_id_ep_dict.keys()))
        ids = []
        labels = []
        for k, v in self.val_id_ep_dict.items():
            ids.append(os.path.join(self.train_images_folder, k))
            labels.append(v)
        id_tensor = K.constant(ids, dtype=tf.string, shape=([n_el]))
        label_tensor = K.constant(labels, dtype=tf.string, shape=(n_el, 4))
        return (tf.data.Dataset.from_tensor_slices((id_tensor, label_tensor))
                .shuffle(buffer_size=n_el)
                .map(parse_path_func, num_parallel_calls=AUTOTUNE)
                .map(process_label_func, num_parallel_calls=AUTOTUNE)  # create actual one_crop
                .map(resize_func, num_parallel_calls=AUTOTUNE)  # create actual one_crop
                .map(crops_func, num_parallel_calls=AUTOTUNE)  # create crops of image to enlarge output
                .flat_map(
            lambda x, y: tf.data.Dataset.from_tensor_slices((x, y)))  # serve crops as new dataset to flat_map array
                .filter(filter_func)
                .batch(batch_size)  # defined batch_size
                .prefetch(AUTOTUNE)  # number of batches to be prefetch.
                .repeat()  # repeats the dataset when it is finished
                )
Example #17
    def triplet_loss(y_true, y_pred):
        # Euclidean dist between all pairs
        dist = K.expand_dims(y_pred, axis=1) - K.expand_dims(y_pred, axis=0)
        dist_mat = K.cast(K.sqrt(K.sum(K.square(dist), axis=-1) + K.epsilon()),
                          dtype='float32')
        self_mask = K.cast(K.equal(K.expand_dims(y_true, axis=1),
                                   K.expand_dims(y_true, axis=0)),
                           dtype='float32')

        # Invert the positive mask to get the negative mask
        neg_mask = K.cast(tf.abs(self_mask - 1), dtype='float32')

        # Ensure samples are not matched with themselves
        diag = tf.linalg.diag_part(self_mask) - tf.linalg.diag_part(self_mask)
        pos_mask = K.cast(tf.linalg.set_diag(self_mask, diag), dtype='float32')

        # Pick the top-k pairs for each example (furthest positives and closest negatives)
        top_k_pos = tf.nn.top_k(dist_mat * pos_mask, k).values
        top_k_neg = tf.abs(
            tf.nn.top_k(-1 * (dist_mat * neg_mask + 1e10 * self_mask),
                        k).values)

        loss = K.mean(margin + K.expand_dims(top_k_pos, axis=-1) -
                      K.expand_dims(top_k_neg, axis=-2))
        loss = K.maximum(loss, 0)
        return loss
Example #18
def captcha_metric(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, alphabet))
    y_true = K.reshape(y_true, (-1, alphabet))
    y_p = K.argmax(y_pred, axis=1)
    y_t = K.argmax(y_true, axis=1)
    r = K.mean(K.cast(K.equal(y_p, y_t), 'float32'))
    return r
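Illustrative usage (my sketch, not from the source project): `alphabet` is assumed to be the number of symbols per character position, with labels flattened to shape (batch, n_chars * alphabet).

from tensorflow.keras import backend as K

alphabet = 3  # hypothetical 3-symbol alphabet
y_true = K.constant([[1., 0., 0., 0., 1., 0.]])  # two characters: classes 0 and 1
y_pred = K.constant([[.8, .1, .1, .2, .3, .5]])  # predicted: classes 0 and 2
print(K.eval(captcha_metric(y_true, y_pred)))    # 1 of 2 characters correct -> 0.5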
Example #19
def fractional_accuracy(y_true, y_pred):
    equal = K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1))
    X = K.mean(K.sum(K.cast(equal, tf.float32), axis=-1))
    not_equal = K.not_equal(K.argmax(y_true, axis=-1), K.argmax(y_pred,
                                                                axis=-1))
    Y = K.mean(K.sum(K.cast(not_equal, tf.float32), axis=-1))
    return X / (X + Y)
Example #20
 def _fd_conditional(y_true, y_pred):
     # if there are no masks annotations, return 0; else, compute fdl loss
     return tf.cond(
         K.any(K.equal(K.shape(y_true), 0)),
         lambda: K.cast_to_floatx(0.0),
         lambda: _fd_batch(y_true, y_pred,
                           iou_threshold=self.fdl_iou_threshold,
                           parallel_iterations=self.parallel_iterations))
Example #21
    def loss(y_true, y_pred):
        y_true = K.cast(y_true, K.floatx())
        mask = K.equal(y_true, mask_value)
        mask = 1 - K.cast(mask, K.floatx())
        y_true = y_true * mask

        loss = K.sparse_categorical_crossentropy(y_true, y_pred) * mask
        return K.sum(loss) / K.sum(mask)
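In practice this loss is usually produced by a factory that binds `mask_value`; a sketch of such a wrapper (assumed, not shown in the source) plus a tiny evaluation:

from tensorflow.keras import backend as K

def masked_sparse_categorical_crossentropy(mask_value=0):
    def loss(y_true, y_pred):
        y_true = K.cast(y_true, K.floatx())
        mask = 1 - K.cast(K.equal(y_true, mask_value), K.floatx())
        ce = K.sparse_categorical_crossentropy(y_true * mask, y_pred) * mask
        return K.sum(ce) / K.sum(mask)
    return loss

y_true = K.constant([1., 0.])                    # label 0 acts as the masked value here
y_pred = K.constant([[0.2, 0.8], [0.5, 0.5]])
print(K.eval(masked_sparse_categorical_crossentropy(0)(y_true, y_pred)))  # -log(0.8) ~= 0.223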
Example #22
 def accuracy(y_true, y_pred):
     # reshape in case it's in shape (num_samples, 1) instead of (num_samples,)
     if K.ndim(y_true) == K.ndim(y_pred):
         y_true = K.squeeze(y_true, -1)
     # convert dense predictions to labels
     y_pred_labels = K.argmax(y_pred, axis=-1)
     y_pred_labels = K.cast(y_pred_labels, K.floatx())
     return K.cast(K.equal(y_true, y_pred_labels), K.floatx())
Example #23
        def _roi_align(args):
            boxes = args[0]
            scores = args[1]
            fpn = args[2]

            # compute from which level to get features from
            target_levels = self.map_to_level(boxes)

            # process each pyramid independently
            rois, ordered_indices = [], []
            for i in range(len(fpn)):
                # select the boxes and classification from this pyramid level
                indices = tf.where(K.equal(target_levels, i))
                ordered_indices.append(indices)

                level_boxes = tf.gather_nd(boxes, indices)
                fpn_shape = K.cast(K.shape(fpn[i]), dtype=K.floatx())

                # convert to expected format for crop_and_resize
                x1 = level_boxes[:, 0]
                y1 = level_boxes[:, 1]
                x2 = level_boxes[:, 2]
                y2 = level_boxes[:, 3]
                level_boxes = K.stack([
                    (y1 / image_shape[1] * fpn_shape[0]) / (fpn_shape[0] - 1),
                    (x1 / image_shape[2] * fpn_shape[1]) / (fpn_shape[1] - 1),
                    (y2 / image_shape[1] * fpn_shape[0] - 1) / (fpn_shape[0] - 1),
                    (x2 / image_shape[2] * fpn_shape[1] - 1) / (fpn_shape[1] - 1),
                ], axis=1)

                if len(fpn[i].get_shape()) >= 4:
                    unstack = tf.unstack(fpn[i], axis=3)
                    temp_stack = []
                    for j in unstack:
                        temp = tf.image.crop_and_resize(
                            K.expand_dims(j, axis=3),
                            level_boxes,
                            tf.zeros((K.shape(level_boxes)[0],), dtype='int32'),
                            (self.crop_size[0], self.crop_size[1]))
                        temp_stack.append(temp)
                    rois.append(temp_stack)
                else:
                    rois.append(tf.image.crop_and_resize(
                        K.expand_dims(fpn[i], axis=0),
                        level_boxes,
                        tf.zeros((K.shape(level_boxes)[0],), dtype='int32'),
                        self.crop_size
                    ))


            # concatenate rois to one blob
            rois = K.concatenate(rois, axis=0)

            # reorder rois back to original order
            indices = K.concatenate(ordered_indices, axis=0)
            rois = tf.scatter_nd(indices, rois, K.cast(K.shape(rois), 'int64'))

            return rois
Example #24
def dice(y_true, y_pred):
    eps = K.constant(1e-6)
    truelabels = tf.argmax(y_true, axis=-1, output_type=tf.int32)
    predictions = tf.argmax(y_pred, axis=-1, output_type=tf.int32)
    # cast -> type conversion; minimum -> element-wise minimum of two tensors; equal -> returns booleans
    intersection = K.cast(K.sum(K.minimum(K.cast(K.equal(predictions, truelabels), tf.int32), truelabels)), tf.float32)
    union = tf.count_nonzero(predictions, dtype=tf.float32) + tf.count_nonzero(truelabels, dtype=tf.float32)
    dice = 2. * intersection / (union + eps)
    return dice
Example #25
def mask_acc(y_true, y_pred):
    y_true_class = K.argmax(y_true, axis=-1)
    y_pred_class = K.argmax(y_pred, axis=-1)

    ignore_mask = K.cast(K.not_equal(y_true_class, 0), "int32")
    matches = K.cast(K.equal(y_true_class, y_pred_class),
                     "int32") * ignore_mask
    accuracy = K.sum(matches) / K.maximum(K.sum(ignore_mask), 1)
    return accuracy
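A quick check (my sketch, assuming `mask_acc` above and a TensorFlow-backed Keras): positions whose true class is 0 are treated as padding and excluded from the accuracy.

from tensorflow.keras import backend as K

y_true = K.constant([[[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]]])  # classes 1, 2, 0 (pad)
y_pred = K.constant([[[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]])  # predicts 1, 0, 1
print(K.eval(mask_acc(y_true, y_pred)))  # 1 of 2 non-padding positions correct -> 0.5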
Example #26
def accuracy_ignore_padding(y_true, y_pred):
    prediction = backend.argmax(y_pred, axis=-1)
    target = backend.argmax(y_true, axis=-1)

    accuracy = backend.equal(
        backend.array_ops.boolean_mask(prediction,
                                       backend.not_equal(target, 0)),
        backend.array_ops.boolean_mask(target, backend.not_equal(target, 0)))

    return backend.mean(accuracy)
Example #27
def multitask_accuracy(y_true, y_pred):
    """Multi-Task accuracy metric.

    Only computes a batch-wise average of accuracy.

    Computes the accuracy, a metric for multi-label classification of
    how many items are selected correctly.
    """
    return K.mean(
        K.equal(K.argmax(y_true[:, :-1], axis=-1),
                K.argmax(y_pred[:, :-1], axis=-1)))
Example #28
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    nb_classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, nb_classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       nb_classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[-1], tf.bool)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    correct = tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1),
                                                 K.argmax(y_pred, axis=-1)))
    return K.sum(correct) / K.sum(tf.to_float(legal_labels))
Example #29
def contingency_table(y, z):
    """Note:  if y and z are not rounded to 0 or 1, they are ignored
    """
    y = K.cast(K.round(y), K.floatx())
    z = K.cast(K.round(z), K.floatx())

    def count_matches(y, z):
        return K.sum(K.cast(y, K.floatx()) * K.cast(z, K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)

    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)
    return (tp, tn, fp, fn)
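A follow-up sketch (mine, not from the source project) showing how the returned counts are typically turned into precision and recall:

from tensorflow.keras import backend as K

y = K.constant([1., 1., 0., 0.])
z = K.constant([0.9, 0.2, 0.8, 0.1])      # rounds to 1, 0, 1, 0
tp, tn, fp, fn = contingency_table(y, z)
precision = tp / (tp + fp + K.epsilon())
recall = tp / (tp + fn + K.epsilon())
print(K.eval(precision), K.eval(recall))  # 0.5 0.5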
Example #30
def equal(f, other, tol=None):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A Python number, a tensor, or a Functional object.
        tol: (float) Optional tolerance; when given, values within `tol`
            of each other are treated as equal.

    # Returns
        A Functional.
    """
    validate_functional(f)
    assert isinstance(
        tol, (type(None), float)), 'Expected a floating value for `tol`.'

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        if tol is None:
            lambda_opr = lambda x: K.cast_to_floatx(K.equal(x[0], x[1]))
        else:
            lambda_opr = lambda x: K.cast_to_floatx(
                K.less_equal(K.abs(x[0] - x[1]), tol))
    else:
        _warn_for_ndarray(other)
        if tol is None:
            lambda_opr = lambda x: K.cast_to_floatx(K.equal(x, other))
        else:
            lambda_opr = lambda x: K.cast_to_floatx(
                K.less_equal(K.abs(x - other), tol))

    lmbd = [
        Lambda(lambda_opr, name=graph_unique_name("equal")) for X in f.outputs
    ]

    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=_apply_operation(lmbd, f, other),
                     layers=lmbd)
    return res
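A minimal backend-level sketch (my illustration, not from the source project) of the `tol` branch: values within the tolerance are reported as equal (1.0).

from tensorflow.keras import backend as K

x = K.constant([0.0, 0.49, 1.0])
tol = 0.05
approx_equal = K.cast(K.less_equal(K.abs(x - 0.5), tol), K.floatx())
print(K.eval(approx_equal))  # [0. 1. 0.]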