def loss(y_true, y_pred):
    # https://stats.stackexchange.com/questions/272754/how-do-interpret-an-cross-entropy-score
    # payoffs [back, lay, 0, maximum_possible]; captured from the enclosing scope
    # y_true: [1,0,0] or [0,1,0]

    back_true = y_true[:, :, 0]
    back_pred = y_pred[:, :, 0]
    lay_true = y_true[:, :, 1]
    lay_pred = y_pred[:, :, 1]

    # A correct back bet earns payoffs - 1; a wrong one loses the stake (-1).
    back_loss = K.switch(
        K.all(K.stack([K.equal(back_true, back_pred),
                       K.equal(back_pred, 1)], axis=0), axis=0),
        (payoffs - 1) * K.ones_like(back_pred),
        K.switch(
            K.all(K.stack([K.not_equal(back_true, back_pred),
                           K.equal(back_pred, 1)], axis=0), axis=0),
            -K.ones_like(back_pred), K.zeros_like(back_pred)))

    # A correct lay bet earns 1; a wrong one loses payoffs - 1.
    lay_loss = K.switch(
        K.all(K.stack([K.equal(lay_true, lay_pred),
                       K.equal(lay_pred, 1)], axis=0), axis=0),
        K.ones_like(lay_pred),
        K.switch(
            K.all(K.stack([K.not_equal(lay_true, lay_pred),
                           K.equal(lay_pred, 1)], axis=0), axis=0),
            (1 - payoffs) * K.ones_like(lay_pred), K.zeros_like(lay_pred)))

    # The switches compute payoffs (higher is better), so negate the mean
    # to obtain a loss to minimize.
    return -K.mean(lay_loss + back_loss)
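`payoffs` is read from the enclosing scope. A hypothetical wiring with illustrative odds and model shape (not from the source):

from tensorflow.keras import backend as K, layers, models

payoffs = K.constant(2.5)  # illustrative decimal odds
model = models.Sequential(
    [layers.Dense(3, activation='softmax', input_shape=(10, 8))])
model.compile(optimizer='adam', loss=loss)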
Example #2
def mean_squared_error_difference_learn(y_true, y_pred):
    # y_true packs two depth maps in the channel axis:
    # channel 0 is the ground-truth depth, channel 1 the depth map with gaps.
    depth_gt = y_true[:, :, :, 0]
    depth_gap = y_true[:, :, :, 1]

    # depth_threshold, difference_threshold and scaling come from the enclosing scope.
    is_gt_available = depth_gt > depth_threshold
    is_gap_unavailable = depth_gap < depth_threshold

    is_depth_close = K.all(
        K.stack([K.abs(depth_gap - depth_gt) < difference_threshold,
                 is_gt_available], axis=0), axis=0)

    # difference learning: the target is the residual between gt and gap
    gt = (depth_gt - depth_gap) * scaling

    # complement mode: also supervise pixels where gt exists but the gap map doesn't
    is_complement = False
    if is_complement:
        is_to_interpolate = K.all(
            K.stack([is_gt_available, is_gap_unavailable], axis=0), axis=0)
        is_valid = K.any(
            K.stack([is_to_interpolate, is_depth_close], axis=0), axis=0)
        is_valid = K.cast(is_valid, 'float32')
    else:
        is_valid = K.cast(is_depth_close, 'float32')

    valid_length = K.sum(is_valid)
    # err = K.sum(K.square(gt - y_pred[:, :, :, 0]) * is_valid) / valid_length  # MSE
    # K.epsilon() guards against division by zero when no pixel is valid.
    err = K.sum(K.abs(gt - y_pred[:, :, :, 0]) * is_valid) / (valid_length + K.epsilon())  # MAE
    return err
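`depth_threshold`, `difference_threshold` and `scaling` also come from the enclosing scope, and y_true packs the ground-truth and gap depth maps in its channels. A toy check with illustrative values, where only one pixel is valid and its target residual is 2.0 - 1.8 = 0.2:

from tensorflow.keras import backend as K

depth_threshold, difference_threshold, scaling = 0.1, 0.5, 1.0  # illustrative
y_true = K.constant([[[[2.0, 1.8], [3.0, 0.0]]]])   # shape (1, 1, 2, 2): [gt, gap]
y_pred = K.constant([[[[0.3], [9.9]]]])             # shape (1, 1, 2, 1)
print(K.eval(mean_squared_error_difference_learn(y_true, y_pred)))  # |0.2 - 0.3| = 0.1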
Example #3
def recall(y_true, y_pred):
    # Concatenate the conditions along axis 1 and reduce with K.all:
    # a row is a true positive iff y_true = 1 and round(y_pred) = 1,
    # and a false negative iff y_true = 1 and round(y_pred) = 0.
    tp_3d = K.concatenate([
        K.cast(y_true, 'bool'),
        K.cast(K.round(y_pred), 'bool'),
        K.cast(K.ones_like(y_pred), 'bool')
    ], axis=1)

    fn_3d = K.concatenate([
        K.cast(y_true, 'bool'),
        K.cast(K.abs(K.round(y_pred) - K.ones_like(y_pred)), 'bool'),
        K.cast(K.ones_like(y_pred), 'bool')
    ], axis=1)

    tp = K.sum(K.cast(K.all(tp_3d, axis=1), 'float32'))
    fn = K.sum(K.cast(K.all(fn_3d, axis=1), 'float32'))

    # K.epsilon() guards against division by zero when there are no positives.
    recall = tp / (tp + fn + K.epsilon())

    return recall
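A quick sanity check of the concatenate-then-K.all counting trick on toy column vectors (assuming a TensorFlow backend):

from tensorflow.keras import backend as K

y_true = K.constant([[1.0], [0.0], [1.0], [1.0]])
y_pred = K.constant([[0.9], [0.8], [0.2], [0.6]])
print(K.eval(recall(y_true, y_pred)))  # 2 TP / (2 TP + 1 FN) ≈ 0.667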
Example #4
    def build():
        states = Input(shape=(height * base, width * base))
        error = build_error(states, height, width, base)
        matches = 1 - K.clip(K.sign(error - threshold), 0, 1)
        # matches has shape (batch, h, w, panel)

        num_matches = K.sum(matches, axis=3)
        panels_ok = K.all(K.equal(num_matches, 1), (1, 2))
        panels_ng = K.any(K.not_equal(num_matches, 1), (1, 2))
        panels_nomatch = K.any(K.equal(num_matches, 0), (1, 2))
        panels_ambiguous = K.any(K.greater(num_matches, 1), (1, 2))

        panel_coverage = K.sum(matches, axis=(1, 2))
        # ideally, this should be [[1,1,1,1,1,1,1,1,1], ...]
        coverage_ok = K.all(K.less_equal(panel_coverage, 1), 1)
        coverage_ng = K.any(K.greater(panel_coverage, 1), 1)
        validity = tf.logical_and(panels_ok, coverage_ok)

        if verbose:
            return Model(states, [
                wrap(states, x) for x in [
                    panels_ok, panels_ng, panels_nomatch, panels_ambiguous,
                    coverage_ok, coverage_ng, validity
                ]
            ])
        else:
            return Model(states, wrap(states, validity))
Example #5
    def build():
        states = Input(shape=(tower_height, tower_width * towers))
        error = build_error(states, disks, towers, tower_width, panels)
        matches = 1 - K.clip(K.sign(error - threshold), 0, 1)

        num_matches = K.sum(matches, axis=3)
        panels_ok = K.all(K.equal(num_matches, 1), (1, 2))
        panels_ng = K.any(K.not_equal(num_matches, 1), (1, 2))
        panels_nomatch = K.any(K.equal(num_matches, 0), (1, 2))
        panels_ambiguous = K.any(K.greater(num_matches, 1), (1, 2))

        panel_coverage = K.sum(matches, axis=(1, 2))
        # ideally, this should be [[1, 1, ..., 1, disks*towers - disks], ...]

        ideal_coverage = np.ones(disks + 1)
        ideal_coverage[-1] = disks * towers - disks
        ideal_coverage = K.variable(ideal_coverage)
        coverage_ok = K.all(K.equal(panel_coverage, ideal_coverage), 1)
        coverage_ng = K.any(K.not_equal(panel_coverage, ideal_coverage), 1)
        validity = tf.logical_and(panels_ok, coverage_ok)

        if verbose:
            return Model(states, [
                wrap(states, x) for x in [
                    panels_ok, panels_ng, panels_nomatch, panels_ambiguous,
                    coverage_ok, coverage_ng, validity
                ]
            ])
        else:
            return Model(states, wrap(states, validity))
Example #6
def f1_score(y_true, y_pred):
    # A row is a true positive iff y_true = 1 and round(y_pred) = 1,
    # a false positive iff y_true = 0 and round(y_pred) = 1,
    # and a false negative iff y_true = 1 and round(y_pred) = 0.
    tp_3d = K.concatenate([
        K.cast(y_true, 'bool'),
        K.cast(K.round(y_pred), 'bool'),
        K.cast(K.ones_like(y_pred), 'bool')
    ], axis=1)

    fp_3d = K.concatenate([
        K.cast(K.abs(y_true - K.ones_like(y_true)), 'bool'),
        K.cast(K.round(y_pred), 'bool'),
        K.cast(K.ones_like(y_pred), 'bool')
    ], axis=1)

    fn_3d = K.concatenate([
        K.cast(y_true, 'bool'),
        K.cast(K.abs(K.round(y_pred) - K.ones_like(y_pred)), 'bool'),
        K.cast(K.ones_like(y_pred), 'bool')
    ], axis=1)

    tp = K.sum(K.cast(K.all(tp_3d, axis=1), 'float32'))
    fp = K.sum(K.cast(K.all(fp_3d, axis=1), 'float32'))
    fn = K.sum(K.cast(K.all(fn_3d, axis=1), 'float32'))

    # K.epsilon() guards against empty classes.
    precision = tp / (tp + fp + K.epsilon())
    recall = tp / (tp + fn + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())
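The same trick feeds the F1 computation; a toy check with one TP, one FP and one FN (illustrative data):

from tensorflow.keras import backend as K

y_true = K.constant([[1.0], [1.0], [0.0], [0.0]])
y_pred = K.constant([[0.9], [0.2], [0.7], [0.1]])
print(K.eval(f1_score(y_true, y_pred)))  # P = R = 0.5, so F1 = 0.5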
Example #7
def mmd(x):
    """
    Maximum mean discrepancy (MMD) based on a Gaussian kernel,
    for Keras models (Theano or TensorFlow backend).
    - Gretton, Arthur, et al. "A kernel method for the two-sample-problem."
      Advances in Neural Information Processing Systems. 2007.
    """
    # One-hot label against which samples are zeroed out.
    kvar = K.constant(value=np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
                      dtype='float32')

    def mask_if_label(cur):
        # Replace the sample with zeros when its label equals kvar.
        sample, label = cur
        return tf.cond(K.all(K.equal(label, kvar)),
                       lambda: K.expand_dims(K.zeros_like(sample), axis=0),
                       lambda: K.expand_dims(sample, axis=0))

    train_tensor = tf.map_fn(mask_if_label, (x[0], x[2]), dtype=tf.float32)
    test_tensor = tf.map_fn(mask_if_label, (x[1], x[3]), dtype=tf.float32)

    beta = 1.0
    x1x1 = gaussian_kernel(train_tensor, train_tensor, beta)
    x1x2 = gaussian_kernel(train_tensor, test_tensor, beta)
    x2x2 = gaussian_kernel(test_tensor, test_tensor, beta)
    # MMD^2 estimate: E[k(x1,x1)] - 2 E[k(x1,x2)] + E[k(x2,x2)]
    diff = K.mean(x1x1) - 2 * K.mean(x1x2) + K.mean(x2x2)

    return diff
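`gaussian_kernel` is not part of the snippet. A minimal sketch of a Gaussian (RBF) kernel matrix, assuming the inputs flatten to (n, d) row-vector samples; the original helper may differ:

from tensorflow.keras import backend as K

def gaussian_kernel(x1, x2, beta=1.0):
    # k(a, b) = exp(-beta * ||a - b||^2) for every row pair of x1 and x2.
    x1 = K.reshape(x1, (K.shape(x1)[0], -1))
    x2 = K.reshape(x2, (K.shape(x2)[0], -1))
    diff = K.expand_dims(x1, axis=1) - K.expand_dims(x2, axis=0)  # (n1, n2, d)
    return K.exp(-beta * K.sum(K.square(diff), axis=-1))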
Example #8
 def compute_mask(self, x, mask=None):
     if self.return_probabilities:
         mask2 = mask
         if mask is not None:
             mask2 = K.expand_dims(K.all(mask2, axis=-1))
         return [mask, mask2]
     return mask
Example #9
def m_accuracy(true_y, pred_y):
    # Only score positions whose label is non-negative.
    threshold = 0
    mask = K.cast(K.greater_equal(true_y, threshold), 'float32')
    pred_label = pred_y * mask
    true_label = true_y * mask
    return K.mean(K.all(K.equal(true_label, K.round(pred_label)), axis=-1))
Example #10
 def compute_mask(self, inputs, mask=None):
     if mask is None:
         return None
     if not isinstance(mask, (tuple, list)):
         raise ValueError(f"`mask` should be a list. Received mask={mask}")
     if not isinstance(inputs, (tuple, list)):
         raise ValueError(
             f"`inputs` should be a list. Received: inputs={inputs}")
     if len(mask) != len(inputs):
         raise ValueError(
             "The lists `inputs` and `mask` should have the same length. "
             f"Received: inputs={inputs} of length {len(inputs)}, and "
             f"mask={mask} of length {len(mask)}")
     if all(m is None for m in mask):
         return None
     # Make a list of masks while making sure
     # the dimensionality of each mask
     # is the same as the corresponding input.
     masks = []
     for input_i, mask_i in zip(inputs, mask):
         if mask_i is None:
             # Input is unmasked. Append an all-ones mask.
             masks.append(tf.ones_like(input_i, dtype="bool"))
         elif backend.ndim(mask_i) < backend.ndim(input_i):
             # Mask is smaller than the input, expand it
             masks.append(tf.expand_dims(mask_i, axis=-1))
         else:
             masks.append(mask_i)
     concatenated = backend.concatenate(masks, axis=self.axis)
     return backend.all(concatenated, axis=-1, keepdims=False)
Example #11
 def compute_mask(self, inputs, mask=None):
     # It will pass a mask only if all entries were masked,
     # otherwise it won't pass the mask to the next layers
     if len(inputs.shape) > 2 and mask is not None:
         mask = K.all(mask, axis=-1, keepdims=False)
     else:  #don't return mask if not enough dimsions
         return None
Example #12
def zero_one_accuracy(y_true, y_pred):
    y_true, y_pred = tensorify(y_true), tensorify(y_pred)
    n_instances, n_objects = get_instances_objects(y_true)
    # An instance counts only when the whole predicted ranking matches exactly.
    equal_ranks = K.cast(K.all(K.equal(y_pred, y_true), axis=1), dtype="float32")
    denominator = K.cast(n_instances, dtype="float32")
    return K.sum(equal_ranks) / denominator
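`tensorify` and `get_instances_objects` are not shown in the snippet; these are plausible sketches (hypothetical helpers, inferred from the call sites):

import tensorflow as tf
from tensorflow.keras import backend as K

def tensorify(x):
    # Hypothetical: pass tensors through, wrap array-likes as constants.
    return x if tf.is_tensor(x) else K.constant(x)

def get_instances_objects(y):
    # Hypothetical: (n_instances, n_objects) of a 2-D (instances, objects) tensor.
    shape = K.shape(y)
    return shape[0], shape[1]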
Example #14
def inst_weight(output_y, output_x, output_dr, output_dl, config=None):
    # Sobel-style gradients of the regressed coordinate maps.
    dy = output_y[:, 2:, 2:] - output_y[:, :-2, 2:] + \
         2 * (output_y[:, 2:, 1:-1] - output_y[:, :-2, 1:-1]) + \
         output_y[:, 2:, :-2] - output_y[:, :-2, :-2]
    dx = output_x[:, 2:, 2:] - output_x[:, 2:, :-2] + \
         2 * (output_x[:, 1:-1, 2:] - output_x[:, 1:-1, :-2]) + \
         output_x[:, :-2, 2:] - output_x[:, :-2, :-2]
    ddr = (output_dr[:, 2:, 2:] - output_dr[:, :-2, :-2] +
           output_dr[:, 1:-1, 2:] - output_dr[:, :-2, 1:-1] +
           output_dr[:, 2:, 1:-1] - output_dr[:, 1:-1, :-2]) * K.constant(2)
    ddl = (output_dl[:, 2:, :-2] - output_dl[:, :-2, 2:] +
           output_dl[:, 2:, 1:-1] - output_dl[:, 1:-1, 2:] +
           output_dl[:, 1:-1, :-2] - output_dl[:, :-2, 1:-1]) * K.constant(2)
    dpred = K.concatenate([dy, dx, ddr, ddl], axis=-1)
    dpred = K.spatial_2d_padding(dpred)
    # Foreground where every gradient channel exceeds the threshold.
    weight_fg = K.cast(K.all(dpred > K.constant(config.GRADIENT_THRES), axis=3,
                             keepdims=True), K.floatx())

    weight = K.clip(K.sqrt(weight_fg * K.prod(dpred, axis=3, keepdims=True)),
                    config.WEIGHT_AREA / config.CLIP_AREA_HIGH,
                    config.WEIGHT_AREA / config.CLIP_AREA_LOW)
    weight += (1 - weight_fg) * config.WEIGHT_AREA / config.BG_AREA
    # Smooth the weight map with a fixed Gaussian kernel.
    weight = K.conv2d(weight, K.constant(config.GAUSSIAN_KERNEL),
                      padding='same')
    return K.stop_gradient(weight)
Example #15
 def _compute_valid_seed_region(self, height, width):
     # (height, width, 2) grid of (row, col) coordinates.
     positions = K.concatenate([
         K.expand_dims(K.tile(K.expand_dims(K.arange(height), axis=1),
                              [1, width]), axis=-1),
         K.expand_dims(K.tile(K.expand_dims(K.arange(width), axis=0),
                              [height, 1]), axis=-1),
     ], axis=-1)
     half_block_size = self.block_size // 2
     # Seeds are valid only where a full block fits inside the feature map.
     valid_seed_region = K.switch(
         K.all(
             K.stack([
                 positions[:, :, 0] >= half_block_size,
                 positions[:, :, 1] >= half_block_size,
                 positions[:, :, 0] < height - half_block_size,
                 positions[:, :, 1] < width - half_block_size,
             ], axis=-1),
             axis=-1,
         ),
         K.ones((height, width)),
         K.zeros((height, width)),
     )
     return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1)
Example #16
 def compute_mask(self, inputs, mask=None):
     if mask is None:
         return None
     if not isinstance(mask, (tuple, list)):
         raise ValueError('`mask` should be a list.')
     if not isinstance(inputs, (tuple, list)):
         raise ValueError('`inputs` should be a list.')
     if len(mask) != len(inputs):
         raise ValueError('The lists `inputs` and `mask` '
                          'should have the same length.')
     if all(m is None for m in mask):
         return None
     # Make a list of masks while making sure
     # the dimensionality of each mask
     # is the same as the corresponding input.
     masks = []
     for input_i, mask_i in zip(inputs, mask):
         if mask_i is None:
             # Input is unmasked. Append an all-ones mask.
             masks.append(tf.compat.v1.ones_like(input_i, dtype='bool'))
         elif K.ndim(mask_i) < K.ndim(input_i):
             # Mask is smaller than the input, expand it
             masks.append(tf.compat.v1.expand_dims(mask_i, axis=-1))
         else:
             masks.append(mask_i)
     concatenated = K.concatenate(masks, axis=self.axis)
     return K.all(concatenated, axis=-1, keepdims=False)
Example #17
def psnr_masked(true, pred):
    # Defined inside a class: self.config['pad_value'] marks padded entries,
    # which are excluded from both the pixel count and the error sum.
    mask = K.cast(K.not_equal(true, self.config['pad_value']), K.floatx())
    # Frames that are not entirely padding, counted per sample.
    n_frames = K.sum(1 - K.cast(K.all(1 - mask, axis=[2, 3, 4]), K.floatx()),
                     axis=1)
    n_pixels = K.cast(K.prod(K.shape(true)[2:]), K.floatx())
    sq_err = K.sum(K.square(pred - true) * mask, axis=[1, 2, 3, 4])
    return 10. * K.log(n_frames * n_pixels / sq_err) / K.log(10.)
Example #18
def c_iou_zero(y_truth, y_pred):
    # IoU between the rounded ground truth and the thresholded prediction.
    t = K.flatten(K.cast(K.round(y_truth[..., 0]), 'int32'))
    p = K.flatten(K.cast(y_pred[..., 0] > 0, 'int32'))
    intersection = K.all(K.stack([t, p], axis=0), axis=0)
    union = K.any(K.stack([t, p], axis=0), axis=0)
    iou = K.sum(K.cast(intersection, 'int32')) / K.sum(K.cast(union, 'int32'))
    return K.mean(iou)
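A toy check (illustrative data): t = [1, 1, 0] and p = [1, 0, 1] give an intersection of 1 and a union of 3, so the IoU is about 0.333:

from tensorflow.keras import backend as K

y_truth = K.constant([[[1.0], [1.0], [0.0]]])
y_pred = K.constant([[[0.5], [-0.2], [0.3]]])
print(K.eval(c_iou_zero(y_truth, y_pred)))  # ≈ 0.333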
Example #19
def fn(y_true, y_pred):
    # A sample is a false negative iff y_true = 1 and round(y_pred) = 0.
    fn_3d = K.concatenate([
        K.cast(y_true, 'int32'),
        K.cast(K.abs(K.round(y_pred) - K.ones_like(y_pred)), 'int32'),
        K.cast(K.ones_like(y_pred), 'int32')
    ], axis=1)

    return K.sum(K.cast(K.all(fn_3d, axis=1), 'int32'))
Example #20
def recall_binary(y_true, y_pred):
    """ Keras metric for computing recall for a binary classification task during training.

        Recall = true positives / (true positives + false negatives)

        Parameters
        ----------
        y_true : K.variable
            Ground-truth N-dimensional Keras variable of float type containing only 0's and 1's.
        y_pred : K.variable
            Keras variable of float type with predicted values between 0 and 1.

        Returns
        -------
        K.variable
            A single value indicating the recall.

    """
    y_pred_pos = K.greater_equal(y_pred, 0.5)
    logical_and = K.cast(
        K.all(K.stack([K.cast(y_true, 'bool'), y_pred_pos], axis=0), axis=0),
        'float32')
    logical_or = K.cast(
        K.any(K.stack([K.cast(y_true, 'bool'), y_pred_pos], axis=0), axis=0),
        'float32')
    tp = K.sum(logical_and)
    fn = K.sum(logical_or - K.cast(y_pred_pos, 'float32'))
    # Guard against 0/0 when there are no positives at all.
    return K.switch(K.equal(tp, K.constant(0.)), K.constant(0.), tp / (tp + fn))
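A toy check (illustrative data, assuming a TensorFlow backend): two positives, one of which is recovered, so recall = 1 / (1 + 1) = 0.5:

from tensorflow.keras import backend as K

y_true = K.constant([1.0, 0.0, 1.0, 0.0])
y_pred = K.constant([0.9, 0.6, 0.1, 0.2])
print(K.eval(recall_binary(y_true, y_pred)))  # 0.5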
Example #21
    def compute_mask(self, inputs, mask=None):

        if mask is None or not any([m is not None for m in mask]):
            return None

        assert hasattr(mask, '__len__') and len(mask) == len(inputs)

        if self.mode in ['sum', 'mul', 'ave']:
            bool_type = 'bool' if K._BACKEND == 'tensorflow' else 'int32'
            masks = [K.cast(m, bool_type) for m in mask if m is not None]
            mask = masks[0]
            for m in masks[1:]:
                mask = mask & m
            return mask
        elif self.mode in ['concat']:
            masks = [K.ones_like(inputs[i][:-1]) if m is None else m for i, m in enumerate(mask)]
            expanded_dims = [K.expand_dims(m) for m in masks]
            concatenated = K.concatenate(expanded_dims, axis=self.concat_axis)
            return K.all(concatenated, axis=-1, keepdims=False)
        elif self.mode in ['cos', 'dot']:
            return None
        elif hasattr(self.mode, '__call__'):
            if hasattr(self._output_mask, '__call__'):
                return self._output_mask(mask)
            else:
                return self._output_mask
        else:
            # this should have been caught earlier
            raise Exception('Invalid merge mode: {}'.format(self.mode))
Example #22
def my_acc(y_true, y_pred):
    mask = K.all(K.equal(y_true, 0), axis=-1)
    mask = 1 - K.cast(mask, K.floatx())
    acc = (K.cast(
        K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)),
        K.floatx())) * mask
    return K.sum(acc) / K.sum(mask)
Example #23
    def compute_mask(self, inputs, mask=None):
        """
        Combine two masks: the manually supplied mask (inputs[1]) and the
        padding mask (mask[0]).
        """
        mask_combine = K.all(K.stack([K.cast(inputs[1], 'bool'), mask[0]],
                                     axis=0), axis=0)
        return mask_combine
Example #24
 def call(self, x):
     # One-hot encode x into the bins defined by self.thresholds.
     min_cat = K.less(x, self.thresholds[0])
     max_cat = K.greater_equal(x, self.thresholds[-1])
     other_cat = [
         K.all(K.stack([K.greater_equal(x, th1), K.less(x, th2)], axis=0), axis=0)
         for th1, th2 in zip(self.thresholds[:-1], self.thresholds[1:])
     ]
     return K.cast(K.concatenate([min_cat] + other_cat + [max_cat]), K.floatx())
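Replaying the same binning logic outside the layer (illustrative thresholds; `self.thresholds` is assumed to hold sorted bin edges) shows how a value is one-hot encoded into (-inf, t0), [t0, t1), ..., [t_last, inf):

from tensorflow.keras import backend as K

thresholds = [0.0, 1.0, 2.0]  # illustrative
x = K.constant([[1.5]])
min_cat = K.less(x, thresholds[0])
max_cat = K.greater_equal(x, thresholds[-1])
other_cat = [
    K.all(K.stack([K.greater_equal(x, t1), K.less(x, t2)], axis=0), axis=0)
    for t1, t2 in zip(thresholds[:-1], thresholds[1:])
]
print(K.eval(K.cast(K.concatenate([min_cat] + other_cat + [max_cat]), K.floatx())))
# [[0. 0. 1. 0.]]: 1.5 falls in the [1, 2) bin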
Example #25
def APCER(y_true, y_pred):
    # Attack Presentation Classification Error Rate: the fraction of fake
    # samples (class 0) that are misclassified as real (class 1).
    y_true = K.cast(K.argmax(y_true, axis=1), dtype='float32')
    y_pred = K.cast(K.argmax(y_pred, axis=1), dtype='float32')
    count_fake = K.sum(1 - y_true)

    false_positives = K.all(K.stack([K.equal(y_true, 0), K.equal(y_pred, 1)],
                                    axis=0), axis=0)
    false_positives = K.sum(K.cast(false_positives, dtype='float32'))
    return K.switch(K.equal(count_fake, 0), 0.0, false_positives / count_fake)
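A toy check (illustrative one-hot labels): three fake samples (class 0), one of them predicted as real, so APCER = 1/3:

from tensorflow.keras import backend as K

y_true = K.constant([[1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
y_pred = K.constant([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3], [0.1, 0.9]])
print(K.eval(APCER(y_true, y_pred)))  # ≈ 0.333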
Example #26
 def call(self, inputs, states, training=None):
   """Step function of the cell."""
   h_tm1 = states[0]  # previous output
   # All-zero input vectors are treated as padding: keep the previous
   # output and states instead of updating them.
   cond = K.all(K.equal(inputs, 0), axis=-1)
   new_output, new_states = super().call(inputs, states, training=training)
   curr_output = K.switch(cond, h_tm1, new_output)
   curr_states = [K.switch(cond, states[i], new_states[i]) for i in range(len(states))]
   return curr_output, curr_states
Example #27
def BPCER(y_true, y_pred):
    # Bona fide Presentation Classification Error Rate: the fraction of real
    # samples (class 1) that are misclassified as fake (class 0).
    y_true = K.cast(K.argmax(y_true, axis=1), dtype='float32')
    y_pred = K.cast(K.argmax(y_pred, axis=1), dtype='float32')
    count_real = K.sum(y_true)

    false_negatives = K.all(K.stack([K.equal(y_true, 1), K.equal(y_pred, 0)],
                                    axis=0), axis=0)
    false_negatives = K.sum(K.cast(false_negatives, dtype='float32'))
    return K.switch(K.equal(count_real, 0), 0.0, false_negatives / count_real)
Example #28
 def call(self, inputs):
     # Build per-bin interval edges: roll `end` so each column holds the
     # upper edge of its bin, then test init <= inputs < end.
     end = tf.concat((self.init, self.end), axis=-1)
     end = tf.roll(end, shift=[0, -1], axis=[0, 1])
     end = end[:, :-1]
     a = K.greater_equal(inputs, self.init)
     b = K.greater(end, inputs)
     c = K.cast(K.all(K.stack([a, b], axis=0), axis=0), np.float64)
     return c
Example #29
def full_number_accuracy(y_true, y_pred):
    # A sequence counts as correct only when every timestep's argmax matches.
    y_true_argmax = K.argmax(y_true)
    y_pred_argmax = K.argmax(y_pred)
    tfd = K.equal(y_true_argmax, y_pred_argmax)
    tfn = K.all(tfd, axis=1)
    tfc = K.cast(tfn, dtype='float32')
    tfm = K.mean(tfc)
    return tfm
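An illustrative check: one of two sequences is predicted perfectly at every timestep, the other has one wrong step, so the sequence-level accuracy is 0.5:

from tensorflow.keras import backend as K

y_true = K.constant([[[0, 1], [1, 0]],
                     [[1, 0], [0, 1]]])
y_pred = K.constant([[[0.1, 0.9], [0.8, 0.2]],
                     [[0.9, 0.1], [0.7, 0.3]]])
print(K.eval(full_number_accuracy(y_true, y_pred)))  # 0.5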
Example #30
def masked_categorical_crossentropy(y_true, y_pred):
    # Timesteps whose one-hot label is the padding class [1, 0, 0, 0, 0, 0]
    # are excluded from the loss.
    mask = K.all(K.equal(y_true, [1, 0, 0, 0, 0, 0]), axis=-1)
    mask = 1 - K.cast(mask, K.floatx())

    loss = K.categorical_crossentropy(y_true, y_pred) * mask

    return K.sum(loss) / K.sum(mask)
Example #31
def masked_categorical_crossentropy(y_true, y_pred):
    # find out which timesteps in `y_true` are not the padding vector `mask_value`
    mask = K.all(K.equal(y_true, mask_value), axis=-1)
    mask = 1 - K.cast(mask, K.floatx())
    # multiply categorical_crossentropy with the mask
    loss = K.categorical_crossentropy(y_true, y_pred) * mask
    # take the average w.r.t. the number of unmasked entries
    return K.sum(loss) / K.sum(mask)
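An illustrative check, assuming `mask_value` lives in the same scope as the loss: the padded second timestep is ignored, so the loss reduces to -log(0.8) ≈ 0.223:

from tensorflow.keras import backend as K

mask_value = [1, 0, 0]  # illustrative padding vector
y_true = K.constant([[[0, 1, 0], [1, 0, 0]]])
y_pred = K.constant([[[0.1, 0.8, 0.1], [0.5, 0.3, 0.2]]])
print(K.eval(masked_categorical_crossentropy(y_true, y_pred)))  # ≈ 0.223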
Example #32
 def squash_mask(self, mask):
     if K.ndim(mask) == 2:
         return mask
     elif K.ndim(mask) == 3:
         return K.all(mask, axis=-1)
     return mask
Example #33
 def count_matches(a, b):
     tmp = K.concatenate([a, b])
     return K.sum(K.cast(K.all(tmp, -1), K.floatx()))