Code Example #1
File: slayers.py  Project: fcihraeipnusnacwh/MRC-CE
	def compute_position_ids(self, inputs):
		"""T5的相对位置分桶(直接翻译自官方T5源码)
		"""
		q, v = inputs
		# Compute the position differences
		q_idxs = K.arange(0, K.shape(q)[1], dtype='int32')
		q_idxs = K.expand_dims(q_idxs, 1)
		v_idxs = K.arange(0, K.shape(v)[1], dtype='int32')
		v_idxs = K.expand_dims(v_idxs, 0)
		pos_ids = v_idxs - q_idxs
		# Post-processing
		num_buckets, max_distance = self.input_dim, self.max_distance
		ret = 0
		n = -pos_ids
		if self.bidirectional:
			num_buckets //= 2
			ret += K.cast(K.less(n, 0), 'int32') * num_buckets
			n = K.abs(n)
		else:
			n = K.maximum(n, 0)
		# now n is in the range [0, inf)
		max_exact = num_buckets // 2
		is_small = K.less(n, max_exact)
		val_if_large = max_exact + K.cast(
			K.log(K.cast(n, K.floatx()) / max_exact) /
			np.log(max_distance / max_exact) * (num_buckets - max_exact),
			'int32',
		)
		val_if_large = K.minimum(val_if_large, num_buckets - 1)
		ret += K.switch(is_small, n, val_if_large)
		return ret
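As a quick sanity check, here is a minimal NumPy re-implementation of the same bucketing formula; the function name and default values are illustrative, not taken from the original project.

import numpy as np

def relative_position_bucket(relative_position, bidirectional=True,
                             num_buckets=32, max_distance=128):
    """NumPy sketch of T5-style relative position bucketing."""
    ret = 0
    n = -relative_position
    if bidirectional:
        num_buckets //= 2
        ret += (n < 0).astype(np.int32) * num_buckets
        n = np.abs(n)
    else:
        n = np.maximum(n, 0)
    max_exact = num_buckets // 2
    is_small = n < max_exact
    # np.maximum(n, 1) avoids log(0); the result is masked by is_small anyway.
    val_if_large = max_exact + (
        np.log(np.maximum(n, 1) / max_exact)
        / np.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).astype(np.int32)
    val_if_large = np.minimum(val_if_large, num_buckets - 1)
    return ret + np.where(is_small, n, val_if_large)

# Relative positions between a length-4 query and itself.
pos = np.arange(4)[None, :] - np.arange(4)[:, None]
print(relative_position_bucket(pos))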
Code Example #2
    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        alphas = [
            K.variable(K.ones(shape) * self.init_alpha) for shape in shapes
        ]
        old_grads = [K.zeros(shape) for shape in shapes]
        self.weights = alphas + old_grads
        self.updates = []

        for p, grad, old_grad, alpha in zip(params, grads, old_grads, alphas):
            grad = K.sign(grad)
            new_alpha = K.switch(
                K.greater(grad * old_grad, 0),
                K.minimum(alpha * self.scale_up, self.max_alpha),
                K.switch(K.less(grad * old_grad, 0),
                         K.maximum(alpha * self.scale_down, self.min_alpha),
                         alpha))

            grad = K.switch(K.less(grad * old_grad, 0), K.zeros_like(grad),
                            grad)
            new_p = p - grad * new_alpha

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)
            self.updates.append(K.update(p, new_p))
            self.updates.append(K.update(alpha, new_alpha))
            self.updates.append(K.update(old_grad, grad))

        return self.updates
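The loop above is an iRprop−-style update: each weight keeps its own step size alpha, which grows while the gradient sign stays the same, shrinks when the sign flips, and the step is skipped on a flip. A minimal NumPy sketch of the same rule for a single scalar parameter (names and default values are illustrative, not from the original optimizer):

import numpy as np

def rprop_minus_step(p, grad, old_grad, alpha,
                     scale_up=1.2, scale_down=0.5,
                     min_alpha=1e-6, max_alpha=50.0):
    """One iRprop−-style update on a scalar parameter (illustrative sketch)."""
    grad = np.sign(grad)
    if grad * old_grad > 0:        # same sign: grow the step size
        alpha = min(alpha * scale_up, max_alpha)
    elif grad * old_grad < 0:      # sign flip: shrink the step size, skip this step
        alpha = max(alpha * scale_down, min_alpha)
        grad = 0.0
    p = p - grad * alpha
    return p, grad, alpha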
Code Example #3
File: metrics.py  Project: wspalding/hs_dataset
    def update_state(self, y_true, y_pred, sample_weight=None):

        # Both y_true and y_pred below the threshold counts as a match.
        y_true = K.less(y_true, self.threshold)
        y_pred = K.less(y_pred, self.threshold)
        both_below = tf.math.logical_and(y_true, y_pred)
        true_neg = K.sum(K.cast(both_below, dtype=tf.float32))
        self.cat_true_neg.assign_add(true_neg)
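The snippet above is only an update_state body. For context, here is a hedged sketch of how it could sit inside a complete tf.keras.metrics.Metric subclass; the class name, constructor, and result/reset methods are assumptions, not from the original project.

import tensorflow as tf
from tensorflow.keras import backend as K

class BelowThresholdMatches(tf.keras.metrics.Metric):
    """Counts samples where both y_true and y_pred fall below a threshold."""

    def __init__(self, threshold=0.5, name='below_threshold_matches', **kwargs):
        super().__init__(name=name, **kwargs)
        self.threshold = threshold
        self.cat_true_neg = self.add_weight(name='cat_true_neg', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = K.less(y_true, self.threshold)
        y_pred = K.less(y_pred, self.threshold)
        both_below = tf.math.logical_and(y_true, y_pred)
        self.cat_true_neg.assign_add(K.sum(K.cast(both_below, tf.float32)))

    def result(self):
        return self.cat_true_neg

    def reset_state(self):
        self.cat_true_neg.assign(0.0)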
Code Example #4
File: e2efs.py  Project: braisCB/E2E-FS
 def _get_update_list(self, kernel):
     update_list = super(E2EFSSoft, self)._get_update_list(kernel)
     update_list += [
         (self.moving_factor, K.switch(K.less(self.moving_T, self.warmup_T),
                                       self.start_alpha,
                                       K.minimum(self.alpha_M, self.start_alpha + (1. - self.start_alpha) * (self.moving_T - self.warmup_T) / self.T))),
         (self.moving_T, self.moving_T + 1),
         (self.moving_decay, K.switch(K.less(self.moving_factor, self.alpha_M), self.moving_decay, K.maximum(.75, self.moving_decay + self.epsilon)))
     ]
     return update_list
Code Example #5
    def get_spikes(self, new_mem):
        """Linear activation."""

        thr = self._v_thresh
        pos_spikes = k.cast(
            tf.logical_and(k.less(self.mem, thr),
                           k.greater_equal(new_mem, thr)), k.floatx())
        neg_spikes = k.cast(
            tf.logical_and(k.less(new_mem, thr),
                           k.greater_equal(self.mem, thr)), k.floatx())
        return pos_spikes - neg_spikes
Code Example #6
 def _get_update_list(self, kernel):
     super(E2EFSSoft, self)._get_update_list(kernel)
     self.moving_factor.assign(
         K.switch(
             K.less(self.moving_T, self.warmup_T), self.start_alpha,
             K.minimum(
                 self.alpha_M, self.start_alpha + (1. - self.start_alpha) *
                 (self.moving_T - self.warmup_T) / self.T)))
     self.moving_T.assign_add(1.)
     self.moving_decay.assign(
         K.switch(K.less(self.moving_factor, self.alpha_M),
                  self.moving_decay,
                  K.maximum(.75, self.moving_decay + self.epsilon)))
Code Example #7
    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.shape(p) for p in params]
        alphas = [
            K.variable(K.ones(shape) * self.init_alpha) for shape in shapes
        ]
        old_grads = [K.zeros(shape) for shape in shapes]
        prev_weight_deltas = [K.zeros(shape) for shape in shapes]
        self.weights = alphas + old_grads
        self.updates = []

        for param, grad, old_grad, prev_weight_delta, alpha in zip(
                params, grads, old_grads, prev_weight_deltas, alphas):
            # equation 4
            new_alpha = K.switch(
                K.greater(grad * old_grad, 0),
                K.minimum(alpha * self.scale_up, self.max_alpha),
                K.switch(K.less(grad * old_grad, 0),
                         K.maximum(alpha * self.scale_down, self.min_alpha),
                         alpha))

            # equation 5
            new_delta = K.switch(
                K.greater(grad, 0), -new_alpha,
                K.switch(K.less(grad, 0), new_alpha, K.zeros_like(new_alpha)))

            # equation 7
            weight_delta = K.switch(K.less(grad * old_grad, 0),
                                    -prev_weight_delta, new_delta)

            # equation 6
            new_param = param + weight_delta

            # reset gradient_{t-1} to 0 if gradient sign changed (so that we do
            # not "double punish", see paragraph after equation 7)
            grad = K.switch(K.less(grad * old_grad, 0), K.zeros_like(grad),
                            grad)

            # Apply constraints
            #if param in constraints:
            #    c = constraints[param]
            #    new_param = c(new_param)

            self.updates.append(K.update(param, new_param))
            self.updates.append(K.update(alpha, new_alpha))
            self.updates.append(K.update(old_grad, grad))
            self.updates.append(K.update(prev_weight_delta, weight_delta))

        return self.updates
Code Example #8
    def smooth_l1(y_true, y_pred):
        """
        Computes the loss on the RPN proposal-box coordinates
        using the smooth L1 loss:
        f(x) =  0.5 * (sigma * x)^2          if |x| < 1 / sigma^2
                |x| - 0.5 / sigma^2          otherwise
        :param sigma: smoothing parameter controlling the width of the quadratic region (sigma_squared is captured from the enclosing scope)
        :param y_true: ground truth   [batch_size, num_anchor, 4+1]
        :param y_pred: predictions    [batch_size, num_anchor, 4]
        :return: rpn regr_loss
        """
        regression_pred = y_pred
        regression_true = y_true[:, :, :-1]   # regression targets on the RPN
        label_true = y_true[:, :, -1]         # whether each anchor contains an object

        # Keep only the anchors that contain an object; drop the background.
        indices = tf.where(backend.equal(label_true, 1))                     # with no x/y arguments, tf.where returns the coordinates of the condition
        regression_pred = tf.gather_nd(regression_pred, indices)            # predicted box coordinates for the positive anchors
        regression_true = tf.gather_nd(regression_true, indices)            # matching ground-truth box coordinates

        # Compute the smooth L1 loss
        regression_diff = backend.abs(regression_pred - regression_true)
        regression_loss = tf.where(  # tf.where selects between the two branches
            backend.less(regression_diff, 1.0 / sigma_squared),             # is the absolute difference inside the quadratic region?
            0.5 * sigma_squared * backend.pow(regression_diff, 2),          # yes: quadratic branch
            regression_diff - 0.5 / sigma_squared                           # no: linear branch
        )

        # Divide by N_cls (the number of positive anchors)
        normalizer = backend.maximum(1, backend.shape(indices)[0])
        normalizer = backend.cast(normalizer, dtype='float32')
        loss = backend.sum(regression_loss) / normalizer

        return loss
Code Example #9
def smooth_l1(y_true, y_pred, sigma=3.0, axis=None):
    """Compute the smooth L1 loss of y_pred w.r.t. y_true.

    Args:
        y_true: Tensor of regression targets with the same shape as y_pred,
            (B, N, 4). Unlike the variants that carry an anchor state in the
            last column, this version expects y_true without a trailing
            anchor-state column.
        y_pred: Tensor from the network of shape (B, N, 4).
        sigma: The point where the loss changes from L2 to L1.

    Returns:
        The smooth L1 loss of y_pred w.r.t. y_true.
    """
    if axis is None:
        axis = 1 if K.image_data_format() == 'channels_first' else K.ndim(y_pred) - 1

    sigma_squared = sigma ** 2

    # compute smooth L1 loss
    # f(x) = 0.5 * (sigma * x)^2          if |x| < 1 / sigma / sigma
    #        |x| - 0.5 / sigma / sigma    otherwise
    regression_diff = K.abs(y_true - y_pred)  # |y - f(x)|

    regression_loss = tf.where(
        K.less(regression_diff, 1.0 / sigma_squared),
        0.5 * sigma_squared * K.pow(regression_diff, 2),
        regression_diff - 0.5 / sigma_squared)
    return K.sum(regression_loss, axis=axis)
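A quick hedged usage sketch of the function above on dummy tensors (shapes and values are illustrative):

import numpy as np
import tensorflow as tf

y_true = tf.constant(np.random.rand(2, 8, 4), dtype='float32')
y_pred = tf.constant(np.random.rand(2, 8, 4), dtype='float32')
loss = smooth_l1(y_true, y_pred, sigma=3.0)  # sums over the last axis by default
print(loss.shape)  # (2, 8)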
Code Example #10
File: layers.py  Project: NON906/NVC_train
 def call(self, x, training=None):
     mask = K.random_uniform(K.shape(x)[:-1], 0.0, 1.0)
     mask = K.expand_dims(mask, -1)
     mask = K.repeat_elements(mask, K.int_shape(x)[-1], -1)
     rand_x = K.switch(K.less(mask, self.rate),
                       K.random_normal(K.shape(x), 0.0, 1.0), x)
     return K.in_train_phase(rand_x, x, training=training)
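The call above belongs to a custom layer that, at training time, replaces whole feature vectors with standard normal noise with probability rate. A hedged sketch of the surrounding layer class (the class name and constructor are assumptions, not from the original project):

import tensorflow as tf
from tensorflow.keras import backend as K

class RandomVectorNoise(tf.keras.layers.Layer):
    """Replaces each feature vector with N(0, 1) noise with probability `rate` during training."""

    def __init__(self, rate=0.1, **kwargs):
        super().__init__(**kwargs)
        self.rate = rate

    def call(self, x, training=None):
        # One uniform draw per feature vector, broadcast over the feature dimension.
        mask = K.random_uniform(K.shape(x)[:-1], 0.0, 1.0)
        mask = K.expand_dims(mask, -1)
        mask = K.repeat_elements(mask, K.int_shape(x)[-1], -1)
        rand_x = K.switch(K.less(mask, self.rate),
                          K.random_normal(K.shape(x), 0.0, 1.0), x)
        return K.in_train_phase(rand_x, x, training=training)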
Code Example #11
    def _smooth_l1(y_true, y_pred):
        #   y_true [batch_size, num_anchor, 4+1]
        #   y_pred [batch_size, num_anchor, 4]
        regression = y_pred
        regression_target = y_true[:, :, :-1]
        anchor_state = y_true[:, :, -1]

        # Find the positive samples
        indices = tf.where(K.equal(anchor_state, 1))
        regression = tf.gather_nd(regression, indices)
        regression_target = tf.gather_nd(regression_target, indices)

        # Compute the smooth L1 loss
        regression_diff = regression - regression_target
        regression_diff = K.abs(regression_diff)
        regression_loss = tf.where(
            K.less(regression_diff, 1.0 / sigma_squared),
            0.5 * sigma_squared * K.pow(regression_diff, 2),
            regression_diff - 0.5 / sigma_squared)

        # Divide the loss by the number of positive samples
        normalizer = K.maximum(1, K.shape(indices)[0])
        normalizer = K.cast(normalizer, dtype=K.floatx())
        regression_loss = K.sum(regression_loss) / normalizer
        return regression_loss
Code Example #12
File: losses.py  Project: xiaoduli/notekeras
    def _smooth_l1(y_true, y_pred):
        """ Compute the smooth L1 loss of y_pred w.r.t. y_true.

        Args
            y_true: Tensor from the generator of shape (B, N, 5). The last value for each box is the state of the anchor (ignore, negative, positive).
            y_pred: Tensor from the network of shape (B, N, 4).

        Returns
            The smooth L1 loss of y_pred w.r.t. y_true.
        """
        # separate target and state
        regression = y_pred
        regression_target = y_true[:, :, :-1]
        anchor_state = y_true[:, :, -1]

        # filter out "ignore" anchors
        indices = tf.where(K.equal(anchor_state, 1))
        regression = tf.gather_nd(regression, indices)
        regression_target = tf.gather_nd(regression_target, indices)

        # compute smooth L1 loss
        # f(x) = 0.5 * (sigma * x)^2          if |x| < 1 / sigma / sigma
        #        |x| - 0.5 / sigma / sigma    otherwise
        regression_diff = regression - regression_target
        regression_diff = K.abs(regression_diff)
        regression_loss = tf.where(
            K.less(regression_diff, 1.0 / sigma_squared),
            0.5 * sigma_squared * K.pow(regression_diff, 2),
            regression_diff - 0.5 / sigma_squared)

        # compute the normalizer: the number of positive anchors
        normalizer = K.maximum(1, K.shape(indices)[0])
        normalizer = K.cast(normalizer, dtype=K.floatx())
        return K.sum(regression_loss) / normalizer
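A hedged usage sketch for the anchor-state variant above; in the original file sigma_squared is captured from an enclosing factory function, so here it is assumed to be in scope (the value 9.0 and all shapes are illustrative):

import numpy as np
import tensorflow as tf

sigma_squared = 9.0  # assumed; normally supplied by the enclosing closure

batch, num_anchors = 2, 6
targets = np.random.rand(batch, num_anchors, 4).astype('float32')
states = np.random.randint(0, 2, size=(batch, num_anchors, 1)).astype('float32')  # 1 = positive
y_true = tf.constant(np.concatenate([targets, states], axis=-1))
y_pred = tf.constant(np.random.rand(batch, num_anchors, 4).astype('float32'))
print(_smooth_l1(y_true, y_pred).numpy())  # scalar loss, averaged over positive anchors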
Code Example #13
    def apply(self, Xs, Ys, Rs, reverse_state):
        #this method is correct, but wasteful
        grad = ilayers.GradientWRT(len(Xs))
        times_alpha0 = tensorflow.keras.layers.Lambda(
            lambda x: x * self._alpha[0])
        times_alpha1 = tensorflow.keras.layers.Lambda(
            lambda x: x * self._alpha[1])
        times_beta0 = tensorflow.keras.layers.Lambda(
            lambda x: x * self._beta[0])
        times_beta1 = tensorflow.keras.layers.Lambda(
            lambda x: x * self._beta[1])
        keep_positives = tensorflow.keras.layers.Lambda(
            lambda x: x * K.cast(K.greater(x, 0), K.floatx()))
        keep_negatives = tensorflow.keras.layers.Lambda(
            lambda x: x * K.cast(K.less(x, 0), K.floatx()))

        def f(layer, X):
            Zs = kutils.apply(layer, X)
            # Divide incoming relevance by the activations.
            tmp = [ilayers.SafeDivide()([a, b]) for a, b in zip(Rs, Zs)]
            # Propagate the relevance to the input neurons
            # using the gradient
            tmp = iutils.to_list(grad(X + Zs + tmp))
            # Re-weight relevance with the input values.
            tmp = [
                tensorflow.keras.layers.Multiply()([a, b])
                for a, b in zip(X, tmp)
            ]
            return tmp

        # Distinguish positive and negative inputs.
        Xs_pos = kutils.apply(keep_positives, Xs)
        Xs_neg = kutils.apply(keep_negatives, Xs)

        # xpos*wpos
        r_pp = f(self._layer_wo_act_positive, Xs_pos)
        # xneg*wneg
        r_nn = f(self._layer_wo_act_negative, Xs_neg)
        # a0 * r_pp + a1 * r_nn
        r_pos = [
            tensorflow.keras.layers.Add()([times_alpha0(pp),
                                           times_alpha1(nn)])
            for pp, nn in zip(r_pp, r_nn)
        ]

        # xpos*wneg
        r_pn = f(self._layer_wo_act_negative, Xs_pos)
        # xneg*wpos
        r_np = f(self._layer_wo_act_positive, Xs_neg)
        # b0 * r_pn + b1 * r_np
        r_neg = [
            tensorflow.keras.layers.Add()([times_beta0(pn),
                                           times_beta1(np)])
            for pn, np in zip(r_pn, r_np)
        ]

        return [
            tensorflow.keras.layers.Subtract()([a, b])
            for a, b in zip(r_pos, r_neg)
        ]
Code Example #14
File: losses.py  Project: BrWillian/api-cerberus
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number
    if K.int_shape(y_pred)[1] == 128:
        kernel_size = 11
    elif K.int_shape(y_pred)[1] == 256:
        kernel_size = 21
    else:
        raise ValueError('Unexpected image size')
    averaged_mask = K.pool2d(y_true,
                             pool_size=(kernel_size, kernel_size),
                             strides=(1, 1),
                             padding='same',
                             pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(
        K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = weighted_bce_loss(y_true, y_pred, weight) + (
        1 - weighted_dice_coeff(y_true, y_pred, weight))
    return loss
Code Example #15
def customAccuracy(y_true, y_pred):
    diff = K.abs(y_true - y_pred)         # absolute difference between correct and predicted values
    correct = K.cast(K.less(diff, 0.01),  # boolean tensor, cast to 0./1. so it can be averaged
                     K.floatx())
    return K.mean(correct)                # fraction of predictions within the 0.01 tolerance
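Assuming a compiled regression model, the metric above can be passed to model.compile like any custom metric; the model below is purely illustrative.

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='adam', loss='mse', metrics=[customAccuracy])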
Code Example #16
File: multi_box_loss.py  Project: zuoguoqing/paz
 def _smooth_l1(self, y_true, y_pred):
     absolute_value_loss = K.abs(y_true - y_pred)
     square_loss = 0.5 * (y_true - y_pred)**2
     absolute_value_condition = K.less(absolute_value_loss, 1.0)
     l1_smooth_loss = tf.where(absolute_value_condition, square_loss,
                               absolute_value_loss - 0.5)
     return K.sum(l1_smooth_loss, axis=-1)
Code Example #17
def yolo_confidence_loss(y_true, y_pred, t):
    error = K.square(y_true - y_pred)
    object_true = OBJECT_SCALE * error
    object_false = NO_OBJECT_SCALE * error
    object_default = K.zeros_like(y_true)
    loss1 = tf.where(t, object_true, object_default)
    # tf.to_float was removed in TF2; cast the boolean mask instead.
    loss2 = tf.where(K.less(K.cast(t, K.floatx()), 0.5), object_false, object_default)
    return K.sum(loss1) + K.sum(loss2)
Code Example #18
def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss.
    y_true and y_pred are typically: [N, 4], but could be any shape.
    """
    diff = K.abs(y_true - y_pred)
    less_than_one = K.cast(K.less(diff, 1.0), "float32")
    loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
    return loss
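A small numeric check of the piecewise behaviour of smooth_l1_loss, with values chosen by hand:

import tensorflow as tf

y_true = tf.constant([[0.0, 0.0]])
y_pred = tf.constant([[0.5, 3.0]])
# |diff| = 0.5 -> 0.5 * 0.5**2 = 0.125 ; |diff| = 3.0 -> 3.0 - 0.5 = 2.5
print(smooth_l1_loss(y_true, y_pred).numpy())  # [[0.125 2.5]]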
Code Example #19
def custom_loss(y_true, y_pred):
    #custom_loss = K.mean(K.sum(K.square(y_true - y_pred)))
    alpha = 100.
    loss = K.switch(K.less(y_true * y_pred, 0), \
        alpha*y_pred**2 - K.sign(y_true)*y_pred + K.abs(y_true), \
        K.abs(y_true - y_pred)
        )
    return K.mean(loss, axis=-1)
Code Example #20
 def regularization(x):
     l_units = loss_units(x)
     t = x / K.max(K.abs(x))
     p = K.switch(K.less(t, K.epsilon()), K.zeros_like(x), x)
     cost = K.cast_to_floatx(0.)
     cost += K.sum(p * (1. - p)) + 2. * l_units
     # cost += K.sum(K.relu(x - 1.))
     return cost
Code Example #21
    def _resource_apply_sparse(self, grad, var, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))
        old_grad = self.get_slot(var, "old_grad")
        old_weight_delta = self.get_slot(var, "old_weight_delta")
        alpha = self.get_slot(var, "alpha")

        gradneg = K.less(grad * old_grad, 0)

        # equation 4
        scale_down = coefficients["scale_down_t"]
        min_alpha = coefficients["min_alpha_t"]
        alpha_t = state_ops.\
            assign(alpha,
                   K.switch(K.greater(grad * old_grad, 0),
                            K.minimum(alpha * coefficients["scale_up_t"],
                                      coefficients["max_alpha_t"]),
                            K.switch(gradneg,
                                     K.maximum(alpha * scale_down, min_alpha),
                                     alpha)),
                   use_locking=self._use_locking)

        # equation 5

        new_tmp_delta_t = K.switch(
            K.greater(grad, 0), -alpha_t,
            K.switch(K.less(grad, 0), alpha_t, K.zeros_like(alpha_t)))

        # equation 7
        old_weight_delta_t = state_ops.assign(
            old_weight_delta,
            K.switch(gradneg, -old_weight_delta, new_tmp_delta_t),
            use_locking=self._use_locking)

        # equation 6
        # var_new = math_ops.add(var, old_weight_delta_t)
        var_update = state_ops.assign_add(var, old_weight_delta_t,
            use_locking=self._use_locking)
        #print(f"var: {var}")
        #print(f"var update: {var_update}")
        old_grad_t = state_ops.assign(old_grad, grad,
            use_locking=self._use_locking)
        return control_flow_ops.group(
            *[var_update, old_grad_t, old_weight_delta_t, alpha_t])
Code Example #22
def triplet_acc(y_true,y_pred):
    a = y_pred[0::3]
    p = y_pred[1::3]
    n = y_pred[2::3]
    
    ap = K.sum(K.square(a-p),-1)
    an = K.sum(K.square(a-n),-1)
    
    return K.less(ap+alpha,an)
Code Example #23
    def apply(self, Xs, Ys, Rs, reverse_state):
        #this method is correct, but wasteful
        grad = ilayers.GradientWRT(len(Xs))
        times_alpha = tensorflow.keras.layers.Lambda(lambda x: x * self._alpha)
        times_beta = tensorflow.keras.layers.Lambda(lambda x: x * self._beta)
        keep_positives = tensorflow.keras.layers.Lambda(
            lambda x: x * K.cast(K.greater(x, 0), K.floatx()))
        keep_negatives = tensorflow.keras.layers.Lambda(
            lambda x: x * K.cast(K.less(x, 0), K.floatx()))

        def f(layer1, layer2, X1, X2):
            # Get activations of full positive or negative part.
            Z1 = kutils.apply(layer1, X1)
            Z2 = kutils.apply(layer2, X2)
            Zs = [
                tensorflow.keras.layers.Add()([a, b]) for a, b in zip(Z1, Z2)
            ]
            # Divide incoming relevance by the activations.
            tmp = [ilayers.SafeDivide()([a, b]) for a, b in zip(Rs, Zs)]
            # Propagate the relevance to the input neurons
            # using the gradient
            tmp1 = iutils.to_list(grad(X1 + Z1 + tmp))
            tmp2 = iutils.to_list(grad(X2 + Z2 + tmp))
            # Re-weight relevance with the input values.
            tmp1 = [
                tensorflow.keras.layers.Multiply()([a, b])
                for a, b in zip(X1, tmp1)
            ]
            tmp2 = [
                tensorflow.keras.layers.Multiply()([a, b])
                for a, b in zip(X2, tmp2)
            ]
            #combine and return
            return [
                tensorflow.keras.layers.Add()([a, b])
                for a, b in zip(tmp1, tmp2)
            ]

        # Distinguish positive and negative inputs.
        Xs_pos = kutils.apply(keep_positives, Xs)
        Xs_neg = kutils.apply(keep_negatives, Xs)
        # xpos*wpos + xneg*wneg
        activator_relevances = f(self._layer_wo_act_positive,
                                 self._layer_wo_act_negative, Xs_pos, Xs_neg)

        if self._beta:  # only compute beta-weighted contributions if beta is not zero
            # xpos*wneg + xneg*wpos
            inhibitor_relevances = f(self._layer_wo_act_negative,
                                     self._layer_wo_act_positive, Xs_pos,
                                     Xs_neg)
            return [
                tensorflow.keras.layers.Subtract()(
                    [times_alpha(a), times_beta(b)])
                for a, b in zip(activator_relevances, inhibitor_relevances)
            ]
        else:
            return activator_relevances
Code Example #24
 def loss_units(x):
     t = x / K.max(K.abs(x))
     x = K.switch(K.less(t, K.epsilon()), K.zeros_like(x), x)
     # m = K.sum(K.cast(K.greater(x, 0.), K.floatx()))
     sum_x = K.sum(x)
     # moving_units = K.switch(K.less_equal(m, self.units), m, self.moving_units)
     # epsilon_minus = 0.
     # epsilon_plus = K.switch(K.less_equal(m, self.units), self.moving_units, 0.)
     return K.abs(self.moving_units - sum_x)
Code Example #25
def dice_coef_border(y_true, y_pred):
    SMOOTH = 1
    print("inside dice_coef_border .. y_true shape=", y_true.shape,
          "y_pred shape=", y_pred.shape)

    #    negative = 1. - tf.cast(y_true, tf.float32)
    #    positive = tf.cast(y_true, tf.float32)

    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number

    # tf.nn.avg_pool2d takes ksize (not pool_size) and an uppercase padding string.
    averaged_mask1 = tf.nn.avg_pool2d(y_true,
                                      ksize=(11, 11),
                                      strides=(1, 1),
                                      padding='SAME')
    border1 = K.cast(K.greater(averaged_mask1, 0.005), 'float32') * K.cast(
        K.less(averaged_mask1, 0.995), 'float32')

    #    positive = K.pool2d(positive, pool_size=(11,11), padding="same", data_format='channels_last')
    #    negative = K.pool2d(negative, pool_size=(11,11), padding="same", data_format='channels_last')
    #    border = positive * negative

    #    border = get_border_mask((11, 11), y_true)

    averaged_mask2 = tf.nn.avg_pool2d(y_pred,
                                      ksize=(11, 11),
                                      strides=(1, 1),
                                      padding='SAME')
    border2 = K.cast(K.greater(averaged_mask2, 0.005), 'float32') * K.cast(
        K.less(averaged_mask2, 0.995), 'float32')

    border1 = K.flatten(border1)
    border2 = K.flatten(border2)

    y_true_f = border1  #K.flatten(y_true)
    y_pred_f = border2  #K.flatten(y_pred)
    #    y_true_f = K.gather(y_true_f, tf.where(border1 > 0.5))
    #    y_pred_f = K.gather(y_pred_f, tf.where(border2 > 0.5))

    print("y_pred_f shape", y_pred_f.shape, "y_true_f.shape=", y_true_f.shape)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + SMOOTH) / (K.sum(y_true_f) + K.sum(y_pred_f) +
                                           SMOOTH)
Code Example #26
File: dogfacenet.py  Project: m-aliabbas/DogFaceNet
def triplet_acc(y_true, y_pred):
    # Accuracy computation for the triplet loss
    a = y_pred[0::3]  # anchor embeddings
    p = y_pred[1::3]  # positive embeddings
    n = y_pred[2::3]  # negative embeddings

    ap = K.sum(K.square(a - p), -1)  # squared distance between anchor and positive
    an = K.sum(K.square(a - n), -1)  # squared distance between anchor and negative

    return K.less(ap + alpha, an)  # true where the anchor-positive distance plus the margin stays below the anchor-negative distance
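A hedged usage sketch on dummy embeddings; it assumes triplet_acc and the module-level margin alpha are defined in the same scope, and the margin value below is illustrative.

import numpy as np
from tensorflow.keras import backend as K

alpha = 0.3                                       # illustrative margin value
embeddings = K.constant(np.random.rand(9, 128))   # 3 triplets: anchor, positive, negative
is_correct = triplet_acc(None, embeddings)        # boolean per triplet (y_true is unused)
accuracy = K.mean(K.cast(is_correct, K.floatx()))
print(float(accuracy))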
Code Example #27
File: e2efs.py  Project: braisCB/E2E-FS
 def loss_units(x):
     t = x / K.max(K.abs(x))
     x = K.switch(K.less(t, K.epsilon()), K.zeros_like(x), x)
     m = K.sum(K.cast(K.greater(x, 0.), K.floatx()))
     sum_x = K.sum(x)
     moving_units = K.switch(K.less_equal(m, self.units), m,
                             (1. - self.moving_decay) * self.moving_units)
     epsilon_minus = 0.
     epsilon_plus = K.switch(K.less_equal(m, self.units), self.moving_units, 0.)
     return K.relu(moving_units - sum_x - epsilon_minus) + K.relu(sum_x - moving_units - epsilon_plus)
Code Example #28
        def custom_loss(y_true, y_pred, loss_weights=loss_weights):  # Verified

            zero_index = K.zeros_like(y_true[:, 0])
            ones_index = K.ones_like(y_true[:, 0])

            # Classifier
            labels = y_true[:, 0]
            class_preds = y_pred[:, 0]
            bi_crossentropy_loss = -labels * K.log(class_preds) - (
                1 - labels) * K.log(1 - class_preds)

            classify_valid_index = tf.where(K.less(y_true[:, 0], 0),
                                            zero_index, ones_index)
            classify_keep_num = K.cast(tf.reduce_sum(classify_valid_index) *
                                       SAMPLE_KEEP_RATIO,
                                       dtype=tf.int32)
            # For classification problem, only pick 70% of the valid samples.

            classify_loss_sum = bi_crossentropy_loss * classify_valid_index
            classify_loss_sum_filtered, _ = tf.nn.top_k(classify_loss_sum,
                                                        k=classify_keep_num)
            classify_loss = K.mean(classify_loss_sum_filtered)

            # Bounding box regressor
            rois = y_true[:, 1:5]
            roi_preds = y_pred[:, 1:5]
            # roi_raw_mean_square_error = K.sum(K.square(rois - roi_preds), axis = 1) # mse
            roi_raw_smooth_l1_loss = K.mean(
                tf.where(
                    K.abs(rois - roi_preds) < 1,
                    0.5 * K.square(rois - roi_preds),
                    K.abs(rois - roi_preds) - 0.5))  # L1 Smooth Loss

            roi_valid_index = tf.where(K.equal(K.abs(y_true[:, 0]), 1),
                                       ones_index, zero_index)
            roi_keep_num = K.cast(tf.reduce_sum(roi_valid_index),
                                  dtype=tf.int32)

            # roi_valid_mean_square_error = roi_raw_mean_square_error * roi_valid_index
            # roi_filtered_mean_square_error, _ = tf.nn.top_k(roi_valid_mean_square_error, k = roi_keep_num)
            # roi_loss = K.mean(roi_filtered_mean_square_error)
            roi_valid_smooth_l1_loss = roi_raw_smooth_l1_loss * roi_valid_index
            roi_filtered_smooth_l1_loss, _ = tf.nn.top_k(
                roi_valid_smooth_l1_loss, k=roi_keep_num)
            roi_loss = K.mean(roi_filtered_smooth_l1_loss)

            loss = classify_loss * loss_weights[0] + roi_loss * loss_weights[1]

            return loss
Code Example #29
File: adaptive_NN.py  Project: k-burger/ML_in_pop_gen
def nmse(y_true, y_pred, a, c):
    dim = len(c) - 1
    loss_classes = [[] for i in range(dim)]
    cond = [[] for i in range(dim)]
    loss = [[] for i in range(dim)]

    for i in range(0, dim):
        loss_classes[i] = a[i] * K.square((y_pred - y_true) / (y_true))
        cond[i] = K.less(y_true, c[i + 1]) & K.greater(y_true, c[i])

    loss[0] = K.switch(cond[0], loss_classes[0], loss_classes[1])
    for i in range(1, dim):
        loss[i] = K.switch(cond[i], loss_classes[i], loss[i - 1])

    return K.mean(loss[dim - 1], axis=-1)
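A hedged usage sketch of nmse with two value classes; the boundaries c and weights a below are illustrative, not from the original project.

import tensorflow as tf

y_true = tf.constant([[0.05, 0.5, 2.0]])
y_pred = tf.constant([[0.04, 0.6, 1.5]])
a = [10.0, 1.0]        # per-class weights
c = [0.0, 0.1, 10.0]   # class boundaries: (0, 0.1) and (0.1, 10)
print(nmse(y_true, y_pred, a, c).numpy())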
Code Example #30
 def get_psp(self, output_spikes):
     new_spiketimes = tf.where(k.greater(output_spikes, 0),
                               k.ones_like(output_spikes) * self.time,
                               self.last_spiketimes)
     new_spiketimes = tf.where(k.less(output_spikes, 0),
                               k.zeros_like(output_spikes) * self.time,
                               new_spiketimes)
     assign_new_spiketimes = tf.assign(self.last_spiketimes, new_spiketimes)
     with tf.control_dependencies([assign_new_spiketimes]):
         last_spiketimes = self.last_spiketimes + 0  # Dummy op
         # psp = k.maximum(0., tf.divide(self.dt, last_spiketimes))
         psp = tf.where(k.greater(last_spiketimes, 0),
                        k.ones_like(output_spikes) * self.dt,
                        k.zeros_like(output_spikes))
     return psp