Example 1
def is_in_geo_range(patch_x_and_y):
    # Unpack the patch coordinates; geo_range is captured from the
    # enclosing scope as (x_max, x_min, y_max, y_min).
    patch_x_tensor = patch_x_and_y[0]
    patch_y_tensor = patch_x_and_y[1]

    base_leaner_x_max = K.constant(geo_range[0], dtype='float32')
    base_leaner_x_min = K.constant(geo_range[1], dtype='float32')
    base_leaner_y_max = K.constant(geo_range[2], dtype='float32')
    base_leaner_y_min = K.constant(geo_range[3], dtype='float32')

    layer_output = patch_x_and_y[2]
    #coef_geo_range = K.ones((K.int_shape(img_input)[0],1),dtype='float32')
    coef_geo_range = layer_output

    # Zero the coefficient whenever the patch falls outside the x/y range.
    coef_geo_range = K.switch(
        K.less_equal(patch_x_tensor, base_leaner_x_max),
        coef_geo_range * 1.0, coef_geo_range * 0.0)

    coef_geo_range = K.switch(
        K.greater_equal(patch_x_tensor, base_leaner_x_min),
        coef_geo_range * 1.0, coef_geo_range * 0.0)

    coef_geo_range = K.switch(
        K.less_equal(patch_y_tensor, base_leaner_y_max),
        coef_geo_range * 1.0, coef_geo_range * 0.0)

    coef_geo_range = K.switch(
        K.greater_equal(patch_y_tensor, base_leaner_y_min),
        coef_geo_range * 1.0, coef_geo_range * 0.0)

    return coef_geo_range
Example 2
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        if K.image_dim_ordering() == 'th':
            # x is the difference between the ground truth and the prediction
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            # x_abs is the absolute value of that difference
            x_abs = K.abs(x)
            # True where x_abs <= 1
            x_bool = K.less_equal(x_abs, 1.0)

            # Smooth L1: 0.5*x^2 where |x| <= 1, otherwise |x| - 0.5,
            # multiplied by the flag marking whether this anchor's loss counts;
            # summing and dividing by the flag count gives the mean
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] *
                (x_bool * (0.5 * x * x) + (1 - x_bool) *
                 (x_abs - 0.5))) / K.sum(epsilon +
                                         y_true[:, :4 * num_anchors, :, :])

        else:  # tensorflow
            # channels from 4 * num_anchors onward hold the regression targets
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] *
                (x_bool * (0.5 * x * x) + (1 - x_bool) *
                 (x_abs - 0.5))) / K.sum(epsilon +
                                         y_true[:, :, :, :4 * num_anchors])
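
As a quick sanity check of the smooth L1 branches described in the comments above, here is a minimal NumPy sketch (not part of the original snippet):

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
x_abs = np.abs(x)
x_bool = (x_abs <= 1.0).astype(np.float32)
smooth_l1 = x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5)
print(smooth_l1)  # [1.5   0.125 0.    0.125 1.5  ]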
Example 3
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        """
        :param y_true: ground truth, shape(1,m,n,72)
        :param y_pred: predicted regression coefficients, shape (1,m,n,36)
        :return:
        """
        if K.image_dim_ordering() == 'th':
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            x_abs = K.abs(x)
            x_bool = K.less_equal(x_abs, 1.0)
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] *
                (x_bool * (0.5 * x * x) + (1 - x_bool) *
                 (x_abs - 0.5))) / K.sum(epsilon +
                                         y_true[:, :4 * num_anchors, :, :])
        else:
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred  # difference between prediction and ground truth
            x_abs = K.abs(x)  # absolute value
            x_bool = K.cast(K.less_equal(x_abs, 1.0),
                            tf.float32)  # True (1.0) where the difference is <= 1

            # Multiplying by y_true[:, :, :, :4 * num_anchors] restricts the regression error to positive samples.
            # K.sum(epsilon + y_true[:, :, :, :4 * num_anchors]) is the positive-sample count; epsilon guards against division by zero.
            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] *
                (x_bool * (0.5 * x * x) + (1 - x_bool) *
                 (x_abs - 0.5))) / K.sum(epsilon +
                                         y_true[:, :, :, :4 * num_anchors])
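
The positive-sample masking described in the comments can be illustrated with a tiny NumPy sketch (flag and loss values are made up):

import numpy as np

flags = np.array([1.0, 0.0, 1.0, 0.0])          # stand-in for y_true[..., :4*num_anchors]
per_anchor_loss = np.array([0.5, 9.9, 0.2, 9.9])
epsilon = 1e-4
# Only the two positive anchors contribute; the denominator is their count.
print(np.sum(flags * per_anchor_loss) / np.sum(epsilon + flags))  # ~0.35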
Example 4
def less_equal(f, other):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A python number or a tensor or a functional object.

    # Returns
        A Functional.
    """
    validate_functional(f)

    inputs = f.inputs.copy()
    if is_functional(other):
        inputs += to_list(other.inputs)
        lmbd = [Lambda(lambda x: K.cast_to_floatx(K.less_equal(x[0], x[1])), name=graph_unique_name("less_equal")) for X in f.outputs]
    else:
        _warn_for_ndarray(other)
        lmbd = [Lambda(lambda x: K.cast_to_floatx(K.less_equal(x, other)), name=graph_unique_name("less_equal")) for X in f.outputs]

    Functional = f.get_class()
    res = Functional(
        inputs=unique_tensors(inputs),
        outputs=_apply_operation(lmbd, f, other),
        layers=lmbd
    )
    return res
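
For reference, a minimal backend-only sketch (assuming the standalone Keras with a TensorFlow backend used throughout these examples) of the comparison each Lambda wraps:

from keras import backend as K

a = K.constant([1.0, 2.0, 3.0])
b = K.constant([2.0, 2.0, 2.0])
print(K.cast_to_floatx(K.eval(K.less_equal(a, b))))  # [1. 1. 0.]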
Example 5
def berHu_loss_elementwise_w_border(y_true, y_pred):
    ''' Proposed Lpano as described in our paper. '''
    ret_loss = 0

    y_diff = y_true - y_pred
    y_diff_abs = K.abs(y_diff)
    c = (1.0/5.0)*K.max(y_diff_abs)

    L2_berHu = (K.pow(y_diff_abs, 2) + c**2) / (2*c)
    berHu_tensor = tf.where(K.less_equal(y_diff_abs, c), y_diff_abs, L2_berHu)

    ## regular reverse huber
    n_pixels = tf.to_float(tf.size(y_true))
    berHu_overall = K.sum(berHu_tensor) / n_pixels

    ## add extra weight to the borders
    ## build boolean mask by combining boundary conditions for the rows and cols
    shape = tf.shape(berHu_tensor)
    bs, R, C, chans = tf.meshgrid(tf.range(shape[0]), tf.range(shape[1]), tf.range(shape[2]), tf.range(shape[3])) ## batch_size, width, height, channels
    row_lines = tf.logical_or(K.less_equal(R, 16), K.greater_equal(R, 112)) # these numbers will need to change when moving to larger image sizes or different border width
    col_lines = tf.logical_or(K.less_equal(C, 16), K.greater_equal(C, 240))
    border_mask = tf.logical_or(row_lines, col_lines)
    border_berHu_vals = tf.boolean_mask(berHu_tensor, border_mask)

    n_border_pixels = tf.to_float(tf.size(border_berHu_vals))
    berHu_border = K.sum(border_berHu_vals) / n_border_pixels

    lambda_frac = 0.5 # default amount for the weight matrix
    ret_loss = berHu_overall + lambda_frac*berHu_border 

    return ret_loss
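
Note that the two berHu branches meet at the threshold: at |d| = c the L2 branch gives (c^2 + c^2) / (2c) = c, so the loss is continuous. A quick NumPy check (not from the source):

import numpy as np

c = 0.4
d = np.array([0.1, 0.4, 1.0])             # |y_true - y_pred|
l2_branch = (d ** 2 + c ** 2) / (2 * c)
print(np.where(d <= c, d, l2_branch))     # [0.1  0.4  1.45]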
Example 6
 def rpn_loss_regr_fixed_num(y_true, y_pred):
     # channel ordering differs between backends
     # if K.image_dim_ordering() == 'th':  used by older Keras versions
     if K.image_data_format() == "channels_first":
         x = y_true[:, 4 * num_anchors:, :, :] - y_pred
         x_abs = K.abs(x)
         x_bool = K.less_equal(x_abs, 1.0)
         return lambda_rpn_regr * K.sum(
             y_true[:, :4 * num_anchors, :, :] *
             (x_bool * (0.5 * x * x) + (1 - x_bool) *
              (x_abs - 0.5))) / K.sum(epsilon +
                                      y_true[:, :4 * num_anchors, :, :])
     else:
         # y_true has shape (None, height, width, 72): the first 36 channels are 0/1
         # validity flags for the 9 anchors' regression targets (1 = object, 0 = not),
         # and the last 36 channels are the regression targets themselves.
         # Summing therefore accumulates the loss over object anchors, and dividing
         # by the object count gives the mean.
         # Difference between the ground-truth and predicted regression targets
         x = y_true[:, :, :, 4 * num_anchors:] - y_pred
         # Absolute value
         x_abs = K.abs(x)
         # Element-wise test for <= 1
         x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
         # y_true[:, :, :, :4 * num_anchors] holds the 0/1 object flags, so only
         # anchors that contain an object contribute to the loss.
         # The <= 1 test implements the smooth L1 loss, which is less sensitive
         # to small errors.
         return lambda_rpn_regr * K.sum(
             y_true[:, :, :, :4 * num_anchors] *
             (x_bool * (0.5 * x * x) + (1 - x_bool) *
              (x_abs - 0.5))) / K.sum(epsilon +
                                      y_true[:, :, :, :4 * num_anchors])
Example 7
    def rpn_loss_regr_fixed_num(y_true, y_pred):

        #print("y_true shape")
        #print(y_true.shape)
        #print("y_pred shape")
        #print(y_pred.shape)

        if K.image_dim_ordering() == 'th':
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            x_abs = K.abs(x)
            x_bool = K.less_equal(x_abs, 1.0)
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] *
                (x_bool * (0.5 * x * x) + (1 - x_bool) *
                 (x_abs - 0.5))) / K.sum(epsilon +
                                         y_true[:, :4 * num_anchors, :, :])
        else:
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

            #loss= lambda_rpn_regr * K.sum(
            #	y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])

            #print_op = tf.print("y_true, y_pred shapes=",y_true.shape,y_pred.shape)
            #with tf.control_dependencies([print_op]):
            #	return K.identity(loss)

            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] *
                (x_bool * (0.5 * x * x) + (1 - x_bool) *
                 (x_abs - 0.5))) / K.sum(epsilon +
                                         y_true[:, :, :, :4 * num_anchors])
Example 8
def my_loss(y_true, y_pred):

    pixels1 = K.sum(K.cast(K.greater(y_pred, 0.5), 'float32'))
    pixels2 = K.sum(K.cast(K.less_equal(y_pred, 0.5), 'float32'))
    mask1 = Multiply()([K.cast(K.greater(y_pred, 0.5), 'float32'),
                        y_pred])  # values greater than 0.5
    mask0 = Multiply()([K.cast(K.less_equal(y_pred, 0.5), 'float32'),
                        y_pred])  # values less than or equal to 0.5

    return -K.log((K.sum(mask1) / pixels1 - K.sum(mask0) / pixels2) / 255)
Example 9
	def rpn_loss_regr_fixed_num(y_true, y_pred):
		if K.image_dim_ordering() == 'th':
			x = y_true[:, 4 * num_anchors:, :, :] - y_pred
			x_abs = K.abs(x)
			x_bool = K.less_equal(x_abs, 1.0)
			return lambda_rpn_regr * K.sum(
				y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
		else:
			x = y_true[:, :, :, 4 * num_anchors:] - y_pred
			x_abs = K.abs(x)
			x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

			return lambda_rpn_regr * K.sum(
				y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
Example 10
	def rpn_loss_regr_fixed_num(y_true, y_pred):
		if K.image_dim_ordering() == 'th':
			x = y_true[:, 4 * num_anchors:, :, :] - y_pred
			x_abs = K.abs(x)
			x_bool = K.less_equal(x_abs, 1.0)
			return lambda_rpn_regr * K.sum(
				y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
		else:
			x = y_true[:, :, :, 4 * num_anchors:] - y_pred  # y_true's shape is ??; y_pred's shape is ??
			x_abs = K.abs(x)
			x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)  # less_equal(x, y): element-wise x <= y

			return lambda_rpn_regr * K.sum(
				y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
Example 11
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        if K.image_data_format() == 'channels_first':
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            x_abs = K.abs(x)
            x_bool = K.less_equal(x_abs, 1.0)
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
        else:
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
Example 12
def f_mera_loss(y_true, y_pred):
    y_true1, y_pred1 = K.minimum(y_true / 255, 1), K.minimum(y_pred / 255, 1)
    fb = K.cast(K.equal(y_true1, 1),"float32") * K.cast(K.less_equal(y_pred1, 0.25),"float32")
    fw = K.cast(K.equal(y_true1, 0),"float32") * K.cast(K.greater(y_pred1, 0.25),"float32")
    tb = K.cast(K.equal(y_true1, 0),"float32") * K.cast(K.less_equal(y_pred1, 0.25),"float32")
    tw = K.cast(K.equal(y_true1, 1),"float32") * K.cast(K.greater(y_pred1, 0.25),"float32")
    fb = K.sum(fb * (y_true1 - y_pred1), axis = [1,2,3])
    fw = K.sum(fw * (y_pred1 - y_true1), axis = [1,2,3])
    tb = K.sum(tb * (1 - y_pred1 + y_true1), axis = [1,2,3])
    tw = K.sum(tw * (1 - y_true1 + y_pred1), axis = [1,2,3])
    prec = tw / (tw + fw + 0.0001)
    rec = tw / (tw + fb + 0.0001)
    f_mera = 2 * prec * rec / (prec + rec + 0.0001)
    return K.mean(1 - f_mera)
Example 13
	def rpn_loss_regr_fixed_num(y_true, y_pred):
		if K.image_dim_ordering() == 'th':
			x = y_true[:, 4 * num_anchors:, :, :] - y_pred
			x_abs = K.abs(x)
			x_bool = K.less_equal(x_abs, 1.0)
			return lambda_rpn_regr * K.sum(
				y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
		# the if-branch above is the non-TensorFlow ordering and can be ignored here
		else:
			x = y_true[:, :, :, 4 * num_anchors:] - y_pred
			x_abs = K.abs(x)
			x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)  # these three lines implement the smooth L1 term

			return lambda_rpn_regr * K.sum(
				y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
Example 14
def lessThreeAccuracy(y_true, y_pred):
    shape = K.shape(y_true)
    h = K.reshape(shape[1], (1,1))
    w = K.reshape(shape[2], (1,1))
    denom = 1 / K.cast(K.reshape(K.dot(h, w), (1,1)), dtype = 'float32')
    return K.dot(K.reshape(K.sum(K.cast(K.less_equal(K.abs(y_true - y_pred), 3), dtype = 'float32')), (1,1)), denom)
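
For batch size 1 and a single channel, the expression reduces to the mean of the indicator |y_true - y_pred| <= 3; a hypothetical simpler equivalent:

def less_three_accuracy_simple(y_true, y_pred):
    # Mean of the 0/1 indicator over all elements.
    return K.mean(K.cast(K.less_equal(K.abs(y_true - y_pred), 3), 'float32'))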
Example 15
def recall(y_true, y_pred):
    y_true1, y_pred1 = K.minimum(y_true / 255, 1), K.minimum(y_pred / 255, 1)
    fb = K.cast(K.equal(y_true1, 1), 'float32') * K.cast(
        K.less_equal(y_pred1, 0.25), 'float32')
    fw = K.cast(K.equal(y_true1, 0), 'float32') * K.cast(
        K.greater(y_pred1, 0.25), 'float32')
    tb = K.cast(K.equal(y_true1, 0), 'float32') * K.cast(
        K.less_equal(y_pred1, 0.25), 'float32')
    tw = K.cast(K.equal(y_true1, 1), 'float32') * K.cast(
        K.greater(y_pred1, 0.25), 'float32')
    fb = K.sum(fb, axis=[1, 2, 3])
    fw = K.sum(fw, axis=[1, 2, 3])
    tb = K.sum(tb, axis=[1, 2, 3])
    tw = K.sum(tw, axis=[1, 2, 3])
    rec = tw / (tw + fb + K.epsilon())
    return K.mean(rec)
Example 16
def huber_loss(y, y_pred, delta: float=1.0):
    """
    Return the Huber loss between tensors.

    Reference:
        https://en.wikipedia.org/wiki/Huber_loss
        https://web.stanford.edu/class/cs20si/2017/lectures/slides_03.pdf
        https://keras.io/backend/

    Args:
        y: ground truth y labels
        y_pred: predicted y labels
        delta: the separating constant between MSE and MAE

    Returns:
        a scalar loss between the ground truth and predicted labels

    """
    # calculate the residuals
    residual = K.abs(y_pred - y)
    # determine the result of the logical comparison to delta
    condition = K.less_equal(residual, delta)
    # calculate the two possible returns (MSE and MAE)
    then_this = 0.5 * K.square(residual)
    else_this = delta * residual - 0.5 * K.square(delta)
    # use the condition to determine the resulting tensor
    return K.switch(condition, then_this, else_this)
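
Since Keras accepts any (y_true, y_pred) -> tensor callable as a loss, huber_loss can be passed straight to compile(). The model below is a hypothetical stand-in, not from the source:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_shape=(8,))])   # hypothetical model
model.compile(optimizer='adam', loss=huber_loss)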
Example 17
    def call(self, inputs):
        self.input_shapes = [K.int_shape(input) for input in inputs]
        v = [
            ResizeLayer(K.int_shape(input),
                        self.input_shapes[self.ref_idx])(input)
            for input in inputs
        ]

        if self.min_block is not None:
            min_from_first = self.min_block - self.first_incoming_block
            min_from_first = int(np.floor(min_from_first))
            lo = np.clip(min_from_first, 0, len(v) - 1)
        else:
            lo = 0

        if self.max_block is not None:
            max_from_first = self.max_block - self.first_incoming_block
            max_from_first = int(np.ceil(max_from_first))
            hi = np.clip(max_from_first, lo, len(v) - 1)
        else:
            hi = len(v) - 1

        t = self.cur_block - self.first_incoming_block
        r = v[hi]
        for i in range(hi - 1, lo - 1, -1):  # i = hi-1, hi-2, ..., lo
            r = K.switch(K.less(t, i + 1),
                         v[i] * ((i + 1) - t) + v[i + 1] * (t - i), r)

        if lo < hi:
            r = K.switch(K.less_equal(t, lo), v[lo], r)

        return r
Example 18
	def class_loss_regr_fixed_num(y_true, y_pred):
		label_true = K.cast(y_true[:, :, 4*num_classes:], 'float32')
		label_pred = K.cast(y_pred, 'float32')
		x = label_true - label_pred
		x_abs = K.abs(x)
		x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
		return K.cast(lambda_cls_regr, 'float32') * K.cast(K.sum(label_true * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + label_true), 'float32')
Example 19
    def sample(pi, mu, sig):
        batch_size = K.shape(pi)[0]
        if K.backend() == 'cntk':
            # generate cumulative sum via matrix multiplication
            cumsum = K.dot(pi, K.constant(np.triu(np.ones((n_components, n_components)))))
        else:
            cumsum = K.cumsum(pi, 1)
        cumsum_shift = K.concatenate([K.zeros_like(cumsum[:, 0:1]), cumsum])[:, :-1]
        if K.backend() == 'cntk':
            import cntk as C
            # Generate standard uniform values in shape (batch_size,1)
            #   (since we can't use the dynamic batch_size with random.uniform in CNTK,
            #    we use uniform_like instead with an input of an appropriate shape)
            rndSmp = C.random.uniform_like(pi[:, 0:1])
        else:
            rndSmp = K.random_uniform((batch_size, 1))
        cmp1 = K.less_equal(cumsum_shift, rndSmp)
        cmp2 = K.less(rndSmp, cumsum)

        # convert to floats and multiply to perform equivalent of logical AND
        rndIndex = K.cast(cmp1, K.floatx()) * K.cast(cmp2, K.floatx())

        if K.backend() == 'cntk':
            # Generate standard normal values in shape (batch_size,1,d_t)
            #   (since we can't use the dynamic batch_size with random.normal in CNTK,
            #    we use normal_like instead with an input of an appropriate shape)
            rndNorms = C.random.normal_like(mu[:, 0:1, :])  # K.random_normal((1,d_t))
        else:
            rndNorms = K.random_normal((batch_size, 1, d_t))

        rndVec = mu + K.expand_dims(sig) * rndNorms

        # exactly one entry should be nonzero for each b,d combination; use sum to select it
        return K.sum(K.expand_dims(rndIndex) * rndVec, 1)
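
The component-selection trick used above (cumsum_shift <= u < cumsum yields a one-hot row) can be illustrated in plain NumPy (values made up):

import numpy as np

pi = np.array([0.2, 0.5, 0.3])                        # mixture weights
cumsum = np.cumsum(pi)                                # [0.2 0.7 1. ]
cumsum_shift = np.concatenate([[0.0], cumsum[:-1]])   # [0.  0.2 0.7]
u = 0.65                                              # one uniform draw
print(((cumsum_shift <= u) & (u < cumsum)).astype(float))  # [0. 1. 0.]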
Example 20
	def rpn_loss_regr_fixed_num(y_true, y_pred):
		x = y_true[:, :, :, 2:] - y_pred
		x_abs = K.abs(x)
		x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

		return lambda_rpn_regr * K.sum(
			y_true[:, :, :, :2] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, 1])
Example 21
def weighted_binary_crossentropy(y_true, y_pred):
    false_positive_weight = 50
    thresh = 0.5
    y_pred_true = K.greater_equal(thresh, y_pred)
    y_not_true = K.less_equal(thresh, y_true)
    false_positive_tensor = K.equal(y_pred_true, y_not_true)

    #first let's transform the bool tensor in numbers - maybe you need float64 depending on your configuration
    false_positive_tensor = K.cast(false_positive_tensor, 'float32')

    #and let's create its complement (the non false positives)
    complement = 1 - false_positive_tensor

    #now we're going to separate two groups
    falsePosGroupTrue = y_true * false_positive_tensor
    falsePosGroupPred = y_pred * false_positive_tensor

    nonFalseGroupTrue = y_true * complement
    nonFalseGroupPred = y_pred * complement

    #let's calculate one crossentropy loss for each group
    falsePosLoss = K.binary_crossentropy(falsePosGroupTrue, falsePosGroupPred)
    nonFalseLoss = K.binary_crossentropy(nonFalseGroupTrue, nonFalseGroupPred)

    #return them weighted:
    return (false_positive_weight * falsePosLoss) + nonFalseLoss
Example 22
 def n_pixel_error(x_corr, x_pred):
     class_corr = K.argmax(x_corr, axis=-1)
     class_pred = K.argmax(x_pred, axis=-1)
     diff = K.abs(class_corr - class_pred)
     errors = K.less_equal(diff, n)
     errors = K.cast(errors, "float32")
     return errors
Example 23
    def allpair_count_goodfit(y_true, y_pred):
        # nP = 3
        # nN = 2
        assert (y_pred.shape[1] == 1 + nP + nN)

        # y_pred.shape = shape=(?, 5, 512)
        q = y_pred[:, 0:1, :]  # shape=(?, 1, 512)
        P = y_pred[:, 1:1 + nP, :]  # shape=(?, 2, 512)
        N = y_pred[:, 1 + nP:, :]  # shape=(?, 2, 512)

        q_dot_P = keras.layers.dot([q, P], axes=-1)  # shape=(?, 1, 2)
        q_dot_N = keras.layers.dot([q, N], axes=-1)  # shape=(?, 1, 2)

        # epsilon = 0.3  # Your epsilon here

        zeros = K.zeros((nP, nN), dtype='float32')
        ones_m = K.ones((nP, 1), dtype='float32')
        ones_n = K.ones((nN, 1), dtype='float32')

        _1m__qdotN_T = ones_m[None, :] * q_dot_N  # 1m ( \delta^q_N )^T
        qdotP__1n_T = K.permute_dimensions(ones_n[None, :] * q_dot_P,
                                           [0, 2, 1])  # ( \delta^q_P ) 1n^T
        _1m__1n_T = epsilon * ones_m[None, :] * K.permute_dimensions(
            ones_n[None, :], [0, 2, 1])  # 1m 1n^T

        aux = _1m__qdotN_T - qdotP__1n_T + _1m__1n_T

        return K.sum(K.cast(K.less_equal(aux, 0), 'float32'), axis=[
            -1, -2
        ])  #number of pairs which satisfy out of total nP*nN pairs
Example 24
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        adam_lr = self.adam_lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay *
                             K.cast(self.iterations, K.dtype(self.decay))))
            adam_lr = adam_lr * (1. / (1. + self.decay * K.cast(
                self.iterations, K.dtype(self.decay))))

        t = K.cast(self.iterations, K.floatx()) + 1
        adam_lr_t = adam_lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                               (1. - K.pow(self.beta_1, t)))

        # momentum
        shapes = [K.int_shape(p) for p in params]
        moments = [K.zeros(shape) for shape in shapes]
        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        else:
            vhats = [K.zeros(1) for _ in params]
        self.ms = K.zeros(K.int_shape(params[0]), dtype=K.dtype(params[0]))
        self.vs = K.zeros(K.int_shape(params[0]), dtype=K.dtype(params[0]))
        self.weights = [self.iterations] + moments + vhats + [self.ms] + [self.vs]
        for i, (p, g, m, vhat) in enumerate(zip(params, grads, moments,
                                                vhats)):
            v = self.momentum * m - lr * g  # velocity
            self.updates.append(K.update(m, v))

            if self.nesterov:
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v

            if i == 0 and self.e2efs_layer is not None:
                nnz = K.sum(K.cast(K.greater(p, 0.), K.floatx()))
                m_t = (self.beta_1 * self.ms) + (1. - self.beta_1) * g
                v_t = (self.beta_2 *
                       self.vs) + (1. - self.beta_2) * K.square(g)
                if self.amsgrad:
                    vhat_t = K.maximum(vhat, v_t)
                    p_t = p - adam_lr_t * m_t / (K.sqrt(vhat_t) + K.epsilon())
                    self.updates.append(K.update(vhat, vhat_t))
                else:
                    p_t = p - adam_lr_t * m_t / (K.sqrt(v_t) + K.epsilon())

                self.updates.append(K.update(self.ms, m_t))
                self.updates.append(K.update(self.vs, v_t))
                new_p = K.switch(K.less_equal(nnz, self.e2efs_layer.units),
                                 new_p, p_t)

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates
Example 25
 def inverse_huber(y_true, y_pred):
     # c is a threshold fraction captured from the enclosing scope
     threshold = c * K.max(K.abs(y_true - y_pred))
     absolute_mean = K.mean(K.abs(y_true - y_pred))
     mask = K.less_equal(absolute_mean, threshold)
     mask = K.cast(mask, dtype='float32')
     return mask * absolute_mean + (1 - mask) * K.mean(
         K.square(K.abs(y_true - y_pred)))
Example 26
 def class_loss_regr_fixed_num(y_true, y_pred):
     x = y_true[:, :, 4*num_classes:] - y_pred
     x_abs = K.abs(x)
     x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
     return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] *
                                    (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / \
            K.sum(epsilon + y_true[:, :, :4*num_classes])
Example 27
    def build():
        states = Input(shape=(height * base, width * base))
        error = build_error(states, height, width, base)
        matches = 1 - K.clip(K.sign(error - threshold), 0, 1)
        # a, h, w, panel

        num_matches = K.sum(matches, axis=3)
        panels_ok = K.all(K.equal(num_matches, 1), (1, 2))
        panels_ng = K.any(K.not_equal(num_matches, 1), (1, 2))
        panels_nomatch = K.any(K.equal(num_matches, 0), (1, 2))
        panels_ambiguous = K.any(K.greater(num_matches, 1), (1, 2))

        panel_coverage = K.sum(matches, axis=(1, 2))
        # ideally, this should be [[1,1,1,1,1,1,1,1,1], ...]
        coverage_ok = K.all(K.less_equal(panel_coverage, 1), 1)
        coverage_ng = K.any(K.greater(panel_coverage, 1), 1)
        validity = tf.logical_and(panels_ok, coverage_ok)

        if verbose:
            return Model(states, [
                wrap(states, x) for x in [
                    panels_ok, panels_ng, panels_nomatch, panels_ambiguous,
                    coverage_ok, coverage_ng, validity
                ]
            ])
        else:
            return Model(states, wrap(states, validity))
Example 28
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        if K.backend() == "tensorflow":
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

            return lambda_rpn_regr * K.sum(y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
Example 29
def stochastic_binarization(x):
    hard_sigmoid = K.clip((x + 1.) / 2, 0, 1)
    tensor_bool = K.less_equal(
        K.random_uniform(shape=x.shape, minval=0.0, maxval=1.1), hard_sigmoid)
    tensor_float = K.cast(tensor_bool, dtype='float32')
    tensor_float_comp = tensor_float + K.constant(
        -1, shape=tensor_float.shape, dtype='float32')
    return tensor_float + tensor_float_comp
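
In effect the function draws a Bernoulli variable with probability hard_sigmoid((x + 1) / 2) and maps {0, 1} to {-1, +1} via 2*b - 1. A NumPy sketch of the same idea (using a standard uniform rather than the snippet's maxval=1.1):

import numpy as np

x = np.array([-2.0, 0.0, 2.0])
p = np.clip((x + 1.0) / 2.0, 0.0, 1.0)    # [0.  0.5 1. ]
b = (np.random.uniform(size=x.shape) <= p).astype(np.float32)
print(2.0 * b - 1.0)                       # entries in {-1.0, 1.0}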
Example 30
def CMI2(a, b, d, dm1):
    mask1 = K.less_equal(d, dm1)
    mask2 = K.greater(d, dm1)
    m1 = MI(tf.boolean_mask(a, mask1), tf.boolean_mask(b, mask1))
    m2 = MI(tf.boolean_mask(a, mask2), tf.boolean_mask(b, mask2))
    p1 = K.sum(K.cast(mask1, 'float32'))
    p2 = K.sum(K.cast(mask2, 'float32'))
    return tf.divide(p1, p1 + p2) * m1 + tf.divide(p2, p1 + p2) * m2
Example 31
 def rpn_loss_regr_fixed_num(y_true, y_pred):
     x = y_true[:, :, :, 4 * num_anchors:] - y_pred
     x_abs = K.abs(x)
     x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
     Ksum1 = K.sum(y_true[:, :, :, :4 * num_anchors] *
                   (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5)))
     Ksum2 = K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
     return lambda_rpn_regr * Ksum1 / Ksum2
Example 32
	def class_loss_regr_fixed_num(y_true, y_pred):
		x = y_true[:, :, 4*num_classes:] - y_pred
		x_abs = K.abs(x)
		x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
		return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])