Example #1
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        t = self.iterations + 1
        lr_t = self.lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            p_t = p - self.get_param_learning_rate_t(p,t,lr_t) * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
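The snippet above relies on a get_param_learning_rate_t method that is not shown. A minimal sketch of what such a helper could look like, assuming the optimizer keeps an optional lr_multipliers dict keyed by parameter name (both the attribute and its layout are assumptions made here for illustration, not part of the original code):

    # Hypothetical helper: scale the bias-corrected step size lr_t by a
    # per-parameter multiplier, defaulting to 1.0 when none is registered.
    def get_param_learning_rate_t(self, p, t, lr_t):
        multipliers = getattr(self, 'lr_multipliers', {}) or {}
        return lr_t * multipliers.get(p.name, 1.0)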
Example #2
File: wtte.py  Project: g6t/wtte-rnn
        def loglik_discrete(y, u, a, b, epsilon=1e-35):
            hazard0 = K.pow((y + epsilon) / a, b)
            hazard1 = K.pow((y + 1.0) / a, b)

            loglikelihoods = u * \
                K.log(K.exp(hazard1 - hazard0) - 1.0) - hazard1
            return loglikelihoods
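loglik_discrete returns per-sample log-likelihoods rather than a scalar loss. A minimal sketch of how it could be wired into a Keras loss, reusing loglik_discrete as if it were defined at module level and assuming y_true stacks the time-to-event y and the censoring indicator u, and y_pred stacks the Weibull parameters a and b, along the last axis (this packing convention is an assumption for illustration):

def weibull_discrete_loss(y_true, y_pred):
    # unpack (y, u) targets and (a, b) predictions stacked on the last axis
    y, u = y_true[..., 0], y_true[..., 1]
    a, b = y_pred[..., 0], y_pred[..., 1]
    # negate and average: maximizing the log-likelihood = minimizing the loss
    return -K.mean(loglik_discrete(y, u, a, b))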
Example #3
    def build_loss(self):
        # Infinity norm
        if np.isinf(self.p):
            value = K.max(self.img)
        else:
            value = K.pow(K.sum(K.pow(K.abs(self.img), self.p)), 1. / self.p)

        return normalize(self.img, value)
Example #4
 def func(y_true, y_pred):
     Y_true = K.reshape(y_true, (-1, ) + img_shape)
     Y_pred = K.reshape(y_pred, (-1, ) + img_shape)
     t1 = K.pow(K.abs(Y_true[:, :, :, 1:, :] - Y_true[:, :, :, :-1, :]) -
                K.abs(Y_pred[:, :, :, 1:, :] - Y_pred[:, :, :, :-1, :]), alpha)
     t2 = K.pow(K.abs(Y_true[:, :, :, :, :-1] - Y_true[:, :, :, :, 1:]) -
                K.abs(Y_pred[:, :, :, :, :-1] - Y_pred[:, :, :, :, 1:]), alpha)
     out = K.mean(K.batch_flatten(t1 + t2), -1)
     return out
Example #5
	def focal_loss_fixed(y_true, y_pred):
		if(K.backend()=="tensorflow"):
			import tensorflow as tf
			pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)
			return -K.mean(alpha * K.pow(1. - pt, gamma) * K.log(pt))
		if(K.backend()=="theano"):
			import theano.tensor as T
			pt = T.where(T.eq(y_true, 1), y_pred, 1 - y_pred)
			return -K.mean(alpha * K.pow(1. - pt, gamma) * K.log(pt))
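Here focal_loss_fixed is the inner closure of the usual focal-loss factory: alpha and gamma are captured from the enclosing scope. A minimal sketch of that wrapper and how it would be passed to compile, assuming a TensorFlow backend; the default alpha/gamma values and the K.epsilon() guard inside the log are choices made here, not taken from the example:

import tensorflow as tf
from keras import backend as K

def focal_loss(alpha=0.25, gamma=2.0):
    def focal_loss_fixed(y_true, y_pred):
        pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)
        # epsilon keeps the log finite when pt reaches 0
        return -K.mean(alpha * K.pow(1. - pt, gamma) * K.log(pt + K.epsilon()))
    return focal_loss_fixed

# model.compile(optimizer='adam', loss=focal_loss(alpha=0.25, gamma=2.0))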
Example #6
 def call(self, x):
     
     # statistics computed along features dimension, on every spatial position of the input tensor
     A   = K.mean(K.abs(x), axis = self.channel_axis)            # mean absolute value
     M1  = K.mean(x, axis = self.channel_axis)                   # mean value
     M2  = K.mean(x**2, axis = self.channel_axis)                # squared quadratic average
     V   = M2 - M1**2                                            # variance: V[X] = E[X^2] - E[X]^2
     eps = 0.001 #K.epsilon()
     
     norm = K.pow(V + eps, self.norm_dev/2) * K.pow(M2 + eps, self.norm_mag/2) * K.pow(A + eps, self.norm_abs)
     return x / norm[...,None]
Example #7
def normals_metric(y_true, y_pred):

    y_true = K.variable(y_true)
    y_pred = K.variable(y_pred)

    y_true = K.expand_dims(y_true,0)


    filter_y = K.variable(np.array([[ 0., -0.5 , 0.],
                               [0., 0., 0.],
                               [0., 0.5, 0.]]).reshape(3, 3, 1, 1))


    filter_x = K.variable(np.array([ [0, 0., 0.],
                               [0.5, 0., -0.5],
                               [0., 0., 0.]]).reshape(3, 3, 1, 1))

    dzdx = K.conv2d(K.exp(y_true), filter_x, padding='same')
    dzdy = K.conv2d(K.exp(y_true), filter_y, padding='same')

    dzdx_ = dzdx * -1.0
    dzdy_ = dzdy * -1.0

    mag_norm = K.pow(dzdx, 2) + K.pow(dzdy, 2) + 1.0

    mag_norm = K.sqrt(mag_norm)
    N3 = 1.0 / mag_norm
    N1 = dzdx_ / mag_norm
    N2 = dzdy_ / mag_norm

    normals = K.concatenate(tensors=[N1,N2,N3],axis=-1)

    dzdx_pred = K.conv2d(K.exp(y_pred), filter_x, padding='same')
    dzdy_pred = K.conv2d(K.exp(y_pred), filter_y, padding='same')

    mag_norm_pred = K.pow(dzdx_pred,2) + K.pow(dzdy_pred,2) + 1.0
    mag_norm_pred = K.sqrt(mag_norm_pred)

    grad_x = K.concatenate(tensors=[1.0/ mag_norm_pred,
                                    0.0/ mag_norm_pred, dzdx_pred/ mag_norm_pred],axis=-1)
    grad_y = K.concatenate(tensors=[0.0/ mag_norm_pred,
                                    1.0/ mag_norm_pred, dzdy_pred/ mag_norm_pred],axis=-1)


    dot_term_x = K.mean(K.sum(normals[0,:,:,:] * grad_x[0,:,:,:], axis=-1, keepdims=True), axis=-1)
    dot_term_y = K.mean(K.sum(normals[0,:,:,:] * grad_y[0,:,:,:], axis=-1, keepdims=True), axis=-1)


    dot_term_x = K.abs(dot_term_x)
    dot_term_y = K.abs(dot_term_y)

    return K.eval(K.mean(dot_term_x)),K.eval(K.mean(dot_term_y))
Example #8
 def call(self, x, mask=None):
     input_shape = K.int_shape(x)
     reduction_axes = list(range(len(input_shape)))
     del reduction_axes[self.axis]
     broadcast_shape = [1] * len(input_shape)
     broadcast_shape[self.axis] = input_shape[self.axis]
     alpha_pos = K.reshape(self.alpha_pos, broadcast_shape)
     alpha_neg = K.reshape(self.alpha_neg, broadcast_shape)
     beta_pos = K.reshape(self.beta_pos, broadcast_shape)
     beta_neg = K.reshape(self.beta_neg, broadcast_shape)
     rho_pos = K.reshape(self.rho_pos, broadcast_shape)
     rho_neg = K.reshape(self.rho_neg, broadcast_shape)
     pos = alpha_pos * K.pow(K.relu(x + beta_pos) + K.epsilon(), rho_pos)
     neg = alpha_neg * K.pow(K.relu(-x + beta_neg) + K.epsilon(), rho_neg)
     return pos + neg
Example #9
def total_variation_loss(x):
    assert K.ndim(x) == 4
    # channels_first layout: axis 2 is height, axis 3 is width
    a = K.square(x[:, :, 1:, :img_width - 1] - x[:, :, :img_height - 1, :img_width - 1])
    b = K.square(x[:, :, :img_height - 1, 1:] - x[:, :, :img_height - 1, :img_width - 1])
    return K.sum(K.pow(a + b, 1.25))
Example #10
    def build_loss(self):
        r"""Implements the N-dim version of function
        $$TV^{\beta}(x) = \sum_{whc} \left ( \left ( x(h, w+1, c) - x(h, w, c) \right )^{2} +
        \left ( x(h+1, w, c) - x(h, w, c) \right )^{2} \right )^{\frac{\beta}{2}}$$
        to return total variation for all images in the batch.
        """
        image_dims = K.ndim(self.img) - 2

        # Constructing slice [1:] + [:-1] * (image_dims - 1) and [:-1] * (image_dims)
        start_slice = [slice(1, None, None)] + [slice(None, -1, None) for _ in range(image_dims - 1)]
        end_slice = [slice(None, -1, None) for _ in range(image_dims)]
        samples_channels_slice = [slice(None, None, None), slice(None, None, None)]

        # Compute pixel diffs by rolling slices to the right per image dim.
        tv = None
        for i in range(image_dims):
            ss = tuple(samples_channels_slice + start_slice)
            es = tuple(samples_channels_slice + end_slice)
            diff_square = K.square(self.img[utils.slicer[ss]] - self.img[utils.slicer[es]])
            tv = diff_square if tv is None else tv + diff_square

            # Roll over to next image dim
            start_slice = np.roll(start_slice, 1).tolist()
            end_slice = np.roll(end_slice, 1).tolist()

        tv = K.sum(K.pow(tv, self.beta / 2.))
        return normalize(self.img, tv)
Example #11
 def __call__(self, loss):
     x = self.layer.get_output(True)
     assert K.ndim(x) == 4
     a = K.square(x[:, :, 1:, :-1] - x[:, :, :-1, :-1])
     b = K.square(x[:, :, :-1, 1:] - x[:, :, :-1, :-1])
     loss += self.weight * K.mean(K.sum(K.pow(a + b, 1.25), axis=(1,2,3)))
     return loss
Example #12
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        t = self.iterations + 1
        lr_t = self.lr / (1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        # zero init of 1st moment
        ms = [K.zeros(shape) for shape in shapes]
        # zero init of exponentially weighted infinity norm
        us = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + us

        for p, g, m, u in zip(params, grads, ms, us):

            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            u_t = K.maximum(self.beta_2 * u, K.abs(g))
            p_t = p - self.get_param_learning_rate_t(p,t,lr_t) * m_t / (u_t + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(u, u_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
Example #13
File: wtte.py  Project: g6t/wtte-rnn
 def loglik_continuous_conditional_correction(y, u, a, b, epsilon=1e-35):
     """Integrated conditional excess loss.
         Explanation TODO
     """
     ya = (y + epsilon) / a
     loglikelihoods = y * \
         (u * (K.log(b) + b * K.log(ya)) - (b / (b + 1.)) * K.pow(ya, b))
     return loglikelihoods
Example #14
def total_variation_loss(y_pred):
    if K.image_data_format() == 'channels_first':
        a = K.square(y_pred[:, :, :m - 1, :n - 1] - y_pred[:, :, 1:, :n - 1])
        b = K.square(y_pred[:, :, :m - 1, :n - 1] - y_pred[:, :, :m - 1, 1:])
    else:
        a = K.square(y_pred[:, :m - 1, :n - 1, :] - y_pred[:, 1:, :n - 1, :])
        b = K.square(y_pred[:, :m - 1, :n - 1, :] - y_pred[:, :m - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #15
def variation_loss(comb):
    if K.image_dim_ordering() == "th":
        dx = K.square(comb[:, :, :RESIZED_WH-1, :RESIZED_WH-1] - comb[:, :, 1:, :RESIZED_WH-1])
        dy = K.square(comb[:, :, :RESIZED_WH-1, :RESIZED_WH-1] - comb[:, :, :RESIZED_WH-1, 1:])
    else:
        dx = K.square(comb[:, :RESIZED_WH-1, :RESIZED_WH-1, :] - comb[:, 1:, :RESIZED_WH-1, :])
        dy = K.square(comb[:, :RESIZED_WH-1, :RESIZED_WH-1, :] - comb[:, :RESIZED_WH-1, 1:, :])
    return K.sum(K.pow(dx + dy, 1.25))
Example #16
def total_variation_loss(x):
    assert 4 == K.ndim(x)
    if K.image_dim_ordering() == 'th':
        a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1])
        b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])
        b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #17
def continuity_loss(x):
    assert K.ndim(x) == 4
    if K.image_dim_ordering() == "th":
        a = K.square(x[:, :, : img_width - 1, : img_height - 1] - x[:, :, 1:, : img_height - 1])
        b = K.square(x[:, :, : img_width - 1, : img_height - 1] - x[:, :, : img_width - 1, 1:])
    else:
        a = K.square(x[:, : img_width - 1, : img_height - 1, :] - x[:, 1:, : img_height - 1, :])
        b = K.square(x[:, : img_width - 1, : img_height - 1, :] - x[:, : img_width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #18
def total_variation_loss(x):
    assert K.ndim(x) == 4
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1])
        b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])
        b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
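All of these total-variation variants implement the same TV^β penalty with β = 2.5: squared neighbour differences are summed and raised to the power 1.25 = β/2. A quick sanity check of the channels-last branch of the function above, assuming a TensorFlow backend so the graph can be evaluated with K.eval; img_nrows and img_ncols are simply bound here to the shape of the test arrays:

import numpy as np
from keras import backend as K

img_nrows, img_ncols = 8, 8
flat = np.zeros((1, img_nrows, img_ncols, 3), dtype='float32')
noisy = np.random.rand(1, img_nrows, img_ncols, 3).astype('float32')

# A constant image has zero total variation; a noisy one is penalized.
print(K.eval(total_variation_loss(K.constant(flat))))   # 0.0
print(K.eval(total_variation_loss(K.constant(noisy))))  # > 0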
Example #19
def total_variation_loss(x):
    assert K.ndim(x) == 4
    if K.image_dim_ordering() == 'th':
        a = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])
        b = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])
    else:
        a = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])
        b = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #20
 def __call__(self, x):
     assert K.ndim(x) == 4
     if K.image_dim_ordering() == 'th':
         a = K.square(x[:, :, :self.img_width - 1, :self.img_height - 1] - x[:, :, 1:, :self.img_height - 1])
         b = K.square(x[:, :, :self.img_width - 1, :self.img_height - 1] - x[:, :, :self.img_width - 1, 1:])
     else:
         a = K.square(x[:, :self.img_width - 1, :self.img_height - 1, :] - x[:, 1:, :self.img_height - 1, :])
         b = K.square(x[:, :self.img_width - 1, :self.img_height - 1, :] - x[:, :self.img_width - 1, 1:, :])
     loss = self.weight * K.mean(K.sum(K.pow(a + b, 1.25)))
     return loss
Example #21
    def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]
        alpha = K.reshape(self.alpha, broadcast_shape)
        rho = K.reshape(self.rho, broadcast_shape)

        return alpha * K.pow(K.relu(x) + K.epsilon(), rho)
Example #22
 def total_variation_loss(self):
     """
     Total variation loss, designed to keep the generated image locally coherent
     :return: total variation loss
     """
     x = self.input_tensor
     assert K.ndim(x) == 4
     a = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])
     b = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])
     return K.sum(K.pow(a + b, 1.25))
Example #23
def KLdivergence(P, Y):
    alpha = low_dim - 1.
    sum_Y = K.sum(K.square(Y), axis=1)
    eps = K.variable(10e-15)
    D = sum_Y + K.reshape(sum_Y, [-1, 1]) - 2 * K.dot(Y, K.transpose(Y))
    Q = K.pow(1 + D / alpha, -(alpha + 1) / 2)
    Q *= K.variable(1 - np.eye(batch_size))
    Q /= K.sum(Q)
    Q = K.maximum(Q, eps)
    C = K.log((P + eps) / (Q + eps))
    C = K.sum(P * C)
    return C
Example #24
 def call(self, inputs, mask=None):
     if K.backend() == 'theano':
         a = K.pattern_broadcast(self.a, self.a_param_broadcast)
         k = K.pattern_broadcast(self.k, self.k_param_broadcast)
         n = K.pattern_broadcast(self.n, self.n_param_broadcast)
         z = K.pattern_broadcast(self.z, self.z_param_broadcast)
     else:
         a = self.a
         k = self.k
         n = self.n
         z = self.z
     return a / (K.pow((k / (inputs + 1e-5)), n) + z + 1e-5)
Example #25
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        t = self.iterations + 1

        # Due to the recommendations in [2], i.e. warming momentum schedule
        momentum_cache_t = self.beta_1 * (1. - 0.5 * (K.pow(0.96, t * self.schedule_decay)))
        momentum_cache_t_1 = self.beta_1 * (1. - 0.5 * (K.pow(0.96, (t + 1) * self.schedule_decay)))
        m_schedule_new = self.m_schedule * momentum_cache_t
        m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
        self.updates.append((self.m_schedule, m_schedule_new))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            # the following equations given in [1]
            g_prime = g / (1. - m_schedule_new)
            m_t = self.beta_1 * m + (1. - self.beta_1) * g
            m_t_prime = m_t / (1. - m_schedule_next)
            v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
            v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
            m_t_bar = (1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            p_t = p - get_learing_rate(p, self.lr) * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
            new_p = p_t

            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
Example #26
    def call(self, x):
        
        # print 'LocalNormalization.kernel_size:', self.kernel_size
        
        def mean2d(y):
            
            y = K.pool2d(y, (self.kernel_size[0], 1), pool_mode = 'avg', padding = 'same')
            y = K.pool2d(y, (1, self.kernel_size[1]), pool_mode = 'avg', padding = 'same')
            return y

            # return K.pool2d(y, self.kernel_size, pool_mode = 'avg', padding = 'same')
            
            # (dy, dx) = self.kernel_size
            # top  = dy/2 + 1                             # if even `dy`, averaging window is shifted to the top
            # left = dx/2 + 1                             # if even `dx`, averaging window is shifted to the left
            #
            # padding = ((top, dy-top), (left, dx-left))
            #
            # z  = K.spatial_2d_padding(y, padding)       # `y` padded with zeros
            # s1 = K.cumsum(z,  axis = -3)                # cumulative sums along Y axis only
            # s  = K.cumsum(s1, axis = -2)                # cumulative sums along (Y,X) axes
            #
            # t = s[...,dy:,dx:,:] + s[...,:-dy,:-dx,:] - s[...,dy:,:-dx,:] - s[...,:-dy,dx:,:]
            #
            # # t[0,0] = s[dy,dx] + s[0,0] - ... = cumsum(y)[0,0] + cumsum(y)[dy,dx] - ... = z[0,0] + (z[0,0]+...+z[dy,dx]) - ...
            # #        = area_sum(z, (1,1)...(dy,dx)) = area_sum(y, (0,0)...(dy-top,dx-left)) = area_sum(y, (0,0)...(dy-(dy/2+1), dx-(dx/2+1))) =
            #
            # return t / float(dx*dy)
            
            
        # mean of `x` and x^2 in local area around given pixel
        M   = mean2d(x)
        M2  = mean2d(x**2)
        V   = mean2d((x-M)**2)
        eps = 0.001  #K.epsilon()
        
        scale = K.exp(self.scale) / K.pow(M2 + eps, self.normal_mag/2) / K.pow(V + eps, self.normal_dev/2)  #(V + eps) #K.exp(K.log(D + eps) * self.normal[None,None,:])
        return (x - self.shift * M) * scale
Example #27
def total_variation_loss(x, img_nrows, img_ncols):
	"""
	Total variational loss. Encourages spatial smoothness 
	in the output image.
	"""
	H, W = img_nrows, img_ncols
	if K.image_dim_ordering() == 'th':
		a = K.square(x[:, :, :H-1, :W-1] - x[:, :, 1:, :W-1])
		b = K.square(x[:, :, :H-1, :W-1] - x[:, :, :H-1, 1:])
	else:	
		a = K.square(x[:, :H-1, :W-1, :] - x[:, 1:, :W-1, :])
		b = K.square(x[:, :H-1, :W-1, :] - x[:, :H-1, 1:, :])

	return K.sum(K.pow(a + b, 1.25))
Example #28
 def call(self, x, mask=None):
     if self.size is None or self.mode == 'sum':
         self.size = int(x.shape[-1])
     batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
     position_j = 1. / K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
     position_j = K.expand_dims(position_j, 0)
     position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1  # K.arange does not support variable length, so build the positions this way instead
     position_i = K.expand_dims(position_i, 2)
     position_ij = K.dot(position_i, position_j)
     position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
     if self.mode == 'sum':
         return position_ij + x
     elif self.mode == 'concat':
         return K.concatenate([position_ij, x], 2)
Example #29
def continuity_loss(x):
    # continuity loss util function
    assert K.ndim(x) == 4
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_height - 1, :img_width - 1] -
                     x[:, :, 1:, :img_width - 1])
        b = K.square(x[:, :, :img_height - 1, :img_width - 1] -
                     x[:, :, :img_height - 1, 1:])
    else:
        a = K.square(x[:, :img_height - 1, :img_width - 1, :] -
                     x[:, 1:, :img_width - 1, :])
        b = K.square(x[:, :img_height - 1, :img_width - 1, :] -
                     x[:, :img_height - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #30
def generalized_loss_function(y_true, y_pred, var_a=1.0, cnst=1.0/255.0):
    """
    generalized function used to return a large variety of mathematical loss functions;
    its primary benefit is a smooth, differentiable version of L1 loss

    Barron, J. A More General Robust Loss Function
    https://arxiv.org/pdf/1701.03077.pdf

    Parameters:
        a: penalty factor. Larger numbers give larger weight to large deviations.
        c: scale factor used to adjust to the input scale (i.e. inputs of mean 1e-4 or 256)

    Return:
        a loss value from the results of function(y_pred - y_true)

    Example:
        a=1.0, x >> c, c=1.0/255.0 gives a smoothly differentiable version of L1 / MAE loss
        a=1.999999 (lim as a->2), c=1.0/255.0 gives L2 / RMSE loss
    """
    var_x = y_pred - y_true
    loss = (K.abs(2.0-var_a)/var_a) * (K.pow(K.pow(var_x/cnst, 2.0)/K.abs(2.0-var_a) + 1.0,
                                             (var_a/2.0)) - 1.0)
    return K.mean(loss, axis=-1) * cnst
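A small check of the limiting behaviour described in the docstring, assuming a TensorFlow backend so the expressions can be evaluated with K.eval (the test values are arbitrary):

import numpy as np
from keras import backend as K

y_true = K.constant(np.zeros((1, 4), dtype='float32'))
y_pred = K.constant(np.array([[0.01, -0.02, 0.03, -0.04]], dtype='float32'))

# var_a = 1.0 behaves like a smooth L1 / Charbonnier penalty,
# var_a -> 2 approaches an L2-style penalty.
print(K.eval(generalized_loss_function(y_true, y_pred, var_a=1.0)))
print(K.eval(generalized_loss_function(y_true, y_pred, var_a=1.999999)))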
Example #31
def total_variation_loss(x, height, width):
    a = backend.square(x[:, :height - 1, :width - 1, :] -
                       x[:, 1:, :width - 1, :])
    b = backend.square(x[:, :height - 1, :width - 1, :] -
                       x[:, :height - 1, 1:, :])
    return backend.sum(backend.pow(a + b, 1.25))
Example #32
def total_variation_loss(img):
	assert(K.ndim(img) == 4)
	a = K.square(img[:, :nrows - 1, :ncols - 1, :] - img[:, 1:, :ncols - 1, :])
	b = K.square(img[:, :nrows - 1, :ncols - 1, :] - img[:, :nrows - 1, 1:, :])
	return K.sum(K.pow(a + b, 1.25))
Example #33
 def mse(y_true, y_pred):
     return K.mean(K.pow(y_true - y_pred, 2))
Example #34
def Loss_tv(x_true, x_hat):  # total variation loss
    a = K.square(x_hat[:, :, :H0 - 1, :W0 - 1] -
                 x_hat[:, :, 1:, :W0 - 1]) / (C0 * H0 * W0)
    b = K.square(x_hat[:, :, :H0 - 1, :W0 - 1] -
                 x_hat[:, :, :H0 - 1, 1:]) / (C0 * H0 * W0)
    return K.sum(K.pow(a + b, 1.25))
Example #35
def potential_loss(y_true, y_pred):  # shapes of y_true and y_pred must be the same

    return K.mean((1 - y_true) * K.exp(-10 * (1 - y_true) * y_pred) / K.maximum(K.epsilon(), y_pred) +
                  y_true * K.pow(y_pred, 3))
Example #36
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay *
                             K.cast(self.iterations, K.dtype(self.decay))))

        t = K.cast(self.iterations, K.floatx()) + 1
        lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (
            1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):

            # if a weight tensor (len > 1) use weight normalized parameterization
            # this is the only part changed w.r.t. keras.optimizers.Adam
            ps = K.get_variable_shape(p)
            if len(ps) > 1:

                # get weight normalization parameters
                V, V_norm, V_scaler, g_param, grad_g, grad_V = get_weightnorm_params_and_grads(
                    p, g)

                # Adam containers for the 'g' parameter
                V_scaler_shape = K.get_variable_shape(V_scaler)
                m_g = K.zeros(V_scaler_shape)
                v_g = K.zeros(V_scaler_shape)

                # update g parameters
                m_g_t = (self.beta_1 * m_g) + (1. - self.beta_1) * grad_g
                v_g_t = (self.beta_2 *
                         v_g) + (1. - self.beta_2) * K.square(grad_g)
                new_g_param = g_param - lr_t * m_g_t / (K.sqrt(v_g_t) +
                                                        self.epsilon)
                self.updates.append(K.update(m_g, m_g_t))
                self.updates.append(K.update(v_g, v_g_t))

                # update V parameters
                m_t = (self.beta_1 * m) + (1. - self.beta_1) * grad_V
                v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(grad_V)
                new_V_param = V - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
                self.updates.append(K.update(m, m_t))
                self.updates.append(K.update(v, v_t))

                # Apply constraints.
                if getattr(p, 'constraint', None) is not None:
                    new_V_param = p.constraint(new_V_param)

                # wn param updates --> W updates
                add_weightnorm_param_updates(self.updates, new_V_param,
                                             new_g_param, p, V_scaler)

            else:  # do optimization normally
                m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
                v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
                p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

                self.updates.append(K.update(m, m_t))
                self.updates.append(K.update(v, v_t))

                new_p = p_t

                # Apply constraints.
                if getattr(p, 'constraint', None) is not None:
                    new_p = p.constraint(new_p)

                self.updates.append(K.update(p, new_p))
        return self.updates
Example #37
 def ContrastiveLoss(self, y_pre, y_align):
     weight_arr = np.diag([1.0 for i in range(self.Constant.BATCH_SIZE)])
     weight_tensor = K.variable(weight_arr)
     loss_contrastive = K.mean(K.dot(weight_tensor, K.pow(y_pre - y_align, 2)))
     return loss_contrastive
Example #38
 def call(self, x):
     basis = K.concatenate([K.pow(x, i) for i in range(self.order + 1)])
     return K.dot(basis, self.kernel)
Example #39
 def call(self, x):
     # norm the input
     norm = K.sqrt(K.sum(K.pow(x,2), axis=[1,2], keepdims=True))
     x = x / norm * self.scale_factor
     return x
Example #40
def focal_tversky_loss(y_true, y_pred):
    pt_1 = tversky(y_true, y_pred)
    gamma = 0.75
    return K.pow((1 - pt_1), gamma)
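The tversky index referenced above is not shown in this example. One common definition used together with the focal variant, given here as an illustrative sketch (the smoothing constant and the alpha = 0.7 false-negative weight are typical choices, not taken from this example):

def tversky(y_true, y_pred, smooth=1.0, alpha=0.7):
    y_true_pos = K.flatten(y_true)
    y_pred_pos = K.flatten(y_pred)
    true_pos = K.sum(y_true_pos * y_pred_pos)
    false_neg = K.sum(y_true_pos * (1 - y_pred_pos))
    false_pos = K.sum((1 - y_true_pos) * y_pred_pos)
    # weights false negatives more heavily than false positives when alpha > 0.5
    return (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)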
Example #41
def total_loss(features, height, width):
    a = K.square(features[:, :height - 1, :width - 1, :] -
                 features[:, 1:, :width - 1, :])
    b = K.square(features[:, :height - 1, :width - 1, :] -
                 features[:, :height - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #42
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lrr

        completed_updates = K.cast(
            tf.math.floordiv(self.iterations, self.accum_iters), K.floatx())

        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * completed_updates))

        t = completed_updates + 1

        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))

        update_switch = K.equal((self.iterations + 1) % self.accum_iters, 0)
        update_switch = K.cast(update_switch, K.floatx())

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        gs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]

        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        else:
            vhats = [K.zeros(1) for _ in params]

        self.weights = [self.iterations] + ms + vs + vhats

        for p, g, m, v, vhat, tg in zip(params, grads, ms, vs, vhats, gs):

            sum_grad = tg + g
            avg_grad = sum_grad / self.accum_iters_float

            m_t = (self.beta_1 * m) + (1. - self.beta_1) * avg_grad
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(avg_grad)

            if self.amsgrad:
                vhat_t = K.maximum(vhat, v_t)
                p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
                self.updates.append(
                    K.update(vhat, (1 - update_switch) * vhat +
                             update_switch * vhat_t))
            else:
                p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(
                K.update(m, (1 - update_switch) * m + update_switch * m_t))
            self.updates.append(
                K.update(v, (1 - update_switch) * v + update_switch * v_t))
            self.updates.append(K.update(tg, (1 - update_switch) * sum_grad))
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(
                K.update(p, (1 - update_switch) * p + update_switch * new_p))
        return self.updates
Example #43

# distribution elements to Keras
distribution_elements_row = tf.constant(np.array(
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
                                        dtype='float32',
                                        name='marks')
distribution_elements = K.expand_dims(distribution_elements_row, -1)
distribution_elements_square = K.square(distribution_elements)
distribution_elements_cube = K.pow(distribution_elements, 3)


# compute squared difference of first moments
def first_moment(y_true, y_pred):

    means_true = K.dot(y_true, distribution_elements)
    means_pred = K.dot(y_pred, distribution_elements)

    return K.sqrt(K.mean(K.square(means_true - means_pred)))


# compute squared difference of second moments
def second_moment(y_true, y_pred):

    means_true = K.dot(y_true, distribution_elements)
Example #44
 def g_k(x):
     pi = tf.convert_to_tensor(np.pi, dtype=tf.float32)
     return 1. / K.pow(2. * pi,
                       int(x.get_shape()[-1]) / 2) * K.exp(
                           -0.5 * K.square(x))
Example #45
def rmse(y_true, y_pred):
    y_pred = K.clip(y_pred, 1.0, 5.0)
    return K.sqrt(K.mean(K.pow(y_true - y_pred, 2)))
Example #46
def get_TV(new_gram_matrix):
    x_diff = K.square(new_gram_matrix[:, :WIDTH - 1, :HEIGHT - 1, :] - new_gram_matrix[:, 1:, :HEIGHT - 1, :])
    y_diff = K.square(new_gram_matrix[:, :WIDTH - 1, :HEIGHT - 1, :] - new_gram_matrix[:, :WIDTH - 1, 1:, :])
    return TV_WEIGHT * K.mean(K.sum(K.pow(x_diff + y_diff, 1.25)))
Example #47
 def __call__(self, x):
     regularization = 0.
     regularization += K.pow(K.sum(self.lp * K.pow(x,self.p)),1/self.p)
     return regularization
Example #48
 def mse_powers(y_true, y_pred):
     m = mse(y_true, y_pred)
     return {
         'mse_squared': K.pow(m, 2),
         'mse_cubed': K.pow(m, 3)
     }
Example #49
def norm(x1, x2, axis=1, norm=1):
    return Keras.pow(Keras.sum(Keras.pow(Keras.abs(x1 - x2), norm), axis=axis),
                     1.0 / norm)
Example #50
 def focal_loss_fixed(y_true, y_pred):
     pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
     pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.ones_like(y_pred))
     # keep the (1 - alpha) weighting inside K.sum so it applies element-wise
     return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
            - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
Example #51
def total_variation_loss(x):
    a = K.square(x[:, :resized_width - 1, :resized_height - 1, :] - x[:, 1:, :resized_height - 1, :])
    b = K.square(x[:, :resized_width - 1, :resized_height - 1, :] - x[:, :resized_width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #52
def total_variation_loss(x):
    a = backend.square(x[:, :IMAGE_HEIGHT - 1, :IMAGE_WIDTH - 1, :] -
                       x[:, 1:, :IMAGE_WIDTH - 1, :])
    b = backend.square(x[:, :IMAGE_HEIGHT - 1, :IMAGE_WIDTH - 1, :] -
                       x[:, :IMAGE_HEIGHT - 1, 1:, :])
    return backend.sum(backend.pow(a + b, TOTAL_VARIATION_LOSS_FACTOR))
Example #53
def test_model_methods():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    # test train_on_batch
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               {'dense_1': output_a_np, 'dropout': output_b_np})

    # test fit
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], nb_epoch=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np], nb_epoch=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    nb_epoch=1, batch_size=4)

    # test validation_split
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    nb_epoch=1, batch_size=4, validation_split=0.5)

    # test validation data
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4,
                    validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    nb_epoch=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, {'dense_1': output_a_np, 'dropout': output_b_np}))

    # test_on_batch
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              {'dense_1': output_a_np, 'dropout': output_b_np})

    # predict_on_batch
    out = model.predict_on_batch([input_a_np, input_b_np])
    out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})

    # predict, evaluate
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # with sample_weight
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    sample_weight = [None, np.random.random((10,))]
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               sample_weight=sample_weight)

    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np],
                              sample_weight=sample_weight)

    # test accuracy metric
    model.compile(optimizer, loss, metrics=['acc'],
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 5
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 5

    # this should also work
    model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # and this as well
    model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # test starting from non-zero initial epoch
    trained_epochs = []

    def on_epoch_begin(epoch, logs):
        trained_epochs.append(epoch)
    tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin)
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], nb_epoch=5, batch_size=4,
                    initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test starting from non-zero initial epoch for generator too
    trained_epochs = []

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])
    out = model.fit_generator(gen_data(4), samples_per_epoch=10, nb_epoch=5,
                              initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test with a custom metric function
    mse = lambda y_true, y_pred: K.mean(K.pow(y_true - y_pred, 2))

    def mse_powers(y_true, y_pred):
        m = mse(y_true, y_pred)
        return {
            'mse_squared': K.pow(m, 2),
            'mse_cubed': K.pow(m, 3)
        }

    model.compile(optimizer, loss, metrics=[mse, mse_powers],
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out_len = 1 + 2 * 4  # total loss, per layer: loss + 3 metrics
    assert len(out) == out_len
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == out_len

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.fit([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4, nb_epoch=1)
    out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)
Example #54
def smooth_l1(y_true, y_pred):
    diff = K.abs(y_true - y_pred)
    less_part = K.cast(K.less(diff, 1), tf.float32)
    greater_part = 1.0 - less_part
    # quadratic branch for |diff| < 1, linear branch elsewhere; the 0.5 factor keeps
    # the two branches continuous at diff == 1 (the standard smooth-L1 / Huber form)
    return less_part * 0.5 * K.pow(diff, 2) + greater_part * (diff - 0.5)
Example #55
 def focal_tversky(self, y_true, y_pred):
     pt_1 = self.tversky_index(y_true, y_pred)
     gamma = 0.75
     return K.pow((1 - pt_1), gamma)
Example #56
def gelu(x):
    return 0.5 * x * (1.0 + K.tanh(0.797884561 * (x + 0.044715 * K.pow(x, 3))))
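The constant 0.797884561 is sqrt(2/pi), as used in the tanh approximation of GELU. A quick check of the constant and of the activation's shape, assuming a TensorFlow backend so gelu can be evaluated with K.eval:

import math
import numpy as np
from keras import backend as K

print(math.sqrt(2.0 / math.pi))  # 0.7978845608..., matches the hard-coded constant

x = K.constant(np.linspace(-3.0, 3.0, 7).astype('float32'))
# gelu(x) is close to 0 for strongly negative x and close to x for large positive x
print(K.eval(gelu(x)))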
Example #57
 def loss_img(self, y, y_pred):
     return K.sum(K.pow(y - y_pred, 2))
Example #58
def focal_loss(target, output, gamma=2):
    output /= K.sum(output, axis=-1, keepdims=True)
    eps = K.epsilon()
    output = K.clip(output, eps, 1. - eps)
    return -K.sum(K.pow(1. - output, gamma) * target * K.log(output), axis=-1)
Example #59
 def call(self, x, **kwargs):
     cdf = 0.5 * (1.0 + K.tanh(
         (math.sqrt(2 / math.pi) * (x + 0.044715 * K.pow(x, 3)))))
     return x * cdf
Example #60
 def k_func(x):
     return KK.pow(x, 3.0)