Example #1
def loss_DSSIM_theano(y_true, y_pred):
    # expected net output is of shape [batch_size, row, col, image_channels]
    # e.g. [10, 480, 640, 3] for a batch of 10 640x480 RGB images
    # We need to shuffle this to [Batch_size, image_channels, row, col]
    y_true = y_true.dimshuffle([0, 3, 1, 2])
    y_pred = y_pred.dimshuffle([0, 3, 1, 2])
    
    
    # There are additional parameters for this function
    # Note: some of the 'modes' for edge behavior do not yet have a gradient definition in the Theano tree
    #   and cannot be used for learning
    patches_true = T.nnet.neighbours.images2neibs(y_true, [4, 4])
    patches_pred = T.nnet.neighbours.images2neibs(y_pred, [4, 4])

    u_true = K.mean(patches_true, axis=-1)
    u_pred = K.mean(patches_pred, axis=-1)
    var_true = K.var(patches_true, axis=-1)
    var_pred = K.var(patches_pred, axis=-1)
    std_true = K.sqrt(var_true)
    std_pred = K.sqrt(var_pred)
    c1 = 0.01 ** 2
    c2 = 0.03 ** 2
    ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
    denom = (u_true ** 2 + u_pred ** 2 + c1) * (var_pred + var_true + c2)
    
    ssim /= K.clip(denom, K.epsilon(), np.inf)
    #ssim = tf.select(tf.is_nan(ssim), K.zeros_like(ssim), ssim)
    
    return K.mean((1.0 - ssim) / 2.0)
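A minimal usage sketch (not part of the original source): compiling a Keras model with the DSSIM loss above. It assumes the Theano backend and the usual aliases (numpy as np, theano.tensor as T, keras.backend as K); the toy model is only an illustration.

from keras.models import Sequential
from keras.layers import Conv2D

# toy model whose output shape matches the [batch, 480, 640, 3] layout expected above
model = Sequential([Conv2D(3, (3, 3), padding='same', input_shape=(480, 640, 3))])
model.compile(optimizer='adam', loss=loss_DSSIM_theano)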
Example #2
def keras_SSIM_cs(y_true, y_pred):
    axis=None
    gaussian = make_kernel(1.5)
    x = tf.nn.conv2d(y_true, gaussian, strides=[1, 1, 1, 1], padding='SAME')
    y = tf.nn.conv2d(y_pred, gaussian, strides=[1, 1, 1, 1], padding='SAME')

    u_x=K.mean(x, axis=axis)
    u_y=K.mean(y, axis=axis)

    var_x=K.var(x, axis=axis)
    var_y=K.var(y, axis=axis)

    cov_xy = K.mean(x*y, axis=axis) - u_x*u_y
    #cov_xy=cov_keras(x, y, axis)

    K1=0.01
    K2=0.03
    L=1  # dynamic range of the image (use 255 for 8-bit images on a different scale)

    C1=(K1*L)**2
    C2=(K2*L)**2
    C3=C2/2

    l = ((2*u_x*u_y)+C1) / (K.pow(u_x,2) + K.pow(u_y,2) + C1)
    c = ((2*K.sqrt(var_x)*K.sqrt(var_y))+C2) / (var_x + var_y + C2)
    s = (cov_xy+C3) / (K.sqrt(var_x)*K.sqrt(var_y) + C3)

    return [c,s,l]
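Example #2 relies on a make_kernel helper that is not shown in the snippet. A minimal sketch, assuming it returns a normalized Gaussian window shaped [size, size, 1, 1] for single-channel inputs to tf.nn.conv2d (the window size and layout are assumptions):

import numpy as np
import tensorflow as tf

def make_kernel(sigma, size=11):
    # sample an isotropic Gaussian on a size x size grid centred at zero
    ax = np.arange(size, dtype=np.float32) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    kernel /= kernel.sum()  # normalize so the window sums to 1
    # tf.nn.conv2d filters are [height, width, in_channels, out_channels]
    return tf.constant(kernel[:, :, np.newaxis, np.newaxis])

The three returned components can be recombined as SSIM = l * c * s, so a DSSIM loss built on this function would be K.mean((1.0 - l * c * s) / 2.0).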
Example #3
def DSSIM(y_true, y_pred):
    # y_true = inputs[0]
    # y_pred = inputs[1]
    # print(y_true.shape)

    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
    eps = 1e-9
    u_true = K.mean(patches_true, axis=3)
    u_pred = K.mean(patches_pred, axis=3)
        
    var_true = K.var(patches_true, axis=3)
    var_pred = K.var(patches_pred, axis=3)
        
    covar_true_pred = K.mean(patches_true * patches_pred, axis=3) - u_true * u_pred
        
    std_true = K.sqrt(var_true+eps)
    std_pred = K.sqrt(var_pred+eps)
        
    c1 = 0.01 ** 2
    c2 = 0.03 ** 2
    ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
    denom = (K.square(u_true) + K.square(u_pred) + c1) * (var_pred + var_true + c2)  # squared means, per the standard SSIM denominator
    ssim /= denom
    ssim = tf.where(tf.is_nan(ssim), K.zeros_like(ssim), ssim)
    return K.mean(((1.0 - ssim) / 2))
Example #4
    def loss_DSSIS_tf11(self, y_true, y_pred):
        """
        DSSIM loss function to get the structural dissimilarity between y_true and y_pred
        :param y_true: groundtruth
        :param y_pred: output from the model
        :return: The loss value
        :note Need tf0.11rc to work
        """
        y_true = tf.reshape(y_true, [self.batch_size] + get_shape(y_pred)[1:])
        y_pred = tf.reshape(y_pred, [self.batch_size] + get_shape(y_pred)[1:])
        y_true_tf = tf.transpose(y_true, [0, 2, 3, 1])
        y_pred_tf = tf.transpose(y_pred, [0, 2, 3, 1])
        patches_true = tf.extract_image_patches(y_true_tf, [1, 5, 5, 1],
                                                [1, 2, 2, 1], [1, 1, 1, 1],
                                                "SAME")
        patches_pred = tf.extract_image_patches(y_pred_tf, [1, 5, 5, 1],
                                                [1, 2, 2, 1], [1, 1, 1, 1],
                                                "SAME")

        u_true = K.mean(patches_true, axis=3)
        u_pred = K.mean(patches_pred, axis=3)
        var_true = K.var(patches_true, axis=3)
        var_pred = K.var(patches_pred, axis=3)
        std_true = K.sqrt(var_true)
        std_pred = K.sqrt(var_pred)
        c1 = 0.01**2
        c2 = 0.03**2
        ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
        denom = (u_true**2 + u_pred**2 + c1) * (var_pred + var_true + c2)
        ssim /= denom
        ssim = tf.select(tf.is_nan(ssim), K.zeros_like(ssim), ssim)
        norma = K.mean(K.abs(y_true - y_pred))
        norma = tf.select(tf.is_nan(norma), K.ones_like(norma), norma)
        return K.mean(((1.0 - ssim) / 2)) + (norma / 2)
Example #5
	def my_loss(x, z):
		# batch size is needed below when averaging the cross products
		batch = K.cast(K.shape(z)[0], x.dtype)
		#div_N = Lambda(lambda v: v / batch)
		
		z = z - K.mean(z, axis = 0, keepdims = True)
		x = x - K.mean(x, axis = 0, keepdims = True)
		x_2 = K.var(x, axis = 0, keepdims = True) #1xi
		z_2 = K.var(z, axis = 0, keepdims = True) #1xj
		#zj_xi = div_N(K.dot(K.transpose(z), x))	
		zj_xi = tf.divide(K.dot(K.transpose(z), x), batch)
		z2x2 = K.dot(K.transpose(z_2),x_2)


		R = 1./(1+K.sum(tf.divide(zj_xi**2, z2x2- zj_xi**2+EPS), axis = 0, keepdims = True)) #1 x i
		R = tf.divide(R, x_2 + EPS)
		x_decode = tf.multiply(R, K.transpose(K.dot(K.transpose(tf.divide(zj_xi, (z2x2 - zj_xi**2+EPS))), K.transpose(z))))
		
		dist = tf.contrib.distributions.Normal(0.0, 1.0)
		if z_log_noise is None:
			# for each yj, calculate prod (p(y_j) ) under gaussian
			prod_zj = K.sum(K.log(dist.prob(tf.divide(z, K.sqrt(z_2)))), axis = -1, keepdims = True) # batch x 1
			# why log?
		else:
			prod_zj = 0	
			# z_log_noise 1 x j
			# each real z, 

		# 1/n sum_y p_CI(xi|y) log p_CI(y) / prod(y_j)
		ci_wms = K.mean(tf.multiply(x_decode, K.log(K.dot(K.exp(prod_zj), R)+EPS)), axis = 0) #)), axis = 0) # 1 x i (mean over batch) 
		#ci_wms = K.mean(tf.multiply(x_decode, K.log(1./R)), axis = 0) #)), axis = 0) # 1 x i (mean over batch) 
		#return K.sum(tf.divide(zj_xi**2, z2x2-zj_xi**2+EPS))
		return K.sum(ci_wms) #we want min negative wms == minimize TC == minimize redundancy
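Examples #5, #6 and #19 refer to an EPS constant and the Lambda layer defined outside the snippets. A hedged guess at the module-level setup they assume:

from keras.layers import Lambda  # used to wrap the per-batch division in Examples #6 and #19

EPS = 1e-8  # assumed small constant keeping the divisions and logarithms finite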
Example #6
def gaussian_cond_var(x_, z, invert_sigmoid = False, subtract_log_det = False, binary_z = False, return_all = False):
	batch_size = K.cast(K.shape(x_)[0], x_.dtype)  
	div_N = Lambda(lambda v: v / batch_size)  
	size = K.cast(K.shape(x_)[1], dtype='int32')

	if invert_sigmoid:
		#log_det_jac = -K.mean(K.log(K.clip(tf.multiply(x_, 1-x_), EPS, 1-EPS)))
		#log_det_jac = -K.mean(K.log(K.clip(tf.multiply(x, 1-x), EPS, 1-EPS)))
		x = K.clip(x_, EPS, 1-EPS)
		x = K.log(x) - K.log(1-x)
		if binary_z:
			z = K.clip(z, EPS, 1-EPS)
			z = K.log(z) - K.log(1-z)
			# z is likely continuous, but invert sigmoid if binary		
	else:
		x = x_
		log_det_jac = 0

	mi = K.expand_dims(K.mean(x, axis=0), 0)  # mean of x_i
	mj = K.expand_dims(K.mean(z, axis=0), 1)  # mean of z_j
	vj = K.expand_dims(K.var(z, axis=0), 1)  # sigma_j^2
	vi = K.expand_dims(K.var(x, axis=0), 0)  # sigma_i^2
	V = div_N(K.dot(K.transpose(z-K.transpose(mj)), x- mi)) #jxi
	cond_var = vi - tf.divide(V**2, vj)
	if return_all:
		return cond_var, mi, mj, vi, vj, V
	else:
		return cond_var
Example #7
def loss_DSSIM_theano(y_true, y_pred):
    # There are additional parameters for this function
    # Note: some of the 'modes' for edge behavior do not yet have a gradient definition in the Theano tree
    # and cannot be used for learning
    y_true = y_true.dimshuffle([0, 3, 1, 2])
    y_pred = y_pred.dimshuffle([0, 3, 1, 2])
    patches_true = images2neibs(y_true, [4, 4])
    patches_pred = images2neibs(y_pred, [4, 4])

    u_true = K.mean(patches_true, axis=-1)
    u_pred = K.mean(patches_pred, axis=-1)

    var_true = K.var(patches_true, axis=-1)
    var_pred = K.var(patches_pred, axis=-1)
    std_true = K.sqrt(var_true + K.epsilon())
    std_pred = K.sqrt(var_pred + K.epsilon())

    c1 = 0.01**2
    c2 = 0.03**2
    ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
    denom = (u_true**2 + u_pred**2 + c1) * (var_pred + var_true + c2)
    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero

    # alpha and beta are DSSIM / MSE weighting factors assumed to be defined at module level
    return (alpha * K.mean((1.0 - ssim) / 2.0) +
            beta * K.mean(K.square(y_pred - y_true), axis=-1))
Example #8
    def loss_G(self, y_true, y_pred):
        # L1 Error, I am not using MSE
        L1_distance = K.mean(K.abs(y_true-y_pred))

        # SSIM error, or D-SSIM = (1-SSIM)
        # x = K.mean(y_pred,axis=0)
        # y = K.mean(y_true,axis=0)
        x = y_true
        y = y_pred
        ave_x = K.mean(x)
        ave_y = K.mean(y)
        var_x = K.var(x)
        var_y = K.var(y)
        covariance = K.mean(x*y) - ave_x*ave_y
        c1 = 0.01**2
        c2 = 0.03**2
        ssim = (2*ave_y*ave_x+c1)*(2*covariance+c2)
        ssim = ssim/((K.pow(ave_x,2)+K.pow(ave_y,2)+c1) * (var_x+var_y+c2))
        dssim = 1 - ssim

        #Possibly add something here as well to penalize noise
        # SNR
        snr = K.sigmoid(K.var(y_pred-y_true)/K.var(y_true))  # computed but not used in the returned loss

        return self.l1_loss* L1_distance + self.dssim_loss*dssim
Example #9
        def loss_DSSIS_tf11(self, y_true, y_pred):
            """Need tf0.11rc to work"""
            y_true = tf.reshape(y_true,
                                [self.batch_size] + get_shape(y_pred)[1:])
            y_pred = tf.reshape(y_pred,
                                [self.batch_size] + get_shape(y_pred)[1:])
            y_true = tf.transpose(y_true, [0, 2, 3, 1])
            y_pred = tf.transpose(y_pred, [0, 2, 3, 1])
            patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1],
                                                    [1, 2, 2, 1], [1, 1, 1, 1],
                                                    "SAME")
            patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1],
                                                    [1, 2, 2, 1], [1, 1, 1, 1],
                                                    "SAME")

            u_true = K.mean(patches_true, axis=3)
            u_pred = K.mean(patches_pred, axis=3)
            var_true = K.var(patches_true, axis=3)
            var_pred = K.var(patches_pred, axis=3)
            std_true = K.sqrt(var_true)
            std_pred = K.sqrt(var_pred)
            c1 = 0.01**2
            c2 = 0.03**2
            ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
            denom = (u_true**2 + u_pred**2 + c1) * (var_pred + var_true + c2)
            ssim /= denom
            ssim = tf.select(tf.is_nan(ssim), K.zeros_like(ssim), ssim)
            return K.mean(((1.0 - ssim) / 2))
Example #10
def ssim(y_true, y_pred):
    # source: https://gist.github.com/Dref360/a48feaecfdb9e0609c6a02590fd1f91b

    y_true = tf.expand_dims(y_true, -1)
    y_pred = tf.expand_dims(y_pred, -1)
    y_true = tf.transpose(y_true, [0, 2, 3, 1])
    y_pred = tf.transpose(y_pred, [0, 2, 3, 1])
    patches_true = tf.extract_image_patches(y_true, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
    patches_pred = tf.extract_image_patches(y_pred, [1, 5, 5, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")

    u_true = K.mean(patches_true, axis=3)
    u_pred = K.mean(patches_pred, axis=3)
    var_true = K.var(patches_true, axis=3)
    var_pred = K.var(patches_pred, axis=3)
    std_true = K.sqrt(var_true)
    std_pred = K.sqrt(var_pred)
    c1 = 0.01 ** 2
    c2 = 0.03 ** 2
    ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
    denom = (u_true ** 2 + u_pred ** 2 + c1) * (var_pred + var_true + c2)
    ssim /= denom
    ssim = tf.where(tf.is_nan(ssim), K.zeros_like(ssim), ssim)
    return ssim

# ----------------------------------------------------------------------------------------------------------------------
def DSSIM_base(y_true, y_pred, kernel_size):
    kernel = [kernel_size, kernel_size]
    y_true = K.reshape(y_true, [-1] + list(K.int_shape(y_pred)[1:]))
    y_pred = K.reshape(y_pred, [-1] + list(K.int_shape(y_pred)[1:]))

    patches_pred = KC.extract_image_patches(y_pred, kernel, kernel, 'valid',
                                            K.image_data_format())
    patches_true = KC.extract_image_patches(y_true, kernel, kernel, 'valid',
                                            K.image_data_format())

    # Reshape to get the var in the cells
    bs, w, h, c1, c2, c3 = K.int_shape(patches_pred)
    patches_pred = K.reshape(patches_pred, [-1, w, h, c1 * c2 * c3])
    patches_true = K.reshape(patches_true, [-1, w, h, c1 * c2 * c3])
    # Get mean
    u_true = K.mean(patches_true, axis=-1)
    u_pred = K.mean(patches_pred, axis=-1)
    # Get variance
    var_true = K.var(patches_true, axis=-1)
    var_pred = K.var(patches_pred, axis=-1)
    # Get std dev
    covar_true_pred = K.mean(patches_true * patches_pred,
                             axis=-1) - u_true * u_pred

    ssim = (2 * u_true * u_pred + 0.01**2) * (2 * covar_true_pred + 0.03**2)
    denom = ((K.square(u_true) + K.square(u_pred) + 0.01**2) *
             (var_pred + var_true + 0.03**2))
    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
    print('----------------------', K.mean((1.0 - ssim) / 2.0))
    return K.mean((1.0 - ssim) / 2.0)
Example #12
def SSIM_cs(y_true, y_pred):
    patches_true = tf.extract_image_patches(y_true,
                                            ksizes=[1, 8, 8, 1],
                                            strides=[1, 8, 8, 1],
                                            rates=[1, 1, 1, 1],
                                            padding="VALID")
    patches_pred = tf.extract_image_patches(y_pred,
                                            ksizes=[1, 8, 8, 1],
                                            strides=[1, 8, 8, 1],
                                            rates=[1, 1, 1, 1],
                                            padding="VALID")

    var_true = K.var(patches_true, axis=(1, 2))
    var_pred = K.var(patches_pred, axis=(1, 2))
    mean_true = K.mean(patches_true, axis=(1, 2))
    mean_pred = K.mean(patches_pred, axis=(1, 2))
    std_true = K.sqrt(var_true)
    std_pred = K.sqrt(var_pred)
    covar_true_pred = K.mean(patches_true * patches_pred,
                             axis=(1, 2)) - mean_true * mean_pred

    c1 = (0.01 * 11.09)**2
    c2 = (0.03 * 11.09)**2
    #contrast = (2 * std_pred * std_true + c2)/(var_pred+var_true+c2);
    #lumi = (2 * mean_pred * mean_true + c1)/(mean_pred**2+mean_true**2+c1);
    #struct = (covar_true_pred+c2/2)/(std_true*std_pred+c2/2);

    ssim = (2 * mean_true * mean_pred + c1) * (2 * covar_true_pred + c2)
    denom = (mean_pred**2 + mean_true**2 + c1) * (var_pred + var_true + c2)
    ssim /= denom

    #ssim = contrast*struct;
    ssim = tf.where(tf.is_nan(ssim), K.zeros_like(ssim), ssim)
    return K.mean(ssim)
Example #13
    def __call__(self, y_true, y_pred):
        # There are additional parameters for this function
        # Note: some of the 'modes' for edge behavior do not yet have a gradient definition in the Theano tree
        #   and cannot be used for learning

        kernel = [self.kernel_size, self.kernel_size]
        y_true = KC.reshape(y_true, [-1] + list(self.__int_shape(y_pred)[1:]))
        y_pred = KC.reshape(y_pred, [-1] + list(self.__int_shape(y_pred)[1:]))

        patches_pred = KC.extract_image_patches(y_pred, kernel, kernel,
                                                'valid', self.dim_ordering)
        patches_true = KC.extract_image_patches(y_true, kernel, kernel,
                                                'valid', self.dim_ordering)

        # Reshape to get the var in the cells
        bs, w, h, c1, c2, c3 = self.__int_shape(patches_pred)
        patches_pred = KC.reshape(patches_pred, [-1, w, h, c1 * c2 * c3])
        patches_true = KC.reshape(patches_true, [-1, w, h, c1 * c2 * c3])
        # Get mean
        u_true = KC.mean(patches_true, axis=-1)
        u_pred = KC.mean(patches_pred, axis=-1)
        # Get variance
        var_true = K.var(patches_true, axis=-1)
        var_pred = K.var(patches_pred, axis=-1)
        # Get std dev
        covar_true_pred = K.mean(patches_true * patches_pred,
                                 axis=-1) - u_true * u_pred

        ssim = (2 * u_true * u_pred + self.c1) * (2 * covar_true_pred +
                                                  self.c2)
        denom = (K.square(u_true) + K.square(u_pred) +
                 self.c1) * (var_pred + var_true + self.c2)
        ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
        return K.mean(ssim)
Example #14
def CCC(actual, predicted):
    pred_mean = K.mean(predicted, axis=0)
    ref_mean = K.mean(actual, axis=0)
    pred_var = K.var(predicted, axis=0)
    ref_var = K.var(actual, axis=0)
    covariance = K.mean((predicted - pred_mean) * (actual - ref_mean), axis=0)
    CCC = (2 * covariance) / (pred_var + ref_var + K.pow((pred_mean - ref_mean), 2))
    return K.sum(CCC) / 2
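A NumPy cross-check of the same concordance correlation coefficient, useful for validating the tensor version on small arrays (a sketch, not part of the original):

import numpy as np

def ccc_numpy(actual, predicted):
    pred_mean = predicted.mean(axis=0)
    ref_mean = actual.mean(axis=0)
    pred_var = predicted.var(axis=0)
    ref_var = actual.var(axis=0)
    covariance = ((predicted - pred_mean) * (actual - ref_mean)).mean(axis=0)
    return (2 * covariance) / (pred_var + ref_var + (pred_mean - ref_mean) ** 2)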
Example #15
def inverse_variance_combination_content_loss(base, combination):
    num = K.sum(combination / K.var(combination))
    den = K.sum(1 / K.var(combination))

    inverse_variance = num / den

    loss = K.sqrt(inverse_variance * K.sum(combination - base))

    return loss
Example #16
def ccc_err_tensor(y_true, y_pred):
    """ Symbolic CCC error for two 1D time series. Can be used as objective.
    """
    cov_xy = K.mean(y_pred * y_true) - (K.mean(y_pred) * K.mean(y_true))
    mean_x = K.mean(y_pred)
    mean_y = K.mean(y_true)
    var_x = K.var(y_pred)
    var_y = K.var(y_true)
    return 1 - (2 * cov_xy / (var_x + var_y + K.square(mean_x - mean_y)))
Example #17
    def call(self, inputs, mask=None):
        input_shape = K.int_shape(inputs)
        if len(input_shape) != 4 and len(input_shape) != 2:
            raise ValueError('Inputs should have rank ' +
                             str(4) + " or " + str(2) +
                             '; Received input shape:', str(input_shape))

        if len(input_shape) == 4:
            if self.data_format == 'channels_last':
                batch_size, h, w, c = input_shape
                if batch_size is None:
                    batch_size = -1
                
                if c < self.group:
                    raise ValueError('Input channels should be larger than group size' +
                                     '; Received input channels: ' + str(c) +
                                     '; Group size: ' + str(self.group)
                                    )

                x = K.reshape(inputs, (batch_size, h, w, self.group, c // self.group))
                mean = K.mean(x, axis=[1, 2, 4], keepdims=True)
                std = K.sqrt(K.var(x, axis=[1, 2, 4], keepdims=True) + self.epsilon)
                x = (x - mean) / std

                x = K.reshape(x, (batch_size, h, w, c))
                return self.gamma * x + self.beta
            elif self.data_format == 'channels_first':
                batch_size, c, h, w = input_shape
                if batch_size is None:
                    batch_size = -1
                
                if c < self.group:
                    raise ValueError('Input channels should be larger than group size' +
                                     '; Received input channels: ' + str(c) +
                                     '; Group size: ' + str(self.group)
                                    )

                x = K.reshape(inputs, (batch_size, self.group, c // self.group, h, w))
                mean = K.mean(x, axis=[2, 3, 4], keepdims=True)
                std = K.sqrt(K.var(x, axis=[2, 3, 4], keepdims=True) + self.epsilon)
                x = (x - mean) / std

                x = K.reshape(x, (batch_size, c, h, w))
                return self.gamma * x + self.beta
                
        elif len(input_shape) == 2:
            reduction_axes = list(range(0, len(input_shape)))
            del reduction_axes[0]
            batch_size, _ = input_shape
            if batch_size is None:
                batch_size = -1
                
            mean = K.mean(inputs, keepdims=True)
            std = K.sqrt(K.var(inputs, keepdims=True) + self.epsilon)
            x = (inputs  - mean) / std
            
            return self.gamma * x + self.beta
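The channels_last branch of Example #17 distilled into a standalone function, with group, gamma, beta and epsilon passed as plain arguments instead of layer attributes (a sketch for readability, not the original layer):

def group_norm_channels_last(x, group, gamma, beta, epsilon=1e-5):
    batch_size, h, w, c = K.int_shape(x)
    if batch_size is None:
        batch_size = -1
    # split the channel axis into [group, channels_per_group] and normalize within each group
    x = K.reshape(x, (batch_size, h, w, group, c // group))
    mean = K.mean(x, axis=[1, 2, 4], keepdims=True)
    std = K.sqrt(K.var(x, axis=[1, 2, 4], keepdims=True) + epsilon)
    x = (x - mean) / std
    x = K.reshape(x, (batch_size, h, w, c))
    return gamma * x + beta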
Example #18
def pearson_error(y_true, y_pred):
    true_mean = K.mean(y_true)
    true_variance = K.var(y_true)
    pred_mean = K.mean(y_pred)
    pred_variance = K.var(y_pred)

    x = y_true - true_mean
    y = y_pred - pred_mean
    rho = K.sum(x * y) / K.sqrt(K.sum(x**2) * K.sum(y**2))
    return 1 - rho
Example #19
def gaussian_mi(x, z): # returns j i matrix of I(X_i:Y_j)
	batch_size = K.cast(K.shape(x)[0], x.dtype)  # This is a node tensor, so we can't treat as integer
	div_N = Lambda(lambda v: v / batch_size)  

	mi = K.expand_dims(K.mean(x, axis=0), 0)  # mean of x_i
	mj = K.expand_dims(K.mean(z, axis=0), 1)  # mean of z_j
	vj = K.expand_dims(K.var(z, axis=0) + EPS, 1)  # sigma_j^2
	vi = K.expand_dims(K.var(x, axis=0) + EPS, 0)  # sigma_i^2
	V = div_N(K.dot(K.transpose(z-K.transpose(mj)), x- mi))
	rho = V / K.sqrt(vi*vj)
	return -.5*K.log(1-rho**2) #j i
Example #20
def SSIM(y_true, y_pred):
    u_true = k.mean(y_true)
    u_pred = k.mean(y_pred)
    var_true = k.var(y_true)
    var_pred = k.var(y_pred)
    std_true = k.sqrt(var_true)
    std_pred = k.sqrt(var_pred)
    c1 = k.square(0.01 * 7)
    c2 = k.square(0.03 * 7)
    ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
    denom = (u_true**2 + u_pred**2 + c1) * (var_pred + var_true + c2)
    return ssim / denom
Example #21
    def layer_style_loss(self, content, style):

        content_mean = K.mean(content, axis=[1, 2])
        content_var = K.sqrt(K.var(content, axis=[1, 2]) + 1e-03)  # note: this is a standard deviation despite the name

        style_mean = K.mean(style, axis=[1, 2])
        style_std = K.sqrt(K.var(style, axis=[1, 2]) + 1e-03)

        m_loss = LossFunction.sse(content_mean, style_mean) / self.batch_size
        s_loss = LossFunction.sse(content_var, style_std) / self.batch_size

        return m_loss + s_loss
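Example #21 calls a LossFunction.sse helper that is not included in the snippet; presumably a sum-of-squared-errors along the lines of the sketch below (the exact reduction is an assumption):

class LossFunction:
    @staticmethod
    def sse(a, b):
        # assumed helper: sum of squared differences between two statistic tensors
        return K.sum(K.square(a - b))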
Example #22
 def call(self, inputs):
     shape = inputs.shape
     if K.image_data_format() == 'channels_first':
         inputs = K.reshape(inputs, (-1, int(shape[1]), int(shape[2]) * int(shape[3])))
         m = K.mean(inputs, axis=-1, keepdims=False)
         # add the epsilon directly; K.update_add is meant for updating variables, not the result of K.var
         v = K.sqrt(K.var(inputs, axis=-1, keepdims=False) + 1.0e-5)
         return K.concatenate([m, v], axis=-1)
     else:
         inputs = (K.permute_dimensions(inputs, (0, 3, 1, 2)))
         inputs = K.reshape(inputs, (-1, int(shape[3]), int(shape[1]) * int(shape[2])))
         m = K.mean(inputs, axis=-1, keepdims=False)
         v = K.sqrt(K.var(inputs, axis=-1, keepdims=False)+K.constant(1.0e-5, dtype=inputs.dtype.base_dtype))
         return K.concatenate([m,v],axis = -1)
Example #23
 def loss_DSSIS(self, y_true, y_pred):
     u_true = K.mean(y_true)
     u_pred = K.mean(y_pred)
     var_true = K.var(y_true)
     var_pred = K.var(y_pred)
     std_true = K.std(y_true)
     std_pred = K.std(y_pred)
     c1 = 0.01**2
     c2 = 0.03**2
     ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
     ssim /= (u_true**2 + u_pred**2 + c1) * (var_pred + var_true + c2)
     return ((1.0 - ssim) / 2 +
             K.binary_crossentropy(y_pred, y_true, True)) / 2.0
Example #24
    def __call__(self, y_true, y_pred):
        """ Call the DSSIM Loss Function.

        Parameters
        ----------
        y_true: tensor or variable
            The ground truth value
        y_pred: tensor or variable
            The predicted value

        Returns
        -------
        tensor
            The DSSIM Loss value

        Notes
        -----
        There are additional parameters for this function. Some of the 'modes' for edge behavior
        do not yet have a gradient definition in the Theano tree and cannot be used for learning.
        """

        kernel = [self.kernel_size, self.kernel_size]
        y_true = K.reshape(y_true, [-1] + list(self._int_shape(y_pred)[1:]))
        y_pred = K.reshape(y_pred, [-1] + list(self._int_shape(y_pred)[1:]))
        patches_pred = self.extract_image_patches(y_pred,
                                                  kernel,
                                                  kernel,
                                                  'valid',
                                                  self.dim_ordering)
        patches_true = self.extract_image_patches(y_true,
                                                  kernel,
                                                  kernel,
                                                  'valid',
                                                  self.dim_ordering)

        # Get mean
        u_true = K.mean(patches_true, axis=-1)
        u_pred = K.mean(patches_pred, axis=-1)
        # Get variance
        var_true = K.var(patches_true, axis=-1)
        var_pred = K.var(patches_pred, axis=-1)
        # Get standard deviation
        covar_true_pred = K.mean(
            patches_true * patches_pred, axis=-1) - u_true * u_pred

        ssim = (2 * u_true * u_pred + self.c_1) * (
            2 * covar_true_pred + self.c_2)
        denom = (K.square(u_true) + K.square(u_pred) + self.c_1) * (
            var_pred + var_true + self.c_2)
        ssim /= denom  # no need for clipping, c_1 and c_2 make the denom non-zero
        return (1.0 - ssim) / 2.0
Example #25
def loss_MS_SSIM(y_true, y_pred):

    # expected net output is of shape [batch_size, row, col, image_channels]
    # We need to shuffle this to [Batch_size, image_channels, row, col]
    y_true = y_true.dimshuffle([0, 3, 1, 2])
    y_pred = y_pred.dimshuffle([0, 3, 1, 2])

    c1 = 0.01**2
    c2 = 0.03**2
    ssim = 1.0

    alpha = 1.0
    beta = 1.0
    gamma = 1.0

    for i in range(0, 3):
        patches_true = K.T.nnet.neighbours.images2neibs(
            y_true, [10, 10], [5, 5])
        patches_pred = K.T.nnet.neighbours.images2neibs(
            y_pred, [10, 10], [5, 5])

        mx = K.mean(patches_true, axis=-1)
        my = K.mean(patches_pred, axis=-1)
        varx = K.var(patches_true, axis=-1)
        vary = K.var(patches_pred, axis=-1)
        covxy = K.mean(patches_true * patches_pred, axis=-1) - mx * my

        if i == 0:
            ssimLn = (2 * mx * my + c1)
            ssimLd = (mx**2 + my**2 + c1)
            ssimLn /= K.clip(ssimLd, K.epsilon(), np.inf)
            ssim = K.mean(ssimLn**(alpha * 3))

        ssimCn = (2 * K.sqrt(varx * vary + K.epsilon()) + c2)
        ssimCd = (varx + vary + c2)
        ssimCn /= K.clip(ssimCd, K.epsilon(), np.inf)

        ssimSn = (covxy + c2 / 2)
        ssimSd = (K.sqrt(varx * vary + K.epsilon()) + c2 / 2)
        ssimSn /= K.clip(ssimSd, K.epsilon(), np.inf)

        ssim *= K.mean((ssimCn**beta) * (ssimSn**gamma))

        y_true = K.pool2d(y_true, (2, 2), (2, 2),
                          data_format='channels_first',
                          pool_mode='avg')
        y_pred = K.pool2d(y_pred, (2, 2), (2, 2),
                          data_format='channels_first',
                          pool_mode='avg')

    return (1.0 - ssim)
Example #26
    def __call__(self, y_true, y_pred):
        # There are additional parameters for this function
        # Note: some of the 'modes' for edge behavior do not yet have a gradient definition in the Theano tree
        #   and cannot be used for learning

        #print(y_true)

        kernel = [1, self.kernel_size, self.kernel_size, 1]
        y_true = K.reshape(y_true, [-1] + list(self.__int_shape(y_pred)[1:]))
        y_pred = K.reshape(y_pred, [-1] + list(self.__int_shape(y_pred)[1:]))

        #print(y_true)

        patches_pred = tf.extract_image_patches(
            images=y_pred,
            ksizes=kernel,
            strides=kernel,
            rates=[1, 1, 1, 1],
            padding='SAME')
        patches_true = tf.extract_image_patches(
            images=y_true,
            ksizes=kernel,
            strides=kernel,
            rates=[1, 1, 1, 1],
            padding='SAME')

        #print(patches_true)

        # Get mean
        u_true = K.mean(patches_true, axis=-1)
        u_pred = K.mean(patches_pred, axis=-1)
        # Get variance
        var_true = K.var(patches_true, axis=-1)
        var_pred = K.var(patches_pred, axis=-1)
        # Get std dev
        covar_true_pred = K.mean(
            patches_true * patches_pred, axis=-1) - u_true * u_pred

        #print(var_true)

        ssim = (2 * u_true * u_pred + self.c1) * \
            (2 * covar_true_pred + self.c2)
        denom = (K.square(u_true) + K.square(u_pred) + self.c1) * \
            (var_pred + var_true + self.c2)
        ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
        ssim = tf.clip_by_value(ssim, -1.0, 1.0)

        #print(ssim)

        #print(ssim.eval(session=tf.Session()))
        return K.mean((1.0 - ssim) / 2.0)
Example #27
def SSIM_Loss(y_true, y_pred):
    # assert y_true.shape == y_pred.shape, 'Cannot compute PSNR if the two input shapes are not the same: %s and %s' % (str(
    #     y_true.shape), str(y_pred.shape))
    u_true = K.mean(y_true)
    u_pred = K.mean(y_pred)
    var_true = K.var(y_true)
    var_pred = K.var(y_pred)
    std_true = K.sqrt(var_true)
    std_pred = K.sqrt(var_pred)
    c1 = K.square(0.01 * 7)
    c2 = K.square(0.03 * 7)
    ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
    denom = (u_true**2 + u_pred**2 + c1) * (var_pred + var_true + c2)
    return ssim / denom
Example #28
    def tlayer(self, O, T, W, P, K, D, b):

        if self.normalize:
            O = BE.batch_normalization(x=O,
                                       mean=BE.mean(O),
                                       var=BE.var(O),
                                       gamma=1.,
                                       beta=0.,
                                       epsilon=0.0001)
            P = BE.batch_normalization(x=P,
                                       mean=BE.mean(P),
                                       var=BE.var(P),
                                       gamma=1.,
                                       beta=0.,
                                       epsilon=0.0001)

        T_ = BE.reshape(T, [D, D * K])
        OT = BE.dot(O, T_)
        OT = BE.reshape(OT, [-1, D, K])

        P_ = BE.reshape(P, [-1, D, 1])
        OTP = BE.batch_dot(OT, P_, axes=(1, 1))

        OP = BE.concatenate([O, P], axis=1)
        W_ = BE.transpose(W)

        WOP = BE.dot(OP, W_)
        WOP = BE.reshape(WOP, [-1, K, 1])

        b_ = BE.reshape(b, [K, 1])

        S = merge([OTP, WOP, b_], mode='sum')
        S_ = BE.reshape(S, [-1, K])

        R = BE.tanh(S_)

        # print('O shape: ', BE.int_shape(O))
        # print('T_ shape: ', BE.int_shape(T_))
        # print('OT shape:', BE.int_shape(OT))
        # print('P shape: ', BE.int_shape(P))
        # print('P_ shape: ', BE.int_shape(P_))
        # print('OTP shape:', BE.int_shape(OTP))
        # print('OP shape: ', BE.int_shape(OP))
        # print('WOP shape: ', BE.int_shape(WOP))
        # print('WOP reshape: ', BE.int_shape(WOP))
        # print('b_ shape: ', BE.int_shape(b_))
        # print('S shape: ', BE.int_shape(S))
        # print('S_ shape: ', BE.int_shape(S_))

        return R
    def SSIM_Loss(self, y_true, y_pred):
        y_pred_hsv = rgb_to_hsv(y_pred)

        # mae_loss
        mae = K.mean(K.abs(y_pred - y_true), axis=-1)

        # tv_loss
        shape = tf.shape(y_pred)
        height, width = shape[1], shape[2]
        y = tf.slice(y_pred, [0, 0, 0, 0],
                     tf.stack([-1, height - 1, -1, -1])) - tf.slice(
                         y_pred, [0, 1, 0, 0], [-1, -1, -1, -1])
        x = tf.slice(y_pred, [0, 0, 0, 0],
                     tf.stack([-1, -1, width - 1, -1])) - tf.slice(
                         y_pred, [0, 0, 1, 0], [-1, -1, -1, -1])
        tv_loss = tf.nn.l2_loss(x) / tf.to_float(
            tf.size(x)) + tf.nn.l2_loss(y) / tf.to_float(tf.size(y))

        # ssim_loss
        c1 = 0.01**2
        c2 = 0.03**2
        y_true = tf.transpose(y_true, [0, 2, 3, 1])
        y_pred = tf.transpose(y_pred, [0, 2, 3, 1])
        patches_true = tf.extract_image_patches(y_true, [1, 8, 8, 1],
                                                [1, 2, 2, 1], [1, 1, 1, 1],
                                                "SAME")
        patches_pred = tf.extract_image_patches(y_pred, [1, 8, 8, 1],
                                                [1, 2, 2, 1], [1, 1, 1, 1],
                                                "SAME")
        # Get mean
        u_true = K.mean(patches_true, axis=-1)
        u_pred = K.mean(patches_pred, axis=-1)
        # Get variance
        var_true = K.var(patches_true, axis=-1)
        var_pred = K.var(patches_pred, axis=-1)
        # Get std dev
        std_true = K.sqrt(var_true)
        std_pred = K.sqrt(var_pred)
        covar_true_pred = std_pred * std_true  # note: product of std devs, not a true covariance
        ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
        denom = (K.square(u_true) + K.square(u_pred) + c1) * (var_pred +
                                                              var_true + c2)
        ssim /= denom

        size = tf.size(y_pred_hsv)
        light_loss = tf.nn.l2_loss(y_pred_hsv[:, :, :, 2]) / tf.to_float(size)

        # note: the ssim term computed above is not included in this total
        total_loss = -0.07 * light_loss + 1.0 * mae - 0.0005 * tv_loss
        return total_loss
Example #30
def ssim(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)

    u_true = K.mean(y_true_f)
    u_pred = K.mean(y_pred_f)
    var_true = K.var(y_true_f)
    var_pred = K.var(y_pred_f)
    std_true = K.sqrt(var_true)
    std_pred = K.sqrt(var_pred)

    # note: unlike the usual SSIM constants, c1 and c2 are not squared here
    c1 = 0.01
    c2 = 0.03

    return ((2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)) / ((u_true ** 2 + u_pred ** 2 + c1) * (var_pred + var_true + c2))
Example #31
def loss_DSSIM_theano(y_true, y_pred):
    patches_true = T.nnet.neighbours.images2neibs(y_true, [4, 4])
    patches_pred = T.nnet.neighbours.images2neibs(y_pred, [4, 4])
    u_true = K.mean(patches_true, axis=-1)
    u_pred = K.mean(patches_pred, axis=-1)
    var_true = K.var(patches_true, axis=-1)
    var_pred = K.var(patches_pred, axis=-1)
    eps = 1e-9
    std_true = K.sqrt(var_true + eps)
    std_pred = K.sqrt(var_pred + eps)
    c1 = 0.01 ** 2
    c2 = 0.03 ** 2
    ssim = (2 * u_true * u_pred + c1) * (2 * std_pred * std_true + c2)
    denom = (u_true ** 2 + u_pred ** 2 + c1) * (var_pred + var_true + c2)
    ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
    return K.mean((1.0 - ssim) / 2.0)
Example #32
    def __call__(self, y_true, y_pred):
        # There are additional parameters for this function
        # Note: some of the 'modes' for edge behavior do not yet have a
        # gradient definition in the Theano tree and cannot be used for
        # learning

        kernel = [self.kernel_size, self.kernel_size]
        y_true = K.reshape(y_true, [-1] + list(self.__int_shape(y_pred)[1:]))
        y_pred = K.reshape(y_pred, [-1] + list(self.__int_shape(y_pred)[1:]))

        patches_pred = self.extract_image_patches(y_pred,
                                                  kernel,
                                                  kernel,
                                                  'valid',
                                                  self.dim_ordering)
        patches_true = self.extract_image_patches(y_true,
                                                  kernel,
                                                  kernel,
                                                  'valid',
                                                  self.dim_ordering)

        # Get mean
        u_true = K.mean(patches_true, axis=-1)
        u_pred = K.mean(patches_pred, axis=-1)
        # Get variance
        var_true = K.var(patches_true, axis=-1)
        var_pred = K.var(patches_pred, axis=-1)
        # Get std dev
        covar_true_pred = K.mean(
            patches_true * patches_pred, axis=-1) - u_true * u_pred

        ssim = (2 * u_true * u_pred + self.c_1) * (
            2 * covar_true_pred + self.c_2)
        denom = (K.square(u_true) + K.square(u_pred) + self.c_1) * (
            var_pred + var_true + self.c_2)
        ssim /= denom  # no need for clipping, c_1 + c_2 make the denom non-zero
        return K.mean((1.0 - ssim) / 2.0)
Example #33
 def norm(self, xs, norm_id):
   mu = K.mean(xs, axis=-1, keepdims=True)
   sigma = K.sqrt(K.var(xs, axis=-1, keepdims=True) + 1e-3)
   xs = self.gs[norm_id] * (xs - mu) / (sigma + 1e-3) + self.bs[norm_id]
   return xs
Example #34
 def _get_mean_and_std(self, x):
     reduction_axes = list(range(K.ndim(x)))
     del reduction_axes[self.axis]
     mean = K.mean(x, axis=reduction_axes)
     std = K.sqrt(K.var(x, axis=reduction_axes) + K.epsilon())
     return mean, std
Example #35
    def main(self, name, opts):
        logging.basicConfig(filename=opts.log_file,
                            format='%(levelname)s (%(asctime)s): %(message)s')
        log = logging.getLogger(name)
        if opts.verbose:
            log.setLevel(logging.DEBUG)
        else:
            log.setLevel(logging.INFO)
        log.debug(opts)

        if opts.seed is not None:
            np.random.seed(opts.seed)

        if not opts.model_files:
            raise ValueError('No model files provided!')

        log.info('Loading model ...')
        K.set_learning_phase(0)
        model = mod.load_model(opts.model_files)

        # Get DNA layer.
        dna_layer = None
        for layer in model.layers:
            if layer.name == 'dna':
                dna_layer = layer
                break
        if not dna_layer:
            raise ValueError('The provided model is not a DNA model!')

        # Create output vector.
        outputs = []
        for output in model.outputs:
            outputs.append(K.reshape(output, (-1, 1)))
        outputs = K.concatenate(outputs, axis=1)

        # Compute gradient of outputs wrt. DNA layer.
        grads = []
        for name in opts.targets:
            if name == 'mean':
                target = K.mean(outputs, axis=1)
            elif name == 'var':
                target = K.var(outputs, axis=1)
            else:
                raise ValueError('Invalid effect size "%s"!' % name)
            grad = K.gradients(target, dna_layer.output)
            grads.extend(grad)
        grad_fun = K.function(model.inputs, grads)

        log.info('Reading data ...')
        nb_sample = dat.get_nb_sample(opts.data_files, opts.nb_sample)
        replicate_names = dat.get_replicate_names(
            opts.data_files[0],
            regex=opts.replicate_names,
            nb_key=opts.nb_replicate)
        data_reader = mod.data_reader_from_model(
            model, outputs=False, replicate_names=replicate_names)
        data_reader = data_reader(opts.data_files,
                                  nb_sample=nb_sample,
                                  batch_size=opts.batch_size,
                                  loop=False,
                                  shuffle=False)

        meta_reader = hdf.reader(opts.data_files, ['chromo', 'pos'],
                                 nb_sample=nb_sample,
                                 batch_size=opts.batch_size,
                                 loop=False,
                                 shuffle=False)

        out_file = h5.File(opts.out_file, 'w')
        out_group = out_file

        def h5_dump(path, data, idx, dtype=None, compression='gzip'):
            if path not in out_group:
                if dtype is None:
                    dtype = data.dtype
                out_group.create_dataset(
                    name=path,
                    shape=[nb_sample] + list(data.shape[1:]),
                    dtype=dtype,
                    compression=compression
                )
            out_group[path][idx:idx+len(data)] = data

        log.info('Computing effects ...')
        progbar = ProgressBar(nb_sample, log.info)
        idx = 0
        for inputs in data_reader:
            if isinstance(inputs, dict):
                inputs = list(inputs.values())
            batch_size = len(inputs[0])
            progbar.update(batch_size)

            # Compute gradients.
            grads = grad_fun(inputs)

            # Slice window at center.
            if opts.dna_wlen:
                for i, grad in enumerate(grads):
                    delta = opts.dna_wlen // 2
                    ctr = grad.shape[1] // 2
                    grads[i] = grad[:, (ctr-delta):(ctr+delta+1)]

            # Aggregate effects in window
            if opts.agg_effects:
                for i, grad in enumerate(grads):
                    if opts.agg_effects == 'mean':
                        grad = grad.mean(axis=1)
                    elif opts.agg_effects == 'wmean':
                        weights = linear_weights(grad.shape[1])
                        grad = np.average(grad, axis=1, weights=weights)
                    elif opts.agg_effects == 'max':
                        grad = grad.max(axis=1)
                    else:
                        tmp = 'Invalid function "%s"!' % (opts.agg_effects)
                        raise ValueError(tmp)
                    grads[i] = grad

            # Write computed effects
            for name, grad in zip(opts.targets, grads):
                h5_dump(name, grad, idx)

            # Store inputs
            if opts.store_inputs:
                for name, value in zip(model.input_names, inputs):
                    h5_dump(name, value, idx)

            # Store positions
            for name, value in next(meta_reader).items():
                h5_dump(name, value, idx)

            idx += batch_size
        progbar.close()

        out_file.close()
        log.info('Done!')

        return 0
Example #36
    def call(self, inputs, mask=None):
        input_shape = K.int_shape(inputs)
        if len(input_shape) != 4 and len(input_shape) != 2:
            raise ValueError('Inputs should have rank ' +
                             str(4) + " or " + str(2) +
                             '; Received input shape:', str(input_shape))

        if len(input_shape) == 4:
            if self.data_format == 'channels_last':
                batch_size, height, width, channels = input_shape
                if batch_size is None:
                    batch_size = -1

                if channels < self.group:
                    raise ValueError('Input channels should be larger than group size' +
                                     '; Received input channels: ' + str(channels) +
                                     '; Group size: ' + str(self.group))

                var_x = K.reshape(inputs, (batch_size,
                                           height,
                                           width,
                                           self.group,
                                           channels // self.group))
                mean = K.mean(var_x, axis=[1, 2, 4], keepdims=True)
                std = K.sqrt(K.var(var_x, axis=[1, 2, 4], keepdims=True) + self.epsilon)
                var_x = (var_x - mean) / std

                var_x = K.reshape(var_x, (batch_size, height, width, channels))
                retval = self.gamma * var_x + self.beta
            elif self.data_format == 'channels_first':
                batch_size, channels, height, width = input_shape
                if batch_size is None:
                    batch_size = -1

                if channels < self.group:
                    raise ValueError('Input channels should be larger than group size' +
                                     '; Received input channels: ' + str(channels) +
                                     '; Group size: ' + str(self.group))

                var_x = K.reshape(inputs, (batch_size,
                                           self.group,
                                           channels // self.group,
                                           height,
                                           width))
                mean = K.mean(var_x, axis=[2, 3, 4], keepdims=True)
                std = K.sqrt(K.var(var_x, axis=[2, 3, 4], keepdims=True) + self.epsilon)
                var_x = (var_x - mean) / std

                var_x = K.reshape(var_x, (batch_size, channels, height, width))
                retval = self.gamma * var_x + self.beta

        elif len(input_shape) == 2:
            reduction_axes = list(range(0, len(input_shape)))
            del reduction_axes[0]
            batch_size, _ = input_shape
            if batch_size is None:
                batch_size = -1

            mean = K.mean(inputs, keepdims=True)
            std = K.sqrt(K.var(inputs, keepdims=True) + self.epsilon)
            var_x = (inputs - mean) / std

            retval = self.gamma * var_x + self.beta
        return retval