def get_output(self, train):
     x = self.get_input(train)
     x -= K.mean(x, axis=1, keepdims=True)
     x = K.l2_normalize(x, axis=1)
     pos = K.relu(x)
     neg = K.relu(-x)
     return K.concatenate([pos, neg], axis=1)
Example #2
def margin_loss(y_true, y_pred):
    """Margin loss

    # Arguments
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.

    # Returns
        Tensor with one scalar loss entry per sample.
    """
    lamb, margin = 0.5, 0.1
    return K.sum(y_true * K.square(K.relu(1 - margin - y_pred)) + lamb * (
        1 - y_true) * K.square(K.relu(y_pred - margin)), axis=-1)
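A minimal usage sketch for the loss above, assuming a toy Sequential model whose sigmoid outputs stand in for per-class capsule lengths (the architecture and data are illustrative only):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# Toy model: 10 sigmoid outputs interpreted as per-class scores in [0, 1].
model = Sequential([Dense(10, activation='sigmoid', input_shape=(20,))])
model.compile(optimizer='adam', loss=margin_loss)

# One-hot targets: margin_loss penalises correct-class scores below 1 - margin
# and (down-weighted by lamb) wrong-class scores above margin.
x = np.random.rand(32, 20).astype('float32')
y = np.eye(10)[np.random.randint(0, 10, 32)].astype('float32')
model.train_on_batch(x, y)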
Example #3
 def call(self, x, mask=None):
     input_shape = K.int_shape(x)
     reduction_axes = list(range(len(input_shape)))
     del reduction_axes[self.axis]
     broadcast_shape = [1] * len(input_shape)
     broadcast_shape[self.axis] = input_shape[self.axis]
     alpha_pos = K.reshape(self.alpha_pos, broadcast_shape)
     alpha_neg = K.reshape(self.alpha_neg, broadcast_shape)
     beta_pos = K.reshape(self.beta_pos, broadcast_shape)
     beta_neg = K.reshape(self.beta_neg, broadcast_shape)
     rho_pos = K.reshape(self.rho_pos, broadcast_shape)
     rho_neg = K.reshape(self.rho_neg, broadcast_shape)
     pos = alpha_pos * K.pow(K.relu(x + beta_pos) + K.epsilon(), rho_pos)
     neg = alpha_neg * K.pow(K.relu(-x + beta_neg) + K.epsilon(), rho_neg)
     return pos + neg
 def get_output(self, train):
     input_shape = self.input_shape
     broadcast_shape = [1] * len(input_shape)
     broadcast_shape[self.axis] = input_shape[self.axis]
     X = self.get_input(train)
     pos = K.relu(X)
     a = K.reshape(self.alphas, broadcast_shape)
     neg = a * (X - abs(X)) * 0.5
     return pos + neg
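A quick NumPy check (illustrative values) that the expression above, relu(x) + a * (x - |x|) / 2, matches the usual PReLU form relu(x) + a * min(x, 0):

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.0, 3.0])
a = 0.25
prelu_as_above = np.maximum(x, 0) + a * (x - np.abs(x)) * 0.5
prelu_reference = np.maximum(x, 0) + a * np.minimum(x, 0)
assert np.allclose(prelu_as_above, prelu_reference)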
Example #5
def margin_hinge(y_true, y_pred, margin=0.5):
    # y_pred are the dot product similarities, in interleaved form (positive example, negative example, ...)
    # y_true is simply 1, 0, 1, 0
    signed = 2 * y_pred * (y_true - 0.5) # we do this, just so that y_true is part of the computational graph
    pos = signed[0::2]
    neg = signed[1::2]
    # negative samples are multiplied by -1, so that the sign in the rankSVM objective is flipped below
    rank_hinge_loss = K.mean(K.relu(margin - pos - neg))
    return rank_hinge_loss
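A small sanity-check sketch, assuming the TensorFlow backend (the similarity values are illustrative); a well-separated (positive, negative) pair should give zero hinge loss:

import keras.backend as K

# Interleaved similarities (pos, neg, pos, neg) with labels 1, 0, 1, 0.
y_pred = K.constant([0.9, 0.1, 0.8, 0.2])
y_true = K.constant([1.0, 0.0, 1.0, 0.0])
# pos = [0.9, 0.8], neg (sign-flipped) = [-0.1, -0.2], so margin - pos - neg < 0.
print(K.eval(margin_hinge(y_true, y_pred)))  # 0.0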
Example #6
    def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]
        alpha = K.reshape(self.alpha, broadcast_shape)
        rho = K.reshape(self.rho, broadcast_shape)

        return alpha * K.pow(K.relu(x) + K.epsilon(), rho)
Example #7
File: activations.py Project: Calysto/conx
def relu(x, alpha=0., max_value=None):
    """
    Rectified Linear Unit activation function.

    >>> relu(1)
    1.0
    >>> relu(-1)
    0.0
    """
    return K.eval(K.relu(K.variable(x), alpha, max_value)).tolist()
Example #8
    def compile(self, optimizer, **kwargs):
        qa_model = self.get_qa_model()

        good_similarity = qa_model([self.question, self.answer_good])
        bad_similarity = qa_model([self.question, self.answer_bad])

        loss = merge([good_similarity, bad_similarity],
                     mode=lambda x: K.relu(self.config['margin'] - x[0] + x[1]),
                     output_shape=lambda x: x[0])

        self.prediction_model = Model(input=[self.question, self.answer_good], output=good_similarity, name='prediction_model')
        self.prediction_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer=optimizer, **kwargs)

        self.training_model = Model(input=[self.question, self.answer_good, self.answer_bad], output=loss, name='training_model')
        self.training_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer=optimizer, **kwargs)
    def compile(self, optimizer, **kwargs):
        qa_model = self.get_qa_model()

        good_output = qa_model([self.question, self.answer_good])
        bad_output = qa_model([self.question, self.answer_bad])

        loss = merge([good_output, bad_output],
                     mode=lambda x: K.relu(self.config['margin'] - x[0] + x[1]),
                     output_shape=lambda x: x[0])

        self.training_model = Model(input=[self.question, self.answer_good, self.answer_bad], output=loss)
        self.training_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer=optimizer, **kwargs)

        self.prediction_model = Model(input=[self.question, self.answer_good], output=good_output)
        self.prediction_model.compile(loss='binary_crossentropy', optimizer=optimizer, **kwargs)
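A self-contained sketch of the same loss-as-output pattern, rewritten with the Keras 2 functional API instead of the legacy merge used above (the toy similarity model, shapes, and margin are assumptions): the model's output already is the hinge loss, so it is compiled with an identity loss and fit on dummy targets.

import numpy as np
import keras.backend as K
from keras.layers import Input, Dense, Lambda
from keras.models import Model

q = Input(shape=(8,))
a_good = Input(shape=(8,))
a_bad = Input(shape=(8,))

# Toy "QA model": dot product of projected question and answer.
proj = Dense(4)
def similarity(answer):
    return Lambda(lambda t: K.sum(t[0] * t[1], axis=-1, keepdims=True))([proj(q), proj(answer)])

margin = 0.5
loss_out = Lambda(lambda t: K.relu(margin - t[0] + t[1]))([similarity(a_good), similarity(a_bad)])

training_model = Model([q, a_good, a_bad], loss_out)
training_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer='adam')

dummy_targets = np.zeros((16, 1))  # ignored by the identity loss
training_model.fit([np.random.rand(16, 8)] * 3, dummy_targets, epochs=1, verbose=0)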
Example #10
 def get_output(self, train=False):
     X = self.get_input(train)
     conv_out = K.conv2d(X, self.kernel, strides=self.strides,
                         border_mode='same',
                         dim_ordering=self.dim_ordering,
                         image_shape=self.input_shape,
                         filter_shape=self.kernel_shape)
     if self.dim_ordering == 'th':
         output = conv_out + K.reshape(self.biases, (1, self.nb_filter, 1, 1))
     elif self.dim_ordering == 'tf':
         output = conv_out + K.reshape(self.biases, (1, 1, 1, self.nb_filter))
     else:
         raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
     output = K.relu(output)
     return output
Example #11
def sparse_simpler_asoftmax_loss(y_true, y_pred, scale=30):
    y_true = K.expand_dims(y_true[:, 0], 1)  # ensure y_true has shape (None, 1)
    y_true = K.cast(y_true, 'int32')  # ensure y_true has dtype int32
    batch_idxs = K.arange(0, K.shape(y_true)[0])
    batch_idxs = K.expand_dims(batch_idxs, 1)
    idxs = K.concatenate([batch_idxs, y_true], 1)
    y_true_pred = K.tf.gather_nd(y_pred, idxs)  # target-class scores, gathered with tf.gather_nd
    y_true_pred = K.expand_dims(y_true_pred, 1)
    # expand cos(4*theta) with the quadruple-angle formula
    y_true_pred_margin = 1 - 8 * K.square(y_true_pred) + 8 * K.square(K.square(y_true_pred))
    # the line below is equivalent to min(y_true_pred, y_true_pred_margin)
    y_true_pred_margin = y_true_pred_margin - K.relu(y_true_pred_margin - y_true_pred)
    _Z = K.concatenate([y_pred, y_true_pred_margin], 1)  # used to compute the partition function
    _Z = _Z * scale  # rescale, since the predictions are cosines in [-1, 1]
    logZ = K.logsumexp(_Z, 1, keepdims=True)  # logsumexp keeps the gradient from vanishing
    logZ = logZ + K.log(1 - K.exp(scale * y_true_pred - logZ))  # remove exp(scale * y_true_pred) from Z
    return - y_true_pred_margin * scale + logZ
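An illustrative evaluation on a toy batch, assuming an older multi-backend Keras with the TensorFlow 1.x backend (the K.tf reference above requires it); shapes and values are made up:

import numpy as np
import keras.backend as K

# 3 samples, 5 classes: y_pred are cosine similarities in [-1, 1],
# y_true holds the sparse class index of each sample.
y_pred = K.constant(np.random.uniform(-1, 1, (3, 5)))
y_true = K.constant([[1.], [0.], [4.]])
print(K.eval(sparse_simpler_asoftmax_loss(y_true, y_pred)))  # shape (3, 1)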
Example #12
 def process_ele(i, outer_sum_loss):
     # Get a subtensor from batch
     y_true_one = y_true[i]
     y_pred_one = y_pred[i]
     # Stack margin to a num_class*1 matrix
     margin_stack = tf.reshape(tf.stack([tf.constant(0.1)] * self.num_classes), [self.num_classes, 1])
     # Stack true label to a word_dim*num_class matrix and transpose it
     y_true_one_stack = tf.stack([tf.transpose(y_true_one)] * self.num_classes)
     # Reshape predict from (word_dim,) to (word_dim,1)
     y_pred_one_t = tf.reshape(y_pred_one, [self.word_dim, 1])
     # Calculate loss
     r = margin_stack - tf.matmul(y_true_one_stack, y_pred_one_t) + tf.matmul(self.label_vec_tensor, y_pred_one_t)
     # Summation
     # We did not exclude true label inside summation, so we subtract extra margin
     sum_inner_loss = tf.reduce_sum(K.relu(r)) - margin
     # Return counter++ and accumulated loss
     return tf.add(i, 1), tf.add(outer_sum_loss, sum_inner_loss)
    def call(self, inputs, mask=None):
        cos_m = math.cos(self.m)
        sin_m = math.sin(self.m)
        mm = sin_m * self.m
        threshold = math.cos(math.pi - self.m)
        # inputs:
        # x: features, y_mask: 1-D or one-hot label works as mask
        x = inputs[0]
        y_mask = inputs[1]
        if y_mask.shape[-1]==1:
            y_mask = K.cast(y_mask, tf.int32)
            y_mask = K.reshape(K.one_hot(y_mask, self.class_num),(-1, self.class_num))

        # feature norm
        x = K.l2_normalize(x, axis=1)
        # weights norm
        self.W = K.l2_normalize(self.W, axis=0)

        # cos(theta+m)
        cos_theta = K.dot(x, self.W)
        cos_theta2 = K.square(cos_theta)
        sin_theta2 = 1. - cos_theta2
        sin_theta = K.sqrt(sin_theta2 + K.epsilon())
        cos_tm = self.s * ((cos_theta * cos_m) - (sin_theta * sin_m))

        # this condition controls the theta+m should in range [0, pi]
        #      0<=theta+m<=pi
        #     -m<=theta<=pi-m
        cond_v = cos_theta - threshold
        cond = K.cast(K.relu(cond_v), dtype=tf.bool)
        keep_val = self.s * (cos_theta - mm)
        cos_tm_temp = tf.where(cond, cos_tm, keep_val)

        # mask by label
        y_mask = y_mask + K.epsilon()
        inv_mask = 1. - y_mask
        s_cos_theta = self.s * cos_theta
        output = K.softmax((s_cos_theta * inv_mask) + (cos_tm_temp * y_mask))

        return output
Example #14
def leaky_relu(x):
    return K.relu(x, 0.2)
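A minimal sketch of wiring such a custom activation into a layer (layer sizes are illustrative):

from keras.models import Sequential
from keras.layers import Dense

# Pass the function itself as the activation; Keras applies it to the layer output.
model = Sequential([Dense(64, input_shape=(10,), activation=leaky_relu),
                    Dense(1)])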
Example #15
 def call(self, inputs):
     return K.relu(inputs, alpha=self.alpha)
Example #16
 def loss(y_true, y_pred):
     #         return lam1*K.mean(K.relu(loss1)) + lam2*K.mean(K.relu(loss2)) + lam2*K.mean(K.relu(loss3))
     return lam1 * K.mean(K.relu(loss1)) + lam2 * K.mean(
         K.relu(loss2)) + lam3 * K.mean(K.relu(loss3)) + lam4 * loss4
Example #17
 def bond(bl):
     return tf.add(K.relu(tf.negative(bl)), K.relu(bl - 1.0))
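An illustrative check, assuming bond is available at module level and TensorFlow is imported as tf: the penalty is zero inside [0, 1] and grows linearly outside it.

import keras.backend as K

bl = K.constant([-0.5, 0.0, 0.5, 1.0, 1.5])
print(K.eval(bond(bl)))  # [0.5, 0.0, 0.0, 0.0, 0.5]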
Example #18
 def call(self, x, mask=None):
     t = 0.2
     x = K.sigmoid(x)
     inv_msk = K.relu(x - t, max_value=1)
     return inv_msk
def custom_loss(y_true, y_pred):
	final_loss = 0.
	if dataset.enable_boundingbox:
		obj_true = y_true[...,dataset.num_classes]
		obj_pred = y_pred[...,dataset.num_classes]
		#   (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
		log_weight = 1. + (args.pos_weight - 1.) * obj_true
		obj = (1. - obj_true) * obj_pred + log_weight * (K.log(1. + K.exp(-K.abs(obj_pred))) + K.relu(- obj_pred))

		obj = K.square(obj_pred - obj_true)

		prob = y_pred[...,0:dataset.num_classes]
		# scale predictions so that the class probas of each sample sum to 1
		prob /= K.sum(prob, axis=-1, keepdims=True)
		# clip to prevent NaN's and Inf's
		prob = K.clip(prob, K.epsilon(), 1 - K.epsilon())
		# calc
		loss = y_true[...,0:dataset.num_classes] * K.log(prob) #* class_weights
		cat = -K.sum(loss, -1, keepdims=True)

		reg = K.sum(K.square(y_true[..., dataset.num_classes+1:dataset.num_classes+5] - y_pred[...,dataset.num_classes+1:dataset.num_classes+5]), axis=-1, keepdims=True)

		# if args.best_position_classification:
		# 	mask = K.cast( K.less_equal( y_true[..., dataset.num_classes+5:(dataset.num_classes+6)], model.strides[0] * 1.42 / 2  ), K.floatx())

		mask = K.cast( K.equal( y_true[..., dataset.num_classes:(dataset.num_classes+1)], 1.0  ), K.floatx())

		final_loss = final_loss + obj + K.sum(cat * mask) / K.maximum(K.sum(mask), 1.0) + 100 * K.sum(reg * mask) / K.maximum(K.sum(mask), 1.0)

	if dataset.enable_classification or dataset.enable_segmentation:
		final_loss = final_loss + K.categorical_crossentropy(y_true, y_pred)

	return final_loss
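A small NumPy sketch checking the numerically stable form used for the objectness term above against the naive weighted sigmoid cross-entropy (the pos_weight and test values are illustrative):

import numpy as np

def stable_weighted_ce(x, z, pos_weight):
    # (1 - z) * x + l * (log(1 + exp(-|x|)) + max(-x, 0)), with l = 1 + (pos_weight - 1) * z
    l = 1.0 + (pos_weight - 1.0) * z
    return (1.0 - z) * x + l * (np.log1p(np.exp(-np.abs(x))) + np.maximum(-x, 0.0))

def naive_weighted_ce(x, z, pos_weight):
    p = 1.0 / (1.0 + np.exp(-x))
    return -(pos_weight * z * np.log(p) + (1.0 - z) * np.log(1.0 - p))

for x in (-3.0, 0.5, 4.0):
    for z in (0.0, 1.0):
        assert np.isclose(stable_weighted_ce(x, z, 2.0), naive_weighted_ce(x, z, 2.0))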
Example #20
 def rectifier(x):
     return K.relu(x)
import tensorflow as tf
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
from keras.layers.merge import _Merge
from keras.layers import *
from keras import activations
from keras import initializers
from keras.models import Model,Sequential
import numpy as np
from layers import *

linear, linear_init = activations.linear,       initializers.he_normal()
relu,   relu_init = activations.relu,         initializers.he_normal()
lrelu,  lrelu_init = lambda x: K.relu(x, 0.2),  initializers.he_normal()


def vlrelu(x): return K.relu(x, 0.3)


def G_convblock(
    net,
    num_filter,
    filter_size,
    actv,
    init,
    pad='same',
    use_wscale=True,
    use_pixelnorm=True,
    use_batchnorm=False,
    name=None):
Example #22
def relu6(x):
    """Relu 6
    """
    return K.relu(x, max_value=6.0)
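A minimal usage sketch (the layer and its parameters are illustrative): the bounded activation is simply passed as a callable, as in MobileNet-style blocks.

from keras.layers import Conv2D

conv = Conv2D(32, (3, 3), padding='same', activation=relu6)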
Example #23
 def call(self, x, mask=None):
     pos = K.relu(x)
     neg = K.relu(-x)
     con = K.concatenate([pos, neg], axis=1)
     return K.relu(con)
Example #24
def hinge_D_fake_loss(y_true, y_pred):
    return K.mean(K.relu(1+y_pred))
Example #25
def hinge_D_real_loss(y_true, y_pred):
    return K.mean(K.relu(1-y_pred))
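An illustrative check of the two discriminator hinge losses on constant logits: confidently real logits (>= 1) and confidently fake logits (<= -1) both give zero loss.

import keras.backend as K

real_logits = K.constant([1.5, 2.0])
fake_logits = K.constant([-1.2, -3.0])
print(K.eval(hinge_D_real_loss(None, real_logits)))  # mean(relu(1 - D(x)))    -> 0.0
print(K.eval(hinge_D_fake_loss(None, fake_logits)))  # mean(relu(1 + D(G(z)))) -> 0.0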
Example #26
def capsule_loss(y_true, y_pred):
    return y_true*K.relu(0.9-y_pred)**2 + 0.25*(1-y_true)*K.relu(y_pred-0.1)**2
Example #27
def margin_loss(y_true, y_pred):
    lamb, margin = 0.5, 0.1
    return K.sum(y_true * K.square(K.relu(1 - margin - y_pred)) + lamb * (
        1 - y_true) * K.square(K.relu(y_pred - margin)), axis=-1)
Example #28
def leaky_relu(inputs, alpha=0.1):
    return K.relu(inputs, alpha=alpha)
def vlrelu(x): return K.relu(x, 0.3)


def G_convblock(
Example #30
def relu6(x):
    return K.relu(x, max_value=6)
Example #31
def relu_plus_one(x):
    return K.relu(x) + 1
Example #32
def leaky_relu(x):
    return K.relu(x, 0.2)
Example #33
def leakyCReLU(x):
    x_pos = K.relu(x, .0)
    x_neg = K.relu(-x, .0)
    return K.concatenate([x_pos, x_neg], axis=1)
Example #34
 def loss(y_true, y_pred):
     #         return mean_squared_error(y_true, y_pred) + lam1 * K.mean(K.relu(loss1)) + lam2 * K.mean(K.relu(loss2)) + lam2 * K.mean(K.relu(loss3))
     return mean_squared_error(y_true, y_pred) + lam1 * K.mean(
         K.relu(loss1)) + lam2 * K.mean(K.relu(loss2)) + lam3 * K.mean(
             K.relu(loss3)) + lam4 * loss4
Example #35
 def call(self, inputs):
     inputs -= K.mean(inputs, axis=1, keepdims=True)
     inputs = K.l2_normalize(inputs, axis=1)
     pos = K.relu(inputs)
     neg = K.relu(-inputs)
     return K.concatenate([pos, neg], axis=1)
Example #36
 def poros(poroi, porof):
     return K.relu(tf.negative(porof)) + K.relu(porof - poroi)
def relu6(x):
    return K.relu(x, max_value=6)
Example #38
x_in = Input(shape=(input_size, ))
x = x_in
x_all = list(np.zeros((num_capsule, 1)))
encoders = []
for i in range(num_capsule):
    x_all[i] = Dense(z_dim, activation='relu')(x_in)
    encoders.append(Model(x_in, x_all[i]))

x = Concatenate()(x_all)
x = Reshape((num_capsule, z_dim))(x)
capsule = Capsule(num_classes, z_dim, 3, False)(x)
output = capsule

model = Model(inputs=x_in, outputs=output)
model.compile(
    loss=lambda y_true, y_pred: y_true * K.relu(0.9 - y_pred)**2 + 0.25 *
    (1 - y_true) * K.relu(y_pred - 0.1)**2,
    optimizer='adam',
    metrics=['accuracy'])
#model.summary()
model.load_weights(args.weights)

###################################################################################################
#2 heatmap for coupling coefficients
Y_pred = model.predict(x_test)

coupling_coefficients_value = {}
count = {}
for i in range(len(Y_pred)):
    ind = int(Y_test[i])
    if ind in coupling_coefficients_value.keys():
Example #39
feat = np.load(cwd + "/database/ZINC/features/0.npy")
adj = np.load(cwd + "/database/ZINC/adj/0.npy")

conv_feature_dim = 32
readout_dimensions = 512
inputs = [feat, adj]

features_matrix = Input(feat[0].shape)
adj_matrix = Input(adj[0].shape)

X = Dense(conv_feature_dim, activation='linear',
          use_bias=True)(features_matrix)
_X = Lambda(lambda x: K.batch_dot(x[0], x[1]))([adj_matrix, X])
_X = Add()([_X, X])
conv_output = Lambda(lambda x: K.relu(x))(_X)

for i in range(num_layers - 1):
    _X = Dense(conv_feature_dim, activation='linear',
               use_bias=True)(conv_output)
    _X = Lambda(lambda x: K.batch_dot(x[0], x[1]))([adj_matrix, _X])
    _X = Add()([_X, X])
    conv_output = Lambda(lambda x: K.relu(x))(_X)

x2 = Dense(readout_dimensions, activation='relu', use_bias=True)(conv_output)
_X = Lambda(lambda x: K.sigmoid(K.sum(K.relu(x), axis=1, keepdims=False)))(x2)
x3 = Dense(readout_dimensions, activation='relu')(_X)
_X = Dense(readout_dimensions, activation='relu')(_X)
_X = Dense(readout_dimensions, activation='tanh')(_X)
y = Dense(1, activation='linear')(_X)
Example #40
 def call(self, x, mask=None):
     return K.in_train_phase(K.relu(x, K.random_uniform(K.shape(x), self.l, self.u)),
                             K.relu(x, self.average))
Example #41
 def _relu6(self, x):
     """Relu 6
     """
     return K.relu(x, max_value=6.0)
Example #42
    def call(self, inputs):
        s = K.zeros((K.shape(inputs)[0],self.units))
        init_states = [s,s,s,s,s,s]
        outputs = K.rnn(self.step_do, inputs, init_states)[1]
        '''
        if self.attention:
           self.attention1_1 = self.attention1[:self.units,:]
           self.attention1_2 = self.attention1[self.units:,:]
           for i in range(inputs.shape[1]):
                step_in = inputs[:,i,:]
                h = outputs[:,i,:]

                h_atten=K.tanh(K.dot(h,self.attention1_1) + 0*self.biase1)     ##################tanh
                h_atten=(K.dot(h_atten,self.attention2))

                h_b=K.tanh(K.dot(step_in,self.attention1_2)+0*self.biase2)     ##################tanh
                h_b=(K.dot(h_b,self.attention2_2))

                h_atten = K.tanh(h_atten*h + h_b)
                if i ==0:
                   output_atten = h_atten
                else:
                   output_atten = K.concatenate([output_atten,h_atten])
           outputs = Reshape((inputs.shape[1],self.units))(output_atten)       

        '''
        init_states2 = [s,s,s,s,s,s]
        input2 = K.reverse(inputs,axes=1)
        outputs2 = K.rnn(self.step_do, input2, init_states2)[1]
        '''
        if self.attention:
           self.attention1_1 = self.attention1[:self.units,:]
           self.attention1_2 = self.attention1[self.units:,:]
           for i in range(inputs.shape[1]):
                step_in = inputs[:,i,:]
                h = outputs2[:,i,:]

                h_atten=K.tanh(K.dot(h,self.attention1_1) + 0*self.biase1)     ##################0
                h_atten=(K.dot(h_atten,self.attention2))

                h_b=K.tanh(K.dot(step_in,self.attention1_2)+1*self.biase2)     ##################1
                h_b=(K.dot(h_b,self.attention2_2))

                h_atten = K.tanh(h_atten*h + h_b)
                if i ==0:
                   output_atten = h_atten
                else:
                   output_atten = K.concatenate([output_atten,h_atten])
           outputs2 = Reshape((inputs.shape[1],self.units))(output_atten)   
        '''

        outputs2 = K.reverse(outputs2,axes=1)
        outputs = (K.concatenate([outputs,outputs2]))



        if self.intra_attention:
           self.attention1_1 = self.attention1[:2*self.units,:]
           self.attention1_2 = self.attention1[2*self.units:,:]
           for i in range(inputs.shape[1]):
                step_in = inputs[:,i,:]
                h = outputs[:,i,:]

                h_atten=K.relu(K.dot(h,self.attention1_1) + 0*self.biase1)     ##################0
                h_atten=(K.dot(h_atten,self.attention2))

                h_b=K.relu(K.dot(step_in,self.attention1_2)+0*self.biase2)     ##################1
                h_b=(K.dot(h_b,self.attention2_2))

                h_atten = K.tanh(1*h_atten*h + 1*h_b)
                if i ==0:
                   output_atten = h_atten
                else:
                   output_atten = K.concatenate([output_atten,h_atten])
           outputs = Reshape((inputs.shape[1],2*self.units))(output_atten)   
        return outputs
Example #43
 def _hard_swish(self, x):
     """Hard swish
     """
     return x * K.relu(x + 3.0, max_value=6.0) / 6.0
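A standalone sketch of the same hard-swish expression, x * relu6(x + 3) / 6, with a few spot checks (values chosen for illustration):

import keras.backend as K

def hard_swish(x):
    return x * K.relu(x + 3.0, max_value=6.0) / 6.0

# hard_swish(-3) = 0, hard_swish(0) = 0, hard_swish(3) = 3
print(K.eval(hard_swish(K.constant([-3.0, 0.0, 3.0]))))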
Example #44
 def call(self, x, mask=None):
     x -= K.mean(x, axis=1, keepdims=True)
     x = K.l2_normalize(x, axis=1)
     pos = K.relu(x)
     neg = K.relu(-x)
     return K.concatenate([pos, neg], axis=1)
Example #45
def margin_loss(y_true, y_pred):
    lamb, margin = 0.5, 0.1
    return K.sum(y_true * K.square(K.relu(1 - margin - y_pred)) + lamb *
                 (1 - y_true) * K.square(K.relu(y_pred - margin)),
                 axis=-1)
Example #46
import tensorflow
from keras.backend import relu  # assumed imports; the original snippet's imports are not shown

def step(x):
    x = tensorflow.sign(tensorflow.sign(x) + 0.1)  # 1 for x >= 0, -1 for x < 0
    return relu(x, alpha=0., max_value=1, threshold=0.)  # clip to a 0/1 step
Example #47
def Relu_advanced(x):
    return K.relu(x, max_value=1.0)
Example #48
 def antirectifier(x):
     x -= K.mean(x, axis=1, keepdims=True)
     x = K.l2_normalize(x, axis=1)
     pos = K.relu(x)
     neg = K.relu(-x)
     return K.concatenate([pos, neg], axis=1)
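A minimal sketch of plugging the function above into a Lambda layer, assuming it is available at module level; the output_shape helper is an assumption reflecting that the pos/neg concatenation doubles the feature axis.

from keras.layers import Input, Dense, Lambda
from keras.models import Model

def antirectifier_output_shape(input_shape):
    shape = list(input_shape)
    shape[-1] *= 2  # pos and neg halves are concatenated
    return tuple(shape)

inp = Input(shape=(256,))
h = Dense(128)(inp)
h = Lambda(antirectifier, output_shape=antirectifier_output_shape)(h)
out = Dense(10, activation='softmax')(h)
model = Model(inp, out)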
Example #49
def relu6(x):
    from keras import backend as K
    return K.relu(x)
Example #50
 def call(self, x, mask=None):
     x -= K.mean(x, axis=1, keepdims=True)
     x = K.l2_normalize(x, axis=1)
     pos = K.relu(x)
     neg = K.relu(-x)
     return K.concatenate([pos, neg], axis=1)
def relu_neg(x):
    return K.relu(x + 1) - 1
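An illustrative check of relu_neg: values below -1 are clamped to -1 while anything above is passed through unchanged.

import keras.backend as K

print(K.eval(relu_neg(K.constant([-3.0, -1.0, 0.5, 2.0]))))  # [-1., -1., 0.5, 2.]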