Example #1
    def call(self, inputs, **kwargs):
        x = self.conv1_1(inputs)
        x = self.norm1_1(x)
        x = self.conv1_2(x)
        x = self.norm1_2(x)
        x = self.maxpool1(x)

        skip = self.conv2_skip(x)
        x = self.conv2_1(x)
        x = self.norm2_1(x)
        x = self.conv2_2(x)
        x = self.norm2_2(x)
        x = K.relu(skip + x)
        x = self.maxpool2(x)

        skip = self.conv3_skip(x)
        x = self.conv3_1(x)
        x = self.norm3_1(x)
        x = self.conv3_2(x)
        x = self.norm3_2(x)
        x = K.relu(skip + x)
        x = self.maxpool3(x)

        skip = self.conv4_skip(x)
        x = self.conv4_1(x)
        x = self.norm4_1(x)
        x = self.conv4_2(x)
        x = self.norm4_2(x)
        x = K.relu(skip + x)
        x = self.maxpool4(x)
        return x
Example #2
 def call(self, inputs, mask=None):
     pos = K.relu(inputs)
     if K.backend() == 'theano':
         neg = (K.pattern_broadcast(self.alpha, self.param_broadcast) *
                (inputs - math_ops.abs(inputs)) * 0.5)
     else:
         neg = -self.alpha * K.relu(-inputs)
     return pos + neg
Example #3
 def call(self, inputs, mask=None):
   pos = K.relu(inputs)
   if K.backend() == 'theano':
     neg = (
         K.pattern_broadcast(self.alpha, self.param_broadcast) *
         (inputs - math_ops.abs(inputs)) * 0.5)
   else:
     neg = -self.alpha * K.relu(-inputs)
   return pos + neg
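
The pos/neg decomposition in the two snippets above is the parametric ReLU written out by hand: f(x) = x for x > 0 and alpha * x otherwise. A minimal check of that equivalence, assuming a scalar alpha (illustrative only, not part of the source):

import numpy as np
from tensorflow.keras import backend as K

x = K.constant([-2.0, -0.5, 0.0, 1.5])
alpha = 0.1
decomposed = K.relu(x) + (-alpha * K.relu(-x))
# the hand-written decomposition matches K.relu's built-in leaky form
assert np.allclose(K.eval(decomposed), K.eval(K.relu(x, alpha=alpha)))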
Example #4
def discriminative_instance_loss(y_true,
                                 y_pred,
                                 delta_v=0.5,
                                 delta_d=1.5,
                                 gamma=1e-3):
    """Discriminative loss between an output tensor and a target tensor.

    Args:
        y_true: A tensor of the same shape as y_pred.
        y_pred: A tensor of the vector embedding

    Returns:
        tensor: Output tensor.
    """
    def temp_norm(ten, axis=None):
        if axis is None:
            axis = 1 if K.image_data_format(
            ) == 'channels_first' else K.ndim(ten) - 1
        return K.sqrt(K.epsilon() + K.sum(K.square(ten), axis=axis))

    rank = K.ndim(y_pred)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else rank - 1
    axes = [x for x in list(range(rank)) if x != channel_axis]

    # Compute variance loss
    cells_summed = tf.tensordot(y_true, y_pred, axes=[axes, axes])
    n_pixels = K.cast(tf.count_nonzero(y_true, axis=axes),
                      dtype=K.floatx()) + K.epsilon()
    n_pixels_expand = K.expand_dims(n_pixels, axis=1) + K.epsilon()
    mu = tf.divide(cells_summed, n_pixels_expand)

    delta_v = K.constant(delta_v, dtype=K.floatx())
    mu_tensor = tf.tensordot(y_true, mu, axes=[[channel_axis], [0]])
    L_var_1 = y_pred - mu_tensor
    L_var_2 = K.square(K.relu(temp_norm(L_var_1) - delta_v))
    L_var_3 = tf.tensordot(L_var_2, y_true, axes=[axes, axes])
    L_var_4 = tf.divide(L_var_3, n_pixels)
    L_var = K.mean(L_var_4)

    # Compute distance loss
    mu_a = K.expand_dims(mu, axis=0)
    mu_b = K.expand_dims(mu, axis=1)

    diff_matrix = tf.subtract(mu_b, mu_a)
    L_dist_1 = temp_norm(diff_matrix)
    L_dist_2 = K.square(
        K.relu(K.constant(2 * delta_d, dtype=K.floatx()) - L_dist_1))
    diag = K.constant(0, dtype=K.floatx()) * tf.diag_part(L_dist_2)
    L_dist_3 = tf.matrix_set_diag(L_dist_2, diag)
    L_dist = K.mean(L_dist_3)

    # Compute regularization loss
    L_reg = gamma * temp_norm(mu)
    L = L_var + L_dist + K.mean(L_reg)

    return L
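
A minimal usage sketch, not taken from the source project: since Keras accepts any callable with the (y_true, y_pred) signature as a loss, the default margins can be overridden with functools.partial; `embedding_model` is a placeholder name here.

from functools import partial

loss_fn = partial(discriminative_instance_loss, delta_v=0.5, delta_d=1.5, gamma=1e-3)
embedding_model.compile(optimizer='adam', loss=loss_fn)  # `embedding_model` is a placeholder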
Example #5
 def call(self, inputs, **kwargs):
     inputs_real, inputs_imag = tf.math.real(inputs), tf.math.imag(inputs)
     real = K.relu(inputs_real,
                   max_value=self.max_value_real,
                   threshold=self.threshold_real,
                   alpha=self.alpha_real)
     imag = K.relu(inputs_imag,
                   max_value=self.max_value_imag,
                   threshold=self.threshold_imag,
                   alpha=self.alpha_imag)
     return tf.complex(real, imag)
Example #6
 def call(self, inputs):
   # alpha is used for leaky relu slope in activations instead of
   # negative_slope.
   return K.relu(inputs,
                 alpha=self.negative_slope,
                 max_value=self.max_value,
                 threshold=self.threshold)
Example #7
 def call(self, inputs):
     # alpha is used for leaky relu slope in activations instead of
     # negative_slope.
     return K.relu(inputs,
                   alpha=self.negative_slope,
                   max_value=self.max_value,
                   threshold=self.threshold)
Example #8
def relu(x, alpha=0., max_value=None, threshold=0):
    """Rectified Linear Unit.

  The Rectified Linear Unit function is:
  x , if x>0 else 0 (for default parameter values)
  
  In the case that parameters are given values:
  `f(x) = max_value` for `x >= max_value`,
  `f(x) = x` for `threshold <= x < max_value`,
  `f(x) = alpha * (x - threshold)` otherwise.

  Example usage:
  ```python3
  model.add(Convolution2D(20,kernel_size = 5, padding = 'same', input_shape = input_shape))
  model.add(Activation("relu"))
  ```

  Arguments:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: float. Saturation threshold(upper limit) (default = `None`).
      threshold: float. Threshold value(lower limit) for thresholded activation.

  Returns:
      A tensor of the same shape as that of input x each element of which has undergone the ReLU activation.
  """
    return K.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
Example #9
 def true_branch_renorm():
   # We apply epsilon as part of the moving_stddev to mirror the training
   # code path.
   moving_stddev = _do_update(self.moving_stddev,
                              math_ops.sqrt(new_variance + self.epsilon))
   return self._assign_new_value(
       self.moving_variance,
       # Apply relu in case floating point rounding causes it to go
       # negative.
       K.relu(moving_stddev * moving_stddev - self.epsilon))
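
The relu in this update is purely a numerical guard, as the inline comment says. A tiny illustration of the effect (assumed values, not from the source): recovering the variance from the stored stddev can land slightly below zero through float32 rounding, and clamping at zero keeps moving_variance valid.

import numpy as np

eps = np.float32(1e-3)
variance = np.float32(1e-7)
moving_stddev = np.float32(np.sqrt(variance + eps))
recovered = moving_stddev * moving_stddev - eps
print(recovered)                    # can land a hair below zero in float32
print(np.maximum(recovered, 0.0))   # the relu clamps it back to >= 0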
Example #10
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay *
                             K.cast(self.iterations, K.dtype(self.decay))))

        t = K.cast(self.iterations, K.floatx()) + 1
        beta_1_t = K.pow(self.beta_1, t)
        beta_2_t = K.pow(self.beta_2, t)
        rho = 2 / (1 - self.beta_2) - 1
        rho_t = rho - 2 * t * beta_2_t / (1 - beta_2_t)
        r_t = K.sqrt(
            K.relu(rho_t - 4) * K.relu(rho_t - 2) * rho / ((rho - 4) *
                                                           (rho - 2) * rho_t))
        flag = K.cast(rho_t > 4, K.floatx())

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            mhat_t = m_t / (1 - beta_1_t)
            vhat_t = K.sqrt(v_t / (1 - beta_2_t))
            p_t = p - lr * mhat_t * (flag * r_t / (vhat_t + self.epsilon) +
                                     (1 - flag))

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates
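
The `rho_t`/`r_t` terms and the `flag` gate implement a Rectified-Adam-style variance rectification: when rho_t <= 4 the adaptive scaling is skipped and a plain bias-corrected momentum step is taken. The enclosing optimizer class is not shown in the snippet, so the name `RAdam` below is purely hypothetical, as are `model`, `x_train`, and `y_train`; this is only a usage sketch.

# `RAdam` is a hypothetical name for the optimizer class defining
# get_updates() above; `model`, `x_train`, `y_train` are placeholders.
model.compile(optimizer=RAdam(lr=1e-3), loss='categorical_crossentropy')
model.fit(x_train, y_train, epochs=10)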
Example #11
    def call(self, x, mask=None):
        x = K.expand_dims(x, axis=2)
        x = tf.nn.separable_conv2d(x,
                                   self.depthwise_w,
                                   self.pointwise_w,
                                   strides=(1, 1, 1, 1),
                                   padding="SAME")
        x += self.bias
        x = K.relu(x)
        outputs = K.squeeze(x, axis=2)

        return outputs
Example #12
    def get_pairwise_model(self):
        if self.pairwise_model is None:
            if new_Para.param.pairwise and self.model is not None:  # if using a pairwise objective function
                mashup_id_input = Input(shape=(1, ),
                                        dtype='int32',
                                        name='mashup_id_input')
                api_id_input = Input(shape=(1, ),
                                     dtype='int32',
                                     name='api_id_input')
                neg_api_id_input = Input(shape=(1, ),
                                         dtype='int32',
                                         name='neg_api_id_input')
                mashup_slt_apis_input = Input(
                    shape=(new_Para.param.slt_item_num, ),
                    dtype='int32',
                    name='slt_api_ids_input')
                if self.old_new == 'new':
                    pos_ratings = self.model(
                        [mashup_id_input, api_id_input, mashup_slt_apis_input])
                    neg_ratings = self.model([
                        mashup_id_input, neg_api_id_input,
                        mashup_slt_apis_input
                    ])  # plus an extra negative-example api id input
                elif self.old_new == 'old':
                    pos_ratings = self.model([mashup_id_input, api_id_input])
                    neg_ratings = self.model(
                        [mashup_id_input, neg_api_id_input])

                loss = Lambda(
                    lambda x: K.relu(new_Para.param.margin + x[0] - x[1]),
                    name='sub_result')([neg_ratings, pos_ratings])

                # Note the input format!
                if self.old_new == 'new':
                    self.pairwise_model = Model(inputs=[
                        mashup_id_input, api_id_input, mashup_slt_apis_input,
                        neg_api_id_input
                    ],
                                                outputs=loss)
                elif self.old_new == 'old':
                    self.pairwise_model = Model(inputs=[
                        mashup_id_input, api_id_input, neg_api_id_input
                    ],
                                                outputs=loss)

                for layer in self.pairwise_model.layers:
                    print(layer.name)
                # # The same object is reused!
                # print(self.pairwise_model.get_layer('predict_model'),id(self.pairwise_model.get_layer('predict_model')))
                # print(self.model,id(self.model))
        return self.pairwise_model
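
Because the model built above outputs the hinge value relu(margin + neg - pos) directly, a common way to train it is to minimize that output with dummy targets. This is only a sketch under that assumption, not code from the source; `recommender` and the input arrays are placeholders.

pairwise_model = recommender.get_pairwise_model()  # `recommender` is a placeholder instance
pairwise_model.compile(optimizer='adam',
                       loss=lambda y_true, y_pred: K.mean(y_pred))
# pairwise_model.fit([mashup_ids, api_ids, slt_api_ids, neg_api_ids],
#                    np.zeros(len(mashup_ids)), batch_size=32)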
Example #13
def relu(x, alpha=0., max_value=None):
    """Rectified Linear Unit.

  Arguments:
      x: Input tensor.
      alpha: Slope of the negative part. Defaults to zero.
      max_value: Maximum value for the output.

  Returns:
      The (leaky) rectified linear unit activation: `x` if `x > 0`,
        `alpha * x` if `x < 0`. If `max_value` is defined, the result
        is truncated to this value.
  """
    return K.relu(x, alpha=alpha, max_value=max_value)
Example #14
 def call(self, x):
     if self.mode == MODE_VISIBLE_BERNOULLI:
         # Sample hidden units: compare uniform noise against the sigmoid
         # activations and cast the boolean outcome to floats.
         return K.cast(
             K.less(
                 K.random_uniform(shape=(self.hps['batch_size'],
                                         x.shape[1])),  #?
                 K.sigmoid(K.dot(x, self.rbm_weight) + self.hidden_bias)),
             K.floatx())
     elif self.mode == MODE_VISIBLE_GAUSSIAN:
         return K.cast(
             K.less(
                 K.random_uniform(shape=(self.hps['batch_size'],
                                         x.shape[1])),
                 K.relu(K.dot(x, self.rbm_weight) + self.hidden_bias)),  #?
             K.floatx())
Example #15
 def variance_update():
   """Update self.moving_variance with the most recent data point."""
   if self.renorm:
     # We apply epsilon as part of the moving_stddev to mirror the training
     # code path.
     moving_stddev = self._assign_moving_average(
         self.moving_stddev, math_ops.sqrt(variance + self.epsilon),
         momentum, inputs_size)
     return self._assign_new_value(
         self.moving_variance,
         # Apply relu in case floating point rounding causes it to go
         # negative.
         K.relu(moving_stddev * moving_stddev - self.epsilon))
   else:
     return self._assign_moving_average(self.moving_variance, variance,
                                        momentum, inputs_size)
Example #16
    def call(self, x):

        row = []
        col = []

        # form all pairwise combinations of the features
        for r, c in combinations(x, 2):  # [field * (field - 1)] / 2
            row.append(r)
            col.append(c)

        p = K.concatenate(
            row,
            axis=1)  # [batch_size, [field * (field - 1)] / 2, embedding_size]
        q = K.concatenate(col, axis=1)

        inner_product = p * q  # element-wise multiplication
        # add non-linearity and apply the activation
        attention_tmp = K.relu(
            K.bias_add(K.dot(inner_product, self.attention_W),
                       self.attention_b))
        # [batch_size, [field * (field - 1)] / 2, embedding_size] * [embedding_size, attention_units]  = > [batch_size, [field * (field - 1)] / 2, attention_units]

        # context vector
        attention_tmp_dot = K.dot(
            attention_tmp,
            self.projection_h)  # [batch_size, [field * (field - 1)] / 2, 1]

        # The softmax is computed within each sample; the sum runs over all
        # feature pairs of that sample.
        attention_weight = K.softmax(
            attention_tmp_dot, axis=1
        )  # equivalent to K.exp(attention_tmp_dot) / K.sum(K.exp(attention_tmp_dot), axis=1, keepdims=True)
        # [batch_size, [field * (field - 1)] / 2, 1]

        # weight the inner products by the attention weights
        attention_output = K.sum(inner_product * attention_weight,
                                 axis=1)  # [batch_size, embedding_size]

        # apply dropout
        attention_output = K.dropout(
            attention_output,
            self.dropout_rate)  # [batch_size, embedding_size]

        # equivalent to a dense layer
        afm_out = K.dot(attention_output, self.projection_p)  # [batch_size, 1]

        return afm_out
Example #17
    def call(self, inputs, mask=None):

        cos_m = math.cos(self.m)
        sin_m = math.sin(self.m)
        mm = sin_m * self.m
        threshold = math.cos(math.pi - self.m)

        # features
        X = inputs[0]
        # 1-D or one-hot label works as mask
        Y_mask = inputs[1]
        # If Y_mask is not in one-hot form, convert it to one-hot form.
        if Y_mask.shape[-1] == 1:
            Y_mask = K.cast(Y_mask, tf.int32)
            Y_mask = K.reshape(K.one_hot(Y_mask, self.class_num),
                               (-1, self.class_num))

        X_normed = K.l2_normalize(X, axis=1)  # L2 Normalized X
        W_normed = K.l2_normalize(self.W, axis=0)  # L2 Normalized Weights

        # cos(theta + m)
        cos_theta = K.dot(X_normed, W_normed)  # matrix multiplication
        cos_theta2 = K.square(cos_theta)
        sin_theta2 = 1. - cos_theta2
        sin_theta = K.sqrt(sin_theta2 + K.epsilon())
        cos_tm = self.s * ((cos_theta * cos_m) - (sin_theta * sin_m))

        # This condition controls the theta + m should in range [0, pi]
        #   0 <= theta + m < = pi
        #   -m <= theta <= pi - m
        cond_v = cos_theta - threshold
        cond = K.cast(K.relu(cond_v), dtype=tf.bool)
        keep_val = self.s * (cos_theta - mm)
        cos_tm_temp = tf.where(cond, cos_tm, keep_val)

        # mask by label
        # Y_mask =+ K.epsilon() # Why???
        inv_mask = 1. - Y_mask
        s_cos_theta = self.s * cos_theta

        output = K.softmax((s_cos_theta * inv_mask) + (cos_tm_temp * Y_mask))

        return output
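
The layer computes cos(theta + m) from cos(theta) via the angle-addition identity rather than calling acos. A quick standalone check of that identity (illustrative values only):

import numpy as np

theta, m = 0.7, 0.5
expanded = np.cos(theta) * np.cos(m) - np.sin(theta) * np.sin(m)
assert np.isclose(expanded, np.cos(theta + m))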
Example #18
def relu(x, alpha=0., max_value=None, threshold=0):
    """Rectified Linear Unit.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:
  `f(x) = max_value` for `x >= max_value`,
  `f(x) = x` for `threshold <= x < max_value`,
  `f(x) = alpha * (x - threshold)` otherwise.

  Arguments:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: float. Saturation threshold.
      threshold: float. Threshold value for thresholded activation.

  Returns:
      A tensor.
  """
    return K.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
Example #19
def relu(x, alpha=0., max_value=None, threshold=0):
  """Rectified Linear Unit.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:
  `f(x) = max_value` for `x >= max_value`,
  `f(x) = x` for `threshold <= x < max_value`,
  `f(x) = alpha * (x - threshold)` otherwise.

  Arguments:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: float. Saturation threshold.
      threshold: float. Threshold value for thresholded activation.

  Returns:
      A tensor.
  """
  return K.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
Example #20
 def call(self, inputs, training=None, mask=None):
     identityInput = K.identity(inputs)
     stacks = K.array_ops.unstack(inputs, axis=1)
     ### [b,n]
     tempList = []
     for oneTimeStepTensor in stacks:
         ### [b,1]
         bTensor = self.dense0(oneTimeStepTensor)
         bTensor = K.relu(bTensor)
         bTensor = self.dense1(bTensor)
         tempList.append(bTensor)
     ### [b,t,1]
     stackedTensor = K.array_ops.stack(tempList, axis=1)
     softMaxTensor = K.softmax(stackedTensor, axis=1)
     ###[b,t,1]
     weightEdTensor = K.math_ops.multiply(identityInput, softMaxTensor)
     unstack = K.array_ops.unstack(weightEdTensor, axis=1)
     temp1List = []
     for oneTensor in unstack:
         temp1List.append(oneTensor)
     return K.math_ops.add_n(temp1List)
Example #21
def relu(x, alpha=0., max_value=None, threshold=0):
    """Applies the rectified linear unit activation function.

  With default values, this returns the standard ReLU activation:
  `max(x, 0)`, the element-wise maximum of 0 and the input tensor.

  Modifying default parameters allows you to use non-zero thresholds,
  change the max value of the activation,
  and to use a non-zero multiple of the input for values below the threshold.

  For example:

  >>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32)
  >>> tf.keras.activations.relu(foo).numpy()
  array([ 0.,  0.,  0.,  5., 10.], dtype=float32)
  >>> tf.keras.activations.relu(foo, alpha=0.5).numpy()
  array([-5. , -2.5,  0. ,  5. , 10. ], dtype=float32)
  >>> tf.keras.activations.relu(foo, max_value=5).numpy()
  array([0., 0., 0., 5., 5.], dtype=float32)
  >>> tf.keras.activations.relu(foo, threshold=5).numpy()
  array([-0., -0.,  0.,  0., 10.], dtype=float32)

  Args:
      x: Input `tensor` or `variable`.
      alpha: A `float` that governs the slope for values lower than the
        threshold.
      max_value: A `float` that sets the saturation threshold (the largest value
        the function will return).
      threshold: A `float` giving the threshold value of the activation function
        below which values will be damped or set to zero.

  Returns:
      A `Tensor` representing the input tensor,
      transformed by the relu activation function.
      Tensor will be of the same shape and dtype of input `x`.
  """
    return backend.relu(x,
                        alpha=alpha,
                        max_value=max_value,
                        threshold=threshold)
Example #22
 def call(self, inputs, mask=None):
   return K.relu(inputs, max_value=self.alpha)
Example #23
 def call(self, inputs):
   return K.relu(inputs, alpha=self.alpha)
Example #24
 def call(self, inputs):
   return K.relu(inputs) + self.bias
Example #25
 def call(self, inputs, mask=None):
   pos = K.relu(inputs)
   neg = -self.alpha * K.relu(-inputs)
   return pos + neg
Example #26
def relu(x, alpha=0., max_value=None):
    return K.relu(x, alpha=alpha, max_value=max_value)
Example #27
 def call(self, inputs):
   pos = K.relu(inputs)
   neg = -self.alpha * K.relu(-inputs)
   return pos + neg
Example #28
 def call(self, inputs):
     pos = backend.relu(inputs)
     neg = -self.alpha * backend.relu(-inputs)
     return pos + neg
Example #29
 def call(self, inputs):
     return K.relu(inputs, alpha=self.alpha)
Example #30
 def bounded_relu(x):
     return K.relu(x, max_value=1)
Example #31
def relu6(x):
    return K.relu(x, max_value=6)
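
A common reason for defining relu6 as a named function like this is so it can be passed around as a custom activation and re-registered when a saved model is loaded. A hedged sketch follows; the layer width and the model path are placeholders, not from the source.

from tensorflow.keras import layers, models

hidden = layers.Dense(64, activation=relu6)
# restored = models.load_model('model.h5', custom_objects={'relu6': relu6})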
Example #32
def relu6(x):
  return K.relu(x, max_value=6)
Example #33
File: core.py  Project: linxigal/tfos
 def antirectifier(x):
     x -= K.mean(x, axis=1, keepdims=True)
     x = K.l2_normalize(x, axis=1)
     pos = K.relu(x)
     neg = K.relu(-x)
     return K.concatenate([pos, neg], axis=1)
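
antirectifier keeps both the positive and the negative parts after L2 normalization, so its output has twice as many features as its input. A hedged usage sketch with the backend function above wrapped in a Lambda layer; `inputs` and the layer width are placeholders.

from tensorflow.keras import layers

x = layers.Dense(256)(inputs)          # `inputs` is a placeholder tensor
x = layers.Lambda(antirectifier)(x)    # output shape: (batch, 512)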
Example #34
def relu6(x, alpha=0., max_value=None, threshold=0):
    return K.relu(x, alpha=alpha, max_value=6, threshold=threshold)
Example #35
 def call(self, inputs):
   neg = -self.alpha * K.relu(-inputs + self.threshold)
   pos = self.beta * K.relu(inputs - self.threshold)
   return pos + neg + self.bias
Example #36
def relu(x, alpha=0., max_value=None):
  return K.relu(x, alpha=alpha, max_value=max_value)
Example #37
 def call(self, inputs):
     return backend.relu(inputs, alpha=self.alpha)