def mish(x):
    # Mish activation: x * tanh(softplus(x)), wrapped in a Lambda layer.
    return layers.Lambda(lambda t: t * K.tanh(K.softplus(t)))(x)
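A minimal usage sketch (assuming TensorFlow 2.x, with layers and K imported from tensorflow.keras; layer sizes are arbitrary):

from tensorflow.keras import layers, models

inputs = layers.Input(shape=(16,))
h = layers.Dense(32)(inputs)
h = mish(h)  # the helper above wraps the activation in a Lambda layer
outputs = layers.Dense(1, activation='sigmoid')(h)
model = models.Model(inputs, outputs)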
Example #2
def gelu_tanh(x):
    """GELU activation computed with the tanh approximation."""
    cdf = 0.5 * (1.0 + K.tanh(
        np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x, 3))))
    return x * cdf
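A quick numeric sanity check (a sketch, assuming NumPy and SciPy are available) comparing the tanh approximation against the exact erf-based GELU:

import numpy as np
from scipy.special import erf

x = np.linspace(-4, 4, 9)
approx = 0.5 * x * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3)))
exact = 0.5 * x * (1.0 + erf(x / np.sqrt(2)))
print(np.max(np.abs(approx - exact)))  # small; below 1e-3 on this range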
Example #3
def call(self, inputs):
    # Mish as a custom layer's call(): x * tanh(softplus(x)).
    return inputs * K.tanh(K.softplus(inputs))
Example #4
def call(self, x, **kwargs):
    # Additive attention pooling over the time axis: score each timestep,
    # softmax-normalize the scores, and return the weighted sum.
    et = K.squeeze(K.tanh(K.dot(x, self.W) + self.b), axis=-1)
    at = K.softmax(et)
    at = K.expand_dims(at, axis=-1)
    output = x * at
    return K.sum(output, axis=1)
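The fragment assumes self.W and self.b were created in build(). A minimal self-contained layer along these lines (the class name, weight names, and shapes are assumptions, not the original class; it also assumes a fixed number of timesteps):

import tensorflow as tf
from tensorflow.keras import backend as K

class AttentionPooling(tf.keras.layers.Layer):
    # Hypothetical completion of the snippet above.
    def build(self, input_shape):
        self.W = self.add_weight(name='att_W',
                                 shape=(input_shape[-1], 1),
                                 initializer='glorot_uniform')
        self.b = self.add_weight(name='att_b',
                                 shape=(input_shape[1], 1),
                                 initializer='zeros')
        super().build(input_shape)

    def call(self, x, **kwargs):
        et = K.squeeze(K.tanh(K.dot(x, self.W) + self.b), axis=-1)
        at = K.expand_dims(K.softmax(et), axis=-1)
        return K.sum(x * at, axis=1)  # (batch, timesteps, features) -> (batch, features)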
Example #5
def mish(x):
    # Mish activation: x * tanh(softplus(x)).
    return x * K.tanh(K.softplus(x))
Example #6
def triplet_tanh_pn_loss(y_true, y_pred):
    # Assumes y_pred stacks three scores per sample: [:, 0, 0] for the
    # anchor-positive pair and [:, 1, 0], [:, 2, 0] for the two negatives.
    return K.mean(K.tanh(y_pred[:, 0, 0]) +
                  ((K.constant(1) - K.tanh(y_pred[:, 1, 0])) +
                   (K.constant(1) - K.tanh(y_pred[:, 2, 0]))) / K.constant(2))
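As a rough eager-mode check (a sketch; the y_pred layout is inferred from the indexing above, and the dummy values are arbitrary):

import numpy as np
import tensorflow as tf

# Batch of 4; three stacked scores per sample: [d_pos, d_neg1, d_neg2].
y_pred = tf.constant(np.random.rand(4, 3, 1), dtype=tf.float32)
y_true = tf.zeros((4, 3, 1))  # unused by this loss
print(float(triplet_tanh_pn_loss(y_true, y_pred)))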
Example #7
def gelu(x):
    # Exact erf-based GELU on the TensorFlow backend; tanh approximation otherwise.
    if K.backend() == 'tensorflow':
        return 0.5 * x * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
    return 0.5 * x * (1.0 + K.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * K.pow(x, 3))))
Example #8
def LastLayer(self, x):
    # Bounded output head: scaled tanh in (-1.75, 1.75).
    return 1.75 * K.tanh(x / 100)
Example #9
def call(self, inputs, **kwargs):
    # Identity (ReLU) for positive inputs; alpha * tanh(beta * x)
    # for negative inputs.
    pos = keras.activations.relu(inputs)
    neg = self.alpha * K.tanh(-self.beta * keras.activations.relu(-inputs))
    return pos + neg
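A hypothetical wrapper for the call() above (the class name and the treatment of alpha and beta as fixed, non-learned hyperparameters are assumptions):

from tensorflow import keras
from tensorflow.keras import backend as K

class PenalizedTanh(keras.layers.Layer):
    # Sketch: ReLU for x > 0, alpha * tanh(beta * x) for x <= 0.
    def __init__(self, alpha=0.25, beta=1.0, **kwargs):
        super().__init__(**kwargs)
        self.alpha = alpha
        self.beta = beta

    def call(self, inputs, **kwargs):
        pos = keras.activations.relu(inputs)
        neg = self.alpha * K.tanh(-self.beta * keras.activations.relu(-inputs))
        return pos + neg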
Example #10
def gelu(x):
    """An approximation of gelu.

    See: https://arxiv.org/pdf/1606.08415.pdf
    """
    return 0.5 * x * (1.0 + K.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * K.pow(x, 3))))
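One way to use such a helper (a sketch, assuming tf.keras; the layer width is arbitrary) is to pass the callable directly as a layer activation:

from tensorflow.keras import layers

hidden = layers.Dense(64, activation=gelu)  # gelu as defined above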
Example #11
    def __init__(self, model, upsample_size=UPSAMPLE_SIZE):
        # Builds the trigger-recovery graph: a mask and a pattern are
        # parameterized in tanh space and optimized against the model.
        mask_size = np.ceil(np.array((32, 32), dtype=float) /
                            upsample_size)
        mask_size = mask_size.astype(int)
        self.mask_size = mask_size
        mask = np.zeros(self.mask_size)
        pattern = np.zeros((32, 32, 3))
        mask = np.expand_dims(mask, axis=2)

        mask_tanh = np.zeros_like(mask)
        pattern_tanh = np.zeros_like(pattern)

        # prepare mask related tensors
        self.mask_tanh_tensor = K.variable(mask_tanh)
        mask_tensor_unrepeat = (K.tanh(self.mask_tanh_tensor) \
            / (2 - K.epsilon()) + 0.5)
        mask_tensor_unexpand = K.repeat_elements(
            mask_tensor_unrepeat,
            rep=3,
            axis=2)
        self.mask_tensor = K.expand_dims(mask_tensor_unexpand, axis=0)
        upsample_layer = UpSampling2D(
            size=(upsample_size, upsample_size))
        mask_upsample_tensor_uncrop = upsample_layer(self.mask_tensor)
        uncrop_shape = K.int_shape(mask_upsample_tensor_uncrop)[1:]
        cropping_layer = Cropping2D(
            cropping=((0, uncrop_shape[0] - 32),
                      (0, uncrop_shape[1] - 32)))
        self.mask_upsample_tensor = cropping_layer(
            mask_upsample_tensor_uncrop)
        # self.mask_upsample_tensor = K.round(self.mask_upsample_tensor)
        reverse_mask_tensor = (K.ones_like(self.mask_upsample_tensor) -
                               self.mask_upsample_tensor)

        # prepare pattern related tensors
        self.pattern_tanh_tensor = K.variable(pattern_tanh)
        self.pattern_raw_tensor = (
            (K.tanh(self.pattern_tanh_tensor) / (2 - K.epsilon()) + 0.5) *
            255.0)

        # prepare input image related tensors
        # ignore clip operation here
        # assume input image is already clipped into valid color range
        input_tensor = K.placeholder((None, 32, 32, 3))
        input_raw_tensor = input_tensor

        # IMPORTANT: MASK OPERATION IN RAW DOMAIN
        X_adv_raw_tensor = (
            reverse_mask_tensor * input_raw_tensor +
            self.mask_upsample_tensor * self.pattern_raw_tensor)

        X_adv_tensor = X_adv_raw_tensor

        output_tensor = model(X_adv_tensor)
        y_target_tensor = K.placeholder((None, 43))
        y_true_tensor = K.placeholder((None, 43))

        # categorical_crossentropy expects (y_true, y_pred)
        self.loss_ce = categorical_crossentropy(y_target_tensor, output_tensor)

        # Weights for the six TABOR regularization terms (dotted below).
        self.hyperparameters = K.reshape(K.constant(np.array([1e-2, 1e-5, 1e-7, 1e-8, 0, 1e-2])), shape=(6, 1))
        self.loss_reg = self.build_tabor_regularization(input_raw_tensor,
                                                        model, y_target_tensor,
                                                        y_true_tensor)
        self.loss_reg = K.dot(K.reshape(self.loss_reg, shape=(1, 6)), self.hyperparameters)
        self.loss = K.mean(self.loss_ce) + self.loss_reg
        self.opt = Adam(lr=1e-3, beta_1=0.5, beta_2=0.9)
        self.updates = self.opt.get_updates(
            params=[self.pattern_tanh_tensor, self.mask_tanh_tensor],
            loss=self.loss)
        self.train = K.function(
            [input_tensor, y_true_tensor, y_target_tensor],
            [self.loss_ce, self.loss_reg, self.loss],
            updates=self.updates)
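A heavily hedged usage sketch: the class name Visualizer, the data variables, and the step count are assumptions, and the graph-mode code above expects TF1-style Keras (e.g. tf.compat.v1 with eager execution disabled):

# Hypothetical driver loop for the K.function built above.
visualizer = Visualizer(model)  # assumed class name for the __init__ shown
for step in range(1000):        # step count is arbitrary
    loss_ce, loss_reg, loss = visualizer.train(
        [x_batch, y_true_onehot, y_target_onehot])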
Example #12
def custom_func(x):
    # Scaled tanh; n is a constant captured from the enclosing scope.
    return n * K.tanh(x)
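The snippet closes over n. A minimal sketch that makes the captured constant explicit (the factory name and the value n=3.8 are illustrative assumptions):

from tensorflow.keras import layers
from tensorflow.keras import backend as K

def make_scaled_tanh(n):
    # Factory that bakes the constant n into a scaled-tanh activation.
    def custom_func(x):
        return n * K.tanh(x)
    return custom_func

output_layer = layers.Dense(1, activation=make_scaled_tanh(3.8))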