Example No. 1
 def call(self, inputs, **kwargs):
     input_dim = inputs.get_shape().as_list()[-1]
     kernel_shape = [input_dim, self.units]
     mask, imask = self.generate_bernoulli_matrix_imatrix(kernel_shape)
     mask_sign = K.softsign(self.kernel * mask)
     imask_sign = K.softsign(self.kernel * imask)
     weight = self.c * imask_sign + \
              self.itau * (self.kernel + self.c * self.tau * mask_sign)
     output = K.dot(inputs, weight)
     if self.use_bias:
         output = K.bias_add(output, self.bias, data_format='channels_last')
     if self.activation is not None:
         output = self.activation(output)
     return output
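A standalone sketch of the softsign-based weight reparameterisation used in call() above. The Bernoulli mask, its complement, and the scalars c, tau and itau are assumptions standing in for the layer's own state (generate_bernoulli_matrix_imatrix is not shown in the example):

import numpy as np
from tensorflow.keras import backend as K

kernel = K.constant(np.random.randn(64, 32).astype('float32'))             # [input_dim, units]
mask = K.constant(np.random.binomial(1, 0.5, (64, 32)).astype('float32'))  # Bernoulli 0/1 mask
imask = 1.0 - mask                                                          # assumed complement of the mask
c, tau, itau = 0.1, 2.0, 0.5                                                # illustrative scalar values
weight = c * K.softsign(kernel * imask) + \
         itau * (kernel + c * tau * K.softsign(kernel * mask))
print(K.eval(weight).shape)  # (64, 32)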
Example No. 2
 def dense(x, w, b, act):
     x = K.dot(x, w)
     if b is not None:  # explicit None check; the truth value of a tensor is ambiguous
         x = K.bias_add(x, b)
     act = act.lower().strip()
     if act == 'softmax':
         x = K.softmax(x)
     elif act == 'elu':
         x = K.elu(x)
     elif act == 'gelu':
         # tanh approximation of GELU
         x = 0.5 * x * (1 + K.tanh(
             np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x, 3))))
     elif act == 'selu':
         alpha = 1.6732632423543772848170429916717
         scale = 1.0507009873554804934193349852946
         x = scale * K.elu(x, alpha)
     elif act == 'softplus':
         x = K.softplus(x)
     elif act == 'softsign':
         x = K.softsign(x)
     elif act == 'relu':
         x = K.relu(x)
     elif act == 'leaky_relu':
         x = K.relu(x, alpha=0.01)
     elif act == 'tanh':
         x = K.tanh(x)
     elif act == 'sigmoid':
         x = K.sigmoid(x)
     elif act == 'hard_sigmoid':
         x = K.hard_sigmoid(x)
     return x
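A minimal usage sketch for the dense() helper above, assuming the tf.keras backend is imported as K and NumPy as np (both are required by the function itself):

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.random.randn(2, 4).astype('float32'))  # batch of 2 samples, 4 features
w = K.constant(np.random.randn(4, 3).astype('float32'))  # weight matrix
b = K.constant(np.zeros(3, dtype='float32'))              # bias vector
y = dense(x, w, b, 'softsign')
print(K.eval(y).shape)  # (2, 3)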
Example No. 3
 def call(self, inputs, **kwargs):
     input_dim = inputs.get_shape().as_list()[-1]
     kernel_shape = self.kernel_size + (input_dim, self.filters)
     mask, imask = self.generate_bernoulli_matrix_imatrix(kernel_shape)
     mask_sign = K.softsign(self.kernel * mask)
     imask_sign = K.softsign(self.kernel * imask)
     weight = self.c * imask_sign + \
              self.itau * (self.kernel + self.c * self.tau * mask_sign)
     outputs = K.conv2d(
         inputs,
         weight,
         strides=self.strides,
         padding=self.padding)
     if self.use_bias:
         outputs = K.bias_add(outputs, self.bias)
     if self.activation is not None:
         return self.activation(outputs)
     return outputs
Example No. 4
def softsign(x):
    """
    Softsign activation function.

    >>> softsign(1)
    0.5
    >>> softsign(-1)
    -0.5
    """
    return K.eval(K.softsign(K.variable(x))).tolist()
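The same helper also works for array-like input, since K.variable accepts lists; a quick check:

print(softsign([1.0, -3.0]))  # [0.5, -0.75]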
Example No. 5
    def _build_impl_impl(self, input):
        """Internal method. Implements building the unit itself using shared_layer().
        
        Arguments:
        - input -- tensor; all inputs to the model
        """

        # Limit how much we can move
        move = self.shared_layer(
            keras.layers.Lambda,
            (
                (
                    lambda x: K.softsign(x)  # -1. .. 1. {NONLIN}
                ), ),
            {'name': 'NonLinCtlr'})(input)

        # Add the move control to the position
        if self._build_counter == 0:
            self.position = move
            self.skip_layer()
        else:
            self.position = self.shared_layer(
                keras.layers.Lambda, ((lambda x: x[0] + x[1]), ),
                {'name': 'AddPosition'})([self.position, move])

        self.position = self.print_layer(self.position, "Attn_Position")

        def select_impl(x):
            data = x[0]  # (batch_size,datapoints,outputs)
            position = x[1]  # (batch_size,1)

            # batch_size = K.shape(position)[0]
            indices = K.arange(start=0,
                               stop=self.datapoints,
                               dtype=position.dtype)  # (datapoints)
            # e.g. [0., 1., 2., 3., 4., 5., 6., 7.]

            indices = K.expand_dims(indices, axis=1)  # (datapoints,1)
            position = K.expand_dims(position, axis=-2)  # (batch_size,1,1)

            # Version without 0 grad regions
            diff = position - indices
            mask = 1. / (1. + K.abs(diff))  # {NONLIN}

            masked = mask * data  # (batch_size,datapoints,outputs)
            return K.sum(masked, axis=-2)  # (batch_size,outputs)

        out = self.shared_layer(keras.layers.Lambda, (select_impl, ),
                                {'name': 'Select'})([self.data, self.position])
        out = self.print_layer(out, "Attn_Out")
        return out
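A standalone sketch of the soft selection mask computed in select_impl above, i.e. weighting each datapoint by 1 / (1 + |position - index|) and summing, with concrete shapes for illustration:

import numpy as np
from tensorflow.keras import backend as K

data = K.constant(np.arange(8, dtype='float32').reshape(1, 8, 1))  # (batch_size, datapoints, outputs)
position = K.constant([[2.0]])                                     # (batch_size, 1)

indices = K.expand_dims(K.arange(0, 8, dtype='float32'), axis=1)   # (datapoints, 1)
pos = K.expand_dims(position, axis=-2)                             # (batch_size, 1, 1)
mask = 1. / (1. + K.abs(pos - indices))                            # largest weight at index 2
out = K.sum(mask * data, axis=-2)                                  # (batch_size, outputs)
print(K.eval(out))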
Example No. 6
        def call(self, u):

            # edge detection (learned filter)
            edges = K.separable_conv2d(self.image,
                                       self.sep_kernel_depthwise,
                                       self.sep_kernel_pointwise,
                                       padding='same')
            edges = K.relu(edges)
            edges = K.sum(edges, axis=-1, keepdims=True)
            edges = K.softsign(edges)

            # grad( edge_detection ) approx (learned filter)
            grad_edges_x = K.conv2d(edges, self.conv_kernel_x, padding='same')
            grad_edges_x = K.relu(grad_edges_x)
            grad_edges_x = K.sum(grad_edges_x, axis=-1, keepdims=True)
            grad_edges_y = K.conv2d(edges, self.conv_kernel_y, padding='same')
            grad_edges_y = K.relu(grad_edges_y)
            grad_edges_y = K.sum(grad_edges_y, axis=-1, keepdims=True)

            # upwind approx to grad( edge_detection)^T grad( u )
            xp = K.conv2d(u, kXP, padding='same')
            xn = K.conv2d(u, kXN, padding='same')
            yp = K.conv2d(u, kYP, padding='same')
            yn = K.conv2d(u, kYN, padding='same')
            fxp = K.relu(grad_edges_x)
            fxn = -1.0 * K.relu(-1.0 * grad_edges_x)
            fyp = K.relu(grad_edges_y)
            fyn = -1.0 * K.relu(-1.0 * grad_edges_y)
            xpp = fxp * xp
            xnn = fxn * xn
            ypp = fyp * yp
            ynn = fyn * yn

            # curvature kappa( u ) approx (learned filter)
            kappa = K.conv2d(u, self.conv_kernel_curve_0, padding='same')
            kappa = K.conv2d(kappa, self.conv_kernel_curve_1, padding='same')
            kappa = K.conv2d(kappa, self.conv_kernel_curve_2, padding='same')
            kappa = K.sum(kappa, axis=-1, keepdims=True)
            kappa = kappa * self.kappa_kernel
            kappa = K.conv2d(kappa, self.conv_kernel_smoother, padding='same')
            #          uxx = K.conv2d(u, kXX, padding='same')
            #          uyy = K.conv2d(u, kYY, padding='same')
            #          uxy = K.conv2d(u, kXY, padding='same')
            #          uxc = K.conv2d(u, kXC, padding='same')
            #          uyc = K.conv2d(u, kYC, padding='same')
            #          kappa = uxx*(uyc*uyc) - 2.0*(uxc*uyc)*uxy + uyy*(uxc*uxc)

            return u + xpp + xnn + ypp + ynn + edges + kappa
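A quick check of the positive/negative splitting used for the upwind terms above: K.relu(f) keeps the positive part, -K.relu(-f) the negative part, and their sum reconstructs the original field:

from tensorflow.keras import backend as K

f = K.constant([-2.0, -0.5, 0.0, 1.5])
fp = K.relu(f)                 # [0. , 0. , 0. , 1.5]
fn = -1.0 * K.relu(-1.0 * f)   # [-2. , -0.5, 0. , 0. ]
print(K.eval(fp + fn))         # matches f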
Example No. 7
def softsign(x):
    return K.softsign(x)
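A thin wrapper like this can be passed anywhere Keras accepts an activation callable; a minimal sketch, assuming tf.keras and the softsign wrapper above:

from tensorflow import keras

model = keras.Sequential([
    keras.layers.Dense(32, activation=softsign, input_shape=(16,)),
    keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')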
Example No. 8
 def k_func(x):
     return KK.softsign(x)
Example No. 9
 def custom_activation(x):
     #    print(type((K.sigmoid(x) * 4) + 1) == type(K.round((K.sigmoid(x) * 4) + 1)))
     return ((K.softsign(x) * 2.5) + 2.5)
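A quick check of the range: softsign maps into (-1, 1), so this activation maps into the open interval (0, 5):

from tensorflow.keras import backend as K

x = K.constant([-100.0, 0.0, 100.0])
print(K.eval(custom_activation(x)))  # approximately [0.025, 2.5, 4.975]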
Example No. 10
def softsign(x):
    return K.eval(K.softsign(K.variable(x))).tolist()
Example No. 11
def soft_heaviside(x):
    # smooth approximation of the Heaviside step; the factor 4000 sharpens the transition
    return 0.5 * (K.softsign(4000 * x) + 1)
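A quick check that soft_heaviside behaves like a smoothed step function around zero:

from tensorflow.keras import backend as K

x = K.constant([-0.01, 0.0, 0.01])
print(K.eval(soft_heaviside(x)))  # approximately [0.012, 0.5, 0.988]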