Example #1
def map_fn(i):
    std_samples = dist.sample(1)
    distorted_loss = K.categorical_crossentropy(pred + std_samples,
                                                true,
                                                from_logits=True)
    diff = undistorted_loss - distorted_loss
    return -K.elu(diff)
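The snippet above is only the inner mapping function. For orientation, here is a minimal, hypothetical sketch of how such a function is typically consumed in a Monte Carlo aleatoric-uncertainty loss; names such as T, dist, pred, true and undistorted_loss are assumed to come from the enclosing (unshown) loss function, and none of this is part of the original example:

import keras.backend as K

T = 25  # assumed number of Monte Carlo samples
# Evaluate map_fn once per sample and average the distorted losses.
mc_results = K.map_fn(map_fn, K.arange(T, dtype='float32'))  # shape: (T, batch)
variance_loss = K.mean(mc_results, axis=0) * undistorted_loss
loss = variance_loss + undistorted_loss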
Example #2
def elu(x, alpha=1.0):
    """Exponential linear unit.
    # Arguments
        x: Input tensor.
        alpha: A scalar, slope of negative section.
    # Returns
        The exponential linear activation: `x` if `x > 0` and
        `alpha * (exp(x)-1)` if `x < 0`.
    # References
        - [Fast and Accurate Deep Network Learning by Exponential
           Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)
    """
    return K.elu(x, alpha)
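Not part of the example, but a quick numeric check of the documented formula (positive inputs pass through unchanged, negative inputs map to `alpha * (exp(x) - 1)`):

from keras import backend as K

x = K.constant([-2.0, -0.5, 0.0, 1.5])
print(K.eval(elu(x)))  # approx. [-0.8647, -0.3935, 0.0, 1.5]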
    def call(self, inputs, mask=None):

        # dense layer for mu (mean) of the gaussians
        mu_output = K.dot(inputs, self.mu_kernel)
        mu_output = K.bias_add(mu_output, self.mu_bias, data_format='channels_last')

        # dense layer for sigma (variance) of the gaussians
        sigma_output = K.dot(inputs, self.sigma_kernel)
        sigma_output = K.bias_add(sigma_output, self.sigma_bias, data_format='channels_last')

        # Avoid NaN's by pushing sigma through the following custom activation
        sigma_output = K.elu(sigma_output) + 1 + K.epsilon()

        # dense layer for pi (amplitude) of the gaussians
        pi_output = K.dot(inputs, self.pi_kernel)
        pi_output = K.bias_add(pi_output, self.pi_bias, data_format='channels_last')

        output = Concatenate(name="mdn_outputs")([mu_output, sigma_output, pi_output])
        return output
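As a hypothetical illustration (the mixture sizes below are assumptions, not taken from the snippet), the concatenated output packs the parameters as [mu | sigma | pi], so downstream code can recover them by slicing fixed-width blocks:

import numpy as np

num_mixtures = 5   # assumed number of Gaussian components
output_dim = 2     # assumed dimensionality of each component mean

# Fake batch of layer outputs with the assumed packing [mu | sigma | pi].
y = np.random.randn(4, 2 * num_mixtures * output_dim + num_mixtures)

mus = y[:, :num_mixtures * output_dim]
sigmas = y[:, num_mixtures * output_dim:2 * num_mixtures * output_dim]
pis = y[:, 2 * num_mixtures * output_dim:]
print(mus.shape, sigmas.shape, pis.shape)  # (4, 10) (4, 10) (4, 5)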
Example #4
def selu(x):
    """Scaled Exponential Linear Unit (SELU).
    SELU is equal to: `scale * elu(x, alpha)`, where alpha and scale
    are predefined constants. The values of `alpha` and `scale` are
    chosen so that the mean and variance of the inputs are preserved
    between two consecutive layers as long as the weights are initialized
    correctly (see `lecun_normal` initialization) and the number of inputs
    is "large enough" (see references for more information).
    # Arguments
        x: A tensor or variable to compute the activation function for.
    # Returns
       The scaled exponential unit activation: `scale * elu(x, alpha)`.
    # Note
        - To be used together with the initialization "lecun_normal".
        - To be used together with the dropout variant "AlphaDropout".
    # References
        - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
    """
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * K.elu(x, alpha)
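Not part of the example: the notes above pair SELU with the lecun_normal initializer and AlphaDropout, so a minimal dense block built around this selu might look like the following (the layer widths and input shape are arbitrary assumptions):

from keras.models import Sequential
from keras.layers import Dense, AlphaDropout

model = Sequential([
    Dense(64, activation=selu, kernel_initializer='lecun_normal', input_shape=(20,)),
    AlphaDropout(0.1),
    Dense(64, activation=selu, kernel_initializer='lecun_normal'),
    Dense(1),
])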
def build_model(hidden_layers, activation='tanh', alpha=1e-3, penalty_order=3, penalty_loss='l1'):
    inputs = keras.Input(shape=(1,))
    for i, hidden in enumerate(hidden_layers):
        if i == 0:
            h = keras.layers.Dense(hidden, activation='linear', kernel_initializer=keras.initializers.glorot_normal())(inputs)
        else:
            h = keras.layers.Dense(hidden, activation='linear')(h)
        if activation == 'tanh':
            h = K.tanh(h)
        elif activation == 'sine':
            h = K.sin(h)
        elif activation == 'elu':
            h = K.elu(h)
        elif activation == 'sigmoid':
            h = K.sigmoid(h)
        elif activation == 'relu':
            h = K.relu(h)
        #h = keras.layers
        #h = keras.layers.Dropout(rate=0.8)(h)
        #h = keras.layers.BatchNormalization()(h)
    outputs = keras.layers.Dense(1, activation='linear')(h)
    model = keras.Model(inputs, outputs)
    grad1 = K.gradients(model.output, model.input)[0]
    iterate1 = K.function([model.input], [grad1])
    grad2 = K.gradients(grad1, model.input)[0]
    iterate2 = K.function([model.input], [grad2])
    if penalty_order == 2:
        tt = grad2
    elif penalty_order == 3:
        grad3 = K.gradients(grad2, model.input)[0]
        tt = grad3
    if penalty_loss == 'l1':
        model.compile(optimizer='Adam', loss=penalty_l1(tt, alpha=alpha))
    elif penalty_loss == 'l2': 
        model.compile(optimizer='Adam', loss=penalty_l2(tt, alpha=alpha))
    return model, iterate1, iterate2
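A hypothetical usage sketch (it assumes the penalty_l1/penalty_l2 loss factories referenced above are defined elsewhere, and a TF1-style Keras backend where K.gradients and K.function are available): fit the network on 1-D data, then evaluate the first and second derivatives of the fitted function at the training points.

import numpy as np

x = np.linspace(-1.0, 1.0, 200).reshape(-1, 1)
y = np.sin(np.pi * x)

model, iterate1, iterate2 = build_model([32, 32], activation='tanh', penalty_order=2)
model.fit(x, y, epochs=50, verbose=0)

dy_dx = iterate1([x])[0]    # first derivative of the network output w.r.t. its input
d2y_dx2 = iterate2([x])[0]  # second derivative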
Example #6
def map_fn(i):
    std_samples = K.transpose(dist.sample(num_classes))
    distorted_loss = K.categorical_crossentropy(pred + std_samples, true, from_logits=True)
    diff = undistorted_loss - distorted_loss
    return -K.elu(diff)
Example #7
def elu_plus(x):
    return K.elu(x) + 1 + 1e-6
Example #8
def elu_plus_one_plus_epsilon(x):
    """ELU activation with a very small addition to help prevent NaN in loss."""
    return (K.elu(x) + 1 + 1e-8)
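Not in the original: since ELU is bounded below by -1, adding 1 plus a small epsilon keeps the output strictly positive, which is why this activation is commonly used for scale or variance outputs. A quick check:

from keras import backend as K

x = K.constant([-20.0, -1.0, 0.0, 3.0])
print(K.eval(elu_plus_one_plus_epsilon(x)))  # every entry is > 0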
def call(self, inputs, **kwargs):
    output = self.lmbd * K.elu(inputs) + (1 - self.lmbd) * (
        K.softplus(inputs) - self.alpha)
    return output
def call(self, inputs, **kwargs):
    return self.lmbd * K.elu(inputs) + (1 - self.lmbd) * K.softplus(inputs)
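For illustration only, the two call methods above compute a weighted blend of ELU and softplus controlled by the layer attribute lmbd; the lmbd and alpha values below are stand-in scalars, not values taken from the snippets:

from keras import backend as K

def blended_activation(x, lmbd=0.5, alpha=0.0):
    # lmbd * elu(x) + (1 - lmbd) * (softplus(x) - alpha)
    return lmbd * K.elu(x) + (1 - lmbd) * (K.softplus(x) - alpha)

print(K.eval(blended_activation(K.constant([-1.0, 0.0, 1.0]))))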