def get_mlp(input_shape, n_outputs):
    from tensorflow import keras
    import tensorflow.keras.layers as layers

    inputs = layers.Input(shape=input_shape)

    # x = layers.Dense(500, kernel_initializer="he_normal")(inputs)
    # x = layers.LeakyReLU(alpha=0.2)(x)
    # x = layers.Dense(500, kernel_initializer="he_normal")(x)
    # x = layers.LeakyReLU(alpha=0.2)(x)

    x = layers.Dense(500,
                     kernel_initializer="he_normal",
                     kernel_regularizer=keras.regularizers.l2(0.01),
                     use_bias=False)(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.ELU()(x)

    x = layers.Dense(500,
                     kernel_initializer="he_normal",
                     kernel_regularizer=keras.regularizers.l2(0.01),
                     use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ELU()(x)

    # x = layers.Dense(50, activation="relu")(inputs)
    # x = layers.Dense(10, activation="relu")(x)

    outputs = layers.Dense(n_outputs)(x)
    model = keras.Model(inputs=[inputs], outputs=[outputs])
    return model
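
A minimal usage sketch for get_mlp (not part of the original example; the input shape, output size, optimizer, and loss are illustrative assumptions):

from tensorflow import keras

model = get_mlp(input_shape=(32,), n_outputs=3)
model.compile(optimizer=keras.optimizers.Adam(1e-3),
              loss=keras.losses.MeanSquaredError())
model.summary()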
Example #2
 def __init__(self,
              in_channels,
              out_channels,
              data_format="channels_last",
              **kwargs):
     super(LwopEncoderFinalBlock, self).__init__(**kwargs)
     self.pre_conv = conv1x1_block(in_channels=in_channels,
                                   out_channels=out_channels,
                                   use_bias=True,
                                   use_bn=False,
                                   data_format=data_format,
                                   name="pre_conv")
     self.body = SimpleSequential(name="body")
     for i in range(3):
         self.body.add(
             dwsconv3x3_block(in_channels=out_channels,
                              out_channels=out_channels,
                              use_bn=False,
                              dw_activation=(lambda: nn.ELU()),
                              pw_activation=(lambda: nn.ELU()),
                              data_format=data_format,
                              name="block{}".format(i + 1)))
     self.post_conv = conv3x3_block(in_channels=out_channels,
                                    out_channels=out_channels,
                                    use_bias=True,
                                    use_bn=False,
                                    data_format=data_format,
                                    name="post_conv")
def conv_layer(x):
    import tensorflow.keras.layers as layers
    from tensorflow import keras

    # x = layers.Conv2D(32, 8, kernel_initializer=initializer, strides=4, activation='relu')(x)
    # x = layers.Conv2D(64, 4, kernel_initializer=initializer, strides=2, activation='relu')(x)
    # x = layers.Conv2D(64, 3, kernel_initializer=initializer, strides=1, activation='relu')(x)

    initializer = keras.initializers.VarianceScaling(
        scale=2.0, mode='fan_in', distribution='truncated_normal')

    x = layers.Conv2D(64, 5, kernel_initializer=initializer, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ELU()(x)
    x = layers.Conv2D(64, 3, kernel_initializer=initializer, padding='valid')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ELU()(x)
    x = layers.Conv2D(128, 3, kernel_initializer=initializer, padding='valid')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ELU()(x)
    x = layers.Conv2D(128, 3, kernel_initializer=initializer, padding='valid')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ELU()(x)

    return x
def mlp_layer(x):
    from tensorflow import keras
    import tensorflow.keras.layers as layers

    # initializer = "he_normal"
    # x = layers.Dense(512, kernel_initializer=initializer, activation='relu')(x)

    initializer = keras.initializers.VarianceScaling(
        scale=2.0, mode='fan_in', distribution='truncated_normal')

    x = layers.Dense(1000, kernel_initializer=initializer,
                     kernel_regularizer=keras.regularizers.l2(0.01),
                     use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ELU()(x)
    # x = layers.ReLU()(x)

    x = layers.Dense(1000, kernel_initializer=initializer,
                     kernel_regularizer=keras.regularizers.l2(0.01),
                     use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ELU()(x)
    # x = layers.ReLU()(x)

    return x
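
A sketch (assumed, not from the source) of how conv_layer and mlp_layer could be chained into a single functional model; the input shape and output size are illustrative:

def build_conv_mlp(input_shape=(84, 84, 4), n_outputs=10):
    from tensorflow import keras
    import tensorflow.keras.layers as layers

    inputs = layers.Input(shape=input_shape)
    x = conv_layer(inputs)   # convolutional feature extractor defined above
    x = layers.Flatten()(x)
    x = mlp_layer(x)         # fully connected head defined above
    outputs = layers.Dense(n_outputs)(x)
    return keras.Model(inputs=inputs, outputs=outputs)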
    def __init__(self, params):
        name = params['model_name']
        super(discrete_10000_ec_4, self).__init__(name=name)

        # float
        dropout_rate = params['dropout_rate']

        # list with 2 elements: start and end
        # [0:1] = V
        # [1:2] = W
        # [0:2] = V and W
        self.pchoice = params['pchoice']

        self.d = layers.Conv1D(1, 2, strides=1, padding='same')
        self.d2 = layers.Conv1D(1, 3, strides=1, padding='same')
        self.semilocal = layers.Conv1D(1, 16, strides=1, padding='same')

        self.decider = keras.Sequential()
        self.decider.add(layers.Conv1D(6, 2, strides=1, padding='same'))
        self.decider.add(layers.ELU())
        self.decider.add(layers.Dropout(rate=dropout_rate))
        self.decider.add(layers.Conv1D(7, 4, strides=1, padding='same'))
        self.decider.add(layers.LeakyReLU())
        self.decider.add(layers.Dropout(rate=dropout_rate))
        self.decider.add(layers.Conv1D(8, 6, strides=1, padding='same'))

        self.counters = []
        for i in range(1, 8):
            model = keras.Sequential()
            model.add(layers.Conv1D(8, 2**i, 2**(i - 1), padding='same'))
            model.add(layers.ELU())
            model.add(layers.Dropout(rate=dropout_rate))
            pool_size = (10000 + 2**(i - 1) - 1) // 2**(i - 1)
            model.add(layers.AveragePooling1D(pool_size=pool_size))
            model.add(layers.LeakyReLU())
            self.counters.append(model)

        self.conv_down = layers.Dense(1)

        self.weighters = keras.Sequential()
        self.weighters.add(layers.BatchNormalization())
        self.weighters.add(layers.Dense(21))
        self.weighters.add(layers.ELU())
        self.weighters.add(layers.Dropout(rate=dropout_rate))
        self.weighters.add(layers.Dense(14))
        self.weighters.add(layers.LeakyReLU())
        self.weighters.add(layers.Dropout(rate=dropout_rate))
        self.weighters.add(layers.Dense(7))
        self.weighters.add(layers.Activation('softmax'))

        self.out_layer = layers.Dense(1)
 def __init__(self, **kwargs):
     super(Q, self).__init__(**kwargs)
     self.dim_reduction1 = FFC(out_length=2, groups=2, as_matrix=False)
     self.layer_normal1 = layers.LayerNormalization()
     self.activation1 = layers.ELU()
     self.dim_reduction2 = FFC(out_length=1, groups=1, as_matrix=False)
     self.layer_normal2 = layers.LayerNormalization()
     self.activation2 = layers.ELU()
     self.reshape = None
     self.squeeze = None
     self.relu = layers.ReLU()
     self.excitation = None
     self.reverse_reshape = None
     self.layer_normal3 = layers.LayerNormalization()
Example #7
def create_model(trainable=True):
    image_input = Input(shape=(160, 120, 3))
    model = MobileNetV2(input_tensor=image_input, include_top=False, alpha=ALPHA, weights=None)
    last_layer = model.layers[-1].output
    
    x = GlobalAveragePooling2D()(last_layer)
    x = layers.GaussianDropout(0.3)(x)
    x = Dense(512, activation=layers.ELU(alpha=1.0), name='fc1')(x)
    x = layers.GaussianDropout(0.1)(x)
    x = Dense(64, activation=layers.ELU(alpha=1.0), name='fc2')(x)
    x = layers.GaussianDropout(0.05)(x)
    x = Dense(NUM_CLASSES, activation='linear', name='output')(x)
    
    return Model(inputs=model.input, outputs=x)
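
This example relies on module-level constants ALPHA and NUM_CLASSES that are not shown in the snippet; a hypothetical setup (assumed values) would be:

ALPHA = 1.0        # MobileNetV2 width multiplier (assumed value)
NUM_CLASSES = 4    # size of the final linear output layer (assumed value)

model = create_model()
model.summary()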
Example #8
    def __init__(self, params):
        name = params['model_name']
        super(cnn_landscape, self).__init__(name=name)

        # choice of potential
        self.pchoice = params['pchoice']

        # residual
        self.res = keras.Sequential(name='residual')
        self.res.add(layers.Conv1D(self.pchoice[1]-self.pchoice[0], 20, strides=1, padding='same'))
        self.res.add(layers.ELU())
        self.res.add(layers.Conv1D(self.pchoice[1]-self.pchoice[0], 20, strides=1, padding='same'))
        self.res.add(layers.LeakyReLU())
        self.res.add(layers.Dropout(rate=0.1))

        # embedding layers
        self.emb = keras.Sequential(name='embedding')
        self.emb.add(layers.BatchNormalization())
        self.emb.add(layers.Conv1D(5, 50, strides=5, padding='same'))
        self.emb.add(layers.ELU())
        self.emb.add(layers.Dropout(rate=0.1))
        self.emb.add(layers.Conv1D(5, 50, strides=5, padding='same'))
        self.emb.add(layers.LeakyReLU())
        self.emb.add(layers.Dropout(rate=0.1))

        # reduction of input
        self.conv1 = layers.Conv1D(5, 50, strides=25, padding='same')


        self.weighters = keras.Sequential(name='integral_weight')
        self.weighters.add(layers.Conv1D(10, 20, strides=10, padding='same'))
        self.weighters.add(layers.ELU())
        self.weighters.add(layers.Dropout(rate=0.1))
        self.weighters.add(layers.Conv1D(20, 20, strides=10, padding='same'))
        self.weighters.add(layers.LeakyReLU())
        self.weighters.add(layers.Dropout(rate=0.1))
        self.weighters.add(layers.Conv1D(30, 4, strides=4, padding='same'))
        self.weighters.add(layers.Flatten())

        self.out_layer1 = keras.Sequential(name='out_layer1')
        self.out_layer1.add(layers.Dense(30))
        self.out_layer1.add(layers.LeakyReLU())
        self.out_layer1.add(layers.Dense(30))
        self.out_layer1.add(layers.LeakyReLU())

        self.out_layer2 = keras.Sequential(name='out_layer2')
        self.out_layer2.add(layers.Dense(10))
        self.out_layer2.add(layers.LeakyReLU())
        self.out_layer2.add(layers.Dense(1))
Example #9
def do_activation(input_values, function_name, alpha=0.2):
    """Runs input array through activation function.

    :param input_values: numpy array (any shape).
    :param function_name: Name of activation function (must be accepted by
        `architecture_utils.check_activation_function`).
    :param alpha: Slope parameter (alpha) for the activation function.  This
        applies only to ELU and leaky ReLU.
    :return: output_values: Same as `input_values` but post-activation.
    """

    architecture_utils.check_activation_function(
        activation_function_string=function_name,
        alpha_for_elu=alpha,
        alpha_for_relu=alpha)

    input_object = K.placeholder()

    if function_name == architecture_utils.ELU_FUNCTION_STRING:
        function_object = K.function([input_object],
                                     [layers.ELU(alpha=alpha)(input_object)])
    elif function_name == architecture_utils.RELU_FUNCTION_STRING:
        function_object = K.function(
            [input_object], [layers.LeakyReLU(alpha=alpha)(input_object)])
    else:
        function_object = K.function(
            [input_object], [layers.Activation(function_name)(input_object)])

    return function_object([input_values])[0]
Example #10
    def __init__(self, num_actions):
        super().__init__()

        self.conv1 = layers.Conv2D(
            32,
            8,
            4,
            padding="same",
            input_shape=(84, 84, stack_size),
            kernel_initializer=tf.keras.initializers.glorot_normal())
        self.activation = layers.ELU()
        self.conv2 = layers.Conv2D(
            64,
            4,
            2,
            padding="same",
            kernel_initializer=tf.keras.initializers.glorot_normal())
        self.conv3 = layers.Conv2D(
            128,
            2,
            2,
            padding="valid",
            kernel_initializer=tf.keras.initializers.glorot_normal())
        self.normalization = layers.BatchNormalization(epsilon=1e-5,
                                                       name="batch_norm")
        self.normalization1 = layers.BatchNormalization(epsilon=1e-5,
                                                        name="batch_norm_1")
        self.flatten = layers.Flatten()
        self.dense1 = layers.Dense(512, activation="relu")
        self.actor = layers.Dense(num_actions)
        self.critic = layers.Dense(1)
 def __init__(self, rate=2, matrix_shape=(4, 4), regularize=1e-5, **kwargs):
     super(CondenseTiny, self).__init__(**kwargs)
     self.sparse_extraction = PartialMatrix(rate=rate,
                                            matrix_shape=matrix_shape,
                                            regularize=regularize)
     self.normal = layers.LayerNormalization()
     self.activation = layers.ELU()
Example #12
def build_model_Dense():
    model = keras.Sequential()
    model.add(
        layers.Dense(200,
                     input_dim=375,
                     kernel_initializer=keras.initializers.Zeros()))
    model.add(layers.Activation('sigmoid'))
    model.add(layers.Dense(185))
    model.add(layers.Activation('sigmoid'))
    model.add(layers.Dense(92))
    model.add(layers.ELU(alpha=1.0))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(1, activation='relu'))

    optimizer = keras.optimizers.Adam(lr=0.0001,
                                      beta_1=0.99,
                                      beta_2=0.999,
                                      epsilon=0.01,
                                      decay=0.01,
                                      amsgrad=False)

    model.compile(loss='mean_absolute_error',
                  optimizer=optimizer,
                  metrics=['acc'])
    model.summary()
    return model
Example #13
def get_activation_fn(activation: str, name: str) -> layers.Layer:
    """Returns the requested activation layer."""
    if activation == 'relu':
        return layers.ReLU(name=name+'_relu')
    if activation == 'elu':
        return layers.ELU(name=name+'_elu')
    raise ValueError('Unknown activation function')
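
A brief usage sketch (illustrative, not from the source):

elu_layer = get_activation_fn('elu', name='block1')   # layers.ELU named 'block1_elu'
outputs = elu_layer(tf.constant([-1.0, 0.5]))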
 def __init__(self, **kwargs):
     super(CapsSimilarity, self).__init__(**kwargs)
     self.layer_normal1 = layers.LayerNormalization()
     # self.dot = layers.Dot((2, 2), normalize=True)
     self.dot = layers.Dot((2, 2))
     self.layer_normal2 = layers.LayerNormalization()
     self.activation = layers.ELU()
Example #15
def create_wdmodel(n_cat_input, n_num_input, hidden_layer, num_classes):

    input_layer1 = tf.keras.Input(shape=(n_num_input, ))
    hidden = layers.Dense(
        hidden_layer[0],
        kernel_regularizer=regularizers.l2(0.1),
        activity_regularizer=regularizers.l1(0.1))(input_layer1)
    hidden = layers.ReLU(max_value=None, negative_slope=0.0,
                         threshold=0.0)(hidden)
    hidden = layers.Dropout(0.1)(hidden)
    hidden = layers.Dense(hidden_layer[1],
                          kernel_regularizer=regularizers.l2(0.1),
                          activity_regularizer=regularizers.l1(0.1))(hidden)
    hidden = layers.ReLU(max_value=None, negative_slope=0.0,
                         threshold=0.0)(hidden)
    hidden = layers.Dropout(0.1)(hidden)
    hidden = layers.Dense(hidden_layer[2],
                          kernel_regularizer=regularizers.l2(0.1),
                          activity_regularizer=regularizers.l1(0.1))(hidden)
    hidden = layers.ReLU(max_value=None, negative_slope=0.0,
                         threshold=0.0)(hidden)
    hidden = layers.Dropout(0.1)(hidden)

    if num_classes > 2:
        output_layer1 = layers.Dense(num_classes, activation="softmax")(hidden)
    else:
        output_layer1 = layers.Dense(1, activation="sigmoid")(hidden)

    input_layer2 = tf.keras.Input(shape=(n_cat_input, ))
    output_layer2 = layers.ELU(alpha=1)(input_layer2)

    inputs = []
    inputs.append(input_layer1)
    inputs.append(input_layer2)

    outputs = []
    outputs.append(output_layer1)
    outputs.append(output_layer2)

    concat = layers.Concatenate()(outputs)
    if num_classes > 2:
        model_out = layers.Dense(num_classes, activation="softmax")(concat)
    else:
        model_out = layers.Dense(1, activation="sigmoid")(concat)
    #model_out = layers.Dense( 2, activation = "softmax" )( concat )
    model = models.Model(inputs, model_out)

    if num_classes > 2:
        loss_func = 'sparse_categorical_crossentropy'
    else:
        loss_func = 'binary_crossentropy'

    opt = optimizers.Adam(learning_rate=0.005,
                          beta_1=0.9,
                          beta_2=0.999,
                          amsgrad=False)
    model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

    return model
def get_activation(name):
    """
    Parses a string to extract the activation function.
    """
    string = name.split(':')

    if string[0] == 'RELU':
        activation = Layers.ReLU()
    elif string[0] == 'TANH':
        activation = Layers.Activation(tf.keras.activations.tanh)
    elif string[0] == 'SIGMOID':
        activation = Layers.Activation(tf.keras.activations.sigmoid)
    elif string[0] == 'LRELU':
        try:
            activation = Layers.LeakyReLU(float(string[1]))
        except (IndexError, ValueError):
            warnings.warn('Using default alpha parameter : 0.1.',
                          SyntaxWarning)
            warnings.warn(
                'To set a custom alpha value type LRELU:alpha instead of LRELU',
                SyntaxWarning)
            activation = Layers.LeakyReLU(0.1)
    elif string[0] == 'PRELU':
        activation = Layers.PReLU()
    elif string[0] == 'ELU':
        try:
            activation = Layers.ELU(float(string[1]))
        except (IndexError, ValueError):
            warnings.warn('Using default alpha parameter : 1.0.',
                          SyntaxWarning)
            warnings.warn(
                'To set a custom alpha value type ELU:alpha instead of ELU',
                SyntaxWarning)
            activation = Layers.ELU(1.0)
    elif string[0] == 'SELU':
        activation = Layers.Activation(tf.keras.activations.selu)
    elif string[0] == 'SWISH':
        activation = swish()
    elif string[0] == 'CSWISH':
        activation = Layers.Activation(cswish)
    elif string[0] == 'MISH':
        activation = Layers.Activation(mish)
    else:
        raise ValueError('error: unknown activation function')
    return activation
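
A usage sketch (assumed, not from the source); the parser expects either a plain name or NAME:alpha:

relu = get_activation('RELU')          # Layers.ReLU()
lrelu = get_activation('LRELU:0.2')    # Layers.LeakyReLU(0.2)
elu = get_activation('ELU:0.5')        # Layers.ELU(0.5)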
 def __init__(self,
              num_capsule: int,
              matrix_shape: tuple = (4, 4),
              regularize=1e-4,
              **kwargs):
     super(Condense, self).__init__(**kwargs)
     self.sparse_extraction = GlobalMatrix(num_capsule,
                                           matrix_shape,
                                           regularize=regularize)
     self.normal = layers.LayerNormalization()
     self.activation = layers.ELU()
Example #18
def create_model(num_input, hidden_layer, num_classes):

    input_layer = tf.keras.Input(shape=(num_input, ))
    hidden = layers.Dense(
        hidden_layer[0],
        kernel_regularizer=regularizers.l2(0.1),
        activity_regularizer=regularizers.l1(0.1))(input_layer)
    hidden = layers.ELU(alpha=1)(hidden)
    hidden = layers.Dropout(0.1)(hidden)
    hidden = layers.Dense(hidden_layer[1],
                          kernel_regularizer=regularizers.l2(0.1),
                          activity_regularizer=regularizers.l1(0.1))(hidden)
    hidden = layers.ELU(alpha=1)(hidden)
    hidden = layers.Dropout(0.1)(hidden)
    hidden = layers.Dense(hidden_layer[2],
                          kernel_regularizer=regularizers.l2(0.1),
                          activity_regularizer=regularizers.l1(0.1))(hidden)
    hidden = layers.ELU(alpha=1)(hidden)
    hidden = layers.Dropout(0.1)(hidden)
    if num_classes > 2:
        output_layer = layers.Dense(num_classes, activation="softmax")(hidden)
    else:
        output_layer = layers.Dense(1, activation="sigmoid")(hidden)

    model = models.Model(input_layer, output_layer)
    if num_classes > 2:
        loss_func = 'sparse_categorical_crossentropy'
    else:
        loss_func = 'binary_crossentropy'

    opt = optimizers.Adam(learning_rate=0.005,
                          beta_1=0.9,
                          beta_2=0.999,
                          amsgrad=False)
    model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

    return model
def createModelIII(reg1=1e-3, reg2=1e-3, dp=0.3, learning_rate=1e-4):

    tsteps = 48
    model = models.Sequential()

    model.add(
        layers.LSTM(32,
                    input_shape=[tsteps, 6],
                    return_sequences=True,
                    dropout=0.2,
                    use_bias=True))

    model.add(
        layers.TimeDistributed(
            layers.Dense(tsteps, activation='relu', use_bias=True)))

    model.add(layers.Reshape([tsteps, tsteps, 1]))

    model.add(layers.Conv2D(16, (3, 3), padding="same", activation="relu"))

    model.add(layers.Conv2D(32, (7, 7), padding="same", activation="relu"))

    model.add(layers.Flatten())

    model.add(
        layers.Dense(64, kernel_regularizer=tf.keras.regularizers.l2(reg1)))
    model.add(layers.ELU())

    model.add(layers.Dropout(dp))

    model.add(
        layers.Dense(14,
                     activation='softmax',
                     kernel_regularizer=tf.keras.regularizers.l2(reg2)))

    lr_decayed_fn = (tf.keras.experimental.CosineDecayRestarts(
        learning_rate, 10))
    opt = tf.keras.optimizers.Adam(learning_rate=lr_decayed_fn,
                                   amsgrad=True,
                                   beta_1=0.86,
                                   beta_2=0.98,
                                   epsilon=1e-9)

    # Note: this RMSprop optimizer replaces the Adam/CosineDecayRestarts setup above.
    opt = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model
 def __init__(self,
              out_length,
              rate=2,
              strides=2,
              regularize=1e-5,
              **kwargs):
     super(CondenseTiny, self).__init__(**kwargs)
     self.sparse_extraction = layers.Conv1D(
         out_length,
         rate,
         strides=strides,
         use_bias=False,
         kernel_regularizer=regularizers.L2(regularize))
     self.normal = layers.LayerNormalization()
     self.activation = layers.ELU()
Example #21
    def __init__(self,
                 num_hiddenlayers=3,
                 num_neurons=64,
                 activationlayer=kl.ELU()):
        super().__init__('mlp_policy')
        self.num_hiddenlayers = num_hiddenlayers
        self.activationlayer = activationlayer

        self.hidden2 = kl.Dense(num_neurons)  # hidden layer for state-value
        self.hidden22 = kl.Dense(num_neurons)
        if self.num_hiddenlayers > 2:
            self.hidden222 = kl.Dense(num_neurons)
        if self.num_hiddenlayers > 3:
            self.hidden2222 = kl.Dense(num_neurons)

        self.val = kl.Dense(1, name='value')
Example #22
    def __init__(self,
                 n_classes,
                 channels=100,
                 kernel_size=1,
                 classifier_layers=3,
                 rate=0.0,
                 **kwargs):
        super().__init__(**kwargs)

        # define the layers
        self.n_classes = n_classes
        self.channels = channels
        self.kernel_size = kernel_size
        self.classifier_layers = classifier_layers
        self.rate = rate

        self.initial_conv1d = layers.Conv1D(channels, kernel_size)
        self.inner_conv1d = layers.Conv1D(channels, kernel_size)
        self.elu = layers.ELU()
        self.outer_conv1d = layers.Conv1D(n_classes, kernel_size)
        self.dropout = layers.Dropout(rate)
Example #23
    def __init__(self, params):
        super(cnn_dense_0, self).__init__(name=params['model_name'])

        self.pchoice = params['pchoice']

        self.internal = tf.keras.Sequential(name=params['model_name'] +
                                            '_layers')
        self.internal.add(layers.BatchNormalization())
        self.internal.add(layers.Conv1D(4, 20, strides=10, padding='same'))
        self.internal.add(layers.ELU())
        self.internal.add(layers.Dropout(0.1))

        self.internal.add(layers.BatchNormalization())
        self.internal.add(layers.Conv1D(8, 20, strides=10, padding='same'))
        self.internal.add(layers.ELU(alpha=2.0))
        self.internal.add(layers.Dropout(0.1))

        self.internal.add(layers.BatchNormalization())
        self.internal.add(layers.Conv1D(12, 20, strides=10, padding='same'))
        self.internal.add(layers.ELU(alpha=4.0))
        self.internal.add(layers.Dropout(0.1))

        self.internal.add(layers.BatchNormalization())
        self.internal.add(layers.Conv1D(32, 10, strides=10, padding='same'))
        self.internal.add(layers.ELU(alpha=8.0))
        self.internal.add(layers.Dropout(0.1))

        self.internal.add(layers.Flatten())

        self.internal.add(layers.Dense(32))
        self.internal.add(layers.ELU(alpha=10.0))
        self.internal.add(layers.Dropout(0.05))

        self.internal.add(layers.Dense(16))
        self.internal.add(layers.ELU(alpha=10.0))

        self.internal.add(layers.Dense(1))
Example #24
SPEC_SHAPE = (257, 384)  # (height, width)

# Initializers
initializers = {
    'glorot': tf.initializers.GlorotNormal(seed=RANDOM_SEED),
    'he': k.initializers.he_normal(seed=RANDOM_SEED),
    'random': k.initializers.RandomNormal(mean=0.0,
                                          stddev=0.05,
                                          seed=RANDOM_SEED),
    'constant': k.initializers.Constant(value=0)
}

# Activations
activations = {
    'relu': l.ReLU(max_value=None, negative_slope=0.0, threshold=0.0),
    'elu': l.ELU(alpha=1.0),
    'lrelu': l.LeakyReLU(alpha=0.3)
}


def resBlock(net_in,
             filters,
             kernel_size,
             stride=1,
             preactivated=True,
             block_id=1,
             name=''):

    # Show input shape
    print('    ' + name + ' IN SHAPE:', net_in.shape, end=' ')
Example #25
def make_cnn_model(x_train,
                   y_train,
                   x_test,
                   y_test,
                   reg=0.001,
                   alpha=.7,
                   learning_rate=0.001,
                   dropout=0.5,
                   epochs=100,
                   relative_size=1.0,
                   optim='SGD'):
    #policy = mixed_precision.Policy('mixed_float16')
    #mixed_precision.set_policy(policy)
    x_train = x_train.transpose((0, 2, 1))[:, :, :, None]
    x_test = x_test.transpose((0, 2, 1))[:, :, :, None]
    y_train -= 769
    y_test -= 769

    print(x_train.shape)

    model = keras.models.Sequential()
    size = int(25 * relative_size)
    conv1 = layers.Conv2D(size,
                          kernel_size=(10, 1),
                          strides=1,
                          kernel_regularizer=regularizers.l2(reg))
    conv2 = layers.Conv2D(size,
                          kernel_size=(1, 22),
                          kernel_regularizer=regularizers.l2(reg))
    perm1 = layers.Permute((1, 3, 2))
    pool1 = layers.AveragePooling2D(pool_size=(3, 1))
    drop1 = layers.Dropout(dropout)

    model.add(conv1)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(conv2)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(perm1)
    model.add(pool1)
    model.add(drop1)

    conv3 = layers.Conv2D(2 * size,
                          kernel_size=(10, size),
                          kernel_regularizer=regularizers.l2(reg))
    model.add(layers.ELU(alpha))
    perm2 = layers.Permute((1, 3, 2))
    pool2 = layers.AveragePooling2D(pool_size=(3, 1))
    drop2 = layers.Dropout(dropout)

    model.add(conv3)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(perm2)
    model.add(pool2)
    model.add(drop2)

    conv4 = layers.Conv2D(4 * size,
                          kernel_size=(10, 2 * size),
                          kernel_regularizer=regularizers.l2(reg))
    perm3 = layers.Permute((1, 3, 2))
    pool3 = layers.AveragePooling2D(pool_size=(3, 1))
    drop3 = layers.Dropout(dropout)

    model.add(conv4)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(perm3)
    model.add(pool3)
    model.add(drop3)

    conv5 = layers.Conv2D(8 * size,
                          kernel_size=(10, 4 * size),
                          kernel_regularizer=regularizers.l2(reg))
    perm4 = layers.Permute((1, 3, 2))
    pool4 = layers.AveragePooling2D(pool_size=(3, 1))
    drop4 = layers.Dropout(dropout)

    model.add(conv5)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(perm4)
    model.add(pool4)
    model.add(drop4)

    model.add(layers.Flatten())

    model.add(layers.Dense(4, name='dense_logits'))
    model.add(layers.Activation('softmax', dtype='float32',
                                name='predictions'))

    if optim == 'Adam':
        optimizer = keras.optimizers.Adam(learning_rate,
                                          beta_1=0.85,
                                          beta_2=0.92,
                                          amsgrad=True)
    elif optim == 'RMSprop':
        optimizer = keras.optimizers.RMSprop(learning_rate)
    else:
        optimizer = keras.optimizers.SGD(learning_rate, nesterov=True)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    history = model.fit(x_train,
                        y_train,
                        batch_size=20,
                        epochs=epochs,
                        validation_split=0.2,
                        verbose=1)
    test_scores = model.evaluate(x_test, y_test, verbose=2)
    print('Test loss:', test_scores[0])
    print('Test accuracy:', test_scores[1])

    plot(history)

    return model
Example #26
def make_vae_model(x_train,
                   y_train,
                   x_test,
                   y_test,
                   reg=0.001,
                   alpha=.7,
                   learning_rate=0.001,
                   dropout=0.5,
                   epochs=100,
                   relative_size=1.0,
                   optim='SGD'):
    y_train -= 769
    y_test -= 769

    # latent_dim should be much smaller, but right now it's equal to the original CNN input size
    latent_dim = 500 * 22  # 2
    original_dim = 22000
    intermediate_dim = 512
    batch_size = 100

    epsilon_std = 1.0

    x_train = x_train.reshape(-1, original_dim)
    train_mean = np.mean(x_train)
    train_std = np.std(x_train)
    norm_x_train = (x_train - train_mean) / train_std

    x_test = x_test.reshape(-1, original_dim)
    test_mean = np.mean(x_test)
    test_std = np.std(x_test)
    norm_x_test = (x_test - test_mean) / test_std

    decoder = kmodels.Sequential([
        klayers.Dense(intermediate_dim,
                      input_dim=latent_dim,
                      activation='relu',
                      kernel_regularizer=regularizers.l2(reg)),
        klayers.Dense(original_dim,
                      activation='sigmoid',
                      kernel_regularizer=regularizers.l2(reg))
    ])

    x = klayers.Input(shape=(original_dim, ))
    h = klayers.Dense(intermediate_dim,
                      activation='relu',
                      kernel_regularizer=regularizers.l2(reg))(x)

    z_mu = klayers.Dense(latent_dim,
                         kernel_regularizer=regularizers.l2(reg))(h)
    z_log_var = klayers.Dense(latent_dim,
                              kernel_regularizer=regularizers.l2(reg))(h)

    z_mu, z_log_var = KLDivergenceLayer()([z_mu, z_log_var])
    z_sigma = klayers.Lambda(lambda t: K.exp(.5 * t))(z_log_var)

    eps = klayers.Input(tensor=K.random_normal(
        stddev=epsilon_std, shape=(K.shape(x)[0], latent_dim)))
    z_eps = klayers.Multiply()([z_sigma, eps])
    z = klayers.Add()([z_mu, z_eps])

    x_pred = decoder(z)
    vae = kmodels.Model(inputs=[x, eps], outputs=x_pred)
    vae.compile(optimizer='rmsprop', loss=nll)

    history = vae.fit(norm_x_train,
                      norm_x_train,
                      shuffle=True,
                      epochs=epochs,
                      batch_size=batch_size,
                      validation_split=.2)

    encoder = kmodels.Model(x, z_mu)
    z_train = encoder.predict(norm_x_train, batch_size=batch_size)
    z_test = encoder.predict(norm_x_test, batch_size=batch_size)

    z_train = z_train.reshape(-1, 22, 500, 1).transpose(0, 2, 1, 3)
    z_test = z_test.reshape(-1, 22, 500, 1).transpose(0, 2, 1, 3)
    # now pass encoded input into cnn

    size = int(25 * relative_size)
    conv1 = layers.Conv2D(size,
                          kernel_size=(10, 1),
                          strides=1,
                          kernel_regularizer=regularizers.l2(reg))
    conv2 = layers.Conv2D(size,
                          kernel_size=(1, 22),
                          kernel_regularizer=regularizers.l2(reg))
    perm1 = layers.Permute((1, 3, 2))
    pool1 = layers.AveragePooling2D(pool_size=(3, 1))
    drop1 = layers.Dropout(dropout)

    model = keras.models.Sequential()

    model.add(conv1)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(conv2)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(perm1)
    model.add(pool1)
    model.add(drop1)

    conv3 = layers.Conv2D(2 * size,
                          kernel_size=(10, size),
                          kernel_regularizer=regularizers.l2(reg))
    model.add(layers.ELU(alpha))
    perm2 = layers.Permute((1, 3, 2))
    pool2 = layers.AveragePooling2D(pool_size=(3, 1))
    drop2 = layers.Dropout(dropout)

    model.add(conv3)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(perm2)
    model.add(pool2)
    model.add(drop2)

    conv4 = layers.Conv2D(4 * size,
                          kernel_size=(10, 2 * size),
                          kernel_regularizer=regularizers.l2(reg))
    perm3 = layers.Permute((1, 3, 2))
    pool3 = layers.AveragePooling2D(pool_size=(3, 1))
    drop3 = layers.Dropout(dropout)

    model.add(conv4)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(perm3)
    model.add(pool3)
    model.add(drop3)

    conv5 = layers.Conv2D(8 * size,
                          kernel_size=(10, 4 * size),
                          kernel_regularizer=regularizers.l2(reg))
    perm4 = layers.Permute((1, 3, 2))
    pool4 = layers.AveragePooling2D(pool_size=(3, 1))
    drop4 = layers.Dropout(dropout)

    model.add(conv5)
    model.add(layers.ELU(alpha))
    model.add(layers.BatchNormalization())
    model.add(perm4)
    model.add(pool4)
    model.add(drop4)

    model.add(layers.Flatten())

    model.add(layers.Dense(4, name='dense_logits'))
    model.add(layers.Activation('softmax', dtype='float32',
                                name='predictions'))

    if optim == 'Adam':
        optimizer = keras.optimizers.Adam(learning_rate,
                                          beta_1=0.85,
                                          beta_2=0.92,
                                          amsgrad=True)
    elif optim == 'RMSprop':
        optimizer = keras.optimizers.RMSprop(learning_rate)
    else:
        optimizer = keras.optimizers.SGD(learning_rate, nesterov=True)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    history = model.fit(z_train,
                        y_train,
                        batch_size=20,
                        epochs=epochs,
                        validation_split=0.2,
                        verbose=1)
    test_scores = model.evaluate(z_test, y_test, verbose=2)
    print('Test loss:', test_scores[0])
    print('Test accuracy:', test_scores[1])

    plot(history)

    return model
 def __init__(self, num_caps, out_length, **kwargs):
     super(Condense, self).__init__(**kwargs)
     self.sparse_extraction = CapsuleMapping(num_caps=num_caps,
                                             caps_length=out_length)
     self.normal = layers.LayerNormalization()
     self.activation = layers.ELU()
Example #28
def build_stixel_net(input_shape=(370, 800, 3)):
    """
    input_shape -> (height, width, channel)
    """
    img_input = keras.Input(shape=input_shape)

    x = layers.Conv2D(
        64, (3, 3), activation="relu", padding="same", name="block1_conv1"
    )(img_input)
    x = layers.Conv2D(
        64, (3, 3), activation="relu", padding="same", name="block1_conv2"
    )(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block1_pool")(x)

    # Block 2
    x = layers.Conv2D(
        128, (3, 3), activation="relu", padding="same", name="block2_conv1"
    )(x)
    x = layers.Conv2D(
        128, (3, 3), activation="relu", padding="same", name="block2_conv2"
    )(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block2_pool")(x)

    # Block 3
    x = layers.Conv2D(
        256, (3, 3), activation="relu", padding="same", name="block3_conv1"
    )(x)
    x = layers.Conv2D(
        256, (3, 3), activation="relu", padding="same", name="block3_conv2"
    )(x)
    x = layers.Conv2D(
        256, (3, 3), activation="relu", padding="same", name="block3_conv3"
    )(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block3_pool")(x)

    # Block 4
    x = layers.Conv2D(
        512, (3, 3), activation="relu", padding="same", name="block4_conv1"
    )(x)
    x = layers.Conv2D(
        512, (3, 3), activation="relu", padding="same", name="block4_conv2"
    )(x)
    x = layers.Conv2D(
        512, (3, 3), activation="relu", padding="same", name="block4_conv3"
    )(x)

    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding="same")(x)
    x = layers.ELU()(x)
    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding="same")(x)
    x = layers.ELU()(x)
    x = layers.MaxPooling2D((2, 1), strides=(2, 1))(x)

    x = layers.Dropout(0.4)(x)

    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding="same")(x)
    x = layers.ELU()(x)
    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding="same")(x)
    x = layers.ELU()(x)
    x = layers.MaxPooling2D((2, 1), strides=(2, 1), padding="same")(x)

    x = layers.Dropout(0.4)(x)

    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding="same")(x)
    x = layers.ELU()(x)
    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding="same")(x)
    x = layers.ELU()(x)
    x = layers.MaxPooling2D((2, 1), strides=(2, 1))(x)

    x = layers.Dropout(0.4)(x)

    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding="same")(x)
    x = layers.ELU()(x)
    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding="same")(x)
    x = layers.ELU()(x)
    x = layers.MaxPooling2D((2, 1), strides=(2, 1))(x)

    x = layers.Dropout(0.4)(x)

    x = layers.Conv2D(2048, (3, 1), strides=(1, 1), padding="valid")(x)
    x = layers.ELU()(x)
    x = layers.Conv2D(2048, (1, 3), strides=(1, 1), padding="same")(x)
    x = layers.ELU()(x)
    x = layers.Conv2D(2048, (1, 1), strides=(1, 1))(x)
    x = layers.ELU()(x)

    x = layers.Dropout(0.4)(x)

    x = layers.Conv2D(50, (1, 1), strides=(1, 1), activation="softmax")(x)

    x = layers.Reshape((100, 50))(x)

    model = models.Model(inputs=img_input, outputs=x)

    return model
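
A minimal usage sketch (assumed, using the default input shape):

model = build_stixel_net()
model.summary()   # output shape is (None, 100, 50) from the final Reshape layer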