Example #1
    def _mlp_selu(self):
        """
        Create a Multi-Layer Perceptron model with scaled exponential linear units (SELU) and alpha dropout.
        :returns: model
        :rtype: keras.models.Sequential
        """

        kernel_initializer = "lecun_normal"
        activation = "selu"

        model = Sequential()
        model.add(
            Dense(self.layers[0],
                  input_dim=self.input_dim,
                  activation=activation,
                  kernel_regularizer=self._reg,
                  kernel_initializer=kernel_initializer))
        model.add(AlphaDropout(self.dropout_perc))
        for i in range(1, len(self.layers) - 1):
            model.add(
                Dense(self.layers[i],
                      activation=activation,
                      kernel_regularizer=self._reg,
                      kernel_initializer=kernel_initializer))
            model.add(AlphaDropout(self.dropout_perc))
        self._add_last_layer(model)

        return model
Example #2
def create_cnn(num_classes: int = 2) -> tf.keras.Model:
    x = Input(shape=(256, ), dtype="int64")
    h = Embedding(en2vec.corpus_size + 1, 128, input_length=256)(x)

    conv1 = Convolution1D(filters=256, kernel_size=10, activation="tanh")(h)
    conv2 = Convolution1D(filters=256, kernel_size=7, activation="tanh")(h)
    conv3 = Convolution1D(filters=256, kernel_size=5, activation="tanh")(h)
    conv4 = Convolution1D(filters=256, kernel_size=3, activation="tanh")(h)

    h = Concatenate()([
        GlobalMaxPooling1D()(conv1),
        GlobalMaxPooling1D()(conv2),
        GlobalMaxPooling1D()(conv3),
        GlobalMaxPooling1D()(conv4),
    ])

    h = Dense(1024, activation="selu", kernel_initializer="lecun_normal")(h)
    h = AlphaDropout(0.1)(h)
    h = Dense(1024, activation="selu", kernel_initializer="lecun_normal")(h)
    h = AlphaDropout(0.1)(h)

    y = Dense(num_classes, activation="softmax")(h)

    model = Model(x, y)
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy", AUC()])
    return model
Example #3
def szubert_base_network(input_shape):
    '''A small, quick-to-train base network. The default for Ivis.'''
    return Sequential([
        SeluDense(128, input_shape=input_shape),
        AlphaDropout(0.1),
        SeluDense(128),
        AlphaDropout(0.1),
        SeluDense(128)
    ])
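Note: SeluDense is not defined in this snippet. Judging from the functional variant in Example #4, it is presumably a thin wrapper around Dense with SELU activation and LeCun-normal initialization. A minimal sketch of such a wrapper (an assumption, not code from the source):

from functools import partial
from tensorflow.keras.layers import Dense

# Hypothetical helper: Dense pre-configured for self-normalizing (SELU) networks.
SeluDense = partial(Dense, activation='selu', kernel_initializer='lecun_normal')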
Example #4
def szubert_base_network(input_shape):
    '''A small, quick-to-train base network. The default for Ivis.'''
    inputs = Input(shape=input_shape)
    x = Dense(128, activation='selu',
              kernel_initializer='lecun_normal')(inputs)
    x = AlphaDropout(0.1)(x)
    x = Dense(128, activation='selu', kernel_initializer='lecun_normal')(x)
    x = AlphaDropout(0.1)(x)
    x = Dense(128, activation='selu', kernel_initializer='lecun_normal')(x)
    return Model(inputs, x)
Example #5
def maaten_base_network(input_shape):
    """Base network to be shared (eq. to feature extraction)."""
    inputs = Input(shape=input_shape)
    x = Dense(500, activation='selu',
              kernel_initializer='lecun_normal')(inputs)
    x = AlphaDropout(0.1)(x)
    x = Dense(500, activation='selu', kernel_initializer='lecun_normal')(x)
    x = AlphaDropout(0.1)(x)
    x = Dense(2000, activation='selu', kernel_initializer='lecun_normal')(x)
    return Model(inputs, x)
Example #6
def szubert_base_network(input_shape):
    '''Base network to be shared (eq. to feature extraction).
    '''
    inputs = Input(shape=input_shape)
    x = Dense(128, activation='selu',
              kernel_initializer='lecun_normal')(inputs)
    x = AlphaDropout(0.1)(x)
    x = Dense(128, activation='selu', kernel_initializer='lecun_normal')(x)
    x = AlphaDropout(0.1)(x)
    x = Dense(128, activation='selu', kernel_initializer='lecun_normal')(x)
    return Model(inputs, x)
Example #7
def hinton_base_network(input_shape):
    '''A base network inspired by the autoencoder architecture published in Hinton's paper
    'Reducing the Dimensionality of Data with Neural Networks' (https://www.cs.toronto.edu/~hinton/science.pdf)'''
    inputs = Input(shape=input_shape)
    x = Dense(2000, activation='selu',
              kernel_initializer='lecun_normal')(inputs)
    x = AlphaDropout(0.1)(x)
    x = Dense(1000, activation='selu', kernel_initializer='lecun_normal')(x)
    x = AlphaDropout(0.1)(x)
    x = Dense(500, activation='selu', kernel_initializer='lecun_normal')(x)
    return Model(inputs, x)
Example #8
def hinton_base_network(input_shape):
    '''A base network inspired by the autoencoder architecture published in Hinton's paper
    'Reducing the Dimensionality of Data with Neural Networks'
    (https://www.cs.toronto.edu/~hinton/science.pdf)'''
    return Sequential([
        SeluDense(2000, input_shape=input_shape),
        AlphaDropout(0.1),
        SeluDense(1000),
        AlphaDropout(0.1),
        SeluDense(500)
    ])
Example #9
def maaten_base_network(input_shape):
    '''A base network inspired by the network architecture published in Maaten's t-SNE paper
    'Learning a Parametric Embedding by Preserving Local Structure'
    (https://lvdmaaten.github.io/publications/papers/AISTATS_2009.pdf)'''
    return Sequential([
        SeluDense(500, input_shape=input_shape),
        AlphaDropout(0.1),
        SeluDense(500),
        AlphaDropout(0.1),
        SeluDense(2000)
    ])
Example #10
def maaten_base_network(input_shape):
    '''A base network inspired by the network architecture published in Maaten's t-SNE paper
    'Learning a Parametric Embedding by Preserving Local Structure'
    (https://lvdmaaten.github.io/publications/papers/AISTATS_2009.pdf)'''
    inputs = Input(shape=input_shape)
    x = Dense(500, activation='selu',
              kernel_initializer='lecun_normal')(inputs)
    x = AlphaDropout(0.1)(x)
    x = Dense(500, activation='selu', kernel_initializer='lecun_normal')(x)
    x = AlphaDropout(0.1)(x)
    x = Dense(2000, activation='selu', kernel_initializer='lecun_normal')(x)
    return Model(inputs, x)
Example #11
def base_network(input_shape):
    '''Base network to be shared (eq. to feature extraction).
    '''
    inputs = Input(shape=input_shape)
    n_dim = round(0.75 * input_shape[0])
    x = Dense(n_dim, activation='selu',
              kernel_initializer='lecun_normal')(inputs)
    x = AlphaDropout(0.25)(x)
    x = Dense(n_dim, activation='selu', kernel_initializer='lecun_normal')(x)
    x = AlphaDropout(0.25)(x)
    x = Dense(n_dim, activation='selu', kernel_initializer='lecun_normal')(x)
    return Model(inputs, x)
Example #12
    def _build_model(self):
        """Internal method that defines the structure of the DNN
        
        Returns
        -------
        tensorflow.keras.models.Model
            A neural network model using keras and tensorflow
        """
        inputShape = (None, self.n_features)

        past_data = Input(batch_shape=inputShape)

        past_Dense = past_data
        if self.activation == 'selu':
            self.initializer = 'lecun_normal'

        for k, neurons in enumerate(self.neurons):

            if self.activation == 'LeakyReLU':
                past_Dense = Dense(neurons,
                                   activation='linear',
                                   batch_input_shape=inputShape,
                                   kernel_initializer=self.initializer,
                                   kernel_regularizer=self._reg(
                                       self.lambda_reg))(past_Dense)
                past_Dense = LeakyReLU(alpha=.001)(past_Dense)

            elif self.activation == 'PReLU':
                past_Dense = Dense(neurons,
                                   activation='linear',
                                   batch_input_shape=inputShape,
                                   kernel_initializer=self.initializer,
                                   kernel_regularizer=self._reg(
                                       self.lambda_reg))(past_Dense)
                past_Dense = PReLU()(past_Dense)

            else:
                past_Dense = Dense(neurons,
                                   activation=self.activation,
                                   batch_input_shape=inputShape,
                                   kernel_initializer=self.initializer,
                                   kernel_regularizer=self._reg(
                                       self.lambda_reg))(past_Dense)

            if self.batch_normalization:
                past_Dense = BatchNormalization()(past_Dense)

            if self.dropout > 0:
                if self.activation == 'selu':
                    past_Dense = AlphaDropout(self.dropout)(past_Dense)
                else:
                    past_Dense = Dropout(self.dropout)(past_Dense)

        output_layer = Dense(self.outputShape,
                             kernel_initializer=self.initializer,
                             kernel_regularizer=self._reg(
                                 self.lambda_reg))(past_Dense)
        model = Model(inputs=[past_data], outputs=[output_layer])

        return model
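The branching above follows a common convention: advanced activations (LeakyReLU, PReLU) are attached as separate layers after a linear Dense, while 'selu' switches the initializer to lecun_normal and swaps Dropout for AlphaDropout so the network stays self-normalizing. A condensed, standalone sketch of that dispatch (function name and defaults are illustrative, not from the source):

from tensorflow.keras.layers import Dense, LeakyReLU, PReLU, Dropout, AlphaDropout

def dense_block(x, units, activation='relu', dropout=0.0, initializer='glorot_uniform'):
    if activation == 'selu':
        initializer = 'lecun_normal'  # SELU expects LeCun-normal initialization
    if activation in ('LeakyReLU', 'PReLU'):
        # Advanced activations are applied as separate layers after a linear Dense.
        x = Dense(units, activation='linear', kernel_initializer=initializer)(x)
        x = LeakyReLU(alpha=0.001)(x) if activation == 'LeakyReLU' else PReLU()(x)
    else:
        x = Dense(units, activation=activation, kernel_initializer=initializer)(x)
    if dropout > 0:
        # AlphaDropout preserves the self-normalizing property; plain Dropout otherwise.
        x = AlphaDropout(dropout)(x) if activation == 'selu' else Dropout(dropout)(x)
    return x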
Example #13
def apply_dropout(inp, rate, dropout_type='standard', name=None):
    '''Helper function to add a dropout layer of a specified type to a model

    Parameters:
    ----------
    inp: tensor
        The input tensor
    rate: float
        The rate parameter of the dropout (proportion of units dropped)
    dropout_type: str
        The type of the dropout. Allowed values are ['standard', 'gaussian', 'alpha', 'none'], which respectively
        correspond to the Dropout, GaussianDropout, and AlphaDropout keras layers, or no dropout. The default is
        'standard'
    name: str
        This string is passed as the name parameter when constructing the layer

    Returns:
    -------
    tensor
        The output tensor after application of the dropout layer
    '''

    if dropout_type == 'standard':
        output = Dropout(rate, name=name)(inp)
    elif dropout_type == 'gaussian':
        output = GaussianDropout(rate, name=name)(inp)
    elif dropout_type == 'alpha':
        output = AlphaDropout(rate, name=name)(inp)
    elif dropout_type == 'none':
        output = inp
    else:
        raise ValueError('Unrecognised dropout type {}'.format(dropout_type))
    return output
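A hypothetical usage of this helper in a functional model, assuming apply_dropout and its dropout-layer imports from the snippet above are in scope (layer sizes and names here are illustrative, not from the source):

from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

inputs = Input(shape=(64,))
hidden = Dense(128, activation='selu', kernel_initializer='lecun_normal')(inputs)
hidden = apply_dropout(hidden, rate=0.1, dropout_type='alpha', name='alpha_drop_1')
outputs = Dense(1, activation='sigmoid')(hidden)
model = Model(inputs, outputs)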
Example #14
 def block(x):
     for i in range(n_layers):
         x = Dense(n_units, kernel_regularizer=l2(l2_lambda))(x)
         if batch_norm: x = BatchNormalization()(x)
         x = activation()(x)
         if dropout_prob: x = AlphaDropout(dropout_prob)(x)  # apply dropout to the running tensor
     return x
Example #15
    def build(self):
        conv_layers = [[256, 10], [256, 7], [256, 5], [256, 3]]
        fully_connected_layers = [1024, 1024]
        input = Input(shape=(self.max_sequence_len, ),
                      dtype='int32',
                      name='input')
        embedded_sequence = self.embedding_layer(input)

        convolution_output = []
        for num_filters, filter_width in conv_layers:
            conv = Convolution1D(filters=num_filters,
                                 kernel_size=filter_width,
                                 activation='tanh',
                                 name='Conv1D_{}_{}'.format(
                                     num_filters,
                                     filter_width))(embedded_sequence)
            pool = GlobalMaxPooling1D(name='MaxPoolingOverTime_{}_{}'.format(
                num_filters, filter_width))(conv)
            convolution_output.append(pool)
        x = Concatenate()(convolution_output)
        for fl in fully_connected_layers:
            x = Dense(fl, activation='selu',
                      kernel_initializer='lecun_normal')(x)
            x = AlphaDropout(0.5)(x)

        output = Dense(self.class_len, activation='sigmoid')(x)
        model = Model(inputs=input, outputs=output)
        return model
Example #16
def lenet(input_shape: Tuple[int, ...], output_shape: Tuple[int, ...]) -> Model:
    num_classes = output_shape[0]

    ##### Your code below (Lab 2)

    model = Sequential()

    print(f'len(input_shape) is {len(input_shape)}')  # this duplicates output from rexp.py
    if len(input_shape) < 3:
        model.add(
            Lambda(lambda x: tf.expand_dims(x, -1), input_shape=input_shape))
        input_shape = (input_shape[0], input_shape[1], 1)

    # selu option:

    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               kernel_initializer='lecun_normal',
               activation='selu',
               input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='selu'))
    '''
    # relu option:
    model.add(Conv2D(32, kernel_size = (3, 3), activation = 'relu', input_shape = input_shape))
    model.add(Conv2D(64, (3, 3), activation = 'relu'))
    '''
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(AlphaDropout(0.04))
    # model.add(Dropout(0.08))

    # added conv2d layer:
    # model.add(Conv2D(32, (3, 3), activation = 'selu'))
    # model.add(AlphaDropout(0.03))

    # model.add(MaxPooling2D(pool_size = (2, 2)))
    # model.add(AlphaDropout(0.07))

    model.add(Flatten())
    model.add(Dense(128, activation='selu'))
    model.add(AlphaDropout(0.1))
    model.add(Dense(num_classes, activation='softmax'))
    ##### Your code above (Lab 2)

    return model
Example #17
def create_rnn(num_classes: int = 2) -> tf.keras.Model:
    model = Sequential(name="dns_rnn")
    model.add(Embedding(en2vec.corpus_size + 1, 128, input_length=256))
    model.add(LSTM(256, return_sequences=True))
    model.add(LSTM(128))
    model.add(Dense(1024, activation="selu",
                    kernel_initializer="lecun_normal"))
    model.add(AlphaDropout(0.1))
    model.add(Dense(1024, activation="selu",
                    kernel_initializer="lecun_normal"))
    model.add(AlphaDropout(0.1))
    model.add(Dense(num_classes, activation="softmax"))

    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy", AUC()])
    return model
Example #18
 def block(x):
     for i in range(n_layers):
         x = Conv1D(n_filters,
                    filter_size,
                    padding="same",
                    kernel_regularizer=l2(l2_lambda))(x)
         if batch_norm: x = BatchNormalization()(x)
         x = activation()(x)
         if dropout_prob: x = AlphaDropout(dropout_prob)(x)  # apply dropout to the running tensor
     return x
Example #19
def create_vosoughi_rcnn(num_classes: int = 2) -> tf.keras.Model:
    model = Sequential(name="dns_rcnn")
    model.add(Embedding(en2vec.corpus_size + 1, 128, input_length=256))
    model.add(Conv1D(filters=128, kernel_size=8, activation="relu"))
    model.add(GlobalMaxPooling1D())
    model.add(Reshape((1, 128)))
    model.add(LSTM(128))
    model.add(Dense(1024, activation="selu",
                    kernel_initializer="lecun_normal"))
    model.add(AlphaDropout(0.1))
    model.add(Dense(1024, activation="selu",
                    kernel_initializer="lecun_normal"))
    model.add(AlphaDropout(0.1))
    model.add(Dense(num_classes, activation="softmax"))

    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy", AUC()])
    return model
Example #20
def mlp(
        input_shape: Tuple[int, ...],
        output_shape: Tuple[int, ...],
        layer_size: int = 128,
        # dropout_amount_1: float = 0,
        dropout_amount_2: float = 0.04,
        dropout_amount_3: float = 0.08,
        num_layers: int = 3) -> Model:
    # Despite the name, this variant prepends convolutional layers to the fully-connected (SELU) stack and ends with softmax predictions; num_layers is currently unused.

    num_classes = output_shape[0]
    model = Sequential()

    # model.add(Flatten(input_shape = input_shape))
    # new:
    if len(input_shape) < 3:
        model.add(
            Lambda(lambda x: tf.expand_dims(x, -1), input_shape=input_shape))
        input_shape = (input_shape[0], input_shape[1], 1)
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               kernel_initializer='lecun_normal',
               strides=(1, 1),
               activation='selu',
               input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(64, (5, 5), activation='selu'))
    model.add(Conv2D(64, (5, 5), activation='selu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(layer_size, activation='selu'))
    # model.add(Dropout(dropout_amount_1))
    model.add(BatchNormalization())
    model.add(Dense(layer_size, activation='selu'))
    model.add(AlphaDropout(dropout_amount_2))
    model.add(BatchNormalization())
    model.add(Dense(layer_size, activation='selu'))
    model.add(AlphaDropout(dropout_amount_3))
    model.add(BatchNormalization())
    model.add(Dense(num_classes, activation='softmax'))

    return model
Example #21
def classifier_model(input_shape):
    # Input
    X_input = Input(input_shape)

    # Layer(s)
    X = AlphaDropout(rate=0.12)(X_input)
    X = Dense(
        256,
        activation="selu",
        #kernel_regularizer=tf.keras.regularizers.l1_l2(l1=l1reg, l2=l2reg),
        #kernel_regularizer=tf.keras.regularizers.l2(l2reg),
        #kernel_constraint=tf.keras.constraints.max_norm(0.7),
        kernel_initializer='lecun_normal',  # alternatives: he_normal, he_uniform, lecun_uniform
        name='Dense1')(X)

    X = AlphaDropout(rate=0.10)(X)
    X = Dense(128,
              activation="selu",
              kernel_initializer='lecun_normal',
              name='Dense2')(X)

    X = AlphaDropout(rate=0.10)(X)
    X = Dense(32,
              activation="selu",
              kernel_initializer='lecun_normal',
              name='Dense3')(X)

    # #X = AlphaDropout(rate=0.10)(X)
    # X = Dense(16, activation="selu",
    #           kernel_initializer='lecun_normal',
    #           name = 'Dense4')(X)

    # Output
    X_output = Dense(1, activation='sigmoid', name='output_layer')(X)

    # Build model
    model = Model(inputs=X_input, outputs=X_output, name='classifier_model')

    return model
Example #22
 def expanding_block(self, current_level):
     """
     Create and return the expanding block keras layer for the current level.
     :param current_level: The current level.
     :return: The keras.layers.Layer.
     """
     layers = []
     for i in range(self.repeats):
         layers.append(self.conv(current_level, str(i)))
         if self.alpha_dropout:
             layers.append(AlphaDropout(self.dropout_ratio))
         else:
             layers.append(Dropout(self.dropout_ratio))
     return Sequential(layers, name='expanding' + str(current_level))
Example #23
 def __init__(self,
              output_channels: int,
              input_channels: Optional[int] = None,
              kernel_size: int = 3,
              pooling_size: int = 1,
              dropout_rate: float = 0.0):
     super().__init__()
     dimension_decrease_factor = 4
     kernel_initializer = LecunNormal()
     self.dimension_decrease_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=1,
         activation=selu,
         kernel_initializer=kernel_initializer)
     self.convolutional_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=kernel_size,
         activation=selu,
         padding='same',
         kernel_initializer=kernel_initializer)
     self.dimension_increase_layer = Convolution1D(
         output_channels,
         kernel_size=1,
         activation=selu,
         kernel_initializer=kernel_initializer)
     if pooling_size > 1:
         self.pooling_layer = AveragePooling1D(pool_size=pooling_size,
                                               padding='same')
     else:
         self.pooling_layer = None
     if input_channels is not None and output_channels != input_channels:
         if output_channels < input_channels:
              raise NotImplementedError(
                  f'Residual blocks with fewer output channels than input channels are not '
                  f'implemented. Output channels was {output_channels} and input was '
                  f'{input_channels}.')
         self.dimension_change_permute0 = Permute((2, 1))
         self.dimension_change_layer = ZeroPadding1D(
             padding=(0, output_channels - input_channels))
         self.dimension_change_permute1 = Permute((2, 1))
     else:
         self.dimension_change_layer = None
     if dropout_rate > 0:
         self.dropout_layer = AlphaDropout(rate=dropout_rate,
                                           noise_shape=(50, 1,
                                                        output_channels))
     else:
         self.dropout_layer = None
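The snippet shows only the constructor; the forward pass is not included. One plausible call() method for this residual block, assuming the usual bottleneck-plus-skip-connection layout (an assumption, not code from the source; it would be added inside the same class):

def call(self, x, training=False):
    # Assumed forward pass: bottleneck conv path plus an identity/zero-padded skip path.
    y = self.dimension_decrease_layer(x)
    y = self.convolutional_layer(y)
    y = self.dimension_increase_layer(y)
    if self.pooling_layer is not None:
        x = self.pooling_layer(x)  # downsample both paths identically
        y = self.pooling_layer(y)
    if self.dimension_change_layer is not None:
        x = self.dimension_change_permute0(x)  # zero-pad skip-path channels to match
        x = self.dimension_change_layer(x)
        x = self.dimension_change_permute1(x)
    if self.dropout_layer is not None:
        y = self.dropout_layer(y, training=training)
    return x + y  # residual connection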
Example #24
    def _build_model(self) -> None:
        """
        Build and compile the Character Level CNN model
        :return: None
        """
        # Input layer
        inputs = Input(shape=(self.input_sz,), name='sent_input', dtype='int16')

        # Embedding layers
        x = Embedding(self.alphabet_sz + 1, self.emb_sz, input_length=self.input_sz)(inputs)
        x = Reshape((self.input_sz, self.emb_sz))(x)

        # Convolutional layers
        for cl in self.conv_layers:
            x = Conv1D(cl[0], cl[1], kernel_initializer="lecun_normal", padding="causal", use_bias=False)(x)
            x = BatchNormalization(scale=False)(x)
            x = Activation('selu')(x)
            x = AlphaDropout(0.5)(x)

            if cl[2] != -1:
                x = MaxPooling1D(cl[2], cl[3])(x)
            if cl[4] != -1:
                x = self._squeeze_and_excitation_block(input_data = x, ratio = cl[4])

        # Flatten the features
        x = Flatten()(x)

        # Fully connected layers
        for fl in self.fc_layers:
            x = Dense(fl)(x)
            x = ThresholdedReLU(self.threshold)(x)
            x = Dropout(self.dropout_p)(x)

        # Output layer
        predictions = Dense(self.num_of_classes, activation="softmax")(x)

        # Build and compile the model
        model = Model(inputs, predictions)

        # Compile
        model.compile(optimizer='nadam', loss=self.loss, metrics=['accuracy'])
        self.model = model
        self.model.summary()
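The helper self._squeeze_and_excitation_block is not shown in this snippet. A minimal sketch of a standard squeeze-and-excitation block for 1D feature maps, which is an assumption about what the helper likely does (following Hu et al.'s SE design), keeping the keyword arguments used in the call above:

from tensorflow.keras.layers import GlobalAveragePooling1D, Dense, Reshape, Multiply

def _squeeze_and_excitation_block(self, input_data, ratio=16):
    channels = input_data.shape[-1]
    # Squeeze: one global descriptor per channel.
    squeezed = GlobalAveragePooling1D()(input_data)
    # Excite: bottleneck MLP produces per-channel gates in [0, 1].
    gates = Dense(channels // ratio, activation='relu')(squeezed)
    gates = Dense(channels, activation='sigmoid')(gates)
    gates = Reshape((1, channels))(gates)
    # Rescale the original feature maps channel-wise.
    return Multiply()([input_data, gates])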
Example #25
    def _create_model(self, X):
        # Input layer.
        inp = Input(shape=(X.shape[1], ))

        # Dense layers.
        layer = inp
        for size in self.dense_sizes:
            layer = Dense(size,
                          activation="selu",
                          kernel_initializer="lecun_normal")(layer)
            layer = AlphaDropout(self.dropout_rate)(layer)

        # Output layer.
        out = Dense(1)(layer)

        # Create the model.
        self.model = Model(inputs=inp, outputs=out)
        self.model.compile(optimizer="adam", loss="mean_absolute_error")

        # Print summary.
        self.model.summary()
Example #26
y_train = y_train[10000:]

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_val.shape[0], 'val samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Flatten())
model.add(Dense(512, activation='selu',kernel_initializer='lecun_normal',bias_initializer='zeros'))
model.add(AlphaDropout(0.05))
model.add(Dense(256, activation='selu',kernel_initializer='lecun_normal',bias_initializer='zeros'))
model.add(AlphaDropout(0.05))
model.add(Dense(num_classes, activation='softmax',kernel_initializer='lecun_normal',bias_initializer='zeros'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(learning_rate=0.001),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_val, y_val))

score = model.evaluate(x_test, y_test, verbose=0)
Example #27
def alpha_dropout(rate):
    return AlphaDropout(rate=rate, seed=SEED)
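A hypothetical usage sketch, assuming SEED is a module-level constant defined elsewhere in the project (the constant value and tensor shape below are illustrative only):

import tensorflow as tf

SEED = 42  # assumption: the snippet references a project-wide SEED constant

drop = alpha_dropout(0.1)
outputs = drop(tf.ones((4, 8)), training=True)  # dropout is only active when training=True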
Example #28
def build_model(encoders):
    """Builds and compiles the model from scratch.

    # Arguments
        encoders: dict of encoders (used to set size of text/categorical inputs)

    # Returns
        model: A compiled model which can be used to train or predict.
    """

    # make
    input_make = Input(shape=(4, ), name="input_make")

    # body
    input_body_size = len(encoders['body_encoder'].classes_)
    input_body = Input(
        shape=(input_body_size if input_body_size != 2 else 1, ),
        name="input_body")

    # mileage
    input_mileage = Input(shape=(4, ), name="input_mileage")

    # engV
    input_engv = Input(shape=(4, ), name="input_engv")

    # engType
    input_engtype_size = len(encoders['engtype_encoder'].classes_)
    input_engtype = Input(
        shape=(input_engtype_size if input_engtype_size != 2 else 1, ),
        name="input_engtype")

    # registration
    input_registration_size = len(encoders['registration_encoder'].classes_)
    input_registration = Input(shape=(input_registration_size if
                                      input_registration_size != 2 else 1, ),
                               name="input_registration")

    # year
    input_year = Input(shape=(4, ), name="input_year")

    # drive
    input_drive_size = len(encoders['drive_encoder'].classes_)
    input_drive = Input(
        shape=(input_drive_size if input_drive_size != 2 else 1, ),
        name="input_drive")

    # Combine all the inputs into a single layer
    concat = concatenate([
        input_make, input_body, input_mileage, input_engv, input_engtype,
        input_registration, input_year, input_drive
    ],
                         name="concat")

    # Multilayer Perceptron (MLP) to find interactions between all inputs
    hidden = Dense(64,
                   activation='selu',
                   name='hidden_1',
                   kernel_regularizer=None)(concat)
    hidden = AlphaDropout(0.5, name="dropout_1")(hidden)

    for i in range(2 - 1):
        hidden = Dense(128,
                       activation="selu",
                       name="hidden_{}".format(i + 2),
                       kernel_regularizer=None)(hidden)
        hidden = AlphaDropout(0.5, name="dropout_{}".format(i + 2))(hidden)
    output = Dense(1, name="output", kernel_regularizer=l2(1e-2))(hidden)

    # Build and compile the model.
    model = Model(inputs=[
        input_make, input_body, input_mileage, input_engv, input_engtype,
        input_registration, input_year, input_drive
    ],
                  outputs=[output])
    model.compile(loss="msle",
                  optimizer=AdamWOptimizer(learning_rate=0.1,
                                           weight_decay=0.025))

    return model
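A hypothetical sketch of assembling the encoders dict that build_model expects. Only the key names and the use of .classes_ come from the snippet; the category values are made up, and compiling still requires the snippet's own imports (including AdamWOptimizer) to be available in the original environment:

from sklearn.preprocessing import LabelEncoder

encoders = {}
for key, values in {
    'body_encoder': ['sedan', 'hatch', 'crossover'],
    'engtype_encoder': ['Petrol', 'Diesel', 'Gas'],
    'registration_encoder': ['yes', 'no'],
    'drive_encoder': ['front', 'rear', 'full'],
}.items():
    # build_model reads len(encoder.classes_) to size the categorical inputs.
    encoders[key] = LabelEncoder().fit(values)

model = build_model(encoders)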
Example #29
model = Sequential()
model.add(
    Conv2D(32,
           kernel_size=(3, 3),
           activation='selu',
           input_shape=input_shape,
           kernel_initializer='lecun_normal',
           bias_initializer='zeros'))
model.add(
    Conv2D(64, (3, 3),
           activation='selu',
           kernel_initializer='lecun_normal',
           bias_initializer='zeros'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(AlphaDropout(0.25))
model.add(Flatten())
model.add(
    Dense(512,
          activation='selu',
          kernel_initializer='lecun_normal',
          bias_initializer='zeros'))
model.add(AlphaDropout(0.5))
model.add(
    Dense(num_classes,
          activation='softmax',
          kernel_initializer='lecun_normal',
          bias_initializer='zeros'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta())
Example #30
def line_lstm_ctc(input_shape, output_shape, window_width=28, window_stride=14):
    image_height, image_width = input_shape
    output_length, num_classes = output_shape

    num_windows = int((image_width - window_width) / window_stride) + 1
    if num_windows < output_length:
        raise ValueError(f'Window width/stride need to generate at least {output_length} windows (currently {num_windows})')

    image_input = Input(shape=input_shape, name='image')
    y_true = Input(shape=(output_length,), name='y_true')
    input_length = Input(shape=(1,), name='input_length')
    label_length = Input(shape=(1,), name='label_length')

    gpu_present = len(device_lib.list_local_devices()) > 1
    lstm_fn = CuDNNLSTM if gpu_present else LSTM

    # Your code should use slide_window and extract image patches from image_input.
    # Pass a convolutional model over each image patch to generate a feature vector per window.
    # Pass these features through one or more LSTM layers.
    # Convert the lstm outputs to softmax outputs.
    # Note that LSTMs expect an input of shape (batch_size, num_timesteps, feature_length).

    ##### Your code below (Lab 3)
    
    image_reshaped = Reshape((image_height, image_width, 1))(image_input)
    
    # lenet option:
    ''''''
    image_patches = Lambda(
        slide_window,
        arguments = {'window_width': window_width, 'window_stride': window_stride}
    )(image_reshaped)
    
    convnet = lenet((image_height, window_width, 1), (num_classes,))
    convnet = KerasModel(inputs = convnet.inputs, outputs = convnet.layers[-2].output)
    convnet_outputs = TimeDistributed(convnet)(image_patches)
    ''''''
    
    # straight conv to lstm w relu option:
    '''
    # conv = BatchNormalization()(image_reshaped)
    conv = Conv2D(128, (image_height, window_width), (1, window_stride), kernel_initializer = 'lecun_normal', activation = 'selu')(image_reshaped)
    conv = BatchNormalization()(conv)
    conv = AlphaDropout(0.07)(conv)
    
    # conv = MaxPooling2D(pool_size = (2, 2))(conv)
    
    # conv = Conv2D(128, (image_height, window_width), (1, window_stride), activation = 'relu')(image_reshaped)
    
    # conv = Conv2D(256, (1, window_stride), activation = 'relu')(conv)
    
    convnet_outputs = Lambda(lambda x: K.squeeze(x, 1))(conv)
    '''

    # convnet_do = AlphaDropout(0.05)(convnet_outputs)
    
    # lstm_output = Bidirectional(lstm_fn(128, return_sequences = True))(convnet_do)
    
    lstm1_output = Bidirectional(lstm_fn(128, return_sequences = True))(convnet_outputs)
    
    lstm1_do = AlphaDropout(0.04)(lstm1_output)
    
    lstm2_output = Bidirectional(lstm_fn(128, return_sequences = True))(lstm1_do)
    
    lstm2_do = AlphaDropout(0.04)(lstm2_output)
    
    ''''''
    lstm3_output = Bidirectional(lstm_fn(128, return_sequences = True))(lstm2_do)
    # softmax_output = Dense(num_classes, activation = 'softmax', name = 'softmax_output')(lstm3_output)
    ''''''
    
    lstm3_do = AlphaDropout(0.05)(lstm3_output)
    
    softmax_output = Dense(num_classes, activation = 'softmax', name = 'softmax_output')(lstm3_do)
    
    
    # highest run: Test evaluation: 0.9641768591746657

    ##### Your code above (Lab 3)

    input_length_processed = Lambda(
        lambda x, num_windows=None: x * num_windows,
        arguments={'num_windows': num_windows}
    )(input_length)

    ctc_loss_output = Lambda(
        lambda x: K.ctc_batch_cost(x[0], x[1], x[2], x[3]),
        name='ctc_loss'
    )([y_true, softmax_output, input_length_processed, label_length])

    ctc_decoded_output = Lambda(
        lambda x: ctc_decode(x[0], x[1], output_length),
        name='ctc_decoded'
    )([softmax_output, input_length_processed])

    model = KerasModel(
        inputs=[image_input, y_true, input_length, label_length],
        outputs=[ctc_loss_output, ctc_decoded_output]
    )
    return model