Example #1
    def build(self, input_shape):
        in_dim = input_shape[-1]
        latent_dim = self.latent_dim

        initializer = RandomNormal(mean=0., stddev=1.)

        transform_shape = (self.n_heads, in_dim, latent_dim)
        # projection weights
        self.query = self.add_weight("query",
                                     transform_shape,
                                     initializer=initializer,
                                     regularizer=l1(1e-6),
                                     dtype=np.float32)
        self.key = self.add_weight("key",
                                   transform_shape,
                                   initializer=initializer,
                                   regularizer=l1(1e-6),
                                   dtype=np.float32)
        self.value = self.add_weight("value",
                                     transform_shape,
                                     initializer=initializer,
                                     regularizer=l1(1e-6),
                                     dtype=np.float32)

        # attention weights
        self.W = self.add_weight("W", (self.n_heads, latent_dim, 1),
                                 initializer=initializer,
                                 dtype=np.float32)
        self.O = self.add_weight("O", (self.n_heads * latent_dim, in_dim),
                                 initializer=initializer,
                                 dtype=np.float32)
        self.P = self.add_weight("P", (input_shape[-2] - 1, in_dim),
                                 regularizer=l1(1e-6),
                                 initializer=initializer,
                                 dtype=np.float32)
        self.bias = self.add_weight("bias", (self.n_heads, ),
                                    initializer=initializer,
                                    regularizer=l2(1e-6),
                                    dtype=np.float32)
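
As a hedged aside (the layer's call() is not part of this excerpt), projection weights shaped (n_heads, in_dim, latent_dim) are typically applied with a per-head einsum; the shapes below are hypothetical, purely for orientation:

import tensorflow as tf

# Hypothetical shapes: batch=2, seq_len=5, in_dim=8, n_heads=4, latent_dim=16
x = tf.random.normal((2, 5, 8))
query = tf.random.normal((4, 8, 16))  # same layout as self.query above
q = tf.einsum('bsi,hid->bhsd', x, query)  # per-head projection
print(q.shape)  # (2, 4, 5, 16) = (batch, n_heads, seq_len, latent_dim)
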
Example #2
def build_model(n_gpu, vocab):
    with tf.device('/cpu:0'):
        # Build the network on the CPU so multi_gpu_model can replicate it on GPUs
        model = Sequential()
        model.add(
            Dense(dense_size_1,
                  input_dim=vocab.vocab_size,
                  activation='relu',
                  kernel_regularizer=regularizers.l1(regular_lambda)))
        model.add(
            Dense(dense_size_2,
                  activation='relu',
                  kernel_regularizer=regularizers.l1(regular_lambda)))
        model.add(Dense(1, activation='sigmoid'))
    if n_gpu > 1:
        model = multi_gpu_model(model, gpus=n_gpu)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', f1_score])
    model.summary()

    return model
Example #3
def get_model(train_dataset):
    model = Sequential([
        Dense(units=32,
              activation=relu,
              input_shape=(train_dataset.shape[1], )),
        Dropout(rate=.5),
        Dense(units=32,
              activation=relu,
              kernel_regularizer=l2(l2=.001),
              bias_regularizer=l1(l1=.002)),
        Dropout(rate=.5),
        Dense(units=32, activation=relu, kernel_regularizer=l1(l1=.001)),
        Dropout(rate=.5),
        Dense(units=32,
              activation=relu,
              kernel_regularizer=l1_l2(l1=.001, l2=.002)),
        Dense(units=32, activation=relu, kernel_regularizer=l1(l1=.001)),
        Dense(units=32, activation=relu, kernel_regularizer=l2(l2=.005)),
        Dense(units=1)
    ])
    model.summary()
    return model
Example #4
def GEN(input_shape=(64, 64, 3)):
    mod = mod_inp = Input(shape=input_shape)
    mod = Flatten()(mod)
    mod = Dense(
        2,
        activation="softmax",
        kernel_regularizer=regularizers.l1(0.001),
    )(mod)
    return keras.models.Model(inputs=mod_inp, outputs=mod)
Example #5
def create_base_network(input_shape):
    '''
    Base network to be shared.
    '''
    input = Input(shape=input_shape)
    x = Conv2D(32, (7, 7),
               activation='relu',
               input_shape=input_shape,
               kernel_regularizer=regularizers.l2(0.01),
               bias_regularizer=regularizers.l1(0.01))(input)
    x = MaxPooling2D()(x)
    x = Conv2D(64, (3, 3),
               activation='relu',
               kernel_regularizer=regularizers.l2(0.01),
               bias_regularizer=regularizers.l1(0.01))(x)
    x = Flatten()(x)
    x = Dense(128,
              activation='relu',
              kernel_regularizer=regularizers.l2(0.01),
              bias_regularizer=regularizers.l1(0.01))(x)

    return Model(input, x)
Example #6
  def dataclassifier(self):
    baseline = self.baseline
    x = baseline.output
    x = Flatten()(x)
    x = BatchNormalization()(x)
    x = Dense(32, activation='swish', kernel_regularizer=l1(self.lr_dense))(x)
    classification = Dense(self.multi_classes, activation='softmax', kernel_regularizer=l1(self.lr_class))(x)
    optimizer = Adam(lr=.001, amsgrad=True)

    classification = Model(baseline.input, classification)
    classification.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['categorical_accuracy'])

    return classification
Example #7
def convolutional_block(input_layer, n_filters, activation=None, filter_size=(3, 3, 3), strides=(1, 1, 1),
                        padding='same', data_format='channels_last', batch_normalization=False, dropout=None,
                        l1=None, l2=None, axis=4, use_bias=True, bn_name=None, conv_name=None):
    """
    Block of one convolutional layer with possible activation and batch normalization.

    :param input_layer: Input layer to the convolution. Keras layer.
    :param n_filters: Number of filters in the particular convolutional layer. Positive integer.
    :param activation: Activation_function after every convolution. String activation name.
    :param filter_size: Size of the convolution filter (kernel). Tuple of 3 positive integers.
    :param strides: Strides values. Tuple of 3 positive integers.
    :param padding: Used padding by convolution. Takes values: 'same' or 'valid'.
    :param data_format: Ordering of the dimensions in the inputs. Takes values: 'channel_first' or 'channel_last'.
    :param batch_normalization: If set to True, will use Batch Normalization layers after each convolution layer.
    :param dropout: percentage of weights to be dropped, float between 0 and 1.
    :param l1: L1 regularization.
    :param l2: L2 regularization.
    :param axis: Axis for batch normalization.
    :param use_bias:
    :param bn_name: Name of the batch normalization layer. String.
    :param conv_name:
    :return: Keras layer.
    """

    if l1 is not None:
        if l2 is not None:
            layer = Convolution3D(filters=n_filters, kernel_size=filter_size, strides=strides, padding=padding,
                                  data_format=data_format, kernel_regularizer=regularizers.l1_l2(l1=l1, l2=l2),
                                  use_bias=use_bias, name=conv_name)(input_layer)
        else:
            layer = Convolution3D(filters=n_filters, kernel_size=filter_size, strides=strides, padding=padding,
                                  data_format=data_format, kernel_regularizer=regularizers.l1(l1),
                                  use_bias=use_bias, name=conv_name)(input_layer)
    else:
        if l2 is not None:
            layer = Convolution3D(filters=n_filters, kernel_size=filter_size, strides=strides, padding=padding,
                                  data_format=data_format, kernel_regularizer=regularizers.l2(l2),
                                  use_bias=use_bias, name=conv_name)(input_layer)
        else:
            layer = Convolution3D(filters=n_filters, kernel_size=filter_size, strides=strides, padding=padding,
                                  data_format=data_format, use_bias=use_bias, name=conv_name)(input_layer)
    if batch_normalization:
        layer = BatchNormalization(axis=axis, name=bn_name)(
            layer)  # the axis that should be normalized (typically the features axis), integer.
        # Layer input shape: (samples, conv_dim1, conv_dim2, conv_dim3, channels)` if data_format='channels_last'
    if activation is not None:
        layer = Activation(activation=activation)(layer)
    if dropout is not None:
        layer = Dropout(dropout)(layer)
    return layer
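
A minimal usage sketch for the block above, assuming the same Keras imports it relies on; the volume shape and factors are hypothetical:

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(16, 16, 16, 1))  # hypothetical 3D volume, channels_last
out = convolutional_block(inputs, n_filters=8, activation='relu',
                          batch_normalization=True, dropout=0.2,
                          l1=1e-6, l2=1e-5)  # both factors set, so l1_l2 is chosen
model = Model(inputs, out)
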
Example #8
    def __init__(
        self,
        forecastHorizon=1,
        lookBack=32,
        architecture=[32],
        dropout=0.0,
        regularization=0.0,
        loadPath=None,
        bidirectional=True,
        convInputLayer=False,
        modelName="LSTM",
        kernelSize=1,
        filters=5,
        poolSize=2,
    ):

        np.random.seed(1)
        self.modelType = "LSTM"
        self.modelName = modelName
        self.forecastHorizon = forecastHorizon
        self.lookBack = lookBack
        self.architecture = architecture
        self.dropout = dropout
        self.regularization = l1(regularization)
        self.bidirectional = bidirectional
        self.convInputLayer = convInputLayer
        self.kernelSize = kernelSize
        self.filters = filters
        self.batchSize = 128
        self.trainingLossEvaluation = []
        self.poolSize = poolSize

        if loadPath is not None:
            self.model = load_model(loadPath)
            try:
                self.lookBack = self.model.layers[0].output_shape[0][1]
            except (IndexError, TypeError):
                self.lookBack = self.model.layers[0].output_shape[1]
            try:
                self.noTimeSeries = self.model.layers[0].input_shape[0][2]
            except (IndexError, TypeError):
                self.noTimeSeries = self.model.layers[0].input_shape[2]

            for layer in self.model.layers:
                if hasattr(layer, "rate"):
                    self.dropout = layer.rate

            if loadPath.endswith(".h5"):
                # strip a fixed nine-character path prefix and the ".h5" suffix
                self.modelName = loadPath[9:-3]

            print("Model Loaded")
Example #9
def get_keras_model(inputDim,
                    hiddenDim=128,
                    encodeDim=8,
                    batchNorm=True,
                    qBatchNorm=False,
                    l1reg=0,
                    input_batchNorm=False,
                    halfcode_layers=4,
                    fan_in_out=64,
                    **kwargs):
    """
    define the keras model
    the model based on the simple dense auto encoder 
    (128*128*128*128*8*128*128*128*128)
    """

    # Declare encoder network
    inputLayer = Input(shape=(inputDim, ))
    # NOTE: this replaces any incoming **kwargs; every Dense layer below gets this regularizer
    kwargs = {'kernel_regularizer': l1(l1reg)}

    for i in range(halfcode_layers):
        if i == 0:
            h = Dense(fan_in_out, **kwargs)(inputLayer)
        else:
            h = Dense(hiddenDim, **kwargs)(h)
        if batchNorm:
            h = BatchNormalization()(h)
        h = Activation('relu')(h)

    # Declare latent layer
    if halfcode_layers == 0:
        h = Dense(encodeDim, **kwargs)(inputLayer)
    else:
        h = Dense(encodeDim, **kwargs)(h)
    if batchNorm:
        h = BatchNormalization()(h)
    h = Activation('relu')(h)

    # Declare decoder network
    for i in range(halfcode_layers):
        if i == halfcode_layers - 1:
            h = Dense(fan_in_out, **kwargs)(h)
        else:
            h = Dense(hiddenDim, **kwargs)(h)
        if batchNorm:
            h = BatchNormalization()(h)
        h = Activation('relu')(h)

    h = Dense(inputDim, **kwargs)(h)

    return Model(inputs=inputLayer, outputs=h)
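
A usage sketch with a hypothetical input dimension; the returned model is uncompiled, and the optimizer/loss below are illustrative:

model = get_keras_model(inputDim=640, l1reg=1e-6)
model.compile(optimizer='adam', loss='mse')
model.summary()
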
Example #10
def construct_model():
    model = tf.keras.models.Sequential()

    #model.add(layers.InputLayer(input_shape=(train_set.shape[1],train_set.shape[2],train_set.shape[3]), batch_size=(BATCHSIZE)))
    model.add(
        layers.Conv2D(filters=3,
                      kernel_size=(3, 3),
                      padding="same",
                      input_shape=train_set[0].shape))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(
        layers.Conv2D(filters=16,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D((2, 2)))

    model.add(
        layers.Conv2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.MaxPool2D((2, 2)))

    model.add(
        layers.Conv2D(filters=48,
                      kernel_size=(3, 3),
                      padding='same',
                      strides=(2, 2)))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.GlobalAveragePooling2D())

    model.add(layers.Flatten())  # no-op after GlobalAveragePooling2D; output is already flat

    model.add(layers.Dense(8, kernel_regularizer=regularizers.l1(0)))  # l1(0) is effectively no regularization
    model.add(layers.Activation('relu'))

    model.add(layers.Dense(3))
    model.add(layers.Activation('softmax'))

    return model
Example #11
def create_model(ModelInfo):
    ### Defining regularization technique

    #    reg = l1(0.001)
    model = keras.Sequential()
    model.add(
        layers.Dense(
            ModelInfo['Nerouns'][0],
            input_shape=[7],
            activation=ModelInfo['Activation_Method'][0],
            #                           kernel_initializer =ModelInfo['W_Initialization_Method'][0],
            #                            bias_initializer = ModelInfo['W_Initialization_Method'][0],
            #                            activity_regularizer=ModelInfo['Reguralization'][0],
            activity_regularizer=l1(ModelInfo['Reguralization'][0]),

            #    kernel_constraint=ModelInfo['kernel_constraint'][0])),
            kernel_constraint=max_norm(ModelInfo['kernel_constraint'][0])))
    model.add(layers.Dropout(ModelInfo['Dropout_Value'][0]))
    for c in range(1, ModelInfo['Layers'][0]):
        print('Index=', c)
        model.add(
            layers.Dense(
                ModelInfo['Nerouns'][c],
                activation=tf.nn.relu,
                #                               kernel_initializer =ModelInfo['W_Initialization_Method'][0],
                #                            bias_initializer = ModelInfo['W_Initialization_Method'][0],
                #                            activity_regularizer=ModelInfo['Reguralization'][c],
                activity_regularizer=l1(ModelInfo['Reguralization'][c]),
                #                            kernel_constraint=ModelInfo['kernel_constraint'][c])),
                kernel_constraint=max_norm(
                    ModelInfo['kernel_constraint'][c])))
        model.add(layers.Dropout(ModelInfo['Dropout_Value'][c]))


#        model.add(layers.BatchNormalization()),

    model.add(layers.Dense(1, activation=ModelInfo['Activation_Method'][0]))
    return model
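
For reference, a hypothetical ModelInfo dict carrying the keys create_model reads (all values illustrative; key spellings follow the code):

ModelInfo = {
    'Layers': [3],                          # total number of hidden Dense layers
    'Nerouns': [64, 32, 16],                # units per layer
    'Activation_Method': ['relu'],
    'Reguralization': [1e-4, 1e-4, 1e-4],   # l1 activity-regularization factors
    'kernel_constraint': [3.0, 3.0, 3.0],   # max_norm limits
    'Dropout_Value': [0.2, 0.2, 0.2],
}
model = create_model(ModelInfo)
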
Example #12
def create_net(l1_regularization=None):
    # The regularization:
    regularizer = l1(l1_regularization) if l1_regularization is not None else None
    
    # The encoder:
    input_img = Input(shape=(28, 28, 1)) # 784-dimensional data
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    encoded = Conv2D(8, (3, 3), activation='relu', padding='same',
                     activity_regularizer=regularizer)(x)
    encoder = Model(input_img, encoded)

    # Features' shape is (4,4,8), i.e. 128-dimensional.

    # The decoder layers:
    decoder_input = Conv2D(8, (3, 3), activation='relu', padding='same')
    decoder_up01 = UpSampling2D((2, 2))
    decoder_conv01 = Conv2D(8, (3, 3), activation='relu', padding='same')
    decoder_up02 = UpSampling2D((2, 2))
    decoder_conv02 = Conv2D(16, (3, 3), activation='relu')
    decoder_up03 = UpSampling2D((2, 2))
    decoder_conv03 = Conv2D(1, (3, 3), activation='sigmoid', padding='same')

    def decoder_from(enc_in):
        x = decoder_input(enc_in)
        x = decoder_up01(x)
        x = decoder_conv01(x)
        x = decoder_up02(x)
        x = decoder_conv02(x)
        x = decoder_up03(x)
        return decoder_conv03(x)

    # The decoder:
    encoded_img = Input(shape=(4, 4, 8))
    decoded_test = decoder_from(encoded_img)
    decoder = Model(encoded_img, decoded_test)

    # The whole autoencoder:
    decoded = decoder_from(encoded)
    autoencoder = Model(input_img, decoded)

    # The optimization:
    autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
    
    # Returning all:
    return autoencoder, encoder, decoder
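
A brief usage sketch (the l1 factor is illustrative):

autoencoder, encoder, decoder = create_net(l1_regularization=1e-5)
autoencoder.summary()  # 28x28x1 in and out; the encoded features are (4, 4, 8)
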
Example #13
def run(input_dim, encoding_dim):

    # Input() is used to instantiate a Keras tensor

    input_layer = Input(shape=(input_dim, ))
    encoder = Dense(encoding_dim,
                    activation="tanh",
                    activity_regularizer=regularizers.l1(10e-5))(input_layer)

    decoder = Dense(input_dim, activation='relu')(encoder)

    # You either use an existing Keras model or build one yourself, as we do here
    autoencoder = Model(inputs=input_layer, outputs=decoder)
    return autoencoder
Example #14
    def learn(self, Features, Targets=None):
        num_batches = 5

        if Targets is not None:
            raise Exception("Per definition Target is equal to Features")

        if self.encoding_dim is np.NaN:
            self.encoding_dim = Features.shape[1]

        if self.input_dim is np.NaN:
            self.input_dim = (1, self.encoding_dim)

        print(self.encoding_dim, ' ', self.input_dim)

        # The task is to learn the identity function
        np.random.seed(7)
        input_layer = Input(shape=(self.encoding_dim, ))

        encoder = Dense(
            self.encoding_dim,
            activation="tanh",
            activity_regularizer=regularizers.l1(10e-5))(input_layer)
        encoder = Dense(int(self.encoding_dim / 2), activation="relu")(encoder)
        decoder = Dense(int(self.encoding_dim / 2), activation='relu')(encoder)
        decoder = Dense(self.encoding_dim, activation='tanh')(decoder)
        model = Model(inputs=input_layer, outputs=decoder)

        # single-output model, so a single loss suffices
        model.compile(optimizer='rmsprop',
                      loss='mse',
                      metrics=['mean_squared_error'])
        if os.path.isfile(self.name) and self.forcelearning == False:
            # Restore the weights
            model.load_weights(self.name)
        else:
            checkpointer = ModelCheckpoint(filepath=self.name,
                                           verbose=0,
                                           save_best_only=True)
            # Autoencoders learn the identity, so the inputs double as targets (X = X)
            model.fit(Features,
                      Features,
                      epochs=100,
                      batch_size=num_batches,
                      verbose=1,
                      callbacks=[checkpointer])

            # Save the weights
            model.save_weights(self.name)

        self.model = model
Example #15
 def __init__(self, X_train_shape):
     self.input_shape = (X_train_shape[1], X_train_shape[2])
     self.model = Sequential()
     self.model.add(
         LSTM(units=30,
              input_shape=self.input_shape,
              activation='tanh',
              recurrent_activation='sigmoid',
              use_bias=True,
              return_sequences=True,
              kernel_regularizer=regularizers.l1(1e-05)))
     self.model.add(
         LSTM(units=20,
              input_shape=self.input_shape,
              activation='tanh',
              recurrent_activation='sigmoid',
              use_bias=True,
              return_sequences=False,
              kernel_regularizer=regularizers.l1(1e-05)))
     self.model.add(
         Dense(units=10, kernel_regularizer=regularizers.l1(1e-05)))
     self.model.add(ELU(alpha=1))
     self.model.add(Dense(units=1, activation='relu'))
Example #16
def create_model(num_input, hidden_layer, num_classes):

    input_layer = tf.keras.Input(shape=(num_input, ))
    hidden = layers.Dense(
        hidden_layer[0],
        kernel_regularizer=regularizers.l2(0.1),
        activity_regularizer=regularizers.l1(0.1))(input_layer)
    hidden = layers.ELU(alpha=1)(hidden)
    hidden = layers.Dropout(0.1)(hidden)
    hidden = layers.Dense(hidden_layer[1],
                          kernel_regularizer=regularizers.l2(0.1),
                          activity_regularizer=regularizers.l1(0.1))(hidden)
    hidden = layers.ELU(alpha=1)(hidden)
    hidden = layers.Dropout(0.1)(hidden)
    hidden = layers.Dense(hidden_layer[2],
                          kernel_regularizer=regularizers.l2(0.1),
                          activity_regularizer=regularizers.l1(0.1))(hidden)
    hidden = layers.ELU(alpha=1)(hidden)
    hidden = layers.Dropout(0.1)(hidden)
    if num_classes > 2:
        output_layer = layers.Dense(num_classes, activation="softmax")(hidden)
    else:
        output_layer = layers.Dense(1, activation="sigmoid")(hidden)

    model = models.Model(input_layer, output_layer)
    if num_classes > 2:
        loss_func = 'sparse_categorical_crossentropy'
    else:
        loss_func = 'binary_crossentropy'

    opt = optimizers.Adam(learning_rate=0.005,
                          beta_1=0.9,
                          beta_2=0.999,
                          amsgrad=False)
    model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

    return model
Example #17
def create_model():
    model = Sequential()

    model.add(
        Conv2D(8, (4, 4),
               strides=(1, 1),
               input_shape=(32, 32, 1),
               name="conv2d_0_m"))

    model.add(Activation(activation='relu', name='relu1'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max1'))

    model.add(Conv2D(16, (2, 2), strides=(1, 1), name="conv2d_1_m"))

    model.add(Activation(activation='relu', name='relu2'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max2'))
    model.add(Flatten())
    model.add(
        Dense(120,
              name='fc1',
              kernel_initializer='lecun_uniform',
              kernel_regularizer=l1(0.0001)))
    model.add(Activation(activation='relu', name='relu3'))
    model.add(
        Dense(84,
              name='fc2',
              kernel_initializer='lecun_uniform',
              kernel_regularizer=l1(0.0001)))
    model.add(Activation(activation='relu', name='relu4'))
    model.add(
        Dense(10,
              name='output',
              kernel_initializer='lecun_uniform',
              kernel_regularizer=l1(0.0001)))
    model.add(Activation(activation='softmax', name='softmax'))

    return model
Example #18
def Relearn3():
    # Load pickle model
    x = pickle.load(open("Data/x.pickle", "rb"))
    y = pickle.load(open("Data/y.pickle", "rb"))

    # normalizing data
    x = np.asarray(x) / 255.0
    y = np.asarray(y)

    # Building the model
    model = Sequential()
    # 3 convolutional layers
    model.add(Conv2D(32, (3, 3), input_shape=x.shape[1:]))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # 1 hidden layer
    model.add(Flatten())
    model.add(Dense(32, activity_regularizer=l1(0.001)))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))

    #model.add(Dense(128))
    #model.add(Activation("relu"))

    # The output layer with 5 neurons, for 5 classes
    model.add(Dense(5))
    model.add(Activation("softmax"))

    # Compiling the model using some basic parameters
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    # validation_split corresponds to the percentage of images used for the validation phase compared to all the images
    history = model.fit(x, y, batch_size=50, epochs=50, validation_split=0.05)

    # Saving the model as .h5 and .model
    model.save("Data/modelweight.h5")
    model.save("Data/CNN.model")
Example #19
def cnn(l1, l2, depth, gamma, lr, w):
    # l1 and l2 are layer widths here, not regularization factors;
    # gamma is the l1 regularization factor and w the kernel width
    model = models.Sequential()
    model.add(
        layers.Conv1D(l1,
                      w,
                      activation='relu',
                      kernel_regularizer=regularizers.l1(gamma),
                      input_shape=(40, depth),
                      padding='same'))
    model.add(layers.MaxPooling1D(2))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(
        layers.Dense(l2,
                     activation='relu',
                     kernel_regularizer=regularizers.l1(gamma)))
    model.add(
        layers.Dense(1,
                     activation='sigmoid',
                     kernel_regularizer=regularizers.l1(gamma)))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(lr=lr),
                  metrics=['acc'])
    return model
Example #20
def old_unet(input_size, num_classes, num_channels=1, filter_multiplier=10, regularization_rate=0.):
    input_ = Input((input_size, input_size, num_channels))
    skips = []
    output = input_

    num_layers = int(np.floor(np.log2(input_size)))
    down_conv_kernel_sizes = np.zeros([num_layers], dtype=int)
    down_filter_numbers = np.zeros([num_layers], dtype=int)
    up_conv_kernel_sizes = np.zeros([num_layers], dtype=int)
    up_filter_numbers = np.zeros([num_layers], dtype=int)

    for layer_index in range(num_layers):
        down_conv_kernel_sizes[layer_index] = int(3)
        down_filter_numbers[layer_index] = int((layer_index + 1) * filter_multiplier + num_classes)
        up_conv_kernel_sizes[layer_index] = int(4)
        up_filter_numbers[layer_index] = int((num_layers - layer_index - 1) * filter_multiplier + num_classes)

    for shape, filters in zip(down_conv_kernel_sizes, down_filter_numbers):
        skips.append(output)
        output = Conv2D(filters, (shape, shape), strides=2, padding="same", activation="relu",
                        bias_regularizer=l1(regularization_rate))(output)

    for shape, filters in zip(up_conv_kernel_sizes, up_filter_numbers):
        output = UpSampling2D()(output)
        skip_output = skips.pop()
        output = concatenate([output, skip_output], axis=3)
        if filters != num_classes:
            output = Conv2D(filters, (shape, shape), activation="relu", padding="same",
                            bias_regularizer=l1(regularization_rate))(output)
            output = BatchNormalization(momentum=.9)(output)
        else:
            output = Conv2D(filters, (shape, shape), activation="softmax", padding="same",
                            bias_regularizer=l1(regularization_rate))(output)

    assert len(skips) == 0
    return Model([input_], [output])
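
A usage sketch with hypothetical arguments; input_size should be a power of two so the strided downsampling and upsampling paths mirror each other:

model = old_unet(input_size=128, num_classes=3, num_channels=1,
                 filter_multiplier=10, regularization_rate=1e-6)
model.summary()  # final layer: (128, 128, 3) softmax over classes
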
Example #21
 def model(self):
     """Model定义及训练
     """
     log('[{time}] Building model...'.format(time=get_time()))
     model = Sequential()
     model.add(
         Dense(self._dense_size_1,
               input_dim=len(self._feature_train[0]),
               activation='relu',
               kernel_regularizer=regularizers.l1(self._regular)))
     model.add(
         Dense(self._dense_size_2,
               activation='relu',
               kernel_regularizer=regularizers.l1(self._regular)))
     model.add(Dense(1, activation='sigmoid'))
     # if os.path.exists(self.model_file):
     #     model.load_weights(self.model_file)
     model.compile(
         loss='binary_crossentropy',
         optimizer='adam',
         metrics=['accuracy', self.precision, self.recall, self.f1_score])
     log(model.summary())
     # checkpoint
     checkpoint = ModelCheckpoint(self._model_file +
                                  '.{epoch:03d}-{val_f1_score:.4f}.hdf5',
                                  monitor='val_f1_score',
                                  verbose=1,
                                  save_best_only=True,
                                  mode='max')
     callbacks_list = [checkpoint]
     log('[{time}] Training...'.format(time=get_time()))
     model.fit(self._feature_train,
               self._label_train,
               epochs=self._epoch,
               callbacks=callbacks_list,
               validation_data=(self._feature_test, self._label_test))
Example #22
    def get_regularizers(self, l2, l1):
        if self.regularizer == 'l2':
            kernel_regularizer = regularizers.l2(l2)
            print('[INFO] -- regularizers: l2\n')
        elif self.regularizer == 'l1':
            kernel_regularizer = regularizers.l1(l1)
            print('[INFO] -- regularizers: l1\n')
        elif self.regularizer == 'l2_l1':
            kernel_regularizer = regularizers.l1_l2(l1=l1, l2=l2)
            print('[INFO] -- regularizers: l1_l2\n')
        else:
            kernel_regularizer = None
            print('[INFO] -- regularizers: None \n')

        return kernel_regularizer
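
A hedged sketch of a call site, assuming the owning class set self.regularizer to one of the recognized strings:

# Hypothetical usage inside the same class:
#     self.regularizer = 'l2_l1'
#     reg = self.get_regularizers(l2=1e-4, l1=1e-5)
#     layer = Dense(64, activation='relu', kernel_regularizer=reg)
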
Example #23
def model_2grus(Inputs,
                Inputs_alt,
                X_train,
                Xalt_train,
                Y_train,
                NPARTS=20,
                NSV=5):
    CLR = 0.001
    L1R = 0.00001
    print(Inputs)
    print(Inputs_alt)

    gru = GRU(100,
              activation='relu',
              recurrent_activation='hard_sigmoid',
              name='gru_base',
              activity_regularizer=l1(L1R))(Inputs)
    dense = Dense(100, activation='relu', activity_regularizer=l1(L1R))(gru)
    norm = BatchNormalization(momentum=0.6, name='dense4_bnorm')(dense)

    gru_alt = GRU(100,
                  activation='relu',
                  recurrent_activation='hard_sigmoid',
                  name='gru_base_alt',
                  activity_regularizer=l1(L1R))(Inputs_alt)
    dense_alt = Dense(100, activation='relu',
                      activity_regularizer=l1(L1R))(gru_alt)
    norm_alt = BatchNormalization(momentum=0.6,
                                  name='dense4_bnorm_alt')(dense_alt)

    added = Add()([norm, norm_alt])

    dense = Dense(50, activation='relu', activity_regularizer=l1(L1R))(added)
    norm = BatchNormalization(momentum=0.6, name='dense5_bnorm')(dense)
    dense = Dense(20, activation='relu', activity_regularizer=l1(L1R))(norm)
    dense = Dense(10, activation='relu', activity_regularizer=l1(L1R))(dense)
    out = Dense(1, activation='sigmoid', activity_regularizer=l1(L1R))(dense)

    classifier = Model(inputs=[Inputs, Inputs_alt], outputs=[out])
    lossfunction = 'binary_crossentropy'
    classifier.compile(loss=[lossfunction],
                       optimizer=Adam(CLR),
                       metrics=['accuracy'])
    models = {'classifier': classifier}

    return models
Example #24
 def create(self) -> tf.keras.models.Model:
     input_size = self.num_features * self.window_size
     return tf.keras.models.Sequential([
         InputLayer((input_size, ), name='input'),
         Dense(128, activation='relu', name='enc/dense01'),
         Dense(64, activation='relu', name='enc/dense02'),
         Dense(16,
               activation='relu',
               name='enc/dense03',
               activity_regularizer=l1(10e-6)),
         Dense(64, activation='relu', name='dec/dense01'),
         Dense(128, activation='relu', name='dec/dense02'),
         Dense(input_size, activation=None, name='dec/dense03')
     ], name=self.name)
Example #25
def conv2D_block(num_filters=32,
                 sample_shape=None,
                 inputA=None,
                 drop_out=0,
                 first_layer=False,
                 conv_kernel_size=(5, 5),
                 conv_stride=(1, 1),
                 GAP=True,
                 max_pool_size=(2, 2),
                 max_pool_stride=(1, 1),
                 act_regularization=0):
    if first_layer:
        # inputA = Input(shape=sample_shape)
        conv = Conv2D(
            num_filters,
            kernel_size=conv_kernel_size,
            strides=conv_stride,
            kernel_initializer='he_uniform',
            # kernel_regularizer=l1(act_regularization),
            # bias_regularizer=l1(act_regularization),
            # activity_regularizer=None,
            activation=None,
            padding='same',
            use_bias=False,
            input_shape=sample_shape)(inputA)
    else:
        conv = Conv2D(
            num_filters,
            kernel_size=conv_kernel_size,
            strides=conv_stride,
            kernel_initializer='he_uniform',
            # kernel_regularizer=l1(act_regularization),
            # bias_regularizer=l1(act_regularization),
            # activity_regularizer=None,
            activation=None,
            padding='same',
            use_bias=False)(inputA)

    bn = BatchNormalization(center=True, scale=True, trainable=False)(conv)
    act = Activation('relu', activity_regularizer=l1(act_regularization))(bn)
    if GAP:
        out = GlobalAveragePooling2D()(act)
    else:
        max_pool = MaxPooling2D(pool_size=max_pool_size,
                                strides=max_pool_stride)(act)
        out = Dropout(drop_out)(max_pool)

    return out
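
A usage sketch for the block above with hypothetical shapes, assuming tensorflow.keras imports:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(32, 32, 3))  # hypothetical image shape
out = conv2D_block(num_filters=16, sample_shape=(32, 32, 3), inputA=inputs,
                   first_layer=True, GAP=True, act_regularization=1e-5)
model = Model(inputs, out)
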
Example #26
def convolutional_model(input_shape, class_names):
    model = tf.keras.models.Sequential([
        layers.Conv2D(32, (3, 3),
                      activation=layers.LeakyReLU(alpha=0.01),
                      input_shape=input_shape),
        layers.Conv2D(32, (3, 3), activation=layers.LeakyReLU(alpha=0.01)),
        #layers.BatchNormalization(),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.1),
        layers.Conv2D(64, (3, 3), activation=layers.LeakyReLU(alpha=0.01)),
        layers.Conv2D(64, (3, 3), activation=layers.LeakyReLU(alpha=0.01)),
        #layers.BatchNormalization(),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.1),
        layers.Conv2D(128, (3, 3), activation=layers.LeakyReLU(alpha=0.01)),
        layers.Conv2D(128, (3, 3), activation=layers.LeakyReLU(alpha=0.01)),
        #layers.BatchNormalization(),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.1),
        layers.Conv2D(256, (3, 3), activation=layers.LeakyReLU(alpha=0.01)),
        layers.Conv2D(256, (3, 3), activation=layers.LeakyReLU(alpha=0.01)),
        #layers.BatchNormalization(),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.1),
        layers.Flatten(),
        layers.Dense(1024,
                     activation='linear',
                     activity_regularizer=regularizers.l1(0.0001)),
        layers.LeakyReLU(alpha=0.01),
        layers.Dense(128, activation=layers.LeakyReLU(alpha=0.01)),
        layers.Dense(64, activation=layers.LeakyReLU(alpha=0.01)),
        layers.Dense(8, activation='softmax')
    ])

    # https://keras.io/optimizers/
    #decay_rate = 0.01 / 100. #learning_rate / epochs
    #sgd = optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=False) #, decay=decay_rate)
    #rmsprop = optimizers.RMSprop(learning_rate=0.001, rho=0.9)
    #adagrad = optimizers.Adagrad(learning_rate=0.01)
    #adadelta = optimizers.Adadelta(learning_rate=1.0, rho=0.95)
    #adam = optimizers.Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, amsgrad=False)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model, "conv_model", 500
Example #27
 def initialize_model(self, regularization_type=None):
      # model_parameters is a tuple: r = regularization parameter, lr = learning
      # rate for ADAM, dim = number and dimension of hidden layers, dropout =
      # boolean whether dropout is used in training, dp = dropout rate
      r, lr, dim, dropout, dp = self.model_parameters
     dim = [int(d) for d in dim]
     number_of_hidden_layers = len(dim)
     dropout = bool(dropout)
     # -------------------------------------------------- NN Architecture -------------------------------------------------#
     # define input layer
     inputs = layers.Input(shape=(self.X_train.shape[1], ))
     # set regularization
     if regularization_type == 'l2' or regularization_type is None:
         REG = regularizers.l2(r)
         logging.debug('l2 regularization')
     if regularization_type == 'l1':
         REG = regularizers.l1(r)
         logging.debug('l1 regularization')
      if regularization_type == 'l1_l2':
          logging.debug('l1&l2 regularization')
          REG = regularizers.l1_l2(l1=r, l2=r)
     # first hidden layer
     x = layers.Dense(dim[0],
                      kernel_regularizer=REG,
                      bias_regularizer=REG,
                      activation='relu')(inputs)
     if dropout is True:
         x = layers.Dropout(rate=dp)(x)
     # remaining hidden layer
     for k in range(1, number_of_hidden_layers):
         x = layers.Dense(dim[k],
                          kernel_regularizer=REG,
                          bias_regularizer=REG,
                          activation='relu')(x)
         if dropout is True:
             x = layers.Dropout(rate=dp)(x)
     # final output layer
     predictions = layers.Dense(1, activation='relu')(x)
     model = models.Model(inputs=inputs, outputs=predictions)
     # ADAM = adaptive moment estimation a first-order gradient-based optimization algorithm
     ADAM = optimizers.Adam(lr=lr,
                            beta_1=0.9,
                            beta_2=0.999,
                            decay=0.0,
                            amsgrad=False)
     # compile the model and define the loss function
     model.compile(optimizer=ADAM, loss='mean_absolute_error')
     # -------------------------------------------------- NN Architecture -------------------------------------------------#
     self.model = model
     logging.debug('Neural Net initialized')
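
For reference, a hypothetical model_parameters tuple matching the unpacking above (values illustrative):

# (r, lr, dim, dropout, dp)
# model_parameters = (1e-4, 1e-3, [64, 32], 1, 0.2)
# -> regularization factor 1e-4, Adam learning rate 1e-3, hidden layers of
#    64 and 32 units, dropout enabled at rate 0.2
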
Example #28
def SingleOutputCNN(
        input_shape,
        output_shape,
        cnns_per_maxpool=1,
        maxpool_layers=1,
        dense_layers=1,
        dense_units=64,
        dropout=0.25,
        regularization=False,
        global_maxpool=False,
        name='',
) -> Model:
    function_name = cast(types.FrameType, inspect.currentframe()).f_code.co_name
    model_name    = f"{function_name}-{name}" if name else function_name
    # model_name  = seq([ function_name, name ]).filter(lambda x: x).make_string("-")  # remove dependency on pyfunctional - not in Kaggle repo without internet

    inputs = Input(shape=input_shape)
    x      = inputs

    for cnn1 in range(0, maxpool_layers):
        for cnn2 in range(1, cnns_per_maxpool + 1):
            x = Conv2D(32 * cnn2, kernel_size=(3, 3), padding='same', activation='relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)

    if global_maxpool:
        x = GlobalMaxPooling2D()(x)

    x = Flatten()(x)

    for nn1 in range(0, dense_layers):
        if regularization:
            x = Dense(dense_units, activation='relu',
                      kernel_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l1(0.01))(x)
        else:
            x = Dense(dense_units, activation='relu')(x)

        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)

    x = Dense(output_shape, activation='softmax')(x)

    model = Model(inputs, x, name=model_name)
    # plot_model(model, to_file=os.path.join(os.path.dirname(__file__), f"{name}.png"))
    return model
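
A usage sketch with hypothetical MNIST-like shapes:

model = SingleOutputCNN(input_shape=(28, 28, 1), output_shape=10,
                        cnns_per_maxpool=2, maxpool_layers=2,
                        dense_layers=1, regularization=True, name='demo')
model.summary()
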
Example #29
def pixleated_structre():
    Input_img = Input(shape=(256, 256, 3))  
    x1 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(Input_img)
    x2 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x1)
    x3 = MaxPool2D(padding='same')(x2)
    x4 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x3)
    x5 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x4)
    x6 = MaxPool2D(padding='same')(x5)
    encoded = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x6)
    x7 = UpSampling2D()(encoded)
    x8 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x7)
    x9 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x8)
    x10 = Add()([x5, x9])
    x11 = UpSampling2D()(x10)
    x12 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x11)
    x13 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l1(10e-10))(x12)
    x14 = Add()([x2, x13])
    decoded = Conv2D(3, (3, 3), padding='same', activation='relu', kernel_regularizer=regularizers.l1(10e-10))(x14)
    autoencoder = Model(Input_img, decoded)
    autoencoder.compile(optimizer='adadelta', loss='mse', metrics=['accuracy'])
    
    return autoencoder
Example #30
 def build_model(self):
     self.h_units = cal_hidden_layer_of_units(self.params['h_layers'],
                                              self.params['encode_dim'])
     self.input = Input(shape=(len(self.features), ))
     self.hidden = self.input
     for _unit in self.h_units:
         self.hidden = Dense(_unit,
                             activation=self.params['activation'],
                             activity_regularizer=regularizers.l1(
                                 self.params['l1']))(self.hidden)
     self.fr_output = Dense(len(self.features),
                            activation='relu')(self.hidden)
     self.model = Model(inputs=self.input, outputs=self.fr_output)
     self.model.compile(loss='mae',
                        optimizer=RMSprop(lr=self.params['lr']),
                        metrics=['mae'])
     print(self.model.summary())