Example #1
def autoencoder_hyperopt(_n_features, _hparams=default_autoencoder_hyperopt):
    """
    Creazione struttura autoencoder Keras parametrica.
    
    :param _n_features: int
        numero input rete neurale. Configurazione tensore principale
    :param _hparams: dict
        parametri per la configurazione della rete
    
    :return: Model
        autoencoder creato. Keras model
    """
    input_layer = Input(shape=(_n_features, ))
    for layer in range(1, int(_hparams['num_layers']) + 1):

        hidden = Dense(
            units=int(_hparams['num_unit_' + str(layer)]),
            activation=_hparams['actv_func'],
            activity_regularizer=regularizers.l1(
                _hparams['l1_reg']))(input_layer if layer == 1 else hidden)
        if _hparams['drop_enabled']:
            hidden = Dropout(rate=_hparams['drop_factor'])(hidden)

    for layer in reversed(range(1, int(_hparams['num_layers']) + 1)):
        hidden = Dense(units=int(_hparams['num_unit_' + str(layer)]),
                       activation=_hparams['actv_func'],
                       activity_regularizer=regularizers.l1(
                           _hparams['l1_reg']))(hidden)
        if _hparams['drop_enabled']:
            hidden = Dropout(rate=_hparams['drop_factor'])(hidden)

    output_layer = Dense(_n_features,
                         activation=_hparams['actv_func_out'])(hidden)

    autoencoder = Model(input_layer, output_layer)

    if _hparams['optimizer'] == 'adadelta':
        opt_net = Adadelta(_hparams['learn_rate_opt'], rho=0.95)
    elif _hparams['optimizer'] == 'adam':
        opt_net = Adam(_hparams['learn_rate_opt'],
                       beta_1=0.9,
                       beta_2=0.999,
                       amsgrad=False)
    else:
        opt_net = Adam(_hparams['learn_rate_opt'],
                       beta_1=0.9,
                       beta_2=0.999,
                       amsgrad=False)

    autoencoder.compile(optimizer=opt_net, loss=_hparams['loss_func'])

    return autoencoder
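A minimal usage sketch for autoencoder_hyperopt above. Only the dictionary keys are taken from the function body; the values (and _n_features=100) are illustrative assumptions.

# Hedged usage sketch: keys mirror what autoencoder_hyperopt reads,
# values are illustrative assumptions.
example_hparams = {
    'num_layers': 2,
    'num_unit_1': 64,
    'num_unit_2': 32,
    'actv_func': 'relu',
    'actv_func_out': 'sigmoid',
    'l1_reg': 1e-5,
    'drop_enabled': True,
    'drop_factor': 0.2,
    'optimizer': 'adam',
    'learn_rate_opt': 1e-3,
    'loss_func': 'mse',
}

model = autoencoder_hyperopt(_n_features=100, _hparams=example_hparams)
model.summary()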
Example #2
 def block(self,
           inputs,
           filters,
           kernel_size,
           strides,
           dilation_rate=(1, 1)):
     inputs = tf.keras.layers.Conv2D(
         filters=filters,
         dilation_rate=dilation_rate,
         kernel_size=kernel_size,
         strides=strides,
         kernel_regularizer=l1(0.1),
         kernel_initializer=self.utils.msra_initializer(
             kernel_size, filters),
         padding='SAME',
     )(inputs)
     inputs = tf.layers.BatchNormalization(
         fused=True,
         renorm_clipping={
             'rmax': 3,
             'rmin': 0.3333,
             'dmax': 5
         },
         epsilon=1.001e-5,
     )(inputs, training=self.utils.training)
     inputs = tf.keras.layers.LeakyReLU(0.01)(inputs)
     return inputs
    def __init__(self,
                 hidden_state_size,
                 is_on=True,
                 vocab_size=11,
                 utterance_len=6,
                 entropy_reg=0.001):
        """
        entropy_reg is the entropy regularization coefficient (applied as an L1 activity regularizer on the LSTM)
        """
        self.is_on = is_on
        self.utterance_len = utterance_len
        self.vocab = list(range(vocab_size))
        self.vocab_size = vocab_size

        if self.is_on:
            inputs = Input(batch_shape=(1, 1, 1), name='utter_input')
            lstm1 = LSTM(
                100,
                stateful=True,
                name='utter_lstm',
                activity_regularizer=regularizers.l1(entropy_reg))(inputs)
            dense = Dense(vocab_size, activation='softmax',
                          name='utter_dense')(lstm1)
            model = Model(inputs=inputs, outputs=[dense])
            model.compile(
                optimizer='adam',
                loss='categorical_crossentropy'
                # TODO might be cool to use the one below (requires different shape in training)
                # loss='sparse_categorical_crossentropy',
                # metrics=['categorical_accuracy']
                # sample_weight_mode="temporal"
            )
            self.model = model
def build(input_size, channels, latent_dim):
    layer_units = [512, 256]
    input_shape = (input_size, channels)
    drop_rate = 0.8
    inputs = Input(shape=input_shape)
    x = inputs
    x = Dropout(0.4, input_shape=(None, 978, 1))(x)
    for f in layer_units:
        x = Dense(f)(x)
        x = LeakyReLU(alpha=0.2)(x)

    x = Dropout(drop_rate, input_shape=(None, input_size, layer_units[1]))(x)
    shape = K.int_shape(x)
    x = Flatten()(x)
    latent = Dense(latent_dim, kernel_regularizer=regularizers.l2(1e-5),
                   activity_regularizer=regularizers.l1(1e-5))(x)
    #, kernel_regularizer=regularizers.l2(1e-5),
    #               activity_regularizer=regularizers.l1(1e-5)
    encoder = Model(inputs, latent, name="encoder")
    latent_inputs = Input(shape=(latent_dim,))
    x = Dense(shape[1] * shape[2])(latent_inputs)
    x = Reshape((shape[1], shape[2]))(x)
    for f in layer_units[::-1]:
        x = Dense(f)(x)
        x = LeakyReLU(alpha=0.2)(x)

    x = Dropout(drop_rate, input_shape=(None, input_size, layer_units[0]))(x)
    x = Dense(1)(x)
    outputs = Activation("tanh")(x)
    decoder = Model(latent_inputs, outputs, name="decoder")
    autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")
    return autoencoder
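A minimal usage sketch for build() above, assuming input_size=978 (the value hard-coded in the Dropout shape); latent_dim=64 is an arbitrary illustrative choice.

# Hedged usage sketch; 978 matches the hard-coded Dropout shape above,
# latent_dim=64 is an assumption.
autoencoder = build(input_size=978, channels=1, latent_dim=64)
autoencoder.summary()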
    def __init__(self, hidden_state_size, entropy_reg=0.05):
        # single feedforward layer with sigmoid function
        self.model = Sequential([
            Dense(
                1,
                input_shape=(hidden_state_size, ),
                # kernel_initializer='random_uniform',  # TODO or maybe random_normal
                kernel_initializer='random_normal',  # TODO: or maybe 'random_uniform'
                activity_regularizer=regularizers.l1(entropy_reg)),
            Activation('sigmoid')
        ])

        # Accuracy is not the right measure for your model's performance. What you are trying to do here is more of a
        # regression task than a classification task. The same can be seen from your loss function, you are using
        # 'mean_squared_error' rather than something like 'categorical_crossentropy'.
        optimizer = optimizers.Adam()
        # lr=learning_rate  0.001 by default which is fine

        self.model.compile(
            optimizer=optimizer,
            loss='binary_crossentropy'  # TODO these are random, needs to be checked
            # metrics=['accuracy']
        )
Example #6
 def cnn_layer(self, index, inputs, filters, kernel_size, strides):
     """卷积-BN-激活函数-池化结构生成器"""
     # for i in range(len(kernel_size)):
     with tf.keras.backend.name_scope('unit-{}'.format(index + 1)):
         x = tf.keras.layers.Conv2D(
             filters=filters,
             kernel_size=kernel_size,
             strides=strides[0],
             kernel_regularizer=l1(0.01),
             kernel_initializer=self.msra_initializer(kernel_size, filters),
             padding='same',
             name='cnn-{}'.format(index + 1),
         )(inputs)
         x = tf.layers.BatchNormalization(fused=True,
                                          renorm_clipping={
                                              'rmax': 3,
                                              'rmin': 0.3333,
                                              'dmax': 5
                                          } if index == 0 else None,
                                          epsilon=1.001e-5,
                                          name='bn{}'.format(index + 1))(
                                              x, training=self.training)
         x = tf.keras.layers.LeakyReLU(0.01)(x)
         x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
                                          strides=strides[1])(x)
     return x
def build_and_compile_nn(num_of_features, layer_size=100, dropout_rate=0.2, verbose=0):
    # Build the model
    model = keras.models.Sequential([
        keras.layers.Dense(layer_size, input_shape=[num_of_features], activation='relu',
                           kernel_regularizer=regularizers.l1(0.001)),
        keras.layers.Dropout(dropout_rate),
        keras.layers.Dense(int(layer_size / 2), activation='relu'),
        keras.layers.Dropout(dropout_rate),
        keras.layers.Dense(1),
    ])
    # keras.layers.Dense(3, activation='relu'),
    # keras.layers.Lambda(lambda x: x[0] + tf.cumsum(x[1], axis = 1))

    #     z = L.Input((nh,), name="Patient")
    # x = L.Dense(100, activation="relu", name="d1")(z)
    # x = L.Dense(100, activation="relu", name="d2")(x)
    # p1 = L.Dense(3, activation="linear", name="p1")(x)
    # p2 = L.Dense(3, activation="relu", name="p2")(x)
    # preds = L.Lambda(lambda x: x[0] + tf.cumsum(x[1], axis=1),
    #                  name="preds")([p1, p2])

    if verbose > 0:
        print(model.summary())

    # Construct loss function
    # loss_fn = calc_tensor_score
    # loss_fn = mloss(0.8)
    loss_fn = 'mae'

    # compile loss function into model
    model.compile(optimizer=keras.optimizers.Adam(lr=nn_config.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None,
                                                  decay=nn_config.learning_rate / 10, amsgrad=False),
                  loss=loss_fn,
                  metrics=['mae'])
    return model
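A minimal usage sketch for build_and_compile_nn above, assuming the nn_config object referenced inside the function (with a learning_rate attribute) is available in this module; the data below is random stand-in data just to show the expected shapes.

# Hedged usage sketch: random data, 20 features, scalar regression target.
import numpy as np

model = build_and_compile_nn(num_of_features=20, layer_size=64, dropout_rate=0.2, verbose=1)
X = np.random.rand(128, 20)
y = np.random.rand(128, 1)
model.fit(X, y, epochs=2, batch_size=32)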
Example #8
    def cnn_layer(self, index, inputs, filters, kernel_size, strides):
        """卷积-BN-激活函数-池化结构块"""

        with tf.keras.backend.name_scope('unit-{}'.format(index + 1)):
            x = tf.keras.layers.Conv2D(
                filters=filters,
                kernel_size=kernel_size,
                strides=strides[0],
                kernel_regularizer=l1(0.01),
                kernel_initializer=self.msra_initializer(kernel_size, filters),
                padding='same',
                name='cnn-{}'.format(index + 1),
            )(inputs)
            x = tf.layers.batch_normalization(x,
                                              fused=True,
                                              renorm_clipping={
                                                  'rmax': 3,
                                                  'rmin': 0.3333,
                                                  'dmax': 5
                                              } if index == 0 else None,
                                              reuse=False,
                                              momentum=0.9,
                                              name='bn{}'.format(index + 1),
                                              training=self.is_training)
            x = tf.keras.layers.LeakyReLU(0.01)(x)
            x = tf.keras.layers.MaxPooling2D(
                pool_size=(2, 2),
                strides=strides[1],
                padding='same',
            )(x)
        return x
Example #9
def autoencoder_dense(data, smell, layers=1, encoding_dimension=32, epochs=10, with_bottleneck=True, is_final=False, threshold=400000):
    encoding_dim = encoding_dimension
    input_layer = Input(shape=(data.max_input_length,))
    no_of_layers = layers
    prev_layer = input_layer
    for i in range(no_of_layers):
        encoder = Dense(int(encoding_dim / pow(2, i)), activation="relu",
                        activity_regularizer=regularizers.l1(10e-3))(prev_layer)
        prev_layer = encoder
    # bottleneck
    if with_bottleneck:
        prev_layer = Dense(int(encoding_dim / pow(2, no_of_layers)), activation="relu")(prev_layer)
    for j in range(no_of_layers - 1, -1, -1):
        decoder = Dense(int(encoding_dim / pow(2, j)), activation='relu')(prev_layer)
        prev_layer = decoder
    prev_layer = Dense(data.max_input_length, activation='relu')(prev_layer)
    autoencoder = Model(inputs=input_layer, outputs=prev_layer)

    autoencoder.compile(optimizer='adam',
                        loss='mean_squared_error',
                        metrics=['accuracy'])
    autoencoder.summary()

    batch_sizes = [32, 64, 128]
    # batch_sizes = [32, 64, 128, 256, 512]
    # Choose a batch-size index from the dataset size (larger datasets use a
    # larger batch size), clamped to the valid index range of batch_sizes.
    b_size = int(len(data.train_data) / batch_sizes[len(batch_sizes) - 1])
    if b_size > len(batch_sizes) - 1:
        b_size = len(batch_sizes) - 1

    val_split = 0.2
    if is_final:
        val_split = 0
    history = autoencoder.fit(data.train_data,
                              data.train_data,
                              epochs=epochs,
                              # batch_size=batch_size,
                              batch_size=batch_sizes[b_size],
                              verbose=1,
                              validation_split=val_split,
                              shuffle=True).history

    # plt.plot(history['loss'])
    # plt.plot(history['val_loss'])
    # plt.title('model loss')
    # plt.ylabel('loss')
    # plt.xlabel('epoch')
    # plt.legend(['train', 'test'], loc='upper right')
    # plt.show()

    predictions = autoencoder.predict(data.eval_data)
    mse = np.mean(np.power(data.eval_data - predictions, 2), axis=1)
    error_df = pd.DataFrame({'Reconstruction_error': mse,
                             'True_class': data.eval_labels})
    # print(error_df.describe())
    if is_final:
        return find_metrics(error_df, threshold)
    else:
        return find_optimal(error_df)
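A minimal usage sketch for autoencoder_dense above. The `data` argument is assumed to be any object exposing the attributes the function reads (max_input_length, train_data, eval_data, eval_labels); find_optimal / find_metrics are assumed to be defined elsewhere in the module.

# Hedged usage sketch with random stand-in data.
import numpy as np
from types import SimpleNamespace

data = SimpleNamespace(
    max_input_length=128,
    train_data=np.random.rand(1000, 128),
    eval_data=np.random.rand(200, 128),
    eval_labels=np.random.randint(0, 2, size=200),
)
result = autoencoder_dense(data, smell=None, layers=2, encoding_dimension=64, epochs=5)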
Example #10
 def _build_model(self, input_size):
     input_data = Input(shape=(input_size,))
     x = input_data
     x = Dense(2048, activation='relu')(x)
     x = Dropout(0.75)(x)
     x = Dense(128, activation='relu')(x)
     x = Dense(NB_CAT, activation='sigmoid', kernel_regularizer=l1(1e-5))(x)
     model = Model(inputs=input_data, outputs=x)
     return model
Example #11
def build_model_xception_avg(lock_base_model: bool):
    base_model = Xception(input_shape=INPUT_SHAPE, include_top=False, pooling=None, weights=None)
    if lock_base_model:
        for layer in base_model.layers:
            layer.trainable = False
    x = GlobalAveragePooling2D(name='avg_pool_final')(base_model.layers[-1].output)
    res = Dense(NB_CLASSES, activation='sigmoid', name='classes', kernel_initializer='zero',
                kernel_regularizer=l1(1e-5))(x)
    model = Model(inputs=base_model.inputs, outputs=res)
    return model
Example #12
def build_model_nasnet_mobile(lock_base_model: bool):
    img_input = Input(shape=INPUT_SHAPE)
    base_model = NASNetMobile(input_tensor=img_input, include_top=False, pooling='avg')
    if lock_base_model:
        for layer in base_model.layers:
            layer.trainable = False
    res = Dense(NB_CLASSES, activation='sigmoid', name='classes', kernel_initializer='zero',
                kernel_regularizer=l1(1e-5))(base_model.layers[-1].output)
    model = Model(inputs=img_input, outputs=res)
    return model
Example #13
def modify_model(model: Model, class_index: int,
                 importance_type: ImportanceType) -> Model:
    gamma_initializer: str = "zeros"
    if importance_type & ImportanceType.GAMMA:
        gamma_initializer = "ones"

    gamma_regularizer = None
    if importance_type & ImportanceType.L1 and not importance_type & ImportanceType.L2:
        gamma_regularizer = l1()
    if not importance_type & ImportanceType.L1 and importance_type & ImportanceType.L2:
        gamma_regularizer = l2()
    if importance_type & ImportanceType.L1 and importance_type & ImportanceType.L2:
        gamma_regularizer = l1_l2()

    max_layer: int = len(model.layers)
    last_output: Input = None
    network_input: Input = None
    for i, layer in enumerate(model.layers):
        if i == 0:
            last_output = layer.output
            network_input = layer.input
        if 0 < i < max_layer:
            new_layer: BatchNormalization = BatchNormalization(
                center=(importance_type & ImportanceType.CENTERING),
                gamma_initializer=gamma_initializer,
                gamma_regularizer=gamma_regularizer)
            last_output = new_layer(last_output)
        if i == max_layer - 1:
            new_end_layer: Dense = Dense(2,
                                         activation="softmax",
                                         name="binary_output_layer")
            last_output = new_end_layer(last_output)

            old_weights = layer.get_weights()
            old_weights[0] = np.transpose(old_weights[0], (1, 0))
            new_weights: List[np.array] = [
                np.append(old_weights[0][class_index:class_index + 1],
                          np.subtract(
                              np.sum(old_weights[0], axis=0, keepdims=True),
                              old_weights[0][class_index:class_index + 1]),
                          axis=0),
                np.append(old_weights[1][class_index:class_index + 1],
                          np.subtract(
                              np.sum(old_weights[1], axis=0, keepdims=True),
                              old_weights[1][class_index:class_index + 1]),
                          axis=0)
            ]
            new_weights[0] = np.transpose(new_weights[0], (1, 0))
            new_end_layer.set_weights(new_weights)
        elif i > 0:
            last_output = layer(last_output)

    return Model(inputs=network_input, outputs=last_output)
Example #14
    def build(self):

        input_seq = Input(shape=self.input_shape)

        x = Conv3D(filters=32,
                   kernel_size=(self.filter_dims, 1, 1),
                   padding='same',
                   use_bias=False,
                   kernel_regularizer=l1(1e-3))(input_seq)
        x = BatchNormalization(-1)(x)
        x = AveragePooling3D(pool_size=(self.filter_dims, 1, 1),
                             strides=(int(self.filter_dims / 3), 1, 1),
                             padding='valid')(x)
        x = ConvLSTM2D(filters=64,
                       kernel_size=(3, 3),
                       return_sequences=False,
                       padding='same')(x)

        x = Conv2D(filters=128,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_regularizer=l2(1e-3))(x)
        x = BatchNormalization(-1)(x)
        x = Dropout(0.5)(x)

        x = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_regularizer=l2(1e-3))(x)
        x = BatchNormalization(-1)(x)
        x = Dropout(0.5)(x)

        x = Conv2D(filters=512,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_regularizer=l2(1e-3))(x)
        x = BatchNormalization(-1)(x)
        x = Dropout(0.5)(x)

        x = Conv2D(filters=1024,
                   kernel_size=(5, 5),
                   activation='relu',
                   kernel_regularizer=l2(1e-3))(x)
        x = BatchNormalization(-1)(x)
        x = Dropout(0.5)(x)

        x = Flatten()(x)
        logits = Dense(self.output_classes,
                       activation='softmax',
                       kernel_regularizer=l2(1e-3))(x)

        model = Model(inputs=input_seq, outputs=logits)
        model.summary()

        return model
Example #15
def build_model_resnet50(lock_base_model: bool):
    base_model = ResNet50(input_shape=INPUT_SHAPE, include_top=False, pooling=None)
    if lock_base_model:
        for layer in base_model.layers:
            layer.trainable = False
    x = AveragePooling2D((5, 5), name='avg_pool5', strides=1)(base_model.layers[-2].output)
    x = GlobalMaxPooling2D()(x)
    res = Dense(NB_CLASSES, activation='sigmoid', name='classes', kernel_initializer='zero',
                kernel_regularizer=l1(1e-5))(x)
    model = Model(inputs=base_model.inputs, outputs=res)
    return model
Example #16
def test_kernel_reg(case):
    x, y, model_fn, backbone = case

    l1_reg = regularizers.l1(0.1)
    model = model_fn(backbone)
    reg_model = set_regularization(model, kernel_regularizer=l1_reg)
    _test_regularizer(model, reg_model, x, y)

    l2_reg = regularizers.l2(0.1)
    model = model_fn(backbone, encoder_weights=None)
    reg_model = set_regularization(model, kernel_regularizer=l2_reg)
    _test_regularizer(model, reg_model, x, y)
Example #17
def build(input_size, latent_dim, regul_stren=0):
    if regul_stren == 0:
        noise_dropout = 0.1
        l1_weight = 0
        dropout = 0.5
    elif regul_stren == 1:
        noise_dropout = 0.2
        l1_weight = 1e-8
        dropout = 0.8
    elif regul_stren == 2:
        noise_dropout = 0.5
        l1_weight = 1e-7
        dropout = 0.8
    else:
        noise_dropout = 0.5
        l1_weight = 1e-4
        dropout = 0.9

    layer_units = [512, 256]
    input_shape = (input_size, 1)
    inputs = Input(shape=input_shape)
    x = inputs
    xd = Dropout(noise_dropout, input_shape=(None, 978, 1))(x)
    x = xd
    for f in layer_units:
        x = Dense(f)(x)
        x = LeakyReLU(alpha=0.2)(x)

    shape = K.int_shape(x)
    x = Flatten()(x)
    latent = Dense(latent_dim,
                   use_bias=False,
                   activity_regularizer=regularizers.l1(l1_weight))(x)
    encoder = Model(inputs, latent, name="encoder")
    latent_inputs = Input(shape=(latent_dim, ))
    xd_input = Input(shape=input_shape)
    x = Dense(shape[1] * shape[2])(latent_inputs)
    x = Reshape((shape[1], shape[2]))(x)
    for f in layer_units[::-1]:
        x = Dense(f)(x)
        x = LeakyReLU(alpha=0.2)(x)

    x = Dropout(dropout, input_shape=(None, input_size, layer_units[0]))(x)
    z = tf.keras.layers.Concatenate(axis=-1)([x, xd_input])
    x = Dense(1)(z)
    outputs = Activation("tanh")(x)
    decoder = Model([xd_input, latent_inputs], outputs, name="decoder")
    autoencoder = Model(inputs,
                        decoder([xd, encoder(inputs)]),
                        name="autoencoder")
    return autoencoder
Example #18
def create_network():
    # Weights initializer (to initialize weights from a normal distribution)
    initializer = keras.initializers.glorot_normal(seed=None)

    # Building the model
    model = keras.Sequential([
        # 2 conv layers with 100 filters and kernel dimension 10
        keras.layers.Conv1D(100, 10, activation='relu', input_shape=(139, 3)),
        keras.layers.Conv1D(100, 10, activation='relu'),
        # Max pooling, dividing the input map, generated by the conv layers, by 3
        keras.layers.MaxPooling1D(3),
        # Same concept applied again, but with global average pooling for dimensionality reduction
        keras.layers.Conv1D(160, 10, activation='relu'),
        keras.layers.Conv1D(160, 10, activation='relu'),
        keras.layers.GlobalAveragePooling1D(),
        # Dropout layer, giving each weight a 50% probability to become 0, avoiding over-fitting
        keras.layers.Dropout(0.5),
        # 2 hidden layers (with 128 and 64 nodes respectively) followed by the output layer
        keras.layers.Dense(128,
                           activation=tf.nn.relu,
                           kernel_initializer=initializer,
                           kernel_regularizer=regularizers.l1(0.01),
                           activity_regularizer=regularizers.l2(0.01)),
        keras.layers.Dense(64,
                           activation=tf.nn.relu,
                           kernel_initializer=initializer,
                           kernel_regularizer=regularizers.l1(0.01),
                           activity_regularizer=regularizers.l2(0.01)),
        keras.layers.Dense(20, activation=tf.nn.softmax)
    ])

    # Model compiling with adam optimizer (which gives an adaptive learning rate)
    opt = optimizers.Adam(lr=0.001)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model
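A minimal training sketch for create_network above, using random stand-in data shaped (139, 3) with integer labels in [0, 20), matching the input_shape and the sparse_categorical_crossentropy loss used by the model.

# Hedged training sketch on random data.
import numpy as np

model = create_network()
X = np.random.rand(64, 139, 3)
y = np.random.randint(0, 20, size=(64,))
model.fit(X, y, epochs=1, batch_size=16)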
Example #19
def CNN_Video(nb_channels=3, dropoutRate=0.5, act='relu', k_size=3, d_layer=512,
              k_regularizer=regularizers.l1(0.001), img_size=32, time_slot=100,
              num_color_chan=1):
    """
    Deep convolutional 3D neural network with a softmax classifier.

    :param nb_channels: number of classes
    :param dropoutRate: drop-out rate of the last layer
    :param act: activation function
    :param k_size: convolutional kernel size
    :param k_regularizer: kernel regularizer (accepted but not applied below)
    :param d_layer: number of hidden units in the last layer
    :param img_size: image size
    :param time_slot: number of frames/images in a video (the video length)
    :param num_color_chan: number of color channels per frame; real electrode
        values are used instead of RGB

    Expects 100x32x32x1 video data as input.

    Conv3D<32> - Conv3D<32> - Conv3D<32> - Conv3D<32> - MaxPool3D<10,k,k> -
    Conv3D<64> - Conv3D<64> - MaxPool3D<2,2,2> -
    Dense<512> - Dense<3>
    """
 
    strides = None
    # In each convolutional layer, 10 consecutive images are convolved
    kernel = (10, k_size, k_size)

    print('PARAMETERS OF MODELS: ', act, ' ', k_size, ' ', d_layer)
  
    model = Sequential()
    # add layers
    model.add(Conv3D(32, kernel_size=kernel, input_shape=(time_slot,img_size,img_size,num_color_chan), activation=act))
    model.add(Conv3D(32, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act ))
    model.add(Conv3D(32, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act ))
    model.add(Conv3D(32, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act ))
    model.add(MaxPooling3D(pool_size=kernel, strides=strides, data_format='channels_last'))
    # new layer
    model.add(Conv3D(64, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act))
    model.add(Conv3D(64, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act))
    model.add(MaxPooling3D(pool_size=(2,2,2),strides=strides, data_format='channels_last'))
    
    # flatten and check
    model.add(Flatten())
    model.add(Dense(d_layer))
    model.add(Dropout(rate=dropoutRate))
    model.add(Dense(nb_channels, activation='softmax'))
    
    return model
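CNN_Video only builds the network; compilation and training are left to the caller. A minimal compile sketch, with the optimizer and loss chosen as illustrative assumptions (categorical cross-entropy matches the softmax output):

# Hedged compile sketch; optimizer and loss are illustrative assumptions.
video_model = CNN_Video(nb_channels=3, act='relu', k_size=3, d_layer=512)
video_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
video_model.summary()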
def build_nn(hidden_layer_sizes, input_dim, l1_reg, activation_func='relu'):
	input_layer = Input(shape=(input_dim,))
	x = input_layer
	x = Dropout(0.3, input_shape=(input_dim,))(x)

	# Fully connected hidden layers
	for i, size in enumerate(hidden_layer_sizes):
		x = Dense(size, activation = activation_func, kernel_regularizer=regularizers.l1(l1_reg))(x)
		x = Dropout(0.1)(x)        
		x = BatchNormalization()(x)

	# Wrap base_nn into a model
	base_nn = Model(inputs = input_layer, outputs = x, name = "base_nn")
	return(base_nn, x)
    def __init__(
        self,
        input_dim,
        first_layer_size=100,
        latent_dim=3,
        encoder_layer_num=4,
        drop_out=0.05,
        gaussian_noise=0.1
    ):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim
        self.first_layer_size = first_layer_size
        self.encoder_layer_num = encoder_layer_num
        self.input_dim = input_dim
        self.layer_sizes = [
            int(i) for i in np.linspace(first_layer_size, latent_dim, encoder_layer_num)
        ]

        encoder_layers = []
        for size in self.layer_sizes[:-1]:
            encoder_layers.append(
                Dense(size, activation='relu', activity_regularizer=regularizers.l1(1e-7))
            )
            encoder_layers.append(Dropout(drop_out))
            encoder_layers.append(GaussianNoise(gaussian_noise))
        encoder_layers.append(
            Dense(latent_dim, activation='relu', activity_regularizer=regularizers.l1(1e-7))
        )

        decoder_layers = []
        for size in self.layer_sizes[::-1][1:]:
            decoder_layers.append(Dense(size, activation='relu'))
        decoder_layers.append(Dense(input_dim, activation='sigmoid'))

        self.encoder = tf.keras.Sequential(encoder_layers)
        self.decoder = tf.keras.Sequential(decoder_layers)
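The constructor above only builds the encoder and decoder Sequentials; a forward pass is not shown. A minimal call() sketch (an assumed completion, not part of the original snippet) would chain them:

    # Assumed completion: forward pass chaining encoder and decoder.
    def call(self, inputs, training=False):
        encoded = self.encoder(inputs, training=training)
        return self.decoder(encoded, training=training)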
Example #22
def build_model_resnet50_avg(lock_base_model: bool):
    """
    Build the Resnet50 based level 1 model
    :param lock_base_model:
    :return:
    """
    base_model = ResNet50(input_shape=INPUT_SHAPE, include_top=False, pooling=None)
    if lock_base_model:
        for layer in base_model.layers:
            layer.trainable = False
    x = GlobalAveragePooling2D(name='avg_pool_final')(base_model.layers[-2].output)
    res = Dense(NB_CLASSES, activation='sigmoid', name='classes', kernel_initializer='zero',
                kernel_regularizer=l1(1e-5))(x)
    model = Model(inputs=base_model.inputs, outputs=res)
    return model
Example #23
 def KerasModelSmallNet50(self, imgInput):
     """
     Construct small net. The image size is 50*50, which is suitable for map.
     """
     x = Conv2D(16, (3, 3), activation='tanh')(imgInput)
     x = Conv2D(32, (3, 3), activation='relu')(x)
     x = MaxPooling2D(pool_size=(2, 2))(x)
     x = Conv2D(32, (3, 3), activation='relu')(x)
     x = Conv2D(32, (3, 3), activation='relu')(x)
     x = MaxPooling2D(pool_size=(2, 2))(x)
     x = Conv2D(32, (3, 3), activation='relu')(x)
     x = Conv2D(32, (3, 3), activation='relu')(x)
     x = Flatten()(x)
     x = Dense(self.featureDim, kernel_regularizer=regularizers.l2(0.0002),
               activity_regularizer=regularizers.l1(0.0002), name='fc_feature')(x)
     x = PReLU()(x)
     return x
Example #24
 def block(self, inputs, filters, kernel_size, strides, dilation_rate=(1, 1)):
     inputs = tf.keras.layers.Conv2D(
         filters=filters,
         dilation_rate=dilation_rate,
         kernel_size=kernel_size,
         strides=strides,
         kernel_regularizer=l1(0.1),
         kernel_initializer=self.utils.msra_initializer(kernel_size, filters),
         padding='SAME',
     )(inputs)
     inputs = tf.layers.batch_normalization(
         inputs,
         reuse=False,
         momentum=0.9,
         training=self.utils.is_training
     )
     inputs = self.utils.hard_swish(inputs)
     return inputs
Example #25
    def KerasModelResNet(self, imgInput):
        """
        Construct resNet. The image size is 150*150, which is suitable for image.
        """
        bn_axis = 3

        x = ZeroPadding2D((3, 3))(imgInput)
        x = Convolution2D(8, 7, strides=(2, 2), name='conv1')(x)
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv_block(x, 3, [8, 8, 16], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [8, 8, 16], stage=2, block='b')
        x = identity_block(x, 3, [8, 8, 16], stage=2, block='c')

        x = conv_block(x, 3, [16, 16, 32], stage=3, block='a')
        x = identity_block(x, 3, [16, 16, 32], stage=3, block='b')
        x = identity_block(x, 3, [16, 16, 32], stage=3, block='c')
        x = identity_block(x, 3, [16, 16, 32], stage=3, block='d')

        x = conv_block(x, 3, [32, 32, 64], stage=4, block='a')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='b')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='c')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='d')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='e')
        x = identity_block(x, 3, [32, 32, 64], stage=4, block='f')

        x = conv_block(x, 3, [64, 64, 128], stage=5, block='a')
        x = identity_block(x, 3, [64, 64, 128], stage=5, block='b')
        x = identity_block(x, 3, [64, 64, 128], stage=5, block='c')

        x = conv_block(x, 3, [64, 64, 256], stage=6, block='a')
        x = identity_block(x, 3, [64, 64, 256], stage=6, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=6, block='c')

        x = GlobalAveragePooling2D()(x)
        # x = Flatten()(x)
        x = Dense(self.featureDim,
                  kernel_regularizer=regularizers.l2(0.0002),
                  activity_regularizer=regularizers.l1(0.0002),
                  name='fc_feature')(x)
        x = PReLU()(x)
        return x
Example #26
def build_model_inception_v3_dropout(lock_base_model: bool):
    base_model = InceptionV3(input_shape=INPUT_SHAPE,
                             include_top=False,
                             pooling=None)
    if lock_base_model:
        for layer in base_model.layers:
            layer.trainable = False
    # base_model.summary()
    x = GlobalAveragePooling2D(name='avg_pool_final')(
        base_model.layers[-1].output)
    x = Dropout(0.25)(x)
    res = Dense(NB_CLASSES,
                activation='sigmoid',
                name='classes',
                kernel_initializer='zero',
                kernel_regularizer=l1(1e-5))(x)
    model = Model(inputs=base_model.inputs, outputs=res)
    # model.summary()
    return model
Example #27
    def __init__(self):
        self.batch_size = 15
        self.learning_rate = 0.0001
        self.epochs = 6000

        self.time_stamps = 1524
        self.num_steps = 64
        self.dropout = 0.5

        self.regularizer_type = 'l2'
        if self.regularizer_type == 'l2':
            self.regularizer = l2(1)
        elif self.regularizer_type == 'l1':
            self.regularizer = l1(1)
        elif self.regularizer_type == 'l1_l2':
            self.regularizer = l1_l2(1)

        self.model_type = ''
        self.data_type = 'all'
Example #28
def test_bn_reg(case):
    x, y, model_fn, backbone = case

    l1_reg = regularizers.l1(1)
    model = model_fn(backbone)
    reg_model = set_regularization(model, gamma_regularizer=l1_reg)
    _test_regularizer(model, reg_model, x, y)

    model = model_fn(backbone)
    reg_model = set_regularization(model, beta_regularizer=l1_reg)
    _test_regularizer(model, reg_model, x, y)

    l2_reg = regularizers.l2(1)
    model = model_fn(backbone)
    reg_model = set_regularization(model, gamma_regularizer=l2_reg)
    _test_regularizer(model, reg_model, x, y)

    model = model_fn(backbone)
    reg_model = set_regularization(model, beta_regularizer=l2_reg)
    _test_regularizer(model, reg_model, x, y)
    def __init__(self,
                 is_on,
                 hidden_state_size=100,
                 item_num=3,
                 entropy_reg=0.05):
        self.is_on = is_on
        self.item_num = item_num
        if self.is_on:
            self.models = []
            for _ in range(self.item_num):
                model = Sequential([
                    Dense(100, input_shape=(hidden_state_size, )),
                    Dense(6,
                          activity_regularizer=regularizers.l1(entropy_reg)),
                    Activation('softmax')
                ])
                model.compile(
                    optimizer='adam',
                    loss='categorical_crossentropy'  # TODO these are random, needs to be checked
                    # metrics=['accuracy']
                )

                self.models.append(model)
 def build_model(self):
     self.model = tf.keras.Sequential()
     self.model.add(
         layers.Dense(128,
                      input_shape=(64, ),
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(L2_RATIO),
                      activity_regularizer=regularizers.l1(L2_RATIO)))
     self.model.add(
         layers.Dense(128,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(L2_RATIO),
                      activity_regularizer=regularizers.l1(L2_RATIO)))
     self.model.add(
         layers.Dense(128,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(L2_RATIO),
                      activity_regularizer=regularizers.l1(L2_RATIO)))
     self.model.add(
         layers.Dense(128,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(L2_RATIO),
                      activity_regularizer=regularizers.l1(L2_RATIO)))
     self.model.add(
         layers.Dense(128,
                      activation='tanh',
                      kernel_regularizer=regularizers.l2(L2_RATIO),
                      activity_regularizer=regularizers.l1(L2_RATIO)))
     self.model.add(
         layers.Dense(3,
                      activation='softmax',
                      kernel_regularizer=regularizers.l2(L2_RATIO),
                      activity_regularizer=regularizers.l1(L2_RATIO)))
     self.model.compile(
         optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
         loss='categorical_crossentropy',
         metrics=['accuracy'])
     return
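A minimal usage sketch for build_model above, assuming an already-constructed instance of the enclosing class (named `agent` here purely for illustration) and the module-level L2_RATIO and LEARNING_RATE constants it relies on; the model expects 64-dimensional inputs and 3-class one-hot targets.

# Hedged usage sketch: `agent` is a hypothetical instance of the enclosing class.
import numpy as np

agent.build_model()
X = np.random.rand(256, 64)
y = tf.keras.utils.to_categorical(np.random.randint(0, 3, size=(256,)), num_classes=3)
agent.model.fit(X, y, epochs=1, batch_size=32)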