Example #1
def conv_to_rnn(inputs, n_out, *args, l2_reg=0.01):
    """Convolution on each stimulus, then pass sequence to an RNN"""
    sigma = 0.05
    # Applies this conv layer to each stimulus in the sequence individually
    y = TimeDistributed(Conv2D(8,
                               15,
                               data_format="channels_first",
                               kernel_regularizer=l2(1e-3)),
                        input_shape=(40, 1, 50, 50))(inputs)
    y = Activation('relu')(GaussianNoise(sigma)(y))
    y = TimeDistributed(
        Conv2D(8,
               11,
               data_format="channels_first",
               kernel_regularizer=l2(1e-3)))(y)
    y = Activation('relu')(GaussianNoise(sigma)(y))
    # Flatten feature maps to pass to LSTM
    y = TimeDistributed(Flatten())(y)
    y = SimpleRNN(50,
                  activation='relu',
                  kernel_initializer='random_normal',
                  recurrent_initializer='random_normal')(y)
    y = Dense(n_out, init='normal')(y)
    outputs = Activation('softplus')(y)
    return Model(inputs, outputs, name="CONV_TO_RNN")
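A minimal usage sketch (assumed, not taken from the source repo): the builder expects a Keras Input whose shape matches the input_shape passed to the first TimeDistributed layer; the n_out value, optimizer, and loss below are illustrative, and the Input import is assumed to come from the same Keras package the module uses.

from keras.layers import Input

stimuli = Input(shape=(40, 1, 50, 50))  # 40 time steps of 1x50x50 frames
model = conv_to_rnn(stimuli, n_out=10)
model.compile(optimizer='adam', loss='poisson')
model.summary()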
Example #2
def get_seq2seq_model_one_hot(input_size, output_size, MAX_LEN_OUTPUT):
    seq2seq_model = Sequential()
    seq2seq_model.add(GaussianNoise(
        0.15,
        input_shape=(None, input_size)))  # The original model had no GaussianNoise
    seq2seq_model.add(
        BatchNormalization())  # The original model had no BatchNormalization
    seq2seq_model.add(
        LSTM(750, return_sequences=False, activation="relu")
    )  # max_word_index+2 for one_hot, dim_embeddings for embedding
    seq2seq_model.add(RepeatVector(MAX_LEN_OUTPUT))
    seq2seq_model.add(GaussianNoise(0.15))
    seq2seq_model.add(
        BatchNormalization())  # The original model had no BatchNormalization
    seq2seq_model.add(LSTM(950, return_sequences=True, activation="relu"))
    seq2seq_model.add(
        BatchNormalization())  # The original model had no BatchNormalization
    seq2seq_model.add(
        TimeDistributed(Dense(output_size, activation="softmax"))
    )  # max_word_index+2 for one_hot, dim_embeddings for embedding
    seq2seq_model.compile(optimizer='adadelta',
                          sample_weight_mode="temporal",
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
    return seq2seq_model
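A hedged usage sketch (the vocabulary size and output length are invented for illustration, not from the original): the encoder consumes one-hot sequences of shape (batch, timesteps, input_size) and the decoder emits MAX_LEN_OUTPUT one-hot steps.

model = get_seq2seq_model_one_hot(input_size=1000, output_size=1000,
                                  MAX_LEN_OUTPUT=20)
# x: (batch, input_len, 1000) one-hot encoder input
# y: (batch, 20, 1000) one-hot decoder targets
# model.fit(x, y, batch_size=32, epochs=10)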
Example #3
def encode(x, use_noise, relu_max):
    print 'encoder input shape:', x._keras_shape
    assert x._keras_shape[1:] == (28, 28, 1)

    # 28, 28, 1
    y = Conv2D(20, 5, 5, activation='relu', border_mode='same', subsample=(2,2))(x)
    y = BN(mode=2, axis=3)(y)
    # 14, 14, 20
    y = Conv2D(40, 3, 3, activation='relu', border_mode='same', subsample=(2,2))(y)
    y = BN(mode=2, axis=3)(y)
    # 7, 7, 40
    print 'pre_fc shape:', y._keras_shape
    latent_dim = 80
    y = Conv2D(latent_dim, 7, 7, activation='linear',
               border_mode='same', subsample=(7,7))(y)
    # 1, 1, latent_dim
    if use_noise and not relu_max:
        print 'add noise and pretend relu_max will be:', RELU_MAX
        y = GaussianNoise(0.2 * RELU_MAX)(y)

    y = Activation(utils.relu_n(relu_max))(y)
    if relu_max:
        print 'relu max:', relu_max
        y = Activation(utils.scale_down(relu_max))(y)
        # y in [0, 1]
        if use_noise:
            y = GaussianNoise(0.2)(y)
            y = Activation('relu')(y)
    y = Reshape((latent_dim,))(y)
    # 80
    return y
Example #4
def discriminator_google_mnistM(img_dim, wd):

    disc_input = Input(shape=img_dim, name="discriminator_input")
    x = Conv2D(64, (3, 3), strides=(1, 1), name="conv1", border_mode="same",
               weight_norm=False, kernel_initializer=RandomNormal(stddev=0.02),
               kernel_regularizer=l2(wd))(disc_input)
    x = BatchNormGAN(axis=1)(x)
    x = Dropout(0.1)(x)
    x = LeakyReLU(0.2)(x)
    x = GaussianNoise(sigma=0.2)(x)
    x = Conv2D(128, (3, 3), strides=(2, 2), name="conv2", border_mode="same",
               weight_norm=False, kernel_initializer=RandomNormal(stddev=0.02),
               kernel_regularizer=l2(wd))(x)
    x = Dropout(0.2)(x)
    # x = LeakyReLU(0.2)(x)
    x = GaussianNoise(sigma=0.2)(x)
    x = Conv2D(256, (3, 3), strides=(2, 2), name="conv3", border_mode="same",
               weight_norm=False, kernel_initializer=RandomNormal(stddev=0.02),
               kernel_regularizer=l2(wd))(x)
    x = BatchNormGAN(axis=1)(x)
    x = Dropout(0.2)(x)
    x = LeakyReLU(0.2)(x)
    x = GaussianNoise(sigma=0.2)(x)
    x = Conv2D(512, (3, 3), strides=(2, 2), name="conv4", border_mode="same",
               weight_norm=False, kernel_initializer=RandomNormal(stddev=0.02),
               kernel_regularizer=l2(wd))(x)
    x = Dropout(0.2)(x)
    # x = LeakyReLU(0.2)(x)
    x = GaussianNoise(sigma=0.2)(x)
    x = Flatten()(x)
    x = Dense(1, init=RandomNormal(stddev=0.02), activation='sigmoid',
              name='fc', W_regularizer=l2(wd))(x)
    discriminator_model = Model(input=[disc_input], output=x,
                                name="discriminator_google")
    visualize_model(discriminator_model)
    return discriminator_model
Example #5
def build_model(input_dim,
                h0_dim=20,
                h1_dim=20,
                output_dim=1,
                rec_layer_type=ReducedLSTMA,
                rec_layer_init='zero',
                layer_type=TimeDistributedDense,
                lr=.001,
                base_name='rlstm',
                add_input_noise=True,
                add_target_noise=True):
    model = Sequential()
    if add_input_noise:
        model.add(GaussianNoise(.1, input_shape=(None, input_dim)))
    model.add(
        LSTM(h0_dim,
             input_dim=input_dim,
             init='uniform_small',
             activation='tanh'))
    model.add(Dropout(.4))
    model.add(LSTM(h1_dim, init='uniform_small', activation='tanh'))
    model.add(Dropout(.4))
    model.add(
        rec_layer_type(output_dim, init=rec_layer_init, return_sequences=True))
    if add_target_noise:
        model.add(GaussianNoise(5.))
    model.compile(loss="mse", optimizer=RMSprop(lr=lr))

    model.base_name = base_name
    yaml_string = model.to_yaml()
    #    print(yaml_string)
    with open(model_savedir + model.base_name + '.yaml', 'w') as f:
        f.write(yaml_string)
    return model
Example #6
def nips_cnn(inputs, n_out):
    """NIPS 2016 CNN Model"""
    # injected noise strength
    sigma = 0.1

    # first layer
    y = Conv2D(16,
               15,
               data_format="channels_first",
               kernel_regularizer=l2(1e-3))(inputs)
    y = Activation('relu')(GaussianNoise(sigma)(y))

    # second layer
    y = Conv2D(8, 9, data_format="channels_first",
               kernel_regularizer=l2(1e-3))(y)
    y = Activation('relu')(GaussianNoise(sigma)(y))

    y = Flatten()(y)
    y = Dense(n_out,
              init='normal',
              kernel_regularizer=l2(1e-3),
              activity_regularizer=l1(1e-3))(y)
    outputs = Activation('softplus')(y)

    return Model(inputs, outputs, name='NIPS_CNN')
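These builders repeatedly wrap a convolution in GaussianNoise followed by a ReLU. The noise layer is a pure regularizer: it perturbs activations only during training and is the identity at inference. A quick check, sketched with tf.keras (not part of the original code):

import numpy as np
import tensorflow as tf

noise = tf.keras.layers.GaussianNoise(0.1)
x = np.ones((1, 4), dtype='float32')
print(noise(x, training=False))  # unchanged at inference time
print(noise(x, training=True))   # zero-mean Gaussian noise with stddev 0.1 added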
Example #7
def build_model():
    model = Sequential()

    # First convolutional layer (8 kernels)
    model.add(
        Convolution2D(8,
                      5,
                      5,
                      border_mode='valid',
                      dim_ordering='th',
                      input_shape=(1, 20, 20)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(GaussianNoise(0.001))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(Activation('tanh'))

    # Second convolutional layer (16 kernels)
    # model.add(GaussianNoise(0.001))
    # model.add(UpSampling2D(size=(2, 2), dim_ordering='th'))
    model.add(
        AtrousConvolution2D(16, 3, 3, border_mode='valid', dim_ordering='th'))
    # model.add(ZeroPadding2D((1, 1)))
    model.add(Activation('tanh'))
    # model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    # model.add(Activation('tanh'))

    # Fully connected layer: flatten the 2D feature maps from the previous layer into 1D
    model.add(Flatten())
    model.add(Dense(20))
    model.add(Activation('tanh'))

    # LSTM layers
    model.add(Reshape((20, 1)))
    model.add(
        LSTM(input_dim=1,
             output_dim=32,
             activation='tanh',
             inner_activation='tanh',
             return_sequences=True))
    model.add(GaussianNoise(0.01))
    model.add(
        LSTM(64,
             activation='tanh',
             inner_activation='tanh',
             return_sequences=False))
    model.add(Dropout(0.2))  # Dropout to reduce overfitting

    model.add(Dense(1))
    model.add(Activation('linear'))

    start = time.time()

    # Use SGD + momentum
    # The loss argument to model.compile is the loss (objective) function
    # sgd = SGD(lr=0.08, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="Nadam")  # Nadam # rmsprop
    print "Compilation Time : ", time.time() - start
    return model
Example #8
def createModel(dropout_fraction=0.0, batch_normalization=False):

    input = Input(shape=(3, 256, 256))

    x = Convolution2D(32,
                      5,
                      5,
                      activation='relu',
                      W_regularizer=l2(0.01),
                      border_mode='same',
                      name='conv1')(input)

    #x = Dropout(dropout_fraction)(x)

    x = MaxPooling2D((4, 4), strides=(4, 4), name='pool1')(x)

    x = BatchNormalization()(x)

    x = GaussianNoise(0.05)(x)  #Roque

    x = Convolution2D(32,
                      5,
                      5,
                      activation='relu',
                      W_regularizer=l2(0.01),
                      border_mode='same',
                      name='conv2')(x)

    #x = Dropout(dropout_fraction)(x)

    x = MaxPooling2D((4, 4), strides=(4, 4), name='pool2')(x)

    x = BatchNormalization()(x)

    x = GaussianNoise(0.05)(x)  #Roque

    x = MaxPooling2D((4, 4), strides=(4, 4),
                     name='pool3')(x)  #Roque (Before (2,2))

    #Classification block

    x = Flatten(name='flatten')(x)

    #x = Dropout(dropout_fraction)(x)

    x = Dense(512, activation='relu', name='fc1')(x)

    #x = Dropout(dropout_fraction)(x)

    x = Dense(8, activation='softmax', name='predictions')(x)

    model = Model(input=input, output=x)

    return model
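The builder above returns an uncompiled model; a plausible follow-up (an assumption, not from the source repo):

model = createModel(dropout_fraction=0.25)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])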
Example #9
def create_mergegrouped(base_params):
    # np.random.seed(seed)

    feature_len = len(X[0, 0, :])
    season_len = len(X[0, :])
    node_stretch = base_params['layer_scale']
    noise_std = base_params['noise']
    conv_size = base_params['conv_size']

    # sized based on number of features times scale factor
    l1_size = int(feature_len * node_stretch)
    l2_size = int(feature_len * node_stretch)

    left_model = Sequential()
    right_model = Sequential()

    left_model.add(
        GaussianNoise(noise_std, input_shape=(season_len, feature_len)))
    right_model.add(
        GaussianNoise(noise_std, input_shape=(season_len, feature_len)))
    left_model.add(
        Convolution1D(conv_size,
                      1,
                      input_shape=(season_len, feature_len),
                      input_length=feature_len))
    right_model.add(
        Convolution1D(conv_size,
                      1,
                      input_shape=(season_len, feature_len),
                      input_length=feature_len))
    left_model.add(Flatten())
    right_model.add(Flatten())

    left_model.add(Dense(l1_size, init='normal', activation='sigmoid'))
    left_model.add(Dropout(.15))

    right_model.add(Dense(l1_size, init='normal', activation='tanh'))
    right_model.add(Dropout(.15))

    final_model = Sequential()
    merged = Merge([left_model, right_model], mode='ave')
    final_model.add(merged)
    final_model.add(Dense(l2_size, init='normal', activation='sigmoid'))
    final_model.add(Dropout(.1))
    final_model.add(Dense(12, init='normal', activation='softmax', bias=True))
    final_model.compile(
        loss='categorical_crossentropy',
        optimizer='adam',
        metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    return final_model
Example #10
def discriminator_2048x7x7(img_dim, wd, n_classes, disc_type):
    disc_input = Input(shape=img_dim, name="discriminator_input")
    x = Conv2D(128, (7, 7),
               strides=(1, 1),
               name="conv1",
               border_mode="same",
               kernel_initializer=RandomNormal(stddev=0.02),
               kernel_regularizer=l2(wd))(disc_input)
    x = BatchNormGAN(axis=1)(x)
    x = Dropout(0.1)(x)
    x = LeakyReLU(0.2)(x)
    x = GaussianNoise(sigma=0.2)(x)
    aux = x
    x = Conv2D(1, (3, 3),
               strides=(1, 1),
               name="finale_conv",
               border_mode="same",
               kernel_initializer=RandomNormal(stddev=0.02),
               kernel_regularizer=l2(wd))(x)
    aux = Flatten()(aux)
    aux = Dense(n_classes,
                activation='softmax',
                name='auxiliary',
                W_regularizer=l2(wd))(aux)
    x = GlobalAveragePooling2D()(x)
    discriminator_model_domain = Model(input=[disc_input],
                                       output=[x],
                                       name="discriminator_domain")
    discriminator_model_class = Model(input=[disc_input],
                                      output=[aux],
                                      name="discriminator_class")

    visualize_model(discriminator_model_domain)
    visualize_model(discriminator_model_class)
    return discriminator_model_domain, discriminator_model_class
Example #11
def get_model(input_shape,
              embedding_layer,
              classes=6,
              units=1024,
              dtype=tf.float32):
    sentence_indices = Input(shape=input_shape, dtype=dtype)

    embeddings = embedding_layer(sentence_indices)
    noised_embeddings = GaussianNoise(0.2)(embeddings)
    dropped_embeddings = Dropout(rate=0.2)(noised_embeddings)

    # recurrent_regularizer=l1_l2(0.01,0.01)
    # activity_regularizer=l1_l2(0.01, 0.01)
    # kernel_regularizer=l1_l2(0.01, 0.01)
    # bias_regularizer=l1_l2(0.01, 0.01)

    x = Bidirectional(LSTM(units=units,
                           return_sequences=False))(dropped_embeddings)
    # x = Attention(input_shape[0])(x)
    x = Dropout(rate=0.3)(x)
    # x = Bidirectional(LSTM(units=units))(x)
    # x = Dropout(rate=0.5)(x)
    x = Dense(units=classes, activation="softmax")(x)

    return Model(inputs=sentence_indices, outputs=x)
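A hypothetical call (the embedding layer below is an assumption, not from the original code): padded sequences of 150 token ids with frozen 300-dimensional embeddings.

from tensorflow.keras.layers import Embedding

embedding = Embedding(input_dim=20000, output_dim=300, trainable=False)
model = get_model(input_shape=(150,), embedding_layer=embedding, classes=6)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])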
Example #12
def create_model_3_noise():
    inputs = Input((32, 32, 32, 1))

    noise = GaussianNoise(sigma=0.05)(inputs)
    
    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(noise)
    conv1 = SpatialDropout3D(0.1)(conv1)
    conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)

    conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = SpatialDropout3D(0.1)(conv2)
    conv2 = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling3D(pool_size=(2,2, 2))(conv2)

    x = Flatten()(pool2)
    x = Dense(64, init='normal')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(1, init='normal', activation='sigmoid')(x)
        
    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=0.000001)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
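One portability note, sketched here rather than taken from the source: GaussianNoise(sigma=...) is the Keras 1 keyword, while Keras 2 and tf.keras name the argument stddev.

from tensorflow.keras.layers import GaussianNoise, Input

inputs = Input((32, 32, 32, 1))
noise = GaussianNoise(stddev=0.05)(inputs)  # Keras 2 equivalent of GaussianNoise(sigma=0.05)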
Example #13
def build_rlstm2(input_dim,
                 h0_dim,
                 h1_dim,
                 output_dim=1,
                 lstm_init='zero',
                 lr=.001,
                 base_name='rlstm',
                 add_input_noise=True,
                 add_target_noise=False):
    model = Sequential()
    if add_input_noise:
        model.add(GaussianNoise(.1, input_shape=(None, input_dim)))
    model.add(
        RLSTM(input_dim,
              h0_dim,
              h1_dim,
              output_dim,
              init=lstm_init,
              W_h0_regularizer=l2(0.0005),
              W_h1_regularizer=l2(0.0005),
              return_sequences=True))
    #    if add_target_noise:
    #        model.add(GaussianNoise(5.))
    model.compile(loss="mse", optimizer=RMSprop(lr=lr))

    model.base_name = base_name
    yaml_string = model.to_yaml()
    #    print(yaml_string)
    with open(model_savedir + model.base_name + '.yaml', 'w') as f:
        f.write(yaml_string)
    return model
Example #14
def train_classifier(data, num_classes, train_labels, train_features):
    """
    Function to train the neural network
    :param data: the config dictionary
    :param num_classes: the distinct number of labels (binary/multi-class)
    :param train_labels: the labels of the training instances
    :param train_features: the features of the training instances
    :return: the MLP classifier
    """

    # Neural Network model using keras
    data = data["keras"]
    print("Creating Sequential model in keras")
    # Create new model with the specified params in the config file
    data_dimension = train_features.shape[1]
    epochs = data["epochs"]
    batch_size=data["batch_size"]
    model = Sequential([
        Dense(data["hidden_layers_size"], input_shape=(data_dimension,), name="first_dense"),
        Activation('relu', name="first_activation"),
        Dense(data["hidden_layers_size"], activation='relu', name="second_dense"),
        Dropout(data["dropout"]),
        Dense(data["hidden_layers_size"], activation='relu', name="third_dense"),
        GaussianNoise(data["gaussian_noise"]),
        Dense(num_classes, name="Dense_to_numclasses"),
        Activation('softmax', name="classification_activation"),
    ])
    # Train the model and return it
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # Convert labels to categorical one-hot encoding
    one_hot_labels = keras.utils.to_categorical(train_labels, num_classes=num_classes)
    model.fit(train_features, one_hot_labels, epochs=epochs, batch_size=batch_size)
    return model
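A hypothetical config dictionary matching the keys read above (all values are invented for illustration):

config = {
    "keras": {
        "hidden_layers_size": 128,
        "dropout": 0.3,
        "gaussian_noise": 0.1,
        "epochs": 20,
        "batch_size": 64,
    }
}
# clf = train_classifier(config, num_classes=2, train_labels=labels,
#                        train_features=features)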
Example #15
def denoisingAutoencoder(self, noise, deep_size):
    if self.dropout_noise is None:
        self.model.add(
            GaussianNoise(noise, input_shape=(self.input_size, )))
    else:
        self.model.add(
            Dropout(self.dropout_noise[0],
                    input_shape=(self.input_size, )))
    if deep_size is not None:
        self.model.add(
            Dense(output_dim=deep_size,
                  input_dim=self.hidden_layer_size,
                  init=self.layer_init,
                  activation=self.hidden_activation,
                  W_regularizer=l2(self.reg)))
    self.model.add(
        Dense(output_dim=self.hidden_layer_size,
              input_dim=self.input_size,
              init=self.layer_init,
              activation=self.hidden_activation,
              W_regularizer=l2(self.reg)))
    self.model.add(
        Dense(output_dim=self.output_size,
              init=self.layer_init,
              activation=self.output_activation,
              W_regularizer=l2(self.reg)))
    self.model.compile(loss=self.loss, optimizer=self.optimizer)
    return self.model
Example #16
def encode(x, relu_max):
    print 'encoder input shape:', x._keras_shape
    assert x._keras_shape[1:] == (96, 96, 3)

    # 96, 96, 3
    y = Conv2D(64,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(x)
    y = BN(mode=2, axis=3)(y)
    # 48, 48, 64
    y = Conv2D(128,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(y)
    y = BN(mode=2, axis=3)(y)
    # 24, 24, 128
    y = Conv2D(256,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(y)
    y = BN(mode=2, axis=3)(y)
    # 12, 12, 256
    y = Conv2D(512,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(y)
    y = BN(mode=2, axis=3)(y)
    # 6, 6, 512

    assert y._keras_shape[1:] == (6, 6, 512), \
        '%s vs %s' % (y._keras_shape[1:], [6, 6, 512])
    y = Conv2D(LATENT_DIM,
               6,
               6,
               activation='linear',
               border_mode='same',
               subsample=(6, 6))(y)
    # 1, 1, LATENT_DIM
    if not relu_max:
        print 'add noise and pretend relu_max will be:', RELU_MAX
        y = GaussianNoise(0.2 * RELU_MAX)(y)

    y = Activation(utils.relu_n(relu_max))(y)
    if relu_max:
        print 'relu_max:', relu_max
        y = Activation(utils.scale_down(relu_max))(y)
        # y in [0, 1]

    y = Reshape((LATENT_DIM, ))(y)  # or Reshape([-1])(y) ?
    # LATENT_DIM
    return y
Example #17
File: run.py Project: qdbp/kaggle
def mk_model():
    i = Input(shape=(C, H, W))

    stack = [
        GaussianNoise(0.2),
        Conv2D(8, 3, 3, border_mode='valid', activation='relu'),
        Conv2D(8, 3, 3, border_mode='valid', activation='relu'),
        MPool2D(pool_size=(2, 2)),
        Conv2D(16, 3, 3, border_mode='valid', activation='relu'),
        Conv2D(16, 3, 3, border_mode='valid', activation='relu'),
        MPool2D(pool_size=(2, 2)),
        Flatten(),
        Dropout(0.5),
        Dense(32, activation='relu'),
        # Dropout(0.5),
        # Dense(32, activation='relu'),
        Dense(NDIM, activation='softmax'),
    ]

    y = i
    for layer in stack:
        y = layer(y)

    m = Model(input=i, output=y)
    m.compile(optimizer='adam', loss='categorical_crossentropy')

    return m, stack[0]
Example #18
def get_RNN_model(in_shape,
                  td_num=512,
                  ltsm_out_dim=256,
                  nb_hidden=100,
                  drop1=0.5,
                  drop2=0.5):
    model = Sequential()

    model.add(GaussianNoise(0.05, input_shape=in_shape))
    model.add(TimeDistributedDense(td_num))
    model.add(LSTM(ltsm_out_dim, return_sequences=True))
    reg = l2(0.05)
    #    model.add(TimeDistributedDense(td_num, W_regularizer=l2(0.03)))
    #reg.set_param(model.layers[3].get_params()[0][0])
    #model.layers[3].regularizers = [reg]
    model.add(Dropout(drop1))

    model.add(LSTM(ltsm_out_dim))
    #  reg = l2(0.05)
    #  reg.set_param(model.layers[3].get_params()[0][0])
    #  model.layers[3].regularizers = [reg]
    model.add(Dropout(drop1))
    #    model.regularizers = [l2(0.05)]
    #model.add(Activation('relu'))

    model.add(Flatten())
    model.add(Dense(nb_hidden, W_regularizer=l2(0.05)))
    model.add(Activation('relu'))
    model.add(Dropout(drop2))

    model.add(Dense(1))
    model.add(Activation('linear'))

    model.compile(loss='mse', optimizer='rmsprop')
    return model
Example #19
def build_discriminator():
    # build a relatively standard conv net, with LeakyReLUs as suggested in
    # the reference paper
    cnn = Sequential()

    cnn.add(GaussianNoise(0.05, input_shape=(3, 32, 32)))  # Add this layer to prevent D from overfitting!

    cnn.add(Conv2D(16, kernel_size=3, strides=2, padding='same',
                   kernel_initializer='glorot_normal', bias_initializer='Zeros'))
    cnn.add(LeakyReLU(alpha=0.2))
    cnn.add(Dropout(0.5))

    cnn.add(Conv2D(32, kernel_size=3, strides=1, padding='same',
                   kernel_initializer='glorot_normal', bias_initializer='Zeros'))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(alpha=0.2))
    cnn.add(Dropout(0.5))

    cnn.add(Conv2D(64, kernel_size=3, strides=2, padding='same',
                   kernel_initializer='glorot_normal', bias_initializer='Zeros'))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(alpha=0.2))
    cnn.add(Dropout(0.5))

    cnn.add(Conv2D(128, kernel_size=3, strides=1, padding='same',
                   kernel_initializer='glorot_normal', bias_initializer='Zeros'))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(alpha=0.2))
    cnn.add(Dropout(0.5))

    cnn.add(Conv2D(256, kernel_size=3, strides=2, padding='same',
                   kernel_initializer='glorot_normal', bias_initializer='Zeros'))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(alpha=0.2))
    cnn.add(Dropout(0.5))

    cnn.add(Conv2D(512, kernel_size=3, strides=1, padding='same',
                   kernel_initializer='glorot_normal', bias_initializer='Zeros'))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(alpha=0.2))
    cnn.add(Dropout(0.5))

    cnn.add(Flatten())

    cnn.add(MinibatchDiscrimination(50, 30))

    image = Input(shape=(3, 32, 32))

    features = cnn(image)

    # first output (name=generation) is whether or not the discriminator
    # thinks the image that is being shown is fake, and the second output
    # (name=auxiliary) is the class that the discriminator thinks the image
    # belongs to.
    fake = Dense(1, activation='sigmoid', name='generation',
                 kernel_initializer='glorot_normal', bias_initializer='Zeros')(features)
    aux = Dense(class_num, activation='softmax', name='auxiliary',
                kernel_initializer='glorot_normal', bias_initializer='Zeros')(features)

    return Model(image, [fake, aux])
Example #20
def create_G(input_dim=(100, ), output_dim=(9, 20)):
    G = Sequential()
    G.add(Dense(input_shape=input_dim, \
        units= 128, \
        kernel_initializer=initializers.random_normal(stddev=0.02)))

    G.add(BatchNormalization())
    #G.add(Conv2DTranspose(32, 5, strides=(2,1), activation=Activation('relu'), padding='same',kernel_initializer='glorot_uniform'))
    #    G.add(GaussianDropout(0.25))  #https://arxiv.org/pdf/1611.07004v1.pdf
    #    G.add(GaussianNoise(0.05))
    G.add(Activation('relu'))

    G.add(Dense(128))
    G.add(BatchNormalization())
    G.add(GaussianDropout(0.25))  #https://arxiv.org/pdf/1611.07004v1.pdf
    G.add(GaussianNoise(0.05))
    G.add(Activation('relu'))

    G.add(
        Dense(np.prod(output_dim),
              kernel_regularizer=regularizers.l2(0.01),
              activity_regularizer=regularizers.l2(0.01)))
    G.add(BatchNormalization())
    #    G.add(GaussianDropout(0.25))  #https://arxiv.org/pdf/1611.07004v1.pdf
    #    G.add(GaussianNoise(0.05))

    G.add(Activation('sigmoid'))

    G.add(Reshape(output_dim))
    return (G)
Example #21
File: model.py Project: moomou/mlab
def build_model5(input_shape,
                 nb_output_bin,
                 kernel_sizes,
                 checkpoints_dir=None):
    # ref: Deep Speaker Feature Learning for Text-independent Speaker Verification
    shape = list(input_shape)
    dilation_depth = 4
    stack = 1

    glog.info('Shape:: %s', shape)
    out = start = Input(shape=shape, name='start')

    out = GaussianNoise(0.025)(out)

    delay_kernel_size = kernel_sizes.pop(0)
    for j in range(stack):
        for i in range(dilation_depth):
            out = Conv2D(128,
                         delay_kernel_size,
                         dilation_rate=(2**i, 1),
                         name='relu_dilation_%s_%s' % (j, 2**i),
                         padding='same',
                         activation='relu')(out)

    out = Dense(512, name='bottleneck')(out)

    out = Conv2D(256, kernel_sizes.pop(0))(out)
    # shape = (6, 24) or (6, 33)
    out = MaxPooling2D(kernel_sizes.pop(0))(out)
    # shape = (3, 12)
    out = Conv2D(256, kernel_sizes.pop(0))(out)
    # shape = (2, 8)
    out = MaxPooling2D(kernel_sizes.pop(0))(out)

    # out = Conv2D(128, kernel_sizes.pop(0))(out)
    # out = Dropout(0.5)(out)
    # out = Conv2D(128, kernel_sizes.pop(0))(out)
    # out = Dropout(0.5)(out)

    print(out.shape)
    # shape = (3, 5)
    out = Reshape(kernel_sizes.pop(0))(out)

    # D vector
    out = Dense(400, name='d_vector')(out)
    out = GlobalAveragePooling1D()(out)
    out = Dense(nb_output_bin, activation='softmax')(out)

    model = Model(inputs=start, outputs=out)
    field_width, field_ms = compute_receptive_field(dilation_depth, 1)
    glog.info('model with width=%s and width_ms=%s' % (field_width, field_ms))
    model.summary()

    if checkpoints_dir:
        _load_checkoint(model, checkpoints_dir)
    else:
        model.epoch_num = 0

    return model
Example #22
def copy_cnn(inputs, n_out, *args, l2_reg=0.01):
    """Standard CNN with no batch norm"""
    sigma = 0.05
    print(inputs.shape)
    y = Conv2D(8,
               15,
               data_format="channels_first",
               kernel_regularizer=l2(1e-3))(inputs)
    y = Activation('relu')(GaussianNoise(sigma)(y))
    y = Conv2D(8,
               11,
               data_format="channels_first",
               kernel_regularizer=l2(1e-3))(y)
    y = Activation('relu')(GaussianNoise(sigma)(y))
    y = Dense(n_out, use_bias=False)(Flatten()(y))
    outputs = Activation('softplus')(y)
    return Model(inputs, outputs, name='COPY_CNN')
Example #23
def get_unet_mod(patch_size=(None, None), learning_rate=1e-5,
                 learning_decay=1e-6, gn_std=0.025, drop_out=0.25):
    '''Get U-Net model with Gaussian noise and dropout'''

    gaussian_noise_std = gn_std
    dropout = drop_out

    input_img = Input((patch_size[0], patch_size[1], 3))
    input_with_noise = GaussianNoise(gaussian_noise_std)(input_img)

    conv1 = Conv2D(32, (3, 3), activation='relu',
                   padding='same')(input_with_noise)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    pool4 = Dropout(dropout)(pool4)

    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=-1)
    up6 = Dropout(dropout)(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=-1)
    up7 = Dropout(dropout)(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=-1)
    up8 = Dropout(dropout)(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=-1)
    up9 = Dropout(dropout)(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=input_img, outputs=conv10)
    opt = Adam(lr=learning_rate, decay=learning_decay)
    model.compile(optimizer=opt, loss=dice_coef_loss, metrics=[dice_coef])

    return model
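Example invocation (a sketch; the patch size and hyperparameters are illustrative, and dice_coef_loss / dice_coef must already be defined as in the source module):

unet = get_unet_mod(patch_size=(256, 256), learning_rate=1e-4,
                    gn_std=0.02, drop_out=0.3)
unet.summary()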
Example #24
def define_model(input):
    model = Sequential([
        # For each training iteration, we're going to drop 10% of the input
        # nodes, randomly.  This helps avoid overfitting.
        Dropout(0.1, input_shape=(len(input[0]), )),

        # We normalize the inputs to make it easier to weight them correctly.
        BatchNormalization(epsilon=0.001,
                           mode=0,
                           axis=-1,
                           momentum=0.99,
                           weights=None,
                           beta_init='zero',
                           gamma_init='one',
                           gamma_regularizer=None,
                           beta_regularizer=None),
        GaussianNoise(0.01),  # Just a tiny bit of noise.

        # Ended up going with a lot of layers. Trains fast with large batch sizes.
        # Integer input is the number of nodes in given layer.
        Dense(
            256,
            W_regularizer=l2(0.01),
            activity_regularizer=activity_l2(0.01),
        ),

        # LeakyReLU is a modified Rectified Linear Unit.
        # "Leaky" refers to a modification to the negative slope,
        # which can get "stuck" in standard ReLUs.
        LeakyReLU(alpha=0.1818),
        Dense(32),
        LeakyReLU(alpha=0.1818),
        Dense(32),
        LeakyReLU(alpha=0.1818),
        Dense(32),
        LeakyReLU(alpha=0.1818),
        Dense(32),
        LeakyReLU(alpha=0.1818),
        Dense(32),
        LeakyReLU(alpha=0.1818),
        Dense(32),
        LeakyReLU(alpha=0.1818),
        Dense(2),
        Activation("softmax"),
    ])

    # Adam is a form of stochastic gradient descent with improved handling
    # of altering the learning rate on the fly.
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    # Compile the model with all our parameters.
    # We use binary cross-entropy, also known as log-loss.
    print("Training deep neural network model...")
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    return model
Example #25
    def BuildUnet(self,
                  _input_shape,
                  _filter_num,
                  _depth,
                  dropout=True,
                  _kernel_size=3,
                  _stride=2,
                  _nchannel=1,
                  _data_format='channels_last',
                  use_softmax=False):

        cut = []  # list used to save all cuts
        filters = []  # list used to save all filter numbers
        filter_num = _filter_num

        if _data_format == 'channels_last':
            axis = -1
        else:
            axis = 1

        inputs = Input(shape=_input_shape)
        x = GaussianNoise(0.03)(inputs)
        x = Conv2D(_filter_num, 1, padding='same', data_format=_data_format)(x)

        #  Down sampling part
        for i in range(_depth):
            filters.append(filter_num)
            x = self.ResidualBlock(_inputs=x, _filter_num=filter_num)
            cut.append(x)
            filter_num *= 2
            x = self.DownsampleBlock(x, filter_num)
            if i >= _depth - 2 and dropout:
                x = Dropout(0.5)(x)

        print(filters)
        # One left over bottom block
        x = self.ResidualBlock(_inputs=x, _filter_num=filter_num)

        #  Up sampling part
        for i in range(_depth):
            filter_num = filter_num // 2
            x = self.UpsampleBlock(x, filter_num)
            shortcut = cut[-(i + 1)]
            x = keras.layers.concatenate([x, shortcut], axis=axis)
            x = self.ResidualBlock(_inputs=x,
                                   _filter_num=2 * filters[-(i + 1)])

        # Output
        if use_softmax:
            # for cross entropy
            outputs = Conv2D(_nchannel, (1, 1), activation='softmax')(x)
        else:
            # for iou loss
            outputs = Conv2D(_nchannel, (1, 1), activation='sigmoid')(x)

        model = Model(inputs=inputs, outputs=outputs)

        return model
Example #26
def aSDAE(S, X, latent_dim, hidden_unit_nums=[], stddev=0.1, lam=0.5):
    S_noise = GaussianNoise(stddev)(S)
    X_noise = GaussianNoise(stddev)(X)
    h = S_noise

    for num in hidden_unit_nums:
        h_s = Dense(num,
                    kernel_regularizer=keras.regularizers.l2(lam),
                    bias_regularizer=keras.regularizers.l2(lam))(h)
        h_x = Dense(num,
                    kernel_regularizer=keras.regularizers.l2(lam),
                    bias_regularizer=keras.regularizers.l2(lam))(X_noise)
        h = Add()([h_s, h_x])
        h = Activation("relu")(h)

    latent_s = Dense(latent_dim,
                     kernel_regularizer=keras.regularizers.l2(lam),
                     bias_regularizer=keras.regularizers.l2(lam))(h)
    latent_x = Dense(latent_dim,
                     kernel_regularizer=keras.regularizers.l2(lam),
                     bias_regularizer=keras.regularizers.l2(lam))(X_noise)
    latent = Add()([latent_s, latent_x])
    latent = Activation("relu")(latent)
    h = latent

    for num in hidden_unit_nums[::-1]:
        h_s = Dense(num,
                    kernel_regularizer=keras.regularizers.l2(lam),
                    bias_regularizer=keras.regularizers.l2(lam))(h)
        h_x = Dense(num,
                    kernel_regularizer=keras.regularizers.l2(lam),
                    bias_regularizer=keras.regularizers.l2(lam))(X_noise)
        h = Add()([h_s, h_x])
        h = Activation("relu")(h)

    S_ = Dense(int(S.shape[1]),
               kernel_regularizer=keras.regularizers.l2(lam),
               bias_regularizer=keras.regularizers.l2(lam))(h)
    X_ = Dense(int(X.shape[1]),
               kernel_regularizer=keras.regularizers.l2(lam),
               bias_regularizer=keras.regularizers.l2(lam))(h)
    S_ = Activation("relu")(S_)
    X_ = Activation("relu")(X_)

    return latent, S_, X_
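A sketch of wiring the returned tensors into a trainable model (the input sizes and loss weights are assumptions, not from the original):

from keras.layers import Input
from keras.models import Model

S = Input(shape=(1000,))   # e.g. a user's rating vector
X = Input(shape=(300,))    # e.g. side-information features
latent, S_hat, X_hat = aSDAE(S, X, latent_dim=50, hidden_unit_nums=[512, 256])
autoencoder = Model([S, X], [S_hat, X_hat])
autoencoder.compile(optimizer='adam', loss='mse', loss_weights=[0.5, 0.5])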
Example #27
    def model_arch(self):
        # Defines the modified Sequential model with the regularizers below.

        model = Sequential()
        dropout_param = 0.5
        activation_fn = "relu"
        Gaussian_noise = 1

        adam = Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=5e-09)

        model.add(
            Dense(32,
                  input_dim=2,
                  kernel_initializer="uniform",
                  activation=activation_fn,
                  kernel_constraint=maxnorm(3)))
        model.add(Dropout(dropout_param))
        model.add(BatchNormalization())
        model.add(GaussianNoise(Gaussian_noise))

        model.add(
            Dense(128,
                  kernel_initializer="uniform",
                  activation=activation_fn,
                  kernel_constraint=maxnorm(3)))
        model.add(Dropout(dropout_param))
        model.add(BatchNormalization())
        model.add(GaussianNoise(Gaussian_noise))

        model.add(
            Dense(32,
                  kernel_initializer="uniform",
                  activation=activation_fn,
                  kernel_constraint=maxnorm(3)))
        model.add(Dropout(dropout_param))
        model.add(BatchNormalization())
        model.add(GaussianNoise(Gaussian_noise))

        model.add(Dense(2, activation="softmax", kernel_initializer="normal"))
        model.compile(loss="categorical_crossentropy",
                      optimizer=adam,
                      metrics=["accuracy"])

        return model
Example #28
def bn_layer(x, nchan, size, l2_reg, sigma=0.05):
    """An individual batchnorm layer"""
    n = int(x.shape[-1]) - size + 1
    y = Conv2D(nchan,
               size,
               data_format="channels_first",
               kernel_regularizer=l2(l2_reg))(x)
    y = Reshape((nchan, n, n))(BatchNormalization(axis=-1)(Flatten()(y)))
    return Activation('relu')(GaussianNoise(sigma)(y))
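bn_layer is meant to be chained in a functional graph; a minimal sketch (the channels_first stimulus shape and layer sizes are assumptions, not from the original):

from keras.layers import Input

x = Input(shape=(40, 50, 50))                    # 40 channels of 50x50 input
y = bn_layer(x, nchan=8, size=15, l2_reg=1e-3)   # -> (8, 36, 36)
y = bn_layer(y, nchan=8, size=11, l2_reg=1e-3)   # -> (8, 26, 26)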
Example #29
    def ANN_Custom(self, inputdimention, labelsnum):
        from keras.models import Sequential
        from keras.layers import Dense
        from keras.layers import Dropout
        from keras.layers.noise import GaussianNoise
        from keras.constraints import maxnorm

        self.__model = Sequential()

        self.__model.add(
            Dense(output_dim=inputdimention,
                  input_dim=inputdimention,
                  init=self.getInit(),
                  activation=self.getACTFUNC(),
                  W_constraint=maxnorm(1)))  # 2: build the graph model
        #add noise layer
        self.__model.add(GaussianNoise(self.getGNsigma()))

        ##hidden layer
        outputdimention = self.getOutDimention()

        layersNum = self.getLayersNum()

        subdim = int((inputdimention - outputdimention) / layersNum)
        print "subdim", subdim
        while layersNum > 0:
            inputdim = inputdimention - subdim * (self.getLayersNum() -
                                                  layersNum)
            outputdim = inputdim
            print "outputdim", outputdim
            self.__model.add(
                Dense(output_dim=outputdim,
                      input_dim=inputdim,
                      init=self.getInit(),
                      activation=self.getACTFUNC(),
                      W_constraint=maxnorm(1)))
            self.__model.add(Dropout(self.getDropoutRate()))
            layersNum = layersNum - 1
            print "layersNum--", layersNum

        ##full-conect layer
        self.__model.add(
            Dense(output_dim=outputdimention,
                  input_dim=outputdimention,
                  init=self.getInit(),
                  activation=self.getACTFUNC(),
                  W_constraint=maxnorm(1)))

        ##last layer
        self.__model.add(
            Dense(output_dim=labelsnum,
                  input_dim=outputdimention,
                  init=self.getInit(),
                  activation='softmax',
                  W_constraint=maxnorm(1)))  #,W_constraint=maxnorm(1)

        return self.__model
Example #30
def build_gmodel():
    graph = Graph()
    graph.add_input(name='time_series', ndim=3)
    graph.add_node(JZS3(63, 40), name='rnn1', input='time_series')
    # # graph.add_node(JZS3(63, 40), name='rnn2', input='time_series')
    # # graph.add_node(JZS3(63, 40), name='rnn3', input='time_series')
    # # graph.add_node(JZS3(63, 40), name='rnn4', input='time_series')
    graph.add_node(Dense(40, 40), name='dense1', input='rnn1')
    # # graph.add_node(Dense(40, 40), name='dense2', input='rnn2')
    # # graph.add_node(Dense(40, 40), name='dense3', input='rnn3')
    # # graph.add_node(Dense(40, 40), name='dense4', input='rnn4')
    graph.add_node(MaxoutDense(40, 20, nb_feature=4),
                   name='maxout1',
                   input='dense1')
    # # graph.add_node(MaxoutDense(40, 80, nb_feature=4), name='maxout2', input='dense2')
    # # graph.add_node(MaxoutDense(40, 80, nb_feature=4), name='maxout3', input='dense3')
    # # graph.add_node(MaxoutDense(40, 80, nb_feature=4), name='maxout4', input='dense4')
    graph.add_node(Dropout(0.5), name='dropout1', input='maxout1')
    # # graph.add_node(Dropout(0.5), name='dropout2', input='maxout2')
    # # graph.add_node(Dropout(0.5), name='dropout3', input='maxout3')
    # # graph.add_node(Dropout(0.5), name='dropout4', input='maxout4')
    # graph.add_node(Dense(320, 160, activation='softmax'), name='merge', inputs=['dropout1', 'dropout2', 'dropout3', 'dropout4'], merge_mode='concat')
    # graph.add_node(MaxoutDense(160, 160, nb_feature=4), name='merge_maxout', input='merge')
    # graph.add_node(Dropout(0.5), name='merge_dropout', input='merge_maxout')
    graph.add_node(Dense(20, 1, activation='sigmoid'),
                   name='out_dense',
                   input='dropout1')

    graph.add_input(name='enrollment', ndim=2)
    graph.add_node(GaussianNoise(0.05), name='noise', input='enrollment')
    # graph.add_node(Dense(54, 64), name='mlp_dense', inputs=['enrollment', 'out_dense'])
    graph.add_node(Dense(53, 64), name='mlp_dense', input='noise')
    graph.add_node(MaxoutDense(64, 64, nb_feature=4),
                   name='mlp_maxout',
                   input='mlp_dense')
    graph.add_node(Dropout(0.5), name='mlp_dropout', input='mlp_maxout')
    # graph.add_node(Dense(32, 16), name='mlp_dense2', input='mlp_dropout')
    graph.add_node(Dense(64, 1, activation='sigmoid'),
                   name='mlp_dense3',
                   input='mlp_dropout')
    graph.add_node(
        Dense(4, 2, activation='softmax'),
        name='mlp_dense4',
        inputs=['mlp_dense3', 'out_dense', 'mlp_dense3', 'out_dense'],
        merge_mode='concat')
    graph.add_node(Dense(2, 1, activation='sigmoid'),
                   name='mlp_dense5',
                   input='mlp_dense4')

    # graph.add_node(Dense(2, 1), name='my_output', inputs=['mlp_dense2', 'out_dense'], merge_mode='concat')

    graph.add_output(name='output', input='mlp_dense5')

    graph.compile('adam', {'output': 'binary_crossentropy'})

    return graph