Example #1
def Graham_Simple(shape, NOL):
    """
    http://blog.kaggle.com/2015/01/02/cifar-10-competition-winners-interviews-with-dr-ben-graham-phil-culliton-zygmunt-zajac/
    without sparsity
    k = 320
    """

    model = Sequential()

    model.add(
        Conv2D(320, (2, 2),
               input_shape=shape,
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(
        Conv2D(320, (2, 2),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))

    model.add(BatchNormalization(axis=3))

    model.add(advanced_activations.LeakyReLU(alpha=0.3))

    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # model.add(Dropout(0.5))

    for i in range(2, 5):
        model.add(
            Conv2D(i * 320, (2, 2),
                   kernel_initializer='he_normal',
                   bias_initializer='zeros'))

        model.add(BatchNormalization(axis=3))

        model.add(advanced_activations.LeakyReLU(alpha=0.3))
        model.add(
            Conv2D(i * 320, (2, 2),
                   kernel_initializer='he_normal',
                   bias_initializer='zeros'))

        model.add(BatchNormalization(axis=3))

        model.add(advanced_activations.LeakyReLU(alpha=0.3))

        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # if i!=5:
        #     model.add(Dropout(0.5))

    model.add(Flatten())

    model.add(Dense(NOL, activation='softmax'))

    return model
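A minimal usage sketch for Graham_Simple (assumptions: standalone Keras 2.x, a 64x64 RGB input so the valid 2x2 convolutions never shrink a dimension to zero, and 10 output classes; the imports are the ones the function body assumes):

from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Flatten, Dense
from keras.layers import advanced_activations

model = Graham_Simple(shape=(64, 64, 3), NOL=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()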
Example #2
def generate_model_2():
    ip = Input(shape=(MAX_NB_VARIABLES, MAX_TIMESTEPS))
    # stride = 10

    # x = Permute((2, 1))(ip)
    # x = Conv1D(MAX_NB_VARIABLES // stride, 8, strides=stride, padding='same', activation='relu', use_bias=False,
    #            kernel_initializer='he_uniform')(x)  # (None, variables / stride, timesteps)
    # x = Permute((2, 1))(x)

    #ip1 = K.reshape(ip,shape=(MAX_TIMESTEPS,MAX_NB_VARIABLES))
    #x = Permute((2, 1))(ip)
    x = Masking()(ip)
    x = AttentionLSTM(8)(x)
    x = Dropout(0.8)(x)

    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    # y = Activation('relu')(y)
    # Nov.24/2019/CSCE636/activation = pReLU
    # y = advanced_activations.PReLU()(y)
    # Nov.24/2019/CSCE636/activation = LeakyReLU
    y = advanced_activations.LeakyReLU()(y)
    y = squeeze_excite_block(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    # y = Activation('relu')(y)
    # Nov.24/2019/CSCE636/activation = pReLU
    # y = advanced_activations.PReLU()(y)
    # Nov.24/2019/CSCE636/activation = LeakyReLU
    y = advanced_activations.LeakyReLU()(y)
    y = squeeze_excite_block(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    # y = Activation('relu')(y)
    # Nov.24/2019/CSCE636/activation = pReLU
    # y = advanced_activations.PReLU()(y)
    # Nov.24/2019/CSCE636/activation = LeakyReLU
    y = advanced_activations.LeakyReLU()(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model
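generate_model_2 relies on a squeeze_excite_block helper not shown here; below is a sketch of the common MLSTM-FCN-style 1D squeeze-and-excite block (the reduction ratio of 16 is an assumption):

from keras import backend as K
from keras.layers import GlobalAveragePooling1D, Reshape, Dense, multiply

def squeeze_excite_block(input_tensor):
    # squeeze: global average pool to one descriptor per channel
    filters = K.int_shape(input_tensor)[-1]
    se = GlobalAveragePooling1D()(input_tensor)
    se = Reshape((1, filters))(se)
    # excite: bottleneck MLP ending in per-channel sigmoid gates
    se = Dense(filters // 16, activation='relu',
               kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid',
               kernel_initializer='he_normal', use_bias=False)(se)
    return multiply([input_tensor, se])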
Example #3
def net_beta(img_input, classes, blocks):

    x = ZeroPadding2D((1, 1))(img_input)
    x = Conv2D(64, (3, 3), use_bias=False)(x)

    x = BatchNormalization(epsilon=eps, axis=bn_axis)(x)
    x = Scale(axis=bn_axis)(x)
    x = advanced_activations.LeakyReLU(alpha=0.2)(x)  #

    x, shortcut = b_block(x, 3, [64, 64], strides=(1, 1), shortcut=None)
    for i in range(blocks[0]):
        x, shortcut = b_block(x,
                              3, [64, 64],
                              strides=(1, 1),
                              shortcut=shortcut)

    x, shortcut = b_block(x, 3, [128, 128], shortcut=shortcut)
    for i in range(blocks[1]):
        x, shortcut = b_block(x,
                              3, [128, 128],
                              strides=(1, 1),
                              shortcut=shortcut)

    x, shortcut = b_block(x, 3, [256, 256], shortcut=shortcut)
    for i in range(blocks[2]):
        x, shortcut = b_block(x,
                              3, [256, 256],
                              strides=(1, 1),
                              shortcut=shortcut)

    x, shortcut = b_block(x, 3, [512, 512], shortcut=shortcut)
    for i in range(blocks[3]):
        x, shortcut = b_block(x,
                              3, [512, 512],
                              strides=(1, 1),
                              shortcut=shortcut)
    # x, shortcut = block(x, 3, [512, 512], shortcut = shortcut)

    x = Conv2D(512, (1, 1), use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis)(x)
    x = advanced_activations.LeakyReLU(alpha=0.2)(x)

    x_fc = GlobalAveragePooling2D()(x)
    x_fc = Dense(classes, activation='softmax', name='fc1000')(x_fc)

    model = Model(img_input, x_fc)

    return model
Example #4
    def regression_model(model, nr_classes, nr_features, nr_hidden, x_train,
                         x_val, y_train, y_val, epochs, tb, path):
        tot_features = nr_classes * nr_features

        reg_mod = Sequential()
        # make layer 1
        reg_mod.add(
            Reshape((tot_features, ), input_shape=(nr_features, nr_classes)))
        reg_mod.add(Dense(nr_hidden, activation="linear"))
        reg_mod.set_weights(model.layers[2].get_weights())

        # make the first hidden layer
        reg_mod.add(Dense(nr_hidden, activation="linear"))
        reg_mod.add(advanced_activations.LeakyReLU())

        # make the final output layer
        nr_of_labels = y_train.shape[1]
        print("nr_labels", nr_of_labels)
        reg_mod.add(Dense(nr_of_labels, activation="sigmoid"))

        reg_mod.compile(optimizer="adam",
                        loss="mean_squared_error",
                        metrics=['mse', 'mae'])

        reg_mod.fit(x_train,
                    y_train,
                    epochs=epochs,
                    batch_size=32,
                    shuffle=True,
                    verbose=2,
                    validation_data=(x_val, y_val),
                    callbacks=[tb])
        reg_mod.save(path + "/regression/regression_model.h5")
        return reg_mod
Example #5
def identity_block(X, f, filters, stage, block):
    """
    三层的恒等残差块
    param :
    X -- 输入的张量,维度为(m, n_H_prev, n_W_prev, n_C_prev)
    f -- 整数,指定主路径的中间 CONV 窗口的形状(过滤器大小)
    filters -- python整数列表,定义主路径的CONV层中的过滤器
    stage -- 整数,用于命名层,取决于它们在网络中的位置
    block --字符串/字符,用于命名层,取决于它们在网络中的位置
    return:
    X -- 三层的恒等残差块的输出,维度为:(n_H, n_W, n_C)
    """
    # Define the base layer names
    conv_name_base = "res" + str(stage) + block + "_branch"
    bn_name_base = "bn" + str(stage) + block + "_branch"

    # Filters
    F1, F2, F3 = filters

    # Save the input value; it will be added back to the main path later
    X_shortcut = X

    # First component of the main path
    # (kernel size divided by 2, rounded down)
    # X = ZeroPadding2D(padding=(1, 1), dim_ordering='tf')(X)
    # model.add(ZeroPadding2D((1, 1), batch_input_shape=(1, 4, 4, 1)))  # padding in a Sequential model
    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding="same",
               name=conv_name_base + "2a",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + "2a")(X)
    X = Activation("relu")(X)

    # Second component of the main path
    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding="same",
               name=conv_name_base + "2b",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + "2b")(X)
    X = Activation("relu")(X)

    # Third component of the main path
    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding="same",
               name=conv_name_base + "2c",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + "2c")(X)

    # Final step of the main path: add the shortcut back and apply the activation
    X = layers.add([X, X_shortcut])
    # X = Activation("relu")(X)
    X = advanced_activations.LeakyReLU(alpha=0.3)(X)
    return X
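A quick shape check for the block (a sketch; channels-last data, and the last filter count must equal the input channel count for the add to work):

from keras.layers import Input
from keras.models import Model

X_in = Input(shape=(32, 32, 256))
X_out = identity_block(X_in, f=3, filters=[64, 64, 256], stage=2, block='a')
Model(X_in, X_out).summary()  # output shape equals input shape: (32, 32, 256)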
Example #6
    def _relu(self, input, name_prefix, leaky=True, trainable=True):
        if leaky:
            out = KLA.LeakyReLU(name='{}_leakyrelu'.format(name_prefix),
                                trainable=trainable)(input)
        else:
            out = KL.Activation('relu',
                                name='{}_relu'.format(name_prefix),
                                trainable=trainable)(input)
        return out
Example #7
def build_train(dtr, ltr, dte, lte):
    '''
    build, train model and evaluate it
    input:
          dtr: train data
          ltr: train labels
          dte: test data
          lte: test labels
    output: model evaluation report and weights.h5
    '''
    model = Sequential()
    model.add(Flatten(input_shape=(60483, 2)))
    # the first layer: Dense(16000) is a fully-connected layer with 16000 hidden units
    model.add(Dense(16000))
    model.add(
        advanced_activations.LeakyReLU(alpha=0.01))  # activation: leakyReLU
    # Dropout to prevent the network from overfitting
    model.add(Dropout(0.3))
    # the second layer: Dense(2000) is a fully-connected layer with 2000 hidden units
    model.add(Dense(2000))
    model.add(advanced_activations.LeakyReLU(alpha=0.01))
    model.add(Dropout(0.3))
    # the third layer: Dense(128) is a fully-connected layer with 128 hidden units
    model.add(Dense(128))
    model.add(advanced_activations.LeakyReLU(alpha=0.01))
    model.add(Dropout(0.3))
    # the last layer: Dense(24) is a fully-connected layer with 24 output units
    model.add(Dense(24, activation='softmax'))

    model.summary()

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    model.fit(dtr, ltr, epochs=20, batch_size=256)

    model.save_weights('stage_weights.h5')

    score = model.evaluate(dte, lte, batch_size=256)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
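build_train expects one-hot labels to match the 24-way softmax; a data-preparation sketch (shapes inferred from the Flatten input shape, values are random placeholders):

import numpy as np
from keras.utils import to_categorical

dtr = np.random.rand(100, 60483, 2).astype('float32')  # 100 training samples
ltr = to_categorical(np.random.randint(0, 24, size=100), num_classes=24)
dte = np.random.rand(20, 60483, 2).astype('float32')   # 20 test samples
lte = to_categorical(np.random.randint(0, 24, size=20), num_classes=24)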
Example #8
def train():
    # model
    X_shape = (FLAGS.image_height, FLAGS.image_width, 3)
    model = Sequential()
    
    model.add(Conv2D(128, kernel_size=3, padding='same', strides=1, input_shape=X_shape))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # conv => RELU => POOL
    model.add(Conv2D(256, kernel_size=3, padding='same', strides=1))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # conv => RELU => POOL
    model.add(Conv2D(512, kernel_size=3, padding='same', strides=1))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    
    model.add(Conv2D(1024, kernel_size=3, padding='same', strides=1))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    
    model.add(Flatten())
    model.add(Dense(FLAGS.charset_size))
    model.add(Activation('softmax'))

    optimizer = Adam()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])


    for epoch in range(FLAGS.num_epochs):
        for i, (X_train, y_train) in enumerate(train_data_iterator()):
            X_train, y_train = np.array(X_train), np.array(y_train)
            loss = model.train_on_batch(X_train, y_train)
            print(f"epoch: {epoch} step: {i} loss: {loss}")

            if i % 100 == 0:
                X_test, y_test = test_data_helper()
                X_test, y_test = np.array(X_test), np.array(y_test)
                score = model.test_on_batch(X_test, y_test)
                print(f"score: {score}")

if __name__ == '__main__':
    train()
Example #9
def ClassicalCNN(x_train, y_train, x_test, y_test, NOL):
    model = Sequential()

    shape = x_train[0].shape

    model.add(
        Conv2D(32, (3, 3),
               input_shape=shape,
               activation=advanced_activations.LeakyReLU(alpha=0.18),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(64, (3, 3),
               activation=advanced_activations.LeakyReLU(alpha=0.18),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))
    model.add(
        Conv2D(64, (3, 3),
               activation=advanced_activations.LeakyReLU(alpha=0.18),
               kernel_initializer='he_normal',
               bias_initializer='zeros'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())

    model.add(
        Dense(512,
              activation=advanced_activations.LeakyReLU(alpha=0.18),
              kernel_initializer='he_normal',
              bias_initializer='zeros'))

    model.add(Dropout(0.5))

    model.add(
        Dense(NOL,
              kernel_initializer='he_normal',
              bias_initializer='zeros',
              activation='softmax'))

    return model
Example #10
def create_model():
    conv = Sequential()

    conv.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(Conv2D(32, (3, 3)))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(MaxPooling2D(pool_size=(4, 2)))
    conv.add(Dropout(0.25))

    conv.add(Conv2D(64, (3, 3), padding='same'))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(Conv2D(64, (3, 3)))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(MaxPooling2D(pool_size=(2, 1)))
    conv.add(Dropout(0.25))

    conv.add(Flatten())
    conv.add(Dense(128))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(Dropout(0.5))

    conv.add(Dense(128))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(Dropout(0.5))

    conv.add(Dense(num_classes))
    conv.add(Activation('softmax'))

    return conv
Example #11
def create_model():
    conv = Sequential()

    conv.add(
        Conv1D(8,
               5,
               input_shape=input_shape,
               padding='same',
               kernel_initializer='he_uniform'))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(MaxPooling1D(pool_size=2, strides=2))
    conv.add(Dropout(0.15))

    conv.add(Conv1D(16, 5, padding='same', kernel_initializer='he_uniform'))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(MaxPooling1D(pool_size=2, strides=2))
    conv.add(Dropout(0.15))

    conv.add(Conv1D(32, 3, padding='same', kernel_initializer='he_uniform'))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(MaxPooling1D(pool_size=2, strides=2))
    conv.add(Dropout(0.5))

    conv.add(Conv1D(16, 1, padding='same', kernel_initializer='he_uniform'))
    conv.add(Conv1D(32, 3, padding='same', kernel_initializer='he_uniform'))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(MaxPooling1D(pool_size=2, strides=2))
    conv.add(Dropout(0.5))

    conv.add(Conv1D(16, 1, padding='same', kernel_initializer='he_uniform'))
    conv.add(Conv1D(32, 3, padding='same', kernel_initializer='he_uniform'))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(MaxPooling1D(pool_size=2, strides=2))
    conv.add(Dropout(0.5))

    conv.add(Flatten())
    conv.add(Dense(32, kernel_initializer='he_uniform'))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(Dropout(0.5))

    conv.add(Dense(32, kernel_initializer='he_uniform'))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(Dropout(0.2))

    conv.add(Dense(8, kernel_initializer='he_uniform'))
    conv.add(advanced_activations.LeakyReLU(alpha=0.3))
    conv.add(Dropout(0.1))

    conv.add(Dense(2, activation='sigmoid'))
    return conv
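create_model reads a global input_shape; a compile sketch (the shape value is an assumption, and binary_crossentropy matches the two sigmoid outputs):

input_shape = (512, 1)  # assumed global consumed by create_model
conv = create_model()
conv.compile(optimizer='adam', loss='binary_crossentropy',
             metrics=['accuracy'])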
Example #12
def casnet(img_input, classes, blocks):
    x = ZeroPadding2D((1, 1))(img_input)
    x = Conv2D(16, (3, 3),
               use_bias=False,
               kernel_initializer="he_normal",
               kernel_regularizer=regularizers.l2(weight_decay))(x)
    x = advanced_activations.LeakyReLU(alpha=0.2)(x)  #Activation('relu')(x)

    x, shortcut = block(x, 3, [16, 16], strides=(1, 1), shortcut=None)
    for i in range(blocks[0]):
        x, shortcut = block(x, 3, [16, 16], strides=(1, 1), shortcut=shortcut)
    x, shortcut = block(x, 3, [16, 16], shortcut=shortcut)

    for i in range(blocks[1]):
        x, shortcut = block(x, 3, [32, 32], strides=(1, 1), shortcut=shortcut)
    x, shortcut = block(x, 3, [32, 32], shortcut=shortcut)

    for i in range(blocks[2]):
        x, shortcut = block(x, 3, [64, 64], strides=(1, 1), shortcut=shortcut)

    x = Conv2D(64, (1, 1),
               use_bias=False,
               kernel_initializer="he_normal",
               kernel_regularizer=regularizers.l2(weight_decay))(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis)(x)
    x = advanced_activations.LeakyReLU(alpha=0.2)(x)  #Activation('relu')(x)

    x_fc = GlobalAveragePooling2D()(x)
    x_fc = Dense(classes,
                 activation='softmax',
                 kernel_initializer="he_normal",
                 kernel_regularizer=regularizers.l2(weight_decay))(x_fc)

    model = Model(img_input, x_fc)

    return model
Example #13
def get_model(max_encoder_seq_length, max_decoder_seq_length,
              num_encoder_tokens, latent_dim):
    # Define an input sequence and process it.
    encoder_inputs = Input(shape=(max_encoder_seq_length, num_encoder_tokens))
    encoder_input1, state_h1, state_c1 = LSTM(
        latent_dim, return_sequences=True, return_state=True)(encoder_inputs)
    encoder_input2, state_h2, state_c2 = LSTM(
        latent_dim, return_sequences=True, return_state=True)(encoder_input1)
    encoder_outputs, state_h, state_c = LSTM(latent_dim,
                                             return_state=True)(encoder_input2)

    # We discard `encoder_outputs` and only keep the states.
    encoder_states1 = [state_h1, state_c1]
    encoder_states2 = [state_h2, state_c2]
    encoder_states = [state_h, state_c]

    # Set up the decoder, using `encoder_states` as initial state.
    decoder_inputs = Input(shape=(max_decoder_seq_length, num_encoder_tokens))
    # We set up our decoder to return full output sequences,
    # and to return internal states as well. We don't use the
    # return states in the training model, but we will use them in inference.

    decoder_output1, _, _ = LSTM(latent_dim,
                                 return_sequences=True,
                                 return_state=True)(
                                     decoder_inputs,
                                     initial_state=encoder_states)
    decoder_output2, _, _ = LSTM(latent_dim,
                                 return_sequences=True,
                                 return_state=True)(
                                     decoder_output1,
                                     initial_state=encoder_states2)
    decoder_output3, _, _ = LSTM(latent_dim,
                                 return_sequences=True,
                                 return_state=True)(
                                     decoder_output2,
                                     initial_state=encoder_states1)
    decoder_output3 = Dense(num_encoder_tokens)(decoder_output3)
    decoder_outputs = advanced_activations.LeakyReLU(
        alpha=0.3)(decoder_output3)
    # Define the model that will turn
    # `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
    model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
    #    model = multi_gpu_model(model,gpus=2)
    # Run training
    model.compile(optimizer='rmsprop', loss='cosine', metrics=['mse'])
    return model
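A quick instantiation sketch (all sizes are placeholder assumptions); note that loss='cosine' is the old multi-backend Keras alias for cosine_proximity:

model = get_model(max_encoder_seq_length=100,
                  max_decoder_seq_length=100,
                  num_encoder_tokens=64,
                  latent_dim=256)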
Example #14
    def test_serialization_with_layers(self):
        activation = advanced_activations.LeakyReLU(alpha=0.1)
        layer = core.Dense(3, activation=activation)
        config = serialization.serialize(layer)
        # with custom objects
        deserialized_layer = serialization.deserialize(
            config, custom_objects={'LeakyReLU': activation})
        self.assertEqual(deserialized_layer.__class__.__name__,
                         layer.__class__.__name__)
        self.assertEqual(deserialized_layer.activation.__class__.__name__,
                         activation.__class__.__name__)
        # without custom objects
        deserialized_layer = serialization.deserialize(config)
        self.assertEqual(deserialized_layer.__class__.__name__,
                         layer.__class__.__name__)
        self.assertEqual(deserialized_layer.activation.__class__.__name__,
                         activation.__class__.__name__)
Example #15
def test_advanced_activations():
    """ Tests that various ways of specifying activations in keras models are handled when replaced with Relu
    """
    inp = Input(shape=(2, ))
    x = Dense(5, activation='elu')(inp)
    x = advanced_activations.LeakyReLU()(x)
    x = Activation('elu')(x)
    model = Model(inp, x)

    # Ensure that layer.activation, Activation and advanced activations are replaced with relu
    modified_model = modify_model_backprop(model, 'guided')
    assert modified_model.layers[1].activation == get('relu')
    assert modified_model.layers[2].activation == get('relu')
    assert modified_model.layers[3].activation == get('relu')

    # Ensure that original model is unchanged.
    assert model.layers[1].activation == get('elu')
    assert isinstance(model.layers[2], advanced_activations.LeakyReLU)
    assert model.layers[3].activation == get('elu')
Example #16
    def __init__(self, nr_classes, nr_features, nr_hidden, l1_lambda, noise):
        tot_features = nr_classes * nr_features
        input = Input(shape=(nr_features, nr_classes))

        lyr = Reshape((tot_features, ))(input)
        lyr = Dropout(noise)(lyr)

        lyr = Dense(nr_hidden, activation="linear")(lyr)
        lyr = advanced_activations.LeakyReLU()(lyr)

        #lyr = ActivityRegularization(l1_lambda)(lyr)

        lyr = Dense(tot_features, activation="linear")(lyr)
        lyr = Reshape((nr_features, nr_classes))(lyr)
        lyr = Activation("softmax")(lyr)

        model = Model(inputs=input, outputs=lyr)
        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=['categorical_accuracy', 'mae'])
        self.model = model
Example #17
def darknet_conv(x,
                 filters,
                 kernel_size,
                 train_bn,
                 block_name=None,
                 idx=None,
                 strides=(1, 1)):
    padding = 'valid' if strides == (2, 2) else 'same'
    conv_idx = str(idx) if idx is not None else ''
    name_kwargs = {
        'name': block_name + conv_idx
    } if block_name is not None else {}
    x = KL.Conv2D(filters,
                  kernel_size,
                  kernel_regularizer=KR.l2(5e-4),
                  padding=padding,
                  use_bias=False,
                  **name_kwargs)(x)

    x = BatchNorm(**name_kwargs)(x, training=train_bn)
    x = KLA.LeakyReLU(alpha=0.1)(x)

    return x
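darknet_conv leans on module aliases and a BatchNorm wrapper defined elsewhere in the project; a plausible setup in the style of Matterport-like ports (an assumption, not the author's confirmed code):

import keras.layers as KL
import keras.regularizers as KR
import keras.layers.advanced_activations as KLA

class BatchNorm(KL.BatchNormalization):
    """BatchNormalization whose train/inference mode is forced by the
    caller through the `training` argument instead of the learning phase."""
    def call(self, inputs, training=None):
        return super(BatchNorm, self).call(inputs, training=training)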
Example #18
meth_bv = numpy.array(d)
stage = numpy.array(stg)
print('\ncomplete!')

# hold out 10% of the samples as the test split
data_train, data_test, labels_train, labels_test = cross_validation.train_test_split(
    meth_bv, stage, test_size=0.1, random_state=0)

print(str(len(data_train)) + ' train samples')
print(str(len(data_test)) + ' test samples')

# model
model = Sequential()
# the first layer: Dense(5000) is a fully-connected layer with 5000 hidden units
model.add(Dense(5000, input_shape=(25978, )))
model.add(advanced_activations.LeakyReLU(alpha=0.01))  # activation: leakyReLU
# Dropout to prevent the network from overfitting
model.add(Dropout(0.3))
# the second layer: Dense(2000) is a fully-connected layer with 2000 hidden units
model.add(Dense(2000))
model.add(advanced_activations.LeakyReLU(alpha=0.01))
model.add(Dropout(0.3))
# the third layer: Dense(128) is a fully-connected layer with 128 hidden units
model.add(Dense(128))
model.add(advanced_activations.LeakyReLU(alpha=0.01))
model.add(Dropout(0.3))
# the last layer: Dense(24) is a fully-connected layer with 24 output units
model.add(Dense(24, activation='softmax'))

model.summary()
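The snippet stops after model.summary(); a continuation consistent with the training setup in Example #7 (a sketch):

from keras.optimizers import SGD

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])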
Example #19
def create_yolo_model():
    yolo = Sequential()
    leaky_relu = advanced_activations.LeakyReLU(alpha=0.1)
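    # Note: this one LeakyReLU instance is reused for every activation below.
    # LeakyReLU has no trainable weights, so sharing a single object works,
    # but a fresh instance per layer is the more idiomatic Keras pattern.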
    # block 1
    yolo.add(
        Convolution2D(64,
                      7,
                      7,
                      border_mode='same',
                      subsample=(2, 2),
                      input_shape=(3, 448, 448),
                      name='conv1'))
    yolo.add(leaky_relu)
    yolo.add(MaxPooling2D((2, 2), strides=(2, 2), name='maxpooling1'))

    #block 2
    yolo.add(Convolution2D(192, 3, 3, border_mode='same', name='conv2'))
    yolo.add(leaky_relu)
    yolo.add(MaxPooling2D((2, 2), strides=(2, 2), name='maxpooling2'))

    #block 3
    yolo.add(Convolution2D(128, 1, 1, border_mode='same', name='conv3_1'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(256, 3, 3, border_mode='same', name='conv3_2'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(256, 1, 1, border_mode='same', name='conv3_3'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(512, 3, 3, border_mode='same', name='conv3_4'))
    yolo.add(leaky_relu)
    yolo.add(MaxPooling2D((2, 2), strides=(2, 2), name='maxpooling3'))

    # block 4
    yolo.add(Convolution2D(256, 1, 1, border_mode='same', name='conv4_1'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(512, 3, 3, border_mode='same', name='conv4_2'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(256, 1, 1, border_mode='same', name='conv4_3'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(512, 3, 3, border_mode='same', name='conv4_4'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(256, 1, 1, border_mode='same', name='conv4_5'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(512, 3, 3, border_mode='same', name='conv4_6'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(256, 1, 1, border_mode='same', name='conv4_7'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(512, 3, 3, border_mode='same', name='conv4_8'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(512, 1, 1, border_mode='same', name='conv4_9'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(1024, 3, 3, border_mode='same', name='conv4_10'))
    yolo.add(leaky_relu)
    yolo.add(MaxPooling2D((2, 2), strides=(2, 2), name='maxpooling4'))

    # block 5
    yolo.add(Convolution2D(512, 1, 1, border_mode='same', name='conv5_1'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(1024, 3, 3, border_mode='same', name='conv5_2'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(512, 1, 1, border_mode='same', name='conv5_3'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(1024, 3, 3, border_mode='same', name='conv5_4'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(1024, 3, 3, border_mode='same', name='conv5_5'))
    yolo.add(leaky_relu)
    yolo.add(
        Convolution2D(1024,
                      3,
                      3,
                      border_mode='same',
                      subsample=(2, 2),
                      name='conv5_6'))
    yolo.add(leaky_relu)

    # block 6
    yolo.add(Convolution2D(1024, 3, 3, border_mode='same', name='conv6_1'))
    yolo.add(leaky_relu)
    yolo.add(Convolution2D(1024, 3, 3, border_mode='same', name='conv6_2'))
    yolo.add(leaky_relu)

    # block 7 fc layer
    yolo.add(Flatten(name='flatten'))
    yolo.add(Dense(4096, name='fc1'))
    yolo.add(leaky_relu)
    yolo.add(Dropout(0.5))

    # block 8 fc layer
    yolo.add(Dense(1470, activation='linear', name='fc2'))

    # reshape output from 1D to 3D
    yolo.add(Reshape((30, 7, 7)))

    return yolo
Example #20
#teteY = np.concatenate((1-teteY, teteY), axis=1)
X_train = trX.reshape(-1, img_channels, img_rows, img_cols)
X_test = teX.reshape(-1, img_channels, img_rows, img_cols)
X_test_test = teteX.reshape(-1, img_channels, img_rows, img_cols)
print('tr, val, te mean, std')
print(X_train.mean(), X_test.mean(), X_test_test.mean())
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
Y_test_test = np_utils.to_categorical(y_test_test, nb_classes)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'val samples')
print(X_test_test.shape[0], 'test samples')
model = Sequential()
lrelu = advanced_activations.LeakyReLU(alpha=0.1)
if modelname == 'alexnet':
    X_train_extend = np.zeros((X_train.shape[0], 3, 227, 227))
    for i in range(X_train.shape[0]):
        rex = np.resize(X_train[i, :, :, :], (227, 227))
        X_train_extend[i, 0, :, :] = rex
        X_train_extend[i, 1, :, :] = rex
        X_train_extend[i, 2, :, :] = rex
    X_train = X_train_extend
    X_test_extend = np.zeros((X_test.shape[0], 3, 227, 227))
    for i in range(X_test.shape[0]):
        rex = np.resize(X_test[i, :, :, :], (227, 227))
        X_test_extend[i, 0, :, :] = rex
        X_test_extend[i, 1, :, :] = rex
        X_test_extend[i, 2, :, :] = rex
    X_test = X_test_extend
Example #21
File: main.py Project: rapsealk/TIL
    def __init__(self):
        self.model = models.Sequential()

        # From Block #1 to Block #4 - ImageNet

        self.model.add(vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3)))

        #""" Block #1 """
        #self.model.add(layers.Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2), activation=advanced_activations.LeakyReLU, input_size=(224, 224, 3)))
        #self.model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

        #""" Block #2 """
        #self.model.add(layers.Conv2D(filters=192, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

        #""" Block #3 """
        #self.model.add(layers.Conv2D(filters=128, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

        #""" Block #4 """
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

        """ Block #5
        self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), input_shape=(None, 7, 7, 512), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(2, 2), strides=(2, 2), activation=advanced_activations.LeakyReLU()))
        """

        """ Block #5 """
        self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), input_shape=(None, 7, 7, 512), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))

        """ Block #6
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU()))
        # Linear activation function for the final layer
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation='relu'))
        """

        """ Block #6 """
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        # Linear activation function for the final layer
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation='relu'))

        """ Block #7 """
        self.model.add(layers.Dense(units=1024))
        #self.model.add(layers.Flatten())
        self.model.add(layers.Dense(units=30))

        self.model.summary()

        """ Compile """
        self.model.compile(optimizer='adam', loss=losses.mean_squared_error, metrics=['acc'])
Example #22
batch_size = 256
epoch = 500
data_augmentation = True

# Model
model = Sequential()

# Conv Block 1
model.add(
    Convolution2D(filter_num,
                  kernel_size=(3, 3),
                  strides=(1, 1),
                  padding='same',
                  kernel_initializer='he_uniform',
                  input_shape=x_data.shape[1:]))
model.add(advanced_activations.LeakyReLU(alpha=0.05))
model.add(BatchNormalization())

model.add(
    Convolution2D(filter_num,
                  kernel_size=(3, 3),
                  strides=(1, 1),
                  padding='same',
                  kernel_initializer='he_uniform'))
model.add(advanced_activations.LeakyReLU(alpha=0.05))
model.add(BatchNormalization())
model.add(ZeroPadding2D((1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.2))

# Conv Block 2
Example #23
    def make_model(self):
        self.model = Sequential()

        # keras.layers.LSTM(units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True,
        # kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros',
        # unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None,
        # activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None,
        # bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=1,
        # return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False)

        # keras.layers.GRU(units, activation='tanh',
        # recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform',
        # recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None,
        # recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None,
        # recurrent_constraint=None, bias_constraint=None, dropout=0.0,
        # recurrent_dropout=0.0, implementation=1, return_sequences=False, return_state=False, go_backwards=False,
        # stateful=False, unroll=False, reset_after=False)

        # Initializer:
        # Zeros, Ones, Constant, RandomNormal, RandomUniform, TruncatedNormal, VarianceScaling, Orthogonal,
        # Identity, lecun_uniform, glorot_normal, glorot_uniform, he_normal, lecun_normal, he_uniform

        # -------------------------------------------- GRU Architectures -----------------------------------------------
        if self.mode == 'GRU':

            self.model.add(GRU(100, input_shape=self.input_shape, return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(GRU(200, return_sequences=False, use_bias=True, ))
            self.model.add(Dropout(0.35))

        elif self.mode == 'DashGRU':

            self.model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='elu', return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))
            self.model.add(GRU(150, recurrent_activation='elu', return_sequences=False, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))

        elif self.mode == 'TWSTDGRU':

            self.model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='relu', return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(advanced_activations.LeakyReLU(alpha=0.3))
            self.model.add(GRU(150, recurrent_activation='relu', return_sequences=False, ))
            self.model.add(Dropout(0.35))
            self.model.add(advanced_activations.LeakyReLU(alpha=0.3))

        elif self.mode == 'TWSTD2GRU':

            self.model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='relu', return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))
            self.model.add(GRU(150, recurrent_activation='relu', return_sequences=False, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))

        elif self.mode == 'ActGRU':

            self.model.add(GRU(input_shape=self.input_shape, units=100, return_sequences=True))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))
            self.model.add(GRU(150, return_sequences=False))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))

        elif self.mode == 'CoreGRU':

            self.model.add(GRU(100, input_shape=self.input_shape, return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(advanced_activations.LeakyReLU(alpha=0.3))
            self.model.add(GRU(200, return_sequences=False, ))
            self.model.add(Dropout(0.35))
            self.model.add(advanced_activations.LeakyReLU(alpha=0.3))

        # ------------------------------------------ LSTM Architectures ------------------------------------------------

        elif self.mode == 'LSTM':

            self.model.add(LSTM(100, input_shape=self.input_shape, return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(LSTM(200, return_sequences=False, ))
            self.model.add(Dropout(0.35))

        elif self.mode == 'DashLSTM':

            self.model.add(LSTM(100, input_shape=self.input_shape, recurrent_activation='elu', return_sequences=True))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('relu'))
            self.model.add(LSTM(150, recurrent_activation='elu', return_sequences=False))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('relu'))

        elif self.mode == 'TWSTDLSTM':

            self.model.add(
                LSTM(100, input_shape=self.input_shape, recurrent_activation='relu', return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))
            self.model.add(LSTM(150, recurrent_activation='relu', use_bias=True, return_sequences=False, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))

        elif self.mode == 'ActLSTM':

            self.model.add(LSTM(input_shape=self.input_shape, units=100, return_sequences=True))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('relu'))
            self.model.add(LSTM(150, return_sequences=False))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('relu'))

        elif self.mode == 'CoreLSTM':

            self.model.add(LSTM(100, input_shape=self.input_shape, return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))
            self.model.add(LSTM(200, return_sequences=False, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))

        # ------------------------------------------ MIXED Architectures -----------------------------------------------

        elif self.mode == 'LSTMGRU':

            self.model.add(GRU(100, input_shape=self.input_shape, return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(LSTM(200, return_sequences=False, ))
            self.model.add(Dropout(0.35))

        elif self.mode == 'DashLSTMGRU':

            self.model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='elu', return_sequences=True))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('relu'))
            self.model.add(LSTM(150, recurrent_activation='elu', return_sequences=False))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('relu'))

        elif self.mode == 'TWSTDLSTMGRU':

            self.model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='relu', return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))
            self.model.add(LSTM(150, recurrent_activation='relu', use_bias=True, return_sequences=False, ))
            self.model.add(Dropout(0.35))
            self.model.add(advanced_activations.LeakyReLU(alpha=0.3))

        elif self.mode == 'ActLSTMGRU':

            self.model.add(GRU(input_shape=self.input_shape, units=100, return_sequences=True))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))
            self.model.add(LSTM(150, return_sequences=False))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('relu'))

        elif self.mode == 'CoreLSTMGRU':

            self.model.add(GRU(100, input_shape=self.input_shape, return_sequences=True, ))
            self.model.add(Dropout(0.35))
            self.model.add(Activation('elu'))
            self.model.add(LSTM(200, return_sequences=False, ))
            self.model.add(Dropout(0.35))
            self.model.add(advanced_activations.LeakyReLU(alpha=0.3))

        # ----------------------------------------------- Final Layers -------------------------------------------------

        self.model.add(Dense(units=self.output_shape))

        self.model.add(Activation('linear'))

        self.model.compile(loss='mse', optimizer='rmsprop')
        return self.model
Example #24
    def make_model(self):
        self.model = {}

        # ------------------------------------------- GRU Architectures ------------------------------------------------

        if 'GRU' in self.mode:
            model = Sequential()
            model.add(GRU(100, input_shape=self.input_shape, return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(GRU(200, return_sequences=False, ))
            model.add(Dropout(0.35))
            self.model['GRU'] = model

        if 'DashGRU' in self.mode:
            model = Sequential()

            model.add(GRU(input_shape=self.input_shape, recurrent_activation='elu', units=100, return_sequences=True))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(GRU(150, recurrent_activation='elu', return_sequences=False))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            self.model['DashGRU'] = model

        if 'TWSTDGRU' in self.mode:
            model = Sequential()
            model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='relu', return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(advanced_activations.LeakyReLU(alpha=0.3))
            model.add(GRU(150, recurrent_activation='relu', return_sequences=False, ))
            model.add(Dropout(0.35))
            model.add(advanced_activations.LeakyReLU(alpha=0.3))
            self.model['TWSTDGRU'] = model

        if 'TWSTD2GRU' in self.mode:
            model = Sequential()
            model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='relu', return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(GRU(150, recurrent_activation='relu', return_sequences=False, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            self.model['TWSTD2GRU'] = model

        if 'ActGRU' in self.mode:
            model = Sequential()

            model.add(GRU(input_shape=self.input_shape, units=100, return_sequences=True))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(GRU(150, return_sequences=False))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            self.model['ActGRU'] = model

        if 'CoreGRU' in self.mode:
            model = Sequential()

            model.add(GRU(100, input_shape=self.input_shape, return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(advanced_activations.LeakyReLU(alpha=0.3))
            model.add(GRU(200, return_sequences=False, ))
            model.add(Dropout(0.35))
            model.add(advanced_activations.LeakyReLU(alpha=0.3))
            self.model['CoreGRU'] = model

        # ------------------------------------------ LSTM Architectures ------------------------------------------------

        if 'LSTM' in self.mode:
            model = Sequential()

            model.add(LSTM(100, input_shape=self.input_shape, return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(LSTM(200, return_sequences=False, ))
            model.add(Dropout(0.35))
            self.model['LSTM'] = model

        if 'DashLSTM' in self.mode:
            model = Sequential()

            model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='elu', return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(GRU(200, recurrent_activation='elu', return_sequences=False, ))
            model.add(Dropout(0.35))
            model.add(advanced_activations.LeakyReLU(alpha=0.3))
            self.model['DashLSTM'] = model

        if 'TWSTDLSTM' in self.mode:
            model = Sequential()

            model.add(LSTM(100, input_shape=self.input_shape, recurrent_activation='relu', return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(LSTM(150, recurrent_activation='relu', return_sequences=False, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            self.model['TWSTDLSTM'] = model

        if 'ActLSTM' in self.mode:
            model = Sequential()

            model.add(LSTM(input_shape=self.input_shape, units=100, return_sequences=True))
            model.add(Dropout(0.35))
            model.add(Activation('relu'))
            model.add(LSTM(150, return_sequences=False))
            model.add(Dropout(0.35))
            model.add(Activation('relu'))
            self.model['ActLSTM'] = model

        if 'CoreLSTM' in self.mode:
            model = Sequential()

            model.add(LSTM(100, input_shape=self.input_shape, return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(LSTM(200, return_sequences=False, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            self.model['CoreLSTM'] = model

        # ------------------------------------------ MIXED Architectures -----------------------------------------------

        if 'LSTMGRU' in self.mode:
            model = Sequential()

            model.add(GRU(100, input_shape=self.input_shape, return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(LSTM(200, return_sequences=False, ))
            model.add(Dropout(0.35))
            self.model['LSTMGRU'] = model

        if 'DashLSTMGRU' in self.mode:
            model = Sequential()

            model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='elu', return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(LSTM(200, recurrent_activation='elu', return_sequences=False, ))
            model.add(Dropout(0.35))
            model.add(advanced_activations.LeakyReLU(alpha=0.3))
            self.model['DashLSTMGRU'] = model

        if 'TWSTDLSTMGRU' in self.mode:
            model = Sequential()

            model.add(GRU(100, input_shape=self.input_shape, recurrent_activation='relu', return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(LSTM(150, recurrent_activation='relu', return_sequences=False, ))
            model.add(Dropout(0.35))
            model.add(advanced_activations.LeakyReLU(alpha=0.3))
            self.model['TWSTDLSTMGRU'] = model

        if 'ActLSTMGRU' in self.mode:
            model = Sequential()

            model.add(GRU(input_shape=self.input_shape, units=100, return_sequences=True))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(LSTM(150, return_sequences=False))
            model.add(Dropout(0.35))
            model.add(Activation('relu'))
            self.model['ActLSTMGRU'] = model

        if 'CoreLSTMGRU' in self.mode:
            model = Sequential()

            model.add(GRU(100, input_shape=self.input_shape, return_sequences=True, ))
            model.add(Dropout(0.35))
            model.add(Activation('elu'))
            model.add(LSTM(200, return_sequences=False, ))
            model.add(Dropout(0.35))
            model.add(advanced_activations.LeakyReLU(alpha=0.3))
            self.model['CoreLSTMGRU'] = model

        # ----------------------------------------------- Final Layers -------------------------------------------------

        for mode, model in self.model.items():
            model.add(Dense(units=self.output_shape))
            model.add(Activation('linear'))
            model.compile(loss='mse', optimizer='rmsprop')

        return self.model
Example #25
data_mean, data_std = np.load('norm.npy')


x_train_norm = (x_train-data_mean)/data_std


input1 = keras.layers.Input(shape=(48,48,1))
x1 = Conv2D(64,(3,3))(input1) 
x1 = advanced_activations.LeakyReLU(alpha=0.05)(x1)
x1 = Conv2D(64,(3,3))(x1) 
x1 = advanced_activations.LeakyReLU(alpha=0.05)(x1)
x1 = MaxPooling2D(pool_size=(2,2))(x1)
x1 = Dropout(0.1)(x1)

x1 = Conv2D(128,(3,3))(x1) 
x1 = advanced_activations.LeakyReLU(alpha=0.05)(x1) 
x1 = Conv2D(128,(3,3))(x1) 
x1 = advanced_activations.LeakyReLU(alpha=0.05)(x1)
x1 = MaxPooling2D(pool_size=(2,2))(x1)
x1 = Dropout(0.1)(x1)

x1 = Conv2D(256,(3,3))(x1) 
x1 = advanced_activations.LeakyReLU(alpha=0.05)(x1) 
x1 = Conv2D(256,(3,3))(x1) 
Example #26
                                                regul)
                model_C = funcs.make3dConvModel(imgSize, count, fork, skip,
                                                regul)

            elif mode == "2d":
                model_A = funcs.make2dConvModel(imgSize, regul)
                model_S = funcs.make2dConvModel(imgSize, regul)
                model_C = funcs.make2dConvModel(imgSize, regul)

            #
            model.add(
                keras.engine.topology.Merge(
                    [model_A, model_S, model_C], mode='concat',
                    concat_axis=1))  #  output here is 512*3
            model.add(BatchNormalization())
            model.add(advanced_activations.LeakyReLU(alpha=LRELUalpha))
            model.add(Dropout(0.5))
            #
            model.add(Dense(512, activity_regularizer=regul))
            model.add(BatchNormalization())
            model.add(advanced_activations.LeakyReLU(alpha=LRELUalpha))
            model.add(Dropout(0.5))
            #
            model.add(Dense(256, activity_regularizer=regul))
            model.add(BatchNormalization())
            model.add(advanced_activations.LeakyReLU(alpha=LRELUalpha))
            model.add(Dropout(0.5))

        else:

            if mode == "3d":
Example #27
    def test_invalid_usage(self):
        with self.assertRaises(ValueError):
            activations.get('unknown')

        # The following should be possible but should raise a warning:
        activations.get(advanced_activations.LeakyReLU())
Example #28
    def prepare(self):
        self.teacher_model.layers.pop()
        input_layer = self.teacher_model.input
        teacher_logits = self.teacher_model.layers[-1].output
        teacher_logits_T = Lambda(lambda x: x / self.temperature)(
            teacher_logits)
        teacher_probabilities_T = Activation(
            'softmax', name='softmax1_')(teacher_logits_T)

        x = Convolution2D(32, (3, 3), padding='same',
                          name='conv2d1')(input_layer)
        x = BatchNormalization(name='bn1')(x)
        x = advanced_activations.LeakyReLU(alpha=0.1, name='lrelu1')(x)

        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)
        x = Dropout(0.3, name='drop1')(x)

        x = Convolution2D(64, (3, 3), padding='same', name='conv2d3')(x)
        x = BatchNormalization(name='bn3')(x)
        x = advanced_activations.LeakyReLU(alpha=0.1, name='lrelu3')(x)

        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x)
        x = Dropout(0.3, name='drop2')(x)

        x = Convolution2D(128, (3, 3), padding='same', name='conv2d5')(x)
        x = BatchNormalization(name='bn5')(x)
        x = advanced_activations.LeakyReLU(alpha=0.1, name='lrelu5')(x)
        x = Convolution2D(128, (3, 3), padding='same', name='conv2d6')(x)
        x = BatchNormalization(name='bn6')(x)
        x = advanced_activations.LeakyReLU(alpha=0.1, name='lrelu6')(x)

        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(x)
        x = Dropout(0.3, name='drop3')(x)

        x = Flatten(name='flatten1')(x)
        x = Dense(256, activation=None, name='dense1')(x)
        x = BatchNormalization(name='bn8')(x)
        x = advanced_activations.LeakyReLU(alpha=0.1, name='lrelu8')(x)

        logits = Dense(num_classes, activation=None, name='dense2')(x)
        output_softmax = Activation('softmax', name='output_softmax')(logits)
        logits_T = Lambda(lambda x: x / self.temperature,
                          name='logits')(logits)
        probabilities_T = Activation('softmax', name='probabilities')(logits_T)

        with tf.device('/cpu:0'):
            born_again_model = Model(inputs=input_layer,
                                     outputs=output_softmax)
            input_true = Input(name='input_true',
                               shape=[None],
                               dtype='float32')
        output_loss = Lambda(knowledge_distillation_loss,
                             output_shape=(1, ),
                             name='kd_')([
                                 output_softmax, input_true,
                                 teacher_probabilities_T, probabilities_T
                             ])
        inputs = [input_layer, input_true]

        with tf.device('/cpu:0'):
            train_model = Model(inputs=inputs, outputs=output_loss)

        return train_model, born_again_model
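knowledge_distillation_loss comes from elsewhere in the project; a common Hinton-style formulation as a sketch (the argument order follows the Lambda call above; the lambda_const weighting is an assumption):

import keras.backend as K

def knowledge_distillation_loss(args, lambda_const=0.5):
    # args: [student_probs, y_true, teacher_probs_T, student_probs_T]
    y_pred, y_true, teacher_probs_T, student_probs_T = args
    hard_loss = K.categorical_crossentropy(y_true, y_pred)
    soft_loss = K.categorical_crossentropy(teacher_probs_T, student_probs_T)
    return lambda_const * hard_loss + (1.0 - lambda_const) * soft_loss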
Example #29
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

input_layer = Input(x_train.shape[1:])
x = Convolution2D(64, (3, 3), padding='same')(input_layer)
x = BatchNormalization()(x)
x = advanced_activations.LeakyReLU(alpha=0.1)(x)
x = Convolution2D(64, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = advanced_activations.LeakyReLU(alpha=0.1)(x)

x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = Dropout(0.3)(x)

x = Convolution2D(128, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = advanced_activations.LeakyReLU(alpha=0.1)(x)
x = Convolution2D(128, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = advanced_activations.LeakyReLU(alpha=0.1)(x)

x = MaxPooling2D((2, 2), strides=(2, 2))(x)
Example #30
input('read in files......pause')

datagen = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,data_format='channels_last')
datagen.fit(x_train)
#model=load_model('04222009.h5')

model = Sequential()

model.add(Convolution2D(filters=16,kernel_size=5,input_shape=(48,48,1),padding='same'))
model.add(advanced_activations.LeakyReLU())
model.add(MaxPooling2D((2,2)))

model.add(Convolution2D(filters=64,kernel_size=5,padding='same'))
model.add(advanced_activations.LeakyReLU())
model.add(MaxPooling2D((2,2)))

model.add(Convolution2D(filters=128,kernel_size=5,padding='same'))
model.add(advanced_activations.LeakyReLU())
model.add(MaxPooling2D((2,2)))

model.add(Convolution2D(filters=128,kernel_size=5,padding='same'))
model.add(advanced_activations.LeakyReLU())
model.add(MaxPooling2D((2,2)))

model.add(Flatten())