strides=(1, 1),
    padding="same",
    activation="relu",
    kernel_regularizer=l2(0.01))(HiggsImageInputs)
# Depthwise-separable convolution stack over the "Higgs image" branch.
# NOTE(review): the statement that first produces `HiggsImageLayer`
# (consuming `HiggsImageInputs`) begins above this excerpt.
HiggsImageLayer = SeparableConv2D(32, (7, 7),
                                  strides=(1, 1),
                                  padding="same",
                                  activation="relu",
                                  kernel_regularizer=l2(0.01))(HiggsImageLayer)
HiggsImageLayer = SeparableConv2D(32, (5, 5),
                                  strides=(1, 1),
                                  padding="same",
                                  activation="relu",
                                  kernel_regularizer=l2(0.01))(HiggsImageLayer)
# Normalise activations, then halve the spatial resolution.
HiggsImageLayer = BatchNormalization(momentum=0.6)(HiggsImageLayer)
HiggsImageLayer = MaxPool2D(pool_size=(2, 2))(HiggsImageLayer)
# Second convolution stage at the reduced resolution; all layers keep
# 32 filters and L2 weight regularisation.
HiggsImageLayer = SeparableConv2D(32, (7, 7),
                                  strides=(1, 1),
                                  padding="same",
                                  activation="relu",
                                  kernel_regularizer=l2(0.01))(HiggsImageLayer)
HiggsImageLayer = SeparableConv2D(32, (5, 5),
                                  strides=(1, 1),
                                  padding="same",
                                  activation="relu",
                                  kernel_regularizer=l2(0.01))(HiggsImageLayer)
HiggsImageLayer = SeparableConv2D(32, (2, 2),
                                  strides=(1, 1),
                                  padding="same",
                                  activation="relu",
                                  kernel_regularizer=l2(0.01))(HiggsImageLayer)
# ---- Example 2 ----













# Diagram

# Small CNN classifier over SIZE x SIZE single-channel images,
# ending in a 3-way softmax.
model = Sequential([
  AveragePooling2D(6, 3, input_shape=(SIZE, SIZE, 1)), # pass a 6x6 grid to average the image, move the grid 3 steps each time
  Conv2D(64, 3, activation="relu"),
  Conv2D(32, 3, activation="relu"),
  MaxPool2D(2, 2), # MaxPool2D is just an alias of MaxPooling2D in Keras -- same layer
  Dropout(0.5),
  Flatten(),
  Dense(128, activation="relu"),
  Dense(3, activation="softmax")
])

# categorical_crossentropy expects one-hot encoded targets.
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# Writes an architecture diagram to disk (requires pydot + graphviz).
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
#print(model.summary())
# ---- Example 3 ----
# Light geometric augmentation for 28x28 digit images.
datagen = ImageDataGenerator(
        rotation_range=10,
        zoom_range = 0.1,
        width_shift_range=0.1,
        height_shift_range=0.1)

# NOTE(review): fit() is only needed for featurewise statistics, none
# of which are enabled here; it is also called before the split below,
# so it sees the future validation samples -- confirm intent.
datagen.fit(X_train)

# Hold out 10% of the training data for validation.
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1)

model = Sequential()

# Block 1: two 5x5 convolutions, downsample, normalise, regularise.
model.add(Conv2D(32, kernel_size=5,input_shape=(28, 28, 1), activation = 'relu'))
model.add(Conv2D(32, kernel_size=5, activation = 'relu'))
model.add(MaxPool2D(2,2))
model.add(BatchNormalization())
model.add(Dropout(0.4))

# Block 2: two 3x3 convolutions at 64 filters.
model.add(Conv2D(64, kernel_size=3,activation = 'relu'))
model.add(Conv2D(64, kernel_size=3,activation = 'relu'))
model.add(MaxPool2D(2,2))
model.add(BatchNormalization())
model.add(Dropout(0.4))

# Block 3: single 3x3 convolution at 128 filters.
model.add(Conv2D(128, kernel_size=3, activation = 'relu'))
model.add(BatchNormalization())

# Dense head; the output layer is added outside this excerpt.
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.4))
# ---- Example 4 ----
# Minimal VQA-style model: a conv tower encodes the image, an LSTM
# encodes the question; both codes are concatenated and classified
# into 10 possible answers.
import keras
from keras.layers import Conv2D, Flatten, MaxPool2D, Input, Embedding, Dense, LSTM
from keras.models import Model

# Image encoder: three conv/conv/pool stages, then flatten.
input_img = Input(shape=(224, 224, 3))
x = Conv2D(64, (3, 3), activation='relu', padding='same')(input_img)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPool2D((2, 2))(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(128, (3, 3), activation='relu')(x)
x = MaxPool2D((2, 2))(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(256, (3, 3), activation='relu')(x)
x = MaxPool2D((2, 2))(x)
encoded_img = Flatten()(x)
# vision_model=Model(inputs=input_img,outputs=encoded_img)

# Question encoder: token ids -> embeddings -> LSTM summary vector.
question_input = Input(shape=(100, ), dtype='int32')
embedded_question = Embedding(input_dim=1000, output_dim=256,
                              input_length=100)(question_input)
encoded_question = LSTM(256)(embedded_question)
# Fuse the two modalities and classify.
merged = keras.layers.concatenate([encoded_img, encoded_question])
output = Dense(10, activation='softmax')(merged)
vqa_model = Model([input_img, question_input], output)
vqa_model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
# Dummy data for a smoke test.
import numpy as np
data_img = np.random.random((1000, 224, 224, 3))
# NOTE(review): question input is int32 ids in [0, 1000) but
# np.random.random yields floats in [0, 1), which cast to id 0 --
# verify this is placeholder-only data.
data_que = np.random.random((1000, 100))
label = np.random.randint(10, size=(1000, 1))
                               horizontal_flip=True)
# data = image_gen.flow_from_directory('dataset/training_set')

from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Dropout, Flatten

# Input image shape (height, width, channels).  NOTE(review): the name
# is misspelled ("targert") but is used consistently below; kept as-is
# in case later code references it.
input_targert = (150, 150, 3)

model = Sequential()

# Stage 1: 32 3x3 filters + 2x2 max-pooling.
model.add(
    Conv2D(filters=32,
           kernel_size=(3, 3),
           input_shape=input_targert,
           activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))

# Stage 2: 64 filters.  input_shape on non-first layers is ignored by
# Keras, so repeating it here is redundant but harmless.
model.add(
    Conv2D(filters=64,
           kernel_size=(3, 3),
           input_shape=input_targert,
           activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))

# Stage 3: another 64-filter conv + pool.
model.add(
    Conv2D(filters=64,
           kernel_size=(3, 3),
           input_shape=input_targert,
           activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
# ---- Example 6 ----
from keras.models import load_model
import pickle
import keras

# NOTE(review): Sequential / Conv2D / MaxPool2D are used below but not
# imported in this excerpt -- they must come from an earlier cell.
model = Sequential()
# CONV + POOL 1
model.add(
    Conv2D(filters=100,
           kernel_size=3,
           input_shape=(256, 256, 3),
           activation='relu'))
#model.add(MaxPool2D(pool_size=2, strides=2, padding='same'))

# CONV + POOL 2
model.add(Conv2D(filters=50, kernel_size=5, strides=3, activation='relu'))
model.add(MaxPool2D(pool_size=2, strides=2))

# CONV + POOL 3
model.add(Conv2D(filters=30, kernel_size=3, activation='relu'))

# model.add(Conv2D(filters=256,
#             kernel_size=3,
#             padding='same',
#             activation='relu'
#           ))

#Continue here

# Asymmetric pooling: halves the height, quarters the width.
model.add(MaxPool2D(pool_size=(2, 4), strides=(2, 4)))
#
# # CONV + POOL 4
#
# # CONV + POOL 4
# One-hot encode the test labels (ytrain is presumably encoded in an
# earlier cell -- TODO confirm).
ytest = to_categorical(ytest)

xtrain = xtrain.astype('float32')
xtest = xtest.astype('float32')
# BUG FIX: the original lines were `xtrain / 255` and `xtest / 255`,
# whose results were discarded, so the pixel data was never scaled to
# [0, 1].  In-place division is safe after the float32 cast above.
xtrain /= 255
xtest /= 255
#ytest=ytest.astype('float32')
# Bare expressions below are notebook-cell leftovers (display only).
ytest
xtest
# Small CNN classifier over 32x32x3 images (CIFAR-style input).
c = Sequential()
c.add(
    Conv2D(32, (3, 3),
           padding='same',
           activation='relu',
           input_shape=(32, 32, 3)))
c.add(MaxPool2D(pool_size=(5, 5)))
c.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
c.add(MaxPool2D(pool_size=(2, 2)))
c.add(Flatten())
c.add(Dense(units=100, kernel_initializer='uniform', activation='relu'))
# NOTE(review): sigmoid + binary_crossentropy over 10 one-hot classes
# is unconventional (softmax + categorical_crossentropy is standard);
# left unchanged to preserve existing behaviour.
c.add(Dense(units=10, kernel_initializer='uniform', activation='sigmoid'))
c.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
c.fit(xtrain, ytrain, batch_size=10, epochs=1)
#from sklearn.metrics import r2_score
a = c.predict(xtest)
a
aa = c.evaluate(xtest, ytest)
print(aa)
len(a)
len(ytest)
    def build(self):
        """Assemble the five-branch audio-visual model and return it.

        Each branch is a TimeDistributed CNN over a frame sequence,
        flattened per frame and summarised by two Bidirectional LSTMs
        into a 128-d vector; the five vectors are concatenated and
        classified with a 2-way softmax.

        Returns:
            An uncompiled keras Model with five inputs and one output.
        """
        # Branch 1: mouth-region image sequence.
        mouth_image_model = Sequential()
        mouth_image_model.add(TimeDistributed(Conv2D(32,(3,3),padding='same',activation="relu",strides=(1, 1)),\
                    name="mouth_image_layer1",input_shape=(self.max_sequence_length, self.input_shape[0], self.input_shape[1], self.input_shape[2])))
        
        mouth_image_model.add(TimeDistributed(MaxPool2D(pool_size=(2, 2))))
        # NOTE(review): names given to the inner Conv2D layers repeat
        # across branches ("..._layer2"/"..._layer3" patterns) -- Keras
        # may complain about duplicate layer names; verify at runtime.
        mouth_image_model.add(TimeDistributed(Conv2D(64,kernel_size=(3,3),strides=(1, 1),padding='same',\
                activation="relu",name="mouth_image_layer2")))
        mouth_image_model.add(TimeDistributed(MaxPool2D(pool_size=(2, 2))))
        mouth_image_model.add(TimeDistributed(Conv2D(128,kernel_size=(3,3),strides=(1, 1),padding='same',\
            activation="relu",name="mouth_image_layer3")))
        mouth_image_model.add(TimeDistributed(MaxPool2D(pool_size=(2, 2))))
        mouth_image_model.add(TimeDistributed(Flatten()))
        
        # Per-frame features -> sequence summary -> 128-d branch code.
        mouth_image_model.add(Bidirectional(LSTM(32,return_sequences=True)))
        mouth_image_model.add(Bidirectional(LSTM(128,return_sequences=False)))
        mouth_image_model.add(Dense(128,activation="relu"))

        # Branch 2: full-face image sequence (same topology as branch 1).
        face_image_model = Sequential()
        face_image_model.add(TimeDistributed(Conv2D(32,(3,3),padding='same',activation="relu",strides=(1, 1)),\
                    name="face_image_layer1",input_shape=(self.max_sequence_length, self.input_shape[0], self.input_shape[1], self.input_shape[2])))
        
        face_image_model.add(TimeDistributed(MaxPool2D(pool_size=(2, 2))))
        face_image_model.add(TimeDistributed(Conv2D(64,kernel_size=(3,3),strides=(1, 1),padding='same',\
                activation="relu",name="face_image_layer2")))
        face_image_model.add(TimeDistributed(MaxPool2D(pool_size=(2, 2))))
        face_image_model.add(TimeDistributed(Conv2D(128,kernel_size=(3,3),strides=(1, 1),padding='same',\
            activation="relu",name="face_image_layer3")))
        face_image_model.add(TimeDistributed(MaxPool2D(pool_size=(2, 2))))
        face_image_model.add(TimeDistributed(Flatten()))
        
        face_image_model.add(Bidirectional(LSTM(32,return_sequences=True)))
        face_image_model.add(Bidirectional(LSTM(128,return_sequences=False)))
        face_image_model.add(Dense(128,activation="relu"))

        # Branch 3: 20 2-D landmark points per frame (1x20x2 "image").
        dpts_model = Sequential()
        dpts_model.add(TimeDistributed(Conv2D(32,(1,3),padding='same',activation="relu",strides=(1, 1)),\
                    name="dpts_layer1",input_shape=(self.max_sequence_length, 1, 20, 2)))
        dpts_model.add(TimeDistributed(Conv2D(64,kernel_size=(3,3),strides=(1, 1),padding='same',\
                activation="relu",name="dpts_layer2")))
        dpts_model.add(TimeDistributed(Flatten()))

        dpts_model.add(Bidirectional(LSTM(32,return_sequences=True)))
        dpts_model.add(Bidirectional(LSTM(128,return_sequences=False)))
        dpts_model.add(Dense(128,activation="relu"))


        # Branch 4: per-frame landmark distances (1x20x1).
        dpts_dists_model = Sequential()
        dpts_dists_model.add(TimeDistributed(Conv2D(32,(1,3),padding='same',activation="relu",strides=(1, 1)),\
                    name="dpts_dists_layer1",input_shape=(self.max_sequence_length, 1, 20, 1)))
        dpts_dists_model.add(TimeDistributed(Conv2D(64,kernel_size=(3,3),strides=(1, 1),padding='same',\
                activation="relu",name="dpts_dists_layer2")))
        dpts_dists_model.add(TimeDistributed(Flatten()))
        dpts_dists_model.add(Bidirectional(LSTM(32,return_sequences=True)))
        dpts_dists_model.add(Bidirectional(LSTM(128,return_sequences=False)))
        dpts_dists_model.add(Dense(128,activation="relu"))

        # Branch 5: per-frame landmark angles (1x20x1).
        dpts_angles_model = Sequential()
        dpts_angles_model.add(TimeDistributed(Conv2D(32,(1,3),padding='same',activation="relu",strides=(1, 1)),\
                    name="dpts_angles_layer1",input_shape=(self.max_sequence_length, 1, 20, 1)))
        dpts_angles_model.add(TimeDistributed(Conv2D(64,kernel_size=(3,3),strides=(1, 1),padding='same',\
                activation="relu",name="dpts_angles_layer2")))
        dpts_angles_model.add(TimeDistributed(Flatten()))
        dpts_angles_model.add(Bidirectional(LSTM(32,return_sequences=True)))
        dpts_angles_model.add(Bidirectional(LSTM(128,return_sequences=False)))
        dpts_angles_model.add(Dense(128,activation="relu"))
    


        # Fuse the five 128-d branch codes.
        merged = keras.layers.concatenate([mouth_image_model.output, face_image_model.output,dpts_model.output,dpts_dists_model.output,dpts_angles_model.output])


        merged = Dense(128,activation="relu")(merged)
        merged = Dense(256,activation="relu")(merged)

        # Binary decision as a 2-way softmax.
        merged = Dense(2,activation="softmax")(merged)

        model = Model(inputs=[mouth_image_model.input,face_image_model.input,dpts_model.input,dpts_dists_model.input,dpts_angles_model.input],outputs=merged)


        return model
# ---- Example 9 ----
def get_model(embed, num_conti):
    """Build and compile the multi-input listing-quality regressor.

    Combines categorical embeddings, GRU-encoded title/description
    text, a small CNN over the listing image, and a vector of
    continuous features into a single sigmoid output trained with MSE.

    Args:
        embed: dict of pre-trained embedding matrices keyed by
            'title' and 'desc'.
        num_conti: number of continuous input features.

    Returns:
        A compiled keras Model (single sigmoid head named 'output').
    """
    def block_wrap(x, filters):
        # conv(3x3, same) -> BatchNorm -> ReLU.
        x = Conv2D(filters, kernel_size=3, padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return x

    def root_mean_squared_error(y_true, y_pred):
        # RMSE metric (not shipped by Keras out of the box).
        return K.sqrt(K.mean(K.square(y_true - y_pred)))

    # Scalar categorical inputs (one id per listing).
    region = Input(shape=[1], name="region")
    city = Input(shape=[1], name="city")
    pcn = Input(shape=[1], name="parent_category_name")
    cn = Input(shape=[1], name="category_name")
    para1 = Input(shape=[1], name="param_1")
    para2 = Input(shape=[1], name="param_2")
    para3 = Input(shape=[1], name="param_3")
    # act = Input(shape=[1], name="activation_date")
    ut = Input(shape=[1], name="user_type")
    img_top = Input(shape=[1], name="image_top_1")

    # Sequence / image / continuous inputs.
    title = Input(shape=[title_len], name="title")
    desc = Input(shape=[desc_len], name="desc")
    img = Input(shape=[img_size, img_size, 3], name='image')
    conti = Input(shape=[num_conti], name='conti')
    """
    region                       28
    city                       1752
    parent_category_name          9
    category_name                47
    param_1                     372
    param_2                     278
    param_3                    1277
    title                   1022203
    description             1793973
    activation_date              30
    user_type                     3
    image                   1856666
    image_top_1                3064
    param_combined             2402
    """

    # Embedding sizes follow the cardinalities listed above.
    emb_region = Embedding(28, 8)(region)
    emb_city = Embedding(1752, 16)(city)
    emb_pcn = Embedding(9, 3)(pcn)
    emb_cn = Embedding(47, 8)(cn)
    emb_para1 = Embedding(372, 16)(para1)
    emb_para2 = Embedding(278, 16)(para2)
    emb_para3 = Embedding(1277, 16)(para3)
    # emb_act = Embedding(30,8)(act)
    emb_img_top = Embedding(3064, 32)(img_top)
    # user_type is a fixed one-hot (identity weights, frozen).
    emb_ut = Embedding(3, 3, weights=[np.eye(3, 3)], trainable=False)(ut)

    # Frozen pre-trained word embeddings for both text fields.
    num_word = len(embed['title'])
    emb_title = Embedding(num_word,
                          300,
                          weights=[embed['title']],
                          trainable=False)(title)
    num_word = len(embed['desc'])
    emb_desc = Embedding(num_word,
                         300,
                         weights=[embed['desc']],
                         trainable=False)(desc)

    # Image tower: three conv/pool stages + global average pooling.
    conv = block_wrap(img, 32)
    conv = MaxPool2D(padding='same')(conv)
    conv = block_wrap(conv, 64)
    conv = MaxPool2D(padding='same')(conv)
    conv = block_wrap(conv, 128)
    conv = MaxPool2D(padding='same')(conv)
    conv = GlobalAveragePooling2D()(conv)

    # Title encoder: stacked BiGRUs, max- and average-pooled.
    title_gru = Bidirectional(CuDNNGRU(64, return_sequences=True),
                              merge_mode='sum')(emb_title)
    title_gru = Bidirectional(CuDNNGRU(32, return_sequences=True),
                              merge_mode='sum')(title_gru)
    title_gru1 = GlobalMaxPooling1D()(title_gru)
    title_gru2 = GlobalAveragePooling1D()(title_gru)

    # Description encoder, same shape of pipeline.
    desc_gru = Bidirectional(CuDNNGRU(64, return_sequences=True),
                             merge_mode='sum')(emb_desc)
    desc_gru = Bidirectional(CuDNNGRU(64, return_sequences=True),
                             merge_mode='sum')(desc_gru)
    desc_gru1 = GlobalMaxPooling1D()(desc_gru)
    desc_gru2 = GlobalAveragePooling1D()(desc_gru)

    # Concatenate every feature source into one flat vector.
    fc = concatenate([
        Flatten()(emb_region),
        Flatten()(emb_city),
        Flatten()(emb_pcn),
        Flatten()(emb_cn),
        Flatten()(emb_para1),
        Flatten()(emb_para2),
        Flatten()(emb_para3),
        # Flatten()(emb_act),
        Flatten()(emb_img_top),
        Flatten()(emb_ut),
        conti,
        title_gru1,
        title_gru2,
        desc_gru1,
        desc_gru2,
        conv
    ])
    fc = Dense(256, activation='relu')(fc)
    fc = Dropout(0.5)(fc)
    fc = Dense(1, activation='sigmoid', name='output')(fc)

    model = Model(
        [
            region,
            city,
            pcn,
            cn,
            para1,
            para2,
            para3,
            # act,
            ut,
            img_top,
            title,
            desc,
            img,
            conti,
        ],
        # BUG FIX: was `output=fc` -- the Model constructor's keyword is
        # `outputs` (the legacy `output` alias is removed in Keras 2.x).
        outputs=fc)
    model.compile(optimizer=Nadam(),
                  loss="mean_squared_error",
                  metrics=[root_mean_squared_error])

    return model
    x_col='filename',
    y_col='norm_weight',
    shuffle=False,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='other')

# VGG-style encoder over IMAGE_SIZE images: five stages that double
# the filter count (64 -> 1024), each conv pair followed by BatchNorm;
# MaxPool2D() defaults to 2x2 pooling between stages.
inputIm = Input(shape=(
    IMAGE_SIZE[0],
    IMAGE_SIZE[1],
    3,
))
conv1 = Conv2D(64, 3, activation='relu')(inputIm)
conv1 = Conv2D(64, 3, activation='relu')(conv1)
conv1 = BatchNormalization()(conv1)
pool1 = MaxPool2D()(conv1)
conv2 = Conv2D(128, 3, activation='relu')(pool1)
conv2 = Conv2D(128, 3, activation='relu')(conv2)
conv2 = BatchNormalization()(conv2)
pool2 = MaxPool2D()(conv2)
conv3 = Conv2D(256, 3, activation='relu')(pool2)
conv3 = Conv2D(256, 3, activation='relu')(conv3)
conv3 = BatchNormalization()(conv3)
pool3 = MaxPool2D()(conv3)
conv4 = Conv2D(512, 3, activation='relu')(pool3)
conv4 = Conv2D(512, 3, activation='relu')(conv4)
conv4 = BatchNormalization()(conv4)
pool4 = MaxPool2D()(conv4)
# Final stage has no pooling; the head is built outside this excerpt.
conv5 = Conv2D(1024, 3, activation='relu')(pool4)
conv5 = Conv2D(1024, 3, activation='relu')(conv5)
conv5 = BatchNormalization()(conv5)
# One-hot encode the integer labels for categorical_crossentropy.
print("Shape before one-hot encoding: ", y_train.shape)
Y_train = np_utils.to_categorical(y_train, n_classes)
Y_test = np_utils.to_categorical(y_test, n_classes)
print("Shape after one-hot encoding: ", Y_train.shape)

# building a linear stack of layers with the sequential model
model = Sequential()
# convolutional layer
model.add(
    Conv2D(25,
           kernel_size=(3, 3),
           strides=(1, 1),
           padding='valid',
           activation='relu',
           input_shape=(28, 28, 1)))
# NOTE(review): a (1, 1) pool is a no-op -- (2, 2) was likely intended.
model.add(MaxPool2D(pool_size=(1, 1)))
# flatten output of conv
model.add(Flatten())
# hidden layer
model.add(Dense(100, activation='relu'))
# output layer
model.add(Dense(10, activation='softmax'))

# compiling the sequential model
model.compile(loss='categorical_crossentropy',
              metrics=['accuracy'],
              optimizer='adam')

# training the model for 10 epochs
model.fit(X_train,
          Y_train,
# ---- Example 12 ----
    def train(self):
        """Train a Kim-style multi-kernel text CNN on self.X / self.y.

        Three parallel Conv2D branches (one per filter size) slide over
        the embedded token sequence, each max-pooled to one vector per
        filter; the branches are concatenated, dropped out, and fed to
        a softmax output.  Best weights (by validation accuracy) are
        checkpointed to disk, and the Keras session is cleared at the
        end.
        """
        sequence_length = self.X.shape[1]
        vocabulary_size = len(self.voca_lookup)
        # Hyper parameter
        hparams = HyperParams.get_hyper_params()

        # 70/30 train/validation split (fixed seed for reproducibility).
        X_train, X_test, y_train, y_test = train_test_split(self.X,
                                                            self.y,
                                                            test_size=0.3,
                                                            random_state=42)

        # Input: either pre-embedded float sequences, or raw token ids
        # passed through a trainable Embedding layer.
        if self.is_embedding:
            inputs = Input(shape=(sequence_length, hparams.embedding_dim),
                           dtype='float32')
            embedding_inputs = inputs
        else:
            inputs = Input(shape=(sequence_length, ), dtype='int32')
            embedding_inputs = Embedding(input_dim=vocabulary_size,
                                         output_dim=hparams.embedding_dim,
                                         input_length=sequence_length)(inputs)
        # Add a trailing channel axis so Conv2D can treat the embedded
        # sequence as a 1-channel image.
        reshape_input = Reshape(
            (sequence_length, hparams.embedding_dim, 1))(embedding_inputs)

        # Convolution layer 1: kernel spans filter_sizes[0] tokens and
        # the full embedding width; pooling keeps one value per filter.
        layer_0 = Conv2D(hparams.num_filters,
                         kernel_size=(hparams.filter_sizes[0],
                                      hparams.embedding_dim),
                         padding='valid',
                         kernel_initializer='normal',
                         activation='relu')(reshape_input)
        layer_0 = MaxPool2D(pool_size=(sequence_length -
                                       hparams.filter_sizes[0] + 1, 1),
                            strides=(1, 1),
                            padding='valid')(layer_0)

        # Convolution layer 2
        layer_1 = Conv2D(hparams.num_filters,
                         kernel_size=(hparams.filter_sizes[1],
                                      hparams.embedding_dim),
                         padding='valid',
                         kernel_initializer='normal',
                         activation='relu')(reshape_input)
        layer_1 = MaxPool2D(pool_size=(sequence_length -
                                       hparams.filter_sizes[1] + 1, 1),
                            strides=(1, 1),
                            padding='valid')(layer_1)

        # Convolution layer 3
        layer_2 = Conv2D(hparams.num_filters,
                         kernel_size=(hparams.filter_sizes[2],
                                      hparams.embedding_dim),
                         padding='valid',
                         kernel_initializer='normal',
                         activation='relu')(reshape_input)
        layer_2 = MaxPool2D(pool_size=(sequence_length -
                                       hparams.filter_sizes[2] + 1, 1),
                            strides=(1, 1),
                            padding='valid')(layer_2)

        # Merge the three branch outputs.
        concatenated_tensor = Concatenate(axis=1)([layer_0, layer_1, layer_2])

        flatten = Flatten()(concatenated_tensor)
        dropout = Dropout(hparams.drop_prob)(flatten)

        # Output layers
        outputs = Dense(units=hparams.dim_output,
                        activation='softmax')(dropout)

        # Checkpoint callback.  NOTE(review): 'val_acc' is the metric
        # name used by Keras < 2.3 ('val_accuracy' afterwards) -- confirm
        # against the installed version.
        checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5',
                                     monitor='val_acc',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='auto')

        optimizer = Adam(lr=hparams.learning_rate,
                         beta_1=0.9,
                         beta_2=0.99,
                         epsilon=1e-08,
                         decay=0.0)

        # Model creation.  NOTE(review): binary_crossentropy with a
        # softmax multi-class head is unusual; categorical_crossentropy
        # is the conventional pairing -- confirm hparams.dim_output.
        model = Model(inputs, outputs)
        model.compile(optimizer,
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        model.fit(X_train,
                  y_train,
                  batch_size=hparams.batch_size,
                  epochs=hparams.epochs,
                  verbose=1,
                  callbacks=[checkpoint],
                  validation_data=(X_test, y_test))

        # Avoid the NoneType error after training.
        K.clear_session()
# ---- Example 13 ----
# One-hot encode the integer class labels.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

# Normalisation: reshape MNIST to NHWC and scale pixels into [0, 1].
x_train = x_train.reshape(60000, 28, 28, 1).astype('float32') / 255
x_test = x_test.reshape(10000, 28, 28, 1).astype('float32') / 255

# Step 2: modelling
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten, MaxPool2D

# Tiny conv stack (3 filters each), one 2x2 pooling in the middle.
model = Sequential()
model.add(Conv2D(3, (2, 2), padding='same', input_shape=(28, 28, 1)))
model.add(Conv2D(3, (2, 2), padding='same'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(3, (2, 2), padding='same'))
model.add(Conv2D(3, (2, 2), padding='same'))

model.add(Flatten())

# Deep dense head: ten identical 200-unit ELU layers.
for _ in range(10):
    model.add(Dense(200, activation='elu'))
    conv_5x5 = Conv2D(filters_5x5, (5, 5), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(conv_5x5)

    pool_proj = MaxPool2D((3, 3), strides=(1, 1), padding='same')(x)
    pool_proj = Conv2D(filters_pool_proj, (1, 1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(pool_proj)

    output = concatenate([conv_1x1, conv_3x3, conv_5x5, pool_proj], axis=3, name=name)
    
    return output

# GoogLeNet-style network: shared initialisers, stem convolutions,
# then the first inception blocks.
kernel_init = keras.initializers.glorot_uniform()
bias_init = keras.initializers.Constant(value=0.2)

input_layer = Input(shape=(224, 224, 3))

# Stem: 7x7/2 conv, pool, 1x1 + 3x3 convs, pool.
x = Conv2D(64, (7, 7), padding='same', strides=(2, 2), activation='relu', name='conv_1_7x7/2', kernel_initializer=kernel_init, bias_initializer=bias_init)(input_layer)
x = MaxPool2D((3, 3), padding='same', strides=(2, 2), name='max_pool_1_3x3/2')(x)
x = Conv2D(64, (1, 1), padding='same', strides=(1, 1), activation='relu', name='conv_2a_3x3/1')(x)
x = Conv2D(192, (3, 3), padding='same', strides=(1, 1), activation='relu', name='conv_2b_3x3/1')(x)
x = MaxPool2D((3, 3), padding='same', strides=(2, 2), name='max_pool_2_3x3/2')(x)

# Inception 3a -- filter counts per branch as in the GoogLeNet paper.
x = inception_module(x,
                     filters_1x1=64,
                     filters_3x3_reduce=96,
                     filters_3x3=128,
                     filters_5x5_reduce=16,
                     filters_5x5=32,
                     filters_pool_proj=32,
                     name='inception_3a')
x = inception_module(x,
                     filters_1x1=128,
# ---- Example 15 ----
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Convolution2D
from keras.layers import MaxPool2D
from keras.layers import Flatten
from keras.preprocessing.image import ImageDataGenerator

# build CNN: two conv/pool stages, then a dense head ending in a
# single sigmoid unit (binary classification).

# Initialising CNN
classifier = Sequential()
# step 1 - Convolution
classifier.add(Convolution2D(input_shape=(128,128,3), data_format="channels_last" , filters=32, kernel_size=(3,3), activation="relu", kernel_initializer="uniform"))
# step 2 - Max pooling
classifier.add(MaxPool2D(pool_size=(2,2), strides = 2))

# BUG FIX: this layer was data_format="channels_first", clashing with
# the channels_last input above -- it would misread the 63x63x32
# feature map (treating 63 as the channel axis). Keep the whole model
# channels_last.
classifier.add(Convolution2D(data_format="channels_last", filters=16, kernel_size = (3, 3), activation="relu", kernel_initializer="uniform"))
classifier.add(MaxPool2D(pool_size=(2,2), strides = 2))

# step 3 - flatten
classifier.add(Flatten())
# step 4 - using ANN to categorize
classifier.add(Dense(64, activation="relu", kernel_initializer="uniform"))
classifier.add(Dense(16, activation="relu", kernel_initializer="uniform"))
classifier.add(Dense(1, activation="sigmoid", kernel_initializer="uniform"))

# compile and fit
classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# image preproccessing
train_datagen = ImageDataGenerator(
# ---- Example 16 ----
def VGG16_encoder_decoder():
    """Build a VGG16-style U-Net for 2-class segmentation of 512x512
    single-channel images.

    Encoder: five VGG16 conv stages (conv+BN+ReLU), each followed by
    2x2 max-pooling and dropout.  Decoder: five upsampling stages,
    each concatenated with the matching encoder feature map (skip
    connections) before further convolutions.  Output is a per-pixel
    2-way softmax.

    NOTE(review): `filters`, `filter_size`, `dropout` and `iou_metric`
    are module-level globals defined outside this excerpt -- confirm
    their values before reuse.

    Returns:
        A compiled keras Model (adam + categorical crossentropy).
    """
    inputs = Input(shape=(512, 512, 1))
    ####################################################################################################################
    # Backbone Down sampling convolution followed by max-pooling
    ####################################################################################################################

    conv_1 = Conv2D(filters, filter_size, padding="same")(inputs)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = Activation("relu")(conv_1)

    conv_2 = Conv2D(filters, filter_size, padding="same")(conv_1)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = Activation("relu")(conv_2)
    ####################################################################################################################
    d1 = MaxPool2D((2, 2), (2, 2))(conv_2)
    d1 = Dropout(dropout)(d1)
    ####################################################################################################################
    conv_3 = Conv2D(filters * 2, filter_size, padding="same")(d1)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = Activation("relu")(conv_3)

    conv_4 = Conv2D(filters * 2, filter_size, padding="same")(conv_3)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = Activation("relu")(conv_4)
    ####################################################################################################################
    d2 = MaxPool2D((2, 2), (2, 2))(conv_4)
    d2 = Dropout(dropout)(d2)
    ####################################################################################################################
    conv_5 = Conv2D(filters * 4, filter_size, padding="same")(d2)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = Activation("relu")(conv_5)

    conv_6 = Conv2D(filters * 4, filter_size, padding="same")(conv_5)
    conv_6 = BatchNormalization()(conv_6)
    conv_6 = Activation("relu")(conv_6)

    conv_7 = Conv2D(filters * 4, filter_size, padding="same")(conv_6)
    conv_7 = BatchNormalization()(conv_7)
    conv_7 = Activation("relu")(conv_7)
    ####################################################################################################################
    d3 = MaxPool2D((2, 2), (2, 2))(conv_7)
    d3 = Dropout(dropout)(d3)
    ####################################################################################################################
    conv_8 = Conv2D(filters * 8, filter_size, padding="same")(d3)
    conv_8 = BatchNormalization()(conv_8)
    conv_8 = Activation("relu")(conv_8)

    conv_9 = Conv2D(filters * 8, filter_size, padding="same")(conv_8)
    conv_9 = BatchNormalization()(conv_9)
    conv_9 = Activation("relu")(conv_9)

    conv_10 = Conv2D(filters * 8, filter_size, padding="same")(conv_9)
    conv_10 = BatchNormalization()(conv_10)
    conv_10 = Activation("relu")(conv_10)
    ####################################################################################################################
    d4 = MaxPool2D((2, 2), (2, 2))(conv_10)
    d4 = Dropout(dropout)(d4)
    ####################################################################################################################
    conv_11 = Conv2D(filters * 8, filter_size, padding="same")(d4)
    conv_11 = BatchNormalization()(conv_11)
    conv_11 = Activation("relu")(conv_11)

    conv_12 = Conv2D(filters * 8, filter_size, padding="same")(conv_11)
    conv_12 = BatchNormalization()(conv_12)
    conv_12 = Activation("relu")(conv_12)

    conv_13 = Conv2D(filters * 8, filter_size, padding="same")(conv_12)
    conv_13 = BatchNormalization()(conv_13)
    conv_13 = Activation("relu")(conv_13)
    ####################################################################################################################
    # Pool-size-only call: strides default to pool_size, so this matches
    # the explicit (2, 2), (2, 2) calls above.
    d5 = MaxPool2D((2, 2))(conv_13)
    d5 = Dropout(dropout)(d5)

    ####################################################################################################################
    # Up sampling convolution followed by up-sampling
    ####################################################################################################################

    u1 = keras.layers.UpSampling2D(
        (2, 2), interpolation='nearest')(d5)  # ,interpolation='bilinear'
    ####################################################################################################################
    # Skip connection from the deepest pre-pool feature map.
    skip5 = keras.layers.Concatenate()([conv_13, u1])

    conv_14 = Conv2D(filters * 8, filter_size, padding="same")(skip5)
    conv_14 = BatchNormalization()(conv_14)
    conv_14 = Activation("relu")(conv_14)

    conv_15 = Conv2D(filters * 8, filter_size, padding="same")(conv_14)
    conv_15 = BatchNormalization()(conv_15)
    conv_15 = Activation("relu")(conv_15)

    conv_16 = Conv2D(filters * 8, filter_size, padding="same")(conv_15)
    conv_16 = BatchNormalization()(conv_16)
    conv_16 = Activation("relu")(conv_16)
    ####################################################################################################################
    u2 = keras.layers.UpSampling2D((2, 2), interpolation='nearest')(conv_16)
    ####################################################################################################################
    skip4 = keras.layers.Concatenate()([conv_10, u2])

    conv_17 = Conv2D(filters * 8, filter_size, padding="same")(skip4)
    conv_17 = BatchNormalization()(conv_17)
    conv_17 = Activation("relu")(conv_17)

    conv_18 = Conv2D(filters * 8, filter_size, padding="same")(conv_17)
    conv_18 = BatchNormalization()(conv_18)
    conv_18 = Activation("relu")(conv_18)

    conv_19 = Conv2D(filters * 8, filter_size, padding="same")(conv_18)
    conv_19 = BatchNormalization()(conv_19)
    conv_19 = Activation("relu")(conv_19)

    ####################################################################################################################
    u3 = keras.layers.UpSampling2D((2, 2), interpolation='nearest')(conv_19)
    ####################################################################################################################

    skip3 = keras.layers.Concatenate()([conv_7, u3])
    conv_20 = Conv2D(filters * 4, filter_size, padding="same")(skip3)
    conv_20 = BatchNormalization()(conv_20)
    conv_20 = Activation("relu")(conv_20)

    conv_21 = Conv2D(filters * 4, filter_size, padding="same")(conv_20)
    conv_21 = BatchNormalization()(conv_21)
    conv_21 = Activation("relu")(conv_21)

    conv_22 = Conv2D(filters * 4, filter_size, padding="same")(conv_21)
    conv_22 = BatchNormalization()(conv_22)
    conv_22 = Activation("relu")(conv_22)

    ####################################################################################################################
    u4 = keras.layers.UpSampling2D((2, 2), interpolation='nearest')(conv_22)
    ####################################################################################################################

    skip2 = keras.layers.Concatenate()([conv_4, u4])
    conv_23 = Conv2D(filters * 2, filter_size, padding="same")(skip2)
    conv_23 = BatchNormalization()(conv_23)
    conv_23 = Activation("relu")(conv_23)

    conv_24 = Conv2D(filters * 2, filter_size, padding="same")(conv_23)
    conv_24 = BatchNormalization()(conv_24)
    conv_24 = Activation("relu")(conv_24)

    ####################################################################################################################
    u5 = keras.layers.UpSampling2D((2, 2), interpolation='nearest')(conv_24)
    ####################################################################################################################

    skip1 = keras.layers.Concatenate()([conv_2, u5])
    conv_25 = Conv2D(filters, filter_size, padding="same")(skip1)
    conv_25 = BatchNormalization()(conv_25)
    conv_25 = Activation("relu")(conv_25)

    conv_26 = Conv2D(filters, filter_size, padding="same")(conv_25)
    conv_26 = BatchNormalization()(conv_26)
    conv_26 = Activation("relu")(conv_26)

    ####################################################################################################################
    # Output layer
    ####################################################################################################################

    # Per-pixel 2-class softmax.
    output = (Conv2D(2, (1, 1), padding='same', activation='softmax'))(conv_26)

    ####################################################################################################################
    model = Model(inputs=inputs, outputs=output)
    ####################################################################################################################

    model.compile(optimizer='adam',
                  loss=keras.losses.categorical_crossentropy,
                  metrics=[iou_metric])
    ####################################################################################################################
    return model
# ---- Example 17 ----
def TS_ConvMobileNet(input_shape=None,
                     alpha=1.0,
                     depth_multiplier=1,
                     dropout=1e-3,
                     pooling=None,
                     classes=1000):
    """Time-distributed MobileNet backbone followed by temporal convolutions.

    # Arguments
        input_shape: optional 4-tuple (timesteps, height, width, channels).
            timesteps must be 128 and channels must be 1 or 3; defaults to
            (128, 64, 64, 1) when omitted.
        alpha: width multiplier scaling the filter counts.
        depth_multiplier: depthwise-convolution depth multiplier.
        dropout: dropout rate applied before the softmax head.
        pooling: 'max' selects per-frame global max pooling; any other value
            (including None) selects global average pooling.
        classes: number of output classes.

    # Returns
        An uncompiled `keras.Model`.

    # Raises
        ValueError: if `input_shape` violates the constraints above.
    """
    rows = None  # kept only for the model-name suffix below
    #     filter_num = [32,64,128,256,512,1024]
    filter_num = [16, 32, 64, 128, 256, 512]
    if input_shape:
        if len(input_shape
               ) == 4 and input_shape[0] == 128 and input_shape[3] in [1, 3]:
            img_input = Input(shape=input_shape)
        else:
            # Raise with the diagnostic text instead of printing and then
            # raising a bare, message-less ValueError.
            raise ValueError('Please check the entered input shape.\n'
                             'The first input (timestep) must be 128\n'
                             'The last input must be in [1, 3]')
    else:
        img_input = Input(shape=(128, 64, 64, 1))

    # Per-frame MobileNet feature extractor; stride-2 blocks downsample.
    x = T_conv_block(img_input, filter_num[0], alpha, strides=(2, 2))
    x = T_depthwise_conv_block(x, filter_num[1], alpha, depth_multiplier,
                               block_id=1)

    x = T_depthwise_conv_block(x, filter_num[2], alpha, depth_multiplier,
                               strides=(2, 2), block_id=2)
    x = T_depthwise_conv_block(x, filter_num[2], alpha, depth_multiplier,
                               block_id=3)

    x = T_depthwise_conv_block(x, filter_num[3], alpha, depth_multiplier,
                               strides=(2, 2), block_id=4)
    x = T_depthwise_conv_block(x, filter_num[3], alpha, depth_multiplier,
                               block_id=5)

    x = T_depthwise_conv_block(x, filter_num[4], alpha, depth_multiplier,
                               strides=(2, 2), block_id=6)
    # Blocks 7-11 are identical width-256 blocks; loop instead of copy-paste.
    for block_id in range(7, 12):
        x = T_depthwise_conv_block(x, filter_num[4], alpha, depth_multiplier,
                                   block_id=block_id)

    x = T_depthwise_conv_block(x, filter_num[5], alpha, depth_multiplier,
                               strides=(2, 2), block_id=12)
    x = T_depthwise_conv_block(x, filter_num[5], alpha, depth_multiplier,
                               block_id=13)

    if backend.image_data_format() == 'channels_first':
        shape = (1, int(filter_num[5] * alpha))
        channelAxis = 1
    else:
        shape = (int(filter_num[5] * alpha), 1)
        channelAxis = -1

    # Collapse each frame's spatial grid to a single feature vector.
    if pooling == 'max':
        x = TimeDistributed(GlobalMaxPooling2D(), name="MAX_pool")(x)
    else:
        x = TimeDistributed(GlobalAveragePooling2D(), name="AVG_pool")(x)

    x = TimeDistributed(Reshape(shape, name='reshape_1'), name="reshape1")(x)

    # Three temporal-convolution towers with different step sizes over the
    # 128-step sequence, each max-pooled down to a single step; the three
    # towers were identical except for (filters, step), so build in a loop.
    TimeFilter_num = [256, 256, 256]
    filter_step = [2, 16, 32]

    towers = []
    for n_filters, step in zip(TimeFilter_num, filter_step):
        t = Conv2D(n_filters,
                   kernel_size=(step, int(filter_num[5] * alpha)),
                   padding='valid',
                   name='time_conv_%d_step' % step)(x)
        t = BatchNormalization(axis=channelAxis,
                               name='time_bn_%d_step' % step)(t)
        t = Activation(backend.relu, name='time_relu_%d_step' % step)(t)
        t = MaxPool2D(pool_size=(128 - step + 1, 1),
                      strides=(1, 1),
                      padding='valid',
                      name='time_Max_%d_step' % step)(t)
        towers.append(t)

    concatenated_tensor = Concatenate(axis=1)(towers)
    x = Flatten(name='flatten_concat')(concatenated_tensor)
    x = Dropout(dropout)(x)
    x = Dense(units=classes, activation='softmax')(x)

    # Create model.  (The stray debug print of the input tensor was removed.)
    model = models.Model(img_input, x,
                         name='mobilenet_%0.2f_%s' % (alpha, rows))

    return model
# Convolutional feature stack: two 5x5 layers, a 2x2 downsample with dropout,
# then two 3x3 layers.  Only the FIRST layer of a Sequential model needs
# `input_shape`; the redundant copies on the later layers were removed.
cnn_model.add(
    Conv2D(filters=32,
           kernel_size=(5, 5),
           padding='Same',
           activation='relu',
           input_shape=(28, 28, 1)))

cnn_model.add(
    Conv2D(filters=32,
           kernel_size=(5, 5),
           padding='Same',
           activation='relu'))

cnn_model.add(MaxPool2D(pool_size=(2, 2)))
cnn_model.add(Dropout(0.25))

cnn_model.add(
    Conv2D(filters=64,
           kernel_size=(3, 3),
           padding='Same',
           activation='relu'))

cnn_model.add(
    Conv2D(filters=64,
           kernel_size=(3, 3),
           padding='Same',
           activation='relu'))
示例#19
0
def main():
    """Train a two-class image CNN from 'dataset.csv' and plot training curves.

    Reads the CSV positionally (column 1: image path, column 2: integer class
    label 0/1), trains with early stopping and periodic weight checkpoints,
    saves the final model, and writes a loss/accuracy plot to 'model.png'.
    """
    dataset = pd.read_csv('dataset.csv')
    X = []
    Y = []
    for ind, row in dataset.iterrows():
        # NOTE(review): positional row access -- confirm the CSV column layout.
        img = cv2.imread(row[1])
        img = cv2.resize(img, (256, 256))
        img = img_to_array(img)
        X.append(img)
        Y.append(row[2])

    X = np.array(X, dtype="float") / 255.0  # scale pixels into [0, 1]
    Y = to_categorical(Y, 2)  # one-hot targets, shape (n, 2)
    Y = np.array(Y)

    X = X.reshape(-1, 256, 256, 3)

    # Split Train test
    X_train, X_val, Y_train, Y_val = train_test_split(X,
                                                      Y,
                                                      test_size=0.2,
                                                      random_state=5)

    cnn_model = Sequential()

    cnn_model.add(
        Conv2D(
            input_shape=(256, 256, 3),
            filters=64,
            kernel_size=(3, 3),
            padding='valid',
            activation='relu',
        ))

    cnn_model.add(
        Conv2D(
            filters=64,
            kernel_size=(3, 3),
            padding='valid',
            activation='relu',
        ))

    cnn_model.add(MaxPool2D(pool_size=(2, 2)))

    cnn_model.add(
        Conv2D(
            filters=128,
            kernel_size=(3, 3),
            padding='valid',
            activation='relu',
        ))

    cnn_model.add(
        Conv2D(
            filters=128,
            kernel_size=(3, 3),
            padding='valid',
            activation='relu',
        ))

    cnn_model.add(MaxPool2D(pool_size=2))

    cnn_model.add(
        Conv2D(
            filters=256,
            kernel_size=(3, 3),
            padding='valid',
            activation='relu',
        ))

    cnn_model.add(
        Conv2D(
            filters=256,
            kernel_size=(3, 3),
            padding='valid',
            activation='relu',
        ))

    cnn_model.add(MaxPool2D(pool_size=2))

    cnn_model.add(
        Conv2D(
            filters=20,
            kernel_size=(4, 4),
            padding='valid',
            activation='relu',
        ))

    cnn_model.add(MaxPool2D(pool_size=2))
    cnn_model.add(Dropout(0.25))

    cnn_model.add(Flatten())
    cnn_model.add(Dense(256, activation="relu"))
    cnn_model.add(Dropout(0.5))
    cnn_model.add(Dense(2, activation="softmax"))

    print(cnn_model.summary())

    # One-hot labels with a 2-unit softmax call for categorical cross-entropy;
    # 'binary_crossentropy' here would score each output unit independently.
    cnn_model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=3,
                                   verbose=0,
                                   mode='auto')

    saving_weight = ModelCheckpoint('weights{epoch:08d}.h5',
                                    save_weights_only=True,
                                    period=5)

    epochs = 100
    batch_size = 32

    H = cnn_model.fit(X_train,
                      Y_train,
                      batch_size=batch_size,
                      epochs=epochs,
                      verbose=1,
                      validation_data=(X_val, Y_val),
                      callbacks=[early_stopping, saving_weight])

    cnn_model.save('keras_cnn_model_redone.h5')

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    # Early stopping can end training before `epochs`, so size the x-axis
    # from the recorded history rather than the requested epoch count
    # (np.arange(0, epochs) would crash on a shorter history).
    N = len(H.history["loss"])
    # Keras names the accuracy metric 'acc' or 'accuracy' depending on version.
    acc_key = "acc" if "acc" in H.history else "accuracy"
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history[acc_key], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_" + acc_key], label="val_acc")

    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig('model.png')
示例#20
0
# Make the local `dataset` package importable (module_path is set earlier).
if module_path not in sys.path:
    print('Adding dataset module.')
    sys.path.append(module_path)

import dataset

# Pre-split, reshaped arrays produced by the dataset pipeline (n=20 variant).
X_train = np.load('../dataset-n20-X-reshaped-train.npy')
X_validate = np.load('../dataset-n20-X-reshaped-validate.npy')
y_train = np.load('../dataset-n20-y-reshaped-train.npy')
y_validate = np.load('../dataset-n20-y-reshaped-validate.npy')

# Small 4-class conv net; the input layer takes its shape from one example.
example_shape = X_train.shape[1:]
input_layer = Input(shape=example_shape)
conv_1 = Conv2D(filters=40, kernel_size=3, padding='same',
                activation='relu')(input_layer)
# Pool only along the first spatial axis; the second axis is preserved.
pool_1 = MaxPool2D(pool_size=(2, 1))(conv_1)
conv_2 = Conv2D(filters=20, kernel_size=3, padding='same',
                activation='relu')(pool_1)

flatten = Flatten()(conv_2)
predictions = Dense(4, activation='softmax')(flatten)

model = Model(input_layer, predictions)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print(model.summary())

# NOTE(review): unusually large batch size -- presumably the whole training
# set fits in memory; confirm before reuse.
batch_size = 10000
示例#21
0
# Conv stack with noise injection and spatial dropout.  GN1/GN3, ConvScale,
# kernel_size, reg_scale, alpha and spatial_d_rate are hyperparameters
# defined earlier in the script.
model = Sequential()
# Input-level noise; single-channel input with free spatial size.
model.add(GaussianNoise(GN1, input_shape=(None, None, 1)))
model.add(
    Conv2D(int(2 * ConvScale), (kernel_size, kernel_size),
           padding='valid',
           kernel_regularizer=regularizers.l2(reg_scale)))
model.add(LeakyReLU(alpha=alpha))
model.add(SpatialDropout2D(spatial_d_rate))
# model.add(GaussianNoise(GN2))
model.add(
    Conv2D(int(2 * ConvScale), (kernel_size, kernel_size),
           padding='valid',
           kernel_regularizer=regularizers.l2(reg_scale)))
model.add(LeakyReLU(alpha=alpha))
model.add(SpatialDropout2D(spatial_d_rate))
model.add(MaxPool2D())
#model.add(Dropout(dropout_rate / 2))

model.add(
    Conv2D(int(2 * ConvScale), (kernel_size, kernel_size),
           padding='valid',
           kernel_regularizer=regularizers.l2(reg_scale)))
model.add(LeakyReLU(alpha=alpha))
model.add(SpatialDropout2D(spatial_d_rate))

# Mid-network noise before the final conv block.
model.add(GaussianNoise(GN3))
model.add(
    Conv2D(int(2 * ConvScale), (kernel_size, kernel_size),
           padding='valid',
           kernel_regularizer=regularizers.l2(reg_scale)))
model.add(LeakyReLU(alpha=alpha))
def CRNN(input_shape, num_classes, prediction_only=False, gru=True):
    """CRNN architecture.

    # Arguments
        input_shape: Shape of the input image, (256, 32, 1).
        num_classes: Number of characters in alphabet, including CTC blank.
        prediction_only: if True, return only the inference model.
        gru: use bidirectional GRUs when True, bidirectional LSTMs otherwise.

    # References
        https://arxiv.org/abs/1507.05717
    """
    act = 'relu'

    image_input = Input(shape=input_shape, name='image_input')

    # Convolutional feature extractor.  The later pools use (1, 2) strides so
    # the width (time) axis is preserved while the height axis collapses.
    net = Conv2D(64, (3, 3), strides=(1, 1), activation=act,
                 padding='same', name='conv1_1')(image_input)
    net = MaxPool2D(pool_size=(2, 2), strides=(2, 2),
                    padding='same', name='pool1')(net)
    net = Conv2D(128, (3, 3), strides=(1, 1), activation=act,
                 padding='same', name='conv2_1')(net)
    net = MaxPool2D(pool_size=(2, 2), strides=(2, 2),
                    padding='same', name='pool2')(net)
    net = Conv2D(256, (3, 3), strides=(1, 1), activation=act,
                 padding='same', name='conv3_1')(net)
    net = Conv2D(256, (3, 3), strides=(1, 1), activation=act,
                 padding='same', name='conv3_2')(net)
    net = MaxPool2D(pool_size=(2, 2), strides=(1, 2),
                    padding='same', name='pool3')(net)
    net = Conv2D(512, (3, 3), strides=(1, 1), activation=act,
                 padding='same', name='conv4_1')(net)
    net = BatchNormalization(name='batchnorm1')(net)
    net = Conv2D(512, (3, 3), strides=(1, 1), activation=act,
                 padding='same', name='conv5_1')(net)
    net = BatchNormalization(name='batchnorm2')(net)
    net = MaxPool2D(pool_size=(2, 2), strides=(1, 2),
                    padding='valid', name='pool5')(net)
    net = Conv2D(512, (2, 2), strides=(1, 1), activation=act,
                 padding='valid', name='conv6_1')(net)

    # Collapse to a (time, features) sequence for the recurrent layers.
    net = Reshape((-1, 512))(net)
    if gru:
        net = Bidirectional(GRU(256, return_sequences=True))(net)
        net = Bidirectional(GRU(256, return_sequences=True))(net)
    else:
        net = Bidirectional(LSTM(256, return_sequences=True, name='lstm1'))(net)
        net = Bidirectional(LSTM(256, return_sequences=True, name='lstm2'))(net)

    net = Dense(num_classes, name='dense1')(net)
    y_pred = Activation('softmax', name='softmax')(net)

    model_pred = Model(image_input, y_pred)

    if prediction_only:
        return model_pred

    max_string_len = int(y_pred.shape[1])

    # Keras has no loss functions with extra parameters, so the CTC loss is
    # computed in a Lambda layer and a dummy loss is used at compile time.
    def ctc_lambda_func(args):
        labels, y_pred, input_length, label_length = args
        return K.ctc_batch_cost(labels, y_pred, input_length, label_length)

    labels = Input(name='label_input', shape=[max_string_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    ctc_loss = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
        [labels, y_pred, input_length, label_length])

    model_train = Model(inputs=[image_input, labels, input_length, label_length],
                        outputs=ctc_loss)

    return model_train, model_pred
示例#23
0
import matplotlib.pyplot as plt
import sys
import keras.backend as K
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPool2D, LeakyReLU, Activation
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
import tensorflow as tf

# Binary classifier over 64x64 RGB images: two conv/pool stages, then an MLP.
inputs = Input(shape=(64, 64, 3))

x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
           padding='same')(inputs)
x = MaxPool2D(pool_size=(2, 2))(x)

x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
           padding='same')(x)
x = MaxPool2D(pool_size=(2, 2))(x)

# Step 3 - Flattening
x = Flatten()(x)
x = Dense(units=128, activation='relu')(x)
x = Dense(units=128, activation='relu')(x)

# Single sigmoid unit pairs with the binary cross-entropy loss below.
output = Dense(units=1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=output)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
示例#24
0
def cnn(x, y, num_epochs, batch_size):
    """Train, evaluate and score a 7-class CNN on 28x28x3 skin-lesion images.

    # Arguments
        x: image pixel data (standardized below by `standardize_data`).
        y: one-hot labels over 7 classes.
        num_epochs: number of training epochs.
        batch_size: minibatch size for training.

    # Returns
        max() of the test-set accuracy and the score from `evaluate_model`.
        # NOTE(review): mixing these two quantities looks intentional but is
        # unusual -- confirm `evaluate_model` returns a comparable accuracy.
    """
    # train test split
    x_train, x_test, y_train, y_test = split_data(x, y, 0.2, 42)

    # standardize pixel data
    x_train = standardize_data(x_train)
    x_test = standardize_data(x_test)
    # print('train data shape after standardization:', x_train.shape)
    # train validate split
    x_train, x_validate, y_train, y_validate = split_data(
        x_train, y_train, 0.1, 2)
    """
    The CNN model architecture used in this analysis is adapted from a Kaggle 
    kernel for the Skin Cancer MNIST: HAM10000 dataset. The code and reasoning 
    behind each layer selection can be found at the following link:
    https://www.kaggle.com/sid321axn/step-wise-approach-cnn-model-77-0344-accuracy
    """

    # set the CNN model
    input_shape = (28, 28, 3)
    num_classes = 7

    # Two 32-filter convs, pool, dropout; two 64-filter convs, pool, dropout.
    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               padding='Same',
               input_shape=input_shape))
    model.add(
        Conv2D(
            32,
            kernel_size=(3, 3),
            activation='relu',
            padding='Same',
        ))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), activation='relu', padding='Same'))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='Same'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.40))

    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.summary()

    # define the optimizer
    optimizer = Adam(lr=0.001,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=None,
                     decay=0.0,
                     amsgrad=False)
    # compile the model
    model.compile(optimizer=optimizer,
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    # set a learning rate annealer
    # NOTE(review): 'val_acc' is the pre-2.3 Keras metric name; newer
    # versions use 'val_accuracy' -- confirm against the installed version.
    learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                                patience=3,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=0.00001)

    # augment pixel data to prevent overfitting
    datagen = ImageDataGenerator(featurewise_center=False,
                                 samplewise_center=False,
                                 featurewise_std_normalization=False,
                                 samplewise_std_normalization=False,
                                 zca_whitening=False,
                                 rotation_range=10,
                                 zoom_range=0.1,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 horizontal_flip=False,
                                 vertical_flip=False)

    datagen.fit(x_train)

    # fit the model
    history = model.fit_generator(datagen.flow(x_train,
                                               y_train,
                                               batch_size=batch_size),
                                  epochs=num_epochs,
                                  validation_data=(x_validate, y_validate),
                                  verbose=1,
                                  steps_per_epoch=x_train.shape[0] //
                                  batch_size,
                                  callbacks=[learning_rate_reduction])

    # evaluate the model
    loss, accuracy = model.evaluate(x_test, y_test, verbose=1)
    loss_v, accuracy_v = model.evaluate(x_validate, y_validate, verbose=1)
    print("Validation: accuracy = %f  ;  loss_v = %f" % (accuracy_v, loss_v))
    print("Test: accuracy = %f  ;  loss = %f" % (accuracy, loss))
    plot_model_history(history)

    # predict y_test
    y_pred = model.predict(x_test)
    y_true = np.argmax(y_test, axis=1)
    y_pred_classes = np.argmax(y_pred, axis=1)
    score = evaluate_model(y_true, y_pred_classes, model_name='cnn')

    return max(accuracy, score)
def my_model():
    """Build and compile a VGG-style CNN for 7-class 48x48 grayscale input.

    Also writes an architecture diagram to 'vgg.png' via `plot_model`.

    # Returns
        A compiled `Sequential` model (categorical cross-entropy, Adam).
    """
    model = Sequential()

    # First conv carries the input shape; all convs are 3x3, same-padding,
    # ReLU-activated.
    model.add(
        Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               input_shape=(48, 48, 1)))

    # Remaining VGG blocks as (filters, conv count); each block ends with a
    # 2x2 max-pool of stride 2.
    for filters, repeats in ((64, 1), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(repeats):
            model.add(
                Conv2D(filters=filters,
                       kernel_size=(3, 3),
                       padding="same",
                       activation="relu"))
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

    # Classifier head: two wide fully-connected layers, then softmax over 7.
    model.add(Flatten())
    for _ in range(2):
        model.add(Dense(units=4096, activation="relu"))

    model.add(Dense(7, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    plot_model(model,
               to_file='vgg.png',
               show_shapes=True,
               show_layer_names=True)

    return model
示例#26
0
def create_cnn_model(input_shape, num_classes=10, settings=None):
    """Build an (uncompiled) CNN: two conv layers, pooling, then hidden
    layers that are either `FocusedLayer1D` or plain `Dense`.

    # Arguments
        input_shape: shape of one input sample, e.g. (28, 28, 1).
        num_classes: size of the softmax output layer.
        settings: dict of hyperparameters; keys 'nfilters', 'kn_size',
            'nhidden' and 'neuron' are always read, and the focus_* keys are
            read when neuron == 'focused'.

    # Returns
        An uncompiled `keras.Model`.
    """
    from keras.models import Model
    from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, BatchNormalization
    from keras.layers import Activation, Permute, Concatenate, MaxPool2D
    from keras.regularizers import l2

    # Avoid the shared mutable-default-argument pitfall (was `settings={}`).
    settings = {} if settings is None else settings

    node_in = Input(shape=input_shape, name='inputlayer')

    node_conv1 = Conv2D(filters=settings['nfilters'][0],
                        kernel_size=settings['kn_size'][0],
                        padding='same',
                        activation='relu')(node_in)
    node_conv2 = Conv2D(filters=settings['nfilters'][1],
                        kernel_size=settings['kn_size'][0],
                        padding='same',
                        activation='relu')(node_conv1)
    #node_conv3 = Conv2D(filters=nfilters,kernel_size=kn_size, padding='same',
    #                    activation='relu')(node_conv2)

    node_pool = MaxPool2D((2, 2))(node_conv2)
    #node_pool = MaxPool2D((4,4))(node_conv2) works good.
    node_fl = Flatten(data_format='channels_last')(node_pool)
    #node_fl = Flatten(data_format='channels_last')(node_conv2)

    #node_fl = node_in
    # smaller initsigma does not work well.
    node_ = Dropout(0.5)(node_fl)
    # `initializers` comes from module scope (imported elsewhere in the file).
    heu = initializers.he_uniform
    h = 1

    for nh in settings['nhidden']:
        if settings['neuron'] == 'focused':
            init_mu = settings['focus_init_mu']
            node_ = FocusedLayer1D(
                units=nh,
                name='focus-' + str(h),
                activation='linear',
                init_sigma=settings['focus_init_sigma'],
                init_mu=init_mu,
                init_w=None,
                train_sigma=settings['focus_train_si'],
                train_weights=settings['focus_train_weights'],
                si_regularizer=settings['focus_sigma_reg'],
                train_mu=settings['focus_train_mu'],
                normed=settings['focus_norm_type'])(node_)
        else:
            node_ = Dense(nh,
                          name='dense-' + str(h),
                          activation='linear',
                          kernel_initializer=heu())(node_)

        # Common tail per hidden layer: BN -> ReLU -> dropout.
        node_ = BatchNormalization()(node_)
        node_ = Activation('relu')(node_)
        node_ = Dropout(0.5)(node_)
        h = h + 1

    node_fin = Dense(num_classes,
                     name='softmax',
                     activation='softmax',
                     kernel_initializer=initializers.he_uniform(),
                     kernel_regularizer=None)(node_)

    model = Model(inputs=node_in, outputs=[node_fin])

    return model
示例#27
0
# first feature extractor
# Word embeddings over the tokenized review (`visible` is an Input defined
# earlier); reshaped to 4-D so Conv2D can treat it as a 1-channel image.
embedding = Embedding(top_words,
                      embedding_vecor_length,
                      input_length=max_review_length,
                      trainable=True)(visible)
e = Reshape((sequence_length, embedding_vecor_length, 1))(embedding)
print(embedding.shape)
print(e.shape)

# Convolution spanning the embedding dimension -- assumes
# embedding_vecor_length == 100; TODO confirm.
conv_0 = Conv2D(filters1,
                kernel_size=(filter_sizes[0], 100),
                padding='valid',
                kernel_initializer='normal',
                activation=newacti)(e)
# Max over all window positions (Kim-style text-CNN pooling).
maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1),
                      strides=(1, 1),
                      padding='valid')(conv_0)
#maxpool_0=Flatten()(maxpool_0)
#maxpool_0=Reshape((1,gru_output_size))(maxpool_0)
# Parallel recurrent branch over the raw embedding sequence.
gru = Reshape((sequence_length, embedding_vecor_length))(embedding)
gru = GRU(gru_output_size,
          return_sequences=True,
          dropout=0.2,
          recurrent_dropout=0.2)(gru)
# Element-wise maximum merges the conv and recurrent branches.
merge2 = maximum([maxpool_0, gru])
merge = Reshape((sequence_length, filters1))(merge2)

gru1 = GRU(gru_output_size,
           return_sequences=True,
           dropout=0.2,
           recurrent_dropout=0.2)(merge)
                                                  test_size=0.1,
                                                  random_state=42)

# Build a black-box model and get its predictions
black_box_model_weights_filename = 'black_box.h5'
black_box_model = keras.Sequential([
    Conv2D(32,
           kernel_size=(3, 3),
           activation='relu',
           kernel_initializer='he_normal',
           input_shape=input_shape),
    Conv2D(32,
           kernel_size=(3, 3),
           activation='relu',
           kernel_initializer='he_normal'),
    MaxPool2D((2, 2)),
    Dropout(0.20),
    Conv2D(64, (3, 3),
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'),
    Conv2D(64, (3, 3),
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'),
    MaxPool2D(pool_size=(2, 2)),
    Dropout(0.25),
    Conv2D(128, (3, 3),
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'),
示例#29
0
from tensorflow import set_random_seed  # TF1-style global seeding
set_random_seed(2)

from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras import regularizers

# Two-class classifier over 50x50 single-channel images.
n_classes = 2

IMG_SIZE = 50

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), strides=(1, 1),padding = 'Same',
                 activation='relu',
                 input_shape=(IMG_SIZE, IMG_SIZE, 1)))
#model.add(Dropout(0.25))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

# Strided conv plus L2/L1-regularized convs; each stage halves the grid.
model.add(Conv2D(64, (3, 3), strides=(2,2), padding = 'Same', activation='relu'))
model.add(Conv2D(64, (3, 3), kernel_regularizer=regularizers.l2(0.01), padding = 'Same', activation='relu'))
#model.add(Dropout(0.5))
model.add(MaxPool2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3), kernel_regularizer=regularizers.l1(0.01), padding = 'Same', activation='relu'))
#model.add(Dropout(0.5))
model.add(MaxPool2D(pool_size=(2, 2)))



# Dense head: 100 hidden units, then softmax over the two classes.
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(n_classes, activation='softmax'))
示例#30
0
# Load the MNIST test split from local idx files.
tX, ty = loadlocal_mnist(images_path='t10k-images-idx3-ubyte',
                         labels_path='t10k-labels-idx1-ubyte')

print('[console] PREPARING TEST DATA...')

# Reshape flat pixel rows to (n, 28, 28, 1) and one-hot encode the labels.
tX = np.reshape(tX, (len(tX), 28, 28, 1))
enc_ty = to_categorical(ty)

# initialize model

# Untrained conv feature extractor (no dense head here; used as a fixed
# front-end for the MLP built later in the script).
model = Sequential([  #conv net just for a bit more torture
    Conv2D(16, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    Conv2D(32, (3, 3), activation='relu'),
    Conv2D(8, (3, 3), activation='selu'),
    MaxPool2D(pool_size=[14, 14], strides=[2, 2]),
    Flatten()
])

# tracking data
time_prd = []
acc = []
loss = []
ce_loss = []

# Training hyperparameters.
epoch = 20
lr = 0.001
b_size = 50

# the making of a horrible MLP