Example #1
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Dropout, Flatten, BatchNormalization
from keras.regularizers import l1_l2
from keras.optimizers import SGD


def compile():
    single = Sequential()

    single.add(
        Conv2D(64, (7, 7),
               strides=(1, 1),
               padding='valid',
               activation='relu',
               kernel_regularizer=l1_l2(l1=0.01, l2=0.01),
               input_shape=(33, 33, 4)))

    single.add(BatchNormalization())
    single.add(Dropout(0.5))

    single.add(
        Conv2D(128, (5, 5),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l1_l2(l1=0.01, l2=0.01),
               activation='relu'))
    single.add(BatchNormalization())
    single.add(Dropout(0.5))

    single.add(
        Conv2D(128, (5, 5),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l1_l2(l1=0.01, l2=0.01),
               activation='relu'))
    single.add(BatchNormalization())
    single.add(Dropout(0.5))

    single.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l1_l2(l1=0.01, l2=0.01),
               activation='relu'))
    single.add(BatchNormalization())
    single.add(Dropout(0.25))

    single.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l1_l2(l1=0.01, l2=0.01),
               activation='relu'))
    single.add(Dropout(0.25))

    single.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l1_l2(l1=0.01, l2=0.01),
               activation='relu'))
    single.add(BatchNormalization())
    single.add(Dropout(0.25))

    single.add(Flatten())
    single.add(Dense(5, activation='softmax'))

    sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
    single.compile(loss='categorical_crossentropy', optimizer=sgd)

    return single
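
# Usage sketch (assumption: synthetic data standing in for the real 33x33 4-channel patches):
# import numpy as np
# from keras.utils import to_categorical
# X_demo = np.random.rand(32, 33, 33, 4).astype('float32')
# y_demo = to_categorical(np.random.randint(0, 5, 32), 5)
# model = compile()
# model.fit(X_demo, y_demo, batch_size=8, epochs=1)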
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))

# Adding a second convolutional layer
classifier.add(Convolution2D(32, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
#When adding another convolutional layer, you only need to specify the number of feature detectors, their dimensions and an activation function
#The feature maps from the previous layer are the input

# Adding a third convolutional layer for improved performance
classifier.add(Convolution2D(64, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))

# Step 3 - Flattening 
classifier.add(Flatten())

# Step 4 - Full Connection
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))

# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

# Part 2 - Fitting the CNN to the Images
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
Example #3
# plot some items
plt.figure(figsize=(5,5))
for i in range(9):
    plt.subplot(3,3,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(x_train[i], cmap=plt.cm.binary)
    plt.xlabel(labels[y_train[i]])
plt.show()


# baseline (85% accuracy)
model = Sequential()
model.add(Flatten(input_shape = [28,28]))
model.add(Dense(10, activation= 'softmax'))

model.compile(loss = 'sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(x_train, y_train, epochs = 20, validation_data = (x_val, y_val))

# fully connected 2 layers (88-89% accuracy on validation)
model = Sequential()
model.add(Flatten(input_shape = [28,28]))
model.add(Dense(100, activation = 'relu'))
model.add(Dense(100, activation = 'relu'))
model.add(Dense(10, activation= 'softmax'))

# see number of parameters (approx 90k)
model.summary()
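
# Train the 2-layer model the same way as the baseline (sketch, reusing the data loaded above):
model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=20, validation_data=(x_val, y_val))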
Example #4
               blocks=[int(b.shape[-3]), 1, 1],
               strides=[int(b.shape[-3]), 1, 1],
               softmax_mode=True,
               normalize_offsets=True,
               use_unshared_regions=True,
               unshared_offset_region=[2])(b)
    b = sum_pooling_layer(b, pool_size=(2, 2))

b = sk.Mex(num_classes,
           blocks=[mex_channels, 1, 1],
           strides=[mex_channels, 1, 1],
           softmax_mode=True,
           normalize_offsets=True,
           use_unshared_regions=True,
           shared_offset_region=[1])(b)
b = Flatten(data_format='channels_first')(b)
model = Model(inputs=[a], outputs=[b])

print(model.summary())


def softmax_loss(y_true, y_pred):
    #return K.categorical_crossentropy(y_pred, y_true, True)
    return keras.losses.categorical_crossentropy(y_true, y_pred)


model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.nadam(lr=5e-1, epsilon=1e-6),
              metrics=['accuracy'])

sk.perform_unsupervised_init(model,
Example #5
    def __init__(self):
        super(MnistModel, self).__init__()
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')
x2 = Convolution2D(no_layer[1], 5,5, activation='tanh', border_mode='same')(x2)
x2 = Convolution2D(no_layer[0], 3,3, activation='tanh', border_mode='same')(x2)
x2 = BatchNormalization(mode=0, axis=1)(x2)
x2 = UpSampling2D((2, 2))(x2)
decode2 = Convolution2D(no_layer[1],3,3, border_mode='same', activation='tanh')(x2)

#Make layer2
autoencoder2 = Model(input_img2, decode2)
autoencoder2.compile(optimizer=opti, loss='mean_squared_error')
encoder2 = Model(input=input_img2, output=y2)
json_string = autoencoder2.to_json()
open(path2sav+'autoencoder2_temp.json', 'w').write(json_string)

# Define layer3
input_img3 = Input(shape=(encoder2.output_shape[1], encoder2.output_shape[2], encoder2.output_shape[3]))
x3 = Flatten()(input_img3)
x3 = Dense(500, activation='tanh')(x3)
y3 = Dense(100, activation='tanh')(x3)
x3 = Dense(500, activation='tanh')(y3)
x3 = Dense(encoder2.output_shape[1]*encoder2.output_shape[2]*encoder2.output_shape[3], activation='tanh')(x3)
decode3 = Reshape((encoder2.output_shape[1], encoder2.output_shape[2], encoder2.output_shape[3])) (x3)

#Make layer3
autoencoder3 = Model(input_img3, decode3)
autoencoder3.compile(optimizer=opti, loss='mean_squared_error')
encoder3 = Model(input=input_img3, output=y3)
json_string = autoencoder3.to_json()
open(path2sav+'autoencoder3_temp.json', 'w').write(json_string)

folder=sorted(glob.glob(path+name+'/*'))
Example #7
model_x.add(Activation('relu'))
print("Output shape of 1st convolution (2d):", model_x.output_shape)
model_x.add(Convolution2D(nb_filters, kernel_size_2d[0], kernel_size_2d[1]))
model_x.add(Activation('relu'))
print("Output shape of 2nd convolution (2d):", model_x.output_shape)
model_x.add(MaxPooling2D(pool_size=pool_size_2d))
#model_x.add(Dropout(0.25))
print("Output shape after max pooling (2d):", model_x.output_shape)
model_x.add(
    Convolution2D(nb_filters + 10, kernel_size_2d[0], kernel_size_2d[1]))
model_x.add(Activation('relu'))
print("Output shape of 3rd convolution (2d):", model_x.output_shape)
model_x.add(MaxPooling2D(pool_size=pool_size_2d))
#model_x.add(Dropout(0.25))
print("Output shape after max pooling (2d):", model_x.output_shape)
model_x.add(Flatten())
print("Output shape after flatten (2d):", model_x.output_shape)

## parallel NN, y
model_y = Sequential()

model_y.add(
    Convolution2D(nb_filters - 5,
                  kernel_size_2d[0],
                  kernel_size_2d[1],
                  border_mode='valid',
                  input_shape=input_shape_2d))
model_y.add(Activation('relu'))

model_y.add(Convolution2D(nb_filters, kernel_size_2d[0], kernel_size_2d[1]))
model_y.add(Activation('relu'))
Example #8
def EnvNetv2(x_shape, num_classes):
    # model: raw-wave to classification
    # x_shape: (input_length, ), with input_length = n_t

    inp = keras.engine.Input(shape=x_shape, name='input')
    inp2 = keras.layers.core.RepeatVector(1)(inp)
    inp2 = Permute((2, 1))(inp2)

    # conv1
    x = Convolution1D(filters=32,
                      kernel_size=64,
                      strides=2,
                      padding='valid',
                      name='conv1',
                      kernel_initializer='he_normal')(inp2)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # conv2
    x = Convolution1D(filters=64,
                      kernel_size=16,
                      strides=2,
                      padding='valid',
                      name='conv2',
                      kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # maxpooling + swap
    poolsize = 64
    x = MaxPooling1D(pool_size=poolsize)(x)
    reshape_size = x.get_shape().as_list()[1]
    x = Permute((2, 1))(x)
    x = Reshape((64, reshape_size, 1))(x)

    # conv3
    x = Convolution2D(filters=32,
                      kernel_size=(8, 8),
                      strides=1,
                      name='conv3',
                      kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # conv4
    x = Convolution2D(filters=32,
                      kernel_size=(8, 8),
                      strides=1,
                      name='conv4',
                      kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # maxpooling
    x = MaxPooling2D(pool_size=(5, 3))(x)

    # conv5
    x = Convolution2D(filters=64,
                      kernel_size=(1, 4),
                      strides=1,
                      name='conv5',
                      kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # conv6
    x = Convolution2D(filters=64,
                      kernel_size=(1, 4),
                      strides=1,
                      name='conv6',
                      kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # maxpooling
    x = MaxPooling2D(pool_size=(1, 2))(x)

    # conv7
    x = Convolution2D(filters=128,
                      kernel_size=(1, 2),
                      strides=1,
                      name='conv7',
                      kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # conv8
    x = Convolution2D(filters=128,
                      kernel_size=(1, 2),
                      strides=1,
                      name='conv8',
                      kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # maxpooling
    x = MaxPooling2D(pool_size=(1, 2))(x)

    # conv9
    x = Convolution2D(filters=256,
                      kernel_size=(1, 2),
                      strides=1,
                      name='conv9',
                      kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # conv10
    x = Convolution2D(filters=256,
                      kernel_size=(1, 2),
                      strides=1,
                      name='conv10',
                      kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # maxpooling
    x = MaxPooling2D(pool_size=(1, 2))(x)

    # fc11
    x = Flatten()(x)
    # x = Dense(4096, activation='relu', name='fc11')(x)
    # x = Dropout(0.5)(x)
    #
    # # fc12
    # x = Dense(4096, activation='relu', name='fc12')(x)
    # x = Dropout(0.5)(x)

    # fc13
    x = Dense(num_classes, name='fc_classes')(x)  # , activation='relu')(x)

    # to categorical, softmax
    x = Activation(tf.nn.softmax)(x)

    return keras.engine.Model(input=inp, output=x)
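
# Usage sketch (assumption: raw 1-D waveforms of length input_length and one-hot labels):
# model = EnvNetv2((input_length,), num_classes)
# model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.summary()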
Example #9
def EnvNet(x_shape, num_classes):
    # model: raw-wave to classification
    # x_shape: (input_length, ), with input_length = n_t

    filters_raw = 40
    poolsize_raw = 160
    new_dim = 149

    inp = keras.engine.Input(shape=x_shape, name='input')
    inp2 = keras.layers.core.RepeatVector(1)(inp)
    inp2 = Permute((2, 1))(inp2)

    # conv1
    x = Convolution1D(filters=filters_raw,
                      kernel_size=(8),
                      strides=1,
                      padding='valid',
                      name='conv1')(inp2)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # conv2
    x = Convolution1D(filters=filters_raw,
                      kernel_size=8,
                      strides=1,
                      padding='valid',
                      name='conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # maxpooling + swap
    print(x.shape)
    x = MaxPooling1D(pool_size=(poolsize_raw))(x)
    print(x.shape)
    x = Permute((2, 1))(x)
    print(x.shape)
    #     new_shape = (filters_raw, int(x.shape[1]/poolsize_raw), 1)
    new_shape = (filters_raw, new_dim, 1)
    print(new_shape)
    x = Reshape(new_shape)(x)
    # model.add(Reshape((model.output_shape[1], model.output_shape[2], 1)))

    # conv3
    x = Convolution2D(filters=50, kernel_size=(8, 13), strides=1,
                      name='conv3')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # maxpooling
    x = MaxPooling2D(pool_size=(3, 3))(x)

    # conv4
    x = Convolution2D(filters=50, kernel_size=(1, 5), strides=1,
                      name='conv4')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # maxpooling
    x = MaxPooling2D(pool_size=(1, 3))(x)

    # fc5
    x = Flatten()(x)
    # x = Dense(4096, activation='relu',  name='fc5')(x)
    # x =  Dropout(0.5)(x)

    # fc6
    # x = Dense(4096, activation='relu',  name='fc6')(x)
    # x = Dropout(0.5)(x)

    # fc7
    x = Dense(num_classes, name='fc7')(x)  #, activation='relu')(x)

    # to categorical, softmax
    x = Activation(tf.nn.softmax)(x)

    return keras.engine.Model(input=inp, output=x)
Example #10
embedding_layer = Embedding(len(word_index),
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True)

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)  # global max pooling
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
preds = Dense(2, activation='softmax')(x)

model = Model(sequence_input, preds)
model.compile(loss='binary_crossentropy', optimizer=rmsprop, metrics=['acc'])

print(model.summary())

# happy learning!
history = model.fit(x_train,
                    y_train,
                    validation_data=(x_val, y_val),
                    epochs=50,
                    batch_size=512)
Example #11
(etreino,streino), (eteste,steste) = mnist.load_data()
entradas = etreino.reshape(etreino.shape[0], 28, 28, 1)
entradas = entradas.astype('float32')
entradas /= 255
saidas = np_utils.to_categorical(streino, 10)

kfold = StratifiedKFold(n_splits = 5, shuffle = True, random_state = seed)
resultados = []

a = np.zeros(5)
b = np.zeros(shape = (saidas.shape[0], 1))

for evalcruzada,svalcruzada in kfold.split(entradas, 
                                            np.zeros(shape=(saidas.shape[0],1))):
    classificador = Sequential()
    classificador.add(Conv2D(32, (3,3), input_shape=(28,28,1), activation='relu'))
    classificador.add(MaxPooling2D(pool_size = (2,2)))
    classificador.add(Flatten())
    classificador.add(Dense(units = 128, activation = 'relu'))
    classificador.add(Dense(units = 10, activation = 'softmax'))
    classificador.compile(loss = 'categorical_crossentropy', optimizer='adam',
                          metrics = ['accuracy'])
    classificador.fit(entradas[evalcruzada], saidas[evalcruzada],
                      batch_size = 128, epochs = 5)
    precisao = classificador.evaluate(entradas[svalcruzada], saidas[svalcruzada])
    resultados.append(precisao[1])

media = sum(resultados) / len(resultados)
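
# Report the spread across the folds as well as the mean (sketch):
desvio = np.std(resultados)
print('cross-validation accuracy: %.4f +/- %.4f' % (media, desvio))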

Example #12
x_test = x_test[idx2]
y_test = y_test[idx2]

train_labels = to_categorical(y_train)
y_test = to_categorical(y_test)

base_model = applications.VGG16(weights="imagenet",
                                include_top=False,
                                input_shape=(28, 28, 1))  # note: ImageNet weights require 3-channel inputs of at least 32x32, so grayscale 28x28 data must be resized/stacked first
print(base_model.summary())

for layer in base_model.layers[:15]:
    layer.trainable = False

model = Sequential()
model.add(Flatten(input_shape=base_model.output_shape[1:]))
model.add(Dense(128, activation='relu'))

model.add(Dropout(0.5))
model.add(Dense(5, activation='sigmoid'))

model = Model(inputs=base_model.input,
              outputs=model(base_model.output))  # new network = pre-trained base + custom top

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
              metrics=['accuracy'])
"""
train_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)  # training-data preprocessor, random horizontal flip
test_datagen = ImageDataGenerator(rescale=1. / 255)  # test-data preprocessor
train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width),
Example #13
    def build_discriminator(self):

        k = 4
        s = 2

        model = Sequential()

        # First Layer
        model.add(
            Conv2D(
                filters=self.ndf,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
                input_shape=self.img_shape,
            )
        )
        model.add(LeakyReLU(alpha=0.2))

        # Layer 2
        model.add(
            Conv2D(
                filters=self.ndf*2,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
            )
        )
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # Layer 3
        model.add(
            Conv2D(
                filters=self.ndf*4,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
            )
        )
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # Layer 4
        model.add(
            Conv2D(
                filters=self.ndf*8,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
            )
        )
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # Layer 5
        model.add(
            Conv2D(
                filters=self.ndf*16,
                kernel_size=k,
                strides=s,
                padding='same',
                use_bias=False,
            )
        )
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # Final Layer
        model.add(Flatten())
        model.add(Dropout(.3))
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
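
    # Usage sketch (assumption): compile the discriminator for standalone training,
    # as is typical in a DCGAN-style setup.
    # discriminator = self.build_discriminator()
    # discriminator.compile(loss='binary_crossentropy', optimizer=Adam(0.0002, 0.5), metrics=['accuracy'])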
Example #14
def model_define_():
    is_cata = False
    reps = []
    ip = Input(shape=(10, 10, 2))
    ipc = Input(shape=(1,))
    h = Conv2D(32, 3, activation='elu')(ip)
    h = MaxPool2D()(h)
    reps.append(Flatten(name='rep0')(h))

    h = Conv2D(128, 3, activation='elu')(h)
    h = MaxPool2D()(h)
    h = Dropout(0.5)(h)
    # h = Conv2D(256, 3, activation='elu')(h)
    # h = Dropout(0.5)(h)
    # h = Conv2D(512, 3, activation='elu')(h)
    reps.append(Flatten(name='rep1')(h))

    h = Conv2D(8, 3, activation='elu', padding='same')(ip)
    h = MaxPool2D()(h)
    h = Conv2D(32, 3, activation='elu', padding='same')(h)
    h = MaxPool2D()(h)
    h = Conv2D(128, 3, activation='elu', padding='same')(h)
    h = MaxPool2D()(h)
    h = Conv2D(512, 1, activation='elu', padding='same')(h)
    h = Dropout(0.5)(h)
    h = Flatten(name='rep2')(h)
    reps.append(h)

    h = Conv2D(8, 3, activation='elu')(ip)
    h = Conv2D(16, 3, activation='elu')(h)
    h = Conv2D(32, 3, activation='elu')(h)
    h = Conv2D(64, 3, activation='elu')(h)
    h = Conv2D(64, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep3')(h))

    h = Conv2D(8, 5, activation='elu', padding='same')(ip)
    h = MaxPool2D()(h)
    h = Conv2D(32, 5, activation='elu', padding='same')(h)
    h = MaxPool2D()(h)
    h = Conv2D(64, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep4')(h))

    h = Conv2D(32, 5, activation='elu')(ip)
    h = Conv2D(64, 5, activation='elu')(h)
    h = Conv2D(64, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep5')(h))

    h = Flatten()(ip)
    reps.append(h)
    for i in range(2):
        h = Dense(128, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Dense(128, activation='elu', name='rep6')(h))

    h = Conv2D(8, 5, activation='elu', padding='same')(ip)
    h = MaxPool2D(pool_size=(1, 2))(h)
    h = Conv2D(16, 5, activation='elu', padding='same')(h)
    h = MaxPool2D(pool_size=(1, 2))(h)
    h = Conv2D(32, 1, activation='elu')(h)
    h = Dropout(0.5)(h)
    reps.append(Flatten(name='rep7')(h))

    reps.append(ipc)
    h = concatenate(reps)
    h = Dense(1024, activation='elu')(h)
    h = Dropout(0.5)(h)
    h = Dense(1024, activation='elu')(h)
    h = Dropout(0.5)(h)
    out = Dense(1)(h)
    out = add([out, ipc])
    m = Model([ip, ipc], out)
    opt = Adam(lr=1e-3)
    m.compile(loss='mse', optimizer=opt)
    m.summary()
    return m
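
# Usage sketch (assumption: X_grid has shape (n, 10, 10, 2) and X_scalar has shape (n, 1)):
# m = model_define_()
# m.fit([X_grid, X_scalar], y, batch_size=64, epochs=10, validation_split=0.1)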
Example #15
    def create_network(self):
        '''
            1. Build the CNN network

                1. Input layer, 2D, (n_batch, input_length, input_dim)
                2. Reshape layer: reshape the embedding into a 4-dim shape, 4D
                3. First multi-size convolution layer (with 1-max pooling), using three kernel sizes.
                4. Flatten layer: concatenate the convolution results into a single hidden vector
                5. output hidden layer
                6. output Dropout layer
                7. softmax classification layer
            2. compile the model

        :return: cnn model network
        '''

        from keras.layers import Input, Activation, Reshape, Dropout, Flatten, BatchNormalization
        from keras.models import Model
        # from keras import backend as K

        # 1. Input layer
        l1_input_shape = (self.input_length, self.input_dim)
        # l1_input_shape = ( None,self.input_dim)
        l1_input = Input(shape=l1_input_shape)

        # 2. Reshape layer: reshape the embedding into a 4-dim shape
        l2_reshape_output_shape = (1, l1_input_shape[0], l1_input_shape[1])
        # print(l2_reshape_output_shape)
        # quit()
        l2_reshape = Reshape(l2_reshape_output_shape)(l1_input)
        # l2_reshape = BatchNormalization(axis=1)(l2_reshape)

        # 3. First convolution layer: multi-size convolutions (with 1-max pooling), using three kernel sizes.
        l3_conv = self.create_convolution_layer(
            input_layer=l2_reshape,
            convolution_filter_type=self.l1_conv_filter_type,
        )
        # 4. Second convolution layer: single-size convolution plus max pooling
        l4_conv = self.create_convolution_layer(
            input_layer=l3_conv,
            convolution_filter_type=self.l2_conv_filter_type,
        )
        # model = Model(input=l1_input, output=[l3_conv])
        # model.summary()
        # quit()
        # 5. Flatten layer: concatenate the convolution results into a single hidden vector
        l5_flatten = Flatten()(l4_conv)
        # 6. Fully connected layer
        l6_full_connected_layer = self.create_full_connected_layer(
            input_layer=l5_flatten, units=self.full_connected_layer_units)

        l7_output_layer = self.create_full_connected_layer(
            input_layer=l6_full_connected_layer,
            units=[[self.num_labels, 0., 'none', 'none']])

        # 8. softmax classification layer
        l8_softmax_output = Activation("softmax")(l7_output_layer)

        model = Model(input=l1_input, output=[l8_softmax_output])

        if self.verbose > 0:
            model.summary()

        return model
img_width, img_height = 150, 150

train_data_dir = 'train'
validation_data_dir = 'validation'
nb_train_samples = 97
nb_validation_samples = 23
epochs = 50
batch_size = 2

# build the VGG16 network
model = applications.VGG16(weights=weights_path, include_top=False, input_shape=(150,150,3))
print('Model loaded.')

# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))

# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning

# add the model on top of the convolutional base
#model.add(top_model)
x = model.output
x = Flatten(input_shape=model.output_shape[1:])(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(model.input, x)
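
# Fine-tuning sketch (assumption, mirroring the freezing step in Example #12): freeze the
# first 15 layers (the VGG16 convolutional base up to the last block) so that only the
# newly added top layers are updated at first.
for layer in model.layers[:15]:
    layer.trainable = False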
Example #17
def CNN_conf(cfg, hist_save, epochs=1, test=False, gpu_no=0):
    verbose = 1  #CHRIS TODO set this to 0
    batch_size = 100
    num_classes = 10
    epochs = 2000  #CHRIS increased from 1 to 5 to make results less random and noisy
    data_augmentation = False
    num_predictions = 20
    logfile = 'mnist-cnn.log'
    savemodel = False

    # The data, shuffled and split between train and test sets:
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()  #mnist.load_data()

    #CHRIS reshape only needed for mnist
    #x_train = x_train.reshape(x_train.shape[0],x_train.shape[1],x_train.shape[2],1)
    #x_test = x_test.reshape(x_test.shape[0],x_test.shape[1],x_test.shape[2],1)

    cfg_df = pd.DataFrame(cfg, index=[0])

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train.flatten(), num_classes)
    y_test = keras.utils.to_categorical(y_test.flatten(), num_classes)

    #print('skip steps:')
    #print([cfg['skint_0'],cfg['skint_1'],cfg['skint_2']],[cfg['skst_0'],cfg['skst_1'],cfg['skst_2']])
    #(skip_ints,skip_ints_count) passed to Skip_manager constructor TODO get from cfg vector
    skip_manager = Skip_manager(
        [cfg['skint_0'], cfg['skint_1'], cfg['skint_2']],
        [cfg['skst_0'], cfg['skst_1'], cfg['skst_2']])

    input1 = keras.layers.Input(shape=(x_train.shape[1], x_train.shape[2],
                                       x_train.shape[3]))

    layer = Dropout(cfg['dropout_0'], input_shape=x_train.shape[1:])(input1)
    layer = skip_manager.connect_skip(layer)
    #CHRIS removed following:
    #layer = Conv2D(cfg['filters_0'], (cfg['k_0'], cfg['k_0']), padding='same',kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
    #layer = Activation(cfg['activation'])(layer)#kernel_initializer='random_uniform',
    #layer = skip_manager.connect_skip(layer)

    #stack 0
    for i in range(cfg['stack_0']):
        layer = Conv2D(cfg['filters_0'], (cfg['k_0'], cfg['k_0']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_0'] > 0):
        #maxpooling as cnn
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_1'], (cfg['k_1'], cfg['k_1']),
                           strides=(cfg['s_0'], cfg['s_0']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_1'], cfg['k_1']),
                                 strides=(cfg['s_0'], cfg['s_0']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_1'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 1
    for i in range(cfg['stack_1']):
        layer = Conv2D(cfg['filters_2'], (cfg['k_2'], cfg['k_2']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_1'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_3'], (cfg['k_3'], cfg['k_3']),
                           strides=(cfg['s_1'], cfg['s_1']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_3'], cfg['k_3']),
                                 strides=(cfg['s_1'], cfg['s_1']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_2'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 2
    for i in range(cfg['stack_2']):
        layer = Conv2D(cfg['filters_4'], (cfg['k_4'], cfg['k_4']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_2'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_5'], (cfg['k_5'], cfg['k_5']),
                           strides=(cfg['s_2'], cfg['s_2']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_5'], cfg['k_5']),
                                 strides=(cfg['s_2'], cfg['s_2']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_3'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 3
    for i in range(cfg['stack_3']):
        layer = Conv2D(cfg['filters_6'], (cfg['k_6'], cfg['k_6']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_3'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_7'], (cfg['k_7'], cfg['k_7']),
                           strides=(cfg['s_3'], cfg['s_3']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_7'], cfg['k_7']),
                                 strides=(cfg['s_3'], cfg['s_3']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_4'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 4
    for i in range(cfg['stack_4']):
        layer = Conv2D(cfg['filters_8'], (cfg['k_8'], cfg['k_8']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_4'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_9'], (cfg['k_9'], cfg['k_9']),
                           strides=(cfg['s_4'], cfg['s_4']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_9'], cfg['k_9']),
                                 strides=(cfg['s_4'], cfg['s_4']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_5'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 5
    for i in range(cfg['stack_5']):
        layer = Conv2D(cfg['filters_10'], (cfg['k_10'], cfg['k_10']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_5'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_11'], (cfg['k_11'], cfg['k_11']),
                           strides=(cfg['s_5'], cfg['s_5']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_11'], cfg['k_11']),
                                 strides=(cfg['s_5'], cfg['s_5']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_6'])(layer)
        layer = skip_manager.connect_skip(layer)

    #stack 6
    for i in range(cfg['stack_6']):
        layer = Conv2D(cfg['filters_12'], (cfg['k_12'], cfg['k_12']),
                       padding='same',
                       kernel_regularizer=l2(cfg['l2']),
                       bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = skip_manager.connect_skip(layer)
    if (cfg['stack_6'] > 0):
        if (cfg['no_pooling']):
            layer = Conv2D(cfg['filters_13'], (cfg['k_13'], cfg['k_13']),
                           strides=(cfg['s_6'], cfg['s_6']),
                           padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
        else:
            layer = MaxPooling2D(pool_size=(cfg['k_13'], cfg['k_13']),
                                 strides=(cfg['s_6'], cfg['s_6']),
                                 padding='same')(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_7'])(layer)
        layer = skip_manager.connect_skip(layer)

    #global averaging
    if (cfg['global_pooling']):
        layer = GlobalAveragePooling2D()(layer)
    else:
        layer = Flatten()(layer)

    #head
    if cfg['dense_size_0'] > 0:
        layer = Dense(cfg['dense_size_0'],
                      kernel_regularizer=l2(cfg['l2']),
                      bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activ_dense'])(layer)
    if cfg['dense_size_1'] > 0:
        layer = Dense(cfg['dense_size_1'],
                      kernel_regularizer=l2(cfg['l2']),
                      bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activ_dense'])(layer)
    layer = Dense(num_classes,
                  kernel_regularizer=l2(cfg['l2']),
                  bias_regularizer=l2(cfg['l2']))(layer)
    layer = Activation(cfg['activ_dense'])(layer)

    cfg['decay'] = cfg['lr'] / float(epochs)

    def step_decay(epoch):
        initial_lrate = cfg['lr']
        drop = 0.1
        epochs_drop = 20.0
        lrate = initial_lrate * math.pow(drop,
                                         math.floor((1 + epoch) / epochs_drop))
        return lrate
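
    # With epochs_drop = 20: epochs 0-18 keep cfg['lr'], epochs 19-38 use cfg['lr'] * 0.1,
    # epochs 39-58 use cfg['lr'] * 0.01, and so on (a tenfold drop roughly every 20 epochs).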

    callbacks = []
    if (cfg['step'] == True):
        callbacks = [LearningRateScheduler(step_decay)]
        cfg['decay'] = 0.

    # initiate RMSprop optimizer
    #opt = keras.optimizers.rmsprop(lr= cfg['lr'], decay=cfg['decay'])
    opt = keras.optimizers.SGD(lr=cfg['lr'],
                               momentum=0.9,
                               decay=cfg['decay'],
                               nesterov=False)

    model = keras.models.Model(inputs=input1, outputs=layer)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    if test:
        return model  #TODO remove this, just for testing

    #print("amount of parameters:")
    #print(model.count_params())
    #CHRIS test if gpu has enough memory
    nvmlInit()
    handle = nvmlDeviceGetHandleByIndex(int(gpu_no))
    meminfo = nvmlDeviceGetMemoryInfo(handle)
    #max_size = meminfo.total #6689341440
    if meminfo.free / 1024.**2 < 1.0:
        print('gpu is already in use')
    nvmlShutdown()
    #if model.count_params()*4*2 >= max_size:#CHRIS *4*2: 4 byte per parameter times 2 for backpropagation
    #print('network too large for memory')
    #return 1000000000.0*(model.count_params()*4*2/max_size), 5.0*(model.count_params()*4*2/max_size)

    #max_size = 32828802 * 2 #CHRIS twice as large as RESnet-34-like implementation
    #max_size = 129200130 #CHRIS twice as wide as RESnet-34-like implementation with batchsize=10, one network of this size was able to be ran on tritanium gpu
    max_size = 130374394  #CHRIS twice as wide as RESnet-34-like implementation with batchsize=100, one network of this size was able to be ran on tritanium gpu
    #if model.count_params() > max_size:
    #print('network too large for implementation')
    #return 1000000000.0*(model.count_params()/max_size), 5.0*(model.count_params()/max_size)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255.
    x_test /= 255.

    hist_func = TimedAccHistory()

    if not data_augmentation:
        print('Not using data augmentation.')
        start = time.time()
        hist = model.fit(x_train,
                         y_train,
                         batch_size=batch_size,
                         epochs=epochs,
                         validation_data=(x_test, y_test),
                         callbacks=[hist_func],
                         verbose=verbose,
                         shuffle=True)
        stop = time.time()
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images horizontally
            vertical_flip=False)  # no vertical flips
        datagen.fit(x_train)

        # Fit the model on the batches generated by datagen.flow().
        start = time.time()
        hist = model.fit_generator(datagen.flow(x_train,
                                                y_train,
                                                batch_size=batch_size),
                                   verbose=verbose,
                                   callbacks=callbacks,
                                   epochs=epochs,
                                   steps_per_epoch=len(x_train) / batch_size,
                                   validation_data=(x_test, y_test))
        stop = time.time()

    timer = stop - start
    #print('run-time:')
    #print(timer)
    hist_save.append([hist.history['val_acc'], hist_func.timed])

    if savemodel:
        model.save('best_model_mnist.h5')
    maxval = max(hist.history['val_acc'])
    #loss = -1 * math.log( 1.0 - max(hist.history['val_acc']) ) #np.amin(hist.history['val_loss'])
    loss = -1 * math.log(max(hist.history['val_acc']))  #CHRIS minimizing this will maximize accuracy
    #print('max val_acc:')
    #print(max(hist.history['val_acc']))
    #print('loss:')
    #print(loss)
    #perf5 = max(hist.history['val_top_5_categorical_accuracy'])

    if logfile is not None:
        log_file = logfile  #os.path.join(data_des, logfile)
        cfg_df['perf'] = maxval

        # save the configurations to log file
        if os.path.isfile(log_file):
            cfg_df.to_csv(log_file, mode='a', header=False, index=False)
        else:
            cfg_df.to_csv(log_file, mode='w', header=True, index=False)
    return timer, loss
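
# Usage sketch (assumption): cfg is a dict supplying every hyperparameter read above
# (stack_i, filters_i, k_i, s_i, dropout_i, skint_i, skst_i, lr, l2, step, activation,
# activ_dense, no_pooling, global_pooling, dense_size_0, dense_size_1).
# hist_save = []
# runtime, loss = CNN_conf(cfg, hist_save, test=False, gpu_no=0)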
Example #18
    print("Iteration " + str(i) + ":\n===============================")

    model = Sequential()
    model.add(
        Convolution2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)))
    model.add(Convolution2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_last"))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_last"))
    model.add(Dropout(0.25))

    model.add(
        Flatten())  # this converts our 3D feature maps to 1D feature vectors
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(21))
    model.add(Activation('sigmoid'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', acc_top3])

    train_generator = train_datagen.flow_from_directory(
        '../10CrossFolderValidation/' + str(i) + '/train',
        target_size=(64, 64),
        batch_size=128,
        class_mode='categorical')
e2_embed =  Reshape([300,1])(e2_embed)

print(x_embed.shape)
W1x = MyLayer()(x_embed)
print(W1x.shape)
W2x = MyLayer()(x_embed)
A1 = merge([W1x, e1_embed], output_shape=(73, 1), mode=get_R)
A1 = Lambda(lambda x: x / 300)(A1)
print(A1.shape)
A2 = merge([W2x, e2_embed], output_shape=(73, 1), mode=get_R)
A2 = Lambda(lambda x: x / 300)(A2)
print(A2.shape)
alpha1 = Lambda(lambda x: softmax(x, axis=0), name="alpha1")(A1)
alpha2 = Lambda(lambda x: softmax(x, axis=0), name="alpha2")(A2)
alpha = average([alpha1, alpha2])
alpha = Flatten()(alpha)
alpha = RepeatVector(300)(alpha)
alpha = Reshape([73, 300])(alpha)
print(alpha.shape)
att_output = multiply([x_embed, alpha])


c_models = merge([distanceModel1, distanceModel2, POSModel2, att_output], mode='concat', concat_axis=-1)

c_models = Convolution1D(nb_filter=nb_filter,
                        filter_length=filter_length,
                        border_mode='same',
                        activation='tanh',
                        subsample_length=1)(c_models)

c_models = Bidirectional(LSTM(150))(c_models)
Example #20
train = False
if train:
    users, movies, ratings = parse_train()
    users_train, users_val, movies_train, movies_val, ratings_train, ratings_val \
        = train_test_split(users, movies, ratings, test_size=0.1)

    n_users = 6040
    n_movies = 3952
    d = 128

    user_input = Input(shape=(1, ))
    user_embedding = Embedding(input_dim=n_users + 1,
                               output_dim=d,
                               embeddings_initializer='orthogonal')(user_input)
    user_embedding = Flatten()(user_embedding)

    movie_input = Input(shape=(1, ))
    movie_embedding = Embedding(
        input_dim=n_movies + 1,
        output_dim=d,
        embeddings_initializer='orthogonal')(movie_input)
    movie_embedding = Flatten()(movie_embedding)

    user_bias = Embedding(input_dim=n_users + 1,
                          output_dim=1,
                          embeddings_initializer='orthogonal',
                          embeddings_constraint=non_neg())(user_input)
    user_bias = Flatten()(user_bias)
    movie_bias = Embedding(input_dim=n_movies + 1,
                           output_dim=1,
Example #21
X = LeakyReLU(alpha=0.2)(X)
X = Dropout(0.25)(X)
X = Conv2D(64, kernel_size=3, strides=2, padding="same")(X)
X = ZeroPadding2D(padding=((0, 1), (0, 1)))(X)
X = BatchNormalization(momentum=0.8)(X)
X = LeakyReLU(alpha=0.2)(X)
X = Dropout(0.25)(X)
X = Conv2D(128, kernel_size=3, strides=2, padding="same")(X)
X = BatchNormalization(momentum=0.8)(X)
X = LeakyReLU(alpha=0.2)(X)
X = Dropout(0.25)(X)
X = Conv2D(256, kernel_size=3, strides=1, padding="same")(X)
X = BatchNormalization(momentum=0.8)(X)
X = LeakyReLU(alpha=0.2)(X)
X = Dropout(0.25)(X)
X = Flatten()(X)
X = Dense(2048, activation='relu')(X)
X = Dense(1024, activation='relu')(X)
X = Dense(512, activation='relu')(X)
X = Dense(72, activation='relu')(X)
#_________ Semantic ____________
S_input = Input(shape=(100,), name='S_in')
S = Dense(72, activation='relu', name='S')(S_input)
V_input = Input(shape=(72, 72), name='V_input')
#_________ Combine Visual and Semantic ___________
XS = Dot(axes=-1)([S, V_input])
XS = Multiply(name='XS_2')([X, XS])
#_________ Concept Learning Layers ______________
XS = Dense(72, activation='relu', name='A_t')(XS)
XS = Dense(100, activation='relu')(XS)
XS = Dense(256, activation='relu', name='XS_4')(XS)
Example #22
''' Dense 10x10x10 '''
test_x = np.random.rand(10, 10).astype('f')
test_y = np.random.rand(10, 10).astype('f')
model = Sequential([
    Dense(10, input_dim=10),
    Dense(10)
])
output_testcase(model, test_x, test_y, 'dense_10x10x10', '1e-6')


''' Conv1D 2 '''
test_x = np.random.rand(10, 2, 1).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential([
    Conv1D(1, 2, input_shape=(2, 1)),
    Flatten(),
    Dense(1)
])
output_testcase(model, test_x, test_y, 'conv1d_2', '1e-6')


''' Conv1D 3 '''
test_x = np.random.rand(10, 3, 1).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential([
    Conv1D(1, 3, input_shape=(3, 1)),
    Flatten(),
    Dense(1)
])
output_testcase(model, test_x, test_y, 'conv1d_3', '1e-6')
Example #23
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3'))
model.add(MaxPool2D((2, 2), strides=(2, 2), name='block3_pool'))

# Fourth convolutional block
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3'))
model.add(MaxPool2D((2, 2), strides=(2, 2), name='block4_pool'))
          
# Fifth convolutional block
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3'))
model.add(MaxPool2D((2, 2), strides=(2, 2), name='block5_pool'))
          
model.add(Flatten(name='flatten'))
model.add(Dense(4096, activation='relu', name='fc1'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu', name='fc2'))
model.add(Dense(1000, activation='softmax', name='predictions'))

model.summary()

plot_model(model, to_file='vgg16.png')
Image('vgg16.png')

'''
# Build the model with the Keras functional API
input_shape = (224,224,3)

# First convolutional block
Example #24
    def __call__(self, input, input_labels=None):

        # 8, 125
        filters = self.filters * 8  # 64*8=512

        if input_labels is not None:
            size = 8 * 125 * filters  # 8x125x512=512000
            embed = Embedding(self.num_class, size)(input_labels)
            embed = Flatten()(embed)
            x = Flatten()(input)
            output1 = Add()([embed, x])  # Add expects a list of tensors
            output1 = Reshape((8, 125, filters))(output1)
        else:
            output1 = input

        # the main trunk below continues from this combined tensor
        x = output1

        output1 = Conv2D(filters=self.num_class,
                         kernel_size=1,
                         padding='same',
                         data_format='channels_last',
                         kernel_initializer='glorot_uniform',
                         activation='relu')(output1)  # 8, 125

        x = Conv2D(filters=filters,
                   kernel_size=3,
                   padding='same',
                   data_format='channels_last',
                   kernel_initializer='glorot_uniform',
                   activation='relu')(x)
        x = Conv2D(filters=filters,
                   kernel_size=3,
                   padding='same',
                   data_format='channels_last',
                   kernel_initializer='glorot_uniform',
                   activation='relu')(x)
        x = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(x)

        # 4, 62
        output2 = x
        output2 = Conv2D(filters=self.num_class,
                         kernel_size=1,
                         padding='same',
                         data_format='channels_last',
                         kernel_initializer='glorot_uniform',
                         activation='relu')(x)
        output2 = Conv2DTranspose(filters=self.num_class,
                                  kernel_size=4,
                                  dilation_rate=2,
                                  padding='valid')(output2)
        output2 = Cropping2D(
            cropping=((1, 1), (1, 0)),
            data_format='channels_last')(output2)  # (4-1)x2+4-1x2=8, (62-1)x2+4-1=125

        x = Conv2D(filters=filters,
                   kernel_size=3,
                   padding='same',
                   data_format='channels_last',
                   kernel_initializer='glorot_uniform',
                   activation='relu')(x)
        x = Conv2D(filters=filters,
                   kernel_size=3,
                   padding='same',
                   data_format='channels_last',
                   kernel_initializer='glorot_uniform',
                   activation='relu')(x)
        x = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(x)

        # 2, 31
        output3 = x
        output3 = Conv2D(filters=self.num_class,
                         kernel_size=1,
                         padding='same',
                         data_format='channels_last',
                         kernel_initializer='glorot_uniform',
                         activation='relu')(output3)
        output3 = Conv2DTranspose(filters=self.num_class,
                                  kernel_size=8,
                                  dilation_rate=4,
                                  padding='valid')(output3)
        output3 = Cropping2D(
            cropping=((2, 2), (2, 1)), data_format='channels_last')(
                output3)  # (2-1)x4+8-2x2=8, (31-1)x4+8-2-1=125

        x = Add()([output1, output2, output3])

        # finally we re-construct the original shape (64, 1000, 1)
        x = Conv2DTranspose(filters=1,
                            kernel_size=16,
                            dilation_rate=8,
                            padding='valid')(
                                x)  # (8-1)x8+16-2x4=64, (125-1)x8+16-2x4=1000
        output = Cropping2D(cropping=((4, 4), (4, 4)),
                            data_format='channels_last')(x)

        # assumed to be Gaussian noise
        return output
Example #25
parser.add_argument('--input-right', default='./data/Synthetic/TR1.png', type=str, help='input right image')
parser.add_argument('--output', default='./TL1.pfm', type=str, help='left disparity map')

# load model for cost computation
print('load model...') 
with open('model4cost.json', "r") as f:
    model = model_from_json(json.loads(f.read()))
model.load_weights('model4cost')

patch_size = 9 # according to the size from training data
feature_dim = 200
################################### define models for extracting features and predicting match cost from features pair ############################################
# feature model
INPUT1_1 = Input(shape = (9,9,1),name='INPUT1_1')
CONV1_1 = Conv2D(32,kernel_size=5,padding='valid',activation='relu',name='CONV1_1')(INPUT1_1)
FLAT1_1 = Flatten()(CONV1_1)
DEN1_2 = Dense(200,activation='relu',name='DEN1_2')(FLAT1_1)
DEN1_3 = Dense(200,activation='relu',name='DEN1_3')(DEN1_2)
model4left_feature = Model(INPUT1_1,DEN1_3)
model4left_feature.load_weights('model4cost',by_name=True)
    
INPUT2_1 = Input(shape = (9,9,1),name='INPUT2_1')
CONV2_1 = Conv2D(32,kernel_size=5,padding='valid',activation='relu',name='CONV2_1')(INPUT2_1)
FLAT2_1 = Flatten()(CONV2_1)    
DEN2_2 = Dense(200,activation='relu',name='DEN2_2')(FLAT2_1)
DEN2_3 = Dense(200,activation='relu',name='DEN2_3')(DEN2_2)
model4right_feature = Model(INPUT2_1,DEN2_3)
model4right_feature.load_weights('model4cost',by_name=True)

# prediction model
INPUT = Input(shape = (1,400))
Example #26
def ResNet50(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000,
             channels="gray"):
    """Instantiates the ResNet50 architecture.

    Optionally loads weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 197.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same',
               name='conv1')(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet50')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
        else:
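            # No-top weights are read from a hard-coded local file (apparently
            # grayscale ResNet50 weights); adjust this path for your environment.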
            weights_path = "/users/ipan/grayscale-models/weights/resnet50_gray.h5"
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                avg_pool = model.get_layer(name='avg_pool')
                shape = avg_pool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

        if (K.image_data_format() == 'channels_first' and
                K.backend() == 'tensorflow'):
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)

    return model
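
A minimal usage sketch follows; it assumes the constructor defined above is importable as `ResNet50` (its name is not shown in this excerpt) and uses `weights=None` so no download is needed:

import numpy as np

# Build the network as a pooled feature extractor (random weights, smoke test only).
extractor = ResNet50(include_top=False, weights=None,
                     input_shape=(224, 224, 3), pooling='avg')
dummy_batch = np.zeros((2, 224, 224, 3), dtype='float32')
features = extractor.predict(dummy_batch)
print(features.shape)  # (2, 2048)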
Ejemplo n.º 27
0
def save_bottlebeck_features():
    np.random.seed(2929)

    vgg_model = applications.VGG16(weights='imagenet',
                                   include_top=False,
                                   input_shape=(150, 150, 3))
    print('Model loaded.')

    #initialise top model
    top_model = Sequential()
    top_model.add(Flatten(input_shape=vgg_model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))

    model = Model(inputs=vgg_model.input, outputs=top_model(vgg_model.output))

    model.trainable = True

    model.summary()

    # The model has 20 layers in total; the classification head counts as one
    # layer, so there are 19 intermediate layers.
    # Freeze groups: [:4], [:7], [:11], [:15], [:19] (groups 1-5);
    # group 0 = everything trainable, group 5 = only the classification layer trainable.
    # The classification layer is always kept trainable.
    # Here the first 7 layers (group 2) are frozen:
    for layer in model.layers[:7]:
        layer.trainable = False

    model.summary()

    train_datagen = ImageDataGenerator(rescale=1. / 255)
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    # Adam with a very small learning rate (an SGD alternative is kept for reference).
    opt = optimizers.Adam(lr=1e-6)
    # opt = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=['accuracy'])

    # Alternative: model.compile(optimizer='rmsprop',
    #                            loss='binary_crossentropy', metrics=['accuracy'])

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples // batch_size,
        verbose=1)

    history_dict = history.history

    #Plotting the training and validation loss
    loss_values = history_dict['loss']
    val_loss_values = history_dict['val_loss']
    epochs_0 = range(1, len(history_dict['acc']) + 1)
    plt.plot(epochs_0, loss_values, 'bo', label='Training loss')
    plt.plot(epochs_0, val_loss_values, 'b', label='Validation loss')
    plt.title(
        'ADvsMC_64_VGG16_Freeze_data2_group2 - Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    #plt.show()
    plt.savefig('ADvsMC_64_VGG16_Freeze_data2_group2_loss.png')
    plt.close()

    #Plotting the training and validation accuracy
    acc_values = history_dict['acc']
    val_acc_values = history_dict['val_acc']
    plt.plot(epochs_0, acc_values, 'bo', label='Training acc')
    plt.plot(epochs_0, val_acc_values, 'b', label='Validation acc')
    plt.title(
        'ADvsMC_64_VGG16_Freeze_data2_group2 - Training and validation accuracy'
    )
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    #plt.show()
    plt.savefig('ADvsMC_64_VGG16_Freeze_data2_group2_acc.png')
    plt.close()
Ejemplo n.º 28
0
def train_detector(X_train, X_test, Y_train, Y_test, nb_filters=32,
                   batch_size=128, nb_epoch=5, nb_classes=2, do_augment=False,
                   save_file='models/detector_model.hdf5'):
    """ vgg-like deep convolutional network """
    
    np.random.seed(1337)  # for reproducibility
      
    # input image dimensions
    img_rows, img_cols = X_train.shape[1], X_train.shape[2]
    
    # size of pooling area for max pooling
    pool_size = (2, 2)
    # convolution kernel size
    kernel_size = (3, 3) 
    input_shape = (img_rows, img_cols, 1)


    model = Sequential()
    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                            border_mode='valid',
                            input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    # (16, 8, 32)
     
    model.add(Convolution2D(nb_filters*2, kernel_size[0], kernel_size[1]))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters*2, kernel_size[0], kernel_size[1]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    # (8, 4, 64) = (2048)
        
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
        
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    
    if do_augment:
        datagen = ImageDataGenerator(
            rotation_range=20,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2)
        datagen.fit(X_train)
        model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                            samples_per_epoch=len(X_train), nb_epoch=nb_epoch,
                            validation_data=(X_test, Y_test))
    else:
        model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    model.save(save_file)  
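
A hedged usage sketch for train_detector(); the data below is random and purely illustrative (images shaped (n, rows, cols, 1), labels one-hot encoded):

import numpy as np
from keras.utils import np_utils

X = np.random.rand(200, 32, 16, 1).astype('float32')
y = np.random.randint(0, 2, size=200)
Y = np_utils.to_categorical(y, 2)

train_detector(X[:160], X[160:], Y[:160], Y[160:],
               nb_filters=32, batch_size=32, nb_epoch=2, nb_classes=2,
               save_file='detector_model.hdf5')
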
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.constraints import maxnorm
from keras.optimizers import SGD

from facerec_cnn import facerec_cnn_load_data as data

user_data_file = 'user_data.json'
face_images_folder = 'face_images/'
x_train, x_test, y_train, y_test, input_shape, num_classes = data.load_data(user_data_file, face_images_folder)

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape, padding='same', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

# Compile model
epochs = 25
lrate = 0.01
decay = lrate/epochs
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
print("[DEBUG] model.summary() = {}".format(model.summary()))

# Fit the model
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=epochs, batch_size=32)
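
A short follow-up sketch (assumption: y_test from load_data() is already one-hot encoded, as categorical_crossentropy requires):

# Evaluate the trained face-recognition model and persist it (hypothetical filename).
scores = model.evaluate(x_test, y_test, verbose=0)
print("Test accuracy: %.2f%%" % (scores[1] * 100))
model.save('facerec_cnn_model.h5')
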
# Question-answering attention fragment (separate snippet): assumes
# `question_encoding`, `passage_encoding`, N (passage length) and W (hidden
# width) are defined earlier in the original script.
question_attention_vector = Lambda(lambda q: keras.activations.softmax(q, axis=1))(question_attention_vector)
print(question_attention_vector)  # debug: inspect the symbolic tensor

# Weight the question encoding by the attention scores, collapse it to a single
# vector per example, and repeat it N times to align with the passage.
question_attention_vector = Lambda(lambda q: q[0] * q[1])([question_encoding, question_attention_vector])
question_attention_vector = Lambda(lambda q: K.sum(q, axis=1))(question_attention_vector)
question_attention_vector = RepeatVector(N)(question_attention_vector)

answer_start = Lambda(lambda arg:
                      concatenate([arg[0], arg[1], arg[2]]))([
    passage_encoding,
    question_attention_vector,
    multiply([passage_encoding, question_attention_vector])])

answer_start = TimeDistributed(Dense(W, activation='relu'))(answer_start)
answer_start = TimeDistributed(Dense(1))(answer_start)
answer_start = Flatten()(answer_start)
answer_start = Activation('softmax')(answer_start)

def s_answer_feature(x):
    """Return the index of the most probable start token for each example."""
    maxind = K.argmax(x, axis=1)
    return maxind

# Cast the predicted start index to int32, gather the passage encoding at that
# position for every example in the batch, and repeat it N times as a feature
# for the answer-end prediction below.
x = Lambda(lambda x: K.tf.cast(s_answer_feature(x), dtype=K.tf.int32))(answer_start)
start_feature = Lambda(lambda arg: K.tf.gather_nd(arg[0], K.tf.stack(
    [K.tf.range(K.tf.shape(arg[1])[0]), K.tf.cast(arg[1], K.tf.int32)], axis=1)))([passage_encoding, x])
start_feature = RepeatVector(N)(start_feature)

# Answer end prediction