Example #1
def vgg_face(weights_path=None):
    img = Input(shape=(3, 224, 224))

    pad1_1 = ZeroPadding2D(padding=(1, 1))(img)
    conv1_1 = Convolution2D(64, 3, 3, activation='relu',
                            name='conv1_1')(pad1_1)
    pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu',
                            name='conv1_2')(pad1_2)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)

    pad2_1 = ZeroPadding2D((1, 1))(pool1)
    conv2_1 = Convolution2D(128, 3, 3, activation='relu',
                            name='conv2_1')(pad2_1)
    pad2_2 = ZeroPadding2D((1, 1))(conv2_1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu',
                            name='conv2_2')(pad2_2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2))(conv2_2)

    pad3_1 = ZeroPadding2D((1, 1))(pool2)
    conv3_1 = Convolution2D(256, 3, 3, activation='relu',
                            name='conv3_1')(pad3_1)
    pad3_2 = ZeroPadding2D((1, 1))(conv3_1)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu',
                            name='conv3_2')(pad3_2)
    pad3_3 = ZeroPadding2D((1, 1))(conv3_2)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu',
                            name='conv3_3')(pad3_3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2))(conv3_3)

    pad4_1 = ZeroPadding2D((1, 1))(pool3)
    conv4_1 = Convolution2D(512, 3, 3, activation='relu',
                            name='conv4_1')(pad4_1)
    pad4_2 = ZeroPadding2D((1, 1))(conv4_1)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu',
                            name='conv4_2')(pad4_2)
    pad4_3 = ZeroPadding2D((1, 1))(conv4_2)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu',
                            name='conv4_3')(pad4_3)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)

    pad5_1 = ZeroPadding2D((1, 1))(pool4)
    conv5_1 = Convolution2D(512, 3, 3, activation='relu',
                            name='conv5_1')(pad5_1)
    pad5_2 = ZeroPadding2D((1, 1))(conv5_1)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu',
                            name='conv5_2')(pad5_2)
    pad5_3 = ZeroPadding2D((1, 1))(conv5_2)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu',
                            name='conv5_3')(pad5_3)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)

    flat = Flatten()(pool5)
    fc6 = Dense(4096, activation='relu', name='fc6')(flat)
    fc6_drop = Dropout(0.5)(fc6)
    fc7 = Dense(4096, activation='relu', name='fc7')(fc6_drop)
    fc7_drop = Dropout(0.5)(fc7)
    out = Dense(2622, activation='softmax', name='fc8')(fc7_drop)

    model = Model(input=img, output=out)

    if weights_path:
        model.load_weights(weights_path)

    return model
Example #2
def cnn_model():
    branch_0 = Sequential()
    branch_1 = Sequential()
    model0 = Sequential()
    model = Sequential()
    # ********************************************** 48*48
    model0.add(
        Convolution2D(32,
                      3,
                      3,
                      border_mode='same',
                      init='he_normal',
                      input_shape=(IMG_SIZE, IMG_SIZE, 3)))
    model0.add(BatchNormalization(epsilon=1e-06, axis=3))
    model0.add(Activation('relu'))

    model0.add(Convolution2D(48, 7, 1, border_mode='same', init='he_normal'))
    model0.add(BatchNormalization(epsilon=1e-06, axis=3))
    model0.add(Activation('relu'))
    model0.add(Convolution2D(48, 1, 7, border_mode='same', init='he_normal'))
    model0.add(BatchNormalization(epsilon=1e-06, axis=3))
    model0.add(Activation('relu'))
    model0.add(MaxPooling2D(pool_size=(2, 2)))
    model0.add(Dropout(0.2))
    # ****************************************** 24*24
    branch_0.add(model0)
    branch_1.add(model0)

    branch_0.add(Convolution2D(64, 3, 1, border_mode='same', init='he_normal'))
    branch_0.add(BatchNormalization(epsilon=1e-06, axis=3))
    branch_0.add(Activation('relu'))
    branch_0.add(Convolution2D(64, 1, 3, border_mode='same', init='he_normal'))
    branch_0.add(BatchNormalization(epsilon=1e-06, axis=3))
    branch_0.add(Activation('relu'))

    branch_1.add(Convolution2D(64, 1, 7, border_mode='same', init='he_normal'))
    branch_1.add(BatchNormalization(epsilon=1e-06, axis=3))
    branch_1.add(Activation('relu'))
    branch_1.add(Convolution2D(64, 7, 1, border_mode='same', init='he_normal'))
    branch_1.add(BatchNormalization(epsilon=1e-06, axis=3))
    branch_1.add(Activation('relu'))

    model.add(Merge([branch_0, branch_1], mode='concat', concat_axis=-1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    # ******************************************* 12*12
    model.add(Convolution2D(128, 3, 3, border_mode='same', init='he_normal'))
    model.add(BatchNormalization(epsilon=1e-06, axis=3))

    model.add(Activation('relu'))
    model.add(Convolution2D(256, 3, 3, border_mode='same',
                            init='he_normal'))  # previously 256 filters
    model.add(BatchNormalization(epsilon=1e-06, axis=3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))

    # *************************************** 6*6
    model.add(Flatten())
    model.add(Dense(256, init='he_normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(NUM_CLASSES, activation='softmax', init='he_normal'))
    return model
def conv_2d(kernels, kernel_size):
    return Convolution2D(kernels,
                         kernel_size,
                         kernel_size,
                         init="he_uniform",
                         border_mode="same")
Example #4
# img_types = ["flair", "FA", "anatomica"]

# exp1
img_types = ["FA", "anatomica"]

# prepare the input shapes for the 2D and 3D networks
input_shape_2d = (inp_dim_2d, inp_dim_2d, len(img_types))
input_shape_3d = (inp_dim_3d, inp_dim_3d, inp_dim_3d, len(img_types))

## parallel NN, x
model_x = Sequential()
print("Input shape to the 2d networks:", input_shape_2d)
model_x.add(
    Convolution2D(nb_filters,
                  kernel_size_2d[0],
                  kernel_size_2d[1],
                  border_mode='valid',
                  input_shape=input_shape_2d))
model_x.add(Activation('relu'))
print("Output shape of 1st convolution (2d):", model_x.output_shape)
model_x.add(
    Convolution2D(nb_filters,
                  kernel_size_2d[0],
                  kernel_size_2d[1],
                  border_mode='valid',
                  input_shape=input_shape_2d))
model_x.add(Activation('relu'))
print("Output shape of 2nd convolution (2d):", model_x.output_shape)
model_x.add(Convolution2D(nb_filters, kernel_size_2d[0], kernel_size_2d[1]))
model_x.add(Activation('relu'))
print("Output shape of 3rd convolution (2d):", model_x.output_shape)
Example #5
def model(summary=True):

    input_img = Input((64, 64, 3))

    normalized_input = Lambda(lambda z: z / 255. - .5)(input_img)

    conv = Convolution2D(8,
                         3,
                         3,
                         activation='relu',
                         init='glorot_uniform',
                         border_mode='same')(normalized_input)
    conv1 = Convolution2D(8,
                          3,
                          3,
                          activation='relu',
                          init='glorot_uniform',
                          border_mode='same')(normalized_input)
    conv2 = Convolution2D(8,
                          3,
                          3,
                          activation='relu',
                          init='glorot_uniform',
                          border_mode='same')(conv1)
    merge1 = merge([conv, conv2], mode='concat', concat_axis=3)
    maxpool = MaxPooling2D((2, 2))(merge1)
    dropout = Dropout(0.5)(maxpool)

    conv = Convolution2D(16,
                         3,
                         3,
                         activation='relu',
                         init='glorot_uniform',
                         border_mode='same')(dropout)
    conv1 = Convolution2D(16,
                          3,
                          3,
                          activation='relu',
                          init='glorot_uniform',
                          border_mode='same')(dropout)
    conv2 = Convolution2D(16,
                          3,
                          3,
                          activation='relu',
                          init='glorot_uniform',
                          border_mode='same')(conv1)
    merge1 = merge([conv, conv2], mode='concat', concat_axis=3)
    maxpool = MaxPooling2D((2, 2))(merge1)
    dropout = Dropout(0.5)(maxpool)

    conv = Convolution2D(32,
                         3,
                         3,
                         activation='relu',
                         init='glorot_uniform',
                         border_mode='same')(dropout)
    conv1 = Convolution2D(32,
                          3,
                          3,
                          activation='relu',
                          init='glorot_uniform',
                          border_mode='same')(dropout)
    conv2 = Convolution2D(32,
                          3,
                          3,
                          activation='relu',
                          init='glorot_uniform',
                          border_mode='same')(conv1)
    merge1 = merge([conv, conv2], mode='concat', concat_axis=3)
    maxpool = MaxPooling2D((2, 2))(merge1)
    dropout = Dropout(0.5)(maxpool)

    flatten = Flatten()(dropout)

    dense = Dense(64,
                  activation='relu',
                  init='glorot_uniform',
                  W_regularizer=l2(0.01))(flatten)
    dropout = Dropout(0.5)(dense)

    out = Dense(1)(dropout)
    #out = Dense(1)(flatten)

    model = Model(input=input_img, output=out)

    if summary:
        model.summary()

    return model
Example #6
x, y = createFaceData.gen_data_for_classification(samp_f)

x_train, x_test, y_train, y_test = cross_validation.train_test_split(
    x, y, test_size=.25)

inp_shape = x_train.shape[1:]

mini_batch = 1
no_epoch = 5
kern_size = 3
nb_filter = [6, 12]

model = Sequential()
# 6 filters
model.add(
    Convolution2D(nb_filter[0], kern_size, kern_size, input_shape=inp_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))  # downsample
model.add(Dropout(.25))
# 12 filters
model.add(Convolution2D(nb_filter[1], kern_size, kern_size))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))  # downsample
model.add(Dropout(.25))

# now flatten
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dropout(.25))
model.add(Dense(10))
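
# A hedged completion sketch (not part of the original snippet): the 10-way
# output suggests a softmax classifier; the loss assumes one-hot labels and the
# optimizer choice is an assumption.
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=mini_batch, nb_epoch=no_epoch,
          validation_data=(x_test, y_test))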
Example #7
def create_model(nb_classes, input_shape):
    """Create a VGG-16 like model."""
    model = Sequential()
    print("input_shape: %s" % str(input_shape))
    # input_shape = (None, None, 3)  # for fcn
    model.add(
        Convolution2D(32, (3, 3),
                      padding='same',
                      input_shape=input_shape,
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(32, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(32, (2, 2),
                      padding='same',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001),
                      strides=2))

    model.add(
        Convolution2D(64, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(64, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(64, (2, 2),
                      padding='same',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001),
                      strides=2))

    model.add(
        Convolution2D(64, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(
        Convolution2D(64, (2, 2),
                      padding='same',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001),
                      strides=2))

    model.add(Convolution2D(512, (4, 4), padding='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(
        Convolution2D(512, (1, 1),
                      padding='same',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(
        Convolution2D(nb_classes, (1, 1),
                      padding='same',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))
    model.add(Flatten())  # Remove for FCN
    return model
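
# A minimal usage sketch for the function above; the class count, input shape
# and optimizer are assumptions, not part of the original code.
model = create_model(nb_classes=10, input_shape=(32, 32, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()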
Example #8
def create_model_descriptor():
    """ Create model descriptor of VGG-Face classificator """
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Convolution2D(4096, (7, 7), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(4096, (1, 1), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2622, (1, 1)))
    model.add(Flatten())
    model.add(Activation('softmax'))

    model.load_weights('vgg_face_weights.h5')

    # create VGG-Face descriptor without the last layer
    vgg_descriptor = Model(inputs=model.layers[0].input,
                           outputs=model.layers[-2].output)
    return vgg_descriptor
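
# A hedged usage sketch: compare two aligned, preprocessed 224x224 face crops
# with the descriptor above via cosine distance. The helper name and inputs
# are illustrative assumptions.
import numpy as np

def face_distance(img_a, img_b, descriptor):
    # img_a, img_b: float arrays of shape (224, 224, 3)
    vec_a = descriptor.predict(np.expand_dims(img_a, axis=0))[0]
    vec_b = descriptor.predict(np.expand_dims(img_b, axis=0))[0]
    cos = np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))
    return 1.0 - cos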
def model_architecture(img_rows, img_cols, img_channels, nb_classes):
    # function defining the architecture of the CNN
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True, input_shape=(img_channels,img_rows, img_cols)))
    model.add(Convolution2D(32, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(Convolution2D(32, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(MaxPooling2D(pool_size=(2, 2), strides = (2,2)))
    model.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(MaxPooling2D(pool_size=(2, 2), strides = (2,2)))
    model.add(Convolution2D(96, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(Convolution2D(96, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(Convolution2D(96, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(MaxPooling2D(pool_size=(2, 2), strides = (2,2)))
    model.add(Convolution2D(128, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(Dropout(0.5))
    model.add(Convolution2D(512, 1, 1, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2, 1, 1, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(GlobalAveragePooling2D(dim_ordering='default'))
    #model.add(Convolution2D(10,1,1, border_mode='same',init='orthogonal', bias = True))
    #model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
#    model.summary()
    return model
Example #10
Y_train2 = Y_train[20000:60000]
Y_train3 = np.concatenate((Y_train[0:20000], Y_train[40000:60000]))

XTRAIN = (X_train1, X_train2, X_train3)
YTRAIN = (Y_train1, Y_train2, Y_train3)

learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.000001)

model = Sequential()
model.add(
    Convolution2D(64,
                  kernel_size=(3, 3),
                  activation='relu',
                  input_shape=(28, 28, 1)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
#model.add(Convolution2D(256, (3, 3), activation='relu'))
#model.add(MaxPooling2D(pool_size=(2,2)))
#model.add(BatchNormalization())
#model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
#model.add(BatchNormalization())
Example #11
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator

# Initialize the Sequential constructor
classifier = Sequential()

# Only the first layer needs an explicit input shape; every following layer
# infers its input shape from the previous layer's output.
# Initialize the first layer
classifier.add(
    Convolution2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

# Max pooling reduces the feature maps, keeping only the essential information
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Initialize the Second Layer
classifier.add(Convolution2D(32, (3, 3), activation="relu"))

# Feature mapping second layer
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Convert the multi-dimensional feature maps into a single vector
classifier.add(Flatten())

# Initialize the third layer
classifier.add(Dense(activation='relu', units=128))
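
# A hedged continuation sketch: the usual output layer, compilation and
# ImageDataGenerator training loop for a binary image classifier. Directory
# path, class mode and step counts below are assumptions.
classifier.add(Dense(units=1, activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy',
                   metrics=['accuracy'])

train_datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True)
training_set = train_datagen.flow_from_directory('dataset/training_set',  # hypothetical path
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
classifier.fit_generator(training_set, steps_per_epoch=250, epochs=10)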
Example #12
x = x[:10960]
y = y[:len(x)]
x = x.reshape((1096, 10, 3, 64, 64))
y = y.reshape((1096, 10, 6))
y = y.mean(axis=1)
X_train, Y_train, X_test, Y_test = set_para.preprocessing(x=x,
                                                          y=y,
                                                          split=True,
                                                          to_categorical=False)

# this model will encode an image into a vector.
vision_model = Sequential()
vision_model.add(
    Convolution2D(64,
                  3,
                  3,
                  activation='relu',
                  border_mode='same',
                  input_shape=(3, img_rows, img_cols)))
vision_model.add(Convolution2D(64, 3, 3, activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(
    Convolution2D(128, 3, 3, activation='relu', border_mode='same'))
vision_model.add(Convolution2D(128, 3, 3, activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(
    Convolution2D(256, 3, 3, activation='relu', border_mode='same'))
vision_model.add(Convolution2D(256, 3, 3, activation='relu'))
vision_model.add(Convolution2D(256, 3, 3, activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(Flatten())
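
# A hedged sketch of how such an encoder is typically used on the
# (1096, 10, 3, 64, 64) sequences prepared above: apply it to every frame via
# TimeDistributed and regress the 6 targets. Layer sizes are assumptions.
from keras.layers import Input, TimeDistributed, LSTM, Dense
from keras.models import Model

video_input = Input(shape=(10, 3, img_rows, img_cols))
encoded_frames = TimeDistributed(vision_model)(video_input)  # one vector per frame
encoded_video = LSTM(256)(encoded_frames)                    # summarize the sequence
video_output = Dense(6)(encoded_video)                       # matches y.shape[-1] == 6
video_model = Model(input=video_input, output=video_output)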
import numpy as np
from keras import backend as K
from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.layers import MaxPooling2D, UpSampling2D, Convolution2D, Input, merge, concatenate
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from skimage.io import imsave

from data import load_train_data, load_test_data


def get_fractalunet(f=16):
    inputs = Input((size, size, 3))

    conv1 = Convolution2D(f, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = BatchNormalization()(conv1)
    conv1 = Convolution2D(f, 3, 3, activation='relu', border_mode='same')(conv1)

    down1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = BatchNormalization()(down1)
    conv2 = Convolution2D(2 * f, 3, 3, activation='relu', border_mode='same')(conv2)
    conv2 = BatchNormalization()(conv2)
    conv2 = Convolution2D(2 * f, 3, 3, activation='relu', border_mode='same')(conv2)

    down2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = BatchNormalization()(down2)
    conv3 = Convolution2D(4 * f, 3, 3, activation='relu', border_mode='same')(conv3)
    conv3 = BatchNormalization()(conv3)
Example #14
(X_train, y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(-1, 1, 28, 28)
X_test = X_test.reshape(-1, 1, 28, 28)
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)

# build the CNN
model = Sequential()
# first convolution layer
model.add(
    Convolution2D(
        nb_filter=32,
        nb_row=5,
        nb_col=5,
        border_mode='same',  #padding method
        input_shape=(
            1,  #channels
            28,
            28),  # height, width
    ))
model.add(Activation('relu'))
# first pooling layer
model.add(
    MaxPooling2D(
        pool_size=(2, 2),
        strides=(2, 2),
        border_mode='same'  #padding method
    ))
# second convolution layer
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))
Example #15
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[1:2]
Y_test = np_utils.to_categorical(y_test, nb_classes)[1:2]

discriminator = Sequential()

discriminator.add(
    Convolution2D(nb_filters,
                  kernel_size[0],
                  kernel_size[1],
                  border_mode='valid',
                  input_shape=input_shape))
discriminator.add(Activation('relu'))
discriminator.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
discriminator.add(Activation('relu'))
discriminator.add(MaxPooling2D(pool_size=pool_size))
discriminator.add(Dropout(0.25))
discriminator.add(Flatten())
discriminator.add(Dense(64))
discriminator.add(Activation('relu'))
discriminator.add(Dropout(0.5))
discriminator.add(Dense(nb_classes))
discriminator.add(Activation('sigmoid'))

discriminator.compile(loss='categorical_crossentropy',
Example #16
if not os.path.exists(out_dir): os.mkdir(out_dir)

print('INFO - %s' % ('building regression model.'))
units = 512
reg_input = Input(shape=(num_reg, ), dtype='float32', name='reg_input')
x = Dense(units, activation='relu')(reg_input)
x = Dense(units, activation='relu')(x)
reg_output = Dense(units, activation='relu')(x)

print('INFO - %s' % ('building sequence model.'))
filters = [256, 128]
kernel_width = [19, 11]
seq_input = Input(shape=(4, seq_length, 1), dtype='float32', name='seq_input')
x = Convolution2D(nb_filter=filters[0],
                  nb_row=4,
                  nb_col=kernel_width[0],
                  subsample=(1, 1),
                  border_mode='valid')(seq_input)
x = MaxPooling2D(pool_size=(1, 4))(x)
x = Activation('relu')(x)

x = Convolution2D(nb_filter=filters[1],
                  nb_row=1,
                  nb_col=kernel_width[1],
                  subsample=(1, 1),
                  border_mode='valid')(x)
x = MaxPooling2D(pool_size=(1, 4))(x)
x = Activation('relu')(x)

seq_output = Flatten()(x)
Example #17
# env.seed(123)
env = PoorMansGymEnv()
nb_actions = env.action_space.n

# Next, we build our model. We use the same model that was described by Mnih et al. (2015).
input_shape = (WINDOW_LENGTH, ) + INPUT_SHAPE
model = Sequential()
if K.image_dim_ordering() == 'tf':
    # (width, height, channels)
    model.add(Permute((2, 3, 1), input_shape=input_shape))
elif K.image_dim_ordering() == 'th':
    # (channels, width, height)
    model.add(Permute((1, 2, 3), input_shape=input_shape))
else:
    raise RuntimeError('Unknown image_dim_ordering.')
model.add(Convolution2D(16, 8, 8, subsample=(4, 4)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 4, 4, subsample=(2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3, subsample=(1, 1)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())

# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=1000, window_length=WINDOW_LENGTH)
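
# A hedged continuation sketch of the keras-rl agent setup this comment refers
# to; warm-up steps, learning rate and step counts are assumptions.
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy

policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1.,
                              value_min=.1, value_test=.05, nb_steps=10000)
dqn = DQNAgent(model=model, nb_actions=nb_actions, policy=policy, memory=memory,
               nb_steps_warmup=500, gamma=.99, target_model_update=1e-2)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
dqn.fit(env, nb_steps=50000, visualize=False, verbose=2)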
def save_bottleneck_features():
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))


    assert os.path.exists(weight_path), 'Model weights not found (see "weight_path" variable in script).'
    f = h5py.File(weight_path)
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            break

        g = f['layer_{}'.format(k)]
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        model.layers[k].set_weights(weights)
    f.close()
    print('Model loaded.')

    X, y = load2d()
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
    X_flipped, y_flipped = flip_image(X_train, y_train)

    X_train = np.vstack((X_train, X_flipped))
    y_train = np.vstack((y_train, y_flipped))
    X_train = gray_to_rgb(X_train)
    X_val = gray_to_rgb(X_val)

    bottleneck_features_train = model.predict(X_train)
    np.save(open('bottleneck_features_train.npy', 'wb'), bottleneck_features_train)
    np.save(open('label_train.npy', 'wb'), y_train)

    bottleneck_features_validation = model.predict(X_val)
    np.save(open('bottleneck_features_validation.npy', 'wb'), bottleneck_features_validation)
    np.save(open('label_validation.npy', 'wb'), y_val)
Example #19
def VGG_16(weights_path=None, dim_ordering='th'):
    model = Sequential()
    model.add(
        ZeroPadding2D((1, 1),
                      dim_ordering=dim_ordering,
                      input_shape=(3, 224, 224)))
    model.add(
        Convolution2D(64, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(64, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), dim_ordering=dim_ordering))

    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(128, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(128, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), dim_ordering=dim_ordering))

    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(256, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(256, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(256, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), dim_ordering=dim_ordering))

    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(512, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(512, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(512, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), dim_ordering=dim_ordering))

    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(512, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(512, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(ZeroPadding2D((1, 1), dim_ordering=dim_ordering))
    model.add(
        Convolution2D(512, 3, 3, activation='relu', dim_ordering=dim_ordering))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), dim_ordering=dim_ordering))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    return model
Example #20
# -*- coding: utf-8 -*-
# Author: xcl
# Date: 2019/7/31 18:52

from keras.layers import merge, Convolution2D, MaxPooling2D, Input

input_img = Input(shape=(3, 256, 256))

tower_1 = Convolution2D(64, 1, 1, border_mode='same',
                        activation='relu')(input_img)
tower_1 = Convolution2D(64, 3, 3, border_mode='same',
                        activation='relu')(tower_1)

tower_2 = Convolution2D(64, 1, 1, border_mode='same',
                        activation='relu')(input_img)
tower_2 = Convolution2D(64, 5, 5, border_mode='same',
                        activation='relu')(tower_2)

tower_3 = MaxPooling2D((3, 3), strides=(1, 1), border_mode='same')(input_img)
tower_3 = Convolution2D(64, 1, 1, border_mode='same',
                        activation='relu')(tower_3)

output = merge([tower_1, tower_2, tower_3], mode='concat', concat_axis=1)

from keras.layers import merge, Convolution2D, Input

# input tensor for a 3-channel 256x256 image
x = Input(shape=(3, 256, 256))
# 3x3 conv with 3 output channels (same as input channels)
y = Convolution2D(3, 3, 3, border_mode='same')(x)
# this returns x + y.
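# The line implied by the comment above (Keras 1 functional-API sum merge):
z = merge([x, y], mode='sum')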
Example #21
def VGGFace(include_top=False,
            weights='vggface',
            input_tensor=None,
            input_shape=None,
            pooling='max',
            classes=2622,
            WEIGHTS_FILE='None'):
    """Instantiates the VGGFace architecture.
    Optionally loads weights pre-trained
    on VGGFace dataset. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format="channels_last"` in your Keras config
    at ~/.keras/keras.json.
    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.
    # Arguments
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 48.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """

    if weights not in {'vggface', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `vggface` '
                         '(pre-training on VGGFace Dataset).')

    if weights == 'vggface' and include_top and classes != 2622:
        raise ValueError(
            'If using `weights` as vggface original with `include_top`'
            ' as true, `classes` should be 2622')
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      include_top=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Block 1
    x = Convolution2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv1_1')(img_input)
    x = Convolution2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

    # Block 2
    x = Convolution2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv2_1')(x)
    x = Convolution2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x)

    # Block 3
    x = Convolution2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv3_1')(x)
    x = Convolution2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv3_2')(x)
    x = Convolution2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(x)

    # Block 4
    x = Convolution2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv4_1')(x)
    x = Convolution2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv4_2')(x)
    x = Convolution2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(x)

    # Block 5
    x = Convolution2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv5_1')(x)
    x = Convolution2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv5_2')(x)
    x = Convolution2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5')(x)

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, name='fc6')(x)
        x = Activation('relu', name='fc6/relu')(x)
        x = Dense(4096, name='fc7')(x)
        x = Activation('relu', name='fc7/relu')(x)
        x = Dense(2622, name='fc8')(x)
        x = Activation('softmax', name='fc8/softmax')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='VGGFace')

    # load weights
    if weights == 'vggface':
        weights_path = 'weights/' + WEIGHTS_FILE
        model.load_weights(weights_path, by_name=True)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if include_top:
                maxpool = model.get_layer(name='pool5')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc6')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
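
# A minimal usage sketch for the function above. weights=None skips weight
# loading so the example stays self-contained; pass weights='vggface' and a
# real WEIGHTS_FILE to use pretrained weights. The input batch is a stand-in.
import numpy as np

extractor = VGGFace(include_top=False, weights=None, pooling='avg')
face_batch = np.random.rand(1, 224, 224, 3).astype('float32')  # stand-in for a preprocessed face
print(extractor.predict(face_batch).shape)  # (1, 512) with pooling='avg'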
Example #22
img_rows, img_cols = 28, 28

nb_filters = 32

pool_size = (2, 2)

kernel_size = (3, 3)
input_shape = (28, 28, 1)

learning_rate = 0.02
decay_rate = 5e-5
momentum = 0.9

denoise = Sequential()
denoise.add(
    Convolution2D(20, 3, 3, border_mode='valid', input_shape=input_shape))
denoise.add(BatchNormalization(mode=2))
denoise.add(Activation('relu'))
denoise.add(UpSampling2D(size=(2, 2)))
denoise.add(Convolution2D(20, 3, 3, init='glorot_uniform'))
denoise.add(BatchNormalization(mode=2))
denoise.add(Activation('relu'))
denoise.add(Convolution2D(20, 3, 3, init='glorot_uniform'))
denoise.add(BatchNormalization(mode=2))
denoise.add(Activation('relu'))
denoise.add(MaxPooling2D(pool_size=(3, 3)))
denoise.add(Convolution2D(4, 3, 3, init='glorot_uniform'))
denoise.add(BatchNormalization(mode=2))
denoise.add(Activation('relu'))
denoise.add(Reshape((28, 28, 1)))
sgd = SGD(lr=learning_rate,
Example #23
model = Sequential()
model.add(Reshape((input_shape), input_shape=(WINDOW_LENGTH,) + input_shape))
if K.image_dim_ordering() == 'tf':
    print('tensorflow ordering')
    # (width, height, channels)
    model.add(Permute((2, 3, 1), input_shape=input_shape))
    permute_shape = (MAP_X, MAP_Y, num_zones)
elif K.image_dim_ordering() == 'th':
    # (channels, width, height)
    model.add(Permute((1, 2, 3), input_shape=input_shape))
    permute_shape = (num_zones, MAP_X, MAP_Y)
else:
    raise RuntimeError('Unknown image_dim_ordering.')

model.add(Convolution2D(32, (8, 8), strides=(2, 2), padding='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, (4, 4), strides=(2, 2), padding='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, (3, 3), strides=(1, 1), padding='valid'))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())

# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=1000000, window_length=1)
Example #24
    Y = np.zeros((batch_size, 14951))  # one hot
    Y[np.arange(batch_size), Y_list] = 1

    return X, Y


# In[30]:

# model

model = Sequential()

model.add(
    Convolution2D(filters=16,
                  kernel_size=(5, 5),
                  padding='same',
                  input_shape=(128, 128, 3),
                  activation='relu'))

model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(
    Convolution2D(filters=32,
                  kernel_size=(5, 5),
                  padding='same',
                  activation='relu'))

model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(
    Convolution2D(filters=64,
Example #25
def vgg16(include_top=True,
          weights='imagenet',
          input_tensor=None,
          input_shape=None,
          classes=1000,
          weight_decay=5e-4):
    """Instantiate the VGG16 architecture,
    optionally loading weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_dim_ordering="tf"` in your Keras config
    at ~/.keras/keras.json.
    The model and the weights are compatible with both
    TensorFlow and Theano. The dimension ordering
    convention used by the model is the one
    specified in your Keras config file.
    # Arguments
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `tf` dim ordering)
            or `(3, 224, 224)` (with `th` dim ordering).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 48.
            E.g. `(200, 200, 3)` would be one valid value.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      dim_ordering=K.image_dim_ordering(),
                                      include_top=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Block 1
    x = Convolution2D(64,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block1_conv1')(img_input)
    x = Convolution2D(64,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Convolution2D(128,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block2_conv1')(x)
    x = Convolution2D(128,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Convolution2D(256,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block3_conv1')(x)
    x = Convolution2D(256,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block3_conv2')(x)
    x = Convolution2D(256,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Convolution2D(512,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block4_conv1')(x)
    x = Convolution2D(512,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block4_conv2')(x)
    x = Convolution2D(512,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Convolution2D(512,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block5_conv1')(x)
    x = Convolution2D(512,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block5_conv2')(x)
    x = Convolution2D(512,
                      3,
                      3,
                      activation='relu',
                      border_mode='same',
                      W_regularizer=l2(weight_decay),
                      b_regularizer=l2(weight_decay),
                      name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096,
                  activation='relu',
                  W_regularizer=l2(weight_decay),
                  b_regularizer=l2(weight_decay),
                  name='fc1')(x)
        x = Dense(4096,
                  activation='relu',
                  W_regularizer=l2(weight_decay),
                  b_regularizer=l2(weight_decay),
                  name='fc2')(x)
        x = Dense(classes,
                  activation='softmax',
                  W_regularizer=l2(weight_decay),
                  b_regularizer=l2(weight_decay),
                  name='predictions')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vgg16')

    # load weights
    if weights == 'imagenet':
        if K.image_dim_ordering() == 'th':
            if include_top:
                weights_path = get_file(
                    'vgg16_weights_th_dim_ordering_th_kernels.h5',
                    TH_WEIGHTS_PATH,
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'vgg16_weights_th_dim_ordering_th_kernels_notop.h5',
                    TH_WEIGHTS_PATH_NO_TOP,
                    cache_subdir='models')
            model.load_weights(weights_path)
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image dimension ordering convention '
                              '(`image_dim_ordering="th"`). '
                              'For best performance, set '
                              '`image_dim_ordering="tf"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
                convert_all_kernels_in_model(model)
        else:
            if include_top:
                weights_path = get_file(
                    'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                    TF_WEIGHTS_PATH,
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                    TF_WEIGHTS_PATH_NO_TOP,
                    cache_subdir='models')
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)
    return model
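
# A minimal usage sketch (weights=None keeps it self-contained; the dummy
# batch assumes `image_dim_ordering="tf"`).
import numpy as np

net = vgg16(include_top=True, weights=None, classes=1000)
dummy = np.random.rand(1, 224, 224, 3).astype('float32')
print(net.predict(dummy).shape)  # (1, 1000)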
Example #26
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D, Dropout, Flatten
from keras.models import Model, Sequential
from keras.optimizers import SGD, Adadelta

# input: 150x150 images with 3 channels -> (150, 150, 3) tensors.
# this applies 32 convolution filters of size 3x3 each.

model = Sequential()
model.add(Convolution2D(32, 3, 3, activation='relu',
                        input_shape=(150, 150, 3)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(6, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer=Adadelta(),
              metrics=['accuracy'])
model.save('ASLModel.h5')
    def load_model(classes=10, img_rows=28, img_cols=28):
        input_shape = SketchANetModel.load_inputshape(img_rows, img_cols)

        model = Sequential()
        # 1 Input: 225x225 Output: 71x71
        model.add(
            Convolution2D(input_shape=input_shape,
                          data_format='channels_last',
                          strides=(1, 1),
                          filters=64,
                          kernel_size=(2, 2),
                          padding="same",
                          activation="relu"))
        # Input: 71x71 Output: 35x35
        model.add(
            MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same"))

        # 2 Input: 35x35 Output: 31x31
        model.add(
            Convolution2D(kernel_size=(3, 3),
                          filters=128,
                          strides=(1, 1),
                          activation="relu",
                          padding="same"))
        # Input: 31x31 Output: 15x15
        model.add(
            MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same"))

        # Input: 15x15 Output: 15x15
        model.add(
            Convolution2D(kernel_size=(3, 3),
                          filters=256,
                          strides=(1, 1),
                          activation="relu",
                          padding="valid"))
        model.add(
            Convolution2D(kernel_size=(3, 3),
                          filters=256,
                          strides=(1, 1),
                          activation="relu",
                          padding="valid"))
        model.add(
            Convolution2D(kernel_size=(3, 3),
                          filters=256,
                          strides=(1, 1),
                          activation="relu",
                          padding="valid"))
        # Input: 15x15 Output: 7x7
        model.add(
            MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same"))

        # Input: 7x7 Output: 1x1
        model.add(
            Convolution2D(kernel_size=(7, 7),
                          filters=512,
                          strides=(1, 1),
                          activation="relu",
                          padding="same"))
        # Input: 1x1 Output: 1x1
        model.add(Dropout(rate=0.5))

        model.add(
            Convolution2D(kernel_size=(1, 1),
                          filters=512,
                          strides=(1, 1),
                          activation="relu",
                          padding="same"))
        model.add(Dropout(rate=0.5))

        model.add(
            Convolution2D(kernel_size=(1, 1),
                          filters=512,
                          strides=(1, 1),
                          activation="relu",
                          padding="same"))

        model.add(Flatten())
        model.add(Dense(units=classes, activation="softmax"))

        model.compile(optimizer='Adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        return model
def SqueezeNet(input_tensor=None,
               input_shape=None,
               weights='imagenet',
               classes=1000):

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # input_shape = _obtain_input_shape(input_shape,
    #                                   default_size=227,
    #                                   min_size=48,
    #                                   data_format=K.image_data_format(),
    #                                   include_top=True)

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=227,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=True)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Convolution2D(64, (3, 3),
                      strides=(2, 2),
                      padding='valid',
                      name='conv1')(img_input)
    x = Activation('relu', name='relu_conv1')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)

    x = fire_module(x, fire_id=2, squeeze=16, expand=64)
    x = fire_module(x, fire_id=3, squeeze=16, expand=64)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)

    x = fire_module(x, fire_id=4, squeeze=32, expand=128)
    x = fire_module(x, fire_id=5, squeeze=32, expand=128)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)

    x = fire_module(x, fire_id=6, squeeze=48, expand=192)
    x = fire_module(x, fire_id=7, squeeze=48, expand=192)
    x = fire_module(x, fire_id=8, squeeze=64, expand=256)
    x = fire_module(x, fire_id=9, squeeze=64, expand=256)
    x = Dropout(0.5, name='drop9')(x)

    x = Convolution2D(classes, (1, 1), padding='valid', name='conv10')(x)
    x = Activation('relu', name='relu_conv10')(x)
    x = GlobalAveragePooling2D()(x)
    out = Activation('softmax', name='loss')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, out, name='squeezenet')

    # load weights
    if weights == 'imagenet':

        weights_path = get_file(
            'squeezenet_weights_tf_dim_ordering_tf_kernels.h5',
            WEIGHTS_PATH,
            cache_subdir='models')
        model.load_weights(weights_path)

        if K.image_data_format() == 'channels_first':

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
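
# fire_module is referenced above but not shown in this snippet; a minimal
# reconstruction consistent with those calls. Layer names are assumptions and
# only matter if pretrained weights are loaded by name; axis=-1 assumes
# channels_last.
from keras.layers import Activation, Convolution2D, concatenate

def fire_module(x, fire_id, squeeze=16, expand=64):
    prefix = 'fire' + str(fire_id) + '/'
    sq = Convolution2D(squeeze, (1, 1), padding='valid', name=prefix + 'squeeze1x1')(x)
    sq = Activation('relu', name=prefix + 'relu_squeeze1x1')(sq)
    left = Convolution2D(expand, (1, 1), padding='valid', name=prefix + 'expand1x1')(sq)
    left = Activation('relu', name=prefix + 'relu_expand1x1')(left)
    right = Convolution2D(expand, (3, 3), padding='same', name=prefix + 'expand3x3')(sq)
    right = Activation('relu', name=prefix + 'relu_expand3x3')(right)
    return concatenate([left, right], axis=-1, name=prefix + 'concat')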
Example #29
# Part 1 - Building the CNN

# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense

# Initializing the CNN
classifier = Sequential()

# Step 1 - Convolution
classifier.add(
    Convolution2D(filters=32,
                  kernel_size=3,
                  input_shape=(64, 64, 3),
                  activation='relu'))

# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Adding a 2nd convolutional layer
classifier.add(
    Convolution2D(filters=32,
                  kernel_size=3,
                  data_format="channels_last",
                  activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Step 3 - Flattening
classifier.add(Flatten())
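
# Step 4 - Full connection and compilation (a hedged continuation sketch;
# layer sizes, optimizer and the binary output are assumptions, not part of
# the original snippet)
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy',
                   metrics=['accuracy'])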
Example #30
# dimensions of our images.
img_width, img_height = 150, 150

# train_data_dir = 'data/train'
# validation_data_dir = 'data/validation'
test_data_dir = '/home/pablo/Documents/Git_repositories/data_examples/DogsVsCats/data/test_small'
# nb_train_samples = 2000
# nb_validation_samples = 800
nb_predict_samples = len(os.listdir(os.path.join(test_data_dir, 'test')))
# nb_epoch = 50

# build the VGG16 network
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))