Exemple #1
0
def convnet_simple_lion_keras(image_dims):
    """Build and compile a small sequential CNN for 6-class classification.

    Args:
        image_dims: input image shape passed to the first layer,
            e.g. ``(rows, cols, channels)``.

    Returns:
        A compiled ``keras.models.Sequential`` model (Adam, categorical
        cross-entropy, accuracy metric).
    """
    net = keras.models.Sequential()

    # Rescale raw pixel values from [0, 255] into [-0.5, 0.5].
    net.add(core.Lambda(lambda px: (px / 255.0) - 0.5, input_shape=image_dims))

    # Three conv -> max-pool stages with doubling filter counts.
    for n_filters in (32, 64, 128):
        net.add(convolutional.Conv2D(n_filters, (3, 3),
                                     activation='relu', padding='same'))
        net.add(convolutional.MaxPooling2D(pool_size=(2, 2)))

    net.add(core.Flatten())

    # Fully-connected head with heavy dropout, then a 6-way softmax.
    for width in (512, 1024):
        net.add(core.Dense(width, activation='relu'))
        net.add(core.Dropout(0.5))
    net.add(core.Dense(6, activation='softmax'))

    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['acc'])

    return net
    def __init__(self, img_size, nb_classes):
        """Build, summarize and compile a VGG-style CNN, stored as ``self.cnn``.

        Args:
            img_size: ``(rows, cols)`` of the single-channel input images.
            nb_classes: number of softmax output classes.
        """
        img_rows, img_cols = img_size

        # Per-stage filter counts; nb_filters_3 only feeds the third stage,
        # which is currently commented out.  (Removed the unused batch_size
        # local that the original defined but never read.)
        nb_filters_1 = 32  # 64
        nb_filters_2 = 64  # 128
        nb_filters_3 = 128  # 256
        nb_conv = 3  # square kernel size

        cnn = models.Sequential()

        # Stage 1: two same-padded 3x3 convs, then a 2x2 max-pool
        # (Keras 1 Convolution2D / border_mode API).
        cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu", input_shape=(img_rows, img_cols, 1),
                                   border_mode='same'))
        cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu", border_mode='same'))
        cnn.add(conv.MaxPooling2D(strides=(2, 2)))

        # Stage 2: same pattern with twice the filters.
        cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu", border_mode='same'))
        cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu", border_mode='same'))
        cnn.add(conv.MaxPooling2D(strides=(2, 2)))

        # Optional third stage, disabled for now.
        # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
        # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
        # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
        # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
        # cnn.add(conv.MaxPooling2D(strides=(2,2)))

        # Classifier head.
        cnn.add(core.Flatten())
        cnn.add(core.Dropout(0.2))
        cnn.add(core.Dense(128, activation="relu"))  # 4096
        cnn.add(core.Dense(nb_classes, activation="softmax"))

        cnn.summary()
        cnn.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
        self.cnn = cnn
Exemple #3
0
def create_simple_model(num_classes, layer1_filters=32, layer2_filters=64):
    """Build a two-stage CNN over 1-channel IMG_COLS x IMG_ROWS inputs.

    Args:
        num_classes: size of the softmax output layer.
        layer1_filters: filters in the first conv stage.
        layer2_filters: filters in the second conv stage.

    Returns:
        ``(model, epochs)`` — the compiled Sequential model and the
        suggested number of training epochs.
    """
    epochs = 5
    kernel = 2  # square conv kernel size (Keras 1 positional args)

    model = models.Sequential()

    # Two pad -> conv -> pool stages; only the first declares input_shape.
    for stage, stage_filters in enumerate((layer1_filters, layer2_filters)):
        pad_kwargs = {}
        if stage == 0:
            pad_kwargs["input_shape"] = (1, IMG_COLS, IMG_ROWS)
        model.add(conv.ZeroPadding2D((1, 1), **pad_kwargs))
        model.add(conv.Convolution2D(stage_filters, kernel, kernel,
                                     activation="relu"))
        model.add(conv.MaxPooling2D(strides=(2, 2)))

    # Classifier head.
    model.add(core.Flatten())
    model.add(core.Dropout(0.2))
    model.add(core.Dense(128, activation="relu"))
    model.add(core.Dense(num_classes, activation="softmax"))

    model.summary()
    model.compile(loss="categorical_crossentropy",
                  optimizer="adadelta",
                  metrics=["accuracy"])

    return model, epochs
def main(n_filters,
         conv_size,
         pool_size,
         dropout,
         patch_size,
         n_astro=7,
         out_path=None):
    """Build a CNN-over-patches + astro-features binary classifier.

    Args:
        n_filters: filters per conv layer.
        conv_size: square conv kernel size.
        pool_size: square max-pool size.
        dropout: dropout fraction applied after the last conv.
        patch_size: side length of the square single-channel input patch.
        n_astro: number of scalar astro features on the second input.
        out_path: if given, the model JSON is also written to this file.

    Returns:
        The model architecture serialized with ``model.to_json()``.
    """
    # Imports must be in the function, or whenever we import this module, Keras
    # will dump to stdout.
    import keras.layers.core as core
    from keras.layers import Input, Dense, Concatenate
    import keras.layers.convolutional as conv
    import keras.layers.merge
    from keras.models import Model

    image_in = Input(shape=(1, patch_size, patch_size))
    astro_in = Input(shape=(n_astro, ))

    # Three valid-padded conv layers; max-pool after the first two only.
    # With a 32x32 patch, 5x5 convs and 2x2 pools the shapes run:
    # 1x32x32 -> 32x28x28 -> 32x14x14 -> 32x10x10 -> 32x5x5 -> 32x1x1.
    features = image_in
    for stage in range(3):
        features = conv.Convolution2D(filters=n_filters,
                                      kernel_size=(conv_size, conv_size),
                                      border_mode='valid',
                                      activation='relu',
                                      data_format='channels_first')(features)
        if stage < 2:
            features = conv.MaxPooling2D(pool_size=(pool_size, pool_size),
                                         data_format='channels_first')(features)

    features = core.Dropout(dropout)(features)
    features = core.Flatten()(features)

    # Join the astro scalars with the flattened image features, then a
    # single sigmoid unit for the binary decision.
    joined = Concatenate()([astro_in, features])
    prob = Dense(1, activation='sigmoid')(joined)

    model = Model(inputs=[astro_in, image_in], outputs=[prob])
    model.compile(loss='binary_crossentropy', optimizer='adadelta')

    model_json = model.to_json()
    if out_path is not None:
        with open(out_path, 'w') as f:
            f.write(model_json)

    return model_json
def test_maxpooling_2d_dim_ordering():
    """MaxPooling2D must agree between 'th' and 'tf' dim orderings."""
    stack_size = 3
    data = np.random.random((1, stack_size, 10, 10))

    def pooled(arr, ordering):
        # Build a 2x2 pooling layer for the given ordering and evaluate it.
        layer = convolutional.MaxPooling2D((2, 2),
                                           input_shape=arr.shape[1:],
                                           dim_ordering=ordering)
        layer.input = K.variable(arr)
        return K.eval(layer.get_output(False))

    out_th = pooled(data, 'th')
    # Move channels last for the 'tf' variant of the same data.
    out_tf = pooled(np.transpose(data, (0, 2, 3, 1)), 'tf')

    assert_allclose(out_tf, np.transpose(out_th, (0, 2, 3, 1)), atol=1e-05)
def Simple_Convo(train, nb_classes):
    """Build (but do not compile) a VGG-style CNN for 56x56 inputs.

    Also tiles the raw ``train`` pixels from 28x28 up to 56x56 via
    roll/flip concatenation, but the result is only printed here.

    Args:
        train: 2-D array whose columns after the first are flat pixels.
        nb_classes: number of softmax output classes.

    Returns:
        The uncompiled ``models.Sequential`` network.
    """
    img_rows, img_cols = 56, 56

    stage_filters = (32, 64)  # the 128-filter third stage is disabled below
    nb_conv = 3

    # Reshape flat pixel columns to 28x28x1 images and scale to [0, 1].
    trainX = train[:, 1:].reshape(train.shape[0], 28, 28, 1).astype(float)
    trainX /= 255.0
    # Tile to 56x56: stack a row-rolled copy below, then a flipped
    # column-rolled copy to the right.
    trainX = np.concatenate([trainX, np.roll(trainX, 14, axis=1)], axis=1)
    trainX = np.concatenate([trainX, np.fliplr(np.roll(trainX, 7, axis=2))], axis=2)
    print(trainX.shape)

    cnn = models.Sequential()

    # Stage 1: two same-padded 3x3 convs, then a 2x2 max-pool (Keras 1 API).
    cnn.add(conv.Convolution2D(stage_filters[0], nb_conv, nb_conv,
                               activation="relu",
                               input_shape=(img_rows, img_cols, 1),
                               border_mode='same'))
    cnn.add(conv.Convolution2D(stage_filters[0], nb_conv, nb_conv,
                               activation="relu", border_mode='same'))
    cnn.add(conv.MaxPooling2D(strides=(2, 2)))

    # Stage 2: same pattern with 64 filters.
    cnn.add(conv.Convolution2D(stage_filters[1], nb_conv, nb_conv,
                               activation="relu", border_mode='same'))
    cnn.add(conv.Convolution2D(stage_filters[1], nb_conv, nb_conv,
                               activation="relu", border_mode='same'))
    cnn.add(conv.MaxPooling2D(strides=(2, 2)))

    # Classifier head.
    cnn.add(core.Flatten())
    cnn.add(core.Dropout(0.2))
    cnn.add(core.Dense(128, activation="relu"))  # 4096
    cnn.add(core.Dense(nb_classes, activation="softmax"))

    cnn.summary()
    return cnn
Exemple #7
0
def test_maxpooling_2d():
    """Smoke-test MaxPooling2D with 'valid' borders at two stride settings."""
    pool_size = (3, 3)
    # (samples, stack, rows, cols)
    data = np.ones((9, 7, 11, 12))

    for strides in ((1, 1), (2, 2)):
        layer = convolutional.MaxPooling2D(strides=strides,
                                           border_mode='valid',
                                           pool_size=pool_size)
        layer.input = K.variable(data)
        # Evaluate both the train and test phases, then the config.
        for train in (True, False):
            K.eval(layer.get_output(train))
        layer.get_config()
    def test_maxpooling_2d(self):
        """Exercise MaxPooling2D over all border/stride combinations."""
        nb_samples = 9
        stack_size = 7
        input_nb_row = 11
        input_nb_col = 12
        poolsize = (3, 3)

        data = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
        for ignore_border in (True, False):
            for stride in (None, (2, 2)):
                layer = convolutional.MaxPooling2D(
                    stride=stride, ignore_border=ignore_border,
                    poolsize=poolsize)
                layer.input = theano.shared(value=data)
                # Evaluate both phases, then check config serialization runs.
                for train in (True, False):
                    layer.get_output(train).eval()

                config = layer.get_config()
Exemple #9
0
def cpg_layers(params):
    """Assemble the (name, layer) pairs for the CpG branch of the model.

    Args:
        params: configuration object providing ``drop_in``, ``nb_filter``,
            ``filter_len``, ``pool_len``, ``activation``, ``l1``, ``l2``,
            ``drop_out``, ``nb_hidden`` and ``batch_norm``.

    Returns:
        List of ``(name, keras_layer)`` tuples in network order.
    """
    layers = []

    # Optional input dropout.
    if params.drop_in:
        layers.append(('xd', kcore.Dropout(params.drop_in)))

    # One conv + pool pair per configured filter count; convs are 1-D in
    # effect (nb_row=1) with shared L1/L2 weight regularization.
    weight_reg = kr.WeightRegularizer(l1=params.l1, l2=params.l2)
    for idx in range(len(params.nb_filter)):
        conv_layer = kconv.Convolution2D(nb_filter=params.nb_filter[idx],
                                         nb_row=1,
                                         nb_col=params.filter_len[idx],
                                         activation=params.activation,
                                         init='glorot_uniform',
                                         W_regularizer=weight_reg,
                                         border_mode='same')
        layers.append(('c%d' % (idx + 1), conv_layer))
        layers.append(('p%d' % (idx + 1),
                       kconv.MaxPooling2D(pool_size=(1, params.pool_len[idx]))))

    layers.append(('f1', kcore.Flatten()))
    if params.drop_out:
        layers.append(('f1d', kcore.Dropout(params.drop_out)))

    # Optional hidden layer: linear dense, optional batch-norm, then the
    # activation applied as its own layer (so it comes after the norm).
    if params.nb_hidden:
        layers.append(('h1', kcore.Dense(params.nb_hidden,
                                         activation='linear',
                                         init='glorot_uniform')))
        if params.batch_norm:
            layers.append(('h1b', knorm.BatchNormalization()))
        layers.append(('h1a', kcore.Activation(params.activation)))
        if params.drop_out:
            layers.append(('h1d', kcore.Dropout(params.drop_out)))
    return layers
Exemple #10
0
def model_main(
    nc: int,
    nf_l: int,
    nt_l: int,
    nf_h: int,
    nt_h: int,
    *,
    mw=False,
    preload_weights: str = None,
    opti="adam",
    noise=False,
    noise_amp=0.2,
) -> krm.Model:
    """Build and compile the two-branch (lo/hi input) 6-way classifier.

    Args:
        nc: number of channels per input.
        nf_l, nt_l: frequency/time dims of the "lo" input.
        nf_h, nt_h: frequency/time dims of the "hi" input.
        mw: multi-window mode — inputs gain a leading size-3 axis that is
            folded away by permute/reshape layers before the conv stacks.
        preload_weights: optional weights file, loaded by name (mw only).
        opti: optimizer name/instance passed to ``compile``.
        noise: if True, prepend Gaussian input noise to both branches.
        noise_amp: stddev of that noise.

    Returns:
        The compiled ``krm.Model``.
    """
    if mw:
        pre_shape: ty.Tuple[int, ...] = (3, )
    else:
        pre_shape = ()

    i_x_l = klc.Input(shape=pre_shape + (nc, nf_l, nt_l), name="input.lo")
    i_x_h = klc.Input(shape=pre_shape + (nc, nf_h, nt_h), name="input.hi")

    # Hi branch: two conv/conv/pool stages, then flatten.
    conv_stack_h = [
        kcv.Conv2D(8, (3, 3), name="conv_h.0.0", activation="elu"),
        kcv.Conv2D(8, (3, 3), name="conv_h.0.1", activation="elu"),
        kcv.MaxPooling2D((2, 2)),
        kcv.Conv2D(8, (3, 3), name="conv_h.1.0", activation="elu"),
        kcv.Conv2D(8, (3, 3), name="conv_h.1.1", activation="elu"),
        kcv.MaxPooling2D((2, 2)),
        klc.Flatten(name="conv_h.flatten"),
    ]

    # Lo branch: a single conv/conv/pool stage.
    conv_stack_l = [
        kcv.Conv2D(8, (3, 3), name="conv_l.0.0", activation="elu"),
        kcv.Conv2D(8, (3, 3), name="conv_l.0.1", activation="elu"),
        kcv.MaxPooling2D((2, 2)),
        klc.Flatten(name="conv_l.flatten"),
    ]

    if mw:
        # Fold the leading window axis into the trailing axis so the 2D
        # convs see one widened image per channel.
        conv_stack_h = [
            klc.Permute((1, 4, 2, 3)),
            klc.Reshape((-1, nc, nf_h), input_shape=(3, nc, nf_h, nt_h)),
            klc.Permute((2, 1, 3)),
        ] + conv_stack_h

        conv_stack_l = [
            klc.Permute((1, 4, 2, 3)),
            klc.Reshape((-1, nc, nf_l), input_shape=(3, nc, nf_l, nt_l)),
            klc.Permute((2, 1, 3)),
        ] + conv_stack_l

    if noise:
        # Noise is the very first layer, ahead of any mw reshaping.
        conv_stack_h.insert(0, kno.GaussianNoise(noise_amp, name="inoise_h"))
        conv_stack_l.insert(0, kno.GaussianNoise(noise_amp, name="inoise_l"))

    def thread(tensor, stack):
        # Pipe a tensor through a list of layers in order.
        for lyr in stack:
            tensor = lyr(tensor)
        return tensor

    conv_l = thread(i_x_l, conv_stack_l)
    conv_h = thread(i_x_h, conv_stack_h)

    # Merge both branches into a small dense head; layer names carry a
    # ".mw" suffix in multi-window mode so by-name weight loading works.
    dn_suff = ".mw" if mw else ""
    y = concatenate([conv_l, conv_h])
    y = thread(y, [
        klc.Dense(24, name="dense.0" + dn_suff, activation="elu"),
        klc.Dropout(0.5),
        klc.Dense(24, name="dense.1" + dn_suff, activation="elu"),
    ])

    y = klc.Dense(6, name="y" + dn_suff, activation="softmax")(y)
    m = krm.Model(inputs=[i_x_l, i_x_h], outputs=y)
    m.compile(optimizer=opti, loss="categorical_crossentropy")

    if preload_weights and mw:
        m.load_weights(preload_weights, by_name=True)

    return m
# Scale pixel values from [0, 255] into [0, 1].
trainX = trainX.astype(float)
trainX /= 255.0  # preprocess the data

# One-hot encode the labels taken from column 0 of the raw data.
trainY = kutils.to_categorical(train[:, 0])
nb_classes = trainY.shape[1]

cnn = models.Sequential()

# Stage 1: pad -> conv -> pad -> conv -> pool with 32 filters (Keras 1 API).
# NOTE(review): input_shape=(1, 48, 48) assumes channels-first 48x48
# grayscale images — confirm against how trainX is reshaped upstream.
cnn.add(conv.ZeroPadding2D(
    (1, 1),
    input_shape=(1, 48, 48),
))
cnn.add(conv.Convolution2D(32, 3, 3, activation="relu"))
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(32, 3, 3, activation="relu"))
cnn.add(conv.MaxPooling2D(strides=(2, 2)))

# Stage 2: same pattern with 64 filters.
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(64, 3, 3, activation="relu"))
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(64, 3, 3, activation="relu"))
cnn.add(conv.MaxPooling2D(strides=(2, 2)))

# Optional third stage, currently disabled.
# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# Scale pixel values from [0, 255] into [0, 1].
trainX = trainX.astype(float)
trainX /= 255.0

# One-hot encode the labels taken from column 0 of the raw data.
trainY = kutils.to_categorical(train[:, 0])
nb_classes = trainY.shape[1]

cnn = models.Sequential()

# Stage 1: pad -> conv -> relu -> pool.  filters/kernel/pool/img_rows/
# img_cols are module-level settings defined elsewhere in this script.
cnn.add(conv.ZeroPadding2D(
    (1, 1),
    input_shape=(1, img_rows, img_cols),
))

cnn.add(conv.Convolution2D(filters[0], kernel, kernel))
cnn.add(core.Activation('relu'))
cnn.add(conv.MaxPooling2D(strides=(pool, pool)))

# Stage 2: same pattern with the second filter count.
cnn.add(conv.ZeroPadding2D((1, 1)))

cnn.add(conv.Convolution2D(filters[1], kernel, kernel))
cnn.add(core.Activation('relu'))
cnn.add(conv.MaxPooling2D(strides=(pool, pool)))

cnn.add(conv.ZeroPadding2D((1, 1)))

# Classifier head: flatten -> dropout -> dense -> softmax over nb_classes.
cnn.add(core.Flatten())
cnn.add(core.Dropout(0.5))
cnn.add(core.Dense(128))
cnn.add(core.Activation('relu'))
cnn.add(core.Dense(nb_classes))
cnn.add(core.Activation('softmax'))
Exemple #13
0
def convnet_alexnet_lion_keras(image_dims):
    """AlexNet-style functional CNN for 6-class classification.

    Args:
        image_dims: input image shape for the Input layer.

    Returns:
        An uncompiled ``models.Model`` (input -> 6-way softmax).
    """
    #    model = Sequential()
    #    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))

    NR_CLASSES = 6

    def conv_relu(tensor, n_filters, size, name):
        # Valid-padded square conv with relu + glorot init (Keras 1 API).
        return convolutional.Convolution2D(n_filters,
                                           size,
                                           size,
                                           border_mode='valid',
                                           name=name,
                                           activation='relu',
                                           init='glorot_uniform')(tensor)

    image_in = layers.Input(shape=image_dims, name="Input")

    # Feature extractor: conv_1 .. conv_5 with three 3x3 max-pools and
    # zero-padding between the pooled stages, as in AlexNet.
    x = conv_relu(image_in, 96, 11, "conv_1")
    x = convolutional.MaxPooling2D(pool_size=(3, 3), name="pool_1")(x)
    x = convolutional.ZeroPadding2D(padding=(1, 1), name="zero_padding_1")(x)
    x = conv_relu(x, 256, 3, "conv_2")
    x = convolutional.MaxPooling2D(pool_size=(3, 3), name="pool_2")(x)
    x = keras.layers.convolutional.ZeroPadding2D(
        padding=(1, 1), name="zero_padding_2")(x)
    x = conv_relu(x, 384, 3, "conv_3")
    x = conv_relu(x, 384, 3, "conv_4")
    x = conv_relu(x, 256, 3, "conv_5")
    x = convolutional.MaxPooling2D(pool_size=(3, 3), name="pool_3")(x)

    # Classifier head: two 4096-wide relu layers with dropout, then softmax.
    # (Layer names kept identical to the original for weight compatibility.)
    x = core.Flatten(name="flatten")(x)
    x = core.Dense(4096,
                   name="fc_1",
                   activation='relu',
                   init='glorot_uniform')(x)
    x = core.Dropout(0.5, name="fc_1_dropout")(x)
    x = core.Dense(4096,
                   name="Output",
                   activation='relu',
                   init='glorot_uniform')(x)
    x = core.Dropout(0.5, name="Output_dropout")(x)
    predictions = core.Dense(NR_CLASSES,
                             name="fc_2",
                             activation='softmax',
                             init='glorot_uniform')(x)

    return models.Model([image_in], [predictions])
# convert to 2D images
# Reshape the flat feature vectors into channels-first 1x16x16 images.
x_train = np.reshape(ds.train.x, (ds.train.N, 1, 16, 16))
x_test = np.reshape(ds.test.x, (ds.test.N, 1, 16, 16))

model = kmodel.Sequential()

# LeNet-style first block: 4 feature maps of 5x5, tanh, 2x2 pool
# (Keras 1 Convolution2D API).
model.add(
    kconv.Convolution2D(nb_filter=4,
                        nb_row=5,
                        nb_col=5,
                        input_shape=(1, 16, 16),
                        border_mode='valid'))
model.add(klcore.Activation('tanh'))
# instead of average pooling, we use max pooling
model.add(kconv.MaxPooling2D(pool_size=(2, 2)))

# the 12 feature maps in this layer are connected in a specific pattern to the below layer, but it is not possible
# do this in keras easily. in fact, I don't know how keras connects the feature maps in one layer to the next.
model.add(kconv.Convolution2D(nb_filter=12, nb_row=5, nb_col=5))
model.add(klcore.Activation('tanh'))
model.add(kconv.MaxPooling2D(pool_size=(2, 2)))

# 10-way softmax classifier head.
model.add(klcore.Flatten())
model.add(klcore.Dense(output_dim=10))
model.add(klcore.Activation('softmax'))

model.compile(optimizer='sgd', loss='categorical_crossentropy')

model.fit(x=x_train,
          y=ds.train.y,
Exemple #15
0
# Scale pixel values from [0, 255] into [0, 1].
trainX = trainX.astype(float)
trainX /= 255.0

# One-hot encode the labels taken from column 0 of the raw training data.
trainY = kutils.to_categorical(trainData[:, 0])
nb_classes = trainY.shape[1]

cnn = models.Sequential()
# Two valid-padded convs on 1x28x28 inputs, then a default 2x2 max-pool
# (Keras 1 Convolution2D / border_mode API).
cnn.add(
    conv.Convolution2D(nb_filters,
                       nb_conv,
                       nb_conv,
                       border_mode="valid",
                       input_shape=(1, 28, 28),
                       activation="relu"))
cnn.add(conv.Convolution2D(nb_filters, nb_conv, nb_conv, activation="relu"))
cnn.add(conv.MaxPooling2D())
cnn.add(core.Dropout(0.25))
# Dense head 128 -> 64 -> softmax with light dropout between layers.
cnn.add(core.Flatten())
cnn.add(core.Dense(128, activation="relu"))
cnn.add(core.Dropout(0.15))
cnn.add(core.Dense(64, activation="relu"))
cnn.add(core.Dropout(0.15))
cnn.add(core.Dense(nb_classes, activation="softmax"))

# NOTE(review): this SGD optimizer is built but never used — compile()
# below passes optimizer="adadelta".  Also "mean_squared_error" is an odd
# loss for a softmax classifier; "categorical_crossentropy" is the usual
# choice.  Confirm intent before changing.
sgd = optm.sgd(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
cnn.compile(
    loss="mean_squared_error",
    optimizer="adadelta",
)

cnn.fit(trainX,
Exemple #16
0
    pass

# Three sub-networks (Python 2 / Keras 1 era): a conv branch over stacked
# frames, a location branch over previous bounding boxes, and a merged
# regression head predicting the next 4 box coordinates.
conv_model = MODELS.Sequential()
loc_model = MODELS.Sequential()
model = MODELS.Sequential()

if conv1:
    # Conv branch: one strided conv (optionally max-pooled), flattened
    # into a dense layer.  conv1*/pool1/fc1* settings come from elsewhere
    # in this script.
    conv_model.add(
        CONV.Convolution2D(conv1_filters,
                           conv1_filter_size,
                           conv1_filter_size,
                           subsample=(conv1_stride, conv1_stride),
                           border_mode='valid',
                           input_shape=(prev_frames, image_size, image_size)))
    if pool1:
        conv_model.add(CONV.MaxPooling2D(pool_size=(pool1_size, pool1_size)))
    conv_model.add(CORE.Activation(conv1_act))
    conv_model.add(CORE.Flatten())
    conv_model.add(CORE.Dense(fc1_size))
    conv_model.add(CORE.Activation(fc1_act))
# Location branch: dense layer over 4 box coords per previous frame.
loc_model.add(CORE.Dense(fc1_size, input_shape=(prev_frames * 4, )))
loc_model.add(CORE.Activation(fc1_act))
#model.add(CONV.Convolution2D(conv2_filters, conv2_filter_size, conv2_filter_size, border_mode='valid'))
#model.add(CONV.MaxPooling2D(pool_size=(pool2_size, pool2_size)))
#model.add(CORE.Activation(conv2_act))
# Concatenate both branches (legacy Merge layer) and regress 4 outputs,
# with weights initialized to zero.
model.add(CORE.Merge([conv_model, loc_model], mode='concat'))
model.add(CORE.Dense(4, init='zero'))
model.add(CORE.Activation(fc2_act))

print 'Building bouncing MNIST generator'