Example #1
model = Sequential()

# Conv2D layer, output shape = (32, 28, 28)
model.add(
    Convolution2D(
        batch_input_shape=(None, 1, 28, 28),
        filters=32,
        kernel_size=5,
        strides=1,
        padding='same',
        data_format='channels_first',
    ))
model.add(Activation('relu'))

# MaxPool2D, 2D max pooling layer
# output shape = (32, 14, 14)
model.add(
    MaxPool2D(
        pool_size=2,
        strides=2,
        padding='same',
        data_format='channels_first',
    ))

# output shape = (64, 14, 14)
model.add(
    Convolution2D(
        filters=64,
        kernel_size=5,
        strides=1,
        padding='same',
        data_format='channels_first',
    ))
model.add(Activation('relu'))

# output shape = (64, 7, 7)
Example #2
#example:
print(y_train[0])

model = Sequential()

model.add(
    Conv2D(filters=16,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
#model.add(Conv2D(filters = 16, kernel_size = (3, 3), activation='relu'))
#model.add(BatchNormalization())
model.add(MaxPool2D(strides=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
#model.add(Conv2D(filters = 32, kernel_size = (3, 3), activation='relu'))
#model.add(BatchNormalization())
model.add(MaxPool2D(strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
Example #3
def ResNet152(include_top=True,
              weights='imagenet',
              input_tensor=None,
              input_shape=None,
              pooling=None,
              classes=1000):
    """Instantiates the ResNet152 architecture.

        Optionally loads weights pre-trained
        on ImageNet. Note that when using TensorFlow,
        for best performance you should set
        `image_data_format='channels_last'` in your Keras config
        at ~/.keras/keras.json.

        The model and the weights are compatible with both
        TensorFlow and Theano. The data format
        convention used by the model is the one
        specified in your Keras config file.

        # Arguments
            include_top: whether to include the fully-connected
                layer at the top of the network.
            weights: one of `None` (random initialization),
                  'imagenet' (pre-training on ImageNet),
                  or the path to the weights file to be loaded.
            input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
                to use as image input for the model.
            input_shape: optional shape tuple, only to be specified
                if `include_top` is False (otherwise the input shape
                has to be `(224, 224, 3)` (with `channels_last` data format)
                or `(3, 224, 224)` (with `channels_first` data format)).
                It should have exactly 3 input channels,
                and width and height should be no smaller than 197.
                E.g. `(200, 200, 3)` would be one valid value.
            pooling: Optional pooling mode for feature extraction
                when `include_top` is `False`.
                - `None` means that the output of the model will be
                    the 4D tensor output of the
                    last convolutional layer.
                - `avg` means that global average pooling
                    will be applied to the output of the
                    last convolutional layer, and thus
                    the output of the model will be a 2D tensor.
                - `max` means that global max pooling will
                    be applied.
            classes: optional number of classes to classify images
                into, only to be specified if `include_top` is True, and
                if no `weights` argument is specified.

        # Returns
            A Keras model instance.

        # Raises
            ValueError: in case of invalid argument for `weights`,
                or invalid input shape.
    """
    WEIGHTS_PATH = None

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same',
               name='conv1')(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPool2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    for i in range(1, 8):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='b' + str(i))

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    for i in range(1, 36):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b' + str(i))

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = AvgPool2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAvgPool2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPool2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='resnet152')

    # load weights
    if weights == 'imagenet':
        weights_path = get_file('resnet152_weights_tf.h5',
                                WEIGHTS_PATH,
                                cache_subdir='models')
        if include_top:
            model.load_weights(weights_path)
        else:
            f = h5py.File(weights_path, 'r')
            layer_names = [name for name in f.attrs['layer_names']]

            for i, layer in enumerate(model.layers):
                g = f[layer_names[i]]
                weights = [g[name] for name in g.attrs['weight_names']]
                layer.set_weights(weights)

        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

        if K.image_data_format() == 'channels_first' and K.backend(
        ) == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')

    elif weights is not None:
        model.load_weights(weights)

    return model
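A minimal usage sketch for the function above, based on its docstring (this call is an assumption and not part of the original example; note that WEIGHTS_PATH is None in this snippet, so 'imagenet' weights cannot actually be downloaded):

# hedged sketch: feature extraction without the classification head
feature_extractor = ResNet152(include_top=False,
                              weights=None,
                              input_shape=(224, 224, 3),
                              pooling='avg')
features = feature_extractor.predict(images)  # images: array of shape (batch, 224, 224, 3)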
Example #4
conv_0 = Conv2D(num_filters,
                kernel_size=(filter_sizes[0], embedding_dim),
                padding='valid',
                kernel_initializer='normal',
                activation='relu')(reshape)
conv_1 = Conv2D(num_filters,
                kernel_size=(filter_sizes[1], embedding_dim),
                padding='valid',
                kernel_initializer='normal',
                activation='relu')(reshape)
conv_2 = Conv2D(num_filters,
                kernel_size=(filter_sizes[2], embedding_dim),
                padding='valid',
                kernel_initializer='normal',
                activation='relu')(reshape)

maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1),
                      strides=(1, 1),
                      padding='valid')(conv_0)
maxpool_1 = MaxPool2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1),
                      strides=(1, 1),
                      padding='valid')(conv_1)
maxpool_2 = MaxPool2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1),
                      strides=(1, 1),
                      padding='valid')(conv_2)

concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])
flatten = Flatten()(concatenated_tensor)
dropout = Dropout(drop)(flatten)
output = Dense(5, activation='softmax')(dropout)

# this creates a model that includes the input layer and all the layers above
model = Model(inputs=inputs, outputs=output)
# 2. Model

# model = Sequential()
# model.add(Con))
# model.add(Flatten())
# model.add(Dense(1))
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Conv2D(
    32,
    (3, 3),
    padding='same',
))
model.add(MaxPool2D(
    pool_size=2))  # MaxPool costs almost nothing; it's convenient to think of Conv2D + MaxPool2D as one layer
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

model.summary()

# model.save('./model/model_test01.h5')

# 3. Training
from keras.callbacks import EarlyStopping

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['acc'])  # metrics = ['acc']
earlystopping = EarlyStopping(monitor='loss', patience=3, mode='min')
hist = model.fit(x_train,
Example #6
            training_img.append(img)
            training_txt.append(encode_to_labels(txt)) 
        
        if i == 150000:
            flag = 1
            break
        i+=1
    if flag == 1:
        break
train_padded_txt = pad_sequences(training_txt, maxlen=max_label_len, padding='post', value = len(char_list))
valid_padded_txt = pad_sequences(valid_txt, maxlen=max_label_len, padding='post', value = len(char_list))

inputs = Input(shape=(32,128,1))
 
conv_1 = Conv2D(64, (3,3), activation = 'relu', padding='same')(inputs)
pool_1 = MaxPool2D(pool_size=(2, 2), strides=2)(conv_1)
 
conv_2 = Conv2D(128, (3,3), activation = 'relu', padding='same')(pool_1)
pool_2 = MaxPool2D(pool_size=(2, 2), strides=2)(conv_2)
 
conv_3 = Conv2D(256, (3,3), activation = 'relu', padding='same')(pool_2)
 
conv_4 = Conv2D(256, (3,3), activation = 'relu', padding='same')(conv_3)
pool_4 = MaxPool2D(pool_size=(2, 1))(conv_4)
 
conv_5 = Conv2D(512, (3,3), activation = 'relu', padding='same')(pool_4)
batch_norm_5 = BatchNormalization()(conv_5)
 
conv_6 = Conv2D(512, (3,3), activation = 'relu', padding='same')(batch_norm_5)
batch_norm_6 = BatchNormalization()(conv_6)
pool_6 = MaxPool2D(pool_size=(2, 1))(batch_norm_6)
Example #7
# setup model

model = Sequential()

# 1st Conv2D layer
model.add(
    Convolution2D(filters=32,
                  kernel_size=[5, 5],
                  padding='same',
                  input_shape=(28, 28, 1)))
model.add(Activation('relu'))

model.add(MaxPool2D(
    pool_size=(2, 2),
    strides=(2, 2),
    padding="same",
))

# 2nd Conv2D layer
model.add(Convolution2D(
    filters=64,
    kernel_size=(5, 5),
    padding='same',
))

model.add(Activation('relu'))
model.add(MaxPool2D(
    pool_size=(2, 2),
    strides=(2, 2),
    padding="same",
))
Example #8
def make_model(in_shape, layers, initial_depth, prefix=''):
    # type: (Tuple[int, int, int], int, int, str) -> keras.models.Model
    """
    A simple, crop-free UNET for quick experimentation and baseline references
    :param in_shape:
    :param layers:
    :param initial_depth:
    :param prefix:
    :return:
    >>> simple_model = make_model((32, 32, 1), 2, 8, prefix='HI')
    >>> len(simple_model.layers)
    20
    >>> simple_model.summary()
    __________________________________________________________________________________________________
    Layer (type)                    Output Shape         Param #     Connected to
    ==================================================================================================
    UNET_HI_Input (InputLayer)      (None, 32, 32, 1)    0
    __________________________________________________________________________________________________
    CONV_HI_0 (Conv2D)              (None, 32, 32, 8)    80          UNET_HI_Input[0][0]
    __________________________________________________________________________________________________
    BN_HI_0 (BatchNormalization)    (None, 32, 32, 8)    32          CONV_HI_0[0][0]
    __________________________________________________________________________________________________
    RELU_HI_0 (Activation)          (None, 32, 32, 8)    0           BN_HI_0[0][0]
    __________________________________________________________________________________________________
    MP_HI_0 (MaxPooling2D)          (None, 16, 16, 8)    0           RELU_HI_0[0][0]
    __________________________________________________________________________________________________
    CONV_HI_1 (Conv2D)              (None, 16, 16, 16)   1168        MP_HI_0[0][0]
    __________________________________________________________________________________________________
    BN_HI_1 (BatchNormalization)    (None, 16, 16, 16)   64          CONV_HI_1[0][0]
    __________________________________________________________________________________________________
    RELU_HI_1 (Activation)          (None, 16, 16, 16)   0           BN_HI_1[0][0]
    __________________________________________________________________________________________________
    MP_HI_1 (MaxPooling2D)          (None, 8, 8, 16)     0           RELU_HI_1[0][0]
    __________________________________________________________________________________________________
    DECONV_HI_1 (Conv2DTranspose)   (None, 16, 16, 16)   2320        MP_HI_1[0][0]
    __________________________________________________________________________________________________
    SKIP_HI_1 (Concatenate)         (None, 16, 16, 24)   0           DECONV_HI_1[0][0]
                                                                     MP_HI_0[0][0]
    __________________________________________________________________________________________________
    BN_HI_U1 (BatchNormalization)   (None, 16, 16, 24)   96          SKIP_HI_1[0][0]
    __________________________________________________________________________________________________
    RELU_HI_U1 (Activation)         (None, 16, 16, 24)   0           BN_HI_U1[0][0]
    __________________________________________________________________________________________________
    DECONV_HI_0 (Conv2DTranspose)   (None, 32, 32, 8)    1736        RELU_HI_U1[0][0]
    __________________________________________________________________________________________________
    SKIP_HI_0 (Concatenate)         (None, 32, 32, 9)    0           DECONV_HI_0[0][0]
                                                                     UNET_HI_Input[0][0]
    __________________________________________________________________________________________________
    BN_HI_U0 (BatchNormalization)   (None, 32, 32, 9)    36          SKIP_HI_0[0][0]
    __________________________________________________________________________________________________
    RELU_HI_U0 (Activation)         (None, 32, 32, 9)    0           BN_HI_U0[0][0]
    ==================================================================================================
    Total params: 5,532
    Trainable params: 5,418
    Non-trainable params: 114
    __________________________________________________________________________________________________
    """
    in_layer = Input(in_shape, name='UNET_{}_Input'.format(prefix))

    start_x = in_layer
    skip_layers = []
    for c_layer in range(layers + 1):
        skip_layers += [start_x]

        x = Conv2D(filters=initial_depth * 2 ** c_layer,
                   kernel_size=(3, 3),
                   activation='linear',
                   padding='same',
                   name='CONV_{}_{}'.format(prefix, c_layer))(start_x)
        x = BatchNormalization(name='BN_{}_{}'.format(prefix, c_layer))(x)
        x = Activation('relu', name='RELU_{}_{}'.format(prefix, c_layer))(x)
        start_x = MaxPool2D((2, 2), name='MP_{}_{}'.format(prefix, c_layer))(x)
    start_x = x
    for c_layer, c_skip in reversed(list(zip(range(layers), skip_layers))):
        x = Deconv2D(filters=initial_depth * 2 ** c_layer,
                     kernel_size=(3, 3),
                     strides=(2, 2),
                     activation='linear',
                     padding='same',
                     name='DECONV_{}_{}'.format(prefix, c_layer)
                     )(start_x)
        x = concatenate([x, c_skip], name='SKIP_{}_{}'.format(prefix, c_layer))
        x = BatchNormalization(name='BN_{}_U{}'.format(prefix, c_layer))(x)
        x = Activation('relu', name='RELU_{}_U{}'.format(prefix, c_layer))(x)
        start_x = x
    return Model(inputs=[in_layer], outputs=[x], name='UNET_{}'.format(prefix))
Example #9
def get_model(img_shape, classes_num, last_activation):
    block0_input = Input(shape=(img_shape, img_shape, 3))

    block1_conv1 = Conv2D(64, (3, 3), padding="same",
                          activation="relu")(block0_input)
    block1_conv2 = Conv2D(64, (3, 3), padding="same",
                          activation="relu")(block1_conv1)
    block1_conv3 = Conv2D(64, (3, 3), padding="same",
                          activation="relu")(block1_conv2)
    block1_pool1 = MaxPool2D(2)(block1_conv3)

    block2_conv1 = Conv2D(128, (3, 3), padding="same",
                          activation="relu")(block1_pool1)
    block2_conv2 = Conv2D(128, (3, 3), padding="same",
                          activation="relu")(block2_conv1)
    block2_conv3 = Conv2D(128, (3, 3), padding="same",
                          activation="relu")(block2_conv2)
    block2_pool1 = MaxPool2D(2)(block2_conv3)

    block3_conv1 = Conv2D(256, (3, 3), padding="same",
                          activation="relu")(block2_pool1)
    block3_conv2 = Conv2D(256, (3, 3), padding="same",
                          activation="relu")(block3_conv1)
    block3_conv3 = Conv2D(256, (3, 3), padding="same",
                          activation="relu")(block3_conv2)
    block3_pool1 = MaxPool2D(2)(block3_conv3)

    block4_conv1 = Conv2D(512, (3, 3), padding="same",
                          activation="relu")(block3_pool1)
    block4_conv2 = Conv2D(512, (3, 3), padding="same",
                          activation="relu")(block4_conv1)
    block4_conv3 = Conv2D(512, (3, 3), padding="same",
                          activation="relu")(block4_conv2)
    block4_upsa1 = UpSampling2D(2, interpolation="bilinear")(block4_conv3)

    block5_conc1 = Concatenate()([block3_conv3, block4_upsa1])
    block5_conv1 = Conv2D(256, (3, 3), padding="same",
                          activation="relu")(block5_conc1)
    block5_conv2 = Conv2D(256, (3, 3), padding="same",
                          activation="relu")(block5_conv1)
    block5_conv3 = Conv2D(256, (3, 3), padding="same",
                          activation="relu")(block5_conv2)
    block5_upsa1 = UpSampling2D(2, interpolation="bilinear")(block5_conv3)

    block6_conc1 = Concatenate()([block2_conv3, block5_upsa1])
    block6_conv1 = Conv2D(128, (3, 3), padding="same",
                          activation="relu")(block6_conc1)
    block6_conv2 = Conv2D(128, (3, 3), padding="same",
                          activation="relu")(block6_conv1)
    block6_conv3 = Conv2D(128, (3, 3), padding="same",
                          activation="relu")(block6_conv2)
    block6_upsa1 = UpSampling2D(2, interpolation="bilinear")(block6_conv3)

    block7_conc1 = Concatenate()([block1_conv3, block6_upsa1])
    block7_conv1 = Conv2D(64, (3, 3), padding="same",
                          activation="relu")(block7_conc1)
    block7_conv2 = Conv2D(64, (3, 3), padding="same",
                          activation="relu")(block7_conv1)
    block7_conv3 = Conv2D(64, (3, 3), padding="same",
                          activation="relu")(block7_conv2)

    block8_output = Conv2D(classes_num, (1, 1),
                           padding="same",
                           activation=last_activation)(block7_conv3)

    return Model(inputs=block0_input, outputs=block8_output)
Example #10
epochs = 30
batch_size = 128

# this returns a tensor
print("Creating Model...")

model = Sequential()

model.add(
    Embedding(input_dim=len(embeddings_matrix),
              output_dim=embedding_dim,
              weights=[embeddings_matrix],
              input_length=sequence_length,
              trainable=False))
model.add(Bidirectional(LSTM(300, dropout=0.3)))
model.add(MaxPool2D(pool_size=(1, 10), strides=(1, 1), padding='valid'))
model.add(Bidirectional(LSTM(300, dropout=0.3)))
model.add(MaxPool2D(pool_size=(1, 10), strides=(1, 1), padding='valid'))
model.add(Dense(units=2, activation='softmax'))
model.summary()

checkpoint = ModelCheckpoint(
    './lstm/lstm_word_pianpang_weights.{epoch:03d}-{val_acc:.4f}.hdf5',
    monitor='val_acc',
    verbose=1,
    save_best_only=True,
    mode='auto')
adam = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# TensorBoard
"""
@author: omprakash
"""

from keras.models import Sequential  # initialise neural network
from keras.layers import Convolution2D  # convolutional layer add
from keras.layers import MaxPool2D
from keras.layers import Flatten
from keras.layers import Dense  # to add fully connected layer in a cnn

se = Sequential()

# Convolution step - 32 filters of size 3x3
se.add(Convolution2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))
#pooling
se.add(MaxPool2D(pool_size=(2, 2)))

# You can add more convolution or fully connected layers to increase accuracy
se.add(Convolution2D(32, (3, 3), activation='relu'))
se.add(MaxPool2D(pool_size=(2, 2)))

#flattening
se.add(Flatten())

#Fully connected
se.add(Dense(128, activation='relu'))
# when there are more than 2 categories, use a softmax activation function instead
se.add(Dense(1, activation='sigmoid'))

#compiling the CNN
# with more than 2 outcomes, use categorical_crossentropy
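A minimal sketch of the compile step these comments lead up to (this call is an assumption, not part of the original snippet; the loss follows the binary setup described above):

# hedged sketch: single sigmoid output, so binary_crossentropy
se.compile(optimizer='adam',
           loss='binary_crossentropy',
           metrics=['accuracy'])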
Example #12
## Normalization
#from sklearn.preprocessing import MinMaxScaler
x_train = x_train.reshape(60000, 28, 28, 1).astype('float32') / 255  ## is the float cast really necessary?
x_test = x_test.reshape(10000, 28, 28, 1).astype('float32') / 255


from keras.models import Model
from keras.layers import Conv2D, Dense, Flatten, MaxPool2D, Dropout, Input

input1 = Input(shape = (28,28,1))
Conv1 = Conv2D(32, (3,3),activation='elu', padding = 'same', input_shape=(28,28,1))(input1)
Conv1 = Dropout(0.2)(Conv1)
Conv1 = Conv2D(32, (3,3),padding = 'same',activation='elu')(Conv1)
Conv1 = Dropout(0.2)(Conv1)

Conv1 = MaxPool2D((2,2))(Conv1)
Conv1 = Conv2D(64, (3,3),padding = 'same',activation='elu')(Conv1)
Conv1 = Dropout(0.2)(Conv1)
Conv1 = Conv2D(64, (3,3),padding = 'same',activation='elu')(Conv1)
Conv1 = Dropout(0.2)(Conv1)

Conv1 = MaxPool2D((2,2))(Conv1)

Conv1 = Conv2D(128, (3,3),padding = 'same',activation='elu')(Conv1)
Conv1 = Dropout(0.2)(Conv1)
Conv1 = Conv2D(128, (3,3),padding = 'same',activation='elu')(Conv1)
Conv1 = Dropout(0.2)(Conv1)

Conv1 = Flatten()(Conv1)

Dense1 = Dense(200, activation='elu')(Conv1)
Example #13
def mlp(X, Y):  # training and validation generators
    model = Sequential()  # initialise model
    model.add(
        Conv2D(64,
               kernel_size=3,
               activation='relu',
               strides=(2, 2),
               input_shape=(240, 240, 3)))  # input layer
    model.add(MaxPool2D(pool_size=(2, 2), padding='same'))  #padding added
    model.add(
        Conv2D(128,
               kernel_size=3,
               activation='relu',
               strides=(2, 2),
               activity_regularizer=regularizers.l2(0.001))
    )  # L2 regularisation added to the convolutional layer with lambda parameter 0.001
    model.add(MaxPool2D(pool_size=(2, 2), padding='same'))
    model.add(Flatten())  # flatten to allow connection to the fully connected layer
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.7))
    model.add(Dense(1, activation='sigmoid'))
    adam = optimizers.Adam(lr=0.1,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=None,
                           decay=0.0,
                           amsgrad=False)  # optimizer created
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])  # back propagation
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=100),
        ModelCheckpoint(filepath='best_model_glasses.h5',
                        monitor='val_loss',
                        save_best_only=True)
    ]  # condition for early stopping when the model stops improving
    history = model.fit_generator(
        X,
        steps_per_epoch=X.n / X.batch_size,
        epochs=100,  # Maximum number of forward + back props
        validation_data=Y,
        validation_steps=Y.n / Y.batch_size,
        callbacks=callbacks)
    model.save_weights('first_try.h5')
    model.summary()
    # plot training graphs after training (accuracy and loss achieved)
    print(history.history.keys())
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()

    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
def Network(height, width, depth, weights=None):

    # Height: Image Height
    # Width: Image Width
    # Depth: Image Depth
    # Weights: learned weights

    # we will create this network with 3 classes
    classes = 3

    # Let's create the network
    model = Sequential()

    # Add 2 Convolution layers with 'relu' activations
    # We will add one max pooling layer
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               input_shape=(depth, height, width)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu',
                     padding='same'))
    model.add(MaxPool2D(pool_size=(2, 2)))

    # Add 4 more Convolution layers with one max pool
    model.add(
        Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(
        Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(
        Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(
        Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPool2D(2, 2))

    # Add 4 More Convolution layers with another max pool
    model.add(
        Conv2D(256, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(
        Conv2D(256, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(
        Conv2D(256, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(
        Conv2D(256, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPool2D(2, 2))

    # Here we will convert 2D feature maps to 1D
    model.add(Flatten())

    # Add a Fully connected Layer with 50% drop-out
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.50))

    # One more Fully connected layer here
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.50))

    # Final layer with 3 outputs
    model.add(Dense(classes, activation='softmax'))

    # If learned weights are provided, load them into the network
    if weights is not None:
        model.load_weights(weights)

    return model
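A hypothetical call to the Network helper above (the arguments are assumptions); because input_shape is ordered (depth, height, width), this assumes image_data_format='channels_first' in the Keras config:

# hedged sketch, assuming channels_first data format
model = Network(height=224, width=224, depth=3)
model.summary()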
Example #15
def build_model(do_batch_norm, dropout, weight_decay, initial_learning_rate):
    model = Sequential()
    model.add(
        Conv2D(
            16,
            (3, 3),
            padding='same',
            activation='relu',
            input_shape=(32, 32, 3),
            kernel_regularizer=regularizers.l2(weight_decay)
        )
    )

    if do_batch_norm:
        model.add(BatchNormalization())

    model.add(
        Conv2D(
            16,
            (3, 3),
            activation='relu',
            padding='same',
            kernel_regularizer=regularizers.l2(weight_decay)

        )
    )
    if do_batch_norm:
        model.add(BatchNormalization())
    model.add(MaxPool2D())

    model.add(
        Conv2D(
            16,
            (3, 3),
            padding='same',
            activation='relu',
            kernel_regularizer=regularizers.l2(weight_decay)
        )
    )

    if do_batch_norm:
        model.add(BatchNormalization())
    model.add(
        Conv2D(
            32,
            (3, 3),
            activation='relu',
            padding='same',
            kernel_regularizer=regularizers.l2(weight_decay)

        )
    )
    if do_batch_norm:
        model.add(BatchNormalization())
    model.add(MaxPool2D())

    model.add(
        Conv2D(
            32,
            (3, 3),
            padding='same',
            activation='relu',
            kernel_regularizer=regularizers.l2(weight_decay)
        )
    )

    if do_batch_norm:
        model.add(BatchNormalization())

    model.add(
        Conv2D(
            64,
            (3, 3),
            padding='same',
            activation='relu',
            kernel_regularizer=regularizers.l2(weight_decay)
        )
    )
    model.add(MaxPool2D())
    model.add(Flatten())

    if do_batch_norm:
        model.add(BatchNormalization())

    model.add(Dropout(dropout))

    model.add(Dense(10, activation='softmax', kernel_regularizer=regularizers.l2(weight_decay)))

    sgd = keras.optimizers.SGD(lr=initial_learning_rate, momentum=0.9, nesterov=True)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=sgd,
        metrics=['accuracy']
    )

    model.summary()

    return model
Example #16
def get_test_model(params_transform, params_train, channels):

    # params set
    batch_size = int(params_train['batch_size'])
    crop_size_x = int(params_transform['crop_size_x'])
    crop_size_y = int(params_transform['crop_size_y'])
    stride = int(params_transform['stride'])

    stage_num = 6

    # input output
    image = Input(shape=(crop_size_y, crop_size_x, channels),
                  batch_shape=(batch_size, crop_size_y, crop_size_x, channels),
                  name='image')
    net_input = [image]

    image_padded = ZeroPadding2D()(image)
    conv1_1 = Conv2D(filters=64,kernel_size=(3,3),activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(),name='conv1_1')(image_padded)
    conv1_1_padded = ZeroPadding2D()(conv1_1)
    conv1_2 = Conv2D(filters=64,kernel_size=(3,3),activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(),name='conv1_2')(conv1_1_padded)
    pool1_stage1 = MaxPool2D(pool_size=(2,2),strides=2,name='pool1_stage1')(conv1_2)

    pool1_stage1_padded = ZeroPadding2D()(pool1_stage1)
    conv2_1 = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv2_1')(pool1_stage1_padded)
    conv2_1_padded = ZeroPadding2D()(conv2_1)
    conv2_2 = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv2_2')(conv2_1_padded)
    # conv2_2 = ZeroPadding2D()(conv2_2)
    pool2_stage1 = MaxPool2D(pool_size=(2,2),strides=2,name='pool2_stage1')(conv2_2)

    pool2_stage1_padded = ZeroPadding2D()(pool2_stage1)
    conv3_1 = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv3_1')(pool2_stage1_padded)
    conv3_1_padded = ZeroPadding2D()(conv3_1)
    conv3_2 = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv3_2')(conv3_1_padded)
    conv3_2_padded = ZeroPadding2D()(conv3_2)
    conv3_3 = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv3_3')(conv3_2_padded)
    conv3_3_padded = ZeroPadding2D()(conv3_3)
    conv3_4 = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv3_4')(conv3_3_padded)

    pool3_stage1 = MaxPool2D(pool_size=(2, 2), strides=2,  name='pool3_stage1')(conv3_4)
    pool3_stage1_padded = ZeroPadding2D()(pool3_stage1)
    conv4_1 = Conv2D(filters=512, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv4_1')(pool3_stage1_padded)
    conv4_1_padded = ZeroPadding2D()(conv4_1)
    conv4_2 = Conv2D(filters=512, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv4_2')(conv4_1_padded)
    conv4_2_padded = ZeroPadding2D()(conv4_2)
    conv4_3_CPM = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv4_3_CPM')(conv4_2_padded)
    conv4_3_CPM_padded = ZeroPadding2D()(conv4_3_CPM)

    conv4_4_CPM = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv4_4_CPM')(conv4_3_CPM_padded)

    # stage 1
    # L2 confidence maps
    conv4_4_CPM_padded = ZeroPadding2D()(conv4_4_CPM)
    conv5_1_CPM_L2 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv5_1_CPM_L2')(conv4_4_CPM_padded)
    conv5_1_CPM_L2_padded = ZeroPadding2D()(conv5_1_CPM_L2)
    conv5_2_CPM_L2 = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_2_CPM_L2')(conv5_1_CPM_L2_padded)
    conv5_2_CPM_L2_padded = ZeroPadding2D()(conv5_2_CPM_L2)
    conv5_3_CPM_L2 = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_3_CPM_L2')(conv5_2_CPM_L2_padded)
    conv5_3_CPM_L2_padded = ZeroPadding2D(padding=(0,0))(conv5_3_CPM_L2)

    conv5_4_CPM_L2 = Conv2D(filters=512, kernel_size=(1, 1),  activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_4_CPM_L2')(conv5_3_CPM_L2_padded)
    conv5_4_CPM_L2_padded = ZeroPadding2D(padding=(0,0))(conv5_4_CPM_L2)

    conv5_5_CPM_L2 = Conv2D(filters=19, kernel_size=(1, 1),  activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_5_CPM_L2')(conv5_4_CPM_L2_padded)
    # L1 PAFs
    # conv4_4_CPM_padded = ZeroPadding2D()(conv4_4_CPM)
    conv5_1_CPM_L1 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_1_CPM_L1')(conv4_4_CPM_padded)
    conv5_1_CPM_L1_padded = ZeroPadding2D()(conv5_1_CPM_L1)
    conv5_2_CPM_L1 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_2_CPM_L1')(conv5_1_CPM_L1_padded)
    conv5_2_CPM_L1_padded = ZeroPadding2D()(conv5_2_CPM_L1)
    conv5_3_CPM_L1 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_3_CPM_L1')(conv5_2_CPM_L1_padded)
    conv5_3_CPM_L1_padded = ZeroPadding2D(padding=(0, 0))(conv5_3_CPM_L1)

    conv5_4_CPM_L1 = Conv2D(filters=512, kernel_size=(1, 1), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_4_CPM_L1')(conv5_3_CPM_L1_padded)
    conv5_4_CPM_L1_padded = ZeroPadding2D(padding=(0, 0))(conv5_4_CPM_L1)

    conv5_5_CPM_L1 = Conv2D(filters=38, kernel_size=(1, 1), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_5_CPM_L1')(conv5_4_CPM_L1_padded)

    temp_L1 = conv5_5_CPM_L1
    temp_L2 = conv5_5_CPM_L2

    for i in range(2,stage_num+1):
        concat_stagei = concatenate([temp_L1,temp_L2,conv4_4_CPM],axis=3)
        # L1
        concat_stage_padded = ZeroPadding2D(padding=(3,3))(concat_stagei)
        Mconv1_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='Mconv1_stage%s_L1'%(str(i)))(concat_stage_padded)
        Mconv1_stagei_L1_padded = ZeroPadding2D(padding=(3,3))(Mconv1_stagei_L1)
        Mconv2_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv2_stage%s_L1'%(str(i)))(Mconv1_stagei_L1_padded)
        Mconv2_stagei_L1_padded = ZeroPadding2D(padding=(3, 3))(Mconv2_stagei_L1)
        Mconv3_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv3_stage%s_L1'%(str(i)))(Mconv2_stagei_L1_padded)
        Mconv3_stagei_L1_padded = ZeroPadding2D(padding=(3, 3))(Mconv3_stagei_L1)
        Mconv4_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv4_stage%s_L1'%(str(i)))(Mconv3_stagei_L1_padded)
        Mconv4_stagei_L1_padded = ZeroPadding2D(padding=(3, 3))(Mconv4_stagei_L1)
        Mconv5_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv5_stage%s_L1'%(str(i)))(Mconv4_stagei_L1_padded)
        Mconv5_stagei_L1_padded = ZeroPadding2D(padding=(0, 0))(Mconv5_stagei_L1)
        Mconv6_stagei_L1 = Conv2D(filters=128, kernel_size=(1, 1), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv6_stage%s_L1'%(str(i)))(Mconv5_stagei_L1_padded)
        Mconv6_stagei_L1_padded = ZeroPadding2D(padding=(0, 0))(Mconv6_stagei_L1)
        Mconv7_stagei_L1 = Conv2D(filters=38, kernel_size=(1, 1), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv7_stage%s_L1'%(str(i)))(Mconv6_stagei_L1_padded)

        # L2
        Mconv1_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv1_stage%s_L2'%(str(i)))(concat_stage_padded)
        Mconv1_stagei_L2_padded = ZeroPadding2D(padding=(3, 3))(Mconv1_stagei_L2)
        Mconv2_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv2_stage%s_L2'%(str(i)))(Mconv1_stagei_L2_padded)
        Mconv2_stagei_L2_padded = ZeroPadding2D(padding=(3, 3))(Mconv2_stagei_L2)
        Mconv3_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv3_stage%s_L2'%(str(i)))(Mconv2_stagei_L2_padded)
        Mconv3_stagei_L2_padded = ZeroPadding2D(padding=(3, 3))(Mconv3_stagei_L2)
        Mconv4_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv4_stage%s_L2'%(str(i)))(Mconv3_stagei_L2_padded)
        Mconv4_stagei_L2_padded = ZeroPadding2D(padding=(3, 3))(Mconv4_stagei_L2)
        Mconv5_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv5_stage%s_L2'%(str(i)))(Mconv4_stagei_L2_padded)
        Mconv5_stagei_L2_padded = ZeroPadding2D(padding=(0, 0))(Mconv5_stagei_L2)
        Mconv6_stagei_L2 = Conv2D(filters=128, kernel_size=(1, 1), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv6_stage%s_L2'%(str(i)))(Mconv5_stagei_L2_padded)
        Mconv6_stagei_L2_padded = ZeroPadding2D(padding=(0, 0))(Mconv6_stagei_L2)
        Mconv7_stagei_L2 = Conv2D(filters=19, kernel_size=(1, 1), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv7_stage%s_L2'%(str(i)))(Mconv6_stagei_L2_padded)

        temp_L1 = Mconv7_stagei_L1
        temp_L2 = Mconv7_stagei_L2

        # net_output.append(lossi)
        # net_output.append((Mconv7_stagei_L1,Mconv7_stagei_L2))
    net_output = [Mconv7_stagei_L1,Mconv7_stagei_L2]
    model = Model(inputs=net_input,outputs=net_output)
    #model.compile(optimizer=SGD(lr=0.000040,momentum=0.9,decay=0.0005),
     #             loss='mean_absolute_error')
    return model
for x_start, x_end in zip(x[:-1], x[1:]):
    for y_start, y_end in zip(y[:-1], y[1:]):
        patch_list.append(im_rgb[x_start:x_end, y_start:y_end, :])
        # use the mean of this patch's mask as its classification label
        lbl_list.append(masks[x_start:x_end, y_start:y_end].mean())
patches = np.array(patch_list)
lbls = np.array(lbl_list)
# print(patches.shape,lbls.shape)

x_train, x_test, y_train, y_test = train_test_split(patches, lbls, train_size=0.8, test_size=0.2)

model = Sequential()
model.add(Conv2D(64, (1, 1), input_shape=(16, 16, 8), padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(64, (1, 1), padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPool2D((2, 2)))
model.add(Flatten())
model.add(Dense(1))

model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])

model.fit(x_train, y_train, batch_size=32, epochs=10)

Example #18
def get_model(params_transform,params_train):

    # params set
    batch_size = int(params_train['batch_size'])
    crop_size_x = int(params_transform['crop_size_x'])
    crop_size_y = int(params_transform['crop_size_y'])
    stride = int(params_transform['stride'])
    num_parts = int(params_transform['np'])
    grid_x = crop_size_x // stride
    grid_y = crop_size_y // stride

    stage_num = 6

    # input output
    image = Input(shape=(crop_size_y,crop_size_x,3),
                  batch_shape=(batch_size,crop_size_y,crop_size_x,3),
                  name='image')
    label = Input(shape=(grid_y,grid_x,(num_parts + 1)*2),
                  batch_shape=(batch_size,grid_y,grid_x,(num_parts+1)*2),
                  name='label')

    net_input = [image,label]
    net_output = []

    # ground-truth

    paf_weight = Lambda(lambda x: x[:, :, :, :38])(label)  # mask
    confid_weight = Lambda(lambda x: x[:, :, :, 38:57])(label)  # mask
    paf_temp = Lambda(lambda x: x[:, :, :, 57:95])(label)  # gt
    confid_temp = Lambda(lambda x: x[:, :, :, 95:114])(label)  # gt

    gt = concatenate([paf_temp, confid_temp], axis=3,name='ground-truth')

    # temp = concatenate([paf_weight,confid_weight],axis=3)
    # print(temp.shape)
    # common op
    image_padded = ZeroPadding2D()(image)
    conv1_1 = Conv2D(filters=64,kernel_size=(3,3),activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(),name='conv1_1')(image_padded)
    conv1_1_padded = ZeroPadding2D()(conv1_1)
    conv1_2 = Conv2D(filters=64,kernel_size=(3,3),activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(),name='conv1_2')(conv1_1_padded)
    pool1_stage1 = MaxPool2D(pool_size=(2,2),strides=2,name='pool1_stage1')(conv1_2)

    pool1_stage1_padded = ZeroPadding2D()(pool1_stage1)
    conv2_1 = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv2_1')(pool1_stage1_padded)
    conv2_1_padded = ZeroPadding2D()(conv2_1)
    conv2_2 = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv2_2')(conv2_1_padded)
    # conv2_2 = ZeroPadding2D()(conv2_2)
    pool2_stage1 = MaxPool2D(pool_size=(2,2),strides=2,name='pool2_stage1')(conv2_2)

    pool2_stage1_padded = ZeroPadding2D()(pool2_stage1)
    conv3_1 = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv3_1')(pool2_stage1_padded)
    conv3_1_padded = ZeroPadding2D()(conv3_1)
    conv3_2 = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv3_2')(conv3_1_padded)
    conv3_2_padded = ZeroPadding2D()(conv3_2)
    conv3_3 = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv3_3')(conv3_2_padded)
    conv3_3_padded = ZeroPadding2D()(conv3_3)
    conv3_4 = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv3_4')(conv3_3_padded)

    pool3_stage1 = MaxPool2D(pool_size=(2, 2), strides=2,  name='pool3_stage1')(conv3_4)
    pool3_stage1_padded = ZeroPadding2D()(pool3_stage1)
    conv4_1 = Conv2D(filters=512, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv4_1')(pool3_stage1_padded)
    conv4_1_padded = ZeroPadding2D()(conv4_1)
    conv4_2 = Conv2D(filters=512, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv4_2')(conv4_1_padded)
    conv4_2_padded = ZeroPadding2D()(conv4_2)
    conv4_3_CPM = Conv2D(filters=256, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv4_3_CPM')(conv4_2_padded)
    conv4_3_CPM_padded = ZeroPadding2D()(conv4_3_CPM)

    conv4_4_CPM = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv4_4_CPM')(conv4_3_CPM_padded)

    # stage 1
    # L2 confidence maps
    conv4_4_CPM_padded = ZeroPadding2D()(conv4_4_CPM)
    conv5_1_CPM_L2 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
                     kernel_initializer=RandomNormal(stddev=0.00999999977648),
                     bias_initializer=Constant(), name='conv5_1_CPM_L2')(conv4_4_CPM_padded)
    conv5_1_CPM_L2_padded = ZeroPadding2D()(conv5_1_CPM_L2)
    conv5_2_CPM_L2 = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_2_CPM_L2')(conv5_1_CPM_L2_padded)
    conv5_2_CPM_L2_padded = ZeroPadding2D()(conv5_2_CPM_L2)
    conv5_3_CPM_L2 = Conv2D(filters=128, kernel_size=(3, 3),  activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_3_CPM_L2')(conv5_2_CPM_L2_padded)
    conv5_3_CPM_L2_padded = ZeroPadding2D(padding=(0,0))(conv5_3_CPM_L2)

    conv5_4_CPM_L2 = Conv2D(filters=512, kernel_size=(1, 1),  activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_4_CPM_L2')(conv5_3_CPM_L2_padded)
    conv5_4_CPM_L2_padded = ZeroPadding2D(padding=(0,0))(conv5_4_CPM_L2)

    conv5_5_CPM_L2 = Conv2D(filters=19, kernel_size=(1, 1),  activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_5_CPM_L2')(conv5_4_CPM_L2_padded)
    # L1 PAFs
    # conv4_4_CPM_padded = ZeroPadding2D()(conv4_4_CPM)
    conv5_1_CPM_L1 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_1_CPM_L1')(conv4_4_CPM_padded)
    conv5_1_CPM_L1_padded = ZeroPadding2D()(conv5_1_CPM_L1)
    conv5_2_CPM_L1 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_2_CPM_L1')(conv5_1_CPM_L1_padded)
    conv5_2_CPM_L1_padded = ZeroPadding2D()(conv5_2_CPM_L1)
    conv5_3_CPM_L1 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_3_CPM_L1')(conv5_2_CPM_L1_padded)
    conv5_3_CPM_L1_padded = ZeroPadding2D(padding=(0, 0))(conv5_3_CPM_L1)

    conv5_4_CPM_L1 = Conv2D(filters=512, kernel_size=(1, 1), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_4_CPM_L1')(conv5_3_CPM_L1_padded)
    conv5_4_CPM_L1_padded = ZeroPadding2D(padding=(0, 0))(conv5_4_CPM_L1)

    conv5_5_CPM_L1 = Conv2D(filters=38, kernel_size=(1, 1), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='conv5_5_CPM_L1')(conv5_4_CPM_L1_padded)

    paf_masked_stage1_L1 = multiply([conv5_5_CPM_L1,paf_weight],name='paf_masked_stage1_L1')
    confid_masked_stage1_L2 = multiply([conv5_5_CPM_L2,confid_weight],name='confid_masked_stage1_L2')

    pred_label_stage1 = concatenate([paf_masked_stage1_L1,confid_masked_stage1_L2],axis=3,name='s1')
    pred_label_stage1 = Lambda(lambda x:tf.multiply(x,-1.0))(pred_label_stage1)
    pred_label_stage1 = add([pred_label_stage1,gt])
    pred_label_stage1 = Lambda(lambda x:tf.square(x))(pred_label_stage1)
    loss1 = Lambda(lambda x:tf.reduce_sum(x,axis=[1,2,3],keep_dims=True),name="scalar_s1")(pred_label_stage1)
    loss1 = Reshape((1,),name='final_s1')(loss1)
    # net_output.append(paf_masked_stage1_L1)
    # net_output.append(confid_masked_stage1_L2)
    net_output.append(loss1)

    temp_L1 = conv5_5_CPM_L1
    temp_L2 = conv5_5_CPM_L2

    # model = Model(inputs=image,outputs=[temp_L2,temp_L1])
    # return model

    for i in range(2,stage_num+1):
        concat_stagei = concatenate([temp_L1,temp_L2,conv4_4_CPM],axis=3)
        # L1
        concat_stage_padded = ZeroPadding2D(padding=(3,3))(concat_stagei)
        Mconv1_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                            kernel_initializer=RandomNormal(stddev=0.00999999977648),
                            bias_initializer=Constant(), name='Mconv1_stage%s_L1'%(str(i)))(concat_stage_padded)
        Mconv1_stagei_L1_padded = ZeroPadding2D(padding=(3,3))(Mconv1_stagei_L1)
        Mconv2_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv2_stage%s_L1'%(str(i)))(Mconv1_stagei_L1_padded)
        Mconv2_stagei_L1_padded = ZeroPadding2D(padding=(3, 3))(Mconv2_stagei_L1)
        Mconv3_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv3_stage%s_L1'%(str(i)))(Mconv2_stagei_L1_padded)
        Mconv3_stagei_L1_padded = ZeroPadding2D(padding=(3, 3))(Mconv3_stagei_L1)
        Mconv4_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv4_stage%s_L1'%(str(i)))(Mconv3_stagei_L1_padded)
        Mconv4_stagei_L1_padded = ZeroPadding2D(padding=(3, 3))(Mconv4_stagei_L1)
        Mconv5_stagei_L1 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv5_stage%s_L1'%(str(i)))(Mconv4_stagei_L1_padded)
        Mconv5_stagei_L1_padded = ZeroPadding2D(padding=(0, 0))(Mconv5_stagei_L1)
        Mconv6_stagei_L1 = Conv2D(filters=128, kernel_size=(1, 1), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv6_stage%s_L1'%(str(i)))(Mconv5_stagei_L1_padded)
        Mconv6_stagei_L1_padded = ZeroPadding2D(padding=(0, 0))(Mconv6_stagei_L1)
        Mconv7_stagei_L1 = Conv2D(filters=38, kernel_size=(1, 1), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv7_stage%s_L1'%(str(i)))(Mconv6_stagei_L1_padded)

        # L2
        Mconv1_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv1_stage%s_L2'%(str(i)))(concat_stage_padded)
        Mconv1_stagei_L2_padded = ZeroPadding2D(padding=(3, 3))(Mconv1_stagei_L2)
        Mconv2_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv2_stage%s_L2'%(str(i)))(Mconv1_stagei_L2_padded)
        Mconv2_stagei_L2_padded = ZeroPadding2D(padding=(3, 3))(Mconv2_stagei_L2)
        Mconv3_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv3_stage%s_L2'%(str(i)))(Mconv2_stagei_L2_padded)
        Mconv3_stagei_L2_padded = ZeroPadding2D(padding=(3, 3))(Mconv3_stagei_L2)
        Mconv4_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv4_stage%s_L2'%(str(i)))(Mconv3_stagei_L2_padded)
        Mconv4_stagei_L2_padded = ZeroPadding2D(padding=(3, 3))(Mconv4_stagei_L2)
        Mconv5_stagei_L2 = Conv2D(filters=128, kernel_size=(7, 7), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv5_stage%s_L2'%(str(i)))(Mconv4_stagei_L2_padded)
        Mconv5_stagei_L2_padded = ZeroPadding2D(padding=(0, 0))(Mconv5_stagei_L2)
        Mconv6_stagei_L2 = Conv2D(filters=128, kernel_size=(1, 1), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv6_stage%s_L2'%(str(i)))(Mconv5_stagei_L2_padded)
        Mconv6_stagei_L2_padded = ZeroPadding2D(padding=(0, 0))(Mconv6_stagei_L2)
        Mconv7_stagei_L2 = Conv2D(filters=19, kernel_size=(1, 1), activation='relu',
                                  kernel_initializer=RandomNormal(stddev=0.00999999977648),
                                  bias_initializer=Constant(), name='Mconv7_stage%s_L2'%(str(i)))(Mconv6_stagei_L2_padded)

        temp_L1 = Mconv7_stagei_L1
        temp_L2 = Mconv7_stagei_L2

        # mask the stage-i predictions and compute the same squared-error loss as stage 1
        paf_masked_stagei_L1 = multiply([temp_L1, paf_weight], name='paf_masked_stage%s_L1' % (str(i)))
        confid_masked_stagei_L2 = multiply([temp_L2, confid_weight], name='confid_masked_stage%s_L2' % (str(i)))

        pred_label_stagei = concatenate([paf_masked_stagei_L1, confid_masked_stagei_L2], axis=3,
                                        name='s%s' % (str(i)))

        pred_label_stagei = Lambda(lambda x: tf.multiply(x, -1.0))(pred_label_stagei)
        pred_label_stagei = add([pred_label_stagei, gt])
        pred_label_stagei = Lambda(lambda x: tf.square(x))(pred_label_stagei)
        # pred_label_stagei = Lambda(lambda x: tf.reduce_sum(x))(pred_label_stagei)
        lossi = Lambda(lambda x: tf.reduce_sum(x, axis=[1, 2, 3], keep_dims=True), name="scalar_s%s" % (str(i)))(pred_label_stagei)
        lossi = Reshape((1,), name='final_s%s'%(str(i)))(lossi)

        # net_output.append(paf_masked_stagei_L1)
        # net_output.append(confid_masked_stagei_L2)
        net_output.append(lossi)

    model = Model(inputs=net_input,outputs=net_output)
    model.compile(optimizer=SGD(lr=0.000040,momentum=0.9,decay=0.0005),
                  loss='mean_absolute_error')
    return model
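# Sketch: every 'final_s<i>' output of the model above is already the per-stage sum
# of squared errors, so one way to train it is to fit against all-zero targets,
# making the compiled mean_absolute_error reduce to the stage loss itself. All names
# below (get_training_model, images, gt_maps, paf_masks, confid_masks, num_stages)
# are placeholders, not part of the original snippet, and the input list must match
# whatever net_input contains earlier in the function.
import numpy as np

stage_model = get_training_model()  # hypothetical call to the function defined above
batch = images.shape[0]
zero_targets = [np.zeros((batch, 1)) for _ in range(num_stages)]  # one per stage output
stage_model.fit([images, gt_maps, paf_masks, confid_masks],
                zero_targets, batch_size=10, epochs=1)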
Example #19
0
print("Shape before one-hot encoding: ", y_train.shape)
Y_train = np_utils.to_categorical(y_train, n_classes)
Y_test = np_utils.to_categorical(y_test, n_classes)
print("Shape after one-hot encoding: ", Y_train.shape)

# building a linear stack of layers with the sequential model
model = Sequential()
# convolutional layer
model.add(
    Conv2D(25,
           kernel_size=(3, 3),
           strides=(1, 1),
           padding='valid',
           activation='relu',
           input_shape=(28, 28, 1)))
model.add(MaxPool2D(pool_size=(1, 1)))  # note: a 1x1 pool window leaves the feature map unchanged
# flatten output of conv
model.add(Flatten())
# hidden layer
model.add(Dense(100, activation='relu'))
# output layer
model.add(Dense(10, activation='softmax'))

# compiling the sequential model
model.compile(loss='categorical_crossentropy',
              metrics=['accuracy'],
              optimizer='adam')

# training the model for 10 epochs
model.fit(X_train,
          Y_train,
          epochs=10)  # remaining fit() arguments were truncated in the original snippet
Example #20
0
    def get_text_model(self):

        # Modality specific hyperparameters
        self.epochs = 100
        self.batch_size = 50

        # Modality specific parameters
        self.embedding_dim = self.data.W.shape[1]

        # For text model
        self.vocabulary_size = self.data.W.shape[0]
        self.filter_sizes = [3, 4, 5]
        self.num_filters = 512

        print("Creating Model...")

        sentence_length = self.train_x.shape[2]

        # Initializing sentence representation layers
        embedding = Embedding(input_dim=self.vocabulary_size,
                              output_dim=self.embedding_dim,
                              weights=[self.data.W],
                              input_length=sentence_length,
                              trainable=False)
        conv_0 = Conv2D(self.num_filters,
                        kernel_size=(self.filter_sizes[0], self.embedding_dim),
                        padding='valid',
                        kernel_initializer='normal',
                        activation='relu')
        conv_1 = Conv2D(self.num_filters,
                        kernel_size=(self.filter_sizes[1], self.embedding_dim),
                        padding='valid',
                        kernel_initializer='normal',
                        activation='relu')
        conv_2 = Conv2D(self.num_filters,
                        kernel_size=(self.filter_sizes[2], self.embedding_dim),
                        padding='valid',
                        kernel_initializer='normal',
                        activation='relu')
        maxpool_0 = MaxPool2D(pool_size=(sentence_length -
                                         self.filter_sizes[0] + 1, 1),
                              strides=(1, 1),
                              padding='valid')
        maxpool_1 = MaxPool2D(pool_size=(sentence_length -
                                         self.filter_sizes[1] + 1, 1),
                              strides=(1, 1),
                              padding='valid')
        maxpool_2 = MaxPool2D(pool_size=(sentence_length -
                                         self.filter_sizes[2] + 1, 1),
                              strides=(1, 1),
                              padding='valid')
        dense_func = Dense(100, activation='tanh', name="dense")
        dense_final = Dense(units=self.classes, activation='softmax')
        reshape_func = Reshape((sentence_length, self.embedding_dim, 1))

        def slicer(x, index):
            return x[:, K.constant(index, dtype='int32'), :]

        def slicer_output_shape(input_shape):
            shape = list(input_shape)
            assert len(shape) == 3  # batch, seq_len, sent_len
            new_shape = (shape[0], shape[2])
            return new_shape

        def reshaper(x):
            return K.expand_dims(x, axis=3)

        def flattener(x):
            x = K.reshape(x, [-1, x.shape[1] * x.shape[3]])
            return x

        def flattener_output_shape(input_shape):
            shape = list(input_shape)
            new_shape = (shape[0], 3 * shape[3])
            return new_shape

        inputs = Input(shape=(self.sequence_length, sentence_length),
                       dtype='int32')
        cnn_output = []
        for ind in range(self.sequence_length):

            local_input = Lambda(slicer,
                                 output_shape=slicer_output_shape,
                                 arguments={"index": ind
                                            })(inputs)  # Batch, word_indices

            #cnn-sent
            emb_output = embedding(local_input)
            reshape = Lambda(reshaper)(emb_output)
            concatenated_tensor = Concatenate(axis=1)([
                maxpool_0(conv_0(reshape)),
                maxpool_1(conv_1(reshape)),
                maxpool_2(conv_2(reshape))
            ])
            flatten = Lambda(
                flattener,
                output_shape=flattener_output_shape,
            )(concatenated_tensor)
            dense_output = dense_func(flatten)
            dropout = Dropout(0.5)(dense_output)
            cnn_output.append(dropout)

        def stack(x):
            return K.stack(x, axis=1)

        cnn_outputs = Lambda(stack)(cnn_output)

        masked = Masking(mask_value=0)(cnn_outputs)
        lstm = Bidirectional(
            LSTM(300, activation='relu', return_sequences=True,
                 dropout=0.3))(masked)
        lstm = Bidirectional(LSTM(300,
                                  activation='relu',
                                  return_sequences=True,
                                  dropout=0.3),
                             name="utter")(lstm)
        output = TimeDistributed(Dense(self.classes,
                                       activation='softmax'))(lstm)

        model = Model(inputs, output)
        return model
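# Shape walkthrough for one sentence branch of the model above (a standalone sketch;
# the sizes 50 / 300 / 512 are illustrative, not taken from the snippet):
from keras.layers import Input, Conv2D, MaxPool2D
from keras.models import Model

sent = Input(shape=(50, 300, 1))                                                  # words x embedding x 1
c = Conv2D(512, kernel_size=(3, 300), padding='valid', activation='relu')(sent)   # -> (48, 1, 512)
p = MaxPool2D(pool_size=(50 - 3 + 1, 1), strides=(1, 1), padding='valid')(c)      # -> (1, 1, 512)
Model(sent, p).summary()
# Concatenating the three pooled maps on axis=1 gives (3, 1, 512); the flattener
# Lambda then reshapes this into a 3 * 512 = 1536-dimensional sentence vector.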
Example #21
0
    print("Test data shapes : ", X_test.shape, y_test.shape)

    # To build a first neural network we need to turn the target variable into a
    # vector "one-hot-encoding" representation. Keras provides a utility function
    # to convert integer-encoded categorical variables as one-hot encoded values:

    Y_train = to_categorical(y_train)
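    # e.g. a digit label 2 becomes the row [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
    # so Y_train has shape (n_samples, 10).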

    N = X_train.shape[1]  # input size
    H = 100  # hidden layer size or 'width'
    K = 10  # output layer size, i.e number of classes

    # --- Keras sequential model
    model = Sequential()
    model.add(Conv2D(4, 5, activation='relu', input_shape=(8, 8, 1)))
    model.add(MaxPool2D(2, strides=2))
    model.add(Flatten())
    model.add(Dense(H, input_dim=N))
    model.add(Activation("tanh"))
    model.add(Dense(K))
    model.add(Activation("softmax"))

    # --- Compile and fit the model using SGD
    model.compile(optimizer=optimizers.SGD(lr=0.1),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(X_train, Y_train, epochs=15, batch_size=32)

    # --- Display the report for model training
    # summarize history for loss
Example #22
0
model += GInftlyLayer(
    'dcnn0',
    w_regularizer=(c_l2, w_reg),
    f_regularizer=(c_l2, f_reg),
    reweight_regularizer=False,
    f_layer=[
        lambda reg: Convolution2D(init_cnn_count, (3, 3), padding='same'),
        lambda reg: Dropout(0.25),
        lambda reg: GammaRegularizedBatchNorm(reg, max_free_gamma=0.),
    ],
    h_step=[
        lambda reg: Activation('relu'),
    ],
    w_step=w_step,
)
model += MaxPool2D()
model += Convolution2D(init_cnn_count * 2, (3, 3),
                       trainable=False,
                       padding='same')
model += GInftlyLayer(
    'dcnn1',
    w_regularizer=(c_l2, w_reg),
    f_regularizer=(c_l2, f_reg),
    reweight_regularizer=False,
    f_layer=[
        lambda reg: Convolution2D(init_cnn_count * 2, (3, 3), padding='same'),
        lambda reg: Dropout(0.25),
        lambda reg: GammaRegularizedBatchNorm(reg, max_free_gamma=0.),
    ],
    h_step=[
        lambda reg: Activation('relu'),
    ],
    w_step=w_step,
)
Example #23
0
## Batch generator -- only its final `yield` survived in the original snippet;
## the batch-assembly code above it has been truncated.
def generator(samples, batch_size=64):
    while True:
        # ... build X_train / y_train arrays from `samples` (truncated) ...
        yield sklearn.utils.shuffle(X_train, y_train)


## Creating training and validation generator

train_generator = generator(training_data, batch_size=64)
validation_generator = generator(validation_data, batch_size=64)

## Define architecture

model = Sequential()
model.add(Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(160, 320, 3)))  # crop 50 rows from the top, 20 from the bottom
model.add(Lambda(lambda x: (x / 255.0) - 0.5))  # normalize pixels to [-0.5, 0.5]

model.add(Conv2D(24, (5, 5), strides=(2, 2), activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.3))

model.add(Conv2D(36, (5, 5), strides=(2, 2), activation="relu"))
model.add(Dropout(0.3))

model.add(Conv2D(48, (5, 5), strides=(2, 2), activation="relu"))
model.add(Dropout(0.3))

model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))

model_image = './images/architecture.png'
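# Plausible continuation (a sketch -- the loss choice, step counts, epoch count and
# save path are assumptions, not part of the original snippet):
from keras.utils import plot_model

plot_model(model, to_file=model_image)
model.compile(loss='mse', optimizer='adam')  # steering angle is a regression target
model.fit_generator(train_generator,
                    steps_per_epoch=len(training_data) // 64,
                    validation_data=validation_generator,
                    validation_steps=len(validation_data) // 64,
                    epochs=5)
model.save('model.h5')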
Example #24
0
def train_CNN(X_train, y_train, X_test, y_test, qstn=None, outputDF=False):
    # Create a vocab (word to index)
    vocab_dict = build_vocab(X_train)

    pickle_object(vocab_dict, 'vocab_dict_comb_test')

    vocabulary_size = len(vocab_dict)+1

    # Create the question encodings
    # These are not really one-hot vectors in this file:
    # each question is encoded as a vector of word indices, one per word.
    # Proper one-hot vectors are still worth trying.
    X_train_embedding = retrieve_one_hot_embeddings(X_train,vocab_dict)
    X_test_embedding = retrieve_one_hot_embeddings(X_test,vocab_dict)
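    # e.g. the question "What is 3 plus 5" might be encoded as [14, 7, 3, 92, 5]
    # (hypothetical indices looked up in vocab_dict), one integer per word.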

    # Number of labels would be just 4: +,-,*,/
    num_of_labels = len(set(y_train))
    labels = list(set(y_train))

    # Create dict for labels to index
    label_to_index = {o:i for i,o in enumerate(labels)}
    index_to_label = {i:o for i,o in enumerate(labels)}


    # Convert labels in the training and test dataset to numeric format
    y_train_label_numeric_rep = [label_to_index[label] for label in y_train]
    y_test_label_numeric_rep = [label_to_index[label] for label in y_test]

    # Just creates the actual one-hot encoded vectors
    # e.g. 0 : [1 0 0 0]
    #      1 : [0 1 0 0]
    y_train_distribution = np_utils.to_categorical(y_train_label_numeric_rep, num_of_labels)
    y_test_distribution = np_utils.to_categorical(y_test_label_numeric_rep, num_of_labels)


    # pad (post) questions to max length
    max_length = 100
    X_train_embedding_padded = pad_sequences(X_train_embedding, maxlen=max_length, padding='post')
    X_test_embedding_padded = pad_sequences(X_test_embedding, maxlen=max_length, padding='post')

    X_shuffled, y_shuffled = X_train_embedding_padded, y_train_distribution
    length = len(X_shuffled)

    # Split the training dataset into train (80%) + dev (20%)
    X_train_onehot = np.array(X_shuffled[:int(0.8*length)])
    X_dev_onehot = np.array(X_shuffled[int(0.8*length):])
    y_train_distribution = np.array(y_shuffled[:int(0.8*length)])
    y_dev_distribution = np.array(y_shuffled[int(0.8*length):])

    embedding_dim = 256
    filter_sizes = [3, 4, 5]
    num_filters = 128
    std_drop = 0.5

    epochs = 15
    # batch_size = 128
    batch_size = 256


    print("Creating CNN Model...")
    inputs = Input(shape=(max_length,), dtype='int32')
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, input_length=max_length)(inputs)
    reshape = Reshape((max_length, embedding_dim, 1))(embedding)

    # Kernel size specifies the size of the 2-D conv window
    # looking at 3 words at a time in the 1st layer, 4 in the 2nd ...
    # set padding to valid to ensure no padding
    conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embedding_dim), padding='valid', kernel_initializer='normal',
                activation='relu')(reshape)
    conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embedding_dim), padding='valid', kernel_initializer='normal',
                activation='relu')(reshape)
    conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embedding_dim), padding='valid', kernel_initializer='normal',
                activation='relu')(reshape)

    # The pool window spans the entire conv output (max-over-time pooling),
    # keeping only the strongest response per filter
    maxpool_0 = MaxPool2D(pool_size=(max_length-filter_sizes[0]+1, 1), strides=(2,2), padding='valid')(conv_0)
    maxpool_1 = MaxPool2D(pool_size=(max_length-filter_sizes[1]+1, 1), strides=(2,2), padding='valid')(conv_1)
    maxpool_2 = MaxPool2D(pool_size=(max_length-filter_sizes[2]+1, 1), strides=(2,2), padding='valid')(conv_2)
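    # Each pooled map comes out as (1, 1, num_filters): the window already covers the
    # whole conv output, so the (2, 2) strides have no further effect. Concatenating
    # on axis=1 below therefore gives (3, 1, num_filters) before flattening.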

    concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])
    flatten = Flatten()(concatenated_tensor)
    dropout = Dropout(std_drop)(flatten)
    output = Dense(units=num_of_labels, activation='softmax')(dropout)

    model = Model(inputs=inputs, outputs=output)

    checkpoint = ModelCheckpoint('trained_CNN_model.hdf5', monitor='val_loss', verbose=0, save_best_only=True, mode='auto')
    # adam = Adam(lr=2e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    #print(model.summary())
    print("Training CNN Model...")
    # model.fit(X_train_onehot, y_train_distribution, batch_size=batch_size, epochs=epochs, verbose=0, callbacks=[checkpoint],
    #      validation_data=(X_dev_onehot, y_dev_distribution))

    model.fit(X_train_onehot, y_train_distribution, batch_size=batch_size, epochs=epochs, verbose=0, callbacks=[checkpoint],
         validation_data=(X_dev_onehot, y_dev_distribution))
Example #25
0
#Building CNN
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import MaxPool2D
from keras.layers import Flatten
from keras.layers import Dropout

classifier = Sequential()

classifier.add(Conv2D(64, (3, 3), input_shape=(256, 256, 3),
                      activation="relu"))
classifier.add(MaxPool2D(pool_size=(2, 2)))
classifier.add(Conv2D(32, (3, 3), activation="relu"))
classifier.add(MaxPool2D(pool_size=(2, 2)))
classifier.add(Conv2D(32, (3, 3), activation="relu"))
classifier.add(MaxPool2D(pool_size=(2, 2)))
classifier.add(Flatten())

classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dropout(rate=0.4))
classifier.add(Dense(units=64, activation='relu'))
classifier.add(Dropout(rate=0.2))
classifier.add(Dense(units=32, activation='relu'))
classifier.add(Dropout(rate=0.2))
classifier.add(Dense(units=1, activation='sigmoid'))
classifier.compile(optimizer="adam",
                   loss="binary_crossentropy",
                   metrics=['accuracy'])
from keras.preprocessing.image import ImageDataGenerator
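# The snippet stops right after this import; a typical continuation (directory
# names, augmentation settings and step counts below are illustrative assumptions):
train_datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(256, 256),
                                                 batch_size=32,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(256, 256),
                                            batch_size=32,
                                            class_mode='binary')
classifier.fit_generator(training_set,
                         steps_per_epoch=250,
                         epochs=10,
                         validation_data=test_set,
                         validation_steps=63)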
Example #26
0
traindata = trdata.flow_from_directory(directory="data",
                                       target_size=(224, 224))
valdata = ImageDataGenerator()
valdata = valdata.flow_from_directory(directory="validation",
                                      target_size=(224, 224))

model = Sequential()
model.add(
    Conv2D(input_shape=(224, 224, 3),
           filters=64,
           kernel_size=(3, 3),
           padding="same",
           activation="relu"))
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

model.add(
    Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(
    Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

model.add(
    Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(
    Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(
    Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
Example #27
0
def cnn_model():
    input_img1 = Input(shape=(256, 256, 1))  # single-channel (grayscale) input, channels_last
    input_img2 = Input(shape=(256, 256, 1))
    input_img3 = Input(shape=(256, 256, 1))

    X1 = Convolution2D(filters=32,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(input_img1)
    X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    X1 = Convolution2D(filters=64,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(X1)
    X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    # X1 = Convolution2D(filters=128, kernel_size=(7, 7), padding='same', activation='relu')(X1)
    # X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    # X1 = Convolution2D(filters=256, kernel_size=(7, 7), padding='same', activation='relu')(X1)
    # X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    # X1 = Convolution2D(filters=512, kernel_size=(7, 7), padding='same', activation='relu')(X1)
    # X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X1)
    # X1 = Convolution2D(filters=1024, kernel_size=(7, 7), padding='same', activation='relu')(X1)
    # X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X1)

    X2 = Convolution2D(filters=32,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(input_img2)
    X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    X2 = Convolution2D(filters=64,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(X2)
    X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    # X2 = Convolution2D(filters=128, kernel_size=(7, 7), padding='same', activation='relu')(X2)
    # X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    # X2 = Convolution2D(filters=256, kernel_size=(7, 7), padding='same', activation='relu')(X2)
    # X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    # X2 = Convolution2D(filters=512, kernel_size=(7, 7), padding='same', activation='relu')(X2)
    # X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X2)
    # X2 = Convolution2D(filters=1024, kernel_size=(7, 7), padding='same', activation='relu')(X2)
    # X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X2)

    X3 = Convolution2D(filters=32,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(input_img3)
    X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X3)
    X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X3)
    X3 = Convolution2D(filters=64,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(X3)
    X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X3)
    # X3 = Convolution2D(filters=128, kernel_size=(7, 7), padding='same', activation='relu')(X3)
    # X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X3)
    # X3 = Convolution2D(filters=512, kernel_size=(7, 7), padding='same', activation='relu')(X3)
    # X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X3)
    # X3 = Convolution2D(filters=1024, kernel_size=(7, 7), padding='same', activation='relu')(X3)
    # X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X3)
    concat = Concatenate()([X1, X2, X3])
    X = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(concat)
    X = Convolution2D(filters=128,
                      kernel_size=(7, 7),
                      padding='same',
                      activation='relu')(X)
    # X = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X)
    # X = Convolution2D(filters=64, kernel_size=(7, 7), padding='same', activation='relu')(X)
    # X = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X)
    # X = Convolution2D(filters=256, kernel_size=(7, 7), padding='same', activation='relu')(X)
    X = Flatten()(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(64, activation='relu')(X)
    # X = Dense(128, activation='relu')(X)
    # X = Dense(64, activation='relu')(X)
    # print('before ' +str(X.shape))
    X = Dense(2, activation='sigmoid')(X)
    # print('after ' + str(X.shape))
    model = Model(inputs=[input_img1, input_img2, input_img3], outputs=X)

    return model
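# Sketch of compiling and fitting the three-view model above (the data arrays and
# the loss choice are assumptions, not part of the original snippet):
model = cnn_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit([imgs_view1, imgs_view2, imgs_view3],  # three aligned (n, 256, 256, 1) arrays
          labels,                                # shape (n, 2) to match the 2-unit sigmoid output
          batch_size=8, epochs=10)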
Example #28
0
from PIL import Image
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape
import numpy as np
train_X = []
for i in range(10000):
    img = Image.open("train/cat.%d.jpg" % i).resize((64, 64))
    train_X.append(np.array(img))
    img = Image.open("train/dog.%d.jpg" % i).resize((64, 64))
    train_X.append(np.array(img))
train_X = np.float32(train_X) / 255

train_y = [0, 1] * 10000
train_Y = np.eye(2)[train_y]

model = Sequential()
model.add(
    Conv2D(filters=32,
           kernel_size=(3, 3),
           padding='same',
           activation="relu",
           input_shape=(64, 64, 3)))
model.add(MaxPool2D())
model.add(
    Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation="relu"))
model.add(MaxPool2D())
model.add(Reshape((-1, )))
model.add(Dense(units=1024, activation="relu"))
model.add(Dense(units=2, activation="softmax"))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(train_X, train_Y, validation_split=0.1, batch_size=64, epochs=20)
Example #29
0
def model_creation():

    model = Sequential()

    model.add(
        Conv2D(2,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='SAME',
               input_shape=(48, 48, 1),
               activation='relu'))  ##Input Layers

    model.add(
        Conv2D(2,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='SAME',
               activation='relu'))
    model.add(BatchNormalization(axis=-1))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1),
                        padding='SAME'))  #Max Pool1
    model.add(Dropout(0.5))

    model.add(ZeroPadding2D(padding=(2, 2)))
    model.add(
        Conv2D(3,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='SAME',
               activation='relu'))

    model.add(
        Conv2D(3,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='SAME',
               activation='relu'))

    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1),
                        padding='SAME'))  #MaxPool2
    model.add(Dropout(0.5))

    model.add(ZeroPadding2D(padding=(2, 2)))
    model.add(
        Conv2D(7,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='SAME',
               activation='relu'))

    model.add(
        Conv2D(7,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='SAME',
               activation='relu'))
    model.add(BatchNormalization(axis=-1))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1),
                        padding='SAME'))  #MaxPool3
    model.add(Dropout(0.5))

    model.add(ZeroPadding2D(padding=(2, 2)))
    model.add(
        Conv2D(10,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='SAME',
               activation='relu'))

    model.add(
        Conv2D(10,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='SAME',
               activation='relu'))

    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1),
                        padding='SAME'))  #MaxPool4
    model.add(Dropout(0.5))
    """model.add(ZeroPadding2D(padding = (2, 2)))
	model.add(Conv2D(15, kernel_size=(3, 3), strides=(1, 1), padding='SAME',
activation = 'relu'))
	
	model.add(Conv2D(15, kernel_size=(3, 3), strides=(1, 1), padding='SAME',
activation = 'relu'))
	model.add(BatchNormalization(axis = -1))
	model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding = 'SAME')) #MaxPool5
	model.add(Dropout(0.5))
	"""

    model.add(Flatten())

    #Dense1
    model.add(Dense(200, activation='relu'))
    model.add(BatchNormalization(axis=-1))
    model.add(Dropout(0.5))

    #Dense2
    model.add(Dense(140, activation='relu'))
    model.add(BatchNormalization(axis=-1))
    model.add(Dropout(0.5))

    #Dense3
    model.add(Dense(100, activation='relu'))
    model.add(BatchNormalization(axis=-1))
    model.add(Dropout(0.5))

    #Dense4
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(7, activation='softmax'))
    model.summary()

    return model
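# Sketch of using model_creation() -- the data arrays, optimizer and epoch count are
# illustrative assumptions, not part of the original snippet:
model = model_creation()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train,  # X_train: (n, 48, 48, 1); y_train: one-hot over 7 classes
          batch_size=64, epochs=30, validation_data=(X_val, y_val))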
Example #30
0
def fit_cnn():
    X_train = train_embed
    y_train = np.array(train_labels)
    X_test = test_embed
    y_test = test_labels
    X_train = X_train.reshape(
        (X_train.shape[0], X_train.shape[1], X_train.shape[2], 1))
    X_test = X_test.reshape(
        (X_test.shape[0], X_test.shape[1], X_test.shape[2], 1))
    y_train, y_test = convert_one_hot(y_train, y_test)
    sequence_length = train_embed.shape[1]  # 60
    embedding_dim = train_embed.shape[2]
    filter_sizes = [2, 3, 4, 5]
    num_filters = 64
    drop = 0.6
    input_shape = train_embed[0].shape
    epochs = 1000
    batch_size = 64
    inputs = Input(shape=(sequence_length, embedding_dim, 1), dtype='float32')
    batch_norm = BatchNormalization(input_shape=input_shape)(inputs)
    conv_0 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[0], embedding_dim),
                    padding='valid',
                    kernel_initializer='normal',
                    activation='relu',
                    input_shape=input_shape,
                    kernel_regularizer=regularizers.l2(0.01))(batch_norm)
    conv_1 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[1], embedding_dim),
                    padding='valid',
                    kernel_initializer='normal',
                    activation='relu',
                    input_shape=input_shape,
                    kernel_regularizer=regularizers.l2(0.01))(batch_norm)
    conv_2 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[2], embedding_dim),
                    padding='valid',
                    kernel_initializer='normal',
                    activation='relu',
                    input_shape=input_shape,
                    kernel_regularizer=regularizers.l2(0.01))(batch_norm)
    conv_3 = Conv2D(num_filters,
                    kernel_size=(filter_sizes[3], embedding_dim),
                    padding='valid',
                    kernel_initializer='normal',
                    activation='relu',
                    input_shape=input_shape,
                    kernel_regularizer=regularizers.l2(0.01))(batch_norm)

    maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1),
                          strides=(1, 1),
                          padding='valid')(conv_0)
    maxpool_1 = MaxPool2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1),
                          strides=(1, 1),
                          padding='valid')(conv_1)
    maxpool_2 = MaxPool2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1),
                          strides=(1, 1),
                          padding='valid')(conv_2)
    maxpool_3 = MaxPool2D(pool_size=(sequence_length - filter_sizes[3] + 1, 1),
                          strides=(1, 1),
                          padding='valid')(conv_3)

    concatenated_tensor = Concatenate(axis=1)(
        [maxpool_0, maxpool_1, maxpool_2, maxpool_3])
    flatten = Flatten()(concatenated_tensor)
    dropout = Dropout(drop)(flatten)
    output = Dense(units=3,
                   activation='softmax',
                   kernel_regularizer=regularizers.l2(0.1))(dropout)

    # this creates a model that includes
    model = Model(inputs=inputs, outputs=output)
    adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    print("Traning Model...")
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(X_test, y_test))
    model.save("model/cnn.h5")
    return model
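# The saved HDF5 file can be restored later without retraining, e.g.:
from keras.models import load_model
cnn = load_model("model/cnn.h5")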