# NOTE(review): fragment -- `c3`, `neurons_num`, `input_features` and
# `n_features` are defined above this excerpt.
# Encoder stage: downsample the previous residual block's output.
p3 = MaxPooling2D((2, 2))(c3)
p3 = Dropout(0.5)(p3)

# Residual double-conv block: two 3x3 convs plus a 1x1 projection shortcut
# from the pooled input, summed and re-normalized.
c4 = Conv2D(neurons_num * 8, (3, 3), activation='relu', padding='same')(p3)
c4 = BatchNormalization()(c4)
c4 = Conv2D(neurons_num * 8, (3, 3), activation='relu', padding='same')(c4)
c4 = BatchNormalization()(c4)
sc_4 = Conv2D(neurons_num * 8, (1, 1), padding='same')(p3)
c4 = add([c4, sc_4])
c4 = BatchNormalization()(c4)
p4 = MaxPooling2D(pool_size=(2, 2))(c4)
p4 = Dropout(0.5)(p4)

# Join features information in the depthest layer
# Tile the per-sample feature vector over the 8x8 spatial grid so it can be
# concatenated channel-wise with the deepest feature map.
# assumes p4 is (batch, 8, 8, C) at this depth -- TODO confirm input size
f_repeat = RepeatVector(8 * 8)(input_features)
f_conv = Reshape((8, 8, n_features))(f_repeat)
p4_feat = concatenate([p4, f_conv], -1)

# Bottleneck residual block on the feature-augmented map.
c5 = Conv2D(neurons_num * 16, (3, 3), activation='relu',
            padding='same')(p4_feat)
c5 = BatchNormalization()(c5)
c5 = Conv2D(neurons_num * 16, (3, 3), activation='relu', padding='same')(c5)
c5 = BatchNormalization()(c5)
sc_5 = Conv2D(neurons_num * 16, (1, 1), padding='same')(p4_feat)
c5 = add([c5, sc_5])
c5 = BatchNormalization()(c5)

# Decoder: upsample back to stage-4 resolution and merge the skip connection.
u6 = Conv2DTranspose(neurons_num * 8, (2, 2), strides=(2, 2),
                     padding='same')(c5)
u6 = concatenate([u6, c4])
u6 = Dropout(0.5)(u6)
Example #2
0
def create_model(metadata, clusters):
    """
    Creates all the layers for our neural network model.

    Six categorical inputs are embedded into a shared 10-d space, merged
    with the raw GPS coordinate input, pushed through one hidden layer and
    a softmax over destination clusters, and finally converted into a
    coordinate prediction as the probability-weighted mean of the cluster
    centers.
    """
    # Arbitrary dimension shared by every embedding.
    embedding_dim = 10

    def embedding_branch(cardinality):
        # One Sequential per categorical feature: embed a single id and
        # flatten the (1, embedding_dim) output down to (embedding_dim,).
        branch = Sequential()
        branch.add(Embedding(cardinality, embedding_dim, input_length=1))
        branch.add(Reshape((embedding_dim, )))
        return branch

    embed_quarter_hour = embedding_branch(metadata['n_quarter_hours'])
    embed_day_of_week = embedding_branch(metadata['n_days_per_week'])
    embed_week_of_year = embedding_branch(metadata['n_weeks_per_year'])
    embed_client_ids = embedding_branch(metadata['n_client_ids'])
    embed_taxi_ids = embedding_branch(metadata['n_taxi_ids'])
    embed_stand_ids = embedding_branch(metadata['n_stand_ids'])

    # GPS coordinates (5 first lat/long and 5 latest lat/long, i.e. 20 values).
    coords = Sequential()
    coords.add(Dense(1, input_dim=20, init='normal'))

    # Concatenate every branch into a single input layer.
    model = Sequential()
    model.add(
        Merge([
            embed_quarter_hour, embed_day_of_week, embed_week_of_year,
            embed_client_ids, embed_taxi_ids, embed_stand_ids, coords
        ],
              mode='concat'))

    # Single hidden layer.
    model.add(Dense(500))
    model.add(Activation('relu'))

    # Cluster membership probabilities via softmax.
    model.add(Dense(len(clusters)))
    model.add(Activation('softmax'))

    # Final activation: destination is the probability-weighted mean of the
    # cluster coordinates.
    cluster_coords = K.cast_to_floatx(clusters)

    def destination(probabilities):
        return tf.matmul(probabilities, cluster_coords)

    model.add(Activation(destination))

    # `clipvalue` guards against exploding gradients.
    optimizer = SGD(lr=0.01, momentum=0.9, clipvalue=1.)
    model.compile(loss=tf_haversine, optimizer=optimizer)

    return model
Example #3
0
def FCN8(nClasses=2, input_height=68, input_width=68, vgg_level=3):
    """Build an FCN-8s segmentation model on a VGG16-style encoder.

    Returns a Model mapping (input_height, input_width, 3) images to a
    per-pixel softmax over `nClasses` (reshaped to (pixels, nClasses)).
    `vgg_level` is accepted for interface compatibility but unused, as in
    the original.

    BUG FIX: the original instantiated `keras.layers.BatchNormalization()`
    after every convolution but never applied it to a tensor -- a pure
    no-op.  The BN layers are now wired into the graph.  `block2_pool`
    also gained the `data_format=IMAGE_ORDERING` argument every other
    pooling layer already passes.
    """

    def _conv_bn(t, filters, kernel, name=None):
        # 2-D conv + ReLU followed by batch normalization.
        t = Conv2D(filters, kernel,
                   activation='relu',
                   padding='same',
                   name=name,
                   data_format=IMAGE_ORDERING)(t)
        return keras.layers.BatchNormalization()(t)

    img_input = Input(shape=(input_height, input_width, 3))

    # Block 1
    x = _conv_bn(img_input, 64, (3, 3), name='block1_conv1')
    x = _conv_bn(x, 64, (3, 3), name='block1_conv2')
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block1_pool',
                     data_format=IMAGE_ORDERING)(x)
    f1 = x

    # Block 2
    x = _conv_bn(x, 128, (3, 3), name='block2_conv1')
    x = _conv_bn(x, 128, (3, 3), name='block2_conv2')
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block2_pool',
                     data_format=IMAGE_ORDERING)(x)
    f2 = x

    # Block 3
    x = _conv_bn(x, 256, (3, 3), name='block3_conv1')
    x = _conv_bn(x, 256, (3, 3), name='block3_conv2')
    x = _conv_bn(x, 256, (3, 3), name='block3_conv3')
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block3_pool',
                     data_format=IMAGE_ORDERING)(x)
    f3 = x

    # Block 4
    x = _conv_bn(x, 512, (3, 3), name='block4_conv1')
    x = _conv_bn(x, 512, (3, 3), name='block4_conv2')
    x = _conv_bn(x, 512, (3, 3), name='block4_conv3')
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block4_pool',
                     data_format=IMAGE_ORDERING)(x)
    f4 = x

    # Block 5
    x = _conv_bn(x, 512, (3, 3), name='block5_conv1')
    x = _conv_bn(x, 512, (3, 3), name='block5_conv2')
    x = _conv_bn(x, 512, (3, 3), name='block5_conv3')
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block5_pool',
                     data_format=IMAGE_ORDERING)(x)
    f5 = x

    # Fully-convolutional head on the deepest features.
    o = _conv_bn(f5, 4096, (7, 7))
    o = _conv_bn(o, 4096, (1, 1))

    # Class scores at stride 32, upsampled x2 and fused with the stride-16
    # skip connection from block 4.
    o = Conv2D(nClasses, (1, 1),
               kernel_initializer='he_normal',
               data_format=IMAGE_ORDERING)(o)
    o = Conv2DTranspose(nClasses,
                        kernel_size=(4, 4),
                        strides=(2, 2),
                        use_bias=False,
                        data_format=IMAGE_ORDERING)(o)

    o2 = Conv2D(nClasses, (1, 1),
                kernel_initializer='he_normal',
                data_format=IMAGE_ORDERING)(f4)

    o, o2 = crop(o, o2, img_input)
    o = Add()([o, o2])

    # Upsample x2 again and fuse with the stride-8 skip from block 3.
    o = Conv2DTranspose(nClasses,
                        kernel_size=(4, 4),
                        strides=(2, 2),
                        use_bias=False,
                        data_format=IMAGE_ORDERING)(o)
    o2 = Conv2D(nClasses, (1, 1),
                kernel_initializer='he_normal',
                data_format=IMAGE_ORDERING)(f3)
    o2, o = crop(o2, o, img_input)
    o = Add()([o2, o])

    # Final x8 upsampling back toward input resolution.
    o = Conv2DTranspose(nClasses,
                        kernel_size=(12, 12),
                        strides=(8, 8),
                        use_bias=False,
                        data_format=IMAGE_ORDERING)(o)

    # Probe the spatial output shape, then flatten to (pixels, classes)
    # for a per-pixel softmax.
    o_shape = Model(img_input, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    print(o_shape)
    o = Reshape((-1, outputHeight * outputWidth))(o)
    o = Permute((2, 1))(o)
    o = Activation('softmax')(o)
    model = Model(img_input, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight

    return model
# Assemble the SegNet-style autoencoder: input layer, encoder stack,
# decoder stack, 1x1 classifier conv and per-pixel softmax.
# NOTE(review): `autoencoder`, `create_encoding_layers`,
# `create_decoding_layers`, `num_classes`, `data_shape`, `train_data` and
# `train_label` are defined above this excerpt.
autoencoder.add(Layer(input_shape=(3, 360, 480)))

#autoencoder.add(GaussianNoise(sigma=0.3))
autoencoder.encoding_layers = create_encoding_layers()
autoencoder.decoding_layers = create_decoding_layers()
for i, l in enumerate(autoencoder.encoding_layers):
    autoencoder.add(l)
    print(i, l.input_shape, l.output_shape)
# BUG FIX: the original second loop printed the stale index `i` left over
# from the encoder loop above; enumerate the decoder layers as well.
for i, l in enumerate(autoencoder.decoding_layers):
    autoencoder.add(l)
    print(i, l.input_shape, l.output_shape)

# 1x1 convolution maps decoder features to one channel per class.
the_conv = (Convolution2D(num_classes, 1, 1, border_mode='valid',))
autoencoder.add(the_conv)
print(the_conv.input_shape, the_conv.output_shape)
# Flatten the spatial dims and move classes last for the softmax.
autoencoder.add(Reshape((num_classes, data_shape)))#, input_shape=(num_classes,360,480)))
autoencoder.add(Permute((2, 1)))
autoencoder.add(Activation('softmax'))
#from keras.optimizers import SGD
#optimizer = SGD(lr=0.01, momentum=0.8, decay=0., nesterov=False)
autoencoder.compile(loss="categorical_crossentropy", optimizer='adadelta', metrics=['accuracy'])

#current_dir = os.path.dirname(os.path.realpath(__file__))
#model_path = os.path.join(current_dir, "autoencoder.png")
#plot(model_path, to_file=model_path, show_shapes=True)

nb_epoch = 2
batch_size = 7

history = autoencoder.fit(train_data, train_label, batch_size=batch_size, nb_epoch=nb_epoch)#,
                    #show_accuracy=True)#, class_weight=class_weighting )#, validation_data=(X_test, X_test))
Example #5
0
# NOTE(review): fragment -- `X_train` and `randomDim` are defined above this
# excerpt, and the final `discriminator.add(Convolution2D(...))` call is
# truncated mid-argument-list at the end of the excerpt.
# Add a channel axis: (N, H, W) -> (N, 1, H, W), channels-first.
X_train = X_train[:, np.newaxis, :, :]


# Function for initializing network weights
def initNormal(shape, name=None):
    # Gaussian init with std 0.02, the usual DCGAN choice.
    return initializations.normal(shape, scale=0.02, name=name)


# Optimizer
adam = Adam(lr=0.0002, beta_1=0.5)

# Generator: project noise to 128x7x7, then upsample twice to 28x28.
generator = Sequential()
generator.add(Dense(128 * 7 * 7, input_dim=randomDim, init=initNormal))
generator.add(LeakyReLU(0.2))
generator.add(Reshape((128, 7, 7)))
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Convolution2D(64, 5, 5, border_mode='same'))
generator.add(LeakyReLU(0.2))
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Convolution2D(1, 5, 5, border_mode='same', activation='sigmoid'))
generator.compile(loss='binary_crossentropy', optimizer=adam)

# Discriminator
discriminator = Sequential()
# NOTE(review): this call is cut off by the excerpt boundary below.
discriminator.add(
    Convolution2D(64,
                  5,
                  5,
                  border_mode='same',
                  subsample=(2, 2),
def baseline_model(num_classes, image_shape):
    """Simple MLP baseline: flatten the image, one 128-unit ReLU hidden
    layer, softmax over `num_classes`.

    `image_shape` is the (height, width) of the input images.
    """
    model = Sequential()
    # BUG FIX: Reshape expects the target shape as a tuple, not a bare int.
    flat_dim = int(image_shape[0] * image_shape[1])
    model.add(Reshape((flat_dim,), input_shape=image_shape))
    # (Dropped the bogus input_dim=128 -- the layer's true input size is
    # flat_dim, and input_dim is ignored on non-first layers anyway.)
    model.add(Dense(128, init='normal', activation='relu'))
    model.add(Dense(num_classes, init='normal', activation='softmax'))
    return model
# NOTE(review): fragment -- `layer1`, `layer2`, `input_x` and `dr` (dropout
# rate) are defined above this excerpt.
layer2 = Dropout(dr)(layer2)
layer2 = ZeroPadding2D((0, 2), data_format="channels_first")(layer2)

# Second conv tower: 1x7 valid convolution over the zero-padded features.
layer3 = Conv2D(50, (1, 7),
                padding='valid',
                activation="relu",
                name="conv3",
                init='glorot_uniform',
                data_format="channels_first")(layer2)
layer3 = Dropout(dr)(layer3)

# Concatenate the two conv towers, then fold the trailing two axes into one
# feature axis per timestep so the tensor can feed an LSTM.
concat = keras.layers.concatenate([layer1, layer3])
concat_size = list(np.shape(concat))
input_dim = int(concat_size[-1] * concat_size[-2])
timesteps = int(concat_size[-3])
concat = Reshape((timesteps, input_dim))(concat)
lstm_out = LSTM(50, input_dim=input_dim, input_length=timesteps)(concat)
# Dense head: 256-ReLU -> dropout -> 2-way softmax.
layer_dense1 = Dense(256, activation='relu', init='he_normal',
                     name="dense1")(lstm_out)
layer_dropout = Dropout(dr)(layer_dense1)
layer_dense2 = Dense(2, init='he_normal', name="dense2")(layer_dropout)
layer_softmax = Activation('softmax')(layer_dense2)

output = Reshape([2])(layer_softmax)

model = Model(inputs=input_x, outputs=output)

model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()
# End of building, we will start fitting the neural network
# Set up some params
Example #8
0
    def __init__(self, input_shape, dimention):
        """Build the siamese alignment model over two argument sequences.

        Each input is a length-`input_shape` vector (one event's arguments);
        a shared Conv1D projects both, the second event's features are
        cyclically shifted four times via `myfunc`, each shift is aligned
        against the first event with a second shared Conv1D, and a sigmoid
        over the stacked alignments yields the similarity prediction.
        """
        self.ip1 = Input(shape=(input_shape, 1))
        self.ip2 = Input(shape=(input_shape, 1))
        self.sim = Input(shape=(1, ))
        print(self.ip1.shape)
        ##---projection layer-----##
        # Shared projection: each size-4 argument (5 per event, so ip1/ip2
        # are 20x1) becomes 5 tanh channels.
        self.projection1 = Conv1D(
            5, 4, strides=4, padding='valid', activation='tanh')
        self.sh_projection1_op1 = self.projection1(self.ip1)
        self.sh_projection1_op2 = self.projection1(self.ip2)

        print(self.sh_projection1_op1.shape)
        self.f_v1 = Flatten()(self.sh_projection1_op1)
        self.f_v2 = Flatten()(self.sh_projection1_op2)
        print('after flatten {}'.format(self.f_v1.get_shape().as_list()))
        # Shared alignment conv applied to every (f_v1, shift) pairing.
        self.projection2 = Conv1D(1,
                                  5,
                                  strides=5,
                                  padding='valid',
                                  activation='tanh')
        self.allignment = list()

        # x0 is the unshifted feature vector; x1..x4 are successive shifts.
        shifts = [self.f_v2]
        flat_len = self.f_v2.get_shape().as_list()[1]
        for _ in range(4):
            shifts.append(Lambda(myfunc, output_shape=flat_len)(shifts[-1]))
        x0, x1, x2, x3, x4 = shifts

        print('*****@@@@@+++++{} and {} and {} and {} and{}'.format(
            x0.shape, x1.shape, x2.shape, x3.shape, x4.shape))

        # Align the first event's features against every shift of the second.
        for variant in shifts:
            self.merged_layer = merge([self.f_v1, variant], mode='concat')
            self.merged_layer = Reshape((-1, 2))(self.merged_layer)
            self.allignment.append(self.projection2(self.merged_layer))

        # Stack the five alignment outputs channel-wise.
        for i in range(len(self.allignment)):
            self.allignment[i] = Reshape((5, 1))(self.allignment[i])

        self.allignment_all = merge(self.allignment, mode='concat')

        self.prediction = Dense(1, activation='sigmoid')(Flatten()(
            self.allignment_all))

        self.model = Model(input=[self.ip1, self.ip2, self.sim],
                           output=self.prediction)
        sgd = SGD(lr=0.1, momentum=0.9, decay=0, nesterov=False)
        self.model.compile(loss='mean_squared_error',
                           optimizer=sgd,
                           metrics=['accuracy'])
Example #9
0
    def comp_double(self):
        '''
        double model. Similar to two-pathway, except takes in a 4x33x33 patch
        and its center 4x5x5 patch. Merges the two paths at the flatten layer
        and classifies into 5 classes with a softmax.

        Returns the compiled Keras model.
        '''
        print('Compiling double model...')
        # Main path: four L1/L2-regularized conv blocks over the 4x33x33 patch.
        single = Sequential()
        single.add(
            Convolution2D(64,
                          7,
                          7,
                          border_mode='valid',
                          W_regularizer=l1l2(l1=0.01, l2=0.01),
                          input_shape=(4, 33, 33)))
        single.add(Activation('relu'))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
        single.add(Dropout(0.5))
        single.add(
            Convolution2D(nb_filter=128,
                          nb_row=5,
                          nb_col=5,
                          activation='relu',
                          border_mode='valid',
                          W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
        single.add(Dropout(0.5))
        single.add(
            Convolution2D(nb_filter=256,
                          nb_row=5,
                          nb_col=5,
                          activation='relu',
                          border_mode='valid',
                          W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
        single.add(Dropout(0.5))
        single.add(
            Convolution2D(nb_filter=128,
                          nb_row=3,
                          nb_col=3,
                          activation='relu',
                          border_mode='valid',
                          W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(Dropout(0.25))
        single.add(Flatten())

        # Secondary path: the small 4x5x5 center patch through a maxout layer.
        five = Sequential()
        five.add(Reshape((100, 1), input_shape=(4, 5, 5)))
        five.add(Flatten())
        five.add(MaxoutDense(128, nb_feature=5))
        five.add(Dropout(0.5))

        model = Sequential()
        # merge both paths
        model.add(Merge([five, single], mode='concat', concat_axis=1))
        model.add(Dense(5))
        model.add(Activation('softmax'))

        sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
        # BUG FIX: the original compiled with the string 'sgd', silently
        # discarding the SGD instance configured on the line above.
        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        print('Done.')
        return model
Example #10
0
def tag3d_network_dense(input,
                        nb_units=64,
                        nb_dense_units=[512, 512],
                        depth=2,
                        nb_output_channels=1,
                        trainable=True):
    """Decode a parameter vector into a 64x64 tag image and its depth map.

    A dense stack projects `input` to an (8n, 4, 4) tensor, which is grown
    through three upsampling stages to a 16x16 base; `tag3d` continues to
    64x64 and `depth_map` branches off the 16x16 base.

    Returns the named tensors (tag3d, depth_map).
    NOTE(review): `nb_output_channels` is unused -- both heads emit a single
    channel; confirm before relying on it.
    """
    n = nb_units

    def conv(n, repeats=None):
        # `repeats` conv+ReLU pairs; defaults to the network `depth`.
        # (Removed an inner `normal` initializer that was defined but never
        # used -- every layer below uses the 'he_normal' string init.)
        if repeats is None:
            repeats = depth
        return [[
            Convolution2D(n, 3, 3, border_mode='same', init='he_normal'),
            Activation('relu')
        ] for _ in range(repeats)]

    # Dense projection to (8n, 4, 4), then upsample 4x4 -> 16x16.
    base = sequential(
        [
            [
                Dense(nb_dense, activation='relu')
                for nb_dense in nb_dense_units
            ],
            Dense(8 * n * 4 * 4),
            Activation('relu'),
            Reshape((
                8 * n,
                4,
                4,
            )),
            conv(8 * n),
            UpSampling2D(),  # 8x8
            conv(4 * n),
            UpSampling2D(),  # 16x16
            conv(2 * n),
        ],
        ns='tag3d_gen.base',
        trainable=trainable)(input)

    # Tag head: continue upsampling 16x16 -> 64x64, single-channel output.
    tag3d = sequential(
        [
            conv(2 * n),
            UpSampling2D(),  # 32x32
            conv(n),
            UpSampling2D(),  # 64x64
            conv(n, 1),
            Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
        ],
        ns='tag3d',
        trainable=trainable)(base)

    # Depth head: shallow conv stack directly on the 16x16 base.
    depth_map = sequential([
        conv(n // 2, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ],
                           ns='depth_map',
                           trainable=trainable)(base)

    return name_tensor(tag3d, 'tag3d'), name_tensor(depth_map, 'depth_map')
Example #11
0
def simple_gan_generator(nb_units, z, labels, depth_map, tag3d, depth=2):
    """GAN generator head: from noise `z`, `labels`, and the rendered
    `depth_map`/`tag3d` tensors, produce the blur factor, three lighting
    maps, the background, and the detail map.

    Returns (blur, [light_sb, light_sw, light_t], background, details).
    """
    n = nb_units
    # Feature extractors over the two conditioning images.
    depth_map_features = sequential([
        conv2d_block(n),
        conv2d_block(2 * n),
    ])(depth_map)

    tag3d_features = sequential([
        conv2d_block(n, subsample=2),
        conv2d_block(2 * n, subsample=2),
    ])(tag3d)

    # Dense trunk over the concatenated noise + labels.
    x = sequential([
        Dense(5 * n),
        BatchNormalization(mode=2),
        Activation('relu'),
        Dense(5 * n),
        BatchNormalization(mode=2),
        Activation('relu'),
    ])(concat([z, labels]))

    # Scalar blur factor, clipped into [0, 1].
    blur = InBounds(0, 1, clip=True)(Dense(1)(x))

    # Project to (8n, 4, 4) and upsample to 16x16.
    x = sequential([
        Dense(8 * 4 * 4 * n),
        Activation('relu'),
        BatchNormalization(mode=2),
        Reshape((8 * n, 4, 4)),
    ])(x)

    x = sequential([
        conv2d_block(8 * n, filters=1, depth=1, up=True),  # 4x4 -> 8x8
        conv2d_block(8 * n, depth=depth, up=True),  # 8x8 -> 16x16
    ])(x)

    off_depth_map = sequential([
        conv2d_block(2 * n, depth=depth),
    ])(concat([x, depth_map_features]))

    light = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 -> 64x64
    ])(off_depth_map)

    def get_light(x):
        # One light map: single channel, blurred, clipped to [0, 1].
        return sequential([
            conv2d_block(1, filters=1, batchnorm=False),
            GaussianBlur(sigma=4),
            InBounds(0, 1, clip=True),
        ])(x)

    light_sb = get_light(light)
    light_sw = get_light(light)
    light_t = get_light(light)

    background = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, batchnorm=False),
        InBounds(-1, 1, clip=True),
    ])(off_depth_map)

    # BUG FIX: every other call site passes `concat` a list of tensors;
    # the original passed two positional args here, which would feed
    # `off_depth_map` into concat's second (axis) parameter.
    details = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, depth=1, batchnorm=False),
        InBounds(-1, 1, clip=True)
    ])(concat([tag3d_features, off_depth_map]))
    return blur, [light_sb, light_sw, light_t], background, details
# NOTE(review): fragment -- `x`, `y` and `vocabulary_inv` are defined above
# this excerpt, and `output`/`inputs` are presumably consumed below it.
X_train, X_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=42)

sequence_length = x.shape[1]
vocabulary_size = len(vocabulary_inv)
embedding_dim = 256
filter_sizes = [3,4,5]  # three parallel n-gram widths (Kim-style text CNN)
num_filters = 512
drop = 0.5

nb_epoch = 10
batch_size = 128

# this returns a tensor
inputs = Input(shape=(sequence_length,), dtype='int32')
embedding = Embedding(output_dim=embedding_dim, input_dim=vocabulary_size, input_length=sequence_length)(inputs)
# Add a channel axis so 2-D convs can slide over (sequence, embedding).
reshape = Reshape((sequence_length,embedding_dim,1))(embedding)

# One full-width conv per filter size: each produces (seq - size + 1) x 1 maps.
conv_0 = Convolution2D(num_filters, filter_sizes[0], embedding_dim, border_mode='valid', init='normal', activation='relu', dim_ordering='tf')(reshape)
conv_1 = Convolution2D(num_filters, filter_sizes[1], embedding_dim, border_mode='valid', init='normal', activation='relu', dim_ordering='tf')(reshape)
conv_2 = Convolution2D(num_filters, filter_sizes[2], embedding_dim, border_mode='valid', init='normal', activation='relu', dim_ordering='tf')(reshape)

# Max-over-time pooling collapses each feature map to a single value.
maxpool_0 = MaxPooling2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1), strides=(1,1), border_mode='valid', dim_ordering='tf')(conv_0)
maxpool_1 = MaxPooling2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1), strides=(1,1), border_mode='valid', dim_ordering='tf')(conv_1)
maxpool_2 = MaxPooling2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1), strides=(1,1), border_mode='valid', dim_ordering='tf')(conv_2)

merged_tensor = merge([maxpool_0, maxpool_1, maxpool_2], mode='concat', concat_axis=1)
flatten = Flatten()(merged_tensor)
# reshape = Reshape((3*num_filters,))(merged_tensor)
dropout = Dropout(drop)(flatten)
output = Dense(output_dim=2, activation='softmax')(dropout)
Example #13
0
    def __init__(self, params,outfile,weights_file,save_files):
        batch_size=params['batch_size']
        original_dim=params['original_dim']
        latent_dim=params['latent_dim']
        s_dim=params['s_dim']
        l_size=params['l_size']
        filter_size = params['filter_size']
        n_features = params['n_features']
        s_clss = params['s_clss']
        init = params['Init']#eval(params['Init']+'()')
        if 'LeakyReLU' in params['Activation']:
            act = eval(params['Activation']+'(0.1)')
        else:
            act = Activation(params['Activation'])
        actstr = params['Activation']
        use_bn = params['BN']
        dp = params['dp']   
        if 'RGB' in params:
            RGB=params['RGB']
        else:
            RGB=1

        if 'mseloss' in params and params['mseloss']==True:
            mseloss = 'mse'
        else:
            mseloss = 'binary_crossentropy'
        encS_bn = params['EncBN']
        encZ_bn = params['EncBN']
        dec_bn = params['DecBN']
        
        def add_dense_layer(inp,dim,out_dp=0):
            h = Dense(dim,init=init)(inp)
            if use_bn:
                h = BatchNormalization(mode=2)(h)#mode=2
            h = act(h)
            if out_dp==1 and dp>0:
                h = Dropout(dp)(h)
            return h
        
        def clipping(args):
            vals = args
            return K.clip(vals,-30,30)
            
        def add_conv_layer(inp,n_features,filter_size,bn=False,actconv='relu',stride=True,dilation=False,maxpool=False,upsamp = False,avgpool=False):
            if stride:
                h = Conv2D(n_features, kernel_size=(filter_size, filter_size),strides=(2,2), padding='same')(inp)
            elif dilation:
                h = Conv2D(n_features, kernel_size=(filter_size, filter_size),dilation_rate=(2,2), padding='same')(inp)
            else:
                h = Conv2D(n_features, kernel_size=(filter_size, filter_size), padding='same')(inp)
            if upsamp:
                h = UpSampling2D((2, 2))(h)
            if bn:
                h = BatchNormalization(axis = 1)(h)#mode=2
            h = actconv(h)
            if maxpool:
                h = MaxPooling2D((2, 2), padding='same')(h)
            if avgpool:
                h = AveragePooling2D((2, 2), padding='same')(h)
            return h
        
        #shp = (1,original_dim,original_dim)
        input_img = Input(batch_shape=(batch_size, RGB, original_dim, original_dim))
        #input_img = Input(shape=shp)
        
        #Each part of the network has to be adjusted to the complexity of the data
        #here I use simple architecture with 3 convolutional layers
        #Enc Z ########################################
        x = add_conv_layer(input_img,n_features,filter_size,bn=encZ_bn,actconv=act,stride=True,dilation=False,maxpool=False,upsamp = False,avgpool=False)
        x = add_conv_layer(x,n_features/2,filter_size,bn=encZ_bn,actconv=act,stride=True,dilation=False,maxpool=False,upsamp = False,avgpool=False)
        x = add_conv_layer(x,n_features/2,filter_size,bn=encZ_bn,actconv=act,stride=False,dilation=False,maxpool=False,upsamp = False,avgpool=False)
        
        h = Flatten()(x)
        h = add_dense_layer(h,256,out_dp=0)
        z_mean = Dense(latent_dim,kernel_initializer=init, activation=actstr)(h)
        #z_log_var = Dense(latent_dim,kernel_initializer=init, activation=actstr)(h)
        self.EncZ = Model(inputs = input_img, output=[z_mean])#,z_log_var])
            
        
        #Enc S ########################################
        x = add_conv_layer(input_img,n_features,filter_size,bn=encS_bn,actconv=act,stride=True,dilation=False,maxpool=False,upsamp = False,avgpool=False)
        x = add_conv_layer(x,n_features/2,filter_size,bn=encS_bn,actconv=act,stride=True,dilation=False,maxpool=False,upsamp = False,avgpool=False)
        x = add_conv_layer(x,n_features/2,filter_size,bn=encS_bn,actconv=act,stride=False,dilation=False,maxpool=False,upsamp = False,avgpool=False)
        divShape = 4
        h = Flatten()(x)
        h = add_dense_layer(h,256,out_dp=0)
        s = Dense(s_dim,kernel_initializer=init, activation=actstr)(h)  
        self.EncS = Model(inputs = input_img, outputs=s)
        

        #Dec ########################################
        # --- Decoder (Dec): maps latent code z plus attribute code s to an image ---
        # Fixed batch_shape: these models only accept exactly `batch_size` samples.
        in_z = Input(batch_shape=(batch_size, latent_dim))
        in_s = Input(batch_shape=(batch_size, s_dim))
        
        # Concatenate z and s along the feature axis.
        inz_s = concatenate(inputs = [in_z, in_s],axis=1)#([in_z, in_s])

        # Project to a flat feature map and reshape to (channels, H, W).
        # NOTE(review): the '/' divisions rely on Python 2 integer division
        # (this file uses print statements); on Python 3 they would produce
        # floats and break Dense/Reshape -- confirm target interpreter.
        x = Dense(n_features*original_dim*original_dim/(2*divShape*divShape),kernel_initializer=init, activation=actstr)(inz_s)
        x = Reshape((n_features/2,original_dim/divShape,original_dim/divShape))(x)
        # Two upsampling conv blocks back to full spatial size, then one
        # block at full feature width.
        x = add_conv_layer(x,n_features/2,filter_size,bn=dec_bn,actconv=act,stride=False,upsamp = True)
        x = add_conv_layer(x,n_features/2,filter_size,bn=dec_bn,actconv=act,stride=False,upsamp = True)
        x = add_conv_layer(x,n_features,filter_size,bn=dec_bn,actconv=act,stride=False,upsamp = False)
        
        # Output layer: sigmoid when reconstructing with binary crossentropy,
        # otherwise a linear conv output.
        if mseloss == 'binary_crossentropy':
            decoder_h = Conv2D(RGB, kernel_size=(filter_size, filter_size), activation='sigmoid', padding='same')(x)
        else:
            decoder_h = Conv2D(RGB, kernel_size=(filter_size, filter_size), padding='same')(x)
 
        self.Dec = Model([in_z,in_s],[decoder_h])#,x_decoded_log_std])#logpxz

        #Adv ########################################
        # Adversary: tries to predict the s-class from z alone.  Combined
        # with the negative advweight below, this pushes s-information out
        # of the z code.
        adv_h = add_dense_layer(in_z,l_size,out_dp=0)
        adv_h = add_dense_layer(adv_h,l_size,out_dp=0)
        adv_h = add_dense_layer(adv_h,l_size,out_dp=0)
        adv_h = Dense(s_clss,kernel_initializer=init)(adv_h)
        out = Activation('softmax')(adv_h)
        
        self.Adv = Model(in_z,out)
        ########################################
        
        #Sclsfr ########################################
        # S-classifier: predicts the s-class from the s code itself.
        hclsfr = add_dense_layer(in_s,l_size,out_dp=0)
        hclsfr = add_dense_layer(hclsfr,l_size,out_dp=0)
        hclsfr = Dense(s_clss,kernel_initializer=init)(hclsfr)
        outhclsfr= Activation('softmax')(hclsfr)
        
        self.Sclsfr = Model(in_s,outhclsfr)
        ########################################
        
        print 'building enc...'
        x1 = Input(batch_shape=(batch_size,RGB, original_dim, original_dim))
        Z1in = Input(batch_shape=(batch_size, latent_dim))
        
        s1 = self.EncS(x1)
        z1_mean = self.EncZ(x1)#,z1_log_var
            
        def sampling(args):
            # VAE reparameterisation trick.  Currently unused because
            # z1 = z1_mean below (deterministic encoder).
            z_mean, z_log_var = args
            epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.)
            return z_mean + K.exp(z_log_var / 2) * epsilon
        
        #z1 = Lambda(sampling, output_shape=(latent_dim,))([z1_mean, z1_log_var]) #VAE encoder
        z1 = z1_mean #Normal encoder 
                   
        print 'building dec...'
        x11 = self.Dec([z1,in_s])#,x11_log_std
        
        print 'building Sclsifier...'
        Sclsfr = self.Sclsfr(s1)
        
        print 'building Adv...'
        Adv1 = self.Adv(z1)
        Adv1_netAdv = self.Adv(Z1in)
        
        # Loss weights.  advweight defaults to a NEGATIVE value: the
        # encoder is rewarded when the adversary fails.
        recweight = params['recweight'] if 'recweight' in params else 0.5
        advweight = params['advweight'] if 'advweight' in params else -0.1
            
        print 'compile...'
        # DistNet: trains encoder + decoder (Adv and S-part frozen).
        self.DistNet = Model([x1,in_s], [x11,Adv1])#[x11,z1,Adv1])
        self.freeze_unfreeze_Adv(False)
        self.freeze_unfreeze_Enc(True)
        self.freeze_unfreeze_Dec(True)
        self.freeze_unfreeze_Spart(False)
        opt = Adam(lr=0.0001, beta_1=0.5)
        self.DistNet.compile(optimizer=opt, loss=[mseloss,'categorical_crossentropy'],loss_weights=[recweight,advweight])
        
        # Snet: trains only the S branch.
        self.Snet = Model(x1, Sclsfr)
        self.freeze_unfreeze_Adv(False)
        self.freeze_unfreeze_Enc(False)
        self.freeze_unfreeze_Dec(False)
        self.freeze_unfreeze_Spart(True)
        opt = Adam(lr=0.0001, beta_1=0.9)
        self.Snet.compile(optimizer=opt, loss='categorical_crossentropy',metrics=['accuracy'])
    
        # AdvNet: trains only the adversary from externally supplied z.
        # NOTE(review): compiled with 'sgd', so the Adam built on the line
        # just above is unused here -- confirm which optimizer was intended.
        self.AdvNet = Model(Z1in,Adv1_netAdv)
        self.freeze_unfreeze_Enc(False)
        self.freeze_unfreeze_Dec(False)
        self.freeze_unfreeze_Adv(True)
        self.freeze_unfreeze_Spart(False)
        opt = Adam(lr=0.00002, beta_1=0.9)
        self.AdvNet.compile(optimizer='sgd', loss='categorical_crossentropy')#loss#adv_loss)  
        
        # Bookkeeping: persist params and set up checkpointing of the best
        # Snet weights by validation accuracy.
        self.params = params
        self.outfile = outfile
        self.save_files = save_files
        if self.save_files:
            self.log_results(self.outfile,params,debug=False)
            self.weights_file = weights_file
            self.checkpointer = ModelCheckpoint(filepath=(self.weights_file + '_Snet_weights.h5'), monitor='val_acc', verbose=1, save_best_only=True)
Example #14
0
    def get_vnet(self):
        """Build a V-Net style 3D segmentation network.

        Returns:
            A Keras Model with a softmax output named 'main'.  When
            ``self.aux_output`` is set, the model additionally has an
            'aux' output and, depending on ``self.deep_supervision``,
            up to two deep-supervision outputs ('d1', 'd2').
        """
        inputs = Input((self.sz, self.sz, self.z_sz, self.nch))

        in_tr = intro(self.nf, self.sz, self.z_sz, self.nch, self.bn)(inputs)

        # Down path: each transition halves spatial dims and doubles filters.
        dwn_tr1 = down_transition(self.nf * 2, 2, int(in_tr.shape[2]),
                                  int(in_tr.shape[3]), int(in_tr.shape[4]),
                                  self.bn, self.dr)(in_tr)
        dwn_tr2 = down_transition(self.nf * 4, 2, int(dwn_tr1.shape[2]),
                                  int(dwn_tr1.shape[3]), int(dwn_tr1.shape[4]),
                                  self.bn, self.dr)(dwn_tr1)
        dwn_tr3 = down_transition(self.nf * 8, 3, int(dwn_tr2.shape[2]),
                                  int(dwn_tr2.shape[3]), int(dwn_tr2.shape[4]),
                                  self.bn, self.dr)(dwn_tr2)
        dwn_tr4 = down_transition(self.nf * 16, 3, int(dwn_tr3.shape[2]),
                                  int(dwn_tr3.shape[3]), int(dwn_tr3.shape[4]),
                                  self.bn, self.dr)(dwn_tr3)

        # Up path: each transition upsamples and merges the matching
        # down-path feature map (skip connection).
        up_tr4 = up_transition(self.nf * 8, 3, int(dwn_tr4.shape[2]),
                               int(dwn_tr4.shape[3]), int(dwn_tr4.shape[4]),
                               int(dwn_tr3.shape[4]), self.bn,
                               self.dr)([dwn_tr4, dwn_tr3])
        up_tr3 = up_transition(self.nf * 4, 3, int(up_tr4.shape[2]),
                               int(up_tr4.shape[3]), int(up_tr4.shape[4]),
                               int(dwn_tr2.shape[4]), self.bn,
                               self.dr)([up_tr4, dwn_tr2])
        up_tr2 = up_transition(self.nf * 2, 2, int(up_tr3.shape[2]),
                               int(up_tr3.shape[3]), int(up_tr3.shape[4]),
                               int(dwn_tr1.shape[4]), self.bn,
                               self.dr)([up_tr3, dwn_tr1])
        up_tr1 = up_transition(self.nf * 1, 2, int(up_tr2.shape[2]),
                               int(up_tr2.shape[3]), int(up_tr2.shape[4]),
                               int(in_tr.shape[4]), self.bn,
                               self.dr)([up_tr2, in_tr])

        # Classification head: 1x1x1 conv, flatten voxels, per-voxel softmax.
        res = Conv3D(self.n_channels, 1, padding='same')(up_tr1)
        res = Reshape((self.sz * self.sz * self.z_sz, self.n_channels))(res)
        act = 'softmax'
        out = Activation(act, name='main')(res)

        if not self.aux_output:
            # BUG FIX: Model(input=..., output=...) is the removed legacy
            # keyword form; current Keras requires inputs=/outputs=.
            model = Model(inputs=inputs, outputs=out)

        # aux output and deep supervision
        else:
            # Auxiliary 2-class head from the final up-path features.
            aux_res = Conv3D(2, 1, padding='same')(up_tr1)
            aux_res = Reshape((self.sz * self.sz * self.z_sz, 2))(aux_res)
            aux_out = Activation(act, name='aux')(aux_res)

            outputs = [out, aux_out]

            if (self.deep_supervision > 0):
                # Deep supervision #1: upsample mid-resolution features once.
                deep_1 = UpSampling3D((2, 2, 2))(up_tr2)
                res = Conv3D(self.n_channels, 1, padding='same')(deep_1)
                res = Reshape(
                    (self.sz * self.sz * self.z_sz, self.n_channels))(res)

                d_out_1 = Activation(act, name='d1')(res)

                outputs.append(d_out_1)

            if (self.deep_supervision > 1):
                # Deep supervision #2: upsample coarser features twice.
                deep_2 = UpSampling3D((2, 2, 2))(up_tr3)
                deep_2 = UpSampling3D((2, 2, 2))(deep_2)
                res = Conv3D(self.n_channels, 1, padding='same')(deep_2)
                res = Reshape(
                    (self.sz * self.sz * self.z_sz, self.n_channels))(res)

                d_out_2 = Activation(act, name='d2')(res)

                outputs.append(d_out_2)

            model = Model(inputs=inputs, outputs=outputs)

        return model
Example #15
0
def build_model():
    """Build a residual U-Net that fuses a scalar depth input at the
    bottleneck.

    Returns:
        A Keras Model taking [image (img_size_ori, img_size_ori, 1),
        depth (1,)] and producing a single-channel logit map (no final
        activation).
    """
    def conv(f, k=3, act='relu'):
        # Shared conv factory: f filters, k x k kernel, he_normal init.
        return Conv2D(f, (k, k),
                      activation=act,
                      kernel_initializer='he_normal',
                      padding='same')

    def _res_conv(inputs, f, k=3):  # very simple residual module
        channels = int(inputs.shape[-1])

        cs = inputs

        # Pre-activation residual branch: BN -> ReLU -> conv, twice.
        cs = BatchNormalization()(cs)
        cs = Activation('relu')(cs)
        cs = conv(f, 3, act=None)(cs)

        cs = BatchNormalization()(cs)
        cs = Activation('relu')(cs)
        cs = conv(f, 3, act=None)(cs)

        if f != channels:
            # 1x1 conv to match channel counts before the add.
            t1 = conv(f, 1, None)(inputs)
        else:
            t1 = inputs  # identity shortcut

        out = Add()([t1, cs])  # t1 + residual branch
        return out

    def pool():
        return MaxPooling2D((2, 2))

    def up(x, shape):
        # Bilinear-resize to the stored skip-connection shape, then halve
        # channels with a 2x2 conv.
        # NOTE(review): tf.image.resize_bilinear is the TF1 API (removed in
        # TF2 in favour of tf.image.resize) -- confirm the TF version used.
        x = Lambda(lambda x: tf.image.resize_bilinear(
            x, shape, align_corners=True))(x)
        x = Conv2D(K.int_shape(x)[-1] // 2, (2, 2),
                   activation=None,
                   kernel_initializer='he_normal',
                   padding='same')(x)
        return x

    inputs = Input(shape=(img_size_ori, img_size_ori, 1))
    depths = Input(shape=(1, ))
    preprocess1 = Lambda(lambda x: x / 255.0)(inputs)

    r = 16        # base filter count
    rep = 4       # number of down/up levels
    mid_rep = 3   # residual blocks at the bottleneck
    x = preprocess1

    skip_connections = []
    shapes = []

    # Encoder: conv + two residual blocks per level, recording each level's
    # activation and spatial shape for the decoder.
    for t in range(rep):
        x = conv(r * int(2**t), 3, None)(x)
        x = _res_conv(x, r * int(2**t), 3)
        x = _res_conv(x, r * int(2**t), 3)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        skip_connections.append(x)
        shapes.append(K.int_shape(x)[1:3])
        x = pool()(x)
        x = Dropout(0.2)(x)

    # Bottleneck: broadcast the (scaled) scalar depth over the feature map
    # and concatenate it as an extra channel.
    shape = K.int_shape(x)[1:3]
    aux = Lambda(lambda x: x / 1000.0)(depths)
    aux = RepeatVector(np.prod(np.asarray(shape)))(aux)
    aux = Reshape((*shape, 1))(aux)
    x = concatenate([x, aux])

    x = conv(r * int(2**rep), 3, None)(x)
    for t in range(mid_rep):
        x = _res_conv(x, r * int(2**rep))

    # Decoder: upsample, merge the matching skip connection, refine.
    for t, s, p in zip(reversed(range(rep)), reversed(skip_connections),
                       reversed(shapes)):
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = up(x, p)
        x = concatenate([s, x])
        x = Dropout(0.2)(x)
        x = conv(r * int(2**t), 3, None)(x)
        x = _res_conv(x, r * int(2**t), 3)
        x = _res_conv(x, r * int(2**t), 3)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # Final 1x1 conv produces raw logits (loss is applied externally).
    outputs = Conv2D(1, (1, 1),
                     activation=None,
                     kernel_initializer='he_normal',
                     padding='valid')(x)
    return Model([inputs, depths], [outputs])
Example #16
0
# Shuffle labels with the same permutation applied to X elsewhere.
y = y[indices]

# Balance classes (the dataset is unbalanced): weight each class by the
# inverse of its frequency relative to the most common class.
class_totals = y.sum(axis=0)
class_weight = class_totals.max() / class_totals

print(X.dtype, X.min(), X.max(), X.shape)
print(y.dtype, y.min(), y.max(), y.shape)
img_rows, img_cols = X.shape[1:]
nb_filters = 32
nb_pool = 2
nb_conv = 3

#### Convolutional network architecture
# NOTE(review): the Reshape to (1, rows, cols) implies channels-first
# ordering -- confirm the backend image_data_format matches.
model = Sequential()
model.add(Reshape((1, img_rows, img_cols), input_shape=(img_rows, img_cols)))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, activation='relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, activation='relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Convolution2D(16, nb_conv, nb_conv, activation='relu'))
model.add(Convolution2D(16, nb_conv, nb_conv, activation='relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))
#####

adam = Adam(lr=0.001)
model.compile(loss='categorical_crossentropy',
Example #17
0
def _swed_conv_unit(t, nb_filt):
    # One plain unit: Conv2D(1x5, same padding) -> BatchNorm -> ReLU.
    t = Conv2D(filters=nb_filt, kernel_size=(1, 5), padding='same')(t)
    t = BatchNormalization()(t)
    return Activation('relu')(t)


def _swed_res_unit(t, nb_filt, nb_convs, pool_size, dropout_rate,
                   relu_after_add=True):
    # Residual unit: `nb_convs` conv units, additive shortcut, optional
    # ReLU after the add (units 8 and 10 of the original layout omit it),
    # then max-pooling over the last axis and dropout.
    shortcut = t
    for _ in range(nb_convs):
        t = _swed_conv_unit(t, nb_filt)
    t = Add()([t, shortcut])
    if relu_after_add:
        t = Activation('relu')(t)
    t = MaxPooling2D(pool_size=(1, pool_size))(t)
    return Dropout(dropout_rate)(t)


def get_rwResVGG16model(params, network_params):
    """Build the residual VGG16-style swallowing-event-detection model.

    Args:
        params: dict with 'nb_channels', 'seq_len' and 'win_len'.
        network_params: dict with 'nb_cnn1d_filt' (four filter counts),
            'pool_size', 'dropout_rate', 'rnn_size' and 'fcn_size'.

    Returns:
        A compiled Keras Model mapping the raw signal tensor to a
        per-time-step sigmoid output named 'swed_out'.
    """
    x = Input(shape=(params['nb_channels'], params['seq_len'],
                     params['win_len']))

    filt = network_params['nb_cnn1d_filt']
    pool_size = network_params['pool_size']
    dropout_rate = network_params['dropout_rate']

    # CNN stack: alternating plain conv units and residual units
    # (CNN_unit_1 .. CNN_unit_10 in the original inline layout).
    sig_cnn = _swed_conv_unit(x, filt[0])                                   # unit 1
    sig_cnn = _swed_res_unit(sig_cnn, filt[0], 2, pool_size, dropout_rate)  # unit 2
    sig_cnn = _swed_conv_unit(sig_cnn, filt[1])                             # unit 3
    sig_cnn = _swed_res_unit(sig_cnn, filt[1], 2, pool_size, dropout_rate)  # unit 4
    sig_cnn = _swed_conv_unit(sig_cnn, filt[2])                             # unit 5
    sig_cnn = _swed_res_unit(sig_cnn, filt[2], 3, pool_size, dropout_rate)  # unit 6
    sig_cnn = _swed_conv_unit(sig_cnn, filt[3])                             # unit 7
    sig_cnn = _swed_res_unit(sig_cnn, filt[3], 3, pool_size, dropout_rate,
                             relu_after_add=False)                          # unit 8
    sig_cnn = _swed_conv_unit(sig_cnn, filt[3])                             # unit 9
    sig_cnn = _swed_res_unit(sig_cnn, filt[3], 3, pool_size, dropout_rate,
                             relu_after_add=False)                          # unit 10

    # Put the time axis first so each time step can be flattened for the RNN.
    sig_cnn = Permute((2, 1, 3))(sig_cnn)

    # RNN: stacked bidirectional GRUs; forward/backward merged by product.
    sig_rnn = Reshape((params['seq_len'], -1))(sig_cnn)
    for nb_rnn in network_params['rnn_size']:
        sig_rnn = Bidirectional(GRU(
            nb_rnn,
            activation='tanh',
            dropout=dropout_rate,
            recurrent_dropout=dropout_rate,
            return_sequences=True),
                                merge_mode='mul')(sig_rnn)

    # FCN head - Swallowing Event Detection (SwED): per-step sigmoid.
    swed = sig_rnn
    for nb_fcn in network_params['fcn_size']:
        swed = TimeDistributed(Dense(nb_fcn))(swed)
        swed = Dropout(dropout_rate)(swed)
    swed = TimeDistributed(Dense(1))(
        swed)  #TimeDistributed(Dense(params['nb_classes']))(swed)
    swed = Activation('sigmoid', name='swed_out')(swed)

    model = Model(inputs=x, outputs=[swed])
    model.compile(optimizer=Adam(),
                  loss=['binary_crossentropy'],
                  metrics=['accuracy'])
    model.summary()
    return model
        # NOTE(review): this fragment is indentation-damaged (the two lines
        # below sit at a deeper indent than the rest) and is cut at both
        # ends -- it appears to be the interior of a model-building function.
        inputTok = Input(shape=(maxTotalLength,),dtype='int32',name='inputTok')
        inputDis = Input(shape=(maxTotalLength,),dtype='int32',name='inputDis')

    # Word embedding over the flattened (token x feature) sequence,
    # initialised from pre-trained `embeddings`.
    embeddingW = Embedding(output_dim=vDim,input_dim=vSize,input_length=maxTotalLength*maxFeatureLength,weights=[embeddings], trainable=trainable, mask_zero=True)(inputWord)
  
#     embeddingW = Embedding(output_dim=vDim,input_dim=vSize,input_length=maxTotalLength*maxFeatureLength,weights=[embeddings], mask_zero=True, trainable=trainable)(inputWord)
#     embeddingS = Embedding(output_dim=sDim,input_dim=sSize,input_length=maxTotalLength, mask_zero=True)(inputSen)
#     embeddingC = Embedding(output_dim=cDim,input_dim=cSize,input_length=maxTotalLength, mask_zero=True)(inputCla)
#     embeddingP = Embedding(output_dim=pDim,input_dim=pSize,input_length=maxTotalLength, mask_zero=True)(inputPhr)
#     embeddingT = Embedding(output_dim=tDim,input_dim=tSize,input_length=maxTotalLength, mask_zero=True)(inputTok)
#     embeddingDM = Embedding(output_dim=dmDim,input_dim=dmSize,input_length=maxTotalLength, mask_zero=True)(inputDis)

    # Two strategies for combining word and structural embeddings.
    if merging == 'max':
        # Concatenate along time, then max-pool pairs back to length
        # maxTotalLength (uses the removed Keras 1 merge()/pool kwargs).
        SW_concate = merge([embeddingW,embeddingS],mode='concat',concat_axis=1)
        SW_pooled = MaxPooling1D(pool_length=2,border_mode='valid')(SW_concate)
        SW = Reshape((maxTotalLength,vDim))(SW_pooled)
    elif merging == 'transform':
        # Per-position dot products between the word vector and reshaped
        # sentence/class/token transforms.
        # NOTE(review): embeddingC / embeddingT are only defined in the
        # commented-out block above -- this branch cannot run as-is.
        RS = Reshape((vDim,rDim))
        SWs=[]
        for i in range(maxTotalLength):
            Wi = Lambda(lib.getWrapper(i),output_shape=lib.getWrapper_output_shape)(embeddingW)
            Si = Lambda(lib.getWrapper(i),output_shape=lib.getWrapper_output_shape)(embeddingS)
            Ci = Lambda(lib.getWrapper(i),output_shape=lib.getWrapper_output_shape)(embeddingC)
            Ti = Lambda(lib.getWrapper(i),output_shape=lib.getWrapper_output_shape)(embeddingT)
            
            SiReshape = RS(Si)
            CiReshape = RS(Ci)
            TiReshape = RS(Ti)
            
            SWi = merge([Wi,SiReshape],mode='dot',dot_axes=(1,1))
            CWi = merge([SWi,CiReshape],mode='dot',dot_axes=(1,1))
Example #19
0

shp = X_train.shape[1:]
dropout_rate = 0.25
opt = Adam(lr=1e-4)
dopt = Adam(lr=1e-3)

# Build Generative model: project noise to nch x 14 x 14, then upsample
# and refine with convolutions (channels-first layout).
nch = 200
g_input = Input(shape=[100])
# H = Dense(nch*14*14, init='glorot_normal')(g_input)
H = Dense(nch * 14 * 14, kernel_initializer='glorot_normal')(g_input)
# H = BatchNormalization(mode=2)(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)
H = Reshape([nch, 14, 14])(H)
H = UpSampling2D(size=(2, 2), data_format='channels_first')(H)
# BUG FIX: use floor division for the filter counts.  Under Python 3,
# nch / 2 is a float, which Conv2D rejects; // is identical on Python 2.
# NOTE(review): border_mode/init are Keras 1 kwargs mixed with Keras 2
# filters/kernel_size -- confirm the Keras version provides legacy support.
H = Convolution2D(filters=nch // 2,
                  kernel_size=(3, 3),
                  border_mode='same',
                  data_format='channels_first',
                  init='glorot_uniform')(H)
# H = BatchNormalization(mode=2)(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)
H = Convolution2D(filters=nch // 4,
                  kernel_size=(3, 3),
                  border_mode='same',
                  data_format='channels_first',
                  init='glorot_uniform')(H)
# H = BatchNormalization(mode=2)(H)
# RGB MODALITY BRANCH OF CNN
# VGG16 backbone (frozen top removed) followed by extra conv layers.
# NOTE(review): modality_num is not a stock keras.applications.VGG16
# argument -- this must be a project-local VGG16 variant; confirm.
inputs_rgb = Input(shape=(input_dim[0],input_dim[1],3))
vgg_model_rgb = VGG16(weights='imagenet', include_top = False,modality_num=0)
conv_model_rgb = vgg_model_rgb(inputs_rgb)
conv_model_rgb = Conv2D(32, (3,3), strides=(1, 1), padding = 'same', activation='relu',data_format="channels_last") (conv_model_rgb)
conv_model_rgb = Conv2D(64, (3,3), strides=(1, 1), padding = 'same', activation='relu',data_format="channels_last") (conv_model_rgb)
conv_model_rgb = Conv2D(128, (3,3), strides=(1, 1), padding = 'same', activation='relu',data_format="channels_last") (conv_model_rgb)
conv_model_rgb = Conv2D(256, (3,3), strides=(1, 1), padding = 'same', activation='relu',data_format="channels_last") (conv_model_rgb)
dropout_rgb = Dropout(0.2)(conv_model_rgb)

# DECONVOLUTION Layers: single transposed conv upsamples 32x back to the
# input resolution with one channel per class.
deconv_last = Conv2DTranspose(num_class, (64,64), strides=(32, 32), padding='same', data_format="channels_last", activation='relu',kernel_initializer='glorot_normal') (dropout_rgb)

# VECTORIZING OUTPUT: flatten pixels so softmax is applied per pixel.
out_reshape = Reshape((input_dim[0]*input_dim[1],num_class))(deconv_last)
out = Activation('softmax')(out_reshape)

# MODEL [INPUTS , OUTPUTS]
model = Model(inputs=[inputs_rgb], outputs=[out])
print 'compiling'
model.compile(optimizer=SGD(lr=0.008, decay=1e-6, momentum=0.9, nesterov=True),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.load_weights('late_fusion_unimodal_99.hdf5')
model.summary()
#================================================TRAINING============================================================
# Save the model after every epoch (save_best_only=False); stop early when
# validation accuracy stops improving.
progbar = ProgbarLogger(count_mode='steps')
checkpoint = ModelCheckpoint("late_fusion_unimodal_{epoch:02d}.hdf5", monitor='val_loss', verbose=1, save_best_only=False, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=1, verbose=1, mode='auto')
Example #21
0
from keras.layers.merge import concatenate
from keras.layers.merge import Add
from keras.layers.merge import dot

from keras.layers.core import Dense, Reshape
from keras.layers.embeddings import Embedding
from keras.models import Sequential, Model

# build skip-gram architecture: target-word and context-word embeddings
# whose dot product is squashed to a co-occurrence probability.
word_model = Sequential()
word_model.add(
    Embedding(vocab_size,
              embed_size,
              embeddings_initializer="glorot_uniform",
              input_length=1))
word_model.add(Reshape((embed_size, )))

context_model = Sequential()
context_model.add(
    Embedding(vocab_size,
              embed_size,
              embeddings_initializer="glorot_uniform",
              input_length=1))
context_model.add(Reshape((embed_size, )))

# BUG FIX: Add([word_model, context_model], mode="dot") is the removed
# Keras 1 Merge API -- the Keras 2 Add layer accepts neither Sequential
# models nor a `mode` kwarg and raises at construction.  Use the
# functional dot merge on the two embedding vectors instead.
merged = dot([word_model.output, context_model.output], axes=-1)
output = Dense(1, kernel_initializer="glorot_uniform",
               activation="sigmoid")(merged)
model = Model(inputs=[word_model.input, context_model.input],
              outputs=output)
model.compile(loss="mean_squared_error", optimizer="rmsprop")

# view model summary
# One-hot encode test labels from the modulation index of each example.
# NOTE(review): map() returns an iterator on Python 3 -- to_onehot must
# accept an iterable (or this snippet targets Python 2); confirm.
Y_test = to_onehot(map(lambda x: mods.index(lbl[x][0]), test_idx))

# set the input shape [2,128]
in_shp = list(X_train.shape[1:])
print(X_train.shape, in_shp)  # (110000, 2, 128) [2, 128]
classes = mods

# Build VT-CNN2 Neural Net model using Keras primitives --
#  - Reshape [N,2,128] to [N,1,2,128] on input
#  - Pass through 2 2DConv/ReLu layers
#  - Pass through 2 Dense layers (ReLu and Softmax)
#  - Perform categorical cross entropy optimization

dr = 0.5  # dropout rate (%)
model = models.Sequential()
# Add a singleton channel axis; zero-pad only the time axis before each conv.
model.add(Reshape([1] + in_shp, input_shape=in_shp))
model.add(ZeroPadding2D((0, 2), data_format="channels_first"))
model.add(
    Convolution2D(256, (1, 3),
                  activation="relu",
                  name="conv1",
                  data_format="channels_first"))
model.add(Dropout(dr))
model.add(ZeroPadding2D((0, 2), data_format="channels_first"))
model.add(
    Convolution2D(80, (2, 3),
                  activation="relu",
                  name="conv2",
                  data_format="channels_first"))
model.add(Dropout(dr))
model.add(Flatten())
Example #23
0
def netvgg(inputs, is_training=True):
    """VGG-16 backbone (TF1 slim) with two 1x1-conv detection heads.

    Args:
        inputs: image tensor; cast to float32 and scaled to [-1, 1].
        is_training: enables dropout in the fully connected tail.

    Returns:
        (net_cnn, net2, net1, net): conv5 feature map, per-anchor box
        outputs reshaped to (-1, 4), per-anchor 2-way softmax reshaped
        to (-1, 2), and the 1000-way fully connected output.
    """
    inputs = tf.cast(inputs, tf.float32)
    # Normalise pixel values from [0, 255] to [-1, 1].
    inputs = ((inputs / 255.0) - 0.5) * 2
    num_anchors = 9
    with tf.variable_scope("vgg_16"):
        with slim.arg_scope(vgg.vgg_arg_scope()):
            # Standard VGG-16 conv/pool stack up to conv5 (pool5 applied
            # later, after the detection heads branch off).
            net = inputs
            net = slim.repeat(net, 2, slim.conv2d, 64, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')

            initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
            net_cnn = net
            # Head 1: 2 scores per anchor with softmax over the channel axis.
            net1 = slim.conv2d(net,
                               2 * num_anchors, [1, 1],
                               scope="prob",
                               weights_initializer=initializer,
                               activation_fn=tf.nn.softmax)  #tf.nn.sigmoid)

            net1 = Reshape((-1, 2),
                           input_shape=(net1.shape[1], net1.shape[2],
                                        net1.shape[3]))(net1)
            #net1 = tf.reshape(net1, [0, -1, -1, 2])

            # Head 2: 4 box values per anchor, linear activation.
            net2 = slim.conv2d(net,
                               4 * num_anchors, [1, 1],
                               scope='bbox',
                               weights_initializer=initializer,
                               activation_fn=None)
            net2 = Reshape((-1, 4),
                           input_shape=(net2.shape[1], net2.shape[2],
                                        net2.shape[3]))(net2)

            net = slim.max_pool2d(net, [2, 2], scope='pool5')

        # VGG classifier tail: fc6 -> fc7 -> fc8 with dropout when training.
        net = slim.flatten(net)

        w_init = tf.contrib.layers.xavier_initializer()
        w_reg = slim.l2_regularizer(0.0005)
        net = slim.fully_connected(net,
                                   4096,
                                   weights_initializer=w_init,
                                   weights_regularizer=w_reg,
                                   scope='fc6')
        net = slim.dropout(net, keep_prob=0.5, is_training=is_training)
        net = slim.fully_connected(net,
                                   4096,
                                   weights_initializer=w_init,
                                   weights_regularizer=w_reg,
                                   scope='fc7')
        net = slim.dropout(net, keep_prob=0.5, is_training=is_training)
        net = slim.fully_connected(net,
                                   1000,
                                   weights_initializer=w_init,
                                   weights_regularizer=w_reg,
                                   scope='fc8')
        print("SHAPE!!!!", net)
    return net_cnn, net2, net1, net
# Scale pixel values to [-1, 1] (matches the generator's tanh output) and
# add a singleton channel axis (channels-first layout).
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = X_train[:, np.newaxis, :, :]
''' Model Definition '''

# Optimizer
adam = Adam(lr=0.0002, beta_1=0.5)
''' Generator '''
# Noise vector -> 7x7 feature map -> two upsampling stages -> 28x28 image.
# NOTE(review): Reshape((256, 7, 7)) implies channels-first, but the
# UpSampling2D/Conv2D layers use the backend default data format --
# confirm image_data_format is set to 'channels_first'.
generator = Sequential()

generator.add(
    Dense(256 * 7 * 7,
          input_dim=randomDim,
          kernel_initializer=initializers.RandomNormal(stddev=0.02)))
generator.add(BatchNormalization(momentum=0.9))
generator.add(LeakyReLU(0.2))
generator.add(Reshape((256, 7, 7)))

generator.add(UpSampling2D(size=(2, 2)))
generator.add(Conv2D(128, kernel_size=(5, 5), padding='same'))
generator.add(BatchNormalization(momentum=0.9))
generator.add(LeakyReLU(0.2))

generator.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
generator.add(BatchNormalization(momentum=0.9))
generator.add(LeakyReLU(0.2))

generator.add(UpSampling2D(size=(2, 2)))
generator.add(Conv2D(1, kernel_size=(5, 5), padding='same', activation='tanh'))
generator.compile(loss='binary_crossentropy', optimizer=adam)
''' Discriminator '''
discriminator = Sequential()
def build_cifar10_generator(ngf=64, z_dim=128):
    """ Builds CIFAR10 DCGAN Generator Model
    PARAMS
    ------
    ngf: number of generator filters
    z_dim: number of dimensions in latent vector

    RETURN
    ------
    G: keras sequential
    """
    weight_init = initializers.RandomNormal(stddev=0.02)

    G = Sequential()

    # Project the latent vector to a 2x2x(ngf*8) tensor.
    G.add(
        Dense(2 * 2 * ngf * 8,
              input_shape=(z_dim, ),
              use_bias=True,
              kernel_initializer=weight_init))
    G.add(Reshape((2, 2, ngf * 8)))
    G.add(BatchNormalization())
    G.add(LeakyReLU(0.2))

    # Three stride-2 transposed convolutions, halving the filter count each
    # time: 4x4x(ngf*4) -> 8x8x(ngf*2) -> 16x16x(ngf).
    for n_filters in (ngf * 4, ngf * 2, ngf):
        G.add(
            Conv2DTranspose(n_filters,
                            kernel_size=5,
                            strides=2,
                            padding='same',
                            use_bias=True,
                            kernel_initializer=weight_init))
        G.add(BatchNormalization())
        G.add(LeakyReLU(0.2))

    # Final upsample to 32x32x3 RGB, squashed to [-1, 1] with tanh.
    G.add(
        Conv2DTranspose(3,
                        kernel_size=5,
                        strides=2,
                        padding='same',
                        use_bias=True,
                        kernel_initializer=weight_init))
    G.add(Activation('tanh'))

    print("\nGenerator")
    G.summary()

    return G
Example #26
0
def __create_fcn_dense_net(nb_classes,
                           img_input,
                           include_top,
                           nb_dense_block=5,
                           growth_rate=12,
                           reduction=0.0,
                           dropout_rate=None,
                           weight_decay=1e-4,
                           nb_layers_per_block=4,
                           nb_upsampling_conv=128,
                           upsampling_type='upsampling',
                           init_conv_filters=48,
                           input_shape=None,
                           activation='deconv'):
    ''' Build a fully convolutional DenseNet (FC-DenseNet) segmentation model:
    a down path of dense blocks + transition-down blocks, a bottleneck dense
    block, and a symmetric up path with skip connections.
    Args:
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        nb_upsampling_conv: number of convolutional layers in upsampling via subpixel convolution
        upsampling_type: Can be one of 'upsampling', 'deconv' and 'subpixel'. Defines
            type of upsampling algorithm used.
        init_conv_filters: number of filters in the initial 7x7 convolution
        input_shape: Only used for shape inference in fully convolutional networks.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                    Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # NOTE(review): rows/cols unpacked here are never used below — the
    # include_top branch re-derives them from input_shape.
    if concat_axis == 1:  # channels_first dim ordering
        _, rows, cols = input_shape
    else:
        rows, cols, _ = input_shape

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # check if upsampling_conv has minimum number of filters
    # minimum is set to 12, as at least 3 color channels are needed for correct upsampling
    assert nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0, 'Parameter `upsampling_conv` number of channels must ' \
                                                                    'be a positive number divisible by 4 and greater ' \
                                                                    'than 12'

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block + 1), 'If list, nb_layer is used as provided. ' \
                                                       'Note that list size must be (nb_dense_block + 1)'

        # Mirror the down-path layer counts to build the up path: the final
        # nb_layers reads [down_0..down_{B-1}, bottleneck, up_{B-1}..up_0].
        bottleneck_nb_layers = nb_layers[-1]
        rev_layers = nb_layers[::-1]
        nb_layers.extend(rev_layers[1:])
    else:
        bottleneck_nb_layers = nb_layers_per_block
        nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    x = Conv2D(init_conv_filters, (7, 7),
               kernel_initializer='he_normal',
               padding='same',
               name='initial_conv2D',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    nb_filter = init_conv_filters

    # Feature maps saved before each transition-down, consumed in reverse
    # order as skip connections on the up path.
    skip_list = []

    # Add dense blocks and transition down block
    for block_idx in range(nb_dense_block):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)

        # Skip connection
        skip_list.append(x)

        # add transition_block
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               weight_decay=weight_decay)

        nb_filter = int(
            nb_filter *
            compression)  # this is calculated inside transition_down_block

    # The last dense_block does not have a transition_down_block
    # return the concatenated feature maps without the concatenation of the input
    _, nb_filter, concat_list = __dense_block(x,
                                              bottleneck_nb_layers,
                                              nb_filter,
                                              growth_rate,
                                              dropout_rate=dropout_rate,
                                              weight_decay=weight_decay,
                                              return_concat_list=True)

    skip_list = skip_list[::-1]  # reverse the skip list

    # Add dense blocks and transition up block.
    # NOTE: on the first iteration concat_list comes from the bottleneck
    # block above; each subsequent iteration uses the concat_list produced
    # by the previous upsampling dense block.
    for block_idx in range(nb_dense_block):
        n_filters_keep = growth_rate * nb_layers[nb_dense_block + block_idx]

        # upsampling block must upsample only the feature maps (concat_list[1:]),
        # not the concatenation of the input with the feature maps (concat_list[0]).
        l = concatenate(concat_list[1:], axis=concat_axis)

        t = __transition_up_block(l,
                                  nb_filters=n_filters_keep,
                                  type=upsampling_type,
                                  weight_decay=weight_decay)

        # concatenate the skip connection with the transition block
        x = concatenate([t, skip_list[block_idx]], axis=concat_axis)

        # Dont allow the feature map size to grow in upsampling dense blocks
        x_up, nb_filter, concat_list = __dense_block(x,
                                                     nb_layers[nb_dense_block +
                                                               block_idx + 1],
                                                     nb_filter=growth_rate,
                                                     growth_rate=growth_rate,
                                                     dropout_rate=dropout_rate,
                                                     weight_decay=weight_decay,
                                                     return_concat_list=True,
                                                     grow_nb_filters=False)

    # NOTE(review): x_up is only bound inside the loop above, so
    # nb_dense_block == 0 would raise UnboundLocalError here.
    if include_top:
        x = Conv2D(nb_classes, (1, 1),
                   activation='linear',
                   padding='same',
                   use_bias=False)(x_up)

        if K.image_data_format() == 'channels_first':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape

        # Flatten spatial dims so the activation (e.g. softmax) is applied
        # per pixel over the class axis, then restore the spatial layout.
        x = Reshape((row * col, nb_classes))(x)
        x = Activation(activation)(x)
        x = Reshape((row, col, nb_classes))(x)
    else:
        x = x_up

    return x
# dot_product = Reshape((1,))(dot_product)
#
# output = Dense(1, activation='sigmoid')(dot_product)
#
# # *******************************SEQUENTIAL****************************************

# Skip-gram with negative sampling: two one-element integer inputs (a target
# word id and a context word id) share a single embedding table; their dot
# product is squashed to a co-occurrence probability.
input_target = Input((1, ))
input_context = Input((1, ))

# One embedding table shared by both inputs.
embedding = Embedding(vocab_size,
                      embed_size,
                      input_length=1,
                      embeddings_initializer="glorot_uniform")

word_embedding = Reshape((embed_size, 1))(embedding(input_target))
context_embedding = Reshape((embed_size, 1))(embedding(input_context))

# Similarity score: dot product along the embedding axis, flattened to one
# scalar per example.
dot_product = dot([word_embedding, context_embedding], axes=1)
dot_product = Reshape((1, ))(dot_product)

# Sigmoid head: probability that the pair truly co-occurred.
output = Dense(1, activation='sigmoid')(dot_product)
model = Model([input_target, input_context], output)
model.compile(loss='mean_squared_error', optimizer='rmsprop')

print(model.summary())

# Visualize model structure
# Example #28
# 0
def subPixelModel(input_shape=(para.img_cols, para.img_rows, para.channels),
                  classes=para.num_classes,
                  input_tensor=None):
    """VGG-style encoder whose decoder is replaced by a single sub-pixel
    (pixel-shuffle) upsampling block, topped by a per-pixel softmax.

    Args:
        input_shape: input image shape taken from `para` when no tensor is given.
        classes: number of segmentation classes for the 1x1 classifier head.
        input_tensor: optional existing Keras tensor to build the model on.

    Returns:
        (model, rows, cols): the Model producing (rows * cols, classes)
        softmax scores, plus the spatial height and width of its output.
    """
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=96,
                                      data_format=K.image_data_format(),
                                      require_flatten=False)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = img_input

    # Encoder: VGG16-style blocks (with BatchNorm added); three max-pools
    # downsample the input by a factor of 8 overall.
    x = Conv2D(64, (3, 3), padding="same", name="block1_conv0")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(64, (3, 3), padding="same", name="block1_conv1")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(128, (3, 3), padding="same", name="block2_conv0")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(128, (3, 3), padding="same", name="block2_conv1")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(256, (3, 3), padding="same", name="block3_conv0")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(256, (3, 3), padding="same", name="block3_conv1")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(256, (3, 3), padding="same", name="block3_conv2")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(512, (3, 3), padding="same", name="block4_conv0")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(512, (3, 3), padding="same", name="block4_conv1")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(512, (3, 3), padding="same", name="block4_conv2")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    #   =============
    #   The Sub-Pixel Block Replacing the Decoder....
    #   =============
    # One pixel-shuffle step undoes the 8x encoder downsampling.
    scale = 8
    o = subPixelConv2D.SubpixelConv2D(input_shape, scale=scale)(x)

    # Temporary model used only to read the upsampled spatial size.
    model = Model(img_input, o)
    # FIX: under channels_last, output_shape is (batch, rows, cols, channels),
    # so index 1 is the height (rows) and index 2 the width (cols). The
    # original assigned them the other way round, which swapped the returned
    # dimensions for non-square inputs (harmless only when rows == cols).
    # NOTE(review): assumes channels_last; under channels_first index 1 would
    # be the channel axis — confirm K.image_data_format() at the call site.
    rows = model.output_shape[1]
    cols = model.output_shape[2]

    # 1x1 classifier head, flattened so softmax runs per pixel over classes.
    o = Conv2D(classes, (1, 1), padding="valid")(o)
    o = Reshape((cols * rows, classes))(o)
    o = Activation("softmax")(o)

    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, o)
    return model, rows, cols
# Example #29
# 0
def FCN32(nClasses=2, input_height=68, input_width=68, vgg_level=3):
    """Build an FCN-32s segmentation network on a VGG16 backbone.

    Args:
        nClasses: number of output classes per pixel.
        input_height: input image height.
        input_width: input image width.
        vgg_level: unused here (NOTE(review): kept for interface
            compatibility; likely meaningful in sibling FCN8/FCN16 builders).

    Returns:
        A Model producing per-pixel softmax scores of shape
        (pixels, nClasses); the model also carries .outputWidth /
        .outputHeight attributes with its output spatial size.
    """
    img_input = Input(shape=(input_height, input_width, 3))

    # VGG16 block 1 (stride 2 after pooling).
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               data_format=IMAGE_ORDERING)(img_input)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2',
               data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block1_pool',
                     data_format=IMAGE_ORDERING)(x)
    # NOTE(review): f1-f4 are captured but never used in this builder
    # (FCN-32s has no skip connections; FCN-8s/16s variants would use them).
    f1 = x
    # Block 2
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1',
               data_format=IMAGE_ORDERING)(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2',
               data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block2_pool',
                     data_format=IMAGE_ORDERING)(x)
    f2 = x

    # Block 3
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1',
               data_format=IMAGE_ORDERING)(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2',
               data_format=IMAGE_ORDERING)(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3',
               data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block3_pool',
                     data_format=IMAGE_ORDERING)(x)
    f3 = x

    # Block 4
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1',
               data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2',
               data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3',
               data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     name='block4_pool',
                     data_format=IMAGE_ORDERING)(x)
    f4 = x

    # Block 5 — the usual block5_pool is intentionally disabled below, so
    # the encoder downsamples by 16x rather than VGG's 32x.
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1',
               data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2',
               data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3',
               data_format=IMAGE_ORDERING)(x)
    #x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool', data_format=IMAGE_ORDERING )(x)
    f5 = x

    o = f5

    # Fully-convolutional "fc6"/"fc7" equivalents from the FCN paper.
    o = (Conv2D(4096, (7, 7),
                activation='relu',
                padding='same',
                data_format=IMAGE_ORDERING))(o)
    o = Dropout(0.5)(o)
    o = (Conv2D(4096, (1, 1),
                activation='relu',
                padding='same',
                data_format=IMAGE_ORDERING))(o)
    o = Dropout(0.5)(o)
    # Single 32x-stride learned upsampling back to (roughly) input
    # resolution; with the default 'valid' padding and a 36x36 kernel the
    # output is slightly larger than the input (hence the print below).
    o = Conv2DTranspose(nClasses,
                        kernel_size=(36, 36),
                        strides=(32, 32),
                        use_bias=False,
                        data_format=IMAGE_ORDERING)(o)

    # Build a throwaway Model just to read the upsampled spatial size.
    o_shape = Model(img_input, o).output_shape

    outputHeight = o_shape[1]
    outputWidth = o_shape[2]
    print(o_shape)
    # Flatten to (nClasses, H*W), transpose to (H*W, nClasses) and apply
    # per-pixel softmax.
    # NOTE(review): this Reshape/Permute pattern is only correct for a
    # channels_first tensor layout; confirm what IMAGE_ORDERING is set to,
    # since the Input above is declared as (H, W, 3).
    o = (Reshape((-1, outputHeight * outputWidth)))(o)
    o = (Permute((2, 1)))(o)
    o = (Activation('softmax'))(o)
    model = Model(img_input, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight

    return model
def main():
    """Train a three-branch TextCNN (Kim-style) on word2vec embeddings of
    segmented news articles, evaluate on a held-out split, and save
    accuracy/model plots under output/.
    """
    news = pd.read_csv('data/data_seged_monpa.csv')
    news_tag = news[['text', 'replyType', 'seg_text']]
    news_tag = news_tag[news_tag['replyType'] != 'NOT_ARTICLE']
    types = news_tag.replyType.unique()
    dic = {}
    # Map each reply type to an integer label.
    # NOTE(review): the loop variable shadows `types`, so the unique-values
    # array is lost after this loop.
    for i, types in enumerate(types):
        dic[types] = i
    print(dic)
    news_tag['type_id'] = news_tag.replyType.apply(lambda x: dic[x])
    labels = news_tag.replyType.apply(lambda x: dic[x])
    news_tag = find_null(news_tag)
    X = news_tag.seg_text
    y = news_tag.type_id
    print(y.value_counts())
    # NOTE(review): y_train/y_test from this split are discarded — they are
    # rebuilt below from `labels` indexed by the split's row indices.
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=42)
    print(X_train.shape, 'training data ')
    print(X_test.shape, 'testing data')
    X_train = transfer_lsit(X_train)
    X_test = transfer_lsit(X_test)
    all_data = pd.concat([X_train, X_test])

    # embedding setting
    EMBEDDING_DIM = 100
    NUM_WORDS = 2764036
    vocabulary_size = NUM_WORDS
    embedding_matrix = np.zeros((vocabulary_size, EMBEDDING_DIM))
    word_vectors = word2vec.Word2Vec.load("output/word2vec.model")
    embedding_matrix = to_embedding(EMBEDDING_DIM, NUM_WORDS, vocabulary_size,
                                    embedding_matrix, word_vectors, X_train,
                                    X_test)
    # Free the gensim model early; only the matrix is needed from here on.
    del (word_vectors)

    embedding_layer = Embedding(vocabulary_size,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                trainable=True)

    tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n\'')
    tokenizer.fit_on_texts(all_data.values)

    train_text = X_train.values
    train_index = X_train.index

    sequences_train = tokenizer.texts_to_sequences(train_text)

    # Pad/truncate every document to 600 tokens.
    X_train = pad_sequences(sequences_train, maxlen=600)

    y_train = to_categorical(np.asarray(labels[train_index]))

    print('Shape of X train:', X_train.shape)
    print('Shape of label train:', y_train.shape)

    test_text = X_test.values
    test_index = X_test.index
    sequences_test = tokenizer.texts_to_sequences(test_text)
    X_test = pad_sequences(sequences_test, maxlen=X_train.shape[1])
    y_test = to_categorical(np.asarray(labels[test_index]))

    sequence_length = X_train.shape[1]
    filter_sizes = [2, 3, 4]
    num_filters = 128
    drop = 0.2
    penalty = 0.0001

    inputs = Input(shape=(sequence_length, ))
    embedding = embedding_layer(inputs)
    # Add a channel dimension so Conv2D slides over (time, embedding) windows.
    reshape = Reshape((sequence_length, EMBEDDING_DIM, 1))(embedding)

    # NOTE(review): conv_0 uses activation='softmax' while the other branches
    # use 'relu' — likely a typo for 'relu'. Also filter_sizes[0] is never
    # used: conv_0 takes filter_sizes[1] and both conv_1 and conv_2 take
    # filter_sizes[2], so two branches have identical window sizes. Confirm
    # the intended sizes are [0], [1], [2].
    conv_0 = Conv2D(num_filters, (filter_sizes[1], EMBEDDING_DIM),
                    activation='softmax',
                    kernel_regularizer=regularizers.l2(penalty))(reshape)
    conv_1 = Conv2D(num_filters, (filter_sizes[2], EMBEDDING_DIM),
                    activation='relu',
                    kernel_regularizer=regularizers.l2(penalty))(reshape)
    conv_2 = Conv2D(num_filters, (filter_sizes[2], EMBEDDING_DIM),
                    activation='relu',
                    kernel_regularizer=regularizers.l2(penalty))(reshape)

    # Global max-over-time pooling per branch.
    maxpool_0 = MaxPooling2D((sequence_length - filter_sizes[1] + 1, 1),
                             strides=(1, 1))(conv_0)

    maxpool_1 = MaxPooling2D((sequence_length - filter_sizes[2] + 1, 1),
                             strides=(1, 1))(conv_1)
    maxpool_2 = MaxPooling2D((sequence_length - filter_sizes[2] + 1, 1),
                             strides=(1, 1))(conv_2)

    merged_tensor = concatenate([maxpool_0, maxpool_1, maxpool_2], axis=1)
    dropout = Dropout(drop)(merged_tensor)
    flatten = Flatten()(dropout)
    # NOTE(review): this Reshape is a no-op — Flatten already yields a
    # (3 * num_filters,) vector.
    reshape = Reshape((3 * num_filters, ))(flatten)
    output = Dense(units=2,
                   activation='softmax',
                   kernel_regularizer=regularizers.l2(penalty))(reshape)

    # this creates a model that includes
    model = Model(inputs, output)
    model.summary()

    adam = Adam(lr=1e-3)

    # NOTE(review): binary_crossentropy with a 2-unit softmax trains but
    # 'categorical_crossentropy' is the conventional pairing for one-hot
    # two-class targets.
    model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['acc'])
    callbacks = [EarlyStopping(monitor='val_loss')]
    history = model.fit(X_train,
                        y_train,
                        batch_size=64,
                        epochs=50,
                        verbose=1,
                        validation_split=0.1,
                        callbacks=callbacks)

    predictions = model.predict(X_test)
    matrix = confusion_matrix(y_test.argmax(axis=1), predictions.argmax(axis=1))
    print(matrix)

    # Plot training & validation accuracy values
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'val'], loc='upper left')
    plt.savefig("output/acc.png")
    score, acc = model.evaluate(X_test, y_test)
    print('Test accuracy:', acc)

    plot_model(model,
               to_file='output/model.png',
               show_shapes=False,
               show_layer_names=False)