Example #1
 def deconv2d(layer_input):
     """Layers used during upsampling"""
     u = UpSampling2D(size=2)(layer_input)
     u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)
     u = Activation('relu')(u)
     return u
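As a quick shape check, a minimal usage sketch (assuming deconv2d and the usual keras.layers imports are in scope; the 16x16x128 input is hypothetical). UpSampling2D(size=2) doubles the spatial dimensions, and the same-padded 3x3 convolution then maps the result to 256 channels:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(16, 16, 128))     # hypothetical encoder feature map
out = deconv2d(inp)
print(Model(inp, out).output_shape)  # (None, 32, 32, 256)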
Example #2
    def lateFusion(self):
        input = Input(shape=(2, 80, 80, 3))
        input1 = Lambda(lambda x: x[:, 0, :, :, :],
                        output_shape=(80, 80, 3))(input)
        input2 = Lambda(lambda x: x[:, 1, :, :, :],
                        output_shape=(80, 80, 3))(input)

        x = Conv2D(kernel_size=11,
                   filters=96,
                   strides=(3, 3),
                   padding='same',
                   activation='relu')(input1)
        x = normalization.BatchNormalization()(x)
        x = MaxPooling2D(pool_size=2, strides=(2, 2))(x)
        x = Conv2D(kernel_size=5,
                   filters=256,
                   strides=(1, 1),
                   padding='same',
                   activation='relu')(x)
        x = normalization.BatchNormalization()(x)
        x = MaxPooling2D(pool_size=2, strides=(2, 2))(x)
        x = Conv2D(kernel_size=3,
                   filters=384,
                   strides=(1, 1),
                   padding='same',
                   activation='relu')(x)
        x = Conv2D(kernel_size=3,
                   filters=384,
                   strides=(1, 1),
                   padding='same',
                   activation='relu')(x)
        x = Conv2D(kernel_size=3,
                   filters=256,
                   strides=(1, 1),
                   padding='same',
                   activation='relu')(x)
        x = MaxPooling2D(pool_size=2, strides=(2, 2))(x)

        y = Conv2D(kernel_size=11,
                   filters=96,
                   strides=(3, 3),
                   padding='same',
                   activation='relu')(input2)
        y = normalization.BatchNormalization()(y)
        y = MaxPooling2D(pool_size=2, strides=(2, 2))(y)
        y = Conv2D(kernel_size=5,
                   filters=256,
                   strides=(1, 1),
                   padding='same',
                   activation='relu')(y)
        y = normalization.BatchNormalization()(y)
        y = MaxPooling2D(pool_size=2, strides=(2, 2))(y)
        y = Conv2D(kernel_size=3,
                   filters=384,
                   strides=(1, 1),
                   padding='same',
                   activation='relu')(y)
        y = Conv2D(kernel_size=3,
                   filters=384,
                   strides=(1, 1),
                   padding='same',
                   activation='relu')(y)
        y = Conv2D(kernel_size=3,
                   filters=256,
                   strides=(1, 1),
                   padding='same',
                   activation='relu')(y)
        y = MaxPooling2D(pool_size=2, strides=(2, 2))(y)

        merge = concatenate([x, y])
        merge = Flatten()(merge)
        merge = Dense(4096, activation='relu')(merge)
        merge = Dense(4096, activation='relu')(merge)
        merge = Dense(self.nb_classes, activation='softmax')(merge)
        model = Model(inputs=input, outputs=merge)

        return model
Example #3
    def lrcn_new(self):
        """Build a CNN into RNN.
        Starting version from:
            https://github.com/udacity/self-driving-car/blob/master/
                steering-models/community-models/chauffeur/models.py

        Heavily influenced by VGG-16:
            https://arxiv.org/abs/1409.1556

        Also known as an LRCN:
            https://arxiv.org/pdf/1411.4389.pdf
        """
        model = Sequential()
        model.add(
            TimeDistributed(Conv2D(kernel_size=11,
                                   filters=96,
                                   strides=(3, 3),
                                   padding='same',
                                   activation='relu'),
                            input_shape=self.input_shape))
        model.add(TimeDistributed(normalization.BatchNormalization()))
        model.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=(2, 2))))
        model.add(
            TimeDistributed(
                Conv2D(kernel_size=5,
                       filters=256,
                       strides=(1, 1),
                       padding='same',
                       activation='relu')))
        model.add(TimeDistributed(normalization.BatchNormalization()))
        model.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=(2, 2))))
        model.add(
            TimeDistributed(
                Conv2D(kernel_size=3,
                       filters=384,
                       strides=(1, 1),
                       padding='same',
                       activation='relu')))
        model.add(
            TimeDistributed(
                Conv2D(kernel_size=3,
                       filters=384,
                       strides=(1, 1),
                       padding='same',
                       activation='relu')))
        model.add(
            TimeDistributed(
                Conv2D(kernel_size=3,
                       filters=256,
                       strides=(1, 1),
                       padding='same',
                       activation='relu')))
        model.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=(2, 2))))
        model.add(TimeDistributed(Flatten()))
        model.add(TimeDistributed(Dense(4096, activation='relu')))
        model.add(TimeDistributed(Dense(4096, activation='relu')))

        model.add(LSTM(256, return_sequences=False, dropout=0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
Example #4
# Just reshape the arrays to the dimensions TensorFlow expects
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
# Normalize the inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# Generate the one-hot class vectors for the training and test datasets
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]


# Create the model
model = Sequential()
# 2D convolution with ReLU activation, 32 kernels/weights (filters)
model.add(Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation='relu', data_format='channels_first'))
print(model.output_shape)
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))

# 2D convolution with ReLU activation, 64 kernels/weights (filters)
model.add(Conv2D(64, (5, 5), activation='relu'))
print(model.output_shape)
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))

# Randomly drop 20% of the input activations
model.add(Dropout(0.2))
# Flatten the image feature maps into a one-dimensional vector for the fully connected network
model.add(Flatten())
print(model.output_shape)
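The snippet stops after the Flatten layer. A typical head for this kind of MNIST network, given as an assumption rather than the original continuation, would add fully connected layers and compile the model:

# Assumed continuation (not part of the original snippet)
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])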
Example #5
traindirangvel = '/content/vel_merged/trainingangvel/trainangvel'
valdirangvel = '/content/vel_merged/valangvel'

data_format = K.image_data_format()
K.set_image_data_format(data_format)
np.random.seed(42)

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

num_of_classes = 27

angvelinput = Input(shape=(65, features, 1))

angvelmodel = Conv2D(32, (3, 3),
                     kernel_regularizer=l2(0.001),
                     kernel_initializer='glorot_normal',
                     activation='relu')(angvelinput)
angvelmodel = Conv2D(32, (3, 3),
                     kernel_regularizer=l2(0.001),
                     kernel_initializer='glorot_normal',
                     activation="relu")(angvelmodel)

angvelmodel = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                           padding='same')(angvelmodel)
angvelmodel = BatchNormalization()(angvelmodel)

angvelmodel = Conv2D(64, (3, 3),
                     kernel_regularizer=l2(0.001),
                     kernel_initializer='glorot_normal',
                     activation="relu")(angvelmodel)
angvelmodel = Conv2D(64, (3, 3),
                     kernel_regularizer=l2(0.001),
                     kernel_initializer='glorot_normal',
                     activation="relu")(angvelmodel)  # assumed completion, mirroring the layer above
Example #6
def __create_dense_net(nb_classes,
                       img_input,
                       include_top,
                       depth=40,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_filter=-1,
                       nb_layers_per_block=-1,
                       bottleneck=False,
                       reduction=0.0,
                       dropout_rate=None,
                       weight_decay=1e-4,
                       subsample_initial_block=False,
                       activation='softmax'):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_input: input tensor (the output of an Input layer) with shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
                Can be -1, a positive integer or a list.
                If -1, calculates nb_layer_per_block from the depth of the network.
                If positive integer, a set number of layers per dense block.
                If list, nb_layer is used as provided. Note that list size must
                be nb_dense_block
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note: reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Set to True to subsample the initial convolution and
                add a MaxPool2D before the dense blocks are added.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
                                                   'Note that list size must be (nb_dense_block)'
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            assert (
                depth - 4
            ) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
            count = int((depth - 4) / 3)

            if bottleneck:
                count = count // 2

            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block

    # compute initial nb_filter if -1, else accept the user's initial nb_filter
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    if subsample_initial_block:
        initial_kernel = (7, 7)
        initial_strides = (2, 2)
    else:
        initial_kernel = (3, 3)
        initial_strides = (1, 1)

    x = Conv2D(nb_filter,
               initial_kernel,
               kernel_initializer='he_normal',
               padding='same',
               strides=initial_strides,
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)

    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)
        # add transition_block
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x,
                                 final_nb_layer,
                                 nb_filter,
                                 growth_rate,
                                 bottleneck=bottleneck,
                                 dropout_rate=dropout_rate,
                                 weight_decay=weight_decay)

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalMaxPooling2D()(x)

    if include_top:
        x = Dense(nb_classes, activation=activation)(x)

    return x
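The builder above calls __dense_block and __transition_block, which this excerpt does not include (Example #19 below relies on the same helpers). A minimal sketch of what they compute, simplified from the DenseNet paper (no bottleneck, dropout, or return_concat_list handling, and assuming the same keras.layers imports plus AveragePooling2D):

def __conv_block(ip, growth_rate, weight_decay=1e-4):
    # BN -> ReLU -> 3x3 conv producing growth_rate new feature maps
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(growth_rate, (3, 3), padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    return x


def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False,
                  dropout_rate=None, weight_decay=1e-4):
    # each layer sees the concatenation of all preceding feature maps
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    for _ in range(nb_layers):
        cb = __conv_block(x, growth_rate, weight_decay)
        x = concatenate([x, cb], axis=concat_axis)
        nb_filter += growth_rate
    return x, nb_filter


def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    # BN -> ReLU -> 1x1 conv (optionally compressed) -> 2x2 average pooling
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x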
Example #7
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers import Cropping2D
import matplotlib.pyplot as plt

print('----create model')

# use Nvidia Architecture
model = Sequential()

# Normalization and cropping
model.add(Cropping2D(cropping=((50, 25), (0, 0)), input_shape=(160, 320, 3)))
model.add(Lambda(lambda x: (x / 255.0 - 0.5)))

# Nvidia Architecture
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation="relu"))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation="relu"))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation="relu"))
model.add(Conv2D(64, (3, 3), strides=(2, 2), activation="relu"))
model.add(Conv2D(64, (3, 3), strides=(2, 2), activation="relu"))
model.add(Flatten())
# model.add(Dense(1164,activation = 'relu'))
model.add(Dense(1164))
# model.add(Dense(100,activation = 'relu'))
model.add(Dense(100))
# model.add(Dense(50,activation = 'relu'))
model.add(Dense(50))
model.add(Dense(1))
model.summary()
print(
    '------------------------------------------Start training-----------------------------------------------------------'
)
Example #8
#train_idx=range(0,104000)
#test_idx=range(104000,200000)
#X_train = X[train_idx]
#X_test =  X[test_idx]
#Y_train_labeld=X_labeld[train_idx]
#Y_test_labeld=X_labeld[test_idx]

#keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

dr = 0.0

model = Sequential()
model.add(Reshape(in_dim + [1], input_shape=in_dim))
#model.add(ZeroPadding2D((2, 2)))
model.add(Conv2D(64, (2, 3), name='conv1', padding='valid', activation='relu', kernel_initializer='glorot_uniform'))
#model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None))
model.add(Dropout(dr))
#model.add(ZeroPadding2D((2, 2)))
model.add(Conv2D(16, (1, 3), name='conv2', padding='valid', activation='relu', kernel_initializer='glorot_uniform'))
#model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None))
model.add(Dropout(dr))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_normal', name="dense1"))
model.add(Dropout(dr))
model.add(Dense(numclass, kernel_initializer='he_normal', name="dense2"))
model.add(Activation('softmax'))
model.add(Reshape([numclass]))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#model.compile(loss='sparse_categorical_crossentropy', optimizer='adam',metrics=['accuracy'])
model.summary()
Example #9
def resnet_v2_stem(input):
    '''The stem of the pure Inception-v4 and Inception-ResNet-v2 networks. This is the input part of those networks.'''

    # Input shape is 299 * 299 * 3 (Tensorflow dimension ordering)
    x = Conv2D(32, (3, 3),
               kernel_regularizer=l2(0.0002),
               activation="relu",
               strides=(2, 2))(input)  # 149 * 149 * 32
    x = Conv2D(32, (3, 3), kernel_regularizer=l2(0.0002),
               activation="relu")(x)  # 147 * 147 * 32
    x = Conv2D(64, (3, 3),
               kernel_regularizer=l2(0.0002),
               activation="relu",
               padding="same")(x)  # 147 * 147 * 64

    x1 = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x2 = Conv2D(96, (3, 3),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                strides=(2, 2))(x)

    x = concatenate([x1, x2], axis=3)  # 73 * 73 * 160

    x1 = Conv2D(64, (1, 1),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="same")(x)
    x1 = Conv2D(96, (3, 3), kernel_regularizer=l2(0.0002),
                activation="relu")(x1)

    x2 = Conv2D(64, (1, 1),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="same")(x)
    x2 = Conv2D(64, (7, 1),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="same")(x2)
    x2 = Conv2D(64, (1, 7),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="same")(x2)
    x2 = Conv2D(96, (3, 3),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="valid")(x2)

    x = concatenate([x1, x2], axis=3)  # 71 * 71 * 192

    x1 = Conv2D(192, (3, 3),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                strides=(2, 2))(x)

    x2 = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = concatenate([x1, x2], axis=3)  # 35 * 35 * 384

    x = BatchNormalization(axis=3)(x)
    x = Activation("relu")(x)

    return x
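A quick way to verify the shape annotations in the comments, assuming the same imports as the example plus keras.models.Model:

inp = Input(shape=(299, 299, 3))
stem = Model(inp, resnet_v2_stem(inp))
print(stem.output_shape)  # expected: (None, 35, 35, 384)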
Example #10
	def build(width, height, depth, classes, finalAct="softmax"):
		"""
		Model builder.
		Parameters:
			width: The width of the image in pixels.
			height: The height of the image in pixels.
			depth: The number of image channels.
			classes: The number of output classes.
			finalAct: The activation used at the end of the network (default "softmax").
					  Changing this value from 'softmax' to 'sigmoid' enables multi-label classification with Keras;
					  it controls whether we perform single-label or multi-label classification.
		Return:
			model: The constructed network architecture.
		"""
		# Initialize the model along with the input shape to be "channels last" and the channels dimension itself.
		model = Sequential()
		inputShape = (height, width, depth)
		chanDim = -1

		# If channels order is "channels first", modify the input shape and channels dimension.
		if K.image_data_format() == "channels_first":
			inputShape = (depth, height, width)
			chanDim = 1

		# First CONV block, CONV => RELU => POOL.
		model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape, name="block_1--CONV_1"))
		model.add(Activation("relu", name="block_1--ACT_relu_1"))
		model.add(BatchNormalization(axis=chanDim, name="block_1--BN_1"))
		model.add(MaxPooling2D(pool_size=(3, 3), name="block_1--POOL_max"))
		model.add(Dropout(0.25, name="block_1--DO"))

		# Second CONV block, (CONV => RELU)*2 => POOL.
		model.add(Conv2D(64, (3, 3), padding="same", name="block_2--CONV_1"))
		model.add(Activation("relu", name="block_2--ACT_relu_1"))
		model.add(BatchNormalization(axis=chanDim, name="block_2--BN_1"))
		model.add(Conv2D(64, (3, 3), padding="same", name="block_2--CONV_2"))
		model.add(Activation("relu", name="block_2--ACT_relu_2"))
		model.add(BatchNormalization(axis=chanDim, name="block_2--BN_2"))
		model.add(MaxPooling2D(pool_size=(2, 2), name="block_2--POOL_max"))
		model.add(Dropout(0.25, name="block_2--DO"))

		# Third CONV block, (CONV => RELU)*2 => POOL.
		model.add(Conv2D(128, (3, 3), padding="same", name="block_3--CONV_1"))
		model.add(Activation("relu", name="block_3--ACT_relu_1"))
		model.add(BatchNormalization(axis=chanDim, name="block_3--BN_1"))
		model.add(Conv2D(128, (3, 3), padding="same", name="block_3--CONV_2"))
		model.add(Activation("relu", name="block_3--ACT_relu_2"))
		model.add(BatchNormalization(axis=chanDim, name="block_3--BN_2"))
		model.add(MaxPooling2D(pool_size=(2, 2), name="block_3--POOL_max"))
		model.add(Dropout(0.25, name="block_3--DO"))

		# Classify block, FC = > RELU => OUTPUT.
		model.add(Flatten())
		model.add(Dense(1024, name="block_end--FC_1"))
		model.add(Activation("relu", name="block_end--ACT_relu"))
		model.add(BatchNormalization(name="block_end--BN"))
		model.add(Dropout(0.5, name="block_end--DO"))
		# Output, use a 'softmax' ACT -- for single-label classification;
		#      or use a 'sigmoid' ACT -- for multi-label classification.
		model.add(Dense(classes, name="block_end--FC_2"))
		model.add(Activation(finalAct, name="block_end--ACT_output"))

		# Return the constructed network architecture.
		return model
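The finalAct choice should be paired with a matching loss at compile time. A sketch, assuming the enclosing class exposes build as a static method (the class name SmallNet, input size, and class count here are hypothetical):

# multi-label: sigmoid output + binary cross-entropy
model = SmallNet.build(width=96, height=96, depth=3, classes=6, finalAct="sigmoid")
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

# single-label: softmax output + categorical cross-entropy
model = SmallNet.build(width=96, height=96, depth=3, classes=6, finalAct="softmax")
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])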
Example #11
def discriminator(input_shape,
                  base_name,
                  num_res_blocks=0,
                  is_D=True,
                  use_res=False):
    initializer_d = TruncatedNormal(mean=0, stddev=0.1, seed=42)

    D = in_D = Input(shape=input_shape)
    D = Conv2D(64,
               kernel_size=4,
               strides=2,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv1")(D)

    D = LeakyReLU(0.2)(D)

    D = Conv2D(128,
               kernel_size=4,
               strides=2,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv2")(D)

    #D = BatchNormalization(momentum=0.9, epsilon=1e-5, name=base_name + "_bn1")(D, training=1)
    D = LeakyReLU(0.2)(D)

    D = Conv2D(256,
               kernel_size=4,
               strides=2,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv3")(D)
    #D = BatchNormalization(momentum=0.9, epsilon=1e-5, name=base_name + "_bn2")(D, training=1)
    D = LeakyReLU(0.2)(D)
    D = SelfAttention(ch=256)(D)

    D = Conv2D(512,
               kernel_size=4,
               strides=2,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv4")(D)

    #D = BatchNormalization(momentum=0.9, epsilon=1e-5, name=base_name + "_bn3")(D, training=1)
    D = LeakyReLU(0.2)(D)
    D = Conv2D(1,
               kernel_size=1,
               strides=1,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv5")(D)

    D = Flatten()(D)
    out = Dense(units=1, activation=None, name=base_name + "_out")(D)
    model = Model(in_D, out, name=base_name)

    return model
Example #12
    def train(self,
              mode="cpu",
              is_random=1,
              model_path="./model.h5",
              load=False,
              verbose=1):
        if mode == "gpu":
            self.GRU = GRUgpu
        if mode == "cpu":
            self.GRU = GRUcpu

        if verbose:
            print("\nSTART TRAINING")
        if K.image_data_format() == 'channels_first':
            input_shape = (1, self.IMG_W, self.IMG_H)
        else:
            input_shape = (self.IMG_W, self.IMG_H, 1)

        input_data = Input(name='the_input_{}'.format(type(self).__name__),
                           shape=input_shape,
                           dtype='float32')
        inner = Conv2D(self.CONV_FILTERS,
                       self.KERNEL_SIZE,
                       padding='same',
                       activation=self.ACTIVATION,
                       kernel_initializer='he_normal',
                       name='conv1')(input_data)
        inner = MaxPooling2D(pool_size=(self.POOL_SIZE, self.POOL_SIZE),
                             name='max1')(inner)
        inner = Conv2D(self.CONV_FILTERS,
                       self.KERNEL_SIZE,
                       padding='same',
                       activation=self.ACTIVATION,
                       kernel_initializer='he_normal',
                       name='conv2')(inner)
        inner = MaxPooling2D(pool_size=(self.POOL_SIZE, self.POOL_SIZE),
                             name='max2')(inner)

        conv_to_rnn_dims = (self.IMG_W // (self.POOL_SIZE * self.POOL_SIZE),
                            (self.IMG_H // (self.POOL_SIZE * self.POOL_SIZE)) *
                            self.CONV_FILTERS)
        inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

        # cuts down input size going into RNN:
        inner = Dense(self.TIME_DENSE_SIZE,
                      activation=self.ACTIVATION,
                      name='dense1')(inner)

        # Two layers of bidirectional GRUs
        # GRU seems to work as well, if not better than LSTM:
        gru_1 = self.GRU(self.RNN_SIZE,
                         return_sequences=True,
                         kernel_initializer='he_normal',
                         name='gru1')(inner)
        gru_1b = self.GRU(self.RNN_SIZE,
                          return_sequences=True,
                          go_backwards=True,
                          kernel_initializer='he_normal',
                          name='gru1_b')(inner)
        gru1_merged = add([gru_1, gru_1b])
        gru_2 = self.GRU(self.RNN_SIZE,
                         return_sequences=True,
                         kernel_initializer='he_normal',
                         name='gru2')(gru1_merged)
        gru_2b = self.GRU(self.RNN_SIZE,
                          return_sequences=True,
                          go_backwards=True,
                          kernel_initializer='he_normal',
                          name='gru2_b')(gru1_merged)

        # transforms RNN output to character activations:
        inner = Dense(self.tiger_train.get_output_size(),
                      kernel_initializer='he_normal',
                      name='dense2')(concatenate([gru_2, gru_2b]))
        y_pred = Activation('softmax',
                            name='softmax_{}'.format(
                                type(self).__name__))(inner)
        Model(inputs=input_data, outputs=y_pred).summary()

        labels = Input(name='the_labels',
                       shape=[self.tiger_train.max_text_len],
                       dtype='float32')
        input_length = Input(name='input_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        # Keras doesn't currently support loss funcs with extra parameters
        # so CTC loss is implemented in a lambda layer
        loss_out = Lambda(self.ctc_lambda_func, output_shape=(1, ),
                          name='ctc')(
                              [y_pred, labels, input_length, label_length])

        # clipnorm seems to speed up convergence
        sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)

        if load:
            model = load_model(model_path, compile=False)
        else:
            model = Model(
                inputs=[input_data, labels, input_length, label_length],
                outputs=loss_out)

        # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
        model.compile(loss={
            'ctc': lambda y_true, y_pred: y_pred
        },
                      optimizer=sgd)

        if not load:
            # captures output of softmax so we can decode the output during visualization
            test_func = K.function([input_data], [y_pred])

            model.fit_generator(
                generator=self.tiger_train.next_batch(is_random),
                steps_per_epoch=self.tiger_train.n,
                epochs=self.EPOCHS,
                validation_data=self.tiger_val.next_batch(is_random),
                validation_steps=self.tiger_val.n)

        net_inp = model.get_layer(
            name='the_input_{}'.format(type(self).__name__)).input
        net_out = model.get_layer(
            name='softmax_{}'.format(type(self).__name__)).output
        self.MODEL = Model(inputs=net_inp, outputs=net_out)
        return self.MODEL
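The ctc_lambda_func referenced above is not shown in this excerpt. The standard implementation from the Keras image-OCR example wraps K.ctc_batch_cost; the two-step trim is that example's convention (the first RNN outputs tend to be unreliable) and may differ from this author's version:

    def ctc_lambda_func(self, args):
        y_pred, labels, input_length, label_length = args
        # drop the first couple of time steps, which tend to be noisy
        y_pred = y_pred[:, 2:, :]
        return K.ctc_batch_cost(labels, y_pred, input_length, label_length)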
Example #13
def make_discriminator(image_size, use_input_pose, warp_skip, disc_type,
                       warp_agg):
    input_img = Input(list(image_size) + [3])
    output_pose = Input(list(image_size) + [18])
    input_pose = Input(list(image_size) + [18])
    output_img = Input(list(image_size) + [3])

    if warp_skip == 'full':
        warp = [Input((10, 8))]
    elif warp_skip == 'mask':
        warp = [Input((10, 8)), Input((10, image_size[0], image_size[1]))]
    else:
        warp = []

    if use_input_pose:
        input_pose = [input_pose]
    else:
        input_pose = []

    if disc_type == 'call':
        out = Concatenate(axis=-1)([input_img] + input_pose +
                                   [output_img, output_pose])
        out = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out)
        out = block(out, 128)
        out = block(out, 256)
        out = block(out, 512)
        out = block(out, 1, bn=False)
        out = Activation('sigmoid')(out)
        out = Flatten()(out)
        return Model(inputs=[input_img] + input_pose +
                     [output_img, output_pose],
                     outputs=[out])
    elif disc_type == 'sim':
        out = Concatenate(axis=-1)([output_img, output_pose])
        out = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out)
        out = block(out, 128)
        out = block(out, 256)
        out = block(out, 512)
        m_share = Model(inputs=[output_img, output_pose], outputs=[out])
        output_feat = m_share([output_img, output_pose])
        input_feat = m_share([input_img] + input_pose)

        out = Concatenate(axis=-1)([output_feat, input_feat])
        out = LeakyReLU(0.2)(out)
        out = Flatten()(out)
        out = Dense(1)(out)
        out = Activation('sigmoid')(out)

        return Model(inputs=[input_img] + input_pose +
                     [output_img, output_pose],
                     outputs=[out])
    else:
        out_inp = Concatenate(axis=-1)([input_img] + input_pose)
        out_inp = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out_inp)

        out_inp = AffineTransformLayer(10, warp_agg,
                                       image_size)([out_inp] + warp)

        out = Concatenate(axis=-1)([output_img, output_pose])
        out = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out)

        out = Concatenate(axis=-1)([out, out_inp])

        out = block(out, 128)
        out = block(out, 256)
        out = block(out, 512)
        out = block(out, 1, bn=False)
        out = Activation('sigmoid')(out)
        out = Flatten()(out)
        return Model(inputs=[input_img] + input_pose +
                     [output_img, output_pose] + warp,
                     outputs=[out])
Example #14
def train(img_w, img_h, load):

    count_filters = 18
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 256

    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)

    batch_size = 32
    downsample_factor = pool_size**2
    train_data = LicensePlateImages('F:/tablice/train3', img_w, img_h,
                                    batch_size, downsample_factor)
    train_data.load_data()
    val_data = LicensePlateImages('F:/tablice/valid5', img_w, img_h,
                                  batch_size, downsample_factor)
    val_data.load_data()

    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Conv2D(count_filters,
                   kernel_size,
                   padding='same',
                   activation='relu',
                   kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(count_filters,
                   kernel_size,
                   padding='same',
                   activation='relu',
                   kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)

    conv_to_rnn_dims = (img_w // (pool_size**2),
                        (img_h // (pool_size**2)) * count_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

    inner = Dense(time_dense_size, activation='relu', name='dense1')(inner)
    # inner = Dropout(0.5)(inner)

    gru_1 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru1')(inner)
    gru_1b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    # inner = Dropout(0.5)(inner)
    gru_2 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru2_b')(gru1_merged)

    # inner = Dropout(0.5)(inner)
    inner = Dense(train_data.size_of_chars_set(),
                  kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))

    y_pred = Activation('softmax', name='softmax')(inner)

    Model(inputs=input_data, outputs=y_pred).summary()

    labels = Input(name='labels',
                   shape=[train_data.max_text_len],
                   dtype='float32')
    input_length = Input(name='in_length', shape=[1], dtype='int64')
    label_length = Input(name='lab_length', shape=[1], dtype='int64')

    loss_out = Lambda(compute_loss, output_shape=(1, ),
                      name='ctc')([y_pred, labels, input_length, label_length])
    tensor_board = TensorBoard(log_dir='./Graph',
                               histogram_freq=0,
                               write_graph=True,
                               write_images=True,
                               update_freq=1)

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    if load:
        model = load_model('F:/tablice/nowe_tabliceee.h5', compile=False)
    else:
        model = Model(inputs=[input_data, labels, input_length, label_length],
                      outputs=loss_out)

    model.compile(loss='mae', optimizer=adam, metrics=['mae', 'mse'])

    if not load:

        model.fit_generator(generator=train_data.images_next_batch(),
                            steps_per_epoch=250,
                            epochs=64,
                            validation_data=val_data.images_next_batch(),
                            validation_steps=val_data.n,
                            verbose=1,
                            callbacks=[tensor_board])

    model.save('F:/tablice/nowe_tabliceee.h5')

    return model
Example #15
lb = LabelBinarizer().fit(y_train)
y_train = lb.transform(y_train)
y_test = lb.transform(y_test)

# Save the mapping from labels to one-hot encodings.
# We'll need this later when we use the model to decode what its predictions mean
with open(MODEL_LABELS_FILENAME, "wb") as f:
    pickle.dump(lb, f)

# Build the neural network!
model = Sequential()

# First convolutional layer with max pooling
model.add(
    Conv2D(40, (5, 5),
           padding="same",
           input_shape=(40, 50, 1),
           activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

# Second convolutional layer with max pooling
model.add(Conv2D(50, (5, 5), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

# Hidden layer with 500 nodes
model.add(Flatten())
model.add(Dense(500, activation="relu"))

# Output layer with 26 nodes (one for each possible letter we predict)
model.add(Dense(26, activation="softmax"))

# Ask Keras to build the TensorFlow model behind the scenes
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])  # assumed compile step; the excerpt ends here
Example #16
def inceptionresnetv2(input,
                      dropout_keep_prob=0.8,
                      num_classes=1000,
                      is_training=True,
                      scope='InceptionResnetV2'):
    '''Creates the Inception_ResNet_v2 network.'''
    with tf.variable_scope(scope, 'InceptionResnetV2', [input]):
        # Input shape is 299 * 299 * 3
        x = resnet_v2_stem(input)  # Output: 35 * 35 * 256

        # 5 x Inception A
        for i in range(5):
            x = inception_resnet_v2_A(x)
            # Output: 35 * 35 * 256

        # Reduction A
        x = reduction_resnet_A(x, k=256, l=256, m=384,
                               n=384)  # Output: 17 * 17 * 896

        # 10 x Inception B
        for i in range(10):
            x = inception_resnet_v2_B(x)
            # Output: 17 * 17 * 896

        # auxiliary
        loss2_ave_pool = AveragePooling2D(pool_size=(5, 5),
                                          strides=(3, 3),
                                          name='loss2/ave_pool')(x)

        loss2_conv_a = Conv2D(128, (1, 1),
                              kernel_regularizer=l2(0.0002),
                              activation="relu",
                              padding="same")(loss2_ave_pool)
        loss2_conv_b = Conv2D(768, (5, 5),
                              kernel_regularizer=l2(0.0002),
                              activation="relu",
                              padding="same")(loss2_conv_a)

        loss2_conv_b = BatchNormalization(axis=3)(loss2_conv_b)

        loss2_conv_b = Activation('relu')(loss2_conv_b)

        loss2_flat = Flatten()(loss2_conv_b)

        loss2_fc = Dense(1024,
                         activation='relu',
                         name='loss2/fc',
                         kernel_regularizer=l2(0.0002))(loss2_flat)

        # Keras Dropout takes a drop rate, so convert from the keep probability
        loss2_drop_fc = Dropout(1.0 - dropout_keep_prob)(loss2_fc,
                                                         training=is_training)

        loss2_classifier = Dense(num_classes,
                                 name='loss2/classifier',
                                 kernel_regularizer=l2(0.0002))(loss2_drop_fc)

        # Reduction B
        x = reduction_resnet_v2_B(x)  # Output: 8 * 8 * 1792

        # 5 x Inception C
        for i in range(5):
            x = inception_resnet_v2_C(x)
            # Output: 8 * 8 * 1792

        net = x

        # Average Pooling
        x = GlobalAveragePooling2D(name='avg_pool')(x)  # Output: 1792

        pool5_drop_10x10_s1 = Dropout(1.0 - dropout_keep_prob)(x,
                                                               training=is_training)

        loss3_classifier_W = Dense(num_classes,
                                   name='loss3/classifier',
                                   kernel_regularizer=l2(0.0002))

        loss3_classifier = loss3_classifier_W(pool5_drop_10x10_s1)

        w_variables = loss3_classifier_W.get_weights()

        logits = tf.cond(
            tf.equal(is_training, tf.constant(True)),
            lambda: tf.add(loss3_classifier,
                           tf.scalar_mul(tf.constant(0.3), loss2_classifier)),
            lambda: loss3_classifier)

        return logits, net, tf.convert_to_tensor(w_variables[0])
Example #17
def main():
    label_list, image_list = loadImage()
    image_size = len(image_list)
    label_binarizer = LabelBinarizer()
    image_labels = label_binarizer.fit_transform(label_list)
    pickle.dump(label_binarizer, open('label_transform.pkl', 'wb'))
    n_classes = len(label_binarizer.classes_)
    print(label_binarizer.classes_)
    np_image_list = np.array(image_list, dtype=np.float16) / 255.0
    print("[INFO] Splitting data to train, test")
    x_train, x_test, y_train, y_test = train_test_split(np_image_list,
                                                        image_labels,
                                                        test_size=0.2,
                                                        random_state=42)
    aug = ImageDataGenerator(rotation_range=25,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")
    model = Sequential()
    inputShape = (height, width, depth)
    chanDim = -1
    if K.image_data_format() == "channels_first":
        inputShape = (depth, height, width)
        chanDim = 1
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(Conv2D(128, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes))
    model.add(Activation("softmax"))
    model.summary()
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    # distribution
    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    # train the network
    print("[INFO] training network...")
    history = model.fit_generator(aug.flow(x_train, y_train, batch_size=BS),
                                  validation_data=(x_test, y_test),
                                  steps_per_epoch=len(x_train) // BS,
                                  epochs=EPOCHS,
                                  verbose=1)
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(acc) + 1)
    #Train and validation accuracy
    plt.plot(epochs, acc, 'b', label='Training accuracy')
    plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
    plt.title('Training and Validation accuracy')
    plt.legend()

    plt.figure()
    #Train and validation loss
    plt.plot(epochs, loss, 'b', label='Training loss')
    plt.plot(epochs, val_loss, 'r', label='Validation loss')
    plt.title('Training and Validation loss')
    plt.legend()
    plt.show()
Example #18
x, y = shuffle(img_data, Y, random_state=2)

X_train, X_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=2)

input_shape = img_data[0].shape
print(input_shape)

model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           input_shape=input_shape,
           padding='same',
           activation='relu',
           kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(
    Conv2D(32, (3, 3),
           activation='relu',
           padding='same',
           kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

epochs = 10
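The example ends after setting epochs; an assumed continuation, compiling and training on the split created above, would look like:

# Assumed continuation (not part of the original snippet)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=epochs, batch_size=32)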
Example #19
def __create_fcn_dense_net(nb_classes,
                           img_input,
                           include_top,
                           nb_dense_block=5,
                           growth_rate=12,
                           reduction=0.0,
                           dropout_rate=None,
                           weight_decay=1e-4,
                           nb_layers_per_block=4,
                           nb_upsampling_conv=128,
                           upsampling_type='upsampling',
                           init_conv_filters=48,
                           input_shape=None,
                           activation='softmax'):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        nb_upsampling_conv: number of convolutional layers in upsampling via subpixel convolution
        upsampling_type: Can be one of 'upsampling', 'deconv' and 'subpixel'. Defines
            type of upsampling algorithm used.
        input_shape: Only used for shape inference in fully convolutional networks.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                    Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if concat_axis == 1:  # channels_first dim ordering
        _, rows, cols = input_shape
    else:
        rows, cols, _ = input_shape

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # check if upsampling_conv has minimum number of filters
    # minimum is set to 12, as at least 3 color channels are needed for correct upsampling
    assert nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0, 'Parameter `upsampling_conv` number of channels must ' \
                                                                    'be a positive number divisible by 4 and greater ' \
                                                                    'than 12'

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block + 1), 'If list, nb_layer is used as provided. ' \
                                                       'Note that list size must be (nb_dense_block + 1)'

        bottleneck_nb_layers = nb_layers[-1]
        rev_layers = nb_layers[::-1]
        nb_layers.extend(rev_layers[1:])
    else:
        bottleneck_nb_layers = nb_layers_per_block
        nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    x = Conv2D(init_conv_filters, (7, 7),
               kernel_initializer='he_normal',
               padding='same',
               name='initial_conv2D',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    nb_filter = init_conv_filters

    skip_list = []

    # Add dense blocks and transition down block
    for block_idx in range(nb_dense_block):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)

        # Skip connection
        skip_list.append(x)

        # add transition_block
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               weight_decay=weight_decay)

        nb_filter = int(
            nb_filter *
            compression)  # this is calculated inside transition_down_block

    # The last dense_block does not have a transition_down_block
    # return the concatenated feature maps without the concatenation of the input
    _, nb_filter, concat_list = __dense_block(x,
                                              bottleneck_nb_layers,
                                              nb_filter,
                                              growth_rate,
                                              dropout_rate=dropout_rate,
                                              weight_decay=weight_decay,
                                              return_concat_list=True)

    skip_list = skip_list[::-1]  # reverse the skip list

    # Add dense blocks and transition up block
    for block_idx in range(nb_dense_block):
        n_filters_keep = growth_rate * nb_layers[nb_dense_block + block_idx]

        # upsampling block must upsample only the feature maps (concat_list[1:]),
        # not the concatenation of the input with the feature maps (concat_list[0]).
        l = concatenate(concat_list[1:], axis=concat_axis)

        t = __transition_up_block(l,
                                  nb_filters=n_filters_keep,
                                  type=upsampling_type,
                                  weight_decay=weight_decay)

        # concatenate the skip connection with the transition block
        x = concatenate([t, skip_list[block_idx]], axis=concat_axis)

        # Don't allow the feature map size to grow in upsampling dense blocks
        x_up, nb_filter, concat_list = __dense_block(x,
                                                     nb_layers[nb_dense_block +
                                                               block_idx + 1],
                                                     nb_filter=growth_rate,
                                                     growth_rate=growth_rate,
                                                     dropout_rate=dropout_rate,
                                                     weight_decay=weight_decay,
                                                     return_concat_list=True,
                                                     grow_nb_filters=False)

    if include_top:
        x = Conv2D(nb_classes, (1, 1),
                   activation='linear',
                   padding='same',
                   use_bias=False)(x_up)

        if K.image_data_format() == 'channels_first':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape

        x = Reshape((row * col, nb_classes))(x)
        x = Activation(activation)(x)
        x = Reshape((row, col, nb_classes))(x)
    else:
        x = x_up

    return x
Example #20
def getMotionModel(LR, input_shape, n_classes, printmod=1):

    img_input = Input(shape=input_shape)

    # Block 1
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               kernel_initializer='random_uniform')(img_input)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2',
               kernel_initializer='random_uniform')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2',
               kernel_initializer='random_uniform')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3',
               kernel_initializer='random_uniform')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3',
               kernel_initializer='random_uniform')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)

    predictions = Dense(n_classes, activation='softmax')(x)

    model = Model(inputs=img_input,
                  outputs=predictions,
                  name='vgg16_motion_model')

    mypotim = SGD(lr=LR, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=mypotim,
                  metrics=['accuracy'])

    if (printmod == 1):
        model.summary()
    return model
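Usage is a single call; the input shape below is a hypothetical stack of optical-flow frames, not something the excerpt specifies:

model = getMotionModel(LR=0.001, input_shape=(224, 224, 20), n_classes=101)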
Example #21
 def block(x):
     x = Conv2D(filters * 4, kernel_size=3, padding='same')(x)
     x = LeakyReLU(0.1)(x)
     x = PixelShuffler()(x)
     return x
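PixelShuffler here is a custom sub-pixel convolution (depth-to-space) layer from the surrounding codebase, not a stock Keras layer; the preceding Conv2D emits filters * 4 channels precisely so that a 2x shuffle yields filters channels at twice the resolution. An equivalent sketch using a Lambda over tf.nn.depth_to_space, assuming a TensorFlow backend:

import tensorflow as tf
from keras.layers import Lambda

def pixel_shuffler(scale=2):
    # rearrange (H, W, C * scale**2) -> (H * scale, W * scale, C)
    return Lambda(lambda t: tf.nn.depth_to_space(t, scale))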
Example #22
def letter_recognition(X_train, y_train, X_test, y_test):
    ### Prepare dataset

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train = X_train / 255.0
    X_test = X_test / 255.0

    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    class_num = y_test.shape[1]

    ### Prepare model
    print('preparing model...')
    model = Sequential()

    model.add(Conv2D(32, (3, 3), input_shape=X_train.shape[1:], padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Conv2D(256, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Flatten())
    model.add(Dropout(0.2))

    model.add(Dense(512, kernel_constraint=maxnorm(3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(256, kernel_constraint=maxnorm(3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(128, kernel_constraint=maxnorm(3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(class_num))
    model.add(Activation('softmax'))

    print('model created')

    model.compile(
        loss='categorical_crossentropy',
        optimizer='adam',
        metrics=[
            'accuracy',
            'AUC',
        ]
    )

    print(model.summary())

    np.random.seed(seed)
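    # `seed` and `epochs` are assumed to be module-level constants defined earlier in the original source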
    model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=40)

    # Final evaluation of the model

    scores = model.evaluate(X_test, y_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1] * 100))

    ### save model ###
    model.save('number_recognition.hdf5')
    ##################

    print(model.predict_classes(X_test))
Example #23
def conv(x, nf, ks, name):
    x1 = Conv2D(nf, (ks, ks), padding='same', name=name)(x)
    return x1
Example #24
## From: https://stackoverflow.com/questions/39930952/cannot-import-keras-after-installation
# 
# virtualenv -p python3 py-keras
# source py-keras/bin/activate
# pip install -q -U pip setuptools wheel
# pip3 install tensorflow
# python3 conv-dims.py
# deactivate

from keras import Sequential
from keras.layers.convolutional import Conv2D

model = Sequential()
#model.add(Conv2D(filters=16, kernel_size=2, strides=2, padding='valid', activation='relu', input_shape=(200, 200, 1)))
model.add(Conv2D(filters=32, kernel_size=3, strides=2, padding='same', activation='relu', input_shape=(128, 128, 3)))
model.summary()
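For reference: with padding='same' and strides=2, the output spatial size is ceil(128/2) = 64, so this layer yields a (64, 64, 32) feature map with 3*3*3*32 + 32 = 896 trainable parameters, which is exactly what model.summary() prints.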

Example #25
    def lrcn(self):
        """Build a CNN into RNN.
        Starting version from:
            https://github.com/udacity/self-driving-car/blob/master/
                steering-models/community-models/chauffeur/models.py

        Heavily influenced by VGG-16:
            https://arxiv.org/abs/1409.1556

        Also known as an LRCN:
            https://arxiv.org/pdf/1411.4389.pdf
        """
        model = Sequential()

        model.add(
            TimeDistributed(Conv2D(32, (7, 7),
                                   strides=(2, 2),
                                   activation='relu',
                                   padding='same'),
                            input_shape=self.input_shape))
        model.add(
            TimeDistributed(
                Conv2D(32, (3, 3),
                       kernel_initializer="he_normal",
                       activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(
            TimeDistributed(
                Conv2D(64, (3, 3), padding='same', activation='relu')))
        model.add(
            TimeDistributed(
                Conv2D(64, (3, 3), padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(
            TimeDistributed(
                Conv2D(128, (3, 3), padding='same', activation='relu')))
        model.add(
            TimeDistributed(
                Conv2D(128, (3, 3), padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(
            TimeDistributed(
                Conv2D(256, (3, 3), padding='same', activation='relu')))
        model.add(
            TimeDistributed(
                Conv2D(256, (3, 3), padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(
            TimeDistributed(
                Conv2D(512, (3, 3), padding='same', activation='relu')))
        model.add(
            TimeDistributed(
                Conv2D(512, (3, 3), padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Flatten()))

        model.add(Dropout(0.5))
        model.add(LSTM(256, return_sequences=False, dropout=0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
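Because every 2D layer above is wrapped in TimeDistributed, self.input_shape must be a 4-tuple (timesteps, height, width, channels). A minimal sketch with hypothetical dimensions:

from tensorflow.keras.layers import Input, TimeDistributed, Conv2D

# hypothetical: sequences of 40 frames, each an 80x80 RGB image
inp = Input(shape=(40, 80, 80, 3))
x = TimeDistributed(Conv2D(32, (7, 7), strides=(2, 2),
                           padding='same', activation='relu'))(inp)
# the convolution runs frame by frame, so x has shape (None, 40, 40, 40, 32)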
Example #26
def build_model(input_layer, start_neurons, DropoutRatio=0.5):
    # 101 -> 50
    conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None,
                   padding="same")(input_layer)
    conv1 = residual_block(conv1, start_neurons * 1)
    conv1 = residual_block(conv1, start_neurons * 1, True)
    pool1 = MaxPooling2D((2, 2))(conv1)
    pool1 = Dropout(DropoutRatio / 2)(pool1)

    # 50 -> 25
    conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None,
                   padding="same")(pool1)
    conv2 = residual_block(conv2, start_neurons * 2)
    conv2 = residual_block(conv2, start_neurons * 2, True)
    pool2 = MaxPooling2D((2, 2))(conv2)
    pool2 = Dropout(DropoutRatio)(pool2)

    # 25 -> 12
    conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None,
                   padding="same")(pool2)
    conv3 = residual_block(conv3, start_neurons * 4)
    conv3 = residual_block(conv3, start_neurons * 4, True)
    pool3 = MaxPooling2D((2, 2))(conv3)
    pool3 = Dropout(DropoutRatio)(pool3)

    # 12 -> 6
    conv4 = Conv2D(start_neurons * 8, (3, 3), activation=None,
                   padding="same")(pool3)
    conv4 = residual_block(conv4, start_neurons * 8)
    conv4 = residual_block(conv4, start_neurons * 8, True)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(DropoutRatio)(pool4)

    # Middle
    convm = Conv2D(start_neurons * 16, (3, 3), activation=None,
                   padding="same")(pool4)
    convm = residual_block(convm, start_neurons * 16)
    convm = residual_block(convm, start_neurons * 16, True)

    # 6 -> 12
    deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3),
                              strides=(2, 2),
                              padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(DropoutRatio)(uconv4)

    uconv4 = Conv2D(start_neurons * 8, (3, 3), activation=None,
                    padding="same")(uconv4)
    uconv4 = residual_block(uconv4, start_neurons * 8)
    uconv4 = residual_block(uconv4, start_neurons * 8, True)

    # 12 -> 25
    #deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
    deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3),
                              strides=(2, 2),
                              padding="valid")(uconv4)
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(DropoutRatio)(uconv3)

    uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None,
                    padding="same")(uconv3)
    uconv3 = residual_block(uconv3, start_neurons * 4)
    uconv3 = residual_block(uconv3, start_neurons * 4, True)

    # 25 -> 50
    deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv3)
    uconv2 = concatenate([deconv2, conv2])

    uconv2 = Dropout(DropoutRatio)(uconv2)
    uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None,
                    padding="same")(uconv2)
    uconv2 = residual_block(uconv2, start_neurons * 2)
    uconv2 = residual_block(uconv2, start_neurons * 2, True)

    # 50 -> 101
    #deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
    deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3),
                              strides=(2, 2),
                              padding="valid")(uconv2)
    uconv1 = concatenate([deconv1, conv1])

    uconv1 = Dropout(DropoutRatio)(uconv1)
    uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None,
                    padding="same")(uconv1)
    uconv1 = residual_block(uconv1, start_neurons * 1)
    uconv1 = residual_block(uconv1, start_neurons * 1, True)

    #uconv1 = Dropout(DropoutRatio/2)(uconv1)
    #output_layer = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv1)
    output_layer_noActi = Conv2D(1, (1, 1), padding="same",
                                 activation=None)(uconv1)
    output_layer = Activation('sigmoid')(output_layer_noActi)

    return output_layer
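Two notes on this decoder. With stride 2 and a 3x3 kernel, a 'valid'-padding Conv2DTranspose maps n to (n - 1) * 2 + 3 = 2n + 1, which is how the odd 101x101 geometry is recovered (12 -> 25, 50 -> 101); the commented-out 'same' versions would give 2n. Also, residual_block is defined elsewhere in the source; a plausible reconstruction, assuming the pre-activation residual block used in the well-known Kaggle TGS Salt kernels this snippet resembles:

from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add

def batch_activate(x):
    x = BatchNormalization()(x)
    return Activation('relu')(x)

def convolution_block(x, filters, size=(3, 3), activation=True):
    x = Conv2D(filters, size, padding='same')(x)
    return batch_activate(x) if activation else x

def residual_block(block_input, num_filters=16, use_batch_activate=False):
    # pre-activation residual unit: BN-ReLU, two convolutions, identity shortcut
    x = batch_activate(block_input)
    x = convolution_block(x, num_filters)
    x = convolution_block(x, num_filters, activation=False)
    x = Add()([x, block_input])
    return batch_activate(x) if use_batch_activate else x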
Example #27
    def twoStreams(self):
        input = Input(shape=(80, 80, 3))
        input1 = Lambda(lambda x: x[:, 20:60, 20:60, :],
                        output_shape=(40, 40, 3))(input)
        input2 = Lambda(lambda x: tf.strided_slice(
            x, [0, 0, 0, 0], [self.data_size, -1, -1, 3], [1, 2, 2, 1]),
                        output_shape=(40, 40, 3))(input)

        x = Conv2D(kernel_size=11,
                   filters=96,
                   strides=(3, 3),
                   padding='same',
                   activation='relu')(input1)
        x = normalization.BatchNormalization()(x)
        x = MaxPooling2D(pool_size=2, strides=(2, 2))(x)
        x = (Conv2D(kernel_size=5,
                    filters=256,
                    strides=(1, 1),
                    padding='same',
                    activation='relu'))(x)
        x = (normalization.BatchNormalization())(x)
        x = (MaxPooling2D(pool_size=2, strides=(2, 2)))(x)
        x = (Conv2D(kernel_size=3,
                    filters=384,
                    strides=(1, 1),
                    padding='same',
                    activation='relu'))(x)
        x = (Conv2D(kernel_size=3,
                    filters=384,
                    strides=(1, 1),
                    padding='same',
                    activation='relu'))(x)
        x = (Conv2D(kernel_size=3,
                    filters=256,
                    strides=(1, 1),
                    padding='same',
                    activation='relu'))(x)
        x = (MaxPooling2D(pool_size=2, strides=(2, 2)))(x)

        y = Conv2D(kernel_size=11,
                   filters=96,
                   strides=(3, 3),
                   padding='same',
                   activation='relu')(input2)
        y = normalization.BatchNormalization()(y)
        y = MaxPooling2D(pool_size=2, strides=(2, 2))(y)
        y = (Conv2D(kernel_size=5,
                    filters=256,
                    strides=(1, 1),
                    padding='same',
                    activation='relu'))(y)
        y = (normalization.BatchNormalization())(y)
        y = (MaxPooling2D(pool_size=2, strides=(2, 2)))(y)
        y = (Conv2D(kernel_size=3,
                    filters=384,
                    strides=(1, 1),
                    padding='same',
                    activation='relu'))(y)
        y = (Conv2D(kernel_size=3,
                    filters=384,
                    strides=(1, 1),
                    padding='same',
                    activation='relu'))(y)
        y = (Conv2D(kernel_size=3,
                    filters=256,
                    strides=(1, 1),
                    padding='same',
                    activation='relu'))(y)
        y = (MaxPooling2D(pool_size=2, strides=(2, 2)))(y)

        merge = concatenate([x, y])
        merge = Flatten()(merge)
        merge = (Dense(4096, activation='relu'))(merge)
        merge = (Dense(4096, activation='relu'))(merge)
        merge = (Dense(self.nb_classes, activation='softmax'))(merge)
        model = Model(inputs=input, outputs=merge)
        return model
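The two Lambda streams give this network a fovea/context split over the same 80x80 input: input1 crops the central 40x40 patch, while input2 subsamples the whole frame by taking every second row and column via tf.strided_slice, so both AlexNet-style streams operate on 40x40x3 tensors before their features are merged.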
Example #28
def ds_vgg(data):
    # conv_1
    trainable = True
    data = Concatenate()([data[0], data[1], data[2]])
    conv_1_out = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1_new', trainable=trainable)(data)
    conv_1_out = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2', trainable=trainable)(
        conv_1_out)
    ds_conv_1_out = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(conv_1_out)

    # conv_2
    conv_2_out = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1', trainable=trainable)(
        ds_conv_1_out)
    conv_2_out = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2', trainable=trainable)(
        conv_2_out)
    ds_conv_2_out = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(conv_2_out)

    # conv_3
    conv_3_out = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1', trainable=trainable)(
        ds_conv_2_out)
    conv_3_out = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2', trainable=trainable)(
        conv_3_out)
    conv_3_out = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3', trainable=trainable)(
        conv_3_out)
    ds_conv_3_out = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool', padding='same')(conv_3_out)

    # conv_4
    conv_4_out = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1', trainable=trainable)(
        ds_conv_3_out)
    conv_4_out = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2', trainable=trainable)(
        conv_4_out)
    conv_4_out = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3', trainable=trainable)(
        conv_4_out)
    ds_conv_4_out = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool', padding='same')(conv_4_out)

    # conv_5 #
    conv_5_out = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1', trainable=trainable)(
        ds_conv_4_out)
    conv_5_out = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2', trainable=trainable)(
        conv_5_out)
    conv_5_out = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3', trainable=trainable)(
        conv_5_out)
    # conv_5_out = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool', padding='same')(conv_5_out)
    saliency_conv_5 = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='sal_conv_5', trainable=trainable)(
        conv_5_out)

    conv_4_out = Conv2D(64, (3, 3), padding='same', activation='sigmoid', name='conv_4_out', trainable=trainable)(
        conv_4_out)
    up_saliency_conv_5 = UpSampling2D(size=(2, 2))(saliency_conv_5)
    conv_4_out = Concatenate()([conv_4_out, up_saliency_conv_5])
    saliency_conv_4 = Conv2D(1, (3, 3), padding='same', activation='sigmoid', name='sal_conv4', trainable=trainable)(
        conv_4_out)

    # saliency from conv_3 #
    conv_3_out = Conv2D(64, (3, 3), padding='same', activation='sigmoid', name='conv_3_out', trainable=trainable)(
        conv_3_out)  # , activation='sigmoid'
    up_saliency_conv_4 = UpSampling2D(size=(2, 2))(saliency_conv_4)
    conv_3_out = Concatenate()([conv_3_out, up_saliency_conv_4])
    saliency_conv_3 = Conv2D(1, (3, 3), padding='same', activation='sigmoid', name='sal_conv3', trainable=trainable)(
        conv_3_out)

    return [saliency_conv_5, saliency_conv_4, saliency_conv_3]
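The three returned maps form a coarse-to-fine deep-supervision chain: the coarsest saliency prediction comes from block 5, and each finer map is predicted from the corresponding VGG block's features concatenated with the 2x-upsampled coarser prediction.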
Example #29
def createNewModel(patchSize):
	seed=5
	np.random.seed(seed)
	input=Input(shape=(1,patchSize[0, 0], patchSize[0, 1]))
	out1=Conv2D(filters=32,
				kernel_size=(3,3),
				kernel_initializer='he_normal',
				weights=None,
				padding='valid',
				strides=(1, 1),
				kernel_regularizer=l2(1e-6),
				activation='relu')(input)

	out2=Conv2D(filters=64,
				kernel_size=(3,3),
				kernel_initializer='he_normal',
				weights=None,
				padding='valid',
				strides=(1, 1),
				kernel_regularizer=l2(1e-6),
				activation='relu')(out1)
	out2=pool2(pool_size=(2,2),data_format='channels_first')(out2)

	out3=Conv2D(filters=128,  # learning rate: 0.1 -> 76%
				kernel_size=(3,3),
				kernel_initializer='he_normal',
				weights=None,
				padding='valid',
				strides=(1, 1),
				kernel_regularizer=l2(1e-6),
				activation='relu')(out2)


	out4=Conv2D(filters=128,  # learning rate: 0.1 -> 76%
				kernel_size=(3,3),
				kernel_initializer='he_normal',
				weights=None,
				padding='valid',
				strides=(1, 1),
				kernel_regularizer=l2(1e-6),
				activation='relu')(out3)
	out4=pool2(pool_size=(2,2),data_format='channels_first')(out4)

	out5_1=Conv2D(filters=32,
				  kernel_size=(1,1),
				  kernel_initializer='he_normal',
				  weights=None,
				  padding='same',
				  strides=(1, 1),
				  kernel_regularizer=l2(1e-6),
	              activation='relu')(out4)

	out5_2=Conv2D(filters=32,  # learning rate: 0.1 -> 76%
				  kernel_size=(1,1),
				  kernel_initializer='he_normal',
				  weights=None,
				  padding='same',
				  strides=(1, 1),
				  kernel_regularizer=l2(1e-6),
	              activation='relu')(out4)
	out5_2=Conv2D(filters=128,  # learning rate: 0.1 -> 76%
				  kernel_size=(3,3),
				  kernel_initializer='he_normal',
				  weights=None,
				  padding='same',
				  strides=(1, 1),
				  kernel_regularizer=l2(1e-6),
	              activation='relu')(out5_2)

	out5_3=Conv2D(filters=32,  # learning rate: 0.1 -> 76%
				  kernel_size=(1,1),
				  kernel_initializer='he_normal',
				  weights=None,
				  padding='same',
				  strides=(1, 1),
				  kernel_regularizer=l2(1e-6),
	              activation='relu')(out4)
	out5_3=Conv2D(filters=128,  # learning rate: 0.1 -> 76%
				  kernel_size=(5,5),
				  kernel_initializer='he_normal',
				  weights=None,
				  padding='same',
				  strides=(1, 1),
				  kernel_regularizer=l2(1e-6),
	              activation='relu')(out5_3)

	out5_4=pool2(pool_size=(3,3),strides=(1,1),padding='same',data_format='channels_first')(out4)
	out5_4=Conv2D(filters=128,  # learning rate: 0.1 -> 76%
				  kernel_size=(1,1),
				  kernel_initializer='he_normal',
				  weights=None,
				  padding='same',
				  strides=(1, 1),
				  kernel_regularizer=l2(1e-6),
	              activation='relu')(out5_4)

	out5=concatenate(inputs=[out5_1,out5_2,out5_3,out5_4],axis=1)  # merge all four inception-style branches, including the pooling branch out5_4

	out7=Conv2D(filters=256,  # learning rate: 0.1 -> 76%
				kernel_size=(3,3),
				kernel_initializer='he_normal',
				weights=None,
				padding='valid',
				strides=(1, 1),
				kernel_regularizer=l2(1e-6),
	            activation='relu')(out5)
	#out7=pool2(pool_size=(2,2),data_format='channels_first')(out7)

	out8=Conv2D(filters=256,  # learning rate: 0.1 -> 76%
				kernel_size=(3,3),
				kernel_initializer='he_normal',
				weights=None,
				padding='valid',
				strides=(1, 1),
				kernel_regularizer=l2(1e-6),
	            activation='relu')(out7)
	out8=pool2(pool_size=(2,2),data_format='channels_first')(out8)

	out9=Flatten()(out8)


	out10=Dense(units=11,
	           kernel_initializer='normal',
               kernel_regularizer='l2',
	           activation='softmax')(out9)

	cnn = Model(inputs=input,outputs=out10)
	return cnn
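pool2 is not defined in this snippet; in the original source it is presumably an alias for a 2D max-pooling layer, e.g.:

from keras.layers import MaxPooling2D as pool2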
Example #30
def tweet_ner_model(tweets_info):

    ### Get the orthographic sentences
    cmplt_word_list = tweets_info['words']
    cmplt_ortho_word_list = tweets_info['orthographic_words']
    cmplt_BIOU_result = tweets_info['']


    flattened_wrdlst = [val for sublist in cmplt_word_list for val in sublist]
    flattened_ortho_wrdlst = [val for sublist in cmplt_ortho_word_list for val in sublist]

    unique_wrds = set(flattened_wrdlst)
    unique_ortho_wrds = list(set(flattened_ortho_wrdlst))

    glove_dict = utility.getGloveVec(unique_wrds)

    char_dict = lrf_config.get_char_dict()
    ortho_char_dict = lrf_config.get_orthohraphic_char_dict()


    ############################# Initializations of Embedding Matrices:

    #### Initialization of Actual Word Character Embedding : DIM : Dim_of_chars x Dim_needed_for_char_embedding : 94 x 30
    ## Random Uniform Initialization

    char_embed_matrix = initialize_matrix(dim=constants.CHAR_EMBED_DIM, m=constants.CHAR_EMBED_DIM,
                                          n=constants.CHAR_ONEHOT_DIM, initialization_typ='random_uniform')

    #### Initialization of Orthographic Word Character Embedding : DIM : Dim_of_ortho_chars x Dim_needed_for_ortho_char_embedding : 4 x 30
    ## Random Uniform Initialization

    char_o_embed_matrix = initialize_matrix(dim=constants.CHAR_O_EMBED_DIM, m=constants.CHAR_O_EMBED_DIM,
                                          n=constants.CHAR_O_ONEHOT_DIM, initialization_typ = 'random_uniform')

    #### Initialization of Orthographic Word Embedding : DIM : Dim_of_unique_Ortho_words x Dim_of_glove_vec : n x 200
    ## Random Uniform Initialization

    word_o_embed_matrix = initialize_matrix(dim=constants.GLOVE_DIM, m=constants.GLOVE_DIM,
                                            n=len(unique_ortho_wrds), initialization_typ='random_uniform')


    ############################ Actual Model for Processing

    comprehensive_input = []

    for ind_tweet,tweet in enumerate(cmplt_word_list):
        ortho_tweet = cmplt_ortho_word_list[ind_tweet]

        for ind_word,word in enumerate(tweet):
            ortho_word = ortho_tweet[ind_word]

            #########################################################
            ## Part 1: Finding Char Embedding of any word:
            char_labels = [char_dict[c] for c in list(word)]
            char_onehot = keras.utils.to_categorical(char_labels,num_classes=constants.CHAR_ONEHOT_DIM)

            char_embed_inp = np.matmul(char_embed_matrix,np.transpose(char_onehot))

            out_1 = Conv2D(filters=constants.NUM_OF_FILTERS,
                           kernel_size=(constants.CHAR_EMBED_DIM, constants.WINDOW_SIZE),
                           padding='same', activation=constants.LAYER_1_ACTIV,
                           kernel_initializer=RandomUniform,
                           bias_initializer=RandomUniform)(char_embed_inp)

            #########################################################
            ## Part 2: Finding Word Embedding of word: Glove
            high_dim = np.sqrt(3 / constants.GLOVE_DIM)
            low_dim = (-1) * high_dim

            if word in glove_dict:
                out_2 = np.transpose(glove_dict[word])
            else:
                out_2 = np.random.uniform(low=low_dim, high=high_dim,
                                          size=(constants.GLOVE_DIM, 1))
            #########################################################
            ## Part 3: Finding Char Embedding of orthographic word

            ortho_char_labels = [ortho_char_dict[c] for c in list(ortho_word)]
            ortho_char_onehot = keras.utils.to_categorical(ortho_char_labels, num_classes=constants.CHAR_O_ONEHOT_DIM)

            ortho_char_embed_inp = np.matmul(char_o_embed_matrix, np.transpose(ortho_char_onehot))

            out_3 = Conv2D(filters=constants.NUM_OF_FILTERS,
                           kernel_size=(constants.CHAR_O_EMBED_DIM, constants.WINDOW_SIZE), padding='same',
                           activation=constants.LAYER_2_ACTIV, kernel_initializer=RandomUniform,
                           bias_initializer=RandomUniform)(ortho_char_embed_inp)

            #########################################################
            ## Part 4: Finding Word Embedding of orthographic word

            word_onehot = keras.utils.to_categorical(unique_ortho_wrds.index(ortho_word))
            ortho_word_inp = np.matmul(np.transpose(word_o_embed_matrix),word_onehot)
            out_4 = Conv2D(filters=constants.NUM_OF_FILTERS,
                           kernel_size=(constants.WORD_O_EMBED_DIM, constants.WINDOW_SIZE),
                           padding='same', activation=constants.LAYER_3_ACTIV,
                           kernel_initializer=RandomUniform,
                           bias_initializer=RandomUniform)(ortho_word_inp)

            comprehensive_input = tf.keras.backend.stack((out_1,out_2,out_3,out_4),axis=0)

            # comprehensive_input.append(np.concatenate((out_1,out_2,out_3,out_4)))


        LSTM_NUM_NODES = len(comprehensive_input)

        lstm_out = keras.layers.Bidirectional(LSTM(units=LSTM_NUM_NODES, return_sequences=True, activation='hard_sigmoid', use_bias=True, kernel_initializer=RandomUniform, dropout=0.0))(comprehensive_input)

        comprehensive_model = crf.CRF(constants.NUM_OF_TAGS)
        out = comprehensive_model(lstm_out)