Example no. 1
gen_seed_eval = generator_seed(shape_noise, batch_size, label=1)

###############################
##### Generator Model   #######
input_noise = Input(shape=shape_noise)
g = input_noise
g = Dense(1024, activation="relu")(g)
g = Dense(128 * 6 * 6, activation="relu")(g)
g = BatchNormalization()(g)

g = Reshape((6, 6, 128))(g)
#g = Dropout(0.5)(g)

# Input : (6,6), Output : (8,8)
g = UpSampling2D((2, 2))(g)
g = Conv2D(128, (5, 5), activation="relu")(g)
#g = Dropout(0.5)(g)

# Input : (8,8), Output : (12,12)
g = UpSampling2D((2, 2))(g)
g = Conv2D(64, (5, 5), activation="relu")(g)
#g = Dropout(0.5)(g)

# Input : (12,12), Output : (20,20)
g = UpSampling2D((2, 2))(g)
g = Conv2D(32, (5, 5), activation="relu")(g)

# Input : (20,20), Output : (32,32)
g = UpSampling2D((2, 2))(g)
g = Conv2D(16, (5, 5), activation="relu")(g)
g = Conv2D(3, (5, 5), activation="tanh")(g)
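
# A minimal sketch (not part of the original snippet): wrap the layer graph
# above into a Keras Model so it can be inspected or later combined with a
# discriminator. `input_noise` and `g` are the tensors defined earlier.
from keras.models import Model

generator = Model(inputs=input_noise, outputs=g, name="generator")
generator.summary()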
Example no. 2
def inceptionresnetv2(input,
                      dropout_keep_prob=0.8,
                      num_classes=1000,
                      is_training=True,
                      scope='InceptionResnetV2'):
    '''Creates the Inception_ResNet_v2 network.'''
    with tf.variable_scope(scope, 'InceptionResnetV2', [input]):
        # Input shape is 299 * 299 * 3
        x = resnet_v2_stem(input)  # Output: 35 * 35 * 256

        # 5 x Inception A
        for i in range(5):
            x = inception_resnet_v2_A(x)
            # Output: 35 * 35 * 256

        # Reduction A
        x = reduction_resnet_A(x, k=256, l=256, m=384,
                               n=384)  # Output: 17 * 17 * 896

        # 10 x Inception B
        for i in range(10):
            x = inception_resnet_v2_B(x)
            # Output: 17 * 17 * 896

        # auxiliary
        loss2_ave_pool = AveragePooling2D(pool_size=(5, 5),
                                          strides=(3, 3),
                                          name='loss2/ave_pool')(x)

        loss2_conv_a = Conv2D(128, (1, 1),
                              kernel_regularizer=l2(0.0002),
                              activation="relu",
                              padding="same")(loss2_ave_pool)
        loss2_conv_b = Conv2D(768, (5, 5),
                              kernel_regularizer=l2(0.0002),
                              activation="relu",
                              padding="same")(loss2_conv_a)

        loss2_conv_b = BatchNormalization(axis=3)(loss2_conv_b)

        loss2_conv_b = Activation('relu')(loss2_conv_b)

        loss2_flat = Flatten()(loss2_conv_b)

        loss2_fc = Dense(1024,
                         activation='relu',
                         name='loss2/fc',
                         kernel_regularizer=l2(0.0002))(loss2_flat)

        # Keras Dropout expects the fraction to *drop*, so convert the keep probability.
        loss2_drop_fc = Dropout(1 - dropout_keep_prob)(loss2_fc,
                                                       training=is_training)

        loss2_classifier = Dense(num_classes,
                                 name='loss2/classifier',
                                 kernel_regularizer=l2(0.0002))(loss2_drop_fc)

        # Reduction B
        x = reduction_resnet_v2_B(x)  # Output: 8 * 8 * 1792

        # 5 x Inception C
        for i in range(5):
            x = inception_resnet_v2_C(x)
            # Output: 8 * 8 * 1792

        net = x

        # Average Pooling
        x = GlobalAveragePooling2D(name='avg_pool')(x)  # Output: 1792

        # As above, convert the keep probability into a drop rate.
        pool5_drop_10x10_s1 = Dropout(1 - dropout_keep_prob)(x,
                                                             training=is_training)

        loss3_classifier_W = Dense(num_classes,
                                   name='loss3/classifier',
                                   kernel_regularizer=l2(0.0002))

        loss3_classifier = loss3_classifier_W(pool5_drop_10x10_s1)

        w_variables = loss3_classifier_W.get_weights()

        logits = tf.cond(
            tf.equal(is_training, tf.constant(True)),
            lambda: tf.add(loss3_classifier,
                           tf.scalar_mul(tf.constant(0.3), loss2_classifier)),
            lambda: loss3_classifier)

        return logits, net, tf.convert_to_tensor(w_variables[0])
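
# A minimal usage sketch (not in the original): assuming the helper blocks
# (resnet_v2_stem, inception_resnet_v2_A/B/C, reduction_resnet_A, ...) are in
# scope, the network can be instantiated on a TF1-style placeholder like this.
images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='images')
logits, net, aux_weights = inceptionresnetv2(images, num_classes=1000,
                                             is_training=True)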
Example no. 3
def getMotionModel(LR, input_shape, n_classes, printmod=1):

    img_input = Input(shape=input_shape)

    # Block 1
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               kernel_initializer='random_uniform')(img_input)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2',
               kernel_initializer='random_uniform')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2',
               kernel_initializer='random_uniform')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3',
               kernel_initializer='random_uniform')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2',
               kernel_initializer='random_uniform')(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3',
               kernel_initializer='random_uniform')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)

    # Use the n_classes argument instead of a hard-coded class count.
    predictions = Dense(n_classes, activation='softmax')(x)

    model = Model(inputs=img_input,
                  outputs=predictions,
                  name='vgg16_motion_model')

    mypotim = SGD(lr=LR, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=mypotim,
                  metrics=['accuracy'])

    if (printmod == 1):
        model.summary()
    return model
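
# Hypothetical usage (the input shape and class count are assumptions, not
# taken from the original): a motion stream over 10 stacked optical-flow pairs.
motion_model = getMotionModel(LR=1e-3, input_shape=(224, 224, 20), n_classes=101)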
Example no. 4
	def build(width, height, depth, classes, finalAct="softmax"):
		"""
		Model builder.
		Parameters:
			width: The width of the image (number of horizontal pixels).
			height: The height of the image (number of vertical pixels).
			depth: The number of image channels.
			classes: The number of output classes (labels) to predict.
			finalAct: The activation applied at the end of the network (default "softmax").
					  Changing this value from 'softmax' to 'sigmoid' enables multi-label
					  classification with Keras, i.e. it controls whether we perform
					  single-label or multi-label classification.
		Return:
			model: The constructed network architecture.
		"""
		# Initialize the model, assume a "channels last" input shape, and set the channels dimension accordingly.
		model = Sequential()
		inputShape = (height, width, depth)
		chanDim = -1

		# If channels order is "channels first", modify the input shape and channels dimension.
		if K.image_data_format() == "channels_first":
			inputShape = (depth, height, width)
			chanDim = 1

		# First CONV block, CONV => RELU => POOL.
		model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape, name="block_1--CONV_1"))
		model.add(Activation("relu", name="block_1--ACT_relu_1"))
		model.add(BatchNormalization(axis=chanDim, name="block_1--BN_1"))
		model.add(MaxPooling2D(pool_size=(3, 3), name="block_1--POOL_max"))
		model.add(Dropout(0.25, name="block_1--DO"))

		# Second CONV block, (CONV => RELU)*2 => POOL.
		model.add(Conv2D(64, (3, 3), padding="same", name="block_2--CONV_1"))
		model.add(Activation("relu", name="block_2--ACT_relu_1"))
		model.add(BatchNormalization(axis=chanDim, name="block_2--BN_1"))
		model.add(Conv2D(64, (3, 3), padding="same", name="block_2--CONV_2"))
		model.add(Activation("relu", name="block_2--ACT_relu_2"))
		model.add(BatchNormalization(axis=chanDim, name="block_2--BN_2"))
		model.add(MaxPooling2D(pool_size=(2, 2), name="block_2--POOL_max"))
		model.add(Dropout(0.25, name="block_2--DO"))

		# Third CONV block, (CONV => RELU)*2 => POOL.
		model.add(Conv2D(128, (3, 3), padding="same", name="block_3--CONV_1"))
		model.add(Activation("relu", name="block_3--ACT_relu_1"))
		model.add(BatchNormalization(axis=chanDim, name="block_3--BN_1"))
		model.add(Conv2D(128, (3, 3), padding="same", name="block_3--CONV_2"))
		model.add(Activation("relu", name="block_3--ACT_relu_2"))
		model.add(BatchNormalization(axis=chanDim, name="block_3--BN_2"))
		model.add(MaxPooling2D(pool_size=(2, 2), name="block_3--POOL_max"))
		model.add(Dropout(0.25, name="block_3--DO"))

		# Classify block, FC = > RELU => OUTPUT.
		model.add(Flatten())
		model.add(Dense(1024, name="block_end--FC_1"))
		model.add(Activation("relu", name="block_end--ACT_relu"))
		model.add(BatchNormalization(name="block_end--BN"))
		model.add(Dropout(0.5, name="block_end--DO"))
		# Output, use a 'softmax' ACT -- for single-label classification;
		#      or use a 'sigmoid' ACT -- for multi-label classification.
		model.add(Dense(classes, name="block_end--FC_2"))
		model.add(Activation(finalAct, name="block_end--ACT_output"))

		# Return the constructed network architecture.
		return model
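
# Hypothetical usage sketch: `build` appears to be a static factory method on a
# model-builder class (the class itself is not shown above); with an assumed
# class name it might be called like this for a multi-label problem:
#   model = SmallerVGGNet.build(width=96, height=96, depth=3, classes=6,
#                               finalAct="sigmoid")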
#train_idx=range(0,104000)
#test_idx=range(104000,200000)
#X_train = X[train_idx]
#X_test =  X[test_idx]
#Y_train_labeld=X_labeld[train_idx]
#Y_test_labeld=X_labeld[test_idx]

#keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

dr = 0.0

model = Sequential()
model.add(Reshape(in_dim+[1],input_shape=in_dim))
#model.add(ZeroPadding2D((2, 2)))
model.add(Conv2D(64, (2, 3), name='conv1', padding='valid', activation='relu', kernel_initializer='glorot_uniform'))
#model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None))
model.add(Dropout(dr))
#model.add(ZeroPadding2D((2, 2)))
model.add(Conv2D(16, (1, 3), name='conv2', padding='valid', activation='relu', kernel_initializer='glorot_uniform'))
#model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None))
model.add(Dropout(dr))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_normal', name="dense1"))
model.add(Dropout(dr))
model.add(Dense(numclass, kernel_initializer='he_normal', name="dense2"))
model.add(Activation('softmax'))
model.add(Reshape([numclass]))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#model.compile(loss='sparse_categorical_crossentropy', optimizer='adam',metrics=['accuracy'])
model.summary()
Example no. 6
 def deconv2d(layer_input):
     """Layers used during upsampling"""
     u = UpSampling2D(size=2)(layer_input)
     u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)
     u = Activation('relu')(u)
     return u
Example no. 7
def make_discriminator(image_size, use_input_pose, warp_skip, disc_type,
                       warp_agg):
    input_img = Input(list(image_size) + [3])
    output_pose = Input(list(image_size) + [18])
    input_pose = Input(list(image_size) + [18])
    output_img = Input(list(image_size) + [3])

    if warp_skip == 'full':
        warp = [Input((10, 8))]
    elif warp_skip == 'mask':
        warp = [Input((10, 8)), Input((10, image_size[0], image_size[1]))]
    else:
        warp = []

    if use_input_pose:
        input_pose = [input_pose]
    else:
        input_pose = []

    if disc_type == 'call':
        out = Concatenate(axis=-1)([input_img] + input_pose +
                                   [output_img, output_pose])
        out = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out)
        out = block(out, 128)
        out = block(out, 256)
        out = block(out, 512)
        out = block(out, 1, bn=False)
        out = Activation('sigmoid')(out)
        out = Flatten()(out)
        return Model(inputs=[input_img] + input_pose +
                     [output_img, output_pose],
                     outputs=[out])
    elif disc_type == 'sim':
        out = Concatenate(axis=-1)([output_img, output_pose])
        out = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out)
        out = block(out, 128)
        out = block(out, 256)
        out = block(out, 512)
        m_share = Model(inputs=[output_img, output_pose], outputs=[out])
        output_feat = m_share([output_img, output_pose])
        input_feat = m_share([input_img] + input_pose)

        out = Concatenate(axis=-1)([output_feat, input_feat])
        out = LeakyReLU(0.2)(out)
        out = Flatten()(out)
        out = Dense(1)(out)
        out = Activation('sigmoid')(out)

        return Model(inputs=[input_img] + input_pose +
                     [output_img, output_pose],
                     outputs=[out])
    else:
        out_inp = Concatenate(axis=-1)([input_img] + input_pose)
        out_inp = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out_inp)

        out_inp = AffineTransformLayer(10, warp_agg,
                                       image_size)([out_inp] + warp)

        out = Concatenate(axis=-1)([output_img, output_pose])
        out = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out)

        out = Concatenate(axis=-1)([out, out_inp])

        out = block(out, 128)
        out = block(out, 256)
        out = block(out, 512)
        out = block(out, 1, bn=False)
        out = Activation('sigmoid')(out)
        out = Flatten()(out)
        return Model(inputs=[input_img] + input_pose +
                     [output_img, output_pose] + warp,
                     outputs=[out])
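
# Hypothetical usage (all argument values are assumptions, not from the
# original, and the helper `block` is assumed to be defined): a pose-conditioned
# discriminator on 128x64 crops without warping.
#   disc = make_discriminator(image_size=(128, 64), use_input_pose=True,
#                             warp_skip='none', disc_type='call', warp_agg='max')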
Example no. 8
 def downsample(layer_input, filters, f_size=4):
     d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
     d = InstanceNormalization(axis = -1, center = False, scale = False)(d)
     d = Activation('relu')(d)
     
     return d
Example no. 9
 def downsample(layer_input,filters):
     y = Conv2D(filters, kernel_size=(3,3), strides=2, padding='same', kernel_initializer = self.weight_init)(layer_input)
     y = InstanceNormalization(axis = -1, center = False, scale = False)(y)
     y = Activation('relu')(y)
     return y
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train / 255.0
X_test = X_test / 255.0

# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]

# Create the model
model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           input_shape=(3, 32, 32),
           padding='same',
           activation='relu',
           kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(
    Conv2D(32, (3, 3),
           activation='relu',
           padding='same',
           kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# Compile model
epochs = 25
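
# A minimal sketch completing the "Compile model" step above (the optimizer
# settings and batch size are assumptions, not taken from the original):
from keras.optimizers import SGD

lrate = 0.01
sgd = SGD(lr=lrate, momentum=0.9, decay=lrate / epochs, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=epochs, batch_size=64)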
Example no. 11
def main():
    names = ['human', 'murai']
    image_size = 50

    X_train = []
    y_train = []
    for index, name in enumerate(names):
        dir = "./data/devide/" + name + "/model"
        files = glob.glob(dir + "/*.jpg")
        for i, file in enumerate(files[:TRAIN_NUM[name]]):
            image = Image.open(file)
            image = image.convert("RGB")
            image = image.resize((image_size, image_size))
            data = np.asarray(image)
            X_train.append(data)
            y_train.append(index)

    X_train = np.array(X_train)
    y_train = np.array(y_train)

    X_test = []
    y_test = []
    for index, name in enumerate(names):
        dir = "./data/divide/" + name + "/test"
        files = glob.glob(dir + "/*.jpg")
        for i, file in enumerate(files):
            image = Image.open(file)
            image = image.convert("RGB")
            image = image.resize((image_size, image_size))
            data = np.asarray(image)
            X_test.append(data)
            y_test.append(index)

    X_test = np.array(X_test)
    y_test = np.array(y_test)

    X_train = X_train.astype('float32')
    X_train = X_train / 255.0

    X_test = X_test.astype('float32')
    X_test = X_test / 255.0

    # Convert the ground-truth labels to one-hot format
    y_train = np_utils.to_categorical(y_train, 2)
    # Convert the ground-truth labels to one-hot format
    y_test = np_utils.to_categorical(y_test, 2)

    # Build the CNN
    #model = Sequential()

    # model.add(Conv2D(32, (3, 3), padding='same',
    #          activation='relu'))
    # model.add(Activation('relu'))
    #model.add(Conv2D(32, (3, 3)))
    # model.add(Activation('relu'))
    #model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    #model.add(Conv2D(64, (3, 3), padding='same'))
    # model.add(Activation('relu'))
    #model.add(Conv2D(64, (3, 3)))
    # model.add(Activation('relu'))
    #model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    # model.add(Flatten())
    # model.add(Dense(512))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    # model.add(Dense(2))
    # model.add(Activation('softmax'))

    # Compile
    # model.compile(loss='categorical_crossentropy',
    #              optimizer='SGD', metrics=['accuracy'])
    model = Sequential()

    model.add(Conv2D(filters=32, kernel_size=(3, 3),
              strides=(1, 1), padding="same", use_bias=False, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=32, kernel_size=(3, 3),
                     strides=(1, 1), padding="same", use_bias=False, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=32, kernel_size=(3, 3),
                     strides=(1, 1), padding="same", use_bias=False, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation("relu"))
    model.add(Dense(128))
    model.add(Activation('sigmoid'))
    # Set this to the number of people (classes) to classify
    model.add(Dense(2))
    model.add(Activation('softmax'))

    # Compile the model
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(X_train, y_train, epochs=100)
    #print(model.evaluate(X_test, y_test))
    model.save("./data/model/model.h5")
Example no. 12
def unet_model(n_classes=255,
               im_sz=128,
               n_channels=7,
               n_filters_start=32,
               growth_factor=2,
               upconv=True,
               class_weights=cls_wgt):
    droprate = 0.11
    n_filters = n_filters_start
    inputs = Input((im_sz, im_sz, n_channels))
    #inputs = BatchNormalization()(inputs)
    conv1 = Conv2D(n_filters, (3, 3), activation='relu',
                   padding='same')(inputs)
    conv1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv1)
    conv1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv1)
    conv1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    pool1 = Dropout(droprate)(pool1)

    n_filters *= growth_factor
    pool1 = BatchNormalization()(pool1)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv2)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv2)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    pool2 = Dropout(droprate)(pool2)

    n_filters *= growth_factor
    pool2 = BatchNormalization()(pool2)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv3)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv3)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    pool3 = Dropout(droprate)(pool3)

    n_filters *= growth_factor
    pool3 = BatchNormalization()(pool3)
    conv4_0 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(pool3)
    conv4_0 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_0)
    conv4_0 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_0)
    conv4_0 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_0)
    pool4_1 = MaxPooling2D(pool_size=(2, 2))(conv4_0)
    pool4_1 = Dropout(droprate)(pool4_1)

    n_filters *= growth_factor
    pool4_1 = BatchNormalization()(pool4_1)
    conv4_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(pool4_1)
    conv4_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_1)
    conv4_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_1)
    conv4_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_1)
    pool4_2 = MaxPooling2D(pool_size=(2, 2))(conv4_1)
    pool4_2 = Dropout(droprate)(pool4_2)

    n_filters *= growth_factor
    conv5 = Conv2D(n_filters, (3, 3), activation='relu',
                   padding='same')(pool4_2)
    conv5 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv5)
    conv5 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv5)
    conv5 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv5)

    n_filters //= growth_factor
    if upconv:
        up6_1 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv5), conv4_1
        ])
    else:
        up6_1 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4_1])
    up6_1 = BatchNormalization()(up6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(up6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_1)
    conv6_1 = Dropout(droprate)(conv6_1)

    n_filters //= growth_factor
    if upconv:
        up6_2 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv6_1), conv4_0
        ])
    else:
        up6_2 = concatenate([UpSampling2D(size=(2, 2))(conv6_1), conv4_0])
    up6_2 = BatchNormalization()(up6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(up6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_2)
    conv6_2 = Dropout(droprate)(conv6_2)

    n_filters //= growth_factor
    if upconv:
        up7 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv6_2), conv3
        ])
    else:
        up7 = concatenate([UpSampling2D(size=(2, 2))(conv6_2), conv3])
    up7 = BatchNormalization()(up7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv7)
    conv7 = Dropout(droprate)(conv7)

    n_filters //= growth_factor
    if upconv:
        up8 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv7), conv2
        ])
    else:
        up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    up8 = BatchNormalization()(up8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv8)
    conv8 = Dropout(droprate)(conv8)

    n_filters //= growth_factor
    if upconv:
        up9 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv8), conv1
        ])
    else:
        up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv9)
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv9)
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv9)

    conv10 = Conv2D(n_classes, (1, 1), activation='sigmoid')(conv9)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(conv10)

    model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[mean_iou])
    model.summary()
    return model

    # NOTE: everything below this point is unreachable in the original snippet
    # (it follows the return statement) and is kept only for reference.
    model = Model(inputs=inputs, outputs=conv10)

    def weighted_binary_crossentropy(y_true, y_pred):
        class_loglosses = K.mean(K.binary_crossentropy(y_true, y_pred),
                                 axis=[0, 1, 2])
        return K.sum(class_loglosses * K.constant(class_weights))
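
    # Sketch (not part of the original): if the weighted loss defined above were
    # meant to be used, the model could instead be compiled with it, e.g.
    #   model.compile(optimizer='adam', loss=weighted_binary_crossentropy,
    #                 metrics=[mean_iou])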
Example no. 13
#Shuffle the dataset
x, y = shuffle(img_data, Y, random_state=5)

# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=2)

# Defining the model
input_shape = img_data[0].shape
print(input_shape)

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X_train,
          y_train,
          validation_data=(X_test, y_test),
          epochs=20,
          batch_size=32)
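
# Optional evaluation sketch (not in the original): report held-out performance.
score = model.evaluate(X_test, y_test, batch_size=32, verbose=0)
print('Test loss: %.4f, test accuracy: %.4f' % (score[0], score[1]))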
Example no. 14
#coding=utf-8  
from keras.models import Sequential  
from keras.layers import Dense,Flatten,Dropout  
from keras.layers.convolutional import Conv2D,MaxPooling2D  
import numpy as np  
seed = 7  
np.random.seed(seed)  
  
model = Sequential()  
model.add(Conv2D(64,(3,3),strides=(1,1),input_shape=(224,224,3),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(Conv2D(64,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(MaxPooling2D(pool_size=(2,2)))  
model.add(Conv2D(128,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))
model.add(Conv2D(128,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(MaxPooling2D(pool_size=(2,2)))  
model.add(Conv2D(256,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(Conv2D(256,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(Conv2D(256,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(MaxPooling2D(pool_size=(2,2)))  
model.add(Conv2D(512,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(Conv2D(512,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(Conv2D(512,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(MaxPooling2D(pool_size=(2,2)))  
model.add(Conv2D(512,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(Conv2D(512,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(Conv2D(512,(3,3),strides=(1,1),padding='same',activation='relu',kernel_initializer='uniform'))  
model.add(MaxPooling2D(pool_size=(2,2)))  
model.add(Flatten())  
model.add(Dense(4096,activation='relu'))  
model.add(Dropout(0.5))  
model.add(Dense(4096,activation='relu'))  
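
# The snippet above stops before the classifier head; a standard VGG-16 would
# typically finish like this (a sketch, not part of the original code):
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax'))
model.summary()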
Example no. 15
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

batch_size = 128
num_classes = 10
epochs = 12

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(
    Conv2D(32,
           kernel_size=(5, 5),
           strides=(1, 1),
           padding='same',
           activation='relu',
           input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (2, 2), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
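
# Training sketch (the call itself is not in the original; it reuses the
# variables defined above):
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
          verbose=1, validation_data=(x_test, y_test))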
Example no. 16
def initial_block(inp, nb_filter=13, nb_row=3, nb_col=3, strides=(2, 2)):
    conv = Conv2D(nb_filter, (nb_row, nb_col), padding='same', strides=strides)(inp)
    max_pool, indices = MaxPoolingWithArgmax2D()(inp)
    merged = concatenate([conv, max_pool], axis=3)
    return merged, indices
Example no. 17
def tweet_ner_model(tweets_info):

    ### Get The orthographic Sentences
    cmplt_word_list = tweets_info['words']
    cmplt_ortho_word_list = tweets_info['orthographic_words']
    cmplt_BIOU_result = tweets_info['']


    flattened_wrdlst = [val for sublist in cmplt_word_list for val in sublist]
    flattened_ortho_wrdlst = [val for sublist in cmplt_ortho_word_list for val in sublist]

    unique_wrds = set(flattened_wrdlst)
    unique_ortho_wrds = list(set(flattened_ortho_wrdlst))

    glove_dict = utility.getGloveVec(unique_wrds)

    char_dict = lrf_config.get_char_dict()
    ortho_char_dict = lrf_config.get_orthohraphic_char_dict()


    ############################# Initializations of Embedding Matrices:

    #### Initialization of Actual Word Character Embedding : DIM : Dim_of_chars x Dim_needed_for_char_embedding : 94 x 30
    ## Random Uniform Initialization

    char_embed_matrix = initialize_matrix(dim = constants.CHAR_EMBED_DIM,m = constants.CHAR_EMBED_DIM,n=constants.CHAR_ONEHOT_DIM, initialization_typ = 'random_uniform')

    #### Initialization of Orthographic Word Character Embedding : DIM : Dim_of_ortho_chars x Dim_needed_for_ortho_char_embedding : 4 x 30
    ## Random Uniform Initialization

    char_o_embed_matrix = initialize_matrix(dim=constants.CHAR_O_EMBED_DIM, m=constants.CHAR_O_EMBED_DIM,
                                          n=constants.CHAR_O_ONEHOT_DIM, initialization_typ = 'random_uniform')

    #### Initialization of Orthographic Word Embedding : DIM : Dim_of_unique_Ortho_words x Dim_of_glove_vec : n x 200
    ## Random Uniform Initialization

    word_o_embed_matrix = initialize_matrix(dim=constants.GLOVE_DIM, m=constants.GLOVE_DIM,
                                            n=len(unique_ortho_wrds), initialization_typ='random_uniform')


    ############################ Actual Model for Processing

    comprehensive_input = []

    for ind_tweet,tweet in enumerate(cmplt_word_list):
        ortho_tweet = cmplt_ortho_word_list[ind_tweet]

        for ind_word,word in enumerate(tweet):
            ortho_word = ortho_tweet[ind_word]

            #########################################################
            ## Part 1: Finding Char Embedding of any word:
            char_labels = [char_dict[c] for c in list(word)]
            char_onehot = keras.utils.to_categorical(char_labels,num_classes=constants.CHAR_ONEHOT_DIM)

            char_embed_inp = np.matmul(char_embed_matrix,np.transpose(char_onehot))

            # Instantiate the initializer (passing the class itself would not work as intended).
            out_1 = Conv2D(filters=constants.NUM_OF_FILTERS,
                           kernel_size=(constants.CHAR_EMBED_DIM, constants.WINDOW_SIZE),
                           padding='same', activation=constants.LAYER_1_ACTIV,
                           kernel_initializer=RandomUniform(),
                           bias_initializer=RandomUniform())(char_embed_inp)

            #########################################################
            ## Part 2: Finding Word Embedding of word: Glove
            high_dim = np.sqrt(3 / constants.GLOVE_DIM)
            low_dim = (-1) * high_dim

            out_2 = np.transpose(glove_dict.get(word)) if word in glove_dict else np.random.uniform(low=low_dim,
                                                                                                    high=high_dim,
                                                                                                    size=(
                                                                                                    constants.GLOVE_DIM,
                                                                                                    1))
            #########################################################
            ## Part 3: Finding Char Embedding of orthographic word

            ortho_char_labels = [ortho_char_dict[c] for c in list(ortho_word)]
            ortho_char_onehot = keras.utils.to_categorical(ortho_char_labels, num_classes=constants.CHAR_O_ONEHOT_DIM)

            ortho_char_embed_inp = np.matmul(char_o_embed_matrix, np.transpose(ortho_char_onehot))

            out_3 = Conv2D(filters=constants.NUM_OF_FILTERS,
                           kernel_size=(constants.CHAR_O_EMBED_DIM, constants.WINDOW_SIZE),
                           padding='same', activation=constants.LAYER_2_ACTIV,
                           kernel_initializer=RandomUniform(),
                           bias_initializer=RandomUniform())(ortho_char_embed_inp)

            #########################################################
            ## Part 4: Finding Word Embedding of orthographic word

            word_onehot = keras.utils.to_categorical(unique_ortho_wrds.index(ortho_word))
            ortho_word_inp = np.matmul(np.transpose(word_o_embed_matrix),word_onehot)
            out_4 = Conv2D(filters=constants.NUM_OF_FILTERS,
                           kernel_size=(constants.WORD_O_EMBED_DIM, constants.WINDOW_SIZE),
                           padding='same', activation=constants.LAYER_3_ACTIV,
                           kernel_initializer=RandomUniform(),
                           bias_initializer=RandomUniform())(ortho_word_inp)

            comprehensive_input = tf.keras.backend.stack((out_1,out_2,out_3,out_4),axis=0)

            # comprehensive_input.append(np.concatenate((out_1,out_2,out_3,out_4)))


        LSTM_NUM_NODES = len(comprehensive_input)

        lstm_out = keras.layers.Bidirectional(
            LSTM(units=LSTM_NUM_NODES, return_sequences=True,
                 activation='hard_sigmoid', use_bias=True,
                 kernel_initializer=RandomUniform(), dropout=0.0))(comprehensive_input)

        comprehensive_model = crf.CRF(constants.NUM_OF_TAGS)
        out = comprehensive_model(lstm_out)
Example no. 18
print('num frames: {}'.format(len(lines)))

train_generator = generator_from_lines(train_lines, True)
validation_generator = generator_from_lines(validation_lines, False)

from keras.models import Sequential
from keras.layers import Cropping2D, Dense, Flatten, Lambda 
from keras.layers.convolutional import Conv2D
from keras.layers.core import Dropout
from keras.layers.local import LocallyConnected2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam

model = Sequential()
model.add(Cropping2D(cropping=((69,25),(0,0)), input_shape=(160,320,3)))
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='elu', name='conv1'))
model.add(Dropout(0.3))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='elu', name='conv2'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='elu', name='conv3'))
model.add(Conv2D(64, (3, 3), activation='elu', name='conv4'))
model.add(Conv2D(64, (3, 3), activation='elu', name='conv5'))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(1164, activation='elu'))
model.add(Dense(100, activation='elu'))
model.add(Dense(50, activation='elu'))
model.add(Dense(1))

model.compile(loss='mse', optimizer=Adam(epsilon=1e-3))

model.fit_generator(
Example no. 19
def train(img_w, img_h, load):

    count_filters = 18
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 256

    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)

    batch_size = 32
    downsample_factor = pool_size**2
    train_data = LicensePlateImages('F:/tablice/train3', img_w, img_h,
                                    batch_size, downsample_factor)
    train_data.load_data()
    val_data = LicensePlateImages('F:/tablice/valid5', img_w, img_h,
                                  batch_size, downsample_factor)
    val_data.load_data()

    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Conv2D(count_filters,
                   kernel_size,
                   padding='same',
                   activation='relu',
                   kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(count_filters,
                   kernel_size,
                   padding='same',
                   activation='relu',
                   kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)

    conv_to_rnn_dims = (img_w // (pool_size**2),
                        (img_h // (pool_size**2)) * count_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

    inner = Dense(time_dense_size, activation='relu', name='dense1')(inner)
    # inner = Dropout(0.5)(inner)

    gru_1 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru1')(inner)
    gru_1b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    # inner = Dropout(0.5)(inner)
    gru_2 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru2_b')(gru1_merged)

    # inner = Dropout(0.5)(inner)
    inner = Dense(train_data.size_of_chars_set(),
                  kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))

    y_pred = Activation('softmax', name='softmax')(inner)

    Model(inputs=input_data, outputs=y_pred).summary()

    labels = Input(name='labels',
                   shape=[train_data.max_text_len],
                   dtype='float32')
    input_length = Input(name='in_length', shape=[1], dtype='int64')
    label_length = Input(name='lab_length', shape=[1], dtype='int64')

    loss_out = Lambda(compute_loss, output_shape=(1, ),
                      name='ctc')([y_pred, labels, input_length, label_length])
    tensor_board = TensorBoard(log_dir='./Graph',
                               histogram_freq=0,
                               write_graph=True,
                               write_images=True,
                               update_freq=1)

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    if load:
        model = load_model('F:/tablice/nowe_tabliceee.h5', compile=False)
    else:
        model = Model(inputs=[input_data, labels, input_length, label_length],
                      outputs=loss_out)

    model.compile(loss='mae', optimizer=adam, metrics=['mae', 'mse'])

    if not load:

        model.fit_generator(generator=train_data.images_next_batch(),
                            steps_per_epoch=250,
                            epochs=64,
                            validation_data=val_data.images_next_batch(),
                            validation_steps=val_data.n,
                            verbose=1,
                            callbacks=[tensor_board])

    model.save('F:/tablice/nowe_tabliceee.h5')

    return model
def VGG_16(width, height, depth, classes, weights_path=None):

    input_shape = (height, width, depth)
    chanDim = -1

    if backend.image_data_format() == "channels_first":
        input_shape = (depth, height, width)     #  input_shape=(3, 224, 224)))
        chanDim = 1



    model = Sequential()
    model.add(ZeroPadding2D((1, 1),
                            input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(classes, activation='softmax'))

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    if weights_path:
        model.load_weights(weights_path)

    return model
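
# Hypothetical usage (image size and class count are assumptions, not from the
# original snippet):
vgg_model = VGG_16(width=224, height=224, depth=3, classes=1000)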
Example no. 21
    def train(self,
              mode="cpu",
              is_random=1,
              model_path="./model.h5",
              load=False,
              verbose=1):
        if mode == "gpu":
            self.GRU = GRUgpu
        if mode == "cpu":
            self.GRU = GRUcpu

        if verbose:
            print("\nSTART TRAINING")
        if K.image_data_format() == 'channels_first':
            input_shape = (1, self.IMG_W, self.IMG_H)
        else:
            input_shape = (self.IMG_W, self.IMG_H, 1)

        input_data = Input(name='the_input_{}'.format(type(self).__name__),
                           shape=input_shape,
                           dtype='float32')
        inner = Conv2D(self.CONV_FILTERS,
                       self.KERNEL_SIZE,
                       padding='same',
                       activation=self.ACTIVATION,
                       kernel_initializer='he_normal',
                       name='conv1')(input_data)
        inner = MaxPooling2D(pool_size=(self.POOL_SIZE, self.POOL_SIZE),
                             name='max1')(inner)
        inner = Conv2D(self.CONV_FILTERS,
                       self.KERNEL_SIZE,
                       padding='same',
                       activation=self.ACTIVATION,
                       kernel_initializer='he_normal',
                       name='conv2')(inner)
        inner = MaxPooling2D(pool_size=(self.POOL_SIZE, self.POOL_SIZE),
                             name='max2')(inner)

        conv_to_rnn_dims = (self.IMG_W // (self.POOL_SIZE * self.POOL_SIZE),
                            (self.IMG_H // (self.POOL_SIZE * self.POOL_SIZE)) *
                            self.CONV_FILTERS)
        inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

        # cuts down input size going into RNN:
        inner = Dense(self.TIME_DENSE_SIZE,
                      activation=self.ACTIVATION,
                      name='dense1')(inner)

        # Two layers of bidirectional GRUs
        # GRU seems to work as well, if not better than LSTM:
        gru_1 = self.GRU(self.RNN_SIZE,
                         return_sequences=True,
                         kernel_initializer='he_normal',
                         name='gru1')(inner)
        gru_1b = self.GRU(self.RNN_SIZE,
                          return_sequences=True,
                          go_backwards=True,
                          kernel_initializer='he_normal',
                          name='gru1_b')(inner)
        gru1_merged = add([gru_1, gru_1b])
        gru_2 = self.GRU(self.RNN_SIZE,
                         return_sequences=True,
                         kernel_initializer='he_normal',
                         name='gru2')(gru1_merged)
        gru_2b = self.GRU(self.RNN_SIZE,
                          return_sequences=True,
                          go_backwards=True,
                          kernel_initializer='he_normal',
                          name='gru2_b')(gru1_merged)

        # transforms RNN output to character activations:
        inner = Dense(self.tiger_train.get_output_size(),
                      kernel_initializer='he_normal',
                      name='dense2')(concatenate([gru_2, gru_2b]))
        y_pred = Activation('softmax',
                            name='softmax_{}'.format(
                                type(self).__name__))(inner)
        Model(inputs=input_data, outputs=y_pred).summary()

        labels = Input(name='the_labels',
                       shape=[self.tiger_train.max_text_len],
                       dtype='float32')
        input_length = Input(name='input_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        # Keras doesn't currently support loss funcs with extra parameters
        # so CTC loss is implemented in a lambda layer
        loss_out = Lambda(self.ctc_lambda_func, output_shape=(1, ),
                          name='ctc')(
                              [y_pred, labels, input_length, label_length])

        # clipnorm seems to speeds up convergence
        sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)

        if load:
            model = load_model(model_path, compile=False)
        else:
            model = Model(
                inputs=[input_data, labels, input_length, label_length],
                outputs=loss_out)

        # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
        model.compile(loss={
            'ctc': lambda y_true, y_pred: y_pred
        },
                      optimizer=sgd)

        if not load:
            # captures output of softmax so we can decode the output during visualization
            test_func = K.function([input_data], [y_pred])

            model.fit_generator(
                generator=self.tiger_train.next_batch(is_random),
                steps_per_epoch=self.tiger_train.n,
                epochs=self.EPOCHS,
                validation_data=self.tiger_val.next_batch(is_random),
                validation_steps=self.tiger_val.n)

        # Use the suffixed layer names assigned above when retrieving the tensors.
        net_inp = model.get_layer(
            name='the_input_{}'.format(type(self).__name__)).input
        net_out = model.get_layer(
            name='softmax_{}'.format(type(self).__name__)).output
        self.MODEL = Model(inputs=net_inp, outputs=net_out)
        return self.MODEL
data_dict=[]
with open("cifar10_data",'rb') as pickle_in:
    data_dict = pickle.load(pickle_in)
    print(data_dict.keys())
traindata = data_dict['traindata']
trainlabel = data_dict['trainlabel']
testlabel = data_dict['testlabel']
testdata = data_dict['testdata']
classes = data_dict['classes']
num_classes = len(classes)

#Built model
model = Sequential()

model.add(Conv2D(64, (3, 3), input_shape=(32, 32, 1), activation='relu'))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Flatten())

model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(units=num_classes, activation='softmax'))
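
# Compile/train sketch (not part of the original; assumes `trainlabel` /
# `testlabel` are already one-hot encoded and the data matches the (32, 32, 1)
# input shape above):
#   model.compile(loss='categorical_crossentropy', optimizer='adam',
#                 metrics=['accuracy'])
#   model.fit(traindata, trainlabel, validation_data=(testdata, testlabel),
#             epochs=20, batch_size=64)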
Example no. 23
def discriminator(input_shape,
                  base_name,
                  num_res_blocks=0,
                  is_D=True,
                  use_res=False):
    initializer_d = TruncatedNormal(mean=0, stddev=0.1, seed=42)

    D = in_D = Input(shape=input_shape)
    D = Conv2D(64,
               kernel_size=4,
               strides=2,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv1")(D)

    D = LeakyReLU(0.2)(D)

    D = Conv2D(128,
               kernel_size=4,
               strides=2,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv2")(D)

    #D = BatchNormalization(momentum=0.9, epsilon=1e-5, name=base_name + "_bn1")(D, training=1)
    D = LeakyReLU(0.2)(D)

    D = Conv2D(256,
               kernel_size=4,
               strides=2,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv3")(D)
    #D = BatchNormalization(momentum=0.9, epsilon=1e-5, name=base_name + "_bn2")(D, training=1)
    D = LeakyReLU(0.2)(D)
    D = SelfAttention(ch=256)(D)

    D = Conv2D(512,
               kernel_size=4,
               strides=2,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv4")(D)

    #D = BatchNormalization(momentum=0.9, epsilon=1e-5, name=base_name + "_bn3")(D, training=1)
    D = LeakyReLU(0.2)(D)
    D = Conv2D(1,
               kernel_size=1,
               strides=1,
               padding="same",
               kernel_initializer=initializer_d,
               use_bias=False,
               name=base_name + "_conv5")(D)

    D = Flatten()(D)
    out = Dense(units=1, activation=None, name=base_name + "_out")(D)
    model = Model(in_D, out, name=base_name)

    return model
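
# Hypothetical usage (the input shape is an assumption, not from the original;
# the custom SelfAttention layer used above must be defined):
#   D = discriminator(input_shape=(256, 256, 3), base_name="disc_A")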
Example no. 24
        config = {'size': self.size, 'target_size': self.target_size}
        base_config = super(BilinearUpSampling2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


NUM_CLASSES = 2  # not_tumor, tumor
BATCH_SIZE = 32
N_EPOCHS = 50

# In[2]:

model = Sequential()

model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(256, 256, 3)))
model.add(
    Conv2D(64, (3, 3), activation='elu', padding='same', name='block1_conv1'))
model.add(
    Conv2D(64, (3, 3), activation='elu', padding='same', name='block1_conv2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))

# Block 2
model.add(
    Conv2D(128, (3, 3), activation='elu', padding='same', name='block2_conv1'))
model.add(
    Conv2D(128, (3, 3), activation='elu', padding='same', name='block2_conv2'))

model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))

# Block 3
model.add(
    Conv2D(
Example no. 25
def resnet_v2_stem(input):
    '''The stem of the pure Inception-v4 and Inception-ResNet-v2 networks. This is the input part of those networks.'''

    # Input shape is 299 * 299 * 3 (Tensorflow dimension ordering)
    x = Conv2D(32, (3, 3),
               kernel_regularizer=l2(0.0002),
               activation="relu",
               strides=(2, 2))(input)  # 149 * 149 * 32
    x = Conv2D(32, (3, 3), kernel_regularizer=l2(0.0002),
               activation="relu")(x)  # 147 * 147 * 32
    x = Conv2D(64, (3, 3),
               kernel_regularizer=l2(0.0002),
               activation="relu",
               padding="same")(x)  # 147 * 147 * 64

    x1 = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x2 = Conv2D(96, (3, 3),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                strides=(2, 2))(x)

    x = concatenate([x1, x2], axis=3)  # 73 * 73 * 160

    x1 = Conv2D(64, (1, 1),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="same")(x)
    x1 = Conv2D(96, (3, 3), kernel_regularizer=l2(0.0002),
                activation="relu")(x1)

    x2 = Conv2D(64, (1, 1),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="same")(x)
    x2 = Conv2D(64, (7, 1),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="same")(x2)
    x2 = Conv2D(64, (1, 7),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="same")(x2)
    x2 = Conv2D(96, (3, 3),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                padding="valid")(x2)

    x = concatenate([x1, x2], axis=3)  # 71 * 71 * 192

    x1 = Conv2D(192, (3, 3),
                kernel_regularizer=l2(0.0002),
                activation="relu",
                strides=(2, 2))(x)

    x2 = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = concatenate([x1, x2], axis=3)  # 35 * 35 * 384

    x = BatchNormalization(axis=3)(x)
    x = Activation("relu")(x)

    return x
Example no. 26
 def build_network(self, input_shape, output_shape):
     self.model = Sequential()
     self.model.add(
         Conv2D(64, (3, 3),
                strides=(1, 1),
                input_shape=input_shape,
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(
         Conv2D(64, (3, 3),
                strides=(1, 1),
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(MaxPooling2D(pool_size=(2, 2)))
     self.model.add(
          Conv2D(128, (3, 3),
                strides=(1, 1),
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(
         Conv2D(128, (3, 3),
                strides=(1, 1),
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(MaxPooling2D(pool_size=(2, 2)))
     self.model.add(
         Conv2D(256, (3, 3),
                strides=(1, 1),
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(
         Conv2D(256, (3, 3),
                strides=(1, 1),
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(MaxPooling2D(pool_size=(2, 2)))
     self.model.add(
         Conv2D(512, (3, 3),
                strides=(1, 1),
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(
         Conv2D(512, (3, 3),
                strides=(1, 1),
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(MaxPooling2D(pool_size=(2, 2)))
     self.model.add(
         Conv2D(512, (3, 3),
                strides=(1, 1),
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(
         Conv2D(512, (3, 3),
                strides=(1, 1),
                padding='same',
                activation='relu',
                kernel_initializer='uniform'))
     self.model.add(MaxPooling2D(pool_size=(2, 2)))
     self.model.add(Flatten())
     self.model.add(Dense(4096, activation='relu'))
     self.model.add(Dropout(0.5))
     self.model.add(Dense(4096, activation='relu'))
     self.model.add(Dropout(0.5))
     self.model.add(Dense(output_shape, activation='softmax'))
     self.model.compile(loss='categorical_crossentropy',
                        optimizer='sgd',
                        metrics=['accuracy'])
     self.model.summary()
Example No. 27
x, y = shuffle(img_data, Y, random_state=2)

X_train, X_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=2)

input_shape = img_data[0].shape
print(input_shape)

model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           input_shape=input_shape,
           padding='same',
           activation='relu',
           kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(
    Conv2D(32, (3, 3),
           activation='relu',
           padding='same',
           kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

epochs = 10
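
# The excerpt stops before the training step. A minimal compile/fit sketch
# consistent with the split and the `epochs` value above; the SGD settings and
# batch size are assumptions, and Y is assumed to be one-hot encoded.
from keras.optimizers import SGD

sgd = SGD(lr=0.01, decay=0.01 / epochs, momentum=0.9, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          epochs=epochs,
          batch_size=32)
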
Example No. 28
xtrain, ytrain, xvalid, yvalid = split_valid_set(x, y, 0.9)

from keras.layers import Input, Dense, Dropout, Flatten, Activation, Reshape, LeakyReLU
from keras.layers.convolutional import Conv2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint, History, CSVLogger

model = Sequential()

model.add(
    Conv2D(64,
           input_shape=xtrain[0].shape,
           kernel_size=(5, 5),
           padding='same',
           kernel_initializer='glorot_normal'))
model.add(
    Conv2D(64,
           kernel_size=(3, 3),
           padding='same',
           kernel_initializer='glorot_normal'))
model.add(LeakyReLU(alpha=1. / 20.))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Dropout(0.25))

model.add(
    Conv2D(128,
           kernel_size=(3, 3),
           padding='same',
           kernel_initializer='glorot_normal'))
# (the example is truncated here in the source; the trailing arguments above
#  are assumptions that mirror the earlier Conv2D layers)
Example No. 29
def letter_recognition(X_train, y_train, X_test, y_test):
    ### Prepare dataset

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train = X_train / 255.0
    X_test = X_test / 255.0

    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    class_num = y_test.shape[1]

    ### Prepare model
    print('preparing model...')
    model = Sequential()

    model.add(Conv2D(32, (3, 3), input_shape=X_train.shape[1:], padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Conv2D(256, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Flatten())
    model.add(Dropout(0.2))

    model.add(Dense(512, kernel_constraint=maxnorm(3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(256, kernel_constraint=maxnorm(3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(128, kernel_constraint=maxnorm(3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(class_num))
    model.add(Activation('softmax'))

    print('model created')

    model.compile(
        loss='categorical_crossentropy',
        optimizer='adam',
        metrics=[
            'accuracy',
            'AUC',
        ]
    )
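    # The string metric 'AUC' is resolved by tf.keras (TensorFlow 2.x);
    # older standalone Keras does not recognize it and needs an explicit
    # metric object or a custom metric function instead.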

    model.summary()

    np.random.seed(seed)
    model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=40)

    # Final evaluation of the model

    scores = model.evaluate(X_test, y_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1] * 100))

    ### save model ###
    model.save('number_recognition.hdf5')
    ##################

    print(model.predict_classes(X_test))
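
# A minimal sketch, not part of the original function, showing how the saved
# model can be reloaded for inference; the filename matches the model.save call
# above, and X_test is assumed to be the same preprocessed array.
from keras.models import load_model
import numpy as np

loaded = load_model('number_recognition.hdf5')
probs = loaded.predict(X_test)      # per-class probabilities
print(np.argmax(probs, axis=1))     # same labels as predict_classes
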
Example No. 30
# (the excerpt starts mid-statement in the source; the opening guard below is an
#  assumption: the standard Keras channels-first check)
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# y_train = keras.utils.to_categorical(y_train,10)
# y_test = keras.utils.to_categorical(y_test,10)

# create the sequential model
model = Sequential()
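
# LeNet-style CNN: two conv + max-pool stages followed by three fully connected
# layers and a 10-way softmax output.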

model.add(
    Conv2D(6, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(16, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))

optimizer = optimizers.Adam(lr=0.001,
                            beta_1=0.9,
                            beta_2=0.999,
                            epsilon=None,
                            decay=0.0,
                            amsgrad=False)