Example #1
def create_normal_wide_resnet(N=4, k=10):
    """
    Create vanilla conv Wide ResNet (N=4, k=10)
    """
    # input
    input = layers.Input((32, 32, 3))
    # 16 channels block
    x = layers.Conv2D(16, 3, padding="same")(input)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    # 1st block
    x = _create_normal_residual_block(x, 16 * k, N)
    # The original Wide ResNet downsamples with stride-2 convolutions,
    # but average pooling is used here because strided convolutions
    # shift the feature-map centers under OctConv
    # 2nd block
    x = layers.AveragePooling2D(2)(x)
    x = _create_normal_residual_block(x, 32 * k, N)
    # 3rd block
    x = layers.AveragePooling2D(2)(x)
    x = _create_normal_residual_block(x, 64 * k, N)
    # FC
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(10, activation="softmax")(x)

    model = Model(input, x)
    return model
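The residual-block helper is referenced but not shown. A minimal sketch, assuming N stacked Conv-BN-ReLU units with a 1x1 projection shortcut on the first unit so the channel counts match (the repo's actual helper may differ):

def _create_normal_residual_block(inputs, ch, N):
    # hypothetical reconstruction: N residual units of two 3x3 convs each
    x = inputs
    for i in range(N):
        # project the shortcut on the first unit so channels match
        skip = layers.Conv2D(ch, 1)(x) if i == 0 else x
        x = layers.Conv2D(ch, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation("relu")(x)
        x = layers.Conv2D(ch, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.Add()([x, skip])
        x = layers.Activation("relu")(x)
    return x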
Example #2
def make_lenet5():
    model = Sequential()
    # make the neural network
    model.add(
        layers.Conv2D(filters=6,
                      kernel_size=(5, 5),
                      strides=1,
                      activation=activations.tanh,
                      input_shape=(32, 32, 1)))
    # Keras only honors input_shape on the first layer of a Sequential
    # model, so the misleading shape arguments on later layers are omitted
    model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=2))
    model.add(
        layers.Conv2D(filters=16,
                      kernel_size=(5, 5),
                      strides=1,
                      activation=activations.tanh))
    model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=2))
    model.add(
        layers.Conv2D(filters=120,
                      kernel_size=(5, 5),
                      strides=1,
                      activation=activations.tanh))
    model.add(layers.Flatten())
    model.add(layers.Dense(units=84, activation=activations.tanh))
    model.add(layers.Dense(units=10, activation=activations.softmax))
    return model
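make_lenet5 returns an uncompiled model; a typical compile-and-inspect call (the optimizer and loss here are illustrative choices, not from the source):

model = make_lenet5()
model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()  # expects 32x32x1 inputs, e.g. zero-padded MNIST digits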
Example #3
def create_octconv_wide_resnet_5(alpha, N=4, k=10):
    """
    Create an OctConv Wide ResNet (N=4, k=10)
    """
    # Input
    input = layers.Input((32, 32, 3))
    # downsampling for lower
    low = layers.AveragePooling2D(2)(input)

    # 16 channels block
    high, low = OctConv2D(filters=16, alpha=alpha)([input, low])
    high = layers.BatchNormalization()(high)
    high = layers.Activation("relu")(high)
    low = layers.BatchNormalization()(low)
    low = layers.Activation("relu")(low)

    # 1st block
    high, low = _create_octconv_residual_block([high, low], 16 * k, N, alpha)
    # 2nd block
    high = layers.AveragePooling2D(2)(high)
    low = layers.AveragePooling2D(2)(low)
    high, low = _create_octconv_residual_block([high, low], 32 * k, N, alpha)
    # 3rd block
    high = layers.AveragePooling2D(2)(high)
    low = layers.AveragePooling2D(2)(low)
    high, low = _create_octconv_residual_block([high, low], 64 * k, N - 1,
                                               alpha)
    # last unit of the 3rd block: collapses the [high, low] pair into one tensor
    x = _create_octconv_last_residual_block([high, low], 64 * k, alpha)
    # FC
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(5, activation="softmax")(x)

    model = Model(input, x)
    return model
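OctConv2D and the block helpers come from the same repo and are not shown here. As the calls above indicate, the layer consumes and returns a [high, low] tensor pair, with the low-frequency branch at half resolution and alpha setting the channel split. A rough sketch of how a final block can collapse the pair back into a single tensor (a hypothetical helper, not the repo's _create_octconv_last_residual_block):

def _merge_high_low(high, low, ch):
    # upsample the half-resolution branch and fuse it with the high branch
    low_up = layers.UpSampling2D(2)(low)
    x = layers.Add()([layers.Conv2D(ch, 1)(high),
                      layers.Conv2D(ch, 1)(low_up)])
    x = layers.BatchNormalization()(x)
    return layers.Activation("relu")(x)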
Example #4
def build_paper_model():
    layer_one_input = keras.Input(shape=(240, 320, 3))
    lays = layers.Conv2D(96, (11, 11), strides=4,
                         activation='relu')(layer_one_input)
    lays = layers.AveragePooling2D(pool_size=2)(lays)
    lays = layers.Conv2D(256, (5, 5), activation='relu')(lays)
    lays = layers.AveragePooling2D(pool_size=2)(lays)
    lays = layers.Conv2D(384, (3, 3), activation='relu')(lays)
    lays = layers.AveragePooling2D()(lays)
    lays = layers.Conv2D(384, (3, 5), activation='relu')(lays)
    lays = layers.AveragePooling2D()(lays)
    lays = layers.Dense(256)(lays)
    lays = layers.Dropout(0.2)(lays)
    lays = layers.Dense(4800)(lays)

    #lays2 = layers.Conv2D(63,(9,9),strides=2,activation='relu')(layer_one_input)
    #lays2 = layers.AveragePooling2D(pool_size=2)(lays2)
    #lays2 = layers.Concatenate()([lays,lays2])
    #lays2 = layers.Conv2D(64,(5,5),strides=1,activation='relu')(lays2)
    #lays2 = layers.Conv2D(64,(5,5),strides=1,activation='relu')(lays2)
    lays = layers.Reshape(target_shape=(60, 80))(lays)

    model = keras.models.Model(inputs=[layer_one_input], outputs=lays)
    # `optimizer` and `psnr` are assumed to be defined at module level
    model.compile(optimizer=optimizer,
                  loss='mean_squared_logarithmic_error',
                  metrics=['accuracy', psnr])
    return model
Example #5
    def _build_classifier(self, l_rate):
        # the l_rate argument is immediately overridden by the tuned knob
        l_rate = self._knobs.get('l_rate')
        model = models.Sequential()
        model.add(
            layers.Conv2D(filters=6,
                          kernel_size=(3, 3),
                          activation='relu',
                          input_shape=(32, 32, 1)))
        model.add(layers.AveragePooling2D())
        model.add(
            layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
        model.add(layers.AveragePooling2D())
        model.add(layers.Flatten())
        model.add(layers.Dense(units=120, activation='relu'))
        model.add(layers.Dense(units=84, activation='relu'))
        model.add(layers.Dense(units=10, activation='softmax'))
        adam = Adam(lr=l_rate,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=None,
                    decay=0.0,
                    amsgrad=False)
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=adam,
                      metrics=['accuracy'])

        return model
Example #6
    def create_discriminator(self):
        """ Model to distinguish real images from generated ones."""

        discriminator = Sequential()
        discriminator.add(L.InputLayer(self.IMG_SHAPE))

        discriminator.add(
            L.Conv2D(16, kernel_size=(7, 7), padding='same', activation='elu'))
        discriminator.add(
            L.Conv2D(16, kernel_size=(7, 7), padding='same', activation='elu'))
        discriminator.add(L.AveragePooling2D(strides=2))

        discriminator.add(
            L.Conv2D(32, kernel_size=(5, 5), padding='same', activation='elu'))
        discriminator.add(
            L.Conv2D(32, kernel_size=(5, 5), padding='same', activation='elu'))
        discriminator.add(L.AveragePooling2D(strides=2))

        discriminator.add(
            L.Conv2D(64, kernel_size=(3, 3), padding='same', activation='elu'))
        discriminator.add(
            L.Conv2D(64, kernel_size=(3, 3), padding='same', activation='elu'))
        discriminator.add(L.AveragePooling2D(strides=2))

        discriminator.add(L.Flatten())
        discriminator.add(L.Dense(256, activation='tanh'))
        discriminator.add(L.Dense(2, activation=tf.nn.log_softmax))

        self.discriminator = discriminator
        print('Discriminator created successfully.')
Example #7
def LeNet5(n_classes):
    model = keras.Sequential()

    model.add(
        layers.Conv2D(filters=6,
                      kernel_size=(3, 3),
                      input_shape=(28, 28, 1),
                      padding='same'))
    model.add(layers.normalization.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.AveragePooling2D())

    model.add(layers.Conv2D(filters=16, kernel_size=(3, 3), padding='valid'))
    model.add(layers.normalization.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.AveragePooling2D())

    model.add(layers.Flatten())

    model.add(layers.Dense(units=120))
    model.add(layers.normalization.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Dense(units=84))
    model.add(layers.normalization.BatchNormalization())
    model.add(layers.Activation('relu'))

    model.add(layers.Dense(units=n_classes, activation='softmax'))

    #sgd = keras.optimizers.SGD(lr=0.1, nesterov=True)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer='sgd',
                  metrics=['accuracy'])

    return model
Example #8
def make_lenet5():
    model = Sequential()

    # LeNet-5: three conv stages with average pooling, then dense layers
    model.add(
        layers.Conv2D(filters=6,
                      input_shape=(32, 32, 1),
                      kernel_size=(5, 5),
                      strides=1,
                      activation=activations.tanh))
    # input_shape is only honored on the first layer, so it is given once
    model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=2))
    model.add(
        layers.Conv2D(filters=16,
                      kernel_size=(5, 5),
                      strides=1,
                      activation=activations.tanh))
    model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=2))
    model.add(
        layers.Conv2D(filters=120,
                      kernel_size=(5, 5),
                      strides=1,
                      activation=activations.tanh))
    model.add(layers.Flatten())
    model.add(layers.Dense(units=84, activation=activations.tanh))
    model.add(layers.Dense(units=10, activation=activations.softmax))
    return model
Example #9
def LeNet5():
    # `input_shape` and `classes` are assumed to be defined at module level
    # Instantiate an empty model
    model = Sequential()

    # C1 Convolutional Layer
    model.add(layers.Conv2D(6, kernel_size=(5, 5), strides=(1, 1), activation='tanh', input_shape=input_shape, padding="same"))

    # S2 Pooling Layer
    model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid'))

    # C3 Convolutional Layer
    model.add(layers.Conv2D(16, kernel_size=(5, 5), strides=(1, 1), activation='tanh', padding='valid'))

    # S4 Pooling Layer
    model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # C5 Fully Connected Convolutional Layer
    model.add(layers.Conv2D(120, kernel_size=(5, 5), strides=(1, 1), activation='tanh', padding='valid'))
    #Flatten the CNN output so that we can connect it with fully connected layers
    model.add(layers.Flatten())

    # FC6 Fully Connected Layer
    model.add(layers.Dense(84, activation='tanh'))

    #Output Layer with softmax activation
    model.add(layers.Dense(classes, activation='softmax'))

    # Compile the model
    model.compile(loss=keras.losses.categorical_crossentropy, optimizer='SGD', metrics=["accuracy"])
    return model
Example #10
def M7():
    model = models.Sequential()

    #VGG16 like

    model.add(
        layers.Conv2D(64,
                      kernel_size=kernel_size_3,
                      input_shape=(230, 510, 1),
                      activation='relu'))
    model.add(layers.AveragePooling2D(pool_size=pool_size_2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(64, kernel_size=kernel_size_3, activation='relu'))
    model.add(layers.AveragePooling2D(pool_size=pool_size_2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(64, kernel_size=kernel_size_3, activation='relu'))
    model.add(layers.AveragePooling2D(pool_size=pool_size_2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(64, kernel_size=kernel_size_3, activation='relu'))
    model.add(layers.AveragePooling2D(pool_size=pool_size_2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(64, kernel_size=kernel_size_5, activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=pool_size_2))
    model.add(layers.Dropout(0.4))

    model.add(layers.Flatten())
    model.add(layers.Dense(500, activation='relu'))
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(3, activation='softmax'))

    return model
Example #11
def gen_model(shape):
    input = layers.Input(shape=(shape))
    # First conv layer
    c_1 = layers.Conv2D(48, (3, 8), padding='same')(input)
    c_2 = layers.Conv2D(32, (3, 32), padding='same')(input)
    c_3 = layers.Conv2D(16, (3, 64), padding='same')(input)
    c_4 = layers.Conv2D(16, (3, 90), padding='same')(input)
    conv_1 = layers.Concatenate()([c_1, c_2, c_3, c_4])
    x = layers.BatchNormalization()(conv_1)
    x = layers.ReLU()(x)
    # x = layers.MaxPooling2D((5,5))(x)
    x = layers.AveragePooling2D((5, 5))(x)
    # Second conv layer
    x = layers.Conv2D(224, 5)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    # x = layers.MaxPooling2D((6,4))(x)
    x = layers.AveragePooling2D((6, 4))(x)
    # Output layer
    x = layers.Flatten()(x)
    # x = layers.Dropout(0.5)(x)
    x = layers.Dense(64)(x)
    x = layers.Dense(1, activation='sigmoid')(x)
    model = models.Model(input, x)
    return model
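Any input whose spatial dimensions survive the two pooling stages works; the shape below is an illustrative spectrogram-like input, not from the source:

model = gen_model((130, 300, 1))  # illustrative shape
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])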
Example #12
def M12():
    model = models.Sequential()

    #VGG16 like

    model.add(
        layers.Conv2D(64,
                      kernel_size=kernel_size_3,
                      input_shape=(160, 120, 1),
                      activation='relu'))
    model.add(layers.AveragePooling2D(pool_size=pool_size_2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(64, kernel_size=kernel_size_3, activation='relu'))
    model.add(layers.AveragePooling2D(pool_size=pool_size_2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(64, kernel_size=kernel_size_3, activation='relu'))
    model.add(layers.AveragePooling2D(pool_size=pool_size_2))
    model.add(layers.BatchNormalization())

    model.add(layers.Conv2D(128, kernel_size=kernel_size_5, activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=pool_size_2))

    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())

    model.add(layers.Dense(200, activation='relu'))
    #model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='linear'))

    return model
Example #13
def thanh_net(input_shape, num_classes):

    model = keras.Sequential()

    model.add(layers.Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
    model.add(layers.AveragePooling2D())
    #model.add(layers.BatchNormalization)

    model.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
    model.add(layers.AveragePooling2D())
    #model.add(layers.BatchNormalization)

    model.add(layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
    model.add(layers.AveragePooling2D())
    #model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
    #model.add(layers.AveragePooling2D())

    # model.add(layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
    # model.add(layers.AveragePooling2D())

    model.add(layers.Flatten())
    #model.add(layers.Dropout(0.8))
    model.add(layers.Dense(units=120, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(units=84, activation='relu'))
    #model.add(layers.Dropout(0.5))
    model.add(layers.Dense(units=num_classes, activation='softmax'))

    return model
Example #14
def create_discriminator_model():

    model = Sequential()
    model.add(L.InputLayer(input_shape=data_sample.IMG_SHAPE))
    model.add(L.Conv2D(filters=32, kernel_size=[3, 3]))
    model.add(L.AveragePooling2D(pool_size=[2, 2]))
    model.add(L.Activation(activation='elu'))
    model.add(L.Conv2D(filters=64, kernel_size=[3, 3]))
    model.add(L.AveragePooling2D(pool_size=[2, 2]))
    model.add(L.Activation(activation='elu'))
    model.add(L.Flatten())
    model.add(L.Dense(units=256, activation='tanh'))
    model.add(L.Dense(units=2, activation=tf.nn.log_softmax))

    return model
Example #15
def decode_graph(input_tensor, skip_layers, use_bias=True, train_bn=True):

    conv_name_base = "decode_graph"
    with tf.name_scope(conv_name_base) as sc:
        downsample_16 = KL.AveragePooling2D(pool_size=(16,16))(input_tensor)
        downsample_32 = KL.AveragePooling2D(pool_size=(8,8))(input_tensor)
        downsample_64 = KL.AveragePooling2D(pool_size=(4,4))(input_tensor)
        downsample_128 = KL.AveragePooling2D(pool_size=(2,2))(input_tensor)
        x = KL.Conv2D(128, (1,1), activation="relu",
                      name="decode_conv_1")(skip_layers.pop())
        x = KL.Concatenate()([x, downsample_16])
        x = KL.Conv2D(64, (1,1), activation="relu",
                      name="decode_combine_conv_1")(x)
        x = res_block(x, 3, [64, 64, 64], "decode_res_1", use_bias, train_bn)
        for index, downsample_item in enumerate([downsample_32, downsample_64,
                                                 downsample_128, input_tensor]):
            x = KL.Concatenate()([KL.UpSampling2D(size=(2,2))(x),
                                  downsample_item])
            x = KL.Conv2D(129, (1,1), activation="relu",
                          name="decode_conv_"+str(index+2))(x)
            x = KL.Concatenate()([x, skip_layers.pop()])
            x = KL.Conv2D(64, (1,1), activation="relu",
                          name="decode_combine_conv_"+str(index+2))(x)
            x = res_block(x, 3, [64, 64, 64], "decode_res_" + str(index+2),
                          use_bias, train_bn)

        decode_output = KL.Conv2D(1, (1,1), name="decode_result")(x)
        output_mask = KL.Activation("sigmoid",
                             name="prob_output")(decode_output)
        return decode_output, output_mask
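res_block is assumed from the surrounding repo. A plausible reconstruction as a small identity residual block in the usual bottleneck style (note the identity add requires the last filter count to match the input channels, which holds for the [64, 64, 64] calls above):

def res_block(input_tensor, kernel_size, filters, name, use_bias=True,
              train_bn=True):
    # hypothetical reconstruction, not the repo's exact helper
    f1, f2, f3 = filters
    x = KL.Conv2D(f1, (1, 1), use_bias=use_bias, name=name + "_a")(input_tensor)
    x = KL.BatchNormalization(name=name + "_bn_a")(x, training=train_bn)
    x = KL.Activation("relu")(x)
    x = KL.Conv2D(f2, (kernel_size, kernel_size), padding="same",
                  use_bias=use_bias, name=name + "_b")(x)
    x = KL.BatchNormalization(name=name + "_bn_b")(x, training=train_bn)
    x = KL.Activation("relu")(x)
    x = KL.Conv2D(f3, (1, 1), use_bias=use_bias, name=name + "_c")(x)
    x = KL.BatchNormalization(name=name + "_bn_c")(x, training=train_bn)
    x = KL.Add()([x, input_tensor])
    return KL.Activation("relu")(x)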
Example #16
def create_complex_model(param: Param) -> keras.Model:
    inputs = keras.Input((28, 28, 1))
    x = inputs
    if param is not None:
        x = layers.GaussianNoise(param.noise_stddev)(x)
    x_re = x
    # the imaginary part starts as zeros shaped like the real part
    x_im = layers.Lambda(lambda z: K.zeros_like(z))(x)

    for i in range(len(param.conv_filters)):
        x_re, x_im = complexize_kernel(layers.Conv2D,
                                       param.conv_filters[i],
                                       kernel_size=param.kernel_sizes[i],
                                       strides=param.strides[i],
                                       padding='same',
                                       activation=layers.Activation("tanh"))(
                                           x_re, x_im)
        x_re = layers.BatchNormalization(axis=-1)(x_re)
        x_im = layers.BatchNormalization(axis=-1)(x_im)
        if param.pool_sizes[i] is not None:
            pool_size = param.pool_sizes[i]
            pool_strides = param.pool_strides[i]
            x_re = layers.AveragePooling2D(pool_size=pool_size,
                                           strides=pool_strides)(x_re)
            x_im = layers.AveragePooling2D(pool_size=pool_size,
                                           strides=pool_strides)(x_im)
        if param.conv_dropout_rates[i] is not None:
            dropout_rate = param.conv_dropout_rates[i]
            x_re = layers.Dropout(dropout_rate)(x_re)
            x_im = layers.Dropout(dropout_rate)(x_im)

    x_re = layers.Flatten()(x_re)
    x_im = layers.Flatten()(x_im)
    for units, dropout_rate in zip(param.dense_units,
                                   param.dense_dropout_rates):
        x_re, x_im = complexize_kernel(layers.Dense,
                                       units,
                                       activation=layers.Activation("tanh"))(
                                           x_re, x_im)
        if dropout_rate is not None:
            x_re = layers.Dropout(dropout_rate)(x_re)
            x_im = layers.Dropout(dropout_rate)(x_im)

    # x = layers.Lambda(lambda d: K.sqrt(K.square(d[0]) + K.square(d[1])))([x_re, x_im])
    if param.l2_constrained_scale:
        x = layers.Lambda(lambda z: K.l2_normalize(z, axis=1) *
                          param.l2_constrained_scale)(x_re)
        outputs = layers.Dense(10,
                               kernel_constraint=keras.constraints.UnitNorm(),
                               use_bias=False)(x)
    else:
        outputs = layers.Dense(10)(x_re)
    model = keras.Model(inputs=inputs, outputs=outputs)

    if param.center_loss_margin:
        loss = CenterLoss(param.center_loss_margin)
    else:
        loss = tf.losses.softmax_cross_entropy
    model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
    return model
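complexize_kernel is also assumed from the surrounding repo. A plausible reconstruction builds two real-valued layers that play the roles of the real and imaginary parts of one complex kernel, so that out_re = W_re·x_re - W_im·x_im and out_im = W_re·x_im + W_im·x_re (the actual helper may differ):

def complexize_kernel(layer_cls, *args, activation=None, **kwargs):
    # hypothetical reconstruction: each real layer is reused on both the
    # real and imaginary inputs, giving the complex product W * x
    layer_re = layer_cls(*args, **kwargs)
    layer_im = layer_cls(*args, **kwargs)

    def apply(x_re, x_im):
        out_re = layers.Subtract()([layer_re(x_re), layer_im(x_im)])
        out_im = layers.Add()([layer_re(x_im), layer_im(x_re)])
        if activation is not None:
            out_re, out_im = activation(out_re), activation(out_im)
        return out_re, out_im

    return apply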
Example #17
	def build_default_model(self):

		K.set_learning_phase(True)

		inputs = keras.layers.Input(
			shape = self.data.size_input_image,
			name = "input"
		)

		x = layer.Conv2D(32, kernel_size = (6,6),
				activation = "relu",
				padding = "same")(inputs)
		x = layer.AveragePooling2D( pool_size = (2,2), padding = "same" )(x)

		x = layer.Conv2D(64, kernel_size = (6,6),
				activation = "relu",
				padding = "same")(x)
		x = layer.AveragePooling2D( pool_size = (2,2), padding = "same" )(x)

		x = layer.Conv2D(128, kernel_size = (6,6),
				activation = "relu",
				padding = "same")(x)
		x = layer.AveragePooling2D( pool_size = (2,2), padding = "same" )(x)

		x = layer.Conv2D(256, kernel_size = (6,6),
				activation = "relu",
				padding = "same")(x)
		x = layer.AveragePooling2D( pool_size = (2,2), padding = "same" )(x)

		x = layer.Flatten()(x)

		x = layer.Dense(128, activation = "relu")(x)
		x = layer.Dropout(0.5)(x)
		x = layer.Dense(128, activation = "relu")(x)
		x = layer.Dropout(0.5)(x)

		outputs = layer.Dense(self.data.number_jet_categories, activation = "softmax")(x)

		pre_net = models.Model(inputs=[inputs], outputs=[outputs])
		pre_net.summary()

		# main_net

		main_input = keras.layers.Input(
				shape = (self.data.n_input_neurons, ),
				name = "main_input")

		y = layer.Concatenate()([main_input, pre_net.output])
		y = layer.Dense(100, activation="relu")(y)
		y = layer.Dropout(0.5)(y)
		y = layer.Dense(100, activation = "relu")(y)
		y = layer.Dropout(0.5)(y)
		y = layer.Dense(self.data.n_output_neurons, activation = "softmax")(y)

		main_net = models.Model(inputs=[inputs, main_input], outputs=[y])
		main_net.summary()

		return pre_net, main_net
Example #18
def sMGN(inputs,
         _eval,
         training=None,
         return_all=True,
         return_mgn=False,
         l2_norm=True):
    reg[0] = l1l2_reg
    b2 = basicUnit(inputs, name='my_bu2', training=training)
    shape = b2._keras_shape[-3:-1]
    b2_g_max = KL.MaxPooling2D((shape[0], shape[1]))(b2)
    b2_g_ave = KL.AveragePooling2D((shape[0], shape[1]))(b2)
    b2_g = KL.Concatenate(axis=-1)([b2_g_max, b2_g_ave])

    b2_s_max = KL.MaxPooling2D((shape[0] // 2, shape[1]),
                               strides=(shape[0] // 2, shape[1]),
                               padding='valid')(b2)
    b2_s_ave = KL.AveragePooling2D((shape[0] // 2, shape[1]),
                                   strides=(shape[0] // 2, shape[1]),
                                   padding='valid')(b2)
    b2_s = KL.Concatenate(axis=-1)([b2_s_max, b2_s_ave])

    b3_s_max = KL.MaxPooling2D((shape[0] // 3, shape[1]),
                               strides=(shape[0] // 3, shape[1]),
                               padding='valid')(b2)
    b3_s_ave = KL.AveragePooling2D((shape[0] // 3, shape[1]),
                                   strides=(shape[0] // 3, shape[1]),
                                   padding='valid')(b2)
    b3_s = KL.Concatenate(axis=-1)([b3_s_max, b3_s_ave])

    b2_s1, b2_s2 = KL.Lambda(lambda x: [x[:, 0:1, :, :], x[:, 1:2, :, :]])(
        b2_s)
    b3_s1, b3_s2, b3_s3 = KL.Lambda(
        lambda x: [x[:, 0:1, :, :], x[:, 1:2, :, :], x[:, 2:3, :, :]])(b3_s)
    reg[1] = None
    if return_all:
        b_all = [
            DarknetConv2D_BN_Linear(256,
                                    1,
                                    strides=(2, 2),
                                    name='my_dlt' + str(i),
                                    training=training)(x)
            for i, x in enumerate([b2_g, b2_s1, b2_s2, b3_s1, b3_s2, b3_s3])
        ]
        if return_mgn:
            return KL.Lambda(lambda x: tf.nn.l2_normalize(
                tf.concat(x, axis=-1), dim=-1))(b_all)
        b_all = KL.Lambda(lambda x: [tf.nn.l2_normalize(f, dim=-1) for f in x],
                          name='l2_norm')(b_all) if _eval else b_all
        return KL.Lambda(lambda x: tf.concat(x, axis=-1), )(b_all)
    else:
        bg = DarknetConv2D_BN_Linear(256,
                                     1,
                                     strides=(2, 2),
                                     name='my_dlt' + str(0),
                                     training=training)(b2_g)
        return KL.Lambda(lambda x: tf.nn.l2_normalize(x, dim=-1))(
            bg) if _eval else bg
Example #19
def simple_CNN(input_shape, num_classes):

    model = models.Sequential()
    model.add(
        layers.Convolution2D(filters=16,
                             kernel_size=(7, 7),
                             padding='same',
                             name='image_array',
                             input_shape=input_shape))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Convolution2D(filters=16, kernel_size=(7, 7), padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
    model.add(layers.Dropout(.5))

    model.add(
        layers.Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
    model.add(layers.Dropout(.5))

    model.add(
        layers.Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
    model.add(layers.Dropout(.5))

    model.add(
        layers.Convolution2D(filters=128, kernel_size=(3, 3), padding='same'))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Convolution2D(filters=128, kernel_size=(3, 3), padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu'))
    model.add(layers.AveragePooling2D(pool_size=(2, 2), padding='same'))
    model.add(layers.Dropout(.5))

    model.add(
        layers.Convolution2D(filters=256, kernel_size=(3, 3), padding='same'))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Convolution2D(filters=num_classes,
                             kernel_size=(3, 3),
                             padding='same'))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Activation('softmax', name='predictions'))
    return model
Example #20
    def _pooling_function(self, inputs, pool_size, strides, padding,
                          data_format):
        # pool the real and imaginary parts separately, then recombine
        input_real, input_imag = complex_to_real_imag(inputs)
        real_outputs = KL.AveragePooling2D(pool_size, strides,
                                           padding)(input_real)
        imag_outputs = KL.AveragePooling2D(pool_size, strides,
                                           padding)(input_imag)
        outputs = real_imag_to_complex(real_outputs, imag_outputs)
        return outputs
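The conversion helpers are not shown; a plausible definition, assuming complex64 tensors (an assumption, not the repo's code):

import tensorflow as tf

def complex_to_real_imag(x):
    # split one complex tensor into two real-valued tensors
    return tf.math.real(x), tf.math.imag(x)

def real_imag_to_complex(x_re, x_im):
    # recombine the separately pooled parts into one complex tensor
    return tf.complex(x_re, x_im)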
Example #21
  def Discriminator(self, input_shape):

      # Assuming model is not recurrent
      model = Sequential()

      # Architecture Based on EEG Classification Model at https://arxiv.org/pdf/1703.05051.pdf
    
      model.add(layers.Conv2D(25, kernel_size=(1,11), strides=(1,1), 
                               padding='same', input_shape=input_shape, 
                               kernel_initializer='he_normal'))
                               #kernel_regularizer=regularizers.l2(.001)))

      model.add(layers.Conv2D(25, kernel_size=(22,1), strides=(1,1),
                              padding='valid',
                              kernel_initializer='he_normal'))
                              #kernel_regularizer=regularizers.l2(.001)))
      model.add(layers.LeakyReLU(alpha=0.2))
      model.add(layers.AveragePooling2D(pool_size=(1,3)))
      model.add(layers.Dropout(rate=0.2))


      model.add(layers.Conv2D(50, kernel_size=(1,11),
                              padding='same',
                              kernel_initializer='he_normal'))
                              #kernel_regularizer=regularizers.l2(.001)))
      model.add(layers.LeakyReLU(alpha=0.2))
      model.add(layers.AveragePooling2D(pool_size=(1,3)))
      model.add(layers.Dropout(rate=0.2))

      model.add(layers.Conv2D(100, kernel_size=(1,11), strides=(1,1),
                        padding='same',
                        kernel_initializer='he_normal'))
                        #kernel_regularizer=regularizers.l2(.001)))
      model.add(layers.LeakyReLU(alpha=0.2))
      model.add(layers.AveragePooling2D(pool_size=(1,3)))
      model.add(layers.Dropout(rate=0.2))

      model.add(layers.Conv2D(200, kernel_size=(1,11), strides=(1,1),
                        padding='same',
                        kernel_initializer='he_normal'))
                        #kernel_regularizer=regularizers.l2(.001)))                          
      model.add(layers.LeakyReLU(alpha=0.2))
      model.add(layers.AveragePooling2D(pool_size=(1,3)))
      model.add(layers.Dropout(rate=0.2))

      model.add(layers.Flatten())
      model.add(layers.Dense(1, activation='sigmoid'))
      #model.summary()

      eeg = layers.Input(shape=input_shape)
      validity = model(eeg)

      fixed = Container(eeg, validity)

      return Model(eeg, validity), fixed
Example #22
    def __init__(self, input_shape=(112, 112, 3), num_clases=2):
        self.__lenet = keras.Sequential()
        self.__lenet.add(layers.Conv2D(filters=6, kernel_size=(3, 3),
                                       activation='relu',
                                       input_shape=input_shape))
        self.__lenet.add(layers.AveragePooling2D())
        self.__lenet.add(layers.Conv2D(filters=16, kernel_size=(3, 3),
                                       activation='relu'))
        self.__lenet.add(layers.AveragePooling2D())
        self.__lenet.add(layers.Flatten())
        self.__lenet.add(layers.Dense(units=120, activation='relu'))
        self.__lenet.add(layers.Dense(units=84, activation='relu'))
        self.__lenet.add(layers.Dense(units=num_clases, activation='sigmoid'))
Example #23
def LeNet():
    network = models.Sequential()
    network.add(layers.Conv2D(filters=6, kernel_size=(3, 3),
                              activation='relu', input_shape=(28, 28, 1)))
    network.add(layers.AveragePooling2D((2, 2)))
    network.add(layers.Conv2D(filters=16, kernel_size=(3, 3),
                              activation='relu'))
    network.add(layers.AveragePooling2D((2, 2)))
    network.add(layers.Conv2D(filters=120, kernel_size=(3, 3),
                              activation='relu'))
    network.add(layers.Flatten())
    network.add(layers.Dense(84, activation='relu'))
    network.add(layers.Dense(10, activation='softmax'))
    return network
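The network can then be compiled and trained in the usual way; the optimizer, loss, and shapes below are illustrative, not from the source:

network = LeNet()
network.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
# x_train: (n, 28, 28, 1) float array, y_train: integer labels 0-9
# network.fit(x_train, y_train, epochs=5, batch_size=128)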
Example #24
def create_models():
    input = layers.Input((28, 28, 1))
    x = conv_bn_relu(input, 32)
    x = layers.AveragePooling2D(2)(x)
    x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    x = conv_bn_relu(x, 128)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(10, activation="softmax")(x)

    return Model(input, x)
Example #25
def create_models():
    input = layers.Input((28,28,1))
    x = conv_bn_relu(input, 32)
    x = layers.AveragePooling2D(2)(x)
    x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    x = conv_bn_relu(x, 128)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, 1, 10.0)(x)

    return Model(input, x)
Example #26
  def Generator(self, input_shape):

    # Assuming input is 21x729x1
    input1 = Input(shape=(input_shape))
    c,t,d = input_shape

    h = layers.Conv2D(25, kernel_size=(1,11), strides=(1,1), 
                             padding='same', input_shape=input_shape, 
                             kernel_initializer='he_normal',
                             kernel_regularizer=regularizers.l2(.001))(input1)

    h = layers.Conv2D(25, kernel_size=(21,1), strides=(1,1),
                            padding='valid',
                            kernel_initializer='he_normal',
                            kernel_regularizer=regularizers.l2(.001))(h)

    h = layers.LeakyReLU(alpha=0.2)(h)
    h = layers.AveragePooling2D(pool_size=(1,3))(h)


    h = layers.Conv2D(50, kernel_size=(1,11),
                            padding='same',
                            kernel_initializer='he_normal',
                            kernel_regularizer=regularizers.l2(.001))(h)
    h = layers.LeakyReLU(alpha=0.2)(h)
    h = layers.AveragePooling2D(pool_size=(1,3))(h)

    h = layers.Conv2D(100, kernel_size=(1,11), strides=(1,1),
                      padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(.001))(h)
    h = layers.LeakyReLU(alpha=0.2)(h)
    h = layers.AveragePooling2D(pool_size=(1,3))(h)

    h = layers.Conv2D(200, kernel_size=(1,11), strides=(1,1),
                      padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(.001))(h)                          
    h = layers.LeakyReLU(alpha=0.2)(h)
    h = layers.AveragePooling2D(pool_size=(1,3))(h)

    h = layers.Flatten()(h)
    h = layers.Dense(729, activation='tanh')(h)

    gen_output = layers.Lambda(EEG_Concatenate,
                               output_shape=(c+1, t, d))([input1, h])

    model = Model(inputs=input1, outputs=gen_output)

    #model.summary()

    return model
Example #27
    def __init__(self, input_shape, num_classes):
        super().__init__()
        self.add(layers.Conv2D(6, kernel_size=(5, 5), strides=(1, 1), activation='tanh', input_shape=input_shape, padding="same"))
        self.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid'))
        self.add(layers.Conv2D(16, kernel_size=(5, 5), strides=(1, 1), activation='tanh', padding='valid'))
        self.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
        self.add(layers.Conv2D(120, kernel_size=(5, 5), strides=(1, 1), activation='tanh', padding='valid'))
        self.add(layers.Flatten())
        self.add(layers.Dense(84, activation='tanh'))
        self.add(layers.Dense(num_classes, activation='softmax'))

        self.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer="sgd")
Example #28
def create_models():
    input = layers.Input((28, 28, 1))
    x = conv_bn_relu(input, 32)
    x = layers.AveragePooling2D(2)(x)
    x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    x = conv_bn_relu(x, 128)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(2, name="latent_features",
                     kernel_initializer="he_normal")(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, 1, 5.0)(x)

    return Model(input, x)
Example #29
def lenet_model():
  adam = Adam(lr=0.00001)
  model = Sequential()
  model.add(layers.Conv2D(6,(5,5), activation = 'relu', input_shape = (224,224,3), name = 'conv1')) 
  model.add(layers.AveragePooling2D((2, 2), name = 'avgpool1'))
  model.add(layers.Conv2D(16,(5,5), activation = 'relu', name = 'conv2')) 
  model.add(layers.AveragePooling2D((2, 2), name = 'avgpool2'))
  model.add(layers.Flatten(name = 'flatten'))
  model.add(layers.Dense(120, activation = 'relu', name = 'dense1'))
  model.add(layers.Dense(84, name='dense2'))  # softmax failed to train here, so the output uses sigmoid instead
  model.add(layers.Dense(1, activation='sigmoid', name='dense3'))
  # compile the model
  model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
  return model
Example #30
def sequential2(wd_rate=None):
    # a float weight-decay rate should be supplied; the None default
    # does not give keras.regularizers.l2 a usable coefficient
    reg = keras.regularizers.l2(wd_rate)
    inputs = kl.Input(shape=[200, 200, 3])
    x = kl.Conv2D(filters=20,
                  kernel_size=[5, 5],
                  use_bias=True,
                  activation='relu',
                  kernel_regularizer=reg)(inputs)
    x = kl.Conv2D(filters=20,
                  kernel_size=[5, 5],
                  use_bias=True,
                  activation='relu',
                  kernel_regularizer=reg)(x)
    x = kl.AveragePooling2D(pool_size=[2, 2])(x)
    x = kl.Conv2D(filters=50,
                  kernel_size=[5, 5],
                  use_bias=True,
                  activation='relu',
                  kernel_regularizer=reg)(x)
    x = kl.AveragePooling2D(pool_size=[2, 2])(x)
    x = kl.Conv2D(filters=50,
                  kernel_size=[5, 5],
                  use_bias=True,
                  activation='relu',
                  kernel_regularizer=reg)(x)
    x = kl.AveragePooling2D(pool_size=[2, 2])(x)
    x = kl.Conv2D(filters=50,
                  kernel_size=[5, 5],
                  use_bias=True,
                  activation='relu',
                  kernel_regularizer=reg)(x)
    x = kl.AveragePooling2D(pool_size=[2, 2])(x)
    x = kl.Conv2D(filters=35,
                  kernel_size=[5, 5],
                  use_bias=True,
                  activation='relu',
                  kernel_regularizer=reg)(x)
    x = kl.Flatten()(x)
    x = kl.Dense(units=60,
                 activation='relu',
                 use_bias=True,
                 kernel_regularizer=reg)(x)
    x = kl.Dropout(rate=0.20)(x)
    x = kl.Dense(units=29,
                 activation='softmax',
                 use_bias=True,
                 kernel_regularizer=reg)(x)
    return keras.Model(inputs=(inputs, ), outputs=(x, ))
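Because the weight-decay rate feeds keras.regularizers.l2 directly, call the builder with an explicit float; the value, optimizer, and loss below are illustrative:

model = sequential2(wd_rate=1e-4)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])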