Example #1
    def _build_residual_block(self, x, index):
        # mc = self.config.model
        mc = self.config

        in_x = x
        res_name = "res" + str(index)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv1-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm1")(x)
        x = Activation("relu", name=res_name + "_relu1")(x)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv2-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm2")(x)
        x = Add(name=res_name + "_add")([in_x, x])
        x = Activation("relu", name=res_name + "_relu2")(x)
        return x
Example #2
    def build(self):
        """
        Builds the full Keras model and stores it in self.model.
        """
        mc = self.config
        in_x = x = Input((12, 8, 8))

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)

        policy_out = Dense(mc.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=4,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu",
                  name="value_dense")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="chess_model")
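The two heads are trained jointly, one loss per output. A minimal compile sketch (the optimizer choice and loss weights below are assumptions, not the original project's settings):

        # assumed settings: cross-entropy for the policy head, MSE for the
        # value head, appended at the end of build()
        self.model.compile(optimizer="adam",
                           loss=["categorical_crossentropy", "mean_squared_error"],
                           loss_weights=[1.0, 1.0])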
Example #3
def InceptionBlock_v2(inputs, num_filters, kernel_size, activation, padding,
                      strides, kernel_initializer="he_normal"):
    #     print("testline2:",inputs.shape)

    cut = inputs
    layer = Conv2D(int(num_filters / 4), 1,
                   padding=padding,
                   strides=strides,
                   activation=activation,
                   kernel_initializer=kernel_initializer)(inputs)

    layer = Conv2D(int(num_filters / 4), kernel_size,
                   padding=padding,
                   activation=activation,
                   strides=strides,
                   kernel_initializer=kernel_initializer)(layer)
    layer = Conv2D(num_filters, 1,
                   padding=padding,
                   strides=strides,
                   activation=activation,
                   kernel_initializer=kernel_initializer)(layer)

    layer = add([layer, cut])  # residual connection; explicit add() works across Keras versions

    #     print("testline22:", layer.shape)
    return layer
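The element-wise add only lines up when the block preserves the input shape, so num_filters must equal the input channel count and strides must be 1. A usage sketch with assumed shapes and the same imports as the snippet above:

# usage sketch (assumed shapes): 64 input channels, num_filters=64, strides=1
inputs = Input(shape=(32, 32, 64))
out = InceptionBlock_v2(inputs, num_filters=64, kernel_size=3,
                        activation="relu", padding="same", strides=1)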
Example #4
    def __init__(self, filters):
        super(DoubleConvBlock, self).__init__()

        self.deconv = Conv2DTranspose(filters=filters[0],
                                      kernel_size=(3, 3),
                                      strides=1,
                                      padding="same")

        self.concat = Concatenate()

        self.conv1 = Conv2D(
            filters=filters[1],
            kernel_size=(3, 3),
            strides=1,
            padding="same",
        )
        self.bn1 = BatchNormalization()

        self.act1 = Activation("relu")

        self.conv2 = Conv2D(
            filters=filters[2],
            kernel_size=(3, 3),
            strides=1,
            padding="same",
        )

        self.bn2 = BatchNormalization()

        self.act2 = Activation("relu")
Example #5
def discriminator_model():
    """Build discriminator architecture."""
    n_layers, use_sigmoid = 3, False
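    # ndf (base filter count) and input_shape_discriminator are assumed to be
    # module-level constants defined elsewhere in the project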
    inputs = Input(shape=input_shape_discriminator)

    x = Conv2D(filters=ndf, kernel_size=(4, 4), strides=2, padding='same')(inputs)
    x = LeakyReLU(0.2)(x)

    nf_mult, nf_mult_prev = 1, 1
    for n in range(n_layers):
        nf_mult_prev, nf_mult = nf_mult, min(2**n, 8)
        x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=2, padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.2)(x)

    nf_mult_prev, nf_mult = nf_mult, min(2**n_layers, 8)
    x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    x = Conv2D(filters=1, kernel_size=(4, 4), strides=1, padding='same')(x)
    if use_sigmoid:
        x = Activation('sigmoid')(x)

    x = Flatten()(x)
    x = Dense(1024, activation='tanh')(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x, name='Discriminator')
    return model
Example #6
def initialize_model():

    model = Sequential()
    model.add(
        Conv2D(40, 11, strides=1, padding='same', input_shape=(1, 1024, 4)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(40, 11, strides=1, padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(1, 64)))

    model.add(Flatten())

    model.add(Dense(units=500))

    model.add(Dense(units=640))

    model.add(Reshape((1, 16, 40)))

    model.add(Conv2DTranspose(40, 11, strides=(1, 64), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2DTranspose(40, 11, strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(4, 11, strides=1, padding='same', activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam', loss='mse')

    return model
Example #7
    def create_posla_net(self, rows=120, column=320, channel=1):
        # model setting

        inputShape = (rows, column, channel)

        activation = 'relu'
        keep_prob_conv = 0.25
        keep_prob_dense = 0.5  # passed to Dropout, i.e. the fraction of units dropped

        # init = 'glorot_normal'
        # init = 'he_normal'
        init = 'he_uniform'
        chanDim = -1
        classes = 3

        model = Sequential()

        # CONV => RELU => POOL
        model.add(
            Conv2D(3, (3, 3),
                   padding="valid",
                   input_shape=inputShape,
                   kernel_initializer=init,
                   activation=activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(9, (3, 3),
                   padding="valid",
                   kernel_initializer=init,
                   activation=activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(18, (3, 3),
                   padding="valid",
                   kernel_initializer=init,
                   activation=activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(32, (3, 3),
                   padding="valid",
                   kernel_initializer=init,
                   activation=activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())

        model.add(Dense(80, kernel_initializer=init, activation=activation))
        model.add(Dropout(keep_prob_dense))

        model.add(Dense(15, kernel_initializer=init, activation=activation))
        model.add(Dropout(keep_prob_dense))

        # softmax classifier
        model.add(Dense(classes, activation='softmax'))

        self.model = model
Example #8
File: ACGAN.py Project: fernadn/GAN
def build_generator(channels, num_classes, latent_dim):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    # 14x14x128
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    # 28x28x128
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    # 28x28x64
    model.add(Conv2D(channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))
    # 28x28xchannels

    model.summary()

    noise = Input(shape=(latent_dim, ))
    label = Input(shape=(1, ), dtype='int32')
    label_embedding = Flatten()(Embedding(num_classes, latent_dim)(label))

    model_input = multiply([noise, label_embedding])
    img = model(model_input)

    return Model([noise, label], img)
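A usage sketch with assumed hyperparameters; the class label is multiplied into the noise vector through the embedding, so one noise vector plus one class index yields one image:

# usage sketch (assumed values)
import numpy as np

gen = build_generator(channels=1, num_classes=10, latent_dim=100)
noise = np.random.normal(0, 1, (1, 100))
label = np.array([[3]])               # arbitrary class index
img = gen.predict([noise, label])     # -> shape (1, 28, 28, 1)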
Example #9
File: nets.py Project: OrangeBai/C3DLab
def extract_layer(input_tensor=None):

    x = TimeDistributed(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               trainable=True))(input_tensor)
    x = TimeDistributed(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2',
               trainable=True))(x)
    x = TimeDistributed(
        MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))(x)

    # Block 2
    x = TimeDistributed(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1',
               trainable=True))(x)
    x = TimeDistributed(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2',
               trainable=True))(x)
    x = TimeDistributed(
        MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))(x)

    return x
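Because every layer is wrapped in TimeDistributed, the input must be 5D: (batch, frames, height, width, channels). A usage sketch with assumed clip dimensions:

# usage sketch (assumed dimensions): the two pooling stages halve H and W twice
clip = Input(shape=(16, 112, 112, 3))    # (frames, height, width, channels)
features = extract_layer(clip)           # -> (None, 16, 28, 28, 128)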
Example #10
File: nets.py Project: OrangeBai/C3DLab
def generator(input_shape, upscale_times=2):

    gen_input = Input(shape=input_shape)

    model = Conv2D(filters=64, kernel_size=9, strides=1,
                   padding="same")(gen_input)
    model = PReLU(alpha_initializer='zeros',
                  alpha_regularizer=None,
                  alpha_constraint=None,
                  shared_axes=[1, 2])(model)

    gen_model = model

    # 8 residual blocks
    for _ in range(8):
        model = res_block_gen(model, 3, 64, 1)

    model = Conv2D(filters=64, kernel_size=3, strides=1, padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    model = add([gen_model, model])

    # upscale_times upsampling blocks (default 2, i.e. 4x total upscaling)
    for _ in range(upscale_times):
        model = up_sampling_block(model, 3, 256, 1)

    model = Conv2D(filters=3, kernel_size=9, strides=1, padding="same")(model)
    model = Activation('tanh')(model)

    generator_model = Model(inputs=gen_input, outputs=model)

    return generator_model
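The generator assumes two helpers, res_block_gen and up_sampling_block, that the snippet does not include. Hedged sketches with signatures inferred from the call sites (model, kernel_size, filters, strides); the layer choices follow common Keras SRGAN implementations and may differ from this project's:

def res_block_gen(model, kernel_size, filters, strides):
    # conv -> BN -> PReLU -> conv -> BN with an identity skip (inferred helper)
    gen = model
    model = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                   padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    model = PReLU(alpha_initializer='zeros', shared_axes=[1, 2])(model)
    model = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                   padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    return add([gen, model])

def up_sampling_block(model, kernel_size, filters, strides):
    # conv followed by 2x upsampling (inferred helper)
    model = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                   padding="same")(model)
    model = UpSampling2D(size=2)(model)
    model = LeakyReLU(alpha=0.2)(model)
    return model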
Example #11
    def __init__(self, filter_sizes):
        super(DownsampleBlock, self).__init__()

        self.conv1 = Conv2D(
            filters=filter_sizes[0],
            kernel_size=(3, 3),
            strides=1,
            padding="same",
        )
        self.bn1 = BatchNormalization()

        self.act1 = Activation("relu")

        self.conv2 = Conv2D(
            filters=filter_sizes[1],
            kernel_size=(3, 3),
            strides=1,
            padding="same",
        )

        self.bn2 = BatchNormalization()

        self.act2 = Activation("relu")

        self.mp = MaxPooling2D(pool_size=(2, 2))
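As with DoubleConvBlock above, the forward pass is omitted. A hedged call() sketch; returning the pre-pool tensor as well, for a U-Net style skip connection, is an assumption:

    def call(self, x):
        # two conv -> batchnorm -> relu stages, then 2x2 max-pooling
        x = self.act1(self.bn1(self.conv1(x)))
        skip = self.act2(self.bn2(self.conv2(x)))
        return skip, self.mp(skip)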
Example #12
    def build_generator(self):
        '''
        Define the generator of the DCGAN model. The layer count, kernel_size
        and strides should be chosen to fit the desired output size.
        '''

        model = Sequential()
        model.add(Conv2D(128, kernel_size=5, strides=1, padding='same',
                         use_bias=True,
                         kernel_initializer=RandomNormal(stddev=0.02)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        #model.add(UpSampling2D())

        model.add(Conv2D(64, kernel_size=5, strides=1, padding='same',
                         use_bias=True,
                         kernel_initializer=RandomNormal(stddev=0.02)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        #model.add(UpSampling2D())

        model.add(Conv2DTranspose(self.img_channels, kernel_size=5, strides=1,
                                  padding='same', use_bias=True,
                                  kernel_initializer=RandomNormal(stddev=0.02)))
        #model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("tanh"))

        #model.summary()

        image = Input(shape=(self.img_size, self.img_size, self.img_channels))
        img_output = model(image)
        return Model(image, img_output)
Example #13
    def build_discriminator(self):
        '''
        Define the discriminator of the DCGAN model.
        '''
        model = Sequential()
        model.add(Reshape((self.img_size, self.img_size, self.img_channels)))
        model.add(Conv2D(32, kernel_size=5, strides=2, padding='same',
                         use_bias=True,
                         kernel_initializer=RandomNormal(stddev=0.02)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.25))
        model.add(Conv2D(64, kernel_size=5, strides=2, padding='same',
                         use_bias=True,
                         kernel_initializer=RandomNormal(stddev=0.02)))
        #model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.99))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.25))
        model.add(Conv2D(128, kernel_size=5, strides=2, padding='same',
                         use_bias=True,
                         kernel_initializer=RandomNormal(stddev=0.02)))
        #model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.99))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.25))
        model.add(Conv2D(128, kernel_size=5, strides=2, padding='same',
                         use_bias=True,
                         kernel_initializer=RandomNormal(stddev=0.02)))
        model.add(BatchNormalization(momentum=0.99))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        img = Input(shape=(self.img_size, self.img_size, self.img_channels))
        output_ = model(img)

        return Model(img, output_)
Example #14
def inception_resnet_v2_C(input, scale_residual=True):
    channel_axis = -1

    # Input is relu activation
    init = input

    ir1 = Conv2D(192, (1, 1), activation='relu', padding='same')(input)

    ir2 = Conv2D(192, (1, 1), activation='relu', padding='same')(input)
    ir2 = Conv2D(224, (1, 3), activation='relu', padding='same')(ir2)
    ir2 = Conv2D(256, (3, 1), activation='relu', padding='same')(ir2)

    ir_merge = merge.concatenate([ir1, ir2], axis=channel_axis)

    ir_conv = Conv2D(backend.int_shape(input)[channel_axis], (1, 1),
                     activation='relu')(ir_merge)
    out = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
                 output_shape=backend.int_shape(input)[1:],
                 arguments={'scale': 0.1 if scale_residual else 1.0})([input, ir_conv])

    # ir_conv = Conv2D(2144, (1, 1), activation='linear', padding='same')(ir_merge)
    # if scale_residual: ir_conv = Lambda(lambda x: x * 0.1)(ir_conv)
    # out = merge.concatenate([init, ir_conv], axis=channel_axis)

    out = BatchNormalization(axis=channel_axis)(out)
    out = Activation("relu")(out)
    return out
Example #15
    def build_critic(self):

        model = Sequential()

        model.add(
            Conv2D(16,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
Example #16
def myModel():
    no_Of_Filters = 60
    size_of_Filter = (5, 5)

    size_of_Filter_2 = (3, 3)
    size_of_pool = (2, 2)
    no_Of_Nodes = 500
    model = Sequential()
    model.add((Conv2D(no_Of_Filters,
                      size_of_Filter,
                      input_shape=(imageDimesions[0], imageDimesions[1], 1),
                      activation='relu')))
    model.add((Conv2D(no_Of_Filters, size_of_Filter, activation='relu')))
    model.add(MaxPooling2D(pool_size=size_of_pool))

    model.add((Conv2D(no_Of_Filters // 2, size_of_Filter_2,
                      activation='relu')))
    model.add((Conv2D(no_Of_Filters // 2, size_of_Filter_2,
                      activation='relu')))
    model.add(MaxPooling2D(pool_size=size_of_pool))
    model.add(Dropout(0.5))  # the original passed the Dropout class uninstantiated; rate 0.5 is an assumed value

    model.add(Flatten())
    model.add(Dense(no_Of_Nodes, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(noOfClasses, activation='softmax'))

    model.compile(Adam(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #17
    def __init__(self, num_templates=10, **kwargs):
        WeightedMixIn.__init__(self)
        Conv2D.__init__(self, **kwargs)
        self.num_templates = num_templates
        self.add_template_variable(weight_name="kernel")
        self.add_template_variable(weight_name="bias")
        mixture_input_spec = InputSpec(ndim=1)
        self.input_spec = (self.input_spec, mixture_input_spec)
Example #18
def T_trans(T, T_F, H, W):
    T_in = Input(shape=(T + 7, H, W))
    T_in_p = Permute((2, 3, 1))(T_in)
    T_mid = Conv2D(filters=T_F, kernel_size=(1, 1), padding="same")(T_in_p)
    T_act = Activation('relu')(T_mid)
    T_fin = Conv2D(filters=1, kernel_size=(1, 1), padding="same")(T_act)
    T_mul = Activation('relu')(T_fin)
    T_model = Model(inputs=T_in, outputs=T_mul)

    return T_model
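A usage sketch with assumed dimensions; the Permute moves the T + 7 temporal channels last so the two 1x1 convolutions can fuse them into a single map:

# usage sketch (assumed dimensions): T=3 gives 10 input channels
t_model = T_trans(T=3, T_F=8, H=32, W=32)
# input (None, 10, 32, 32) -> output (None, 32, 32, 1)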
Example #19
def create_keras_model(inputShape, nClasses, output_activation='linear'):
    """
    SegNet model
    ----------
    inputShape : tuple
        Tuple with the dimensions of the input data (ny, nx, nBands). 
    nClasses : int
         Number of classes.
    """

    kernel = (3, 3)
    pool_size = (2, 2)

    inputs = Input(shape=inputShape, name='image')

    # Encoder
    x = Conv2D(64, kernel, padding='same')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Decoder
    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(64, kernel, padding='same')(x)
    x = BatchNormalization()(x)

    x = Conv2D(nClasses, (1, 1), padding='valid')(x)

    outputs = Activation(output_activation, name='output')(x)

    model = Model(inputs=inputs, outputs=outputs, name='segnet')

    return model
Example #20
    def create_nvidia_net(self, rows=120, column=320, channel=1):
        print('Creating NVIDIA-style model')

        input_shape = (rows, column, channel)

        activation = 'relu'
        keep_prob = 0.5        # passed to Dropout, i.e. the fraction of units dropped
        keep_prob_dense = 0.5
        classes = 3

        model = Sequential()

        model.add(
            Conv2D(24, (5, 5),
                   input_shape=input_shape,
                   padding="valid",
                   strides=(2, 2)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        model.add(Conv2D(36, (5, 5), padding="valid", strides=(2, 2)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        model.add(Conv2D(48, (5, 5), padding="valid", strides=(2, 2)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        # FC
        model.add(Flatten())

        model.add(Dense(100))
        model.add(Dropout(keep_prob_dense))

        model.add(Dense(50))
        model.add(Dropout(keep_prob_dense))

        model.add(Dense(10))
        model.add(Dropout(keep_prob_dense))

        model.add(Dense(classes))
        model.add(Activation('softmax'))

        self.model = model
Example #21
def residual(inputs, filter_size, kernel):
    x = Conv2D(filter_size,
               kernel,
               padding='same',
               kernel_initializer='he_normal')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filter_size,
               kernel,
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Add()([x, inputs])
    return x
Example #22
def reduction_A(input, k=192, l=224, m=256, n=384):
    channel_axis = -1

    r1 = MaxPooling2D((3, 3), strides=(2, 2))(input)

    r2 = Conv2D(n, (3, 3), activation='relu', strides=(2, 2))(input)

    r3 = Conv2D(k, (1, 1), activation='relu', padding='same')(input)
    r3 = Conv2D(l, (3, 3), activation='relu', padding='same')(r3)
    r3 = Conv2D(m, (3, 3), activation='relu', strides=(2, 2))(r3)

    out = merge.concatenate([r1, r2, r3], axis=channel_axis)
    out = BatchNormalization(axis=channel_axis)(out)
    out = Activation('relu')(out)
    return out
Example #23
    def _res_func(x):
        identity = Cropping2D(cropping=((2, 2), (2, 2)))(x)

        a = Conv2D(nb_filter, (nb_row, nb_col),
                   strides=stride,
                   padding='valid')(x)
        a = BatchNormalization()(a)
        #a = LeakyReLU(0.2)(a)
        a = Activation("relu")(a)
        a = Conv2D(nb_filter, (nb_row, nb_col),
                   strides=stride,
                   padding='valid')(a)
        y = BatchNormalization()(a)

        return add([identity, y])
Example #24
def LeNet_model():
    model = Sequential()
    model.add(Conv2D(30, (5, 5), input_shape=(32, 32, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    
    model.add(Conv2D(15, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    # Compile model
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Example #25
def normalization(inp, norm="none", group=16):
    """ GAN Normalization """
    if norm == "layernorm":
        var_x = GroupNormalization(group=group)(inp)
    elif norm == "batchnorm":
        var_x = BatchNormalization()(inp)
    elif norm == "groupnorm":
        var_x = GroupNormalization(group=16)(inp)
    elif norm == "instancenorm":
        var_x = InstanceNormalization()(inp)
    elif norm == "hybrid":
        if group % 2 == 1:
            raise ValueError(
                "Output channels must be an even number for hybrid norm, "
                "received {}.".format(group))
        filt = group
        var_x_0 = Lambda(lambda t: t[..., :filt // 2])(inp)
        var_x_1 = Lambda(lambda t: t[..., filt // 2:])(inp)
        var_x_0 = Conv2D(filt // 2,
                         kernel_size=1,
                         kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                         kernel_initializer=GAN22_CONV_INIT)(var_x_0)
        var_x_1 = InstanceNormalization()(var_x_1)
        var_x = concatenate([var_x_0, var_x_1], axis=-1)
    else:
        var_x = inp
    return var_x
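A usage sketch with an assumed input tensor; for "hybrid" the group argument must equal the (even) channel count of the input so the split lines up, and GAN22_REGULARIZER and GAN22_CONV_INIT are module constants the function assumes:

# usage sketch (assumed shapes): hybrid norm splits the channels, applying a
# 1x1 conv to one half and InstanceNormalization to the other
x = Input(shape=(64, 64, 32))
y = normalization(x, norm="hybrid", group=32)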
Example #26
def block(out,
          nkernels,
          down=True,
          bn=True,
          dropout=False,
          leaky=True,
          normalization=InstanceNormalization):
    if leaky:
        out = LeakyReLU(0.2)(out)
    else:
        out = Activation('relu')(out)
    if down:
        out = ZeroPadding2D((1, 1))(out)
        out = Conv2D(nkernels,
                     kernel_size=(4, 4),
                     strides=(2, 2),
                     use_bias=False)(out)
    else:
        out = Conv2DTranspose(nkernels,
                              kernel_size=(4, 4),
                              strides=(2, 2),
                              use_bias=False)(out)
        out = Cropping2D((1, 1))(out)
    if bn:
        out = normalization(axis=-1)(out)
    if dropout:
        out = Dropout(0.5)(out)
    return out
Example #27
    def prepare_simple_model(input_tensor, loss_name, target):
        axis = 1 if K.image_data_format() == 'channels_first' else -1
        loss = None
        num_channels = None
        activation = None
        if loss_name == 'sparse_categorical_crossentropy':
            loss = lambda y_true, y_pred: K.sparse_categorical_crossentropy(  # pylint: disable=g-long-lambda
                y_true, y_pred, axis=axis)
            num_channels = np.amax(target) + 1
            activation = 'softmax'
        elif loss_name == 'categorical_crossentropy':
            loss = lambda y_true, y_pred: K.categorical_crossentropy(  # pylint: disable=g-long-lambda
                y_true, y_pred, axis=axis)
            num_channels = target.shape[axis]
            activation = 'softmax'
        elif loss_name == 'binary_crossentropy':
            loss = lambda y_true, y_pred: K.binary_crossentropy(y_true, y_pred)  # pylint: disable=unnecessary-lambda
            num_channels = target.shape[axis]
            activation = 'sigmoid'
        predictions = Conv2D(num_channels,
                             1,
                             activation=activation,
                             kernel_initializer='ones',
                             bias_initializer='ones')(input_tensor)
        simple_model = keras.models.Model(inputs=input_tensor,
                                          outputs=predictions)
        simple_model.compile(optimizer='rmsprop', loss=loss)
        return simple_model
Example #28
def _shortcut(input, residual, weight_decay=1e-4):
    """Adds a shortcut between input and residual block and merges them with "sum"
	"""
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = input.get_shape().as_list()
    residual_shape = residual.get_shape().as_list()
    stride_width = int(
        round(input_shape[ROW_AXIS] /
              residual_shape[ROW_AXIS]))  # Problem for variable input
    stride_height = int(round(input_shape[COL_AXIS] /
                              residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(weight_decay))(input)

    return add([shortcut, residual])
Example #29
    def d_layer(layer_input, filters, f_size=4, bn=True):
        """Discriminator layer"""
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if bn:
            d = BatchNormalization(momentum=0.8)(d)
        return d
Example #30
def initialize_model():

    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(
        Conv2D(filters=40,
               kernel_size=(1, 11),
               padding="same",
               input_shape=(1, 1500, 5),
               kernel_constraint=NonNeg()))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))

    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 30)))

    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(40))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(Dropout(0.5))

    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))

    one_filter_keras_model.summary()
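    # precision, recall and specificity are assumed to be custom metric
    # functions defined elsewhere in the project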
    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy',
                                   metrics=[precision, recall, specificity])

    return one_filter_keras_model