Example No. 1
def build_encoder_decoder():
    # Encoder
    input_tensor = Input(shape=(320, 320, 4))
    x = Conv2D(64, (3, 3), padding='same', activation='relu',
               name='conv1_1')(input_tensor)
    x = Conv2D(64, (3, 3), padding='same', activation='relu',
               name='conv1_2')(x)
    orig_1 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(128, (3, 3), padding='same', activation='relu',
               name='conv2_1')(x)
    x = Conv2D(128, (3, 3), padding='same', activation='relu',
               name='conv2_2')(x)
    orig_2 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(256, (3, 3), padding='same', activation='relu',
               name='conv3_1')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu',
               name='conv3_2')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu',
               name='conv3_3')(x)
    orig_3 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(512, (3, 3), padding='same', activation='relu',
               name='conv4_1')(x)
    x = Conv2D(512, (3, 3), padding='same', activation='relu',
               name='conv4_2')(x)
    orig_4 = x
    res = Conv2D(512, (1, 1), padding='same', activation='relu', name='res')(x)
    # x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    inputs_size = x.get_shape()[1:3]
    conv_4_1x1 = SeparableConv2D(256, (1, 1),
                                 activation='relu',
                                 padding='same',
                                 name='conv4_1x1')(x)
    conv_4_3x3_1 = SeparableConv2D(256, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   dilation_rate=ATROUS_RATES[0],
                                   name='conv4_3x3_1')(x)
    conv_4_3x3_2 = SeparableConv2D(256, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   dilation_rate=ATROUS_RATES[1],
                                   name='conv4_3x3_2')(x)
    conv_4_3x3_3 = SeparableConv2D(256, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   dilation_rate=ATROUS_RATES[2],
                                   name='conv4_3x3_3')(x)
    # # Image average pooling
    # image_level_features = Lambda(lambda x: tf.reduce_mean(x, [1, 2], keepdims=True), name='global_average_pooling')(x)
    # image_level_features = Conv2D(256, (1, 1), activation='relu', padding='same', name='image_level_features_conv_1x1')(image_level_features)
    # image_level_features = Lambda(lambda x: tf.image.resize(x, inputs_size), name='upsample_1')(image_level_features)
    # Concat
    x = Concatenate(axis=3)(
        [conv_4_1x1, conv_4_3x3_1, conv_4_3x3_2, conv_4_3x3_3])
    x = SeparableConv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='conv_1x1_1_concat')(x)
    x = SeparableConv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='conv_1x1_2_concat')(x)
    x = Add()([res, x])
    x = Activation("relu")(x)
    # orig_4 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_3')(x)
    orig_5 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Decoder
    # x = Conv2D(4096, (7, 7), activation='relu', padding='valid', name='conv6')(x)
    # x = BatchNormalization()(x)
    # x = UpSampling2D(size=(7, 7))(x)

    x = Conv2D(512, (1, 1),
               activation='relu',
               padding='same',
               name='deconv6',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_5)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_5)
    # print('origReshaped.shape: ' + str(K.int_shape(origReshaped)))
    xReshaped = Reshape(shape)(x)
    # print('xReshaped.shape: ' + str(K.int_shape(xReshaped)))
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    # print('together.shape: ' + str(K.int_shape(together)))
    x = Unpooling()(together)

    x = Conv2D(512, (5, 5),
               activation='relu',
               padding='same',
               name='deconv5',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_4)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_4)
    xReshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    x = Unpooling()(together)

    x = Conv2D(256, (5, 5),
               activation='relu',
               padding='same',
               name='deconv4',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_3)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_3)
    xReshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    x = Unpooling()(together)

    x = Conv2D(128, (5, 5),
               activation='relu',
               padding='same',
               name='deconv3',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_2)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_2)
    xReshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    x = Unpooling()(together)

    x = Conv2D(64, (5, 5),
               activation='relu',
               padding='same',
               name='deconv2',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(orig_1)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    origReshaped = Reshape(shape)(orig_1)
    xReshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([origReshaped, xReshaped])
    x = Unpooling()(together)
    x = Conv2D(64, (5, 5),
               activation='relu',
               padding='same',
               name='deconv1',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    x = BatchNormalization()(x)

    x = Conv2D(1, (5, 5),
               activation='sigmoid',
               padding='same',
               name='pred',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)

    model = Model(inputs=input_tensor, outputs=x)
    return model
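
A minimal usage sketch, assuming the Keras layer imports plus the custom Unpooling layer and the ATROUS_RATES list (e.g. [6, 12, 18]) are defined in the surrounding module; the loss choice is an assumption:

model = build_encoder_decoder()
model.compile(optimizer='adam', loss='mean_squared_error')  # assumed loss
model.summary()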
Example No. 2
def unet(input_size=(224, 224, 3), bn=True):
    inputs = Input(input_size)
    # Block 1
    conv1 = Conv2D(64, 3, 
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(inputs)
    conv1 = BatchNormalization()(conv1) if bn else conv1
    conv1 = Conv2D(64, 3, 
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv1)
    conv1 = BatchNormalization()(conv1) if bn else conv1
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    
    # Block 2
    conv2 = Conv2D(128, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(pool1)
    conv2 = BatchNormalization()(conv2) if bn else conv2
    conv2 = Conv2D(128, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(conv2)
    conv2 = BatchNormalization()(conv2) if bn else conv2
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    
    # Block 3
    conv3 = Conv2D(256, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(pool2)
    conv3 = BatchNormalization()(conv3) if bn else conv3
    conv3 = Conv2D(256, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(conv3)
    conv3 = BatchNormalization()(conv3) if bn else conv3
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    
    # Block 4
    conv4 = Conv2D(512, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(pool3)
    conv4 = BatchNormalization()(conv4) if bn else conv4
    conv4 = Conv2D(512, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(conv4)
    conv4 = BatchNormalization()(conv4) if bn else conv4
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    
    # Bottleneck 
    conv5 = Conv2D(1024, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(pool4)
    conv5 = BatchNormalization()(conv5) if bn else conv5
    conv5 = Conv2D(1024, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(conv5)
    conv5 = BatchNormalization()(conv5) if bn else conv5
    drop5 = Dropout(0.5)(conv5)
    
    # Upsampling Block 4
    upsampling6 = Conv2D(512, 2, 
                         activation='relu', 
                         padding='same', 
                         kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, upsampling6], axis=3)
    conv6 = Conv2D(512, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(merge6)
    conv6 = BatchNormalization()(conv6) if bn else conv6
    conv6 = Conv2D(512, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(conv6)
    conv6 = BatchNormalization()(conv6) if bn else conv6
    
    # Upsampling Block 3
    upsampling7 = Conv2D(256, 2, 
                         activation='relu', 
                         padding='same', 
                         kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, upsampling7], axis=3)
    conv7 = Conv2D(256, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(merge7)
    conv7 = BatchNormalization()(conv7) if bn else conv7
    conv7 = Conv2D(256, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(conv7)
    conv7 = BatchNormalization()(conv7) if bn else conv7
    
    # Upsampling Block 2
    upsampling8 = Conv2D(128, 2, 
                         activation='relu', 
                         padding='same', 
                         kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, upsampling8], axis=3)
    conv8 = Conv2D(128, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(merge8)
    conv8 = BatchNormalization()(conv8) if bn else conv8
    conv8 = Conv2D(128, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(conv8)
    conv8 = BatchNormalization()(conv8) if bn else conv8
    
    # Upsampling Block 1
    upsampling9 = Conv2D(64, 2, 
                         activation='relu', 
                         padding='same', 
                         kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, upsampling9], axis=3)
    conv9 = Conv2D(64, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(merge9)
    conv9 = BatchNormalization()(conv9) if bn else conv9
    conv9 = Conv2D(64, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(conv9)
    conv9 = BatchNormalization()(conv9) if bn else conv9
    conv9 = Conv2D(2, 3, 
                   activation='relu', 
                   padding='same', 
                   kernel_initializer='he_normal')(conv9)
    conv9 = BatchNormalization()(conv9) if bn else conv9
    
    output = Conv2D(1, 1, activation='sigmoid')(conv9)
    model = Model(inputs=inputs, outputs=output)

    return model
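
A minimal usage sketch for a binary segmentation task; the optimizer and loss are assumptions:

model = unet(input_size=(224, 224, 3), bn=True)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()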
Example No. 3
l2 = Conv2D(20, (59, 51), strides=(5, 5), activation='relu', padding='same')(l1)
l3 = MaxPooling2D((5, 5), padding='same')(l2)
l4 = Flatten()(l3)
l5 = Dropout(0.30)(l4)
l6 = Dense(ltnt_dim)(l5) # <-- do not change the next three lines
l7 = BatchNormalization()(l6)
encoded = Activation('tanh')(l7)

# model that takes input and encodes it into the latent space
latent_ncdr = Model(inpt_img, encoded) # <-- do not change this, you will need
latent_ncdr.summary()                  #     to access the layers of the encoder

l8 = Dropout(0.30)(encoded)
l9 = Dense(180)(l8)
l10 = Reshape((3,3,20))(l9) # <-- add more layers after this if you want
l11 = UpSampling2D((20, 17))(l10)
l12 = Conv2D(1, (59, 51), activation='relu', padding='same')(l11)
decoded = Conv2D(1, (2, 1), activation='sigmoid')(l12)

# model that takes input, encodes it, and decodes it
autoencoder = Model(inpt_img, decoded)
autoencoder.summary()

opt = RMSprop(learning_rate=0.004)
autoencoder.compile(loss='mean_absolute_error', optimizer=opt)
# --- end of model definitions

from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=10,  
                              min_delta=1e-4, mode='min')
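
EarlyStopping is imported above but never configured; a sketch of wiring both callbacks into training, where the x_train and x_val arrays are hypothetical:

early_stop = EarlyStopping(monitor='val_loss', patience=30, mode='min')
history = autoencoder.fit(x_train, x_train,  # x_train/x_val assumed to exist
                          epochs=300, batch_size=128,
                          validation_data=(x_val, x_val),
                          callbacks=[reduce_lr, early_stop])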
Example No. 4
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'))
model.add(
    Conv2D(16,
           2,
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'))
model.add(
    Conv2D(128,
           3,
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'))
model.add(UpSampling2D(size=(2, 2)))
model.add(
    Conv2D(64,
           3,
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'))
#model.add(UpSampling2D(size=(2, 2)))
model.add(
    Conv2D(32,
           3,
           activation='relu',
           padding='same',
           kernel_initializer='he_normal'))
#model.add(UpSampling2D(size=(2, 2)))
model.add(
Example No. 5
    def init(self, printSummary=True):  # keep_negative = 0 on inputs; for weights, keep the default (=1)
        encoded_dim = self.pams['encoded_dim']

        CNN_layer_nodes = self.pams['CNN_layer_nodes']
        CNN_kernel_size = self.pams['CNN_kernel_size']
        CNN_pool = self.pams['CNN_pool']
        Dense_layer_nodes = self.pams['Dense_layer_nodes']  # does not include encoded layer
        channels_first = self.pams['channels_first']

        inputs = Input(shape=self.pams['shape'])  # adapt this if using `channels_first` image data format

        # load bits to quantize
        nBits_input  = self.pams['nBits_input']
        nBits_accum  = self.pams['nBits_accum']
        nBits_weight = self.pams['nBits_weight']
        nBits_encod  = self.pams['nBits_encod']
        nBits_dense  = self.pams['nBits_dense'] if 'nBits_dense' in self.pams else nBits_weight
        nBits_conv   = self.pams['nBits_conv' ] if 'nBits_conv'  in self.pams else nBits_weight

        input_Qbits  = self.GetQbits(nBits_input, nBits_input['keep_negative']) 
        accum_Qbits  = self.GetQbits(nBits_accum, nBits_accum['keep_negative'])
        dense_Qbits  = self.GetQbits(nBits_dense, nBits_dense['keep_negative'])
        conv_Qbits   = self.GetQbits(nBits_conv, nBits_conv['keep_negative'])
        encod_Qbits  = self.GetQbits(nBits_encod, nBits_encod['keep_negative'])
        # keeping weights and bias same precision for now

        # define model
        x = inputs
        x = QActivation(input_Qbits, name='input_qa')(x)
        for i, n_nodes in enumerate(CNN_layer_nodes):
            if channels_first:
                x = QConv2D(n_nodes, CNN_kernel_size[i], activation='relu', padding='same',
                            data_format='channels_first', name="conv2d_"+str(i)+"_m",
                            kernel_quantizer=conv_Qbits, bias_quantizer=conv_Qbits)(x)
            else:
                x = QConv2D(n_nodes, CNN_kernel_size[i], activation='relu', padding='same', name="conv2d_"+str(i)+"_m",
                            kernel_quantizer=conv_Qbits, bias_quantizer=conv_Qbits)(x)
            if CNN_pool[i]:
                if channels_first:
                    x = MaxPooling2D((2, 2), padding='same', data_format='channels_first', name="mp_"+str(i))(x)
                else:
                    x = MaxPooling2D((2, 2), padding='same', name="mp_"+str(i))(x)

        shape = K.int_shape(x)
        x = QActivation(accum_Qbits, name='accum1_qa')(x)
        x = Flatten(name="flatten")(x)
        
        # extended inputs fed forward to the dense layer
        # if self.extend:
        #     inputs2 = Input(shape=(2,))  # maxQ, occupancy
            # input2_Qbits  = self.GetQbits(nBits_input, keep_negative=1) #oddly fails if keep_neg=0
            # input2_Qbits
            # x = inputs
            # x = QActivation(input_Qbits, name='input_qa')(x)
            

        # encoder dense nodes
        for i, n_nodes in enumerate(Dense_layer_nodes):
            x = QDense(n_nodes, activation='relu', name="en_dense_"+str(i),
                           kernel_quantizer=dense_Qbits, bias_quantizer=dense_Qbits)(x)


        #x = QDense(encoded_dim, activation='relu', name='encoded_vector',
        #                      kernel_quantizer=dense_Qbits, bias_quantizer=dense_Qbits)(x)
        x = QDense(encoded_dim, activation=self.pams['activation'], name='encoded_vector',
                              kernel_quantizer=dense_Qbits, bias_quantizer=dense_Qbits)(x)
        encodedLayer = QActivation(encod_Qbits, name='encod_qa')(x)

        # Instantiate Encoder Model
        self.encoder = Model(inputs, encodedLayer, name='encoder')
        if printSummary:
            self.encoder.summary()

        encoded_inputs = Input(shape=(encoded_dim,), name='decoder_input')
        x = encoded_inputs

        # decoder dense nodes
        for i, n_nodes in enumerate(Dense_layer_nodes):
            x = Dense(n_nodes, activation='relu', name="de_dense_"+str(i))(x)

        x = Dense(shape[1] * shape[2] * shape[3], activation='relu', name='de_dense_final')(x)
        x = Reshape((shape[1], shape[2], shape[3]),name="de_reshape")(x)

        for i, n_nodes in enumerate(CNN_layer_nodes):

            if CNN_pool[i]:
                if channels_first:
                    x = UpSampling2D((2, 2), data_format='channels_first', name="up_"+str(i))(x)
                else:
                    x = UpSampling2D((2, 2), name="up_"+str(i))(x)

            if channels_first:
                x = Conv2DTranspose(n_nodes, CNN_kernel_size[i], activation='relu', padding='same',
                                    data_format='channels_first', name="conv2D_t_"+str(i))(x)
            else:
                x = Conv2DTranspose(n_nodes, CNN_kernel_size[i], activation='relu', padding='same',
                                    name="conv2D_t_"+str(i))(x)

        if channels_first:
            # shape[0] will be # of channel
            x = Conv2DTranspose(filters=self.pams['shape'][0], kernel_size=CNN_kernel_size[0], padding='same',
                                data_format='channels_first', name="conv2d_t_final")(x)

        else:
            x = Conv2DTranspose(filters=self.pams['shape'][2], kernel_size=CNN_kernel_size[0], padding='same',
                                name="conv2d_t_final")(x)
        x = QActivation(input_Qbits, name='q_decoder_output')(x)  # verify whether this step is needed
        outputs = Activation('sigmoid', name='decoder_output')(x)

        self.decoder = Model(encoded_inputs, outputs, name='decoder')
        if printSummary:
            self.decoder.summary()

        self.autoencoder = Model(inputs, self.decoder(self.encoder(inputs)), name='autoencoder')
        if printSummary:
            self.autoencoder.summary()

        self.compileModels()

        CNN_layers = ''
        if len(CNN_layer_nodes) > 0:
            CNN_layers += '_Conv'
            for i, n in enumerate(CNN_layer_nodes):
                CNN_layers += f'_{n}x{CNN_kernel_size[i]}'
                if CNN_pool[i]:
                    CNN_layers += 'pooled'
        Dense_layers = ''
        if len(Dense_layer_nodes) > 0:
            Dense_layers += '_Dense'
            for n in Dense_layer_nodes:
                Dense_layers += f'_{n}'

        self.name = f'Autoencoded{CNN_layers}{Dense_layers}_Encoded_{encoded_dim}'

        if self.weights_f != '':
            self.autoencoder.load_weights(self.weights_f)
Example No. 6
def get_test_model_exhaustive():
    """Returns a exhaustive test model."""
    input_shapes = [
        (2, 3, 4, 5, 6),
        (2, 3, 4, 5, 6),
        (7, 8, 9, 10),
        (7, 8, 9, 10),
        (11, 12, 13),
        (11, 12, 13),
        (14, 15),
        (14, 15),
        (16,),
        (16,),
        (2,),
        (1,),
        (2,),
        (1,),
        (1, 3),
        (1, 4),
        (1, 1, 3),
        (1, 1, 4),
        (1, 1, 1, 3),
        (1, 1, 1, 4),
        (1, 1, 1, 1, 3),
        (1, 1, 1, 1, 4),
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4,),
        (2, 3),
        (1,),
        (1,),
        (1,),
        (2, 3),
        (9, 16, 1),
        (1, 9, 16)
    ]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    outputs.append(Conv1D(1, 3, padding='valid')(inputs[6]))
    outputs.append(Conv1D(2, 1, padding='same')(inputs[6]))
    outputs.append(Conv1D(3, 4, padding='causal', dilation_rate=2)(inputs[6]))
    outputs.append(ZeroPadding1D(2)(inputs[6]))
    outputs.append(Cropping1D((2, 3))(inputs[6]))
    outputs.append(MaxPooling1D(2)(inputs[6]))
    outputs.append(MaxPooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(MaxPooling1D(2, data_format="channels_first")(inputs[6]))
    outputs.append(AveragePooling1D(2)(inputs[6]))
    outputs.append(AveragePooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(AveragePooling1D(2, data_format="channels_first")(inputs[6]))
    outputs.append(GlobalMaxPooling1D()(inputs[6]))
    outputs.append(GlobalMaxPooling1D(data_format="channels_first")(inputs[6]))
    outputs.append(GlobalAveragePooling1D()(inputs[6]))
    outputs.append(GlobalAveragePooling1D(data_format="channels_first")(inputs[6]))

    outputs.append(Conv2D(4, (3, 3))(inputs[4]))
    outputs.append(Conv2D(4, (3, 3), use_bias=False)(inputs[4]))
    outputs.append(Conv2D(4, (2, 4), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(Conv2D(4, (2, 4), padding='same', dilation_rate=(2, 3))(inputs[4]))

    outputs.append(SeparableConv2D(3, (3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((1, 2))(inputs[4]))

    outputs.append(MaxPooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    #outputs.append(MaxPooling2D((2, 2), data_format="channels_first")(inputs[4])) # Default MaxPoolingOp only supports NHWC on device type CPU
    outputs.append(MaxPooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(AveragePooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    #outputs.append(AveragePooling2D((2, 2), data_format="channels_first")(inputs[4])) # Default AvgPoolingOp only supports NHWC on device type CPU
    outputs.append(AveragePooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))

    outputs.append(GlobalAveragePooling2D()(inputs[4]))
    outputs.append(GlobalAveragePooling2D(data_format="channels_first")(inputs[4]))
    outputs.append(GlobalMaxPooling2D()(inputs[4]))
    outputs.append(GlobalMaxPooling2D(data_format="channels_first")(inputs[4]))

    outputs.append(Permute((3, 4, 1, 5, 2))(inputs[0]))
    outputs.append(Permute((1, 5, 3, 2, 4))(inputs[0]))
    outputs.append(Permute((3, 4, 1, 2))(inputs[2]))
    outputs.append(Permute((2, 1, 3))(inputs[4]))
    outputs.append(Permute((2, 1))(inputs[6]))
    outputs.append(Permute((1,))(inputs[8]))

    outputs.append(Permute((3, 1, 2))(inputs[31]))
    outputs.append(Permute((3, 1, 2))(inputs[32]))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[31])))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[32])))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(axis=1)(inputs[0]))
    outputs.append(BatchNormalization(axis=2)(inputs[0]))
    outputs.append(BatchNormalization(axis=3)(inputs[0]))
    outputs.append(BatchNormalization(axis=4)(inputs[0]))
    outputs.append(BatchNormalization(axis=5)(inputs[0]))
    outputs.append(BatchNormalization()(inputs[2]))
    outputs.append(BatchNormalization(axis=1)(inputs[2]))
    outputs.append(BatchNormalization(axis=2)(inputs[2]))
    outputs.append(BatchNormalization(axis=3)(inputs[2]))
    outputs.append(BatchNormalization(axis=4)(inputs[2]))
    outputs.append(BatchNormalization()(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    #outputs.append(BatchNormalization(axis=1)(inputs[4])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[4]))
    outputs.append(BatchNormalization(axis=3)(inputs[4]))
    outputs.append(BatchNormalization()(inputs[6]))
    outputs.append(BatchNormalization(axis=1)(inputs[6]))
    outputs.append(BatchNormalization(axis=2)(inputs[6]))
    outputs.append(BatchNormalization()(inputs[8]))
    outputs.append(BatchNormalization(axis=1)(inputs[8]))
    outputs.append(BatchNormalization()(inputs[27]))
    outputs.append(BatchNormalization(axis=1)(inputs[27]))
    outputs.append(BatchNormalization()(inputs[14]))
    outputs.append(BatchNormalization(axis=1)(inputs[14]))
    outputs.append(BatchNormalization(axis=2)(inputs[14]))
    outputs.append(BatchNormalization()(inputs[16]))
    # todo: check if TensorFlow >= 2.1 supports this
    #outputs.append(BatchNormalization(axis=1)(inputs[16])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[16]))
    outputs.append(BatchNormalization(axis=3)(inputs[16]))
    outputs.append(BatchNormalization()(inputs[18]))
    outputs.append(BatchNormalization(axis=1)(inputs[18]))
    outputs.append(BatchNormalization(axis=2)(inputs[18]))
    outputs.append(BatchNormalization(axis=3)(inputs[18]))
    outputs.append(BatchNormalization(axis=4)(inputs[18]))
    outputs.append(BatchNormalization()(inputs[20]))
    outputs.append(BatchNormalization(axis=1)(inputs[20]))
    outputs.append(BatchNormalization(axis=2)(inputs[20]))
    outputs.append(BatchNormalization(axis=3)(inputs[20]))
    outputs.append(BatchNormalization(axis=4)(inputs[20]))
    outputs.append(BatchNormalization(axis=5)(inputs[20]))

    outputs.append(Dropout(0.5)(inputs[4]))

    outputs.append(ZeroPadding2D(2)(inputs[4]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[4]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[4]))
    outputs.append(Cropping2D(2)(inputs[4]))
    outputs.append(Cropping2D((2, 3))(inputs[4]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[4]))

    outputs.append(Dense(3, use_bias=True)(inputs[13]))
    outputs.append(Dense(3, use_bias=True)(inputs[14]))
    outputs.append(Dense(4, use_bias=False)(inputs[16]))
    outputs.append(Dense(4, use_bias=False, activation='tanh')(inputs[18]))
    outputs.append(Dense(4, use_bias=False)(inputs[20]))

    outputs.append(Reshape(((2 * 3 * 4 * 5 * 6),))(inputs[0]))
    outputs.append(Reshape((2, 3 * 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5, 6))(inputs[0]))

    outputs.append(Reshape((16,))(inputs[8]))
    outputs.append(Reshape((2, 8))(inputs[8]))
    outputs.append(Reshape((2, 2, 4))(inputs[8]))
    outputs.append(Reshape((2, 2, 2, 2))(inputs[8]))
    outputs.append(Reshape((2, 2, 1, 2, 2))(inputs[8]))

    outputs.append(UpSampling2D(size=(1, 2), interpolation='nearest')(inputs[4]))
    outputs.append(UpSampling2D(size=(5, 3), interpolation='nearest')(inputs[4]))
    outputs.append(UpSampling2D(size=(1, 2), interpolation='bilinear')(inputs[4]))
    outputs.append(UpSampling2D(size=(5, 3), interpolation='bilinear')(inputs[4]))

    for axis in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[0], inputs[1]]))
    for axis in [-4, -3, -2, -1, 1, 2, 3, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[2], inputs[3]]))
    for axis in [-3, -2, -1, 1, 2, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[4], inputs[5]]))
    for axis in [-2, -1, 1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[6], inputs[7]]))
    for axis in [-1, 1]:
        outputs.append(Concatenate(axis=axis)([inputs[8], inputs[9]]))
    for axis in [-1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[14], inputs[15]]))
    for axis in [-1, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[16], inputs[17]]))
    for axis in [-1, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[18], inputs[19]]))
    for axis in [-1, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[20], inputs[21]]))

    outputs.append(UpSampling1D(size=2)(inputs[6]))
    # outputs.append(UpSampling1D(size=2)(inputs[8])) # ValueError: Input 0 of layer up_sampling1d_1 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 16]

    outputs.append(Multiply()([inputs[10], inputs[11]]))
    outputs.append(Multiply()([inputs[11], inputs[10]]))
    outputs.append(Multiply()([inputs[11], inputs[13]]))
    outputs.append(Multiply()([inputs[10], inputs[11], inputs[12]]))
    outputs.append(Multiply()([inputs[11], inputs[12], inputs[13]]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid', name='shared_conv', activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[23]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[24]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1), padding='valid')(up_scale_2(inputs[24]))  # (1, 8, 8)
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = Concatenate()([
        MaxPooling2D((2, 2))(x),
        AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    outputs.append(Add()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Subtract()([inputs[26], inputs[30]]))
    outputs.append(Multiply()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Average()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Maximum()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Concatenate()([inputs[26], inputs[30], inputs[30]]))

    intermediate_input_shape = (3,)
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5, name='duplicate_layer_name')(intermediate_x)
    intermediate_model = Model(
        inputs=[intermediate_in], outputs=[intermediate_x],
        name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5,)))
    intermediate_model_2.add(Dense(5, name='duplicate_layer_name'))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[25]),
        Activation('hard_sigmoid')(inputs[25]),
        Activation('selu')(inputs[25]),
        Activation('sigmoid')(inputs[25]),
        Activation('softplus')(inputs[25]),
        Activation('softmax')(inputs[25]),
        Activation('softmax')(inputs[25]),
        Activation('relu')(inputs[25]),
        LeakyReLU()(inputs[25]),
        ELU()(inputs[25]),
        PReLU()(inputs[24]),
        PReLU()(inputs[25]),
        PReLU()(inputs[26]),
        shared_activation(inputs[25]),
        Activation('linear')(inputs[26]),
        Activation('linear')(inputs[23]),
        x,
        shared_activation(x),
    ]

    model = Model(inputs=inputs, outputs=outputs, name='test_model_exhaustive')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 2
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
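
Having already been fit on dummy data inside the builder, the model can be constructed and saved in one step; the file name is illustrative:

model = get_test_model_exhaustive()
model.save('test_model_exhaustive.h5')  # hypothetical output path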
Example No. 7
print('Building model...')
model = Sequential()
model.add(
    Conv2D(16,
           kernel_size=3,
           activation='relu',
           padding='same',
           input_shape=(20, 494, 1)))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(64, kernel_size=3, activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))

model.add(Conv2D(64, kernel_size=3, activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(16, kernel_size=3, activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))

model.add(Conv2D(1, kernel_size=3, activation='sigmoid', padding='same'))

#model.add(Conv2D(4, kernel_size = 3, activation = 'sigmoid'))
#model.add(Conv2D(1, kernel_size = 3, activation = 'sigmoid'))
#model.add(Conv2D(1, kernel_size = 3, activation = 'sigmoid'))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(128 * 20))
model.add(Dense(1280))
model.add(Dense(640))
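
The snippet stops before compilation; a minimal sketch of the remaining boilerplate, with the optimizer and loss as assumptions:

model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()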
Example No. 8
xap1 = Multiply()([maska1, xa0])

# Stage 1 Conv2D layer
xa0 = Conv2D(16,
             kernel_size=(3, 3),
             strides=(1, 1),
             padding="same",
             activation='relu',
             name="Conv2D_Layer1")(xa0)

# Revert the prediction at the scan-point locations and add back the scan values.
xa0 = Multiply()([xa0, maska1])
xa0 = add([xa0, xap0])

# Upsampling to fit the next stage
xa0 = UpSampling2D(size=(2, 2), name="UpSampling_Layer1")(xa0)
rul = UpSampling2D(size=(2, 2))(rul)
xa0 = Lambda(lambda x: backend.clip(x, 0, 1))(xa0)

###################################

# Stage 2
# Stage 2 MaxPooling2D layer
xa = MaxPooling2D(pool_size=(32, 32), strides=(32, 32), name="max_layer2")(inp)

# Preprocessing layers:
# keep the scan points by building a mask; mask the output prediction at the
# scanned points and re-insert the original values; clip the max value to 1;
# use a thresholded ReLU for the low-lying points.
maska = Lambda(lambda x: 1 - x)(xa)
maska1 = ThresholdedReLU(theta=11 / 256)(maska)
Example No. 9
def yolo_body(inputs, num_anchors, num_classes):
    # Build the darknet53 backbone model
    feat1, feat2, feat3 = darknet_body(inputs)

    # First feature layer
    # y1 = (batch_size, 13, 13, 3, 85)
    P5 = DarknetConv2D_BN_Leaky(512, (1, 1))(feat3)
    P5 = DarknetConv2D_BN_Leaky(1024, (3, 3))(P5)
    P5 = DarknetConv2D_BN_Leaky(512, (1, 1))(P5)
    # SPP structure: max pooling at several scales, then stack the results.
    maxpool1 = MaxPooling2D(pool_size=(13, 13), strides=(1, 1),
                            padding='same')(P5)
    maxpool2 = MaxPooling2D(pool_size=(9, 9), strides=(1, 1),
                            padding='same')(P5)
    maxpool3 = MaxPooling2D(pool_size=(5, 5), strides=(1, 1),
                            padding='same')(P5)
    P5 = Concatenate()([maxpool1, maxpool2, maxpool3, P5])
    P5 = DarknetConv2D_BN_Leaky(512, (1, 1))(P5)
    P5 = DarknetConv2D_BN_Leaky(1024, (3, 3))(P5)
    P5 = DarknetConv2D_BN_Leaky(512, (1, 1))(P5)

    P5_upsample = compose(DarknetConv2D_BN_Leaky(256, (1, 1)),
                          UpSampling2D(2))(P5)

    P4 = DarknetConv2D_BN_Leaky(256, (1, 1))(feat2)
    P4 = Concatenate()([P4, P5_upsample])
    P4 = make_five_convs(P4, 256)

    P4_upsample = compose(DarknetConv2D_BN_Leaky(128, (1, 1)),
                          UpSampling2D(2))(P4)

    P3 = DarknetConv2D_BN_Leaky(128, (1, 1))(feat1)
    P3 = Concatenate()([P3, P4_upsample])
    P3 = make_five_convs(P3, 128)

    P3_output = DarknetConv2D_BN_Leaky(256, (3, 3))(P3)
    P3_output = DarknetConv2D(num_anchors * (num_classes + 5),
                              (1, 1))(P3_output)

    # 38x38 output
    P3_downsample = ZeroPadding2D(((1, 0), (1, 0)))(P3)
    P3_downsample = DarknetConv2D_BN_Leaky(256, (3, 3),
                                           strides=(2, 2))(P3_downsample)
    P4 = Concatenate()([P3_downsample, P4])
    P4 = make_five_convs(P4, 256)

    P4_output = DarknetConv2D_BN_Leaky(512, (3, 3))(P4)
    P4_output = DarknetConv2D(num_anchors * (num_classes + 5),
                              (1, 1))(P4_output)

    # 19x19 output
    P4_downsample = ZeroPadding2D(((1, 0), (1, 0)))(P4)
    P4_downsample = DarknetConv2D_BN_Leaky(512, (3, 3),
                                           strides=(2, 2))(P4_downsample)
    P5 = Concatenate()([P4_downsample, P5])
    P5 = make_five_convs(P5, 512)

    P5_output = DarknetConv2D_BN_Leaky(1024, (3, 3))(P5)
    P5_output = DarknetConv2D(num_anchors * (num_classes + 5),
                              (1, 1))(P5_output)

    return Model(inputs, [P5_output, P4_output, P3_output])
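
A minimal usage sketch, assuming darknet_body, DarknetConv2D_BN_Leaky, DarknetConv2D, make_five_convs and compose are defined in the surrounding module; 3 anchors per scale and 80 classes (COCO) are typical values, not requirements:

inputs = Input(shape=(416, 416, 3))
model = yolo_body(inputs, num_anchors=3, num_classes=80)
model.summary()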
Example No. 10
def u_net(input_dim,
          target_dim,
          pool,
          strides=1,
          skips=True,
          skip_type='concat',
          batch_norm=False,
          dropout=0.5,
          dropout_allLayers=False,
          layer_activation='relu',
          out_activation='softmax',
          filterdims=[],
          filterdims_out=3,
          latentdims=[64, 3],
          prefilterdims=[],
          separable_convs=False,
          l2_pen=0,
          per_image_standardization=False):
    """U-net model

    Args:
        input_dim :      dimensions of the input data (x,x)
        target_dim:      dimensions of the target data (x,x)
        strides:         stride of the first convolution in each encoder conv block
                         and of the last convolution in each decoder conv block
        pool:            maxpool dimension
        skips:           True or False; use skip connections or not
        skip_type:       'sum' or 'concat'
        batch_norm:      True or False; apply batch normalization
        dropout:         dropout on the fully connected layers
        dropout_allLayers: use dropout for each layer block or only the latent dimension
        layer_activation: type of activation between conv and batch_norm (e.g. 'relu', 'LeakyReLU')
        out_activation:  type of activation at the output (e.g. 'sigmoid', or None)
        filterdims:      filter dimensionality matrix 
                         (e.g. for 3 layers: [[Nfeat1,dim1],[Nfeat2,dim2],[Nfeat3,dim3]])
        filterdims_out:  kernel dimensions of the filter at the output layer
        latentdims:      latent filter dimensions
        prefilterdims:   optional set of filters before the encoder
        separable_convs: use depthwise separable convolutions rather than regular convs [Xception: Deep Learning with Depthwise Separable Convolutions] 
        l2_pen:          l2 penalty on the convolutional weights
        per_image_standardization: normalize input images to zero mean unity variance
        
    Returns:
        keras model
    """

    inputs = Input(shape=input_dim)

    input_layer = inputs

    # make input dimensions compatible with the network; i.e. add a channel dim if necessary
    if len(np.shape(input_layer)) < 4:
        input_layer = Lambda(lambda x: K.expand_dims(x))(input_layer)

    if per_image_standardization == True:
        input_layer = Lambda(
            standardization,
            output_shape=standardization_output_shape)(input_layer)

    if filterdims == []:
        filterdims = [[16, 3], [32, 5], [64, 5]]

    if len(target_dim) < 3:
        n_classes = 1
    else:
        n_classes = target_dim[-1]

    last_layer = input_layer
    """
    =============================================================================
        PREFILTER LAYER
    =============================================================================
    """
    for i in range(0, np.size(prefilterdims, 0)):
        if separable_convs:
            conv_pre = SeparableConv2D(
                prefilterdims[i][0],
                prefilterdims[i][1],
                activation=None,
                padding='same',
                kernel_initializer='he_normal',
                kernel_regularizer=regularizers.l2(l2_pen),
                name='preconv{}'.format(i))
        else:
            conv_pre = Conv2D(prefilterdims[i][0],
                              prefilterdims[i][1],
                              activation=None,
                              padding='same',
                              kernel_initializer='he_normal',
                              kernel_regularizer=regularizers.l2(l2_pen),
                              name='preconv{}'.format(i))

        conv = last_layer

        if batch_norm == True:
            conv = BatchNormalization()(conv)

        conv = conv_pre(conv)

        print("conv shape :", conv.shape)

        if layer_activation == 'LeakyReLU':
            conv = LeakyReLU(alpha=0.1)(conv)
        else:
            conv = Activation(layer_activation)(conv)

        if dropout_allLayers and dropout > 0:
            conv = Dropout(dropout)(conv)
            print("dropout layer")

        last_layer = conv
    """
    =============================================================================
        ENCODER
    =============================================================================
    """
    convs = []
    convs_a = []
    convs_b = []
    pools = []
    for i in range(0, np.size(filterdims, 0)):
        if separable_convs:
            conv_a = SeparableConv2D(
                filterdims[i][0],
                filterdims[i][1],
                strides=strides,
                activation=None,
                padding='same',
                kernel_initializer='he_normal',
                kernel_regularizer=regularizers.l2(l2_pen),
                name='conv{}a'.format(i))
            conv_b = SeparableConv2D(
                filterdims[i][0],
                filterdims[i][1],
                activation=None,
                padding='same',
                kernel_initializer='he_normal',
                kernel_regularizer=regularizers.l2(l2_pen),
                name='conv{}b'.format(i))
        else:
            conv_a = Conv2D(filterdims[i][0],
                            filterdims[i][1],
                            strides=strides,
                            activation=None,
                            padding='same',
                            kernel_initializer='he_normal',
                            kernel_regularizer=regularizers.l2(l2_pen),
                            name='conv{}a'.format(i))
            conv_b = Conv2D(filterdims[i][0],
                            filterdims[i][1],
                            activation=None,
                            padding='same',
                            kernel_initializer='he_normal',
                            kernel_regularizer=regularizers.l2(l2_pen),
                            name='conv{}b'.format(i))

        conv = last_layer
        if batch_norm == True:
            conv = BatchNormalization()(conv)

        conv = conv_a(conv)

        print("conv shape :", conv.shape)

        if layer_activation == 'LeakyReLU':
            conv = LeakyReLU(alpha=0.1)(conv)
        else:
            conv = Activation(layer_activation)(conv)

        if batch_norm == True:
            conv = BatchNormalization()(conv)

        conv = conv_b(conv)
        print("conv shape :", conv.shape)

        if layer_activation == 'LeakyReLU':
            conv = LeakyReLU(alpha=0.1)(conv)
        else:
            conv = Activation(layer_activation)(conv)

        convs.append(conv)
        pools.append(
            MaxPooling2D(pool_size=pool[i], name='maxpool{}'.format(i))(conv))
        print("pool shape :", pools[i].shape)

        last_layer = pools[-1]

        if dropout_allLayers and dropout > 0:
            last_layer = Dropout(dropout)(last_layer)
            print("dropout layer")
    """
    =============================================================================
        LATENT LAYER
    =============================================================================
    """
    if len(latentdims) == 2:
        if separable_convs:
            conv_latent = SeparableConv2D(
                latentdims[0],
                latentdims[1],
                activation=None,
                padding='same',
                kernel_initializer='he_normal',
                kernel_regularizer=regularizers.l2(l2_pen),
                name='Conv1latent_space')(last_layer)
        else:
            conv_latent = Conv2D(latentdims[0],
                                 latentdims[1],
                                 activation=None,
                                 padding='same',
                                 kernel_initializer='he_normal',
                                 kernel_regularizer=regularizers.l2(l2_pen),
                                 name='Conv1latent_space')(last_layer)

            print("conv shape :", conv_latent.shape)

        if layer_activation == 'LeakyReLU':
            conv_latent = LeakyReLU(alpha=0.1)(conv_latent)
        else:
            conv_latent = Activation(layer_activation)(conv_latent)

        if dropout > 0:
            conv_latent = Dropout(dropout)(conv_latent)
            print("dropout layer")

        if separable_convs:
            conv_latent = SeparableConv2D(
                latentdims[0],
                latentdims[1],
                activation=None,
                padding='same',
                kernel_initializer='he_normal',
                kernel_regularizer=regularizers.l2(l2_pen),
                name='Conv2latent_space')(conv_latent)
        else:
            conv_latent = Conv2D(latentdims[0],
                                 latentdims[1],
                                 activation=None,
                                 padding='same',
                                 kernel_initializer='he_normal',
                                 kernel_regularizer=regularizers.l2(l2_pen),
                                 name='Conv2latent_space')(conv_latent)

        print("conv shape :", conv_latent.shape)

        if layer_activation == 'LeakyReLU':
            conv_latent = LeakyReLU(alpha=0.1)(conv_latent)
        else:
            conv_latent = Activation(layer_activation)(conv_latent)

    else:
        conv_latent = last_layer
        print("skipping latent layer..")

        if dropout > 0:
            conv_latent = Dropout(dropout)(conv_latent)
            print("dropout layer")

    last_layer = conv_latent
    """
    =============================================================================
        DECODER
    =============================================================================
    """
    filterdims = filterdims[::-1]
    for i in range(0, np.size(filterdims, 0)):
        # 'learned' upsampling (nearest neighbor interpolation with 2x2, followed by conv of 2x2 )
        #        up = Conv2DTranspose(filterdims[i][0], 2, activation = None, padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(last_layer))
        up = UpSampling2D(name='upsample{}'.format(i),
                          size=pool[-i - 1])(last_layer)
        print("up shape   :", up.shape)

        if skips == True:
            # Skip connections
            if skip_type == 'concat':
                merged = concatenate([convs[-1 - i], up], 3)
            else:
                merged = Add(name='Skip-connection{}'.format(i))(
                    [convs[-1 - i], up])
        else:
            merged = up

        print("merge shape:", merged.shape)

        if batch_norm == True:
            merged = BatchNormalization()(merged)

        shape_in = merged.shape.as_list()

        layer = Conv2DTranspose(filterdims[i][0],
                                filterdims[i][1],
                                activation=None,
                                padding='same',
                                kernel_initializer='he_normal',
                                kernel_regularizer=regularizers.l2(l2_pen),
                                name='deconv{}a'.format(i))
        conv = layer(merged)

        shape_out = layer.compute_output_shape(shape_in)
        conv.set_shape(shape_out)

        print("conv shape :", conv.shape)

        if layer_activation == 'LeakyReLU':
            conv = LeakyReLU(alpha=0.1)(conv)
        else:
            conv = Activation(layer_activation)(conv)

        if batch_norm == True:
            conv = BatchNormalization()(conv)

        if i < np.size(filterdims, 0) - 1:
            shape_in = conv.shape.as_list()

            layer = Conv2DTranspose(filterdims[i + 1][0],
                                    filterdims[i + 1][1],
                                    strides=strides,
                                    activation=None,
                                    padding='same',
                                    kernel_initializer='he_normal',
                                    kernel_regularizer=regularizers.l2(l2_pen),
                                    name='deconv{}b'.format(i))
            conv = layer(conv)

            shape_out = layer.compute_output_shape(shape_in)
            conv.set_shape(shape_out)

            if layer_activation == 'LeakyReLU':
                conv = LeakyReLU(alpha=0.1)(conv)
            else:
                conv = Activation(layer_activation)(conv)

            print("conv shape :", conv.shape)
            last_layer = conv

            if dropout_allLayers and dropout > 0:
                last_layer = Dropout(dropout)(last_layer)
                print("dropout layer")

        else:  # last layer:
            conv = Conv2DTranspose(filterdims[i][0],
                                   filterdims[i][1],
                                   activation=None,
                                   strides=strides,
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   kernel_regularizer=regularizers.l2(l2_pen),
                                   name='deconv{}b'.format(i))(conv)
            if layer_activation == 'LeakyReLU':
                conv = LeakyReLU(alpha=0.1)(conv)
            else:
                conv = Activation(layer_activation)(conv)

            conv = Conv2DTranspose(filterdims[i][0],
                                   filterdims[i][1],
                                   activation=None,
                                   padding='same',
                                   kernel_initializer='he_normal',
                                   kernel_regularizer=regularizers.l2(l2_pen),
                                   name='deconv{}c'.format(i))(conv)
            if layer_activation == 'LeakyReLU':
                conv = LeakyReLU(alpha=0.1)(conv)
            else:
                conv = Activation(layer_activation)(conv)

            final_layer = Conv2D(n_classes,
                                 filterdims_out,
                                 activation=out_activation,
                                 padding='same',
                                 kernel_initializer='he_normal',
                                 kernel_regularizer=regularizers.l2(l2_pen),
                                 name='Final_conv')(conv)
            print("conv shape :", conv.shape)

            final_layer = Reshape(target_dim)(final_layer)

    model = Model(inputs=inputs, outputs=final_layer)

    return model
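
A minimal usage sketch for a 2-class softmax segmentation with the default three-level filterdims; the missing channel dimension on the input is added automatically, and the optimizer and loss are assumptions:

model = u_net(input_dim=(128, 128),
              target_dim=(128, 128, 2),
              pool=[(2, 2), (2, 2), (2, 2)],
              skips=True,
              batch_norm=True)
model.compile(optimizer='adam', loss='categorical_crossentropy')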
Example No. 11
import matplotlib.pyplot as plt
import os

if not os.path.exists("./gan_images"):
    os.makedirs('./gan_images')

np.random.seed(0)
tf.random.set_seed(0)

# Generator model that creates fake images
generator = Sequential()
generator.add(Dense(128 * 7 * 7, input_dim=100, activation=LeakyReLU(0.2)))

generator.add(BatchNormalization())  # BatchNormalization: normalizes each batch of data
generator.add(Reshape((7, 7, 128)))
generator.add(UpSampling2D())
generator.add(Conv2D(64, kernel_size=5, padding='same'))

generator.add(BatchNormalization())
generator.add(Activation(LeakyReLU(0.2)))
generator.add(UpSampling2D())
generator.add(Conv2D(1, kernel_size=5, padding='same', activation='tanh'))

# Model that judges whether a fake image is a real image
discriminator = Sequential()
discriminator.add(
    Conv2D(64,
           kernel_size=5,
           strides=2,
           padding='same',
           input_shape=(28, 28, 1)))
Example No. 12
def yolo3_nano_body(inputs, num_anchors, num_classes, weights_path=None):
    """
    Create YOLO_V3 Nano model CNN body in Keras.

    Reference Paper:
        "YOLO Nano: a Highly Compact You Only Look Once Convolutional Neural Network for Object Detection"
        https://arxiv.org/abs/1910.01271
    """
    nano_net = NanoNet(input_tensor=inputs,
                       weights='imagenet',
                       include_top=False)
    if weights_path is not None:
        nano_net.load_weights(weights_path, by_name=True)
        print('Load weights {}.'.format(weights_path))

    # input: 416 x 416 x 3
    # Conv_pw_3_relu: 13 x 13 x 189
    # pep_block_15_add: 26 x 26 x 325
    # pep_block_7_add: 52 x 52 x 150

    # f1 :13 x 13 x 189
    f1 = nano_net.get_layer('Conv_pw_3').output
    # f2: 26 x 26 x 325
    f2 = nano_net.get_layer('pep_block_15_add').output
    # f3 : 52 x 52 x 150
    f3 = nano_net.get_layer('pep_block_7_add').output

    #feature map 1 head & output (13x13 for 416 input)
    y1 = _ep_block(f1,
                   filters=462,
                   stride=1,
                   expansion=EP_EXPANSION,
                   block_id=6)
    y1 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(y1)

    #upsample fpn merge for feature map 1 & 2
    x = compose(NanoConv2D_BN_Relu6(105, (1, 1)), UpSampling2D(2))(f1)
    x = Concatenate()([x, f2])

    #feature map 2 head & output (26x26 for 416 input)
    x = _pep_block(x,
                   proj_filters=113,
                   filters=325,
                   stride=1,
                   expansion=PEP_EXPANSION,
                   block_id=18)
    x = _pep_block(x,
                   proj_filters=99,
                   filters=207,
                   stride=1,
                   expansion=PEP_EXPANSION,
                   block_id=19)
    x = DarknetConv2D(98, (1, 1))(x)
    y2 = _ep_block(x,
                   filters=183,
                   stride=1,
                   expansion=EP_EXPANSION,
                   block_id=7)
    y2 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(y2)

    #upsample fpn merge for feature map 2 & 3
    x = compose(NanoConv2D_BN_Relu6(47, (1, 1)), UpSampling2D(2))(x)
    x = Concatenate()([x, f3])

    # feature map 3 head & output (52x52 for 416 input)
    x = _pep_block(x,
                   proj_filters=58,
                   filters=122,
                   stride=1,
                   expansion=PEP_EXPANSION,
                   block_id=20)
    x = _pep_block(x,
                   proj_filters=52,
                   filters=87,
                   stride=1,
                   expansion=PEP_EXPANSION,
                   block_id=21)
    x = _pep_block(x,
                   proj_filters=47,
                   filters=93,
                   stride=1,
                   expansion=PEP_EXPANSION,
                   block_id=22)
    y3 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)

    return Model(inputs=inputs, outputs=[y1, y2, y3])
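
A minimal usage sketch, assuming 3 anchors per scale and 20 classes, with the NanoNet backbone and helper blocks referenced above in scope:

from tensorflow.keras.layers import Input

inputs = Input(shape=(416, 416, 3))
model = yolo3_nano_body(inputs, num_anchors=3, num_classes=20)
# three raw prediction tensors at strides 32/16/8 (13x13, 26x26, 52x52 grids)
model.summary()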
Exemplo n.º 13
def yoloNano(anchors,
             input_size=416,
             num_classes=1,
             expention=1.5,
             decay=0.001):
    # backbone
    input_0 = Input(shape=(input_size, input_size, 3))
    input_gt = [
        Input(shape=(input_size // {
            0: 32,
            1: 16,
            2: 8
        }[l], input_size // {
            0: 32,
            1: 16,
            2: 8
        }[l], len(anchors) // 3, num_classes + 5)) for l in range(3)
    ]
    x = Conv2D(filters=12,
               strides=(1, 1),
               kernel_size=(3, 3),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(input_0)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=24,
               strides=(2, 2),
               kernel_size=(3, 3),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_0 = LeakyReLU()(x)
    # PEP(7)(208x208x24)
    x = Conv2D(filters=7,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_0)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(7 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=24,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Add()([x_0, x])
    # EP(104x104x70)
    x = Conv2D(filters=math.ceil(24 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(2, 2),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=70,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_1 = LeakyReLU()(x)
    # PEP(25)(104x104x70)
    x = Conv2D(filters=25,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_1)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(25 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=70,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_2 = Add()([x_1, x])
    # PEP(24)(104x104x70)
    x = Conv2D(filters=24,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_2)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(24 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=70,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Add()([x_2, x])
    # EP(52x52x150)
    x = Conv2D(filters=math.ceil(70 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(2, 2),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_3 = LeakyReLU()(x)
    # PEP(56)(52x52x150)
    x = Conv2D(filters=56,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_3)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(56 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Add()([x_3, x])
    # Conv1x1
    x = Conv2D(filters=150,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_4 = LeakyReLU()(x)
    # FCA(8)
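    # squeeze-and-excitation style channel attention: global average pooling,
    # a reduction-8 bottleneck, then sigmoid gates that rescale the 150 channels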
    x = AvgPool2D(pool_size=(52, 52))(x_4)
    x = Dense(units=150 // 8,
              activation='relu',
              use_bias=False,
              kernel_regularizer=l2(l=decay))(x)
    x = Dense(units=150,
              activation='sigmoid',
              use_bias=False,
              kernel_regularizer=l2(l=decay))(x)
    x_5 = Multiply()([x_4, x])
    # PEP(73)(52x52x150)
    x = Conv2D(filters=73,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_5)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(73 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_6 = Add()([x_5, x])
    # PEP(71)(52x52x150)
    x = Conv2D(filters=71,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_6)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(71 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_7 = Add()([x_6, x])
    # PEP(75)(52x52x150)
    x = Conv2D(filters=75,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_7)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(75 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=150,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_8 = Add()([x_7, x])  #output 52x52x150
    # EP(26x26x325)
    x = Conv2D(filters=math.ceil(150 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_8)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(2, 2),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_9 = LeakyReLU()(x)
    # PEP(132)(26x26x325)
    x = Conv2D(filters=132,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_9)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(132 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_10 = Add()([x_9, x])
    # PEP(124)(26x26x325)
    x = Conv2D(filters=124,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_10)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(124 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_11 = Add()([x_10, x])
    # PEP(141)(26x26x325)
    x = Conv2D(filters=141,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_11)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(141 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_12 = Add()([x_11, x])
    # PEP(140)(26x26x325)
    x = Conv2D(filters=140,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_12)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(140 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_13 = Add()([x_12, x])
    # PEP(137)(26x26x325)
    x = Conv2D(filters=137,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_13)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(137 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_14 = Add()([x_13, x])
    # PEP(135)(26x26x325)
    x = Conv2D(filters=135,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_14)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(135 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_15 = Add()([x_14, x])
    # PEP(133)(26x26x325)
    x = Conv2D(filters=133,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_15)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(133 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_16 = Add()([x_15, x])
    # PEP(140)(26x26x325)
    x = Conv2D(filters=140,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_16)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(140 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_17 = Add()([x_16, x])  #output 26x26x325
    # EP(13x13x545)
    x = Conv2D(filters=math.ceil(325 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_17)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(2, 2),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=545,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_18 = LeakyReLU()(x)
    # PEP(276)(13x13x545)
    x = Conv2D(filters=276,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_18)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(276 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=545,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x_19 = Add()([x_18, x])
    # Conv1x1
    x = Conv2D(filters=230,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_19)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # EP(13x13x489)
    x = Conv2D(filters=math.ceil(230 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=489,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # PEP(213)(13x13x469)
    x = Conv2D(filters=213,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(213 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=469,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # Conv1x1
    x = Conv2D(filters=189,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_20 = LeakyReLU()(x)  #output 13x13x189
    # EP(13x13x462)
    x = Conv2D(filters=math.ceil(189 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_20)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=462,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # feature 13x13x[(num_classes+5)x3]
    feature_13x13 = Conv2D(filters=3 * (num_classes + 5),
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           use_bias=False,
                           padding='same',
                           kernel_regularizer=l2(l=decay))(x)
    # Conv1x1
    x = Conv2D(filters=105,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_20)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # upsampling 26x26x105
    x = UpSampling2D()(x)
    # concatenate
    x = Concatenate()([x, x_17])
    # PEP(113)(26x26x325)
    x = Conv2D(filters=113,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(113 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=325,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # PEP(99)(26x26x207)
    x = Conv2D(filters=99,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(99 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=207,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # Conv1x1
    x = Conv2D(filters=98,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_21 = LeakyReLU()(x)
    # EP(13x13x183)
    x = Conv2D(filters=math.ceil(98 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_21)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=183,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # feature 26x26x[(num_classes+5)x3]
    feature_26x26 = Conv2D(filters=3 * (num_classes + 5),
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           use_bias=False,
                           padding='same',
                           kernel_regularizer=l2(l=decay))(x)
    # Conv1x1
    x = Conv2D(filters=47,
               kernel_size=(1, 1),
               strides=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x_21)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # upsampling
    x = UpSampling2D()(x)
    # concatenate
    x = Concatenate()([x, x_8])
    # PEP(58)(52x52x132)
    x = Conv2D(filters=58,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(58 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=132,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # PEP(52)(52x52x87)
    x = Conv2D(filters=52,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(52 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=87,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # PEP(47)(52x52x93)
    x = Conv2D(filters=47,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=math.ceil(47 * expention),
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = DepthwiseConv2D(strides=(1, 1),
                        kernel_size=(3, 3),
                        use_bias=False,
                        padding='same',
                        kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(filters=93,
               strides=(1, 1),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    feature_52x52 = Conv2D(filters=3 * (num_classes + 5),
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           use_bias=False,
                           padding='same',
                           kernel_regularizer=l2(l=decay))(x)
    # loss layer
    loss = Lambda(yolo_loss,
                  output_shape=(1, ),
                  name='yolo_loss',
                  arguments={
                      'anchors': anchors,
                      'num_classes': num_classes,
                      'ignore_thresh': 0.5
                  })([feature_13x13, feature_26x26, feature_52x52, *input_gt])

    debug_model = tf.keras.Model(
        inputs=input_0, outputs=[feature_13x13, feature_26x26, feature_52x52])
    train_model = tf.keras.Model(inputs=[input_0, *input_gt], outputs=loss)
    return train_model, debug_model
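
Since the Lambda layer already computes the loss inside the graph, a common way to train this kind of model (a sketch, assuming `anchors` is defined) is to compile with an identity loss on the 'yolo_loss' output:

train_model, debug_model = yoloNano(anchors, input_size=416, num_classes=1)
# the model's output *is* the loss, so the compiled loss just passes it through
train_model.compile(optimizer='adam',
                    loss={'yolo_loss': lambda y_true, y_pred: y_pred})
# fit() then takes [images, y_true_13, y_true_26, y_true_52] with dummy zero targets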
Exemplo n.º 14
def expand_generator(old_model,
                     block,
                     filters,
                     z_dim,
                     noise_dim,
                     block_type="AdaIN"):
    """
    Expands the old model by increasing the output by a factor of 2. Size is not explicit and
    is determined by doubling the last feature map size.

    :param old_model: the old "straight through" generator model to expand
    :param block: the block number for naming
    :param filters: the number of convolution filters (all 3x3 size)
    :param z_dim: the number of z-dimensions to feed style vectors
    :param noise_dim: the spatial size of the per-layer noise input
    :param block_type: the type of generator block to use: "AdaIN" (default) or "ModDemod"
    :return: straight_g, merged_g
    """
    # Preconditions
    assert block > 1
    GeneratorBlock = GeneratorBlockAdaIN if block_type == "AdaIN" else GeneratorBlockModDemod

    # Create new inputs
    w_inputs = [
        Input(shape=(z_dim, ), name=f"G_w_input_{i + 1}") for i in range(block)
    ]
    noise_input = Input(shape=(noise_dim, noise_dim, 1), name="G_noise_input")
    constant_input = Input(shape=(1, 1), name="G_constant_input")

    # Pass through old model up to tRGB
    noise = noise_input
    constant = constant_input
    x = old_model.get_layer(f"G_base")(constant)
    x = old_model.get_layer("G_base_reshape")(x)
    for b in range(block - 1):
        style = w_inputs[b]
        x = old_model.get_layer(f"G_block_{b + 1}_style")([x, noise, style])

    # Get old RGB and upsample
    old_out = old_model(w_inputs[:block - 1] + [noise_input, constant])
    old_out = UpSampling2D()(old_out)

    # Add new block
    style = w_inputs[block - 1]
    x = GeneratorBlock(filters=filters,
                       block=block,
                       z_dim=z_dim,
                       name=f"G_block_{block}_style")([x, noise, style])

    # Transform to RGB
    new_out = tRGB(block)(x)

    # STRAIGHT MODEL
    g_inputs = w_inputs + [noise_input, constant_input]
    straight_g = Model(inputs=g_inputs,
                       outputs=[new_out],
                       name=f"G_straight_{block}")

    # MERGE MODEL
    g_out = Fade(name="Fade_G")([old_out, new_out])

    merged_g = Model(inputs=g_inputs,
                     outputs=[g_out],
                     name=f"G_merged_{block}")

    return straight_g, merged_g
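
A usage sketch with illustrative values (old_g stands for an existing straight-through generator whose last block is block 1; tRGB, Fade and the generator block classes are project helpers assumed in scope):

# grow a generator by one block, doubling its output resolution
straight_g, merged_g = expand_generator(old_g,
                                        block=2,
                                        filters=256,
                                        z_dim=512,
                                        noise_dim=8)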
Exemplo n.º 15
# Block encoder 4
max_pool_enc_4 = MaxPooling2D(pool_size=(2, 2))(conv_enc_3)
conv_enc_4 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer=initializer)(max_pool_enc_4)
conv_enc_4 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer=initializer)(conv_enc_4)
# -- Encoder -- #

# ----------- #
maxpool = MaxPooling2D(pool_size=(2, 2))(conv_enc_4)
conv = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer=initializer)(maxpool)
conv = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer=initializer)(conv)
# ----------- #

# -- Decoder -- #
# Block decoder 1
up_dec_1 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer=initializer)(UpSampling2D(size=(2, 2))(conv))
merge_dec_1 = concatenate([conv_enc_4, up_dec_1], axis=3)
conv_dec_1 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer=initializer)(merge_dec_1)
conv_dec_1 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer=initializer)(conv_dec_1)

# Block decoder 2
up_dec_2 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer=initializer)(UpSampling2D(size=(2, 2))(conv_dec_1))
merge_dec_2 = concatenate([conv_enc_3, up_dec_2], axis=3)
conv_dec_2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer=initializer)(merge_dec_2)
conv_dec_2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer=initializer)(conv_dec_2)

# Block decoder 3
up_dec_3 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer=initializer)(UpSampling2D(size=(2, 2))(conv_dec_2))
merge_dec_3 = concatenate([conv_enc_2, up_dec_3], axis=3)
conv_dec_3 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer=initializer)(merge_dec_3)
conv_dec_3 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer=initializer)(conv_dec_3)
Exemplo n.º 16
def SegNet():
    model = Sequential()
    #encoder
    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               input_shape=(3, img_w, img_h),
               padding='same',
               activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(128,128)
    model.add(
        Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(64,64)
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(32,32)
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(16,16)
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(8,8)
    #decoder
    model.add(UpSampling2D(size=(2, 2)))
    #(16,16)
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    #(32,32)
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    #(64,64)
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    #(128,128)
    model.add(
        Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    #(256,256)
    model.add(
        Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
    model.add(Reshape((n_label, img_w * img_h)))
    # swap axis 1 and axis 2, equivalent to np.swapaxes(layer, 1, 2)
    model.add(Permute((2, 1)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    model.summary()
    return model
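
SegNet() reads module-level globals that the snippet does not show, and its input_shape is channels-first; a minimal sketch with assumed values:

import tensorflow as tf

# assumed configuration, not part of the original snippet; the network is
# written channels-first, so switch Keras' image data format accordingly
tf.keras.backend.set_image_data_format('channels_first')
img_w, img_h, n_label = 256, 256, 2
model = SegNet()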
Exemplo n.º 17
def unet2D(
    x_in,
    img_shape,
    out_im_chans,
    nf_enc=[64, 64, 128, 128, 256, 256, 512],
    nf_dec=None,
    layer_prefix='unet',
    n_convs_per_stage=1,
):
    ks = 3
    x = x_in

    encodings = []
    encoding_vol_sizes = []
    for i in range(len(nf_enc)):
        for j in range(n_convs_per_stage):
            x = Conv2D(nf_enc[i],
                       kernel_size=ks,
                       strides=(1, 1),
                       padding='same',
                       name='{}_enc_conv2D_{}_{}'.format(
                           layer_prefix, i, j + 1))(x)
            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_vol_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        if i < len(nf_enc) - 1:
            x = MaxPooling2D(pool_size=(2, 2),
                             padding='same',
                             name='{}_enc_maxpool_{}'.format(layer_prefix,
                                                             i))(x)

    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))

    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # only do upsample if we are not yet at max resolution
        if np.any(np.asarray(curr_shape) < np.asarray(img_shape[:len(curr_shape)])):
            x = UpSampling2D(size=(2, 2),
                             name='{}_dec_upsamp_{}'.format(layer_prefix,
                                                            i))(x)

        # concatenate the matching encoder features (skipping the deepest encoding)
        if i <= len(encodings) - 2:
            x = _pad_or_crop_to_shape_2D(
                x, np.asarray(x.get_shape().as_list()[1:-1]),
                encoding_vol_sizes[-i - 2])
            x = Concatenate(axis=-1)([x, encodings[-i - 2]])

        for j in range(n_convs_per_stage):
            x = Conv2D(nf_dec[i],
                       kernel_size=ks,
                       padding='same',
                       name='{}_dec_conv2D_{}_{}'.format(layer_prefix, i,
                                                         j))(x)
            x = LeakyReLU(0.2)(x)

    y = Conv2D(out_im_chans,
               kernel_size=1,
               padding='same',
               name='{}_dec_conv2D_final'.format(layer_prefix))(x)

    # add your own activation after this model
    return y
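
A quick wiring sketch (shapes are assumptions; _pad_or_crop_to_shape_2D must be in scope, and the final activation is deliberately left to the caller):

from tensorflow.keras.layers import Activation, Input
from tensorflow.keras.models import Model

x_in = Input(shape=(256, 256, 1))
y = unet2D(x_in, img_shape=(256, 256, 1), out_im_chans=1)
y = Activation('sigmoid')(y)  # caller-supplied activation
model = Model(inputs=x_in, outputs=y)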
Exemplo n.º 18
y_train = to_categorical(y_train)  # (50000, 10)
y_test = to_categorical(y_test)  # (10000, 10)

inceptionresnetV2 = InceptionResNetV2(weights='imagenet',
                                      include_top=False,
                                      input_shape=(96, 96, 3))
# print(model.weights)

# ============== Modeling =====================
inceptionresnetV2.trainable = False  # do not train the base; use its saved pretrained weights
# inceptionresnetV2.summary()
print(len(inceptionresnetV2.weights))  # 26
print(len(inceptionresnetV2.trainable_weights))  # 0

model = Sequential()
model.add(UpSampling2D(size=(3, 3)))  # upscale 32x32 inputs to the base's 96x96
model.add(inceptionresnetV2)  # frozen pretrained feature extractor
model.add(Flatten())
model.add(Dense(10))
model.add(Dense(5))
model.add(Dense(10, activation='softmax'))
# model.summary()  # calling this before the model is built raises an error

# 3. Compile and train
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])
#### for binary classification (labels only 0/1), use loss='binary_crossentropy'
model.fit(x_train,
          y_train,
          epochs=20,
          validation_split=0.2)
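
A natural follow-up, assuming x_test and y_test hold the matching test split:

loss, acc = model.evaluate(x_test, y_test)
print('test loss:', loss, 'test acc:', acc)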
Exemplo n.º 19
def build_stage2_generator():
    """
    Create Stage-II generator containing the CA Augmentation Network,
    the image encoder and the generator network
    """

    # 1. CA Augmentation Network
    input_layer = Input(shape=(1024, ))
    input_lr_images = Input(shape=(64, 64, 3))

    ca = Dense(256)(input_layer)
    mean_logsigma = LeakyReLU(alpha=0.2)(ca)
    c = Lambda(generate_c)(mean_logsigma)

    # 2. Image Encoder
    x = ZeroPadding2D(padding=(1, 1))(input_lr_images)
    x = Conv2D(128, kernel_size=(3, 3), strides=1, use_bias=False)(x)
    x = ReLU()(x)

    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(256, kernel_size=(4, 4), strides=2, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(512, kernel_size=(4, 4), strides=2, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    # 3. Joint
    c_code = Lambda(joint_block)([c, x])

    x = ZeroPadding2D(padding=(1, 1))(c_code)
    x = Conv2D(512, kernel_size=(3, 3), strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    # 4. Residual blocks
    x = residual_block(x)
    x = residual_block(x)
    x = residual_block(x)
    x = residual_block(x)

    # 5. Upsampling blocks
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(512, kernel_size=3, padding="same", strides=1,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(256, kernel_size=3, padding="same", strides=1,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(128, kernel_size=3, padding="same", strides=1,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(64, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    x = Conv2D(3, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
    x = Activation('tanh')(x)

    model = Model(inputs=[input_layer, input_lr_images],
                  outputs=[x, mean_logsigma])
    return model
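
A shape-check sketch (random tensors standing in for the 1024-d text embedding and 64x64 low-resolution images; generate_c, joint_block and residual_block are helpers assumed to be defined elsewhere):

import numpy as np

stage2_gen = build_stage2_generator()
embedding = np.random.randn(1, 1024).astype('float32')
lr_images = np.random.randn(1, 64, 64, 3).astype('float32')
hr_images, mean_logsigma = stage2_gen.predict([embedding, lr_images])
print(hr_images.shape)  # (1, 256, 256, 3): two stride-2 downsamples, four 2x upsamples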
Exemplo n.º 20
    def __init__(self, args):
        input_img = Input(shape=(28, 28, 1))

        regularization = tf.keras.regularizers.L1L2(l2=1e-5)

        t3 = self.block(input_img, 128, 0.2, regularization)
        t3 = self.block(t3, 256, 0.3, regularization)
        t3 = self.block(t3, 512, 0.4, regularization, False)

        conv = Conv2D(1024, (3, 3),
                      padding='valid',
                      activation='relu',
                      kernel_regularizer=regularization,
                      bias_regularizer=regularization)(t3)
        net = BatchNormalization()(conv)
        conv = Conv2D(1024, (3, 3),
                      padding='valid',
                      activation='relu',
                      kernel_regularizer=regularization,
                      bias_regularizer=regularization)(net)
        net = BatchNormalization()(conv)
        net = AveragePooling2D(pool_size=(2, 2))(net)
        net = Flatten()(net)
        net = Dropout(0.2)(net)

        net = Dense(10,
                    activation='softmax',
                    kernel_regularizer=regularization,
                    bias_regularizer=regularization)(net)

        bitmap = UpSampling2D(size=(4, 4))(t3)
        bitmap = Conv2D(128, (3, 3),
                        padding='same',
                        activation='relu',
                        kernel_regularizer=regularization,
                        bias_regularizer=regularization)(bitmap)
        bitmap = BatchNormalization()(bitmap)
        bitmap = Dropout(0.2)(bitmap)
        bitmap = Conv2D(256, (3, 3),
                        padding='same',
                        activation='relu',
                        kernel_regularizer=regularization,
                        bias_regularizer=regularization)(bitmap)
        bitmap = BatchNormalization()(bitmap)
        bitmap = Dropout(0.2)(bitmap)
        bitmap = Conv2D(512, (3, 3),
                        padding='same',
                        activation='relu',
                        kernel_regularizer=regularization,
                        bias_regularizer=regularization)(bitmap)
        bitmap = BatchNormalization()(bitmap)
        bitmap = Dropout(0.2)(bitmap)
        bitmap = Conv2D(1, (3, 3),
                        padding='same',
                        activation='sigmoid',
                        kernel_regularizer=regularization,
                        bias_regularizer=regularization)(bitmap)

        super().__init__(inputs=input_img, outputs=[net, bitmap])

        schedule = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=0.001,
            decay_steps=args.epochs * 45000 / 500,
            end_learning_rate=0.0001)
        self.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=schedule),
            loss=[
                tf.keras.losses.CategoricalCrossentropy(from_logits=False,
                                                        label_smoothing=0.1),
                tf.keras.losses.BinaryCrossentropy()
            ],
            # one metric list per output head (classifier, bitmap)
            metrics=[
                [tf.keras.metrics.CategoricalAccuracy(name="accuracy")],
                [tf.keras.metrics.BinaryAccuracy(name="accuracy")]
            ])

        self.tb_callback = tf.keras.callbacks.TensorBoard(args.logdir,
                                                          update_freq=1000,
                                                          profile_batch=1)
        self.tb_callback.on_train_end = lambda *_: None
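
An instantiation sketch; the class name Net is hypothetical (only __init__ is shown), and args just needs epochs and logdir:

import argparse

args = argparse.Namespace(epochs=10, logdir='logs')
model = Net(args)  # Net: hypothetical name of this tf.keras.Model subclass
model.summary()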
Exemplo n.º 21
    def __init__(self, **kwargs):
        train_shape = kwargs['train_shape']

        image_input = Input(shape=train_shape, name='input_img')

        def _conv_block(inp, convs, do_skip=True):
            x = inp
            count = 0

            for conv in convs:
                if count == (len(convs) - 2) and do_skip:
                    skip_connection = x
                count += 1

                if conv['stride'] > 1:
                    # unlike tensorflow, darknet prefers left and top padding
                    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
                x = Conv2D(conv['filter'],
                           conv['kernel'],
                           strides=conv['stride'],
                           # unlike tensorflow, darknet prefers left and top padding
                           padding='valid' if conv['stride'] > 1 else 'same',
                           name='conv_' + str(conv['layer_idx']),
                           use_bias=not conv['bnorm'])(x)
                if conv['bnorm']:
                    x = BatchNormalization(
                        epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
                if conv['leaky']:
                    x = LeakyReLU(alpha=0.1, name='leaky_' +
                                  str(conv['layer_idx']))(x)

            return add([skip_connection, x]) if do_skip else x

        # Layer  0 => 4
        x = _conv_block(image_input, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
                                      {'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
                                      {'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
                                      {'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])

        # Layer  5 => 8
        x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
                            {'filter':  64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
                            {'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])

        # Layer  9 => 11
        x = _conv_block(x, [{'filter':  64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
                            {'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])

        # Layer 12 => 15
        x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
                            {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
                            {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])

        # Layer 16 => 36
        for i in range(7):
            x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
                                {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])

        skip_36 = x

        # Layer 37 => 40
        x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
                            {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
                            {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])

        # Layer 41 => 61
        for i in range(7):
            x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
                                {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])

        skip_61 = x

        # Layer 62 => 65
        x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
                            {'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
                            {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])

        # Layer 66 => 74
        for i in range(3):
            x = _conv_block(x, [{'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
                                {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])

        # Layer 75 => 79
        x = _conv_block(x, [{'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
                            {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
                            {'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
                            {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
                            {'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], do_skip=False)

        # Layer 80 => 82
        pred_yolo_1 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': 80},
                                    #   {'filter': pred_filter_count, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}
                                      ], do_skip=False)

        # Layer 83 => 86
        x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], do_skip=False)
        x = UpSampling2D(2)(x)
        x = concatenate([x, skip_61])

        # Layer 87 => 91
        x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
                            {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
                            {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
                            {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
                            {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], do_skip=False)

        # Layer 92 => 94
        pred_yolo_2 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': 92},
                                    #   {'filter': pred_filter_count, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}
                                      ], do_skip=False)

        # Layer 95 => 98
        x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True,   'layer_idx': 96}], do_skip=False)
        x = UpSampling2D(2)(x)
        x = concatenate([x, skip_36])

        # Layer 99 => 106
        pred_yolo_3 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': 99},
                                      {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': 100},
                                      {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': 101},
                                      {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': 102},
                                      {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': 103},
                                      {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': 104},
                                    #   {'filter': pred_filter_count, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}
                                      ], do_skip=False)

        self.outputs = [pred_yolo_1, pred_yolo_2, pred_yolo_3]
        self.inputs = [image_input]
        self.downgrades = [32, 16, 8]
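
The three prediction heads cover strides 32, 16, and 8, so the grid sizes follow directly from the input resolution; a quick check, assuming the common 416x416 YOLOv3 input (not stated in the original):

# Grid cells per side at each scale, assuming a 416x416 input (illustrative).
for stride in [32, 16, 8]:
    print(stride, 416 // stride)  # -> 13, 26, 52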
Example No. 22
def LikeUnet():
    # input (H and W must be divisible by 32: the encoder pools five times)
    im = Input(shape=(None, None, 1))
    pre = Conv2D(8, 3, padding='same', activation='relu')(im)
    # conv1
    x = Conv2D(16, 3, padding='same', name='conv1')(pre)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    conv1 = MaxPooling2D(2)(x)
    # conv2
    x = Conv2D(32, 3, padding='same', name='conv2')(conv1)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    conv2 = MaxPooling2D(2)(x)
    # conv3
    x = Conv2D(32, 3, padding='same', name='conv3')(conv2)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    conv3 = MaxPooling2D(2)(x)
    # conv4
    x = Conv2D(32, 3, padding='same', name='conv4')(conv3)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    conv4 = MaxPooling2D(2)(x)
    # conv5
    x = Conv2D(64, 3, padding='same', name='conv5')(conv4)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    conv5 = MaxPooling2D(2)(x)
    # transconv1
    x = Conv2D(64, 3, padding='same', name='conv6')(conv5)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2DTranspose(64, 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = UpSampling2D(2, interpolation='bilinear')(x)
    # transconv2
    x = Concatenate()([conv4, x])
    x = Conv2D(32, 3, padding='same', name='conv7')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2DTranspose(32, 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = UpSampling2D(2, interpolation='bilinear')(x)
    # transconv3
    x = Concatenate()([conv3, x])
    x = Conv2D(32, 3, padding='same', name='conv8')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2DTranspose(32, 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = UpSampling2D(2, interpolation='bilinear')(x)
    # transconv4
    x = Concatenate()([conv2, x])
    x = Conv2D(32, 3, padding='same', name='conv9')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2DTranspose(32, 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = UpSampling2D(2, interpolation='bilinear')(x)
    # transconv5
    x = Concatenate()([conv1, x])
    x = Conv2D(16, 3, padding='same', name='conv10')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2DTranspose(16, 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = UpSampling2D(2, interpolation='bilinear')(x)
    # output
    x = Conv2D(8, 3, padding='same', activation='relu')(x)
    out = Conv2D(2, 1, padding='same', activation='softmax')(x)
    return Model(inputs=im, outputs=out)
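
A usage sketch for LikeUnet: build it, push a random grayscale image through, and confirm the two-channel softmax output matches the input resolution (the import and test size below are illustrative).

import numpy as np

model = LikeUnet()
x = np.random.rand(1, 64, 64, 1).astype('float32')  # 64 is divisible by 32
y = model.predict(x)
print(y.shape)  # (1, 64, 64, 2); per-pixel softmax over two classes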
Example No. 23
File: WGAN.py Project: Zachdr1/WGAN
    def generator(self):
        # Input size = 100
        inputs = Input(shape=(100, ))
        # input_shape is redundant in the functional API; the Input defines it
        x = Dense(4 * 4 * 1024)(inputs)
        x = Reshape(target_shape=(4, 4, 1024))(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        # Output size = 4x4x1024

        # Input size = 4x4x1024
        x = Conv2D(filters=512, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)
        # Output size = 8x8x512

        # Input size = 8x8x512
        x = Conv2D(filters=256, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)
        # Output size = 16x16x256

        # Input size = 16x16x256
        x = Conv2D(filters=256, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        # Output size = 16x16x256

        # Input size = 16x16x256
        x = Conv2D(filters=128, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)

        # Output size = 32x32x128

        # Input size = 32x32x128
        x = Conv2D(filters=128, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)
        # Output size = 64x64x128

        # Input size = 64x64x128
        x = Conv2D(filters=128, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        x = UpSampling2D()(x)
        # Output size = 128x128x128

        # Input size = 128x128x128
        x = Conv2D(filters=128, kernel_size=5, padding='same',
                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.02)(x)
        # Output size = 128x128x128

        # Input size = 128x128x128
        x = Conv2D(filters=3, kernel_size=5, padding='same', use_bias=False)(x)
        out = Activation('tanh')(x)
        # Output size = 128x128x3

        net = Model(inputs=inputs, outputs=out)

        return net
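
A sampling sketch for the generator above; `gan` stands for an assumed WGAN instance exposing this method, and the batch size is a placeholder.

import numpy as np

# Hypothetical usage -- `gan` is an assumed WGAN instance.
G = gan.generator()
z = np.random.normal(size=(4, 100)).astype('float32')
fake = G.predict(z)
print(fake.shape)  # (4, 128, 128, 3), values in [-1, 1] from the tanh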
Example No. 24
    def to_tf(self, tensors):
        from tensorflow.keras.layers import UpSampling2D
        return UpSampling2D(size=(self.stride, self.stride))(tensors[-1])
input_img = Input(
    shape=(28, 28,
           1))  # adapt this if using `channels_first` image data format

x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)

# at this point the representation is (4, 4, 8) i.e. 128-dimensional

x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
#%% Train model

# `x_train` is assumed to be preprocessed (N, 28, 28, 1) image data.
autoencoder.fit(x_train,
                x_train,
                epochs=50,
                batch_size=256,
                shuffle=True)
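
After training, reconstructions come straight from predict(); a short sketch, where `x_test` is an assumed hold-out set matching x_train's preprocessing.

decoded_imgs = autoencoder.predict(x_test)
print(decoded_imgs.shape)  # (N, 28, 28, 1), values in [0, 1] from the sigmoid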
def NAL_stage_2(input, filters):
    '''
    The second attention module of the naive-attention-learning (NAL) model:
    mixed attention with one skip connection in the soft mask branch.

    inputs:
    parameter input: input tensor.
    parameter filters: a vector of length 3 giving the number of output
        filters used in each convolution of the residual unit.

    outputs:
    X: output tensor.
    '''

    F1, F2, F3 = filters

    # p = 1: one residual unit before the trunk/mask split
    X = residual_unit(input, filters, s=1)

    # t = 2: trunk branch with two residual units
    trunk = residual_unit(X, filters, s=1)
    trunk = residual_unit(trunk, filters, s=1)

    # soft mask branch, r = 1 residual unit between adjacent pooling layers
    # downsample with max pooling
    X = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(X)
    X = residual_unit(X, filters, s=1)

    X_down = residual_unit(X, filters, s=1)

    X = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(X)
    X = residual_unit(X, filters, s=1)
    X = residual_unit(X, filters, s=1)
    X = UpSampling2D()(X)

    X = Add()([X, X_down])

    X = residual_unit(X, filters, s=1)
    X = UpSampling2D()(X)

    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               bias_initializer='zeros',
               kernel_initializer=tf.keras.initializers.RandomNormal(
                   mean=0.0, stddev=tf.sqrt(2 / (F3 * 1 * 1))))(X)

    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               bias_initializer='zeros',
               kernel_initializer=tf.keras.initializers.RandomNormal(
                   mean=0.0, stddev=tf.sqrt(2 / (F3 * 1 * 1))))(X)

    X = Activation('sigmoid')(X)

    # apply the soft mask to the trunk output
    X = Multiply()([X, trunk])

    # p = 1: one residual unit after merging
    X = residual_unit(X, filters, s=1)

    return X
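
The final Multiply implements "naive" attention, output = M(x) * T(x); attention residual learning would keep an identity path, (1 + M(x)) * T(x). A self-contained sketch of the two combinations, with `mask` and `trunk` standing in for the branch outputs:

import tensorflow as tf
from tensorflow.keras.layers import Add, Multiply

# Stand-in tensors for the sigmoid mask and the trunk features.
mask = tf.random.uniform((1, 8, 8, 4))   # in [0, 1], like a sigmoid output
trunk = tf.random.normal((1, 8, 8, 4))
out_nal = Multiply()([mask, trunk])                    # M(x) * T(x)
out_arl = Add()([trunk, Multiply()([mask, trunk])])    # (1 + M(x)) * T(x)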
Example No. 27
def final_layer(x, classes=1):
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(classes, 1, use_bias=False, kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('sigmoid', name='Classification')(x)
    return x
Example No. 28
def fpn_top_down(input_tensors, top_down_pyramid_size, use_bias, weight_decay, trainable, bn_trainable):
	'''
	Top-down pathway of the Feature Pyramid Network, https://arxiv.org/pdf/1612.03144.pdf
	Arguments
		input_tensors: backbone feature maps [C2, C3, C4, C5], each at half
			the spatial resolution of the previous one
		top_down_pyramid_size: number of channels in every pyramid level
		use_bias, weight_decay: convolution settings
		trainable, bn_trainable: whether conv / batch-norm weights are updated
	Return
		[P2, P3, P4, P5]: pyramid feature maps after the RFE modules
	'''

	C2, C3, C4, C5 = input_tensors

	L4 = Conv2D(
		filters=top_down_pyramid_size, 
		kernel_size=[3, 3], 
		padding='same', 
		use_bias=use_bias, 
		kernel_regularizer=regularizers.l2(weight_decay), 
		trainable=trainable, 
		name='lateral_P4')(C4)
	L4 = BatchNormalization(trainable=bn_trainable, name='lateral_P4_bn')(L4)
	L4 = Activation('relu')(L4)

	L3 = Conv2D(
		filters=top_down_pyramid_size, 
		kernel_size=[3, 3], 
		padding='same', 
		use_bias=use_bias, 
		kernel_regularizer=regularizers.l2(weight_decay), 
		trainable=trainable, 
		name='lateral_P3')(C3)
	L3 = BatchNormalization(trainable=bn_trainable, name='lateral_P3_bn')(L3)
	L3 = Activation('relu')(L3)

	L2 = Conv2D(
		filters=top_down_pyramid_size, 
		kernel_size=[3, 3], 
		padding='same', 
		trainable=trainable, 
		use_bias=use_bias, 
		kernel_regularizer=regularizers.l2(weight_decay), 
		name='lateral_P2')(C2)
	L2 = BatchNormalization(trainable=bn_trainable, name='lateral_P2_bn')(L2)
	L2 = Activation('relu')(L2)

	M5 = Conv2D(
		filters=top_down_pyramid_size, 
		kernel_size=[3, 3], 
		padding='same', 
		use_bias=use_bias, 
		kernel_regularizer=regularizers.l2(weight_decay), 
		trainable=trainable, 
		name='M5')(C5)
	M5 = BatchNormalization(trainable=bn_trainable, name='M5_bn')(M5)
	M5 = Activation('relu')(M5)
	M4 = Add(name='M4')([UpSampling2D(size=(2, 2), interpolation='bilinear')(M5), L4])
	M3 = Add(name='M3')([UpSampling2D(size=(2, 2), interpolation='bilinear')(M4), L3])
	M2 = Add(name='M2')([UpSampling2D(size=(2, 2), interpolation='bilinear')(M3), L2])

	P5 = RFE(input_tensor=M5, module_name='RFE4', trainable=trainable, bn_trainable=bn_trainable, use_bias=use_bias, weight_decay=weight_decay)
	P4 = RFE(input_tensor=M4, module_name='RFE3', trainable=trainable, bn_trainable=bn_trainable, use_bias=use_bias, weight_decay=weight_decay)
	P3 = RFE(input_tensor=M3, module_name='RFE2', trainable=trainable, bn_trainable=bn_trainable, use_bias=use_bias, weight_decay=weight_decay)
	P2 = RFE(input_tensor=M2, module_name='RFE1', trainable=trainable, bn_trainable=bn_trainable, use_bias=use_bias, weight_decay=weight_decay)

	return [P2, P3, P4, P5]
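
A shape-level sketch of the pathway above, using dummy backbone tensors and an identity stand-in for the RFE module (which is defined elsewhere in the project); all sizes and channel counts are illustrative only.

from tensorflow.keras.layers import Input

def RFE(input_tensor, module_name, **kwargs):
    # Identity stand-in for the real RFE module, for shape checking only.
    return input_tensor

# Dummy backbone outputs at strides 4, 8, 16, 32 of a 256x256 input.
C2, C3, C4, C5 = (Input((64, 64, 256)), Input((32, 32, 512)),
                  Input((16, 16, 1024)), Input((8, 8, 2048)))
P2, P3, P4, P5 = fpn_top_down([C2, C3, C4, C5], top_down_pyramid_size=256,
                              use_bias=False, weight_decay=1e-4,
                              trainable=True, bn_trainable=True)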
Example No. 29
def unet(input_size=(256, 256, 1), base=64, batch_norm=True, dropout=True,
         drop_r=0.5, spatial_drop=False, spatial_drop_r=0.1,
         multi_class=False, classes=3):
    """U-Net model.

    Optionally introduces batch normalization, dropout (rate drop_r),
    spatial dropout (rate spatial_drop_r), and multi-class segmentation
    with `classes` output channels.

    Note: the original version relied on an undefined global `Base` and
    built the network with `Sequential`, which cannot express skip
    connections, so the concatenations were silently lost. This functional
    rewrite assumes a filter base of 64 and keeps the original block layout.
    """

    def conv_block(x, filters, kernel=3):
        # 3x3 conv (2x2 in the up-sampling path), optional BN, then ReLU
        x = Conv2D(filters, kernel, padding='same',
                   kernel_initializer='he_normal')(x)
        if batch_norm:
            x = BatchNormalization()(x)
        return Activation('relu')(x)

    def up_block(x, skip, filters):
        # upsample, 2x2 conv, concatenate the skip, then two 3x3 convs
        x = UpSampling2D(size=(2, 2))(x)
        x = conv_block(x, filters, kernel=2)
        x = Concatenate()([skip, x])
        x = conv_block(x, filters)
        return conv_block(x, filters)

    inputs = Input(input_size)

    # Encoder
    c1 = conv_block(conv_block(inputs, base), base)
    x = SpatialDropout2D(spatial_drop_r)(c1) if spatial_drop else c1
    x = MaxPooling2D(pool_size=(2, 2))(x)

    c2 = conv_block(conv_block(x, base * 2), base * 2)
    x = SpatialDropout2D(spatial_drop_r)(c2) if spatial_drop else c2
    x = MaxPooling2D(pool_size=(2, 2))(x)

    c3 = conv_block(conv_block(x, base * 4), base * 4)
    x = SpatialDropout2D(spatial_drop_r)(c3) if spatial_drop else c3
    x = MaxPooling2D(pool_size=(2, 2))(x)

    c4 = conv_block(conv_block(x, base * 8), base * 8)
    if dropout:
        c4 = Dropout(drop_r)(c4)  # the skip connection uses the dropped tensor
    x = SpatialDropout2D(spatial_drop_r)(c4) if spatial_drop else c4
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Bottleneck
    x = conv_block(conv_block(x, base * 16), base * 16)
    if dropout:
        x = Dropout(drop_r)(x)

    # Decoder
    x = up_block(x, c4, base * 8)
    x = up_block(x, c3, base * 4)
    x = up_block(x, c2, base * 2)
    x = up_block(x, c1, base)
    x = conv_block(x, base)  # the original has a third 3x3 conv at this level

    # Output head
    if not multi_class:
        outputs = Conv2D(1, 1, activation='sigmoid')(x)
    else:
        x = Conv2D(classes, 1)(x)
        x = BatchNormalization()(x)
        outputs = Activation('softmax')(x)

    unet_model = Model(inputs=inputs, outputs=outputs)
    unet_model.summary()
    return unet_model
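
A quick compile-and-shape check for the rewritten unet; the optimizer and loss are illustrative choices, not from the original.

model = unet(input_size=(256, 256, 1), multi_class=False)
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])
print(model.output_shape)  # (None, 256, 256, 1)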
Example No. 30
def upsample(x):
    return UpSampling2D(2)(x)