Example #1
    def get_unet(self):
        inputs = Input((self.img_rows, self.img_cols, 1))
        '''
        U-Net variant with cropping (because padding = 'valid')

        conv1 = Conv2D(64, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(inputs)
        print "conv1 shape:",conv1.shape
        conv1 = Conv2D(64, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv1)
        print "conv1 shape:",conv1.shape
        crop1 = Cropping2D(cropping=((90,90),(90,90)))(conv1)
        print "crop1 shape:",crop1.shape
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        print "pool1 shape:",pool1.shape

        conv2 = Conv2D(128, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(pool1)
        print "conv2 shape:",conv2.shape
        conv2 = Conv2D(128, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv2)
        print "conv2 shape:",conv2.shape
        crop2 = Cropping2D(cropping=((41,41),(41,41)))(conv2)
        print "crop2 shape:",crop2.shape
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        print "pool2 shape:",pool2.shape

        conv3 = Conv2D(256, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(pool2)
        print "conv3 shape:",conv3.shape
        conv3 = Conv2D(256, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv3)
        print "conv3 shape:",conv3.shape
        crop3 = Cropping2D(cropping=((16,17),(16,17)))(conv3)
        print "crop3 shape:",crop3.shape
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        print "pool3 shape:",pool3.shape

        conv4 = Conv2D(512, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(pool3)
        conv4 = Conv2D(512, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        crop4 = Cropping2D(cropping=((4,4),(4,4)))(drop4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

        conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(pool4)
        conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)

        up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
        merge6 = merge([crop4,up6], mode = 'concat', concat_axis = 3)
        conv6 = Conv2D(512, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(merge6)
        conv6 = Conv2D(512, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv6)

        up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
        merge7 = merge([crop3,up7], mode = 'concat', concat_axis = 3)
        conv7 = Conv2D(256, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(merge7)
        conv7 = Conv2D(256, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv7)

        up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
        merge8 = merge([crop2,up8], mode = 'concat', concat_axis = 3)
        conv8 = Conv2D(128, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(merge8)
        conv8 = Conv2D(128, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv8)

        up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
        merge9 = merge([crop1,up9], mode = 'concat', concat_axis = 3)
        conv9 = Conv2D(64, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(merge9)
        conv9 = Conv2D(64, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv9)
        conv9 = Conv2D(2, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv9)
        '''

        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(inputs)
        print("conv1 shape:", conv1.shape)
        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv1)
        print("conv1 shape:", conv1.shape)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        print("pool1 shape:", pool1.shape)

        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool1)
        print("conv2 shape:", conv2.shape)
        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv2)
        print("conv2 shape:", conv2.shape)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        print("pool2 shape:", pool2.shape)

        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool2)
        print("conv3 shape:", conv3.shape)
        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv3)
        print("conv3 shape:", conv3.shape)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        print("pool3 shape:", pool3.shape)

        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)

        up6 = Conv2D(512,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(drop5))
        merge6 = concatenate([drop4, up6], axis=3)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge6)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv6)

        up7 = Conv2D(256,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv6))
        merge7 = concatenate([conv3, up7], axis=3)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge7)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv7)

        up8 = Conv2D(128,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv7))
        merge8 = concatenate([conv2, up8], axis=3)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv8)

        up9 = Conv2D(64,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv8))
        merge9 = concatenate([conv1, up9], axis=3)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge9)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv9 = Conv2D(2,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

        model = Model(inputs=inputs, outputs=conv10)
        model.summary()
        model.compile(optimizer=Adam(lr=1e-4),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        return model
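The active branch above keeps padding='same', so encoder and decoder feature maps already share spatial dimensions and the Cropping2D calls from the commented-out 'valid' variant are no longer needed. A minimal sketch of that shape check (the 512x512 input size is an assumption, not taken from the snippet):

from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate

inp = Input((512, 512, 1))
c1 = Conv2D(64, 3, activation='relu', padding='same')(inp)          # 512x512x64
p1 = MaxPooling2D(pool_size=(2, 2))(c1)                             # 256x256x64
up = UpSampling2D(size=(2, 2))(Conv2D(64, 2, padding='same')(p1))   # back to 512x512x64
skip = concatenate([c1, up], axis=3)                                 # shapes already match, no Cropping2D needed
print(skip.shape)  # (None, 512, 512, 128)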
Example #2
encoder_output = BatchNormalization()(fusion_output)
fusion_output = Conv2D(256*3, (1, 1), activation='relu', padding='same')(fusion_output)

# Attention
# print(fusion_output.shape)
# attention_output = Reshape([32 * 32, 256 * 3])(fusion_output)
# att_vector = MultiHeadsAttModel(l=32 * 32, d=256 * 3, dv=16 * 3, dout=64, nv=16)
# attention_output = att_vector([attention_output, attention_output, attention_output])
# print(attention_output.shape)
# attention_output = Reshape([32,32, 64])(attention_output)
# attention_output = BatchNormalization()(attention_output)
# print(attention_output.shape)

# Decoder
decoder_output = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer='glorot_normal')(fusion_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='glorot_normal')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer='glorot_normal')(decoder_output)
decoder_output = Conv2D(16, (3, 3), activation='relu', padding='same', kernel_initializer='glorot_normal')(decoder_output)
decoder_output = Conv2D(2, (3, 3), activation='tanh', padding='same', kernel_initializer='glorot_normal')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)

model = Model(inputs=[encoder_input, embed_input], outputs=decoder_output)

print(model.summary())


# Create embedding
def create_inception_embedding(grayscaled_rgb):
    grayscaled_rgb_resized = []
Example #3
        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    # each of these builds a LAGAN-inspired [arXiv/1701.05927] component with
    # linear last layer
    img_layer0 = build_generator(h, 5, 20)
    img_layer1 = build_generator(h, 20, 20)
    img_layer2 = build_generator(h, 20, 10)

    if not no_attn:

        logger.info('using attentional mechanism')

        # resizes from (5, 20) => (20, 20)
        zero2one = AveragePooling2D(pool_size=(1, 1))(
            UpSampling2D(size=(4, 1))(img_layer0))
        img_layer1 = inpainting_attention(img_layer1, zero2one)

        # resizes from (20, 20) => (20, 10)
        one2two = AveragePooling2D(pool_size=(1, 2))(img_layer1)
        img_layer2 = inpainting_attention(img_layer2, one2two)

    generator_outputs = [
        Activation('relu')(img_layer0),
        Activation('relu')(img_layer1),
        Activation('relu')(img_layer2)
    ]

    generator = Model(generator_inputs, generator_outputs)

    generator.compile(optimizer=Adam(lr=gen_lr, beta_1=adam_beta_1),
Example #4
    def build_decoder(self, input_shape, relu_target):
        '''Build the decoder architecture that reconstructs from a given VGG relu layer.

            Args:
                input_shape: Tuple of input tensor shape, needed for channel dimension
                relu_target: Layer of VGG to decode from
        '''
        decoder_num = dict(
            zip(['relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1'],
                range(1, 6)))[relu_target]

        # Dict specifying the layers for each decoder level. relu5_1 is the deepest decoder and will contain all layers
        decoder_archs = {
            5:
            [  #    layer    filts      HxW  / InC->OutC                                     
                (Conv2DReflect, 512),  # 16x16 / 512->512
                (UpSampling2D, ),  # 16x16 -> 32x32
                (Conv2DReflect, 512),  # 32x32 / 512->512
                (Conv2DReflect, 512),  # 32x32 / 512->512
                (Conv2DReflect, 512)
            ],  # 32x32 / 512->512
            4: [
                (Conv2DReflect, 256),  # 32x32 / 512->256
                (UpSampling2D, ),  # 32x32 -> 64x64
                (Conv2DReflect, 256),  # 64x64 / 256->256
                (Conv2DReflect, 256),  # 64x64 / 256->256
                (Conv2DReflect, 256)
            ],  # 64x64 / 256->256
            3: [
                (Conv2DReflect, 128),  # 64x64 / 256->128
                (UpSampling2D, ),  # 64x64 -> 128x128
                (Conv2DReflect, 128)
            ],  # 128x128 / 128->128
            2: [
                (Conv2DReflect, 64),  # 128x128 / 128->64
                (UpSampling2D, )
            ],  # 128x128 -> 256x256
            1: [(Conv2DReflect, 64)]  # 256x256 / 64->64
        }

        code = Input(shape=input_shape, name='decoder_input_' + relu_target)
        x = code

        ### Work backwards from the deepest decoder and build layer by layer
        decoders = reversed(range(1, decoder_num + 1))
        count = 0
        for d in decoders:
            for layer_tup in decoder_archs[d]:
                # Unique layer names are needed to ensure var naming consistency with multiple decoders in graph
                layer_name = '{}_{}'.format(relu_target, count)

                if layer_tup[0] == Conv2DReflect:
                    x = Conv2DReflect(layer_name,
                                      filters=layer_tup[1],
                                      kernel_size=3,
                                      padding='valid',
                                      activation='relu',
                                      name=layer_name)(x)
                elif layer_tup[0] == UpSampling2D:
                    x = UpSampling2D(name=layer_name)(x)

                count += 1

        layer_name = '{}_{}'.format(relu_target, count)
        output = Conv2DReflect(layer_name,
                               filters=3,
                               kernel_size=3,
                               padding='valid',
                               activation=None,
                               name=layer_name)(x)  # 256x256 / 64->3

        decoder_model = Model(code,
                              output,
                              name='decoder_model_' + relu_target)
        print(decoder_model.summary())
        return decoder_model
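Conv2DReflect is a custom layer defined elsewhere in that repository; a minimal sketch of the pattern it usually stands for, reflection padding followed by a 'valid' convolution so the spatial size is preserved (the helper name and default kernel size here are assumptions):

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Lambda

def conv2d_reflect(x, filters, kernel_size=3):
    pad = kernel_size // 2
    # reflection-pad height and width, then convolve with padding='valid'
    x = Lambda(lambda t: tf.pad(t, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT'))(x)
    return Conv2D(filters, kernel_size, padding='valid', activation='relu')(x)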
Example #5
def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)

    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith(
        '.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    weights_header = np.ndarray(
        shape=(5, ), dtype='int32', buffer=weights_file.read(20))
    print('Weights Header: ', weights_header)
    # TODO: Check transpose flag when implementing fully connected layers.
    # transpose = (weight_header[0] > 1000) or (weight_header[1] > 1000)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    if args.fully_convolutional:
        image_height, image_width = None, None
    else:
        image_height = int(cfg_parser['net_0']['height'])
        image_width = int(cfg_parser['net_0']['width'])

    prev_layer = Input(shape=(image_height, image_width, 3))
    all_layers = [prev_layer]
    outputs = []

    weight_decay = float(cfg_parser['net_0']['decay']
                         ) if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0

    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            # TODO: This assumes channel last dim_ordering.
            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)

            print('conv2d', 'bn'
                  if batch_normalize else '  ', activation, weights_shape)

            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters),
                    dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters

                # TODO: Keras BatchNormalization mistakenly refers to var
                # as std.
                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]

            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            # TODO: Add check for Theano dim ordering.
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            padding = 'same' if pad == 1 and stride == 1 else 'valid'
            # Adjust padding model for darknet.
            if stride == 2:
                prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)

            # Create Conv2D layer
            conv_layer = (Conv2D(
                filters, (size, size),
                strides=(stride, stride),
                kernel_regularizer=l2(weight_decay),
                use_bias=not batch_normalize,
                weights=conv_weights,
                activation=act_fn,
                padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)

            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)
        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    padding='same',
                    pool_size=(size, size),
                    strides=(stride, stride))(prev_layer))
            prev_layer = all_layers[-1]
        elif section.startswith('avgpool'):
            if cfg_parser.items(section) != []:
                raise ValueError('{} with params unsupported.'.format(section))
            all_layers.append(GlobalAveragePooling2D()(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]

            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = concatenate(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('shortcut'):
            ids = [int(i) for i in cfg_parser[section]['from'].split(',')][0]
            activation = cfg_parser[section]['activation']
            shortcut = add([all_layers[ids], prev_layer])
            if activation == 'linear':
                shortcut = Activation('linear')(shortcut)
            all_layers.append(shortcut)
            prev_layer = all_layers[-1]

        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                UpSampling2D(
                    size=(stride, stride))(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('yolo'):
            classes = int(cfg_parser[section]['classes'])
            # num = int(cfg_parser[section]['num'])
            # mask = int(cfg_parser[section]['mask'])
            n1, n2 = int(prev_layer.shape[1]), int(prev_layer.shape[2])
            n3 = 3
            n4 = (4 + 1 + classes)
            yolo = Reshape((n1, n2, n3, n4))(prev_layer)
            all_layers.append(yolo)
            prev_layer = all_layers[-1]
            outputs.append(len(all_layers) - 1)

        elif (section.startswith('net')):
            pass  # Configs not currently handled during model definition.
        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    model = Model(inputs=all_layers[0],
                  outputs=[all_layers[i] for i in outputs])
    print(model.summary())
    model.save('{}'.format(output_path))
    print('Saved Keras model to {}'.format(output_path))
    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(count, count +
                                                       remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
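The script above calls unique_config_sections, which is defined elsewhere in the converter; its purpose is to rename duplicate Darknet section headers so configparser accepts them (which is also why the code reads 'net_0'). A minimal sketch of that helper, under the same assumption:

import io
from collections import defaultdict

def unique_config_sections(config_file):
    """Rewrite [convolutional] -> [convolutional_0], [convolutional_1], ... in memory."""
    section_counters = defaultdict(int)
    output_stream = io.StringIO()
    with open(config_file) as fin:
        for line in fin:
            if line.startswith('['):
                section = line.strip().strip('[]')
                line = '[{}_{}]\n'.format(section, section_counters[section])
                section_counters[section] += 1
            output_stream.write(line)
    output_stream.seek(0)
    return output_stream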
Example #6
def model_yang(input_shape):
    model = Sequential(
        [
            ############# ENCODER ###############
            # 224x224x1
            Conv2D(64, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv0',
                   input_shape=input_shape),
            BatchNormalization(axis=3, name='bn0'),
            Activation('relu'),

            # 224x224x64
            Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='conv1'),
            BatchNormalization(axis=3, name='bn1'),
            Activation('relu'),
            # 224x224x64
            MaxPooling2D((2, 2), strides=(2, 2), name='max_pool1'),

            # 112x112x64
            Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv2'),
            BatchNormalization(axis=3, name='bn2'),
            Activation('relu'),

            # 112x112x128
            Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv3'),
            BatchNormalization(axis=3, name='bn3'),
            Activation('relu'),
            MaxPooling2D((2, 2), strides=(2, 2), name='max_pool3'),

            # 56x56x128
            Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv4'),
            BatchNormalization(axis=3, name='bn4'),
            Activation('relu'),

            # 56x56x256
            Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv5'),
            BatchNormalization(axis=3, name='bn5'),
            Activation('relu'),

            # 56x56x256
            Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv6'),
            BatchNormalization(axis=3, name='bn6'),
            Activation('relu'),
            # 56x56x256
            MaxPooling2D((2, 2), strides=(2, 2), name='max_pool6'),

            # 28x28x256
            Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv7'),
            BatchNormalization(axis=3, name='bn7'),
            Activation('relu'),

            # 28x28x512
            Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv8'),
            BatchNormalization(axis=3, name='bn8'),
            Activation('relu'),

            # 28x28x512
            Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv9'),
            BatchNormalization(axis=3, name='bn9'),
            Activation('relu'),
            # 28x28x512
            MaxPooling2D((2, 2), strides=(2, 2), name='max_pool9'),

            # 14x14x512
            Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv10'),
            BatchNormalization(axis=3, name='bn10'),
            Activation('relu'),

            # 14x14x512
            Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv11'),
            BatchNormalization(axis=3, name='bn11'),
            Activation('relu'),

            # 14x14x512
            Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv12'),
            BatchNormalization(axis=3, name='bn12'),
            Activation('relu'),
            # 14x14x512
            MaxPooling2D((2, 2), strides=(2, 2), name='max_pool12'),

            # 7x7x512
            Conv2D(4096, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv12_bis'),
            BatchNormalization(axis=3, name='bn12_bis'),
            Activation('relu'),

            ############# DECODER ###############
            # 7x7x4096
            Conv2D(512, (1, 1), strides=(1, 1), padding='same', name='conv13'),
            BatchNormalization(axis=3, name='bn13'),
            Activation('relu'),
            Dropout(0.5),

            # 7x7x512
            UpSampling2D(size=(2, 2), name='upsampling14'),
            Conv2D(512, (5, 5), strides=(1, 1), padding='same', name='conv14'),
            BatchNormalization(axis=3, name='bn14'),
            Activation('relu'),
            Dropout(0.5),

            # 14x14x512
            UpSampling2D(size=(2, 2), name='upsampling15'),
            Conv2D(256, (5, 5), strides=(1, 1), padding='same', name='conv15'),
            BatchNormalization(axis=3, name='bn15'),
            Activation('relu'),
            Dropout(0.5),

            # 28x28x256
            UpSampling2D(size=(2, 2), name='upsampling16'),
            Conv2D(128, (5, 5), strides=(1, 1), padding='same', name='conv16'),
            BatchNormalization(axis=3, name='bn16'),
            Activation('relu'),
            Dropout(0.5),

            # 56x56x128
            UpSampling2D(size=(2, 2), name='upsampling17'),
            Conv2D(64, (5, 5), strides=(1, 1), padding='same', name='conv17'),
            BatchNormalization(axis=3, name='bn17'),
            Activation('relu'),
            Dropout(0.5),

            # 112x112x64
            UpSampling2D(size=(2, 2), name='upsampling18'),
            Conv2D(32, (5, 5), strides=(1, 1), padding='same', name='conv18'),
            BatchNormalization(axis=3, name='bn18'),
            Activation('relu'),
            Dropout(0.5),

            # 224x224x32
            Conv2D(1, (5, 5), strides=(1, 1), padding='same', name='conv19'),
            BatchNormalization(axis=3, name='bn19'),
            Activation('sigmoid'),
        ],
        name='model_yang')

    # 224x224x1

    return model
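A minimal usage sketch for the function above; the 224x224x1 input size comes from the comments in the model, while the optimizer and loss are assumptions:

model = model_yang(input_shape=(224, 224, 1))
model.compile(optimizer='adam', loss='binary_crossentropy')  # assumed settings, not from the source
model.summary()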
Example #7
def create_model():
    # input_img = Input(shape=(720, 576, 1))  # adapt this if using `channels_first` image data format
    input_img = Input(shape=(120, 120, 1))

    # 256: 120
    x = Conv2D(256, (1, 1),
               padding='same',
               kernel_regularizer=l2(0.0001),
               kernel_constraint=maxnorm(3))(input_img)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               kernel_regularizer=l2(0.0001),
               kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    # 128:60
    x = Conv2D(128, (1, 1),
               padding='same',
               kernel_regularizer=l2(0.0001),
               kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)

    x = Conv2D(128, (3, 3),
               padding='same',
               kernel_regularizer=l2(0.0001),
               kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    # 64 : 30 == 32
    x = Conv2D(64, (1, 1),
               padding='same',
               kernel_regularizer=l2(0.0001),
               kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)

    x = Conv2D(64, (3, 3),
               padding='same',
               kernel_regularizer=l2(0.0001),
               kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D((2, 2), padding='same')(x)

    # 32: 15 == 16
    x = Conv2D(32, (1, 1),
               padding='same',
               kernel_regularizer=l2(0.0001),
               kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)

    x = Conv2D(32, (3, 3),
               padding='same',
               kernel_regularizer=l2(0.0001),
               kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)

    encoded = MaxPooling2D((2, 2), padding='same')(x)

    # at this point the encoded representation is (8, 8, 32), i.e. 2048-dimensional

    # 32
    x = Conv2DTranspose(32, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.0001),
                        kernel_constraint=maxnorm(3))(encoded)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(32, (1, 1),
                        padding='same',
                        kernel_regularizer=l2(0.0001),
                        kernel_constraint=maxnorm(3))(encoded)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)
    x = UpSampling2D((2, 2))(x)

    # 64
    x = Conv2DTranspose(64, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.0001),
                        kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(64, (1, 1),
                        padding='same',
                        kernel_regularizer=l2(0.0001),
                        kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)
    x = UpSampling2D((2, 2))(x)

    # 128
    x = Conv2DTranspose(128, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.0001),
                        kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(128, (1, 1),
                        padding='same',
                        kernel_regularizer=l2(0.0001),
                        kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)
    x = Cropping2D(cropping=((1, 1), (1, 1)))(x)
    x = UpSampling2D((2, 2))(x)

    # 256
    x = Conv2DTranspose(256, (3, 3),
                        padding='same',
                        kernel_regularizer=l2(0.0001),
                        kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)

    x = Conv2DTranspose(256, (1, 1),
                        padding='same',
                        kernel_regularizer=l2(0.0001),
                        kernel_constraint=maxnorm(3))(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)

    x = UpSampling2D((2, 2))(x)

    decoded = Conv2D(1, (3, 3),
                     activation='sigmoid',
                     padding='same',
                     kernel_regularizer=l2(0.01),
                     bias_regularizer=l2(0.01))(x)

    autoencoder = Model(input_img, decoded)

    encoder = Model(input_img, encoded)

    autoencoder.summary()

    return autoencoder, encoder
Example #8
    def get_unet_model_yuanqing(self):
        # Model inspired by https://github.com/yuanqing811/ISIC2018
        unet_input = Input(shape=(self.input_dim_x, self.input_dim_y,
                                  self.num_channels))

        conv1 = Conv2D(self.n_filters,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(unet_input)
        conv1 = Conv2D(self.n_filters,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Conv2D(self.n_filters * 2,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(pool1)
        conv2 = Conv2D(self.n_filters * 2,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(self.n_filters * 4,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(pool2)
        conv3 = Conv2D(self.n_filters * 4,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv3)
        conv3 = Conv2D(self.n_filters * 4,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(pool3)
        conv4 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv4)
        conv4 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(pool4)
        conv5 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv5)
        conv5 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv5)

        up6 = Conv2D(self.n_filters * 4, 2, activation='relu',
                     padding='same')(UpSampling2D(size=(2, 2))(conv5))
        feature4 = Conv2D(self.n_filters * 4,
                          kernel_size=3,
                          activation='relu',
                          padding='same')(conv4)
        concat6 = Concatenate()([feature4, up6])
        conv6 = Conv2D(self.n_filters * 4,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(concat6)
        conv6 = Conv2D(self.n_filters * 4,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv6)

        up7 = Conv2D(self.n_filters * 2, 2, activation='relu',
                     padding='same')(UpSampling2D(size=(2, 2))(conv6))
        feature3 = Conv2D(self.n_filters * 2,
                          kernel_size=3,
                          activation='relu',
                          padding='same')(conv3)
        concat7 = Concatenate()([feature3, up7])
        conv7 = Conv2D(self.n_filters * 2,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(concat7)
        conv7 = Conv2D(self.n_filters * 2,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv7)

        up8 = Conv2D(self.n_filters * 1, 2, activation='relu',
                     padding='same')(UpSampling2D(size=(2, 2))(conv7))
        feature2 = Conv2D(self.n_filters * 1,
                          kernel_size=3,
                          activation='relu',
                          padding='same')(conv2)
        concat8 = Concatenate()([feature2, up8])
        conv8 = Conv2D(self.n_filters * 1,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(concat8)
        conv8 = Conv2D(self.n_filters * 1,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv8)

        up9 = Conv2D(int(self.n_filters / 2),
                     2,
                     activation='relu',
                     padding='same')(UpSampling2D(size=(2, 2))(conv8))
        feature1 = Conv2D(int(self.n_filters / 2),
                          kernel_size=3,
                          activation='relu',
                          padding='same')(conv1)
        concat9 = Concatenate()([feature1, up9])
        conv9 = Conv2D(int(self.n_filters / 2),
                       kernel_size=3,
                       activation='relu',
                       padding='same')(concat9)
        conv9 = Conv2D(int(self.n_filters / 2),
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv9)
        conv9 = Conv2D(3, kernel_size=3, activation='relu',
                       padding='same')(conv9)
        conv10 = Conv2D(1, kernel_size=1, activation='sigmoid')(conv9)

        return Model(outputs=conv10, inputs=unet_input), 'unet_model_yuanqing'
Example #9
def get_unet():
    inputs = Input((1, img_rows, img_cols))
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu',
                          border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu',
                          border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv9)

    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

    model = Model(input=inputs, output=conv10)

    model.compile(optimizer=Adam(lr=1.0e-5),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])

    return model
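dice_coef and dice_coef_loss are not defined in this snippet; the usual Keras-backend formulation used with this kind of segmentation U-Net looks like the following (the smoothing constant is an assumption):

from keras import backend as K

smooth = 1.0

def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)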
Example #10
X_test_noise_4ch = X_test_noise.reshape(X_test_noise.shape[0], 1, X_test_noise.shape[1], X_test_noise.shape[2])

input_shape = [X_train_4ch.shape[1], X_train_4ch.shape[2], X_train_4ch.shape[3]]


###########################
#### FIRST AUTOENCODER ####

X_input = Input(input_shape)
x = Conv2D(32, (5,1), activation='relu')(X_input)
x = Conv2D(64, (7,1), activation='relu')(x)
x = Conv2D(128, (3,3), activation='relu')(x)
x = MaxPooling2D(name='encoded1')(x)
ae1_enc_shape = x.shape.as_list()
print(ae1_enc_shape)
x = UpSampling2D()(x)
x = Conv2DTranspose(64, (3,3), activation='relu')(x)
x = Conv2DTranspose(32, (7,1), activation='relu')(x)
x = Conv2DTranspose(1, (5,1))(x)

ae1 = Model(inputs=X_input, outputs=x, name='ae1')
ae1.compile(loss='mse', optimizer='rmsprop')

ae1.summary()

# train the model, if not already trained
if not Path("weights-ae1-long-gaus.h5").is_file():
    history = ae1.fit(x = X_train_noise_4ch, y = X_train_4ch,
                        epochs=general_conf['iterations'],
                        batch_size=general_conf['batch_size'],
                        callbacks=callbacks('ae1-long-gaus', True),
Example #11
    def get_unet_model_4_levels(self):
        unet_input = Input(shape=(self.input_dim_x, self.input_dim_y,
                                  self.num_channels))

        conv1 = Conv2D(self.n_filters * 2,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(unet_input)
        conv1 = Conv2D(self.n_filters * 2,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv1)
        conv1 = BatchNormalization()(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Conv2D(self.n_filters * 4,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(pool1)
        conv2 = Conv2D(self.n_filters * 4,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv2)
        conv2 = BatchNormalization()(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(pool2)
        conv3 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv3)
        conv3 = BatchNormalization()(conv3)
        drop3 = Dropout(0.5)(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(drop3)

        conv4 = Conv2D(self.n_filters * 16,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(pool3)
        conv4 = Conv2D(self.n_filters * 16,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv4)
        conv4 = BatchNormalization()(conv4)
        drop4 = Dropout(0.5)(conv4)

        up5 = Conv2D(self.n_filters * 16, 2, activation='relu',
                     padding='same')(UpSampling2D(size=(2, 2))(drop4))
        concat5 = Concatenate()([drop3, up5])
        conv5 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(concat5)
        conv5 = Conv2D(self.n_filters * 8,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv5)
        conv5 = BatchNormalization()(conv5)

        up6 = Conv2D(self.n_filters * 8, 2, activation='relu',
                     padding='same')(UpSampling2D(size=(2, 2))(conv5))
        concat6 = Concatenate()([conv2, up6])
        conv6 = Conv2D(self.n_filters * 4,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(concat6)
        conv6 = Conv2D(self.n_filters * 4,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv6)
        conv6 = BatchNormalization()(conv6)

        up7 = Conv2D(self.n_filters * 4, 2, activation='relu',
                     padding='same')(UpSampling2D(size=(2, 2))(conv6))
        concat7 = Concatenate()([conv1, up7])
        conv7 = Conv2D(self.n_filters * 2,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(concat7)
        conv7 = Conv2D(self.n_filters * 2,
                       kernel_size=3,
                       activation='relu',
                       padding='same')(conv7)
        conv7 = BatchNormalization()(conv7)

        conv9 = Conv2D(3, kernel_size=1, activation='sigmoid',
                       padding='same')(conv7)

        return Model(outputs=conv9, inputs=unet_input), 'unet_model_4_levels'
Example #12
#%% Compile the model structure with some generator data information

# Up-sampling convolutional network with LSTM layer
cs = generator.convolution_shape
cso = generator.output_convolution_shape

# Convolutional NN
input_0 = Input(shape=cs, name='input_0')
periodic_padding_2 = PeriodicPadding2D(padding=(0, 2),
                                       data_format='channels_first')
zero_padding_2 = ZeroPadding2D(padding=(2, 0), data_format='channels_first')
periodic_padding_1 = PeriodicPadding2D(padding=(0, 1),
                                       data_format='channels_first')
zero_padding_1 = ZeroPadding2D(padding=(1, 0), data_format='channels_first')
max_pooling_2 = MaxPooling2D(2, data_format='channels_first')
up_sampling_2 = UpSampling2D(2, data_format='channels_first')
conv_2d_1 = Conv2D(
    32, 3, **{
        'dilation_rate': 2,
        'padding': 'valid',
        'activation': 'tanh',
        'data_format': 'channels_first'
    })
conv_2d_2 = Conv2D(
    64, 3, **{
        'dilation_rate': 1,
        'padding': 'valid',
        'activation': 'tanh',
        'data_format': 'channels_first'
    })
conv_2d_3 = Conv2D(

Example #13
# Define the decoder
# x = Conv2D(16, (3, 3), activation='relu', padding='same')(encoded)
# x = UpSampling2D((2, 2))(x)
# x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
# print(x.shape)                         # ( ?, 2, 8, 32))
# x = Conv2D(16, (3, 3), activation='relu', padding='same')(encoded)
# x = UpSampling2D((2, 2))(x)
# x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
# print(x.shape)                         # ( ?, 4, 18, 32)
# x = UpSampling2D((2, 2))(x)
# x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
# print(x.shape)                         # ( ?, 8, 32, 32)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
print(x.shape)                         # ( ?, 4, 8, 32)
x = UpSampling2D((2, 2))(x)            ####****************####
x = Conv2D(24, (3, 3), activation='relu', padding='same')(x)
print(x.shape)                         # (?, 8, 16, 32)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
print(x.shape)                         # (?, 16, 32, 32)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
print(x.shape)                         # (?, 32, 64, 32)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(3, (3, 3), activation='relu', padding='same')(x)
print(decoded.shape)                   # (?, 64, 128, 3)
# decoded = UpSampling2D((4, 4))(x)
Example #14
    def get_small_unet(img_rows, img_cols):
        ## Redefining small U-net
        inputs = Input((img_rows, img_cols, 3))
        inputs_norm = Lambda(lambda x: x / 127.5 - 1.)
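        # note: this normalization Lambda is created here but never applied to `inputs` in this snippet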
        conv1 = Convolution2D(8, 3, 3, activation='relu',
                              border_mode='same')(inputs)
        conv1 = Convolution2D(8, 3, 3, activation='relu',
                              border_mode='same')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Convolution2D(16, 3, 3, activation='relu',
                              border_mode='same')(pool1)
        conv2 = Convolution2D(16, 3, 3, activation='relu',
                              border_mode='same')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Convolution2D(32, 3, 3, activation='relu',
                              border_mode='same')(pool2)
        conv3 = Convolution2D(32, 3, 3, activation='relu',
                              border_mode='same')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Convolution2D(64, 3, 3, activation='relu',
                              border_mode='same')(pool3)
        conv4 = Convolution2D(64, 3, 3, activation='relu',
                              border_mode='same')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = Convolution2D(128, 3, 3, activation='relu',
                              border_mode='same')(pool4)
        conv5 = Convolution2D(128, 3, 3, activation='relu',
                              border_mode='same')(conv5)

        up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                    mode='concat',
                    concat_axis=3)
        conv6 = Convolution2D(64, 3, 3, activation='relu',
                              border_mode='same')(up6)
        conv6 = Convolution2D(64, 3, 3, activation='relu',
                              border_mode='same')(conv6)

        up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                    mode='concat',
                    concat_axis=3)
        conv7 = Convolution2D(32, 3, 3, activation='relu',
                              border_mode='same')(up7)
        conv7 = Convolution2D(32, 3, 3, activation='relu',
                              border_mode='same')(conv7)

        up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                    mode='concat',
                    concat_axis=3)
        conv8 = Convolution2D(16, 3, 3, activation='relu',
                              border_mode='same')(up8)
        conv8 = Convolution2D(16, 3, 3, activation='relu',
                              border_mode='same')(conv8)

        up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                    mode='concat',
                    concat_axis=3)
        conv9 = Convolution2D(8, 3, 3, activation='relu',
                              border_mode='same')(up9)
        conv9 = Convolution2D(8, 3, 3, activation='relu',
                              border_mode='same')(conv9)

        conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

        model = Model(input=inputs, output=conv10)

        return model
Example #15
input_img = Input(shape=(96, 96, 3))


x = Conv2D(128, (3, 3), padding='same', activation='relu')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(128, (3, 3), padding='same', activation='relu')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(128, (3, 3), padding='same', activation='relu')(x)

encoded = MaxPooling2D((2, 2), padding='same')(x)

#x = BatchNormalization(axis=3)(x)
#x = Cropping2D(((0,0),(1,0)))(x)

x = Conv2D(128, (3, 3), padding='same', activation='relu')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(128, (3, 3), padding='same', activation='relu')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(128, (3, 3), padding='same', activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)


autoencoder = Model(input_img, decoded)
autoencoder.summary()

print(tuple([i.value for i in encoded.shape][1:]))

Ad = Adam(lr=0.000005)

autoencoder.compile(optimizer=Ad, loss='binary_crossentropy')
Example #16
def upsample_simple(filters, kernel_size, strides, padding):
    return UpSampling2D(strides)
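The unused filters/kernel_size/padding arguments let this factory share a signature with a transposed-convolution upsampler, so the two can be swapped in a decoder definition; a sketch of that counterpart (the name upsample_conv is an assumption):

from tensorflow.keras.layers import Conv2DTranspose

def upsample_conv(filters, kernel_size, strides, padding):
    return Conv2DTranspose(filters, kernel_size, strides=strides, padding=padding)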
Example #17
# Suppress TensorFlow compilation warnings.
tf.logging.set_verbosity(tf.logging.ERROR)

noise_size = 100

## G

z = Input(shape=[noise_size])

G = Dense(8 * 4 * 256)(z)
G = BatchNormalization(momentum=0.9)(G)
G = LeakyReLU(alpha=0.2)(G)
G = Reshape((4, 8, 256))(G)

G = UpSampling2D()(G)
G = Conv2D(128, (5, 5), padding='same')(G)
G = BatchNormalization(momentum=0.9)(G)
G = LeakyReLU(alpha=0.2)(G)

G = UpSampling2D()(G)
G = Conv2D(64, (5, 5), padding='same')(G)
G = BatchNormalization(momentum=0.9)(G)
G = LeakyReLU(alpha=0.2)(G)

G = UpSampling2D()(G)
G = Conv2D(32, (5, 5), padding='same')(G)
G = BatchNormalization(momentum=0.9)(G)
G = LeakyReLU(alpha=0.2)(G)

G = UpSampling2D()(G)
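The generator snippet stops after the last upsampling step; a DCGAN-style generator would typically finish with a convolution down to the image channels and a Model definition. A hedged sketch of such an ending (the output channel count and activation are assumptions, not part of the original):

# Assumed ending, not from the original source:
G_out = Conv2D(3, (5, 5), padding='same', activation='tanh')(G)  # RGB image in [-1, 1]
generator = Model(z, G_out)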
Example #18
Xtrain = 1.0 / 255 * Xtrain

model = Sequential()
model.add(InputLayer(input_shape=(256, 256, 1)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))

model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))

model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))

model.compile(optimizer='rmsprop', loss='mse')

# Image transformer
datagen = ImageDataGenerator(shear_range=0.2,
                             zoom_range=0.2,
                             rotation_range=20,
                             horizontal_flip=True)
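The model maps a 256x256 L (lightness) channel to two tanh-scaled output channels, which suggests predicting the a/b channels of Lab color. A hedged sketch of a generator that pairs augmented L inputs with scaled a/b targets (rgb2lab from scikit-image, the generator name, and the scaling are assumptions):

import numpy as np
from skimage.color import rgb2lab

def lab_batch_gen(batch_size=32):
    # Yield (L, ab) pairs from augmented RGB batches; ab is divided by 128 to roughly match tanh's range.
    for batch in datagen.flow(Xtrain, batch_size=batch_size):
        lab = rgb2lab(batch)
        X_batch = lab[:, :, :, 0][..., np.newaxis]   # L channel, shape (n, 256, 256, 1)
        Y_batch = lab[:, :, :, 1:] / 128.0           # a/b channels, roughly in [-1, 1]
        yield X_batch, Y_batch

# Older Keras versions use model.fit_generator instead of model.fit here.
model.fit(lab_batch_gen(), steps_per_epoch=100, epochs=10)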
Example #19
def make_yolov3_model():
    input_image = Input(shape=(None, None, 3))
    # Layer  0 => 4
    x = _conv_block(input_image, [{
        'filter': 32,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 0
    }, {
        'filter': 64,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 1
    }, {
        'filter': 32,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 2
    }, {
        'filter': 64,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 3
    }])
    # Layer  5 => 8
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 5
    }, {
        'filter': 64,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 6
    }, {
        'filter': 128,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 7
    }])
    # Layer  9 => 11
    x = _conv_block(x, [{
        'filter': 64,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 9
    }, {
        'filter': 128,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 10
    }])
    # Layer 12 => 15
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 12
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 13
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 14
    }])
    # Layer 16 => 36
    for i in range(7):
        x = _conv_block(x, [{
            'filter': 128,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 16 + i * 3
        }, {
            'filter': 256,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 17 + i * 3
        }])
    skip_36 = x
    # Layer 37 => 40
    x = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 37
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 38
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 39
    }])
    # Layer 41 => 61
    for i in range(7):
        x = _conv_block(x, [{
            'filter': 256,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 41 + i * 3
        }, {
            'filter': 512,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 42 + i * 3
        }])
    skip_61 = x
    # Layer 62 => 65
    x = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 62
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 63
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 64
    }])
    # Layer 66 => 74
    for i in range(3):
        x = _conv_block(x, [{
            'filter': 512,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 66 + i * 3
        }, {
            'filter': 1024,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 67 + i * 3
        }])
    # Layer 75 => 79
    x = _conv_block(x, [{
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 75
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 76
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 77
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 78
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 79
    }],
                    skip=False)
    # Layer 80 => 82
    yolo_82 = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 80
    }, {
        'filter': 255,
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 81
    }],
                          skip=False)
    # Layer 83 => 86
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 84
    }],
                    skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])
    # Layer 87 => 91
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 87
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 88
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 89
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 90
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 91
    }],
                    skip=False)
    # Layer 92 => 94
    yolo_94 = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 92
    }, {
        'filter': 255,
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 93
    }],
                          skip=False)
    # Layer 95 => 98
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 96
    }],
                    skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])
    # Layer 99 => 106
    yolo_106 = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 99
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 100
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 101
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 102
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 103
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 104
    }, {
        'filter': 255,
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 105
    }],
                           skip=False)
    model = Model(input_image, [yolo_82, yolo_94, yolo_106])
    return model
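make_yolov3_model relies on a _conv_block helper defined elsewhere in the source project. A plausible implementation, reconstructed as a sketch under the assumption that it follows the common Keras YOLOv3 port:

from keras.layers import Conv2D, BatchNormalization, LeakyReLU, ZeroPadding2D, add

def _conv_block(inp, convs, skip=True):
    x = inp
    count = 0
    for conv in convs:
        # Remember the tensor two layers before the end as the residual branch.
        if count == (len(convs) - 2) and skip:
            skip_connection = x
        count += 1
        # Strided convolutions use explicit top/left padding and 'valid', as in Darknet.
        if conv['stride'] > 1:
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'], conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=not conv['bnorm'])(x)
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
    return add([skip_connection, x]) if skip else x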
Example #20
def build_model():
    tf.logging.set_verbosity(tf.logging.ERROR)
    # Initialize an L2 regularizer from Keras
    l2_reg = l2(1e-3)

    # Input image of the specified shape (grayscale)
    # Input data is an (img_rows, img_cols, 1) tensor containing L (lightness) values
    input_tensor = Input(shape=(img_rows, img_cols, 1))

    # ----------------------------------------------------------------------------
    # ---------------------------------- Conv 1 ----------------------------------
    # ----------------------------------------------------------------------------
    # 64 (output channels), 3x3 kernel
    x = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv1_1', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(input_tensor)
    # Spatial resolution of output = 224
    x = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv1_2', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(2, 2))(x)
    # Spatial resolution of output = 112
    x = BatchNormalization()(x)
    # ----------------------------------------------------------------------------
    # ---------------------------------- Conv 2 ----------------------------------
    # ----------------------------------------------------------------------------
    x = Conv2D(128, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv2_1', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 112
    x = Conv2D(128, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv2_2', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(2, 2))(x)
    # Spatial resolution of output = 56
    x = BatchNormalization()(x)
    # ----------------------------------------------------------------------------
    # ---------------------------------- Conv 3 ----------------------------------
    # ----------------------------------------------------------------------------
    x = Conv2D(256, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv3_1', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 56
    x = Conv2D(256, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv3_2', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 56
    x = Conv2D(256, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv3_3', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(2, 2))(x)
    # Spatial resolution of output = 28
    x = BatchNormalization()(x)
    # ----------------------------------------------------------------------------
    # ---------------------------------- Conv 4 ----------------------------------
    # ----------------------------------------------------------------------------
    x = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv4_1', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg,  strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv4_2', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg,  strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv4_3', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg,  strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = BatchNormalization()(x)
    # ----------------------------------------------------------------------------
    # ---------------------------------- Conv 5 ----------------------------------
    # ----------------------------------------------------------------------------
    # Note the dilated convolutions below.
    # A dilated convolution applies its kernel with gaps (the dilation rate) between the taps,
    # enlarging the receptive field without adding parameters.
    x = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
               dilation_rate=2, name='conv5_1', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
               dilation_rate=2, name='conv5_2', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
               dilation_rate=2, name='conv5_3', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = BatchNormalization()(x)
    # ----------------------------------------------------------------------------
    # ---------------------------------- Conv 6 ----------------------------------
    # ----------------------------------------------------------------------------
    x = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
               dilation_rate=2, name='conv6_1', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
               dilation_rate=2, name='conv6_2', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = Conv2D(512, (kernel_size, kernel_size), activation='relu', padding='same',
               dilation_rate=2, name='conv6_3', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = BatchNormalization()(x)
    # ----------------------------------------------------------------------------
    # ---------------------------------- Conv 7 ----------------------------------
    # ----------------------------------------------------------------------------
    # No more dilation
    x = Conv2D(256, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv7_1', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = Conv2D(256, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv7_2', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = Conv2D(256, (kernel_size, kernel_size), activation='relu', padding='same',
               name='conv7_3', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    # Spatial resolution of output = 28
    x = BatchNormalization()(x)
    # ----------------------------------------------------------------------------
    # ---------------------------------- Conv 8 ----------------------------------
    # ----------------------------------------------------------------------------
    # UpSample before convolution
    x = UpSampling2D(size=(2, 2))(x)
    # Spatial resolution of output = 56
    x = Conv2D(128, (kernel_size, kernel_size), activation='relu', padding='same',
               dilation_rate=1, name='conv8_1', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)

    x = Conv2D(128, (kernel_size, kernel_size), activation='relu', padding='same',
               dilation_rate=1, name='conv8_2', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)
    x = Conv2D(128, (kernel_size, kernel_size), activation='relu', padding='same',
               dilation_rate=1, name='conv8_3', kernel_initializer=kernel_init,
               kernel_regularizer=l2_reg, strides=(1, 1))(x)

    outputs = Conv2D(num_colors, (1, 1), activation='softmax', padding='same',
                     dilation_rate=1, name='conv8_313')(x)
    model = Model(inputs=input_tensor, outputs=outputs, name="ColorNet")
    return model
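The final 1x1 softmax layer classifies every output pixel into one of num_colors quantized color bins; with a 224x224 input the output grid is 56x56 (one quarter of the input resolution). A minimal compile-and-fit sketch, with hypothetical data array names:

model = build_model()
model.compile(optimizer='adam', loss='categorical_crossentropy')
# Xtrain_l: hypothetical (n, img_rows, img_cols, 1) array of L channels
# Ytrain_bins: hypothetical (n, img_rows // 4, img_cols // 4, num_colors) one-hot color bins
model.fit(Xtrain_l, Ytrain_bins, batch_size=16, epochs=10)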
Example #21
encoder.summary()
plot_model(encoder, to_file='encoder.png', show_shapes=True)

# Build the Decoder Model
latent_inputs = Input(shape=(latent_dim,), name='decoder_input')
x = Dense(shape[1]*shape[2]*shape[3])(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)

# Stack of BN-ReLU-Transposed Conv2D-UpSampling2D blocks
for i in range(2):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        padding='same')(x)
    x = UpSampling2D()(x)
    filters //= 2

x = Conv2DTranspose(filters=1,
                    kernel_size=kernel_size,
                    padding='same')(x)

outputs = Activation('sigmoid', name='decoder_output')(x)

# Instantiate Decoder Model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='decoder.png', show_shapes=True)

# Autoencoder = Encoder + Decoder
# Instantiate Autoencoder Model
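The autoencoder instantiation is cut off in this excerpt; stacking the two halves in the usual way would look like the sketch below, assuming inputs and encoder come from the truncated encoder part of the example:

# Sketch only: 'inputs' and 'encoder' are defined in the truncated encoder half of this example.
autoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder')
autoencoder.summary()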
Example #22
# Construct a convolutional stacked autoencoder
auto_encoder = Sequential()
auto_encoder.add(
    Conv2D(15, (3, 3),
           activation='relu',
           padding='same',
           input_shape=input_shape))  # (?, 28, 28, 15)
auto_encoder.add(MaxPooling2D((2, 2), padding='same'))  # (?, 14, 14, 15)
auto_encoder.add(
    Conv2D(10, (3, 3),
           activation='relu',
           padding='same'))  # (?, 14, 14, 10)
auto_encoder.add(MaxPooling2D((2, 2), padding='same'))  # (?, 7, 7, 10)
auto_encoder.add(UpSampling2D((2, 2)))  # (?, 14, 14, 10)
auto_encoder.add(
    Conv2D(10, (3, 3),
           activation='relu',
           padding='same'))  # (?, 14, 14, 10)
auto_encoder.add(UpSampling2D((2, 2)))  # (?, 28, 28, 10)
auto_encoder.add(Conv2D(15, (3, 3), activation='relu',
                        padding='same'))  # (?, 28, 28, 15)
auto_encoder.add(Conv2D(1, (3, 3), activation='sigmoid',
                        padding='same'))  # (?, 28, 28, 1)

# model compile
auto_encoder.compile(optimizer='sgd', loss='mean_squared_error')
auto_encoder.summary()
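A minimal training sketch, assuming x_train is a hypothetical MNIST-style array reshaped to (n, 28, 28, 1) and scaled to [0, 1]:

# x_train is hypothetical: shape (n, 28, 28, 1), values in [0, 1]
auto_encoder.fit(x_train, x_train, epochs=20, batch_size=128, validation_split=0.1)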
Example #23
#input_training_generator = input_training_generator.reshape(input_training_generator.shape[0],3,27,8)
#input_validation_generator = input_validation_generator.reshape(input_validation_generator.shape[0],3,27,8)
#output_training_generator = output_training_generator.reshape(output_training_generator.shape[0],1,27,8)
#output_validation_generator = output_validation_generator.reshape(output_validation_generator.shape[0],1,27,8)

print("image reshape 2")

input_layer = Input((1, 28, 28))

x = Conv2D(10, 5, activation='relu')(input_layer)
x = MaxPooling2D(2)(x)
x = Conv2D(20, 2, activation='relu')(x)
x = MaxPooling2D(2)(x)
encoded = x
x = UpSampling2D(2)(x)
x = Conv2DTranspose(20, 2, activation='relu')(x)
x = UpSampling2D(2)(x)
x = Conv2DTranspose(10, 5, activation='relu')(x)
x = Conv2DTranspose(1, 3, activation='sigmoid')(x)

print("layer definition")

model = Model(inputs=input_layer, outputs=x)
model.summary()

print("model creation")

model.compile(loss='binary_crossentropy', optimizer='adam')

print("model compilation")
Example #24
    down3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = BatchNormalization()(down3)
    conv4 = Convolution2D(8 * f, 3, 3, activation='relu', border_mode='same')(conv4)
    conv4 = BatchNormalization()(conv4)
    conv4 = Convolution2D(8 * f, 3, 3, activation='relu', border_mode='same')(conv4)

    down4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = BatchNormalization()(down4)
    conv5 = Convolution2D(16 * f, 3, 3, activation='relu', border_mode='same')(conv5)
    conv5 = BatchNormalization()(conv5)
    conv5 = Convolution2D(16 * f, 3, 3, activation='relu', border_mode='same')(conv5)

    up1 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=3)

    conv6 = BatchNormalization()(up1)
    conv6 = Convolution2D(8 * f, 3, 3, activation='relu', border_mode='same')(conv6)
    conv6 = BatchNormalization()(conv6)
    conv6 = Convolution2D(8 * f, 3, 3, activation='relu', border_mode='same')(conv6)

    up2 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=3)

    conv7 = BatchNormalization()(up2)
    conv7 = Convolution2D(4 * f, 3, 3, activation='relu', border_mode='same')(conv7)
    conv7 = BatchNormalization()(conv7)
    conv7 = Convolution2D(4 * f, 3, 3, activation='relu', border_mode='same')(conv7)

    up3 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=3)
Example #25
def yolo_body(inputs, num_anchors, num_classes):
    #---------------------------------------------------#
    #   Build the CSPDarknet53 backbone and obtain its
    #   three effective feature layers, whose shapes are:
    #   52,52,256
    #   26,26,512
    #   13,13,1024
    #---------------------------------------------------#
    feat1, feat2, feat3 = darknet_body(inputs)

    # 13,13,1024 -> 13,13,512 -> 13,13,1024 -> 13,13,512 -> 13,13,2048 -> 13,13,512 -> 13,13,1024 -> 13,13,512
    P5 = DarknetConv2D_BN_Leaky(512, (1, 1))(feat3)
    P5 = DarknetConv2D_BN_Leaky(1024, (3, 3))(P5)
    P5 = DarknetConv2D_BN_Leaky(512, (1, 1))(P5)
    # SPP structure: max pooling at several scales, then stacking (concatenation).
    maxpool1 = MaxPooling2D(pool_size=(13, 13), strides=(1, 1),
                            padding='same')(P5)
    maxpool2 = MaxPooling2D(pool_size=(9, 9), strides=(1, 1),
                            padding='same')(P5)
    maxpool3 = MaxPooling2D(pool_size=(5, 5), strides=(1, 1),
                            padding='same')(P5)
    P5 = Concatenate()([maxpool1, maxpool2, maxpool3, P5])
    P5 = DarknetConv2D_BN_Leaky(512, (1, 1))(P5)
    P5 = DarknetConv2D_BN_Leaky(1024, (3, 3))(P5)
    P5 = DarknetConv2D_BN_Leaky(512, (1, 1))(P5)

    # 13,13,512 -> 13,13,256 -> 26,26,256
    P5_upsample = compose(DarknetConv2D_BN_Leaky(256, (1, 1)),
                          UpSampling2D(2))(P5)
    # 26,26,512 -> 26,26,256
    P4 = DarknetConv2D_BN_Leaky(256, (1, 1))(feat2)
    # 26,26,256 + 26,26,256 -> 26,26,512
    P4 = Concatenate()([P4, P5_upsample])

    # 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256
    P4 = make_five_convs(P4, 256)

    # 26,26,256 -> 26,26,128 -> 52,52,128
    P4_upsample = compose(DarknetConv2D_BN_Leaky(128, (1, 1)),
                          UpSampling2D(2))(P4)
    # 52,52,256 -> 52,52,128
    P3 = DarknetConv2D_BN_Leaky(128, (1, 1))(feat1)
    # 52,52,128 + 52,52,128 -> 52,52,256
    P3 = Concatenate()([P3, P4_upsample])

    # 52,52,256 -> 52,52,128 -> 52,52,256 -> 52,52,128 -> 52,52,256 -> 52,52,128
    P3 = make_five_convs(P3, 128)

    #---------------------------------------------------#
    #   Third feature layer
    #   y3=(batch_size,52,52,3,85)
    #---------------------------------------------------#
    P3_output = DarknetConv2D_BN_Leaky(256, (3, 3))(P3)
    P3_output = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1),
                              kernel_initializer=keras.initializers.normal(
                                  mean=0.0, stddev=0.01))(P3_output)

    # 52,52,128 -> 26,26,256
    P3_downsample = ZeroPadding2D(((1, 0), (1, 0)))(P3)
    P3_downsample = DarknetConv2D_BN_Leaky(256, (3, 3),
                                           strides=(2, 2))(P3_downsample)
    # 26,26,256 + 26,26,256 -> 26,26,512
    P4 = Concatenate()([P3_downsample, P4])
    # 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256
    P4 = make_five_convs(P4, 256)

    #---------------------------------------------------#
    #   Second feature layer
    #   y2=(batch_size,26,26,3,85)
    #---------------------------------------------------#
    P4_output = DarknetConv2D_BN_Leaky(512, (3, 3))(P4)
    P4_output = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1),
                              kernel_initializer=keras.initializers.normal(
                                  mean=0.0, stddev=0.01))(P4_output)

    # 26,26,256 -> 13,13,512
    P4_downsample = ZeroPadding2D(((1, 0), (1, 0)))(P4)
    P4_downsample = DarknetConv2D_BN_Leaky(512, (3, 3),
                                           strides=(2, 2))(P4_downsample)
    # 13,13,512 + 13,13,512 -> 13,13,1024
    P5 = Concatenate()([P4_downsample, P5])
    # 13,13,1024 -> 13,13,512 -> 13,13,1024 -> 13,13,512 -> 13,13,1024 -> 13,13,512
    P5 = make_five_convs(P5, 512)

    #---------------------------------------------------#
    #   First feature layer
    #   y1=(batch_size,13,13,3,85)
    #---------------------------------------------------#
    P5_output = DarknetConv2D_BN_Leaky(1024, (3, 3))(P5)
    P5_output = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1),
                              kernel_initializer=keras.initializers.normal(
                                  mean=0.0, stddev=0.01))(P5_output)

    return Model(inputs, [P5_output, P4_output, P3_output])
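A minimal usage sketch; the 416x416 input and the COCO-style anchor and class counts are assumptions, and darknet_body plus the other helpers are defined elsewhere in the source project:

inputs = Input(shape=(416, 416, 3))
model = yolo_body(inputs, num_anchors=3, num_classes=80)  # heads for the 13x13, 26x26 and 52x52 grids
model.summary()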
Example #26
    def buildModel():
        input_shape = (512, 512, 3)

        inputs = Input(input_shape)

        # Downsampling block
        conv1 = Conv2D(64,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(inputs)
        conv1 = Conv2D(64,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Conv2D(128,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(pool1)
        conv2 = Conv2D(128,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(256,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(pool2)
        conv3 = Conv2D(256,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(512,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(pool3)
        conv4 = Conv2D(512,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

        # Bottleneck block
        conv5 = Conv2D(1024,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(pool4)
        conv5 = Conv2D(1024,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv5)
        drop5 = Dropout(0.5)(conv5)

        # Upsampling block
        up6 = UpSampling2D(size=(2, 2))(drop5)
        up6 = Conv2D(512,
                     2,
                     activation="relu",
                     padding="same",
                     kernel_initializer="he_normal")(up6)
        merge6 = Concatenate(axis=3)([drop4, up6])
        conv6 = Conv2D(512,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(merge6)
        conv6 = Conv2D(512,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv6)

        up7 = UpSampling2D(size=(2, 2))(conv6)
        up7 = Conv2D(256,
                     2,
                     activation="relu",
                     padding="same",
                     kernel_initializer="he_normal")(up7)
        merge7 = Concatenate(axis=3)([conv3, up7])
        conv7 = Conv2D(256,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(merge7)
        conv7 = Conv2D(256,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv7)

        up8 = UpSampling2D(size=(2, 2))(conv7)
        up8 = Conv2D(128,
                     2,
                     activation="relu",
                     padding="same",
                     kernel_initializer="he_normal")(up8)
        merge8 = Concatenate(axis=3)([conv2, up8])
        conv8 = Conv2D(128,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(merge8)
        conv8 = Conv2D(128,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv8)

        up9 = UpSampling2D(size=(2, 2))(conv8)
        up9 = Conv2D(64,
                     2,
                     activation="relu",
                     padding="same",
                     kernel_initializer="he_normal")(up9)
        merge9 = Concatenate(axis=3)([conv1, up9])
        conv9 = Conv2D(64,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(merge9)
        conv9 = Conv2D(64,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv9)

        conv9 = Conv2D(2,
                       3,
                       activation="relu",
                       padding="same",
                       kernel_initializer="he_normal")(conv9)
        # 1x1 sigmoid head: softmax over a single channel would always output 1,
        # and a 3x3 'valid' kernel would shrink the mask below the 512x512 input.
        conv10 = Conv2D(1, 1, activation="sigmoid")(conv9)

        model = Model(inputs=inputs, outputs=conv10)
        model.compile(
            optimizer=optimizers.Adam(lr=1e-4),
            loss=losses.binary_crossentropy,
            metrics=["accuracy"],
        )

        return model
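A minimal training sketch for this U-Net; buildModel appears inside a class in the snippet above, and the image and mask arrays here are hypothetical, with shapes (n, 512, 512, 3) and (n, 512, 512, 1) and values in [0, 1]:

model = buildModel()
# images / masks are hypothetical arrays matching the shapes above
model.fit(images, masks, batch_size=2, epochs=20, validation_split=0.1)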