def build_discriminative_net(img_shape):
    """Build the attentive discriminator.

    Args:
        img_shape: shape tuple of the input image (H, W, C).

    Returns:
        Model mapping the image to [fc_out, attention_map, fc_2]: the
        sigmoid probability, the learned attention map, and the raw logit.
    """
    img_input = Input(img_shape)

    # Six stride-1 conv stages extract increasingly deep features.
    conv_stage_6 = compose(
        conv_stage(8, 5, 1, 'conv_stage_1'),
        conv_stage(16, 5, 1, 'conv_stage_2'),
        conv_stage(32, 5, 1, 'conv_stage_3'),
        conv_stage(64, 5, 1, 'conv_stage_4'),
        conv_stage(128, 5, 1, 'conv_stage_5'),
        conv_stage(128, 5, 1, 'conv_stage_6')
    )(img_input)

    # Single-channel attention map learned from the deepest features.
    attention_map = Conv2D(1, kernel_size=5, strides=1, padding='same', use_bias=False,
                           name='attention_map')(conv_stage_6)

    # Attention-weighted features -> stride-4 conv stages -> two FC layers.
    fc_2 = compose(
        Multiply(),
        conv_stage(64, 5, 4, 'conv_stage_7'),
        conv_stage(64, 5, 4, 'conv_stage_8'),
        conv_stage(32, 5, 4, 'conv_stage_9'),
        Dense(1024, use_bias=False, name='fc_1'),
        Dense(1, use_bias=False, name='fc_2')
    )([attention_map, conv_stage_6])

    # NOTE(review): a previous draft considered nudging the sigmoid output
    # away from exactly 0.0/1.0 via tf.where(...) to avoid log(0) in the
    # loss; if that is needed, prefer clipping inside the loss function.
    # (Removed the dead commented-out code and the redundant single-element
    # compose() wrapper that used to sit here.)
    fc_out = Activation('sigmoid', name='fc_out')(fc_2)

    return Model(inputs=[img_input], outputs=[fc_out, attention_map, fc_2])
示例#2
0
def build_conv_block(dim, padding_type, name=None):
    """Two 3x3 conv+BN stages (ReLU between) for a ResNet residual branch.

    Args:
        dim: number of conv filters in both stages.
        padding_type: 'reflect' (ReflectionPadding2D + valid conv) or
            'zero' ('same'-padded conv).
        name: optional prefix for all layer names.

    Returns:
        A composed layer callable.

    Raises:
        ValueError: if `padding_type` is unrecognized (previously the
            function silently returned None, crashing later at call time).
    """
    if name is not None:
        name = name + '/'

    if padding_type == 'reflect':
        return compose(
            ReflectionPadding2D(
                1, name=name + 'reflect_pad1' if name else None),
            Conv2D(dim, kernel_size=3, strides=1, padding='valid',
                   name=name + 'conv1' if name else None),
            BatchNormalization(name=name + 'bn1' if name else None),
            ReLU(name=name + 'relu' if name else None),
            ReflectionPadding2D(
                1, name=name + 'reflect_pad2' if name else None),
            Conv2D(dim, kernel_size=3, strides=1, padding='valid',
                   name=name + 'conv2' if name else None),
            BatchNormalization(name=name + 'bn2' if name else None)
        )

    if padding_type == 'zero':
        return compose(
            Conv2D(dim, kernel_size=3, strides=1, padding='same',
                   name=name + 'conv1' if name else None),
            BatchNormalization(name=name + 'bn1' if name else None),
            ReLU(name=name + 'relu' if name else None),
            Conv2D(dim, kernel_size=3, strides=1, padding='same',
                   name=name + 'conv2' if name else None),
            BatchNormalization(name=name + 'bn2' if name else None)
        )

    raise ValueError('Unsupported padding_type: {0!r}'.format(padding_type))
示例#3
0
def unet_layers(layer_num, output_channel, nf, include_top=True, name=None):
    """Build a UNet as a single composed callable, constructed innermost-out.

    Args:
        layer_num: encoder/decoder depth; must be >= 2.
        output_channel: channels of the final transposed conv.
        nf: base filter count, scaled per level by `multiplier`.
        include_top: when True, append a final tanh activation.
        name: optional prefix for all layer names.

    Returns:
        A composed callable mapping an input tensor to the UNet output.
    """
    assert layer_num >= 2

    def multiplier(x):
        # Per-level filter growth, capped at 8x.
        # NOTE(review): x**2 (square), not 2**x — this matches
        # autoencoder_layers below, but differs from the usual
        # depth-doubling scheme; confirm it is intentional.
        return min(x**2, 8)

    if name is not None:
        name = name + '/'

    # Innermost Layers: the deepest encoder/decoder pair (always dropout).
    layers = compose(
        EncoderBlock(nf * multiplier(layer_num),
                     name=name +
                     'encoder_block{0}'.format(layer_num) if name else None),
        DecoderBlock(nf * multiplier(layer_num),
                     dropout=True,
                     name=name +
                     'decoder_block{0}'.format(layer_num) if name else None))

    # Inter Layers: wrap the current stack with one encoder/decoder pair per
    # level, joined by a skip concatenation around the inner stack.
    for i in range(1, layer_num - 1):

        layer_idx = layer_num - i
        layers = compose(
            EncoderBlock(
                nf * multiplier(layer_idx),
                name=name +
                'encoder_block{0}'.format(layer_idx) if name else None),
            skip_concat(layers, name +
                        'concat{0}'.format(layer_idx) if name else None),
            DecoderBlock(
                nf * multiplier(layer_idx),
                # Dropout only on the three deepest decoder levels.
                dropout=True if i < 3 else False,
                name=name +
                'decoder_block{0}'.format(layer_idx) if name else None))

    # Outermost Layers: plain conv in, skip concat, ReLU + tconv out.
    layers = compose(
        Conv2D(nf,
               kernel_size=4,
               strides=2,
               padding='same',
               use_bias=False,
               name=name + 'encoder_block1/conv' if name else None),
        skip_concat(layers, name + 'concat{0}'.format(1) if name else None),
        ReLU(name=name + 'decoder_block1/relu' if name else None),
        Conv2DTranspose(output_channel,
                        kernel_size=4,
                        strides=2,
                        padding='same',
                        name=name + 'decoder_block1/tconv' if name else None))

    if include_top:
        layers = compose(
            layers,
            Activation('tanh',
                       name=name + 'output_block/tanh' if name else None))

    return layers
示例#4
0
def transition_block(out_filters,
                     transition_layer=None,
                     transition_name='trans',
                     dropRate=0.0,
                     name=None):
    """BN -> ReLU -> 1x1 conv, optionally followed by dropout and a
    caller-supplied transition layer (e.g. pooling/upsampling).

    Args:
        out_filters: filters of the 1x1 conv.
        transition_layer: optional layer factory appended at the end.
        transition_name: name suffix for the appended transition layer.
        dropRate: dropout rate; 0 disables dropout.
        name: optional prefix for all layer names.
    """
    if name is not None:
        name = name + '/'

    def _sub(suffix):
        # Namespaced layer name, or None when the block is unnamed.
        return name + suffix if name else None

    block = compose(
        BatchNormalization(name=_sub('bn')),
        ReLU(name=_sub('relu')),
        Conv2D(out_filters,
               kernel_size=1,
               strides=1,
               padding='same',
               use_bias=False,
               name=_sub('conv')))

    if dropRate:
        block = compose(block, Dropout(dropRate, name=_sub('dropout')))

    if transition_layer:
        block = compose(block,
                        transition_layer(name=_sub(transition_name)))

    return block
示例#5
0
def Dehaze(img_shape=(256, 256, 3)):
    """Dehazing network based on the atmospheric scattering model.

    Estimates a transmission map and atmospheric light from the input,
    inverts the scattering model to get a coarse dehazed image, then
    refines it with a small conv head.

    Returns:
        Model: img_input -> [dehaze, trans, atmos].
    """
    img_input = Input(img_shape, name='img_input')
    trans = transmission_map_generator(img_shape)(img_input)
    atmos = atmospheric_light_generator(img_shape)(img_input)

    # $trans_{reciprocal} = \frac{1}{trans + 10^{-10}}$
    # abs() plus a tiny epsilon guards the division against zero values.
    trans_reciprocal = Lambda(
        function=lambda x: 1 / (K.abs(x) + 10**-10))(trans)

    # Smooth the atmospheric-light estimate; pooling then upsampling
    # restores the original spatial resolution.
    atmos = compose(
        AvgPool2D(),
        LeakyReLU(0.2),
        UpSampling2D()
    )(atmos)

    # $dehaze = (input - atmos) \times trans^{-1} + atmos$
    dehaze = Subtract()([img_input, atmos])
    dehaze = Multiply()([dehaze, trans_reciprocal])
    dehaze = Add()([dehaze, atmos])

    # Refinement head over the concatenated (coarse dehaze, input) pair.
    dehaze = compose(
        Concatenate(),
        Conv2D(6, kernel_size=3, strides=1, padding='same'),
        LeakyReLU(alpha=0.2),
        Conv2D(20, kernel_size=3, strides=1, padding='same'),
        LeakyReLU(alpha=0.2),
        Concat_Samping_Block([32, 16, 8, 4], kernel_size=1),
        Conv2D(3, kernel_size=3, strides=1, padding='same'),
        Activation('tanh')
    )([dehaze, img_input])

    return Model(inputs=[img_input], outputs=[dehaze, trans, atmos])
def autoencoder_layers(layer_num,
                       output_channel,
                       nf,
                       include_top=True,
                       name=None):
    """Build a plain encoder-decoder (no skip connections) as one callable.

    Args:
        layer_num: depth; must be >= 2.
        output_channel: channels of the final transposed conv.
        nf: base filter count, scaled per level by `multiplier`.
        include_top: when True, append a final tanh activation.
        name: optional prefix for all layer names.

    Returns:
        A composed callable mapping an input tensor to the decoder output.
    """
    assert layer_num >= 2

    def multiplier(x):
        # Per-level filter growth, capped at 8x (square, as in unet_layers).
        return min(x**2, 8)

    if name is not None:
        name = name + '/'

    # First Encoder Layers
    encoder_layers = Conv2D(nf,
                            kernel_size=4,
                            strides=2,
                            padding='same',
                            name=name +
                            'encoder_block1/conv' if name else None)

    # Last Decoder Layers
    decoder_layers = compose(
        ReLU(name=name + 'decoder_block1/relu' if name else None),
        Conv2DTranspose(output_channel,
                        kernel_size=4,
                        strides=2,
                        padding='same',
                        name=name + 'decoder_block1/tconv' if name else None))

    # Inter Layers: grow the encoder at its tail and the decoder at its
    # head so the two stacks mirror each other level for level.
    for i in range(1, layer_num):

        encoder_layers = compose(
            encoder_layers,
            EncoderBlock(nf * multiplier(i),
                         name=name +
                         'encoder_block{0}'.format(i + 1) if name else None),
        )

        decoder_layers = compose(
            DecoderBlock(nf * multiplier(i - 1),
                         name=name +
                         'decoder_block{0}'.format(i + 1) if name else None),
            decoder_layers)

    layers = compose(encoder_layers, decoder_layers)

    if include_top:
        layers = compose(
            layers,
            Activation('tanh',
                       name=name + 'output_block/tanh' if name else None))

    return layers
示例#7
0
def Dense_rain(img_shape=(128, 128, 3), name='derain'):
    '''
    Multi-stream Dense Network.

    Three dense streams (Dense1/2/3) over the input image plus an
    8-channel label input predict a rain residual; the residual is
    subtracted from the input and refined to give the clear image.

    Returns:
        Model: [img_input, label_input] -> [residual, clear].
    '''
    if name is not None:
        name = name + '/'

    img_input = Input(img_shape, name=name + 'img_input' if name else None)
    # 8-channel per-pixel label map (condition input).
    label_input = Input(img_shape[:2] + (8, ),
                        name=name + 'label_input' if name else None)

    # Fuse the three dense streams, the raw input and the label map.
    # NOTE(review): the concat order (Dense3, img, Dense2, Dense1, label)
    # is unusual — confirm it matches what pretrained weights expect.
    residual = compose(
        Concatenate(name=name + 'concat1' if name else None),
        Conv2D(47,
               kernel_size=3,
               strides=1,
               padding='same',
               name=name + 'concat1/conv2d' if name else None),
        LeakyReLU(alpha=0.2, name=name + 'concat1/lrelu' if name else None),
        Concat_Samping_Block([32, 16, 8, 4],
                             name=name + 'concat_sampling' if name else None),
        Conv2D(3,
               kernel_size=3,
               strides=1,
               padding='same',
               name=name + 'residual/conv2d' if name else None),
        Activation('tanh', name=name + 'residual/tanh' if name else None))([
            Dense3(img_input), img_input,
            Dense2(img_input),
            Dense1(img_input), label_input
        ])

    # clear = refine(img - residual): two convs over the subtraction.
    clear = compose(
        Subtract(name=name + 'clear/subtract' if name else None),
        Conv2D(8,
               kernel_size=7,
               strides=1,
               padding='same',
               name=name + 'clear/conv2d1' if name else None),
        LeakyReLU(alpha=0.2, name=name + 'clear/lrelu' if name else None),
        Conv2D(3,
               kernel_size=3,
               strides=1,
               padding='same',
               name=name + 'clear/conv2d2' if name else None),
        Activation('tanh', name=name +
                   'clear/tanh' if name else None))([img_input, residual])

    model = Model(inputs=[img_input, label_input], outputs=[residual, clear])
    return model
示例#8
0
def imageGAN(img_shape, ndf, use_sigmoid):
    """Image-level GAN discriminator: six stride-2 UNet blocks followed by
    a 1-channel conv, with an optional sigmoid head.

    Args:
        img_shape: input image shape (H, W, C).
        ndf: base filter count.
        use_sigmoid: append a sigmoid activation when True.
    """
    img_input = Input(img_shape)

    # Filter widths for layers 1..6; every block is a 4x4 stride-2 conv.
    # Only the first layer skips batch-norm.
    widths = [ndf, ndf * 2, ndf * 4, ndf * 8, ndf * 8, ndf * 8]
    stages = [
        unet_block(filters,
                   4,
                   2,
                   'layer{0}'.format(idx + 1),
                   transposed=False,
                   bn=(idx > 0),
                   relu=False) for idx, filters in enumerate(widths)
    ]
    stages.append(
        Conv2D(1, kernel_size=4, strides=2, padding='same', name='layer7'))

    layers = compose(*stages)
    if use_sigmoid:
        layers = compose(layers, Activation('sigmoid'))

    output = layers(img_input)
    return Model(inputs=[img_input], outputs=[output])
示例#9
0
def DB_Blocks_Gen(_bn_block, _bn_filters, _tr_blocks, _tr_filters, name=None):
    """Build a list of (dense block -> transition block) composites.

    Args:
        _bn_block: dense-block factory taking (filters, name=...).
        _bn_filters: per-block dense filter counts.
        _tr_blocks: per-block transition classes (Down/Plain/Up).
        _tr_filters: per-block transition filter counts.
        name: optional prefix for all layer names.

    Raises:
        ValueError: if a transition block is not one of the known
            TransitionBlock_* classes. (Previously an unknown type left
            `tr_str` unbound — NameError on the first block — or silently
            reused the previous iteration's suffix.)
    """
    if name is not None:
        name = name + '/'

    blocks = []
    for i, (_bn_filter, _tr_block,
            _tr_filter) in enumerate(zip(_bn_filters, _tr_blocks,
                                         _tr_filters)):

        block_id_str = 'db_{0}'.format(i + 1)
        # Name suffix chosen by transition type.
        if _tr_block is TransitionBlock_Down:
            tr_str = '/td'
        elif _tr_block is TransitionBlock_Plain:
            tr_str = '/tp'
        elif _tr_block is TransitionBlock_Up:
            tr_str = '/tu'
        else:
            raise ValueError(
                'Unknown transition block: {0!r}'.format(_tr_block))

        blocks.append(
            compose(
                _bn_block(_bn_filter,
                          name=name + block_id_str + '/bn' if name else None),
                _tr_block(_tr_filter,
                          name=name + block_id_str +
                          tr_str if name else None)))
    return blocks
示例#10
0
def do_deploy_fluence(yml="fluence.yml"):
    """(Re)deploy the fluence containers described by `yml` and return the
    resulting node addresses.

    Pulls fresh images, removes old containers, creates (without starting)
    new ones, injects the config files, then restarts the stack.
    """
    with hide():
        compose("pull", yml)
        compose('rm -fs', yml)
        compose('up --no-start', yml)  # was: 'create'
        copy_configs(yml)
        compose("restart", yml)
        # Give the containers a moment to come up before querying them.
        sleep(1)
        addrs = get_fluence_addresses(yml)
        return addrs
示例#11
0
def build_autoencoder(img_shape):
    """Context-aggregation autoencoder with dilated convolutions and three
    multi-scale skip outputs (deepest to shallowest; the last is tanh'd).

    The input has 4 channels (img_shape's spatial dims + a 4th channel,
    e.g. image + mask).
    """
    img_input = Input(img_shape[:-1] + (4, ))

    def conv_lrelu(filters, kernel_size, dilation_rate=1):
        # 'same'-padded conv (no bias) followed by LeakyReLU.
        return compose(
            Conv2D(filters,
                   kernel_size=kernel_size,
                   strides=1,
                   padding='same',
                   dilation_rate=dilation_rate,
                   use_bias=False),
            LeakyReLU())

    def deconv_avgpool_lrelu(filters):
        # Stride-2 deconv, then 2x average pooling and LeakyReLU.
        return compose(
            Deconv2D(filters,
                     kernel_size=4,
                     strides=2,
                     padding='same',
                     use_bias=False),
            AvgPool2D(),
            LeakyReLU())

    skip_conv = partial(Conv2D,
                        filters=3,
                        kernel_size=3,
                        strides=1,
                        padding='same',
                        use_bias=False)

    # Encoder trunk (conv1 .. relu12); dilation rates 2/4/8/16 widen the
    # receptive field in the middle of the stack.
    relu_12 = compose(
        conv_lrelu(64, 5),
        conv_lrelu(128, 3),
        conv_lrelu(128, 3),
        conv_lrelu(128, 3),
        conv_lrelu(256, 3),
        conv_lrelu(256, 3),
        conv_lrelu(256, 3, 2),
        conv_lrelu(256, 3, 4),
        conv_lrelu(256, 3, 8),
        conv_lrelu(256, 3, 16),
        conv_lrelu(256, 3),
        conv_lrelu(256, 3))(img_input)

    relu_14 = compose(deconv_avgpool_lrelu(128),
                      conv_lrelu(128, 3))(relu_12)

    relu_16 = compose(deconv_avgpool_lrelu(64),
                      conv_lrelu(32, 3))(relu_14)

    # Three 3-channel skip outputs at increasing resolution.
    skip_output_1 = skip_conv()(relu_12)
    skip_output_2 = skip_conv()(relu_14)
    skip_output_3 = Activation('tanh')(skip_conv()(relu_16))

    return Model(inputs=[img_input],
                 outputs=[skip_output_1, skip_output_2, skip_output_3])
示例#12
0
def UpSampling_Block(up_sample_size=1, name=None):
    """1-channel 3x3 conv + LeakyReLU(0.2), then spatial upsampling."""
    def _sub(suffix):
        # Namespaced layer name, or None when the block is unnamed.
        return name + suffix if name else None

    return compose(
        Conv2D(1,
               kernel_size=3,
               strides=1,
               padding='same',
               name=_sub('/conv2d')),
        LeakyReLU(alpha=0.2, name=_sub('/lrelu')),
        UpSampling2D(up_sample_size, name=_sub('/us')))
示例#13
0
def G(img_shape=(256, 256),
      input_num_channel=3,
      output_num_channel=3,
      num_filters=8):
    """Generator: the G_base trunk followed by a final tanh activation."""
    img_input = Input(img_shape + (input_num_channel, ))

    base_out = G_base(img_input, output_num_channel, num_filters)
    output = Activation('tanh', name='dlayer1/tanh')(base_out)

    return Model(inputs=[img_input], outputs=[output])
示例#14
0
def get_fluence_addresses(yml="fluence.yml"):
    """Return a Node (peer id + tcp/ws Service) for every container in the
    docker-compose project described by `yml`."""
    containers = compose('ps -q', yml).splitlines()
    nodes = []
    # Renamed from `id`, which shadowed the builtin of the same name.
    for container_id in containers:
        (tcp_port, ws_port) = get_ports(container_id)
        peer_id = get_fluence_peer_ids(container_id)
        node = Node(peer_id=peer_id,
                    tcp=Service(tcp_port, None),
                    ws=Service(ws_port, None))
        nodes.append(node)
    return nodes
示例#15
0
def inference(images_shape, num_feature=16, kernel_size=3):
    """26-layer residual de-raining network over a (base, detail) split.

    Args:
        images_shape: (H, W, C) shape of both inputs.
        num_feature: feature channels of the intermediate layers.
        kernel_size: conv kernel size for every layer.

    Returns:
        Model: [images, detail] -> images + predicted negative residual.
    """
    num_channels = images_shape[2]

    def conv2d(filters, layer_id):
        return Conv2D(filters=filters,
                      kernel_size=kernel_size,
                      padding='same',
                      kernel_regularizer=tf.keras.regularizers.l2(1e-10),
                      name='layer_{0}/conv'.format(layer_id))

    def bn(layer_id):
        return BatchNormalization(name='layer_{0}/bn'.format(layer_id))

    def relu(layer_id):
        return ReLU(name='layer_{0}/relu'.format(layer_id))

    def base_layer(filters, layer_id):
        # Conv -> BN -> ReLU building block.
        return compose(conv2d(filters, layer_id), bn(layer_id), relu(layer_id))

    images = Input(images_shape, name='images_input')
    detail = Input(images_shape, name='detail_input')

    #     base = guided_filter(inp, inp, 15, 1, nhwc=True) # using guided filter for obtaining base layer
    #     detail = images - base   # detail layer
    # BUG FIX: layer 1 must be *applied* to the `detail` tensor. Previously
    # the composed layer object was assigned without being called, so the
    # `detail` input was never used and the later compose(...) calls
    # received a layer object instead of a tensor.
    output_shortcut = base_layer(num_feature, 1)(detail)

    #  layers 2 to 25: twelve pairs with identity shortcut connections
    for i in range(1, 13):

        output = compose(base_layer(num_feature, i * 2),
                         base_layer(num_feature, i * 2 + 1))(output_shortcut)

        output_shortcut = Add(name='add_{0}'.format(i))(
            [output_shortcut, output])

    # layer 26: predict the negative rain residual (conv + BN, no ReLU)
    neg_residual = compose(conv2d(num_channels, 26), bn(26))(output_shortcut)

    final_out = Add(name='add_final')([images, neg_residual])

    return tf.keras.models.Model(inputs=[images, detail], outputs=[final_out])
示例#16
0
def resnet_9blocks(img_shape, ngf, name=None):
    """ResNet generator: reflect-padded stem, two downsampling convs,
    nine residual blocks, two upsampling tconvs, reflect-padded tanh head.

    Args:
        img_shape: input image shape (H, W, C).
        ngf: base generator filter count.
        name: optional prefix for all layer names.
    """
    if name is not None:
        name = name + '/'

    img_input = Input(img_shape, name=name + 'input' if name else None)

    layers = compose(
        ReflectionPadding2D(
            3, name=name + 'reflect_in/reflect_pad' if name else None),
        # NOTE(review): the stem conv uses img_shape[-1] filters (the input
        # channel count) rather than ngf, unlike the canonical resnet
        # generator — confirm this is intentional.
        Conv2D(img_shape[-1], kernel_size=7, strides=1,
               padding='valid', name=name + 'reflect_in/conv' if name else None),
        # BUG FIX: the BN and ReLU layer names were swapped ('relu' on the
        # BatchNormalization layer and 'bn' on the ReLU).
        BatchNormalization(name=name + 'reflect_in/bn' if name else None),
        ReLU(name=name + 'reflect_in/relu' if name else None),

        Conv2D(ngf * 2, kernel_size=3, strides=2, padding='same',
               name=name + 'ds_block1/conv' if name else None),
        BatchNormalization(name=name + 'ds_block1/bn' if name else None),
        ReLU(name=name + 'ds_block1/relu' if name else None),

        Conv2D(ngf * 4, kernel_size=3, strides=2, padding='same',
               name=name + 'ds_block2/conv' if name else None),
        BatchNormalization(name=name + 'ds_block2/bn' if name else None),
        ReLU(name=name + 'ds_block2/relu' if name else None),

        build_res_block(ngf * 4, 'reflect', name=name + 'res_block1' if name else None),
        build_res_block(ngf * 4, 'reflect', name=name + 'res_block2' if name else None),
        build_res_block(ngf * 4, 'reflect', name=name + 'res_block3' if name else None),
        build_res_block(ngf * 4, 'reflect', name=name + 'res_block4' if name else None),
        build_res_block(ngf * 4, 'reflect', name=name + 'res_block5' if name else None),
        build_res_block(ngf * 4, 'reflect', name=name + 'res_block6' if name else None),
        build_res_block(ngf * 4, 'reflect', name=name + 'res_block7' if name else None),
        build_res_block(ngf * 4, 'reflect', name=name + 'res_block8' if name else None),
        build_res_block(ngf * 4, 'reflect', name=name + 'res_block9' if name else None),

        Conv2DTranspose(ngf * 2, kernel_size=3, strides=2,
                        padding='same', name=name + 'us_block1/tconv' if name else None),
        BatchNormalization(name=name + 'us_block1/bn' if name else None),
        ReLU(name=name + 'us_block1/relu' if name else None),

        Conv2DTranspose(ngf, kernel_size=3, strides=2, padding='same',
                        name=name + 'us_block2/tconv' if name else None),
        BatchNormalization(name=name + 'us_block2/bn' if name else None),
        ReLU(name=name + 'us_block2/relu' if name else None),

        ReflectionPadding2D(
            3, name=name + 'reflect_out/reflect_pad' if name else None),
        Conv2D(img_shape[-1], kernel_size=7, strides=1,
               padding='valid', name=name + 'reflect_out/conv' if name else None),
        Activation('tanh', name=name + 'reflect_out/tanh' if name else None)
    )

    return Model(inputs=[img_input], outputs=[layers(img_input)])
示例#17
0
def inference(input_shape=(33, 33, 1)):
    """SRCNN-style three-conv super-resolution network (valid padding:
    9x9 feature extraction, 1x1 mapping, 5x5 reconstruction)."""
    inputs = Input(input_shape)

    x = Conv2D(64, kernel_size=9, strides=1, padding='valid',
               use_bias=True)(inputs)
    x = ReLU()(x)
    x = Conv2D(32, kernel_size=1, strides=1, padding='valid',
               use_bias=True)(x)
    x = ReLU()(x)
    x = Conv2D(3, kernel_size=5, strides=1, padding='valid',
               use_bias=True)(x)

    return Model(inputs=[inputs], outputs=[x])
示例#18
0
def D(input_num_channel=3, output_num_channel=3, num_filters=8, num_layers=3):
    """PatchGAN-style discriminator built from UNet blocks.

    Returns the composed layer callable (not a Model); the caller applies
    it to an input tensor.
    """
    # Stage list: stride-2 conv in, UNet blocks, 1-channel head + sigmoid.
    stages = [
        Conv2D(num_filters,
               kernel_size=4,
               strides=2,
               padding='same',
               use_bias=False,
               name='layer1')
    ]

    # Intermediate batch-normalized blocks (layer2 .. layer{num_layers-1}).
    for layer_idx in range(1, num_layers - 1):
        stages.append(
            UNetBlock_4_2(num_filters * multiplier_cal(layer_idx),
                          name='layer%d' % (layer_idx + 1),
                          transposed=False,
                          bn=True,
                          relu=False,
                          dropout=False))

    # Stride-1 block for the final named layer.
    stages.append(
        UNetBlock_4_1(num_filters * multiplier_cal(num_layers - 1),
                      name='layer%d' % num_layers,
                      transposed=False,
                      bn=True,
                      relu=False,
                      dropout=False))

    # Single-channel score map followed by a sigmoid.
    stages.append(
        UNetBlock_4_1(1,
                      name='layerfinal',
                      transposed=False,
                      bn=False,
                      relu=False,
                      dropout=False))
    stages.append(Activation('sigmoid', name='layerfinal/sigmoid'))

    return compose(*stages)
示例#19
0
def transmission_map_generator(img_shape, output_channel=3, ngf=64):
    """Transmission-map branch: UNet trunk (no top) -> multi-scale concat
    sampling -> 3x3 conv -> tanh.

    The UNet depth is log2 of the shorter spatial side, so the bottleneck
    reaches a 1-pixel resolution for power-of-two inputs.
    """
    depth = int(log2(min(img_shape[:2])))

    return compose(
        unet_layers(depth, 20, ngf, include_top=False, name='trans'),
        Concat_Samping_Block([16, 8, 4, 2], name='trans'),
        Conv2D(output_channel,
               kernel_size=3,
               strides=1,
               padding='same',
               use_bias=False,
               name='trans/output_block/conv'),
        Activation('tanh', name='trans/output_block/tanh'))
示例#20
0
def bottleneck_block(out_filters, kernal_size_2, dropRate=0.0, name=None):
    """DenseNet bottleneck: (BN-ReLU-1x1 conv) then (BN-ReLU-kxk conv),
    each stage optionally followed by dropout.

    The 1x1 stage widens to 4 * out_filters before the kxk stage narrows
    back to out_filters.
    """
    inter_filters = out_filters * 4
    if name is not None:
        name = name + '/'

    def _sub(suffix):
        # Namespaced layer name, or None when the block is unnamed.
        return name + suffix if name else None

    stage_1 = compose(
        BatchNormalization(name=_sub('bn1')),
        ReLU(name=_sub('relu1')),
        Conv2D(inter_filters,
               kernel_size=1,
               strides=1,
               padding='same',
               use_bias=False,
               name=_sub('conv2d1')))
    if dropRate:
        stage_1 = compose(stage_1,
                          Dropout(dropRate, name=_sub('dropout1')))

    stage_2 = compose(
        BatchNormalization(name=_sub('bn2')),
        ReLU(name=_sub('relu2')),
        Conv2D(out_filters,
               kernel_size=kernal_size_2,
               strides=1,
               padding='same',
               use_bias=False,
               name=_sub('conv2d2')))
    if dropRate:
        stage_2 = compose(stage_2,
                          Dropout(dropRate, name=_sub('dropout2')))

    return compose(stage_1, stage_2)
示例#21
0
def G_base(inputs, output_num_channel=3, num_filters=64):
    """Encoder-decoder generator trunk with additive skip connections.

    Encoder widths come from the module-level `conv_filters_multipliers`,
    decoder widths from `conv_transpose_filters_multipliers`. Returns the
    tensor from the final transposed block ('dlayer1'); the caller
    typically applies a tanh on top (see G above).
    """
    outs = []
    # First encoder conv ('layer1'); every encoder output is saved for
    # possible skip connections.
    x = Conv2D(num_filters * conv_filters_multipliers[0],
               kernel_size=3,
               strides=1,
               padding='same',
               use_bias=False,
               name='layer1')(inputs)
    outs.append(x)

    # Remaining encoder blocks ('layer2'...); a multiplier of 0 maps to a
    # single-filter bottleneck layer.
    for layer_idx, multiplier in enumerate(conv_filters_multipliers[1:]):
        name = 'layer%d' % (layer_idx + 2)
        x = UNetBlock_3_1(int(num_filters *
                              multiplier) if multiplier != 0 else 1,
                          name=name,
                          transposed=False,
                          bn=True,
                          relu=False,
                          dropout=False)(x)
        outs.append(x)

    # First decoder block ('dlayer6'), with dropout and no batch-norm.
    x = UNetBlock_3_1(int(num_filters * conv_transpose_filters_multipliers[0]),
                      name='dlayer6',
                      transposed=True,
                      bn=False,
                      relu=True,
                      dropout=True)(x)

    # Decoder blocks 'dlayer5' down to 'dlayer2'; encoder skips are added
    # at dlayer5 and dlayer3.
    # NOTE(review): outs[layer_idx - 2] pairs dlayer5 with outs[3] and
    # dlayer3 with outs[1] — verify the resolutions actually match.
    for layer_idx, multiplier in enumerate(
            conv_transpose_filters_multipliers[1:]):
        layer_idx = 5 - layer_idx
        name = 'dlayer%d' % layer_idx
        x = UNetBlock_3_1(num_filters * multiplier,
                          name=name,
                          transposed=True,
                          bn=True,
                          relu=True,
                          dropout=False)(x)
        if layer_idx in [3, 5]:
            x = Add()([x, outs[layer_idx - 2]])

    # Final decoder block ('dlayer1') producing output_num_channel channels.
    x = compose(
        UNetBlock_3_1(output_num_channel,
                      name='dlayer1',
                      transposed=True,
                      bn=False,
                      relu=True,
                      dropout=False))(x)
    return x
示例#22
0
def VGG19ca(img_shape=(128, 128, 3)):
    """Small 4-class classifier head on a VGG19 input, with the first conv
    initialized from ImageNet-pretrained VGG19 weights."""
    vgg19_model = vgg19.VGG19(include_top=False,
                              weights='imagenet',
                              input_shape=img_shape)

    # Build Model on the pretrained model's input tensor.
    inputs = vgg19_model.input

    # Convolutional head (VGG19-style blocks).
    conv_head = compose(
        Conv2D(64, kernel_size=3, strides=1, padding='same'),
        BatchNormalization(epsilon=1e-05, momentum=0.1),
        ReLU(),
        Conv2D(24, kernel_size=3, strides=1, padding='same'),
        ReLU(),
        AvgPool2D(7))

    # Fully-connected head.
    dense_head = compose(Flatten(), Dense(512), ReLU(), Dense(4))

    outputs = compose(conv_head, dense_head)(inputs)
    model = Model(inputs=[inputs], outputs=[outputs])

    # Transfer pretrained weights into the first conv layer only.
    model.layers[1].set_weights(vgg19_model.layers[1].get_weights())
    return model
示例#23
0
def sampling_block(pool, ds_layer, us_layer, kernel_size=1, name=None):
    """Downsample -> 1-channel conv -> LeakyReLU(0.2) -> upsample.

    Args:
        pool: pooling/upsampling factor passed to ds_layer and us_layer.
        ds_layer: downsampling layer factory taking (pool, name=...).
        us_layer: upsampling layer factory taking (pool, name=...).
        kernel_size: kernel size of the middle conv.
        name: optional prefix for all layer names.

    BUG FIX: the prefix previously had '/' appended while every suffix
    also began with '/', yielding doubled separators like 'name//ds'
    (unlike UpSampling_Block and the rest of the file).
    """
    layers = compose(
        ds_layer(pool, name=name + '/ds' if name else None),
        Conv2D(1,
               kernel_size=kernel_size,
               strides=1,
               padding='same',
               name=name + '/conv2d' if name else None),
        LeakyReLU(alpha=0.2, name=name + '/lrelu' if name else None),
        us_layer(pool, name=name + '/us' if name else None))

    return layers
示例#24
0
def D(img_shape, num_filters=64):
    """Conditional discriminator over (transmission map, image) pairs:
    concat -> three downsampling stages -> two stride-1 valid convs with
    zero-padding -> sigmoid score map."""
    trans_input = Input((img_shape), name='trans_input')
    img_input = Input((img_shape), name='img_input')

    stack = compose(
        Concatenate(name='concat'),
        Conv2D(num_filters,
               kernel_size=4,
               strides=2,
               padding='same',
               use_bias=False,
               name='layer1'),
        UNetBlock(num_filters * 2,
                  name='layer2',
                  transposed=False,
                  bn=True,
                  relu=False,
                  dropout=False),
        UNetBlock(num_filters * 4,
                  name='layer3',
                  transposed=False,
                  bn=True,
                  relu=False,
                  dropout=False),
        # Two stride-1 'valid' convs, each re-padded with zeros.
        LeakyReLU(alpha=0.2, name='layer4/leakyrelu'),
        Conv2D(num_filters * 8,
               kernel_size=4,
               strides=1,
               padding='valid',
               use_bias=False,
               name='layer4/conv'),
        ZeroPadding2D(name='layer4/zeropad'),
        BatchNormalization(name='layer4/bn'),
        LeakyReLU(alpha=0.2, name='layer5/leakyrelu'),
        Conv2D(1,
               kernel_size=4,
               strides=1,
               padding='valid',
               use_bias=False,
               name='layer5/conv'),
        ZeroPadding2D(name='layer5/zeropad'),
        Activation(activation='sigmoid', name='layer5/sigmoid'),
    )

    score = stack([trans_input, img_input])
    return Model(inputs=[trans_input, img_input], outputs=[score])
示例#25
0
def unet_block(filters,
               kernel_size,
               strides,
               name,
               transposed=False,
               bn=False,
               relu=True,
               dropout=False):
    """Activation followed by a (transposed) conv, with optional
    batch-norm and dropout.

    Args:
        filters, kernel_size, strides: conv parameters ('same' padding,
            no bias).
        name: layer-name prefix (may be None for unnamed layers).
        transposed: Conv2DTranspose instead of Conv2D when True.
        bn: append BatchNormalization when True.
        relu: ReLU when True, otherwise LeakyReLU(0.2).
        dropout: append Dropout(0.5) when True.
    """
    if name is not None:
        name = name + '/'

    def _sub(suffix):
        # Namespaced layer name, or None when the block is unnamed.
        return name + suffix if name else None

    activation = (ReLU(name=_sub('relu')) if relu else
                  LeakyReLU(alpha=0.2, name=_sub('lrelu')))

    if transposed:
        convolution = Conv2DTranspose(filters,
                                      kernel_size=kernel_size,
                                      strides=strides,
                                      padding='same',
                                      use_bias=False,
                                      name=_sub('tconv'))
    else:
        convolution = Conv2D(filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding='same',
                             use_bias=False,
                             name=_sub('conv'))

    parts = [activation, convolution]
    if bn:
        parts.append(BatchNormalization(name=_sub('bn')))
    if dropout:
        parts.append(Dropout(0.5, name=_sub('dropout')))

    return compose(*parts)
示例#26
0
from compose import *
from tensorflow.keras.layers import Add, Conv2D, LeakyReLU

__all__ = ['residual_block']

# Residual block 1: 3x3 'same' conv (32 filters, no bias) + LeakyReLU.
# NOTE(review): the conv is named 'block_1/conv2d' but the activation is
# 'block1/relu_1' (no underscore), inconsistent with the 'block_{n}/...'
# scheme used by blockn_1 below — confirm before renaming, since saved
# weights may be keyed on these exact names.
block1 = lambda name: compose(
    Conv2D(32,
           kernel_size=3,
           strides=1,
           padding='same',
           use_bias=False,
           name=name + 'block_1/conv2d'), LeakyReLU(name=name + 'block1/relu_1'
                                                    ))

# Residual block n (variant 1): two 1x1 'same' conv (32 filters, no bias)
# + LeakyReLU pairs, named under 'block_{n}/'.
blockn_1 = lambda n, name: compose(
    Conv2D(32,
           kernel_size=1,
           strides=1,
           padding='same',
           use_bias=False,
           name=name + 'block_{0}/conv2d_1'.format(n)),
    LeakyReLU(name=name + 'block_{0}/relu_1'.format(n)),
    Conv2D(32,
           kernel_size=1,
           strides=1,
           padding='same',
           use_bias=False,
           name=name + 'block_{0}/conv2d_2'.format(n)),
    LeakyReLU(name=name + 'block_{0}/relu_2'.format(n)))

blockn_2 = lambda n, name: compose(
示例#27
0
 def base_layer(filters, layer_id):
     # Conv -> BN -> ReLU building block; the conv2d/bn/relu helpers are
     # defined in the enclosing scope (not visible in this fragment).
     return compose(conv2d(filters, layer_id), bn(layer_id), relu(layer_id))
示例#28
0
from compose import *
from tensorflow.keras.layers import Activation, Add, Conv2D, Input, LeakyReLU, Multiply

__all__ = ['conv_lstm']

# 3x3 'same' conv (no bias) followed by the given activation; used below to
# build the i/f/c/o gates and the attention map of the conv-LSTM.
conv_activat = lambda activat, block_name, name, out_channel=32: compose(
    Conv2D(out_channel, kernel_size=3, strides=1, padding='same', use_bias=False, name=name + 'conv_' + block_name),
    Activation(activat, name=name + 'activat_' + block_name)
)

def conv_lstm(input_tensor, input_cell_state, name):
    """One step of a convolutional LSTM that also produces an attention map.

    NOTE(review): this block appears truncated by extraction — it computes
    `attention_map` (plus `lstm_feats` and `cell_state`) but no return
    statement is visible here; confirm against the original source.
    """
    if type(name) is str:
        name = name + '/'

    # Standard LSTM gates, each a 3x3 conv + activation over the input.
    sigmoid_input = conv_activat('sigmoid', 'i', name)(input_tensor)
    sigmoid_forget = conv_activat('sigmoid', 'f', name)(input_tensor)
    tanh_cell_state = conv_activat('tanh', 'c', name)(input_tensor)
    sigmoid_output = conv_activat('sigmoid', 'o', name)(input_tensor)

    # c_t = f * c_{t-1} + i * c~
    cell_state = Add(name=name + 'add_c')([
        Multiply(name=name + 'mul_f_c')([sigmoid_forget, input_cell_state]),
        Multiply(name=name + 'mul_i_c')([sigmoid_input, tanh_cell_state])
    ])

    # h_t = o * tanh(c_t)
    lstm_feats = Multiply(name=name + 'mul_lf')([
        sigmoid_output,
        Activation('tanh', name=name + 'tanh_c')(cell_state)
    ])

    # Single-channel sigmoid attention map from the LSTM features.
    attention_map = conv_activat('sigmoid', 'attention_map', name, out_channel=1)(lstm_feats)
示例#29
0
import os
from compose import *

# Collect the strips (Streifen) whose filenames start with 'juf', in sorted
# filename order, and stitch them into a single composite image.
# NOTE(review): `Image` is not imported explicitly here — presumably it is
# re-exported by `compose`'s star import (PIL.Image); confirm.
streifen = []
for f in sorted(os.listdir("./try2")):
    # if f.startswith("ju") and not f.startswith("juf"):
    if f.startswith("juf"):
        streifen.append(Image.open("./try2/" + f))
        print("choosing", f)

composite = compose(cropped(streifen))

composite.save("./composite.tif")
# os.system("xviewer composite.tif")
# os.system("display composite.tif")
示例#30
0
	def test_compose(self):
		"""Composing add, mult and square and applying to 5 yields 144."""
		self.assertEqual(144, compose(add, mult, square)(5))
示例#31
0
	def test_compose(self):
		"""Invoking an empty composition raises IndexError."""
		with self.assertRaises(IndexError):
			compose()(5)
示例#32
0
	def test_compose(self):
		"""A non-callable component (5) raises TypeError when invoked."""
		with self.assertRaises(TypeError):
			compose(add, 5)(5)
示例#33
0
	def test_compose(self):
		"""Passing a string through numeric functions raises TypeError."""
		with self.assertRaises(TypeError):
			compose(add, mult, square)("hello")
示例#34
0
def copy_configs(yml):
    """Upload Config.toml to the remote host, then docker-cp it into every
    container of the compose project described by `yml`."""
    # there's no `cp` in `docker-compose`: https://github.com/docker/compose/issues/5523
    put("Config.toml", "./")
    containers = compose('ps -q', yml).splitlines()
    # Renamed from `id`, which shadowed the builtin (consistent with
    # get_fluence_addresses above).
    for container_id in containers:
        run('docker cp ./Config.toml %s:/Config.toml' % container_id)