Example #1
def decoder_end_block(x, label_sizes, nb_bits, activation, weight_decay=0.):
    x = Flatten()(x)
    ids = sequential([
        Dense(256, W_regularizer=l2(weight_decay)),
        activation(),
        BatchNormalization(mode=0, axis=1),
        Dropout(0.5),
    ])(x)

    outputs = OrderedDict()
    losses = OrderedDict()
    for i in range(nb_bits):
        name = 'bit_{}'.format(i)
        outputs[name] = Dense(1, activation='sigmoid', name=name)(ids)
        losses[name] = 'binary_crossentropy'

    params = sequential([
        Dense(256, W_regularizer=l2(weight_decay)),
        activation(),
        BatchNormalization(mode=0, axis=1),
        Dropout(0.5),
    ])(x)

    for name, output_size in label_sizes:
        outputs[name] = Dense(output_size, activation='tanh', name=name)(params)
        losses[name] = 'mse'

    return outputs, losses
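
All of these examples build networks with a `sequential` helper. As a rough mental model — an assumption inferred purely from how it is called in these snippets, not the library's actual implementation — it flattens an arbitrarily nested list of layers and returns a callable that threads a tensor through them in order; the `ns` (namespace) and `trainable` keyword arguments seen in later examples are accepted but not modelled here.

# Hypothetical sketch of `sequential`, inferred from usage in these examples.
# `ns` and `trainable` are placeholders only; the real helper presumably uses
# them to prefix layer names and to freeze weights.
def sequential(layers, ns=None, trainable=None):
    def flatten(items):
        for item in items:
            if isinstance(item, (list, tuple)):
                yield from flatten(item)
            else:
                yield item

    def apply(x):
        for layer in flatten(layers):
            x = layer(x)
        return x

    return apply

Read this way, `sequential([Dense(256), activation(), ...])(x)` above simply applies each layer to `x` in turn and returns the resulting tensor.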
Example #2
def decoder_end_block(x, label_sizes, nb_bits, activation, weight_decay=0.):
    x = Flatten()(x)
    ids = sequential([
        Dense(256, W_regularizer=l2(weight_decay)),
        activation(),
        BatchNormalization(mode=0, axis=1),
        Dropout(0.5),
    ])(x)

    outputs = OrderedDict()
    losses = OrderedDict()
    for i in range(nb_bits):
        name = 'bit_{}'.format(i)
        outputs[name] = Dense(1, activation='sigmoid', name=name)(ids)
        losses[name] = 'binary_crossentropy'

    params = sequential([
        Dense(256, W_regularizer=l2(weight_decay)),
        activation(),
        BatchNormalization(mode=0, axis=1),
        Dropout(0.5),
    ])(x)

    for name, output_size in label_sizes:
        outputs[name] = Dense(output_size, activation='tanh',
                              name=name)(params)
        losses[name] = 'mse'

    return outputs, losses
Example #3
def test_model_multiple_calls():
    x1 = Input(shape=(20,))

    y1 = sequential([
        Dense(10),
        Dense(1),
    ])(x1)
    m1 = Model(x1, y1)

    x2 = Input(shape=(25,))
    y2 = sequential([
        Dense(20),
        m1
    ])(x2)
    m2 = Model(x2, y2)
    m2.compile('adam', 'mse')

    x3 = Input(shape=(20,))
    y3 = sequential([
        Dense(25),
        m2
    ])(x3)
    m3 = Model(x3, y3)
    m3.compile('adam', 'mse')
    m3.train_on_batch(np.zeros((32, 20)), np.zeros((32, 1)))
Example #4
    def wrapper(x):
        shape = x._keras_shape
        if shape[1] != n:
            x = sequential(conv2d_block(n, filters, depth=1, activation=activation))(x)

        f = sequential(conv2d_block(n, filters, depth=2, activation=activation))
        return merge([x, f(x)], mode='sum')
Example #5
    def tag3d_network_dense(x):
        mask = sequential(
            [Dense(16), Reshape((1, 4, 4)),
             UpSampling2D((16, 16))])(x)

        depth_map = sequential(
            [Dense(16), Reshape((1, 4, 4)),
             UpSampling2D((4, 4))])(x)
        return mask, depth_map
Example #6
    def tag3d_network_dense(x):
        mask = sequential([
            Dense(16),
            Reshape((1, 4, 4)),
            UpSampling2D((16, 16))
        ])(x)

        depth_map = sequential([
            Dense(16),
            Reshape((1, 4, 4)),
            UpSampling2D((4, 4))
        ])(x)
        return mask, depth_map
Example #7
def get_offset_back(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    back_feature_map = sequential([
        UpSampling2D(),  # 64x64
        conv(n, 3, 3),
        conv(n, 3, 3),
        InBounds(-1, 1),
    ], ns='offset.back')(input)

    return back_feature_map, sequential([
        Convolution2D(1, 3, 3, border_mode='same'),
        InBounds(-1, 1),
    ], ns='offset.back_out')(back_feature_map)
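
Example #7 (and most of the generator functions below) first merges its inputs with a `concat` helper. A minimal sketch consistent with the two call styles that appear in these examples — `concat(inputs)` with a list and `concat(a, b)` with separate tensors — is shown here; the signature is an assumption inferred from usage, built on the Keras 1 functional `merge` that the rest of this code targets.

# Hypothetical sketch of `concat`; inferred from usage, not the library's code.
# Joins tensors along the channel axis (axis=1 for the 'th' dim ordering used
# in these models) and passes a single tensor through unchanged.
from keras.layers import merge

def concat(tensors, *rest, axis=1, name=None):
    if not isinstance(tensors, (list, tuple)):
        tensors = [tensors] + list(rest)
    if len(tensors) == 1:
        return tensors[0]
    return merge(tensors, mode='concat', concat_axis=axis, name=name)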
Example #8
def tag_3d_network_conv(input, nb_inputs, nb_units=64, depth=2, filter_size=3):
    n = nb_units

    def conv(n, repeats=1, f=None):
        if f is None:
            f = filter_size
        return [[
            Convolution2D(n, f, f, border_mode='same', init='he_normal'),
            Activation('relu')
        ] for _ in range(repeats)]

    base = sequential(
        [
            Reshape((
                nb_inputs,
                1,
                1,
            )),
            conv(8 * n, depth, f=1),
            UpSampling2D(),  # 2x2
            conv(8 * n, depth, f=2),
            UpSampling2D(),  # 4x4
            conv(8 * n, depth),
            UpSampling2D(),  # 8x8
            conv(4 * n, depth),
            UpSampling2D(),  # 16x16
            conv(2 * n),
        ],
        ns='mask_gen.base')(input)

    mask = sequential(
        [
            conv(2 * n, depth),
            UpSampling2D(),  # 32x32
            conv(n, depth),
            UpSampling2D(),  # 64x64
            conv(n, depth - 1),
            Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
        ],
        ns='mask_gen.mask')(base)

    depth_map = sequential([
        conv(n // 2, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='mask_gen.depth_map')(base)

    return mask, depth_map
Example #9
 def _bn_relu_conv(nb_filter, nb_row=3, nb_col=3, subsample=1):
     return sequential([
         BatchNormalization(mode=0, axis=1),
         ELU(),
         Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col,
                       subsample=(subsample, subsample), init="he_normal", border_mode="same")
     ])
Example #10
def render_gan_discriminator_resnet(x, n=32, conv_repeat=1, dense=[], out_activation='sigmoid'):
    def get_dense(nb):
        return [
            Dense(nb),
            batch_norm(),
            LeakyReLU(0.2),
        ]

    return sequential([
        Convolution2D(n, 3, 3, border_mode='same'),
        resnet(n, activation=LeakyReLU(0.3)),
        Convolution2D(n, 3, 3, subsample=(2, 2), border_mode='same'),
        LeakyReLU(0.2),
        batch_norm(),
        resnet(2*n, activation=LeakyReLU(0.3)),
        resnet(2*n, activation=LeakyReLU(0.3)),
        resnet(2*n, activation=LeakyReLU(0.3)),
        resnet(2*n, activation=LeakyReLU(0.3)),
        resnet(2*n, activation=LeakyReLU(0.3)),
        Convolution2D(4*n, 3, 3, subsample=(2, 2), border_mode='same'),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        Convolution2D(4*n, 3, 3, subsample=(2, 2), border_mode='same'),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        Flatten(),
        [get_dense(nb) for nb in dense],
        Dense(1, activation=out_activation)
    ], ns='dis')(concat(x, axis=0, name='concat_fake_real'))
Example #11
def render_gan_discriminator(x, n=32, conv_repeat=1, dense=[],
                             out_activation='sigmoid'):
    def conv(n):
        layers = [
            Convolution2D(n, 3, 3, subsample=(2, 2), border_mode='same'),
            batch_norm(),
            LeakyReLU(0.2),
        ]

        return layers + [[
            Convolution2D(n, 3, 3, border_mode='same'),
            batch_norm(),
            LeakyReLU(0.2),
        ] for _ in range(conv_repeat-1)]

    def get_dense(nb):
        return [
            Dense(nb),
            batch_norm(),
            LeakyReLU(0.2),
        ]

    return sequential([
        Convolution2D(n, 5, 5, subsample=(2, 2), border_mode='same'),
        LeakyReLU(0.2),
        conv(2*n),
        conv(4*n),
        conv(8*n),
        Flatten(),
        [get_dense(nb) for nb in dense],
        Dense(1, activation=out_activation)
    ], ns='dis')(concat(x, axis=0, name='concat_fake_real'))
Example #12
def get_lighting_generator(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    light_conv = sequential(
        [
            conv(n, 3, 3),  # 16x16
            MaxPooling2D(),  # 8x8
            conv(n, 3, 3),
            conv(n, 3, 3),
            UpSampling2D(),  # 16x16
            conv(n, 3, 3),
            UpSampling2D(),  # 32x32
            conv(n, 3, 3),
            Convolution2D(3, 1, 1, border_mode='same'),
            UpSampling2D(),  # 64x64
            GaussianBlur(sigma=2.5),
        ],
        ns='lighting')(input)

    shift = Subtensor(0, 1, axis=1)(light_conv)
    shift = InBounds(-1, 1)(shift)
    in_bounds = InBounds(0, 2)

    scale_black = in_bounds(Subtensor(1, 2, axis=1)(light_conv))
    scale_white = in_bounds(Subtensor(2, 3, axis=1)(light_conv))

    return [scale_black, scale_white, shift]
Example #13
def get_lighting_generator(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    light_conv = sequential([
        conv(n, 3, 3),   # 16x16
        MaxPooling2D(),  # 8x8
        conv(n, 3, 3),
        conv(n, 3, 3),
        UpSampling2D(),  # 16x16
        conv(n, 3, 3),
        UpSampling2D(),  # 32x32
        conv(n, 3, 3),
        Convolution2D(3, 1, 1, border_mode='same'),
        UpSampling2D(),  # 64x64
        GaussianBlur(sigma=2.5),
    ], ns='lighting')(input)

    shift = Subtensor(0, 1, axis=1)(light_conv)
    shift = InBounds(-1, 1)(shift)
    in_bounds = InBounds(0, 2)

    scale_black = in_bounds(Subtensor(1, 2, axis=1)(light_conv))
    scale_white = in_bounds(Subtensor(2, 3, axis=1)(light_conv))

    return [scale_black, scale_white, shift]
Example #14
def get_decoder_model(input,
                      nb_units,
                      nb_output=NUM_MIDDLE_CELLS + NUM_CONFIGS,
                      depth=1,
                      dense=[]):
    def dense_bn(n):
        return [Dense(n), batch_norm(mode=1), Activation('relu')]

    def conv(n):
        return [[
            Convolution2D(n, 3, 3),
            batch_norm(mode=1),
            Activation('relu')
        ] for _ in range(depth)]

    n = nb_units
    return sequential([
        conv(n),
        MaxPooling2D(),  # 32x32
        conv(2 * n),
        MaxPooling2D(),  # 16x16
        conv(4 * n),
        MaxPooling2D(),  # 8x8
        conv(8 * n),
        MaxPooling2D(),  # 4x4
        conv(16 * n),
        Flatten(),
        [dense_bn(d) for d in dense],
        Dense(nb_output)
    ])(input)
Example #15
def render_gan_discriminator(x,
                             n=32,
                             conv_repeat=1,
                             dense=[],
                             out_activation='sigmoid'):
    def conv(n):
        layers = [
            Convolution2D(n, 3, 3, subsample=(2, 2), border_mode='same'),
            batch_norm(),
            LeakyReLU(0.2),
        ]

        return layers + [[
            Convolution2D(n, 3, 3, border_mode='same'),
            batch_norm(),
            LeakyReLU(0.2),
        ] for _ in range(conv_repeat - 1)]

    def get_dense(nb):
        return [
            Dense(nb),
            batch_norm(),
            LeakyReLU(0.2),
        ]

    return sequential([
        Convolution2D(n, 5, 5, subsample=(2, 2), border_mode='same'),
        LeakyReLU(0.2),
        conv(2 * n),
        conv(4 * n),
        conv(8 * n),
        Flatten(), [get_dense(nb) for nb in dense],
        Dense(1, activation=out_activation)
    ], ns='dis')(concat(x, axis=0, name='concat_fake_real'))
Example #16
def tag3d_network_dense(input, nb_units=64, nb_dense_units=[512, 512],
                        depth=2, nb_output_channels=1, trainable=True):
    n = nb_units

    def conv(n, repeats=None):
        def normal(shape, name=None):
            return keras.initializations.normal(shape, scale=0.01, name=name)

        if repeats is None:
            repeats = depth
        return [
            [
                Convolution2D(n, 3, 3, border_mode='same', init='he_normal'),
                Activation('relu')
            ] for _ in range(repeats)
        ]

    base = sequential([
        [
            Dense(nb_dense, activation='relu')
            for nb_dense in nb_dense_units
        ],
        Dense(8*n*4*4),
        Activation('relu'),
        Reshape((8*n, 4, 4,)),
        conv(8*n),
        UpSampling2D(),  # 8x8
        conv(4*n),
        UpSampling2D(),  # 16x16
        conv(2*n),
    ], ns='tag3d_gen.base', trainable=trainable)(input)

    tag3d = sequential([
        conv(2*n),
        UpSampling2D(),  # 32x32
        conv(n),
        UpSampling2D(),  # 64x64
        conv(n, 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='tag3d', trainable=trainable)(base)

    depth_map = sequential([
        conv(n // 2, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='depth_map', trainable=trainable)(base)

    return name_tensor(tag3d, 'tag3d'), name_tensor(depth_map, 'depth_map')
Example #17
def get_blur_factor(inputs, min=0, max=2):
    input = concat(inputs)
    return sequential([
        Convolution2D(1, 3, 3),
        Flatten(),
        Dense(1),
        InBounds(min, max),
    ], ns='mask_weight_blending')(input)
Example #18
def get_offset_back(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    back_feature_map = sequential(
        [
            UpSampling2D(),  # 64x64
            conv(n, 3, 3),
            conv(n, 3, 3),
            InBounds(-1, 1),
        ],
        ns='offset.back')(input)

    return back_feature_map, sequential([
        Convolution2D(1, 3, 3, border_mode='same'),
        InBounds(-1, 1),
    ], ns='offset.back_out')(back_feature_map)
Example #19
 def discriminator_fn(x):
     return gan_outputs(sequential([
         Flatten(),
         Dense(1),
     ])(concat(x)),
                        fake_for_gen=(0, 10),
                        fake_for_dis=(0, 10),
                        real=(10, 20))
Example #20
def test_resnet():
    n = 4
    x = Input(shape=(1, 8, 8))
    y = sequential([
        conv2d_block(n),
        resnet(n)
    ])(x)
    model = Model(x, y)
    assert model.get_output_shape_for((None, 1, 8, 8)) == (None, n, 8, 8)
Example #21
def get_details(inputs, nb_units):
    n = nb_units
    return sequential([
        conv(n, 3, 3),
        conv(n, 3, 3),
        conv(n, 3, 3),
        Convolution2D(1, 3, 3, border_mode='same', init='normal'),
        InBounds(-2, 2)
    ], ns='details')(concat(inputs))
Example #22
def get_offset_middle(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    return sequential([
        UpSampling2D(),  # 32x32
        conv(2*n, 3, 3),
        conv(2*n, 3, 3),
        conv(2*n, 3, 3),
    ], ns='offset.middle')(input)
Example #23
def get_blur_factor(inputs, min=0, max=2):
    input = concat(inputs)
    return sequential([
        Convolution2D(1, 3, 3),
        Flatten(),
        Dense(1),
        InBounds(min, max),
    ], ns='mask_weight_blending')(input)
Example #24
def get_details(inputs, nb_units):
    n = nb_units
    return sequential([
        conv(n, 3, 3),
        conv(n, 3, 3),
        conv(n, 3, 3),
        Convolution2D(1, 3, 3, border_mode='same', init='normal'),
        InBounds(-2, 2)
    ], ns='details')(concat(inputs))
Example #25
def get_offset_middle(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    return sequential(
        [
            UpSampling2D(),  # 32x32
            conv(2 * n, 3, 3),
            conv(2 * n, 3, 3),
            conv(2 * n, 3, 3),
        ],
        ns='offset.middle')(input)
Example #26
 def _bn_relu_conv(nb_filter, nb_row=3, nb_col=3, subsample=1):
     return sequential([
         BatchNormalization(mode=0, axis=1),
         ELU(),
         Convolution2D(nb_filter=nb_filter,
                       nb_row=nb_row,
                       nb_col=nb_col,
                       subsample=(subsample, subsample),
                       init="he_normal",
                       border_mode="same")
     ])
Example #27
def tag_3d_network_conv(input, nb_inputs, nb_units=64, depth=2, filter_size=3):
    n = nb_units

    def conv(n, repeats=1, f=None):
        if f is None:
            f = filter_size
        return [
            [
                Convolution2D(n, f, f, border_mode='same', init='he_normal'),
                Activation('relu')
            ] for _ in range(repeats)
        ]

    base = sequential([
        Reshape((nb_inputs, 1, 1,)),
        conv(8*n, depth, f=1),
        UpSampling2D(),  # 2x2
        conv(8*n, depth, f=2),
        UpSampling2D(),  # 4x4
        conv(8*n, depth),
        UpSampling2D(),  # 8x8
        conv(4*n, depth),
        UpSampling2D(),  # 16x16
        conv(2*n),
    ], ns='mask_gen.base')(input)

    mask = sequential([
        conv(2*n, depth),
        UpSampling2D(),  # 32x32
        conv(n, depth),
        UpSampling2D(),  # 64x64
        conv(n, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='mask_gen.mask')(base)

    depth_map = sequential([
        conv(n // 2, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='mask_gen.depth_map')(base)

    return mask, depth_map
Example #28
def test_conv2d_block():
    x = Input(shape=(1, 8, 8))
    y = sequential(
        conv2d_block(4)
    )(x)
    model = Model(x, y)
    assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 8, 8)

    x = Input(shape=(1, 8, 8))
    y = sequential(
        conv2d_block(4, pooling='avg')
    )(x)
    model = Model(x, y)
    assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 4, 4)

    x = Input(shape=(1, 8, 8))
    y = sequential(
        conv2d_block(4, up=True)
    )(x)
    model = Model(x, y)
    assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 16, 16)
Example #29
def simple_gan():
    z = Input(batch_shape=simple_gan_z_shape, name='z')
    generator = sequential([
        Dense(4*simple_gan_nb_z, activation='relu', name='g1'),
        Dense(4*simple_gan_nb_z, activation='relu', name='g2'),
        Dense(simple_gan_nb_out, name='g_loss'),
    ])(z)

    d_input = Input(batch_shape=simple_gan_real_shape, name='data')

    discriminator = sequential([
        Dense(400, input_dim=2, name='d1'),
        LeakyReLU(0.3),
        Dense(400, name='d2'),
        LeakyReLU(0.3),
        Dense(1, activation='sigmoid', name='d_loss')
    ])(d_input)
    g = Model(z, generator)
    g.compile(Adam(lr=0.0002, beta_1=0.5), {'g_loss': 'binary_crossentropy'})
    d = Model(d_input, discriminator)
    d.compile(Adam(lr=0.0002, beta_1=0.5), {'d_loss': 'binary_crossentropy'})
    return GAN(g, d)
Example #30
def decoder_baseline(label_sizes, nb_bits=12, data_shape=(1, 64, 64),
                     depth=1, nb_filter=16, optimizer='adam'):
    n = nb_filter
    input = Input(shape=data_shape)
    x = sequential([
        conv2d_block(n, depth=depth, pooling='max'),    # 32x32
        conv2d_block(2*n, depth=depth, pooling='max'),  # 16x16
        conv2d_block(4*n, depth=depth, pooling='max'),  # 8x8
        conv2d_block(8*n, depth=depth, pooling='max'),  # 4x4
    ])(input)
    outputs, losses = decoder_end_block(x, label_sizes, nb_bits,
                                        activation=lambda: ELU())
    model = Model(input, list(outputs.values()))
    model.compile(optimizer, loss=list(losses.values()),)
    return model
Example #31
def get_offset_front(inputs, nb_units):
    n = nb_units
    input = concat(inputs)

    return sequential([
        Dense(8*n*4*4),
        batch_norm(),
        Activation('relu'),
        Reshape((8*n, 4, 4)),
        UpSampling2D(),  # 8x8
        conv(4*n, 3, 3),
        conv(4*n, 3, 3),
        UpSampling2D(),  # 16x16
        conv(2*n, 3, 3),
        conv(2*n, 3, 3),
    ], ns='offset.front')(input)
Example #32
def get_label_generator(x, nb_units, nb_output_units):
    n = nb_units
    driver = sequential([
        Dense(n),
        batch_norm(),
        Dropout(0.25),
        Activation('relu'),
        Dense(n),
        batch_norm(),
        Dropout(0.25),
        Activation('relu'),
        Dense(nb_output_units),
        batch_norm(gamma_init=constant_init(0.25)),
        InBounds(-1, 1),
    ], ns='driver')
    return driver(x)
Example #33
def get_preprocess(input, nb_units, nb_conv_layers=None, resize=None, ns=None):
    assert not (nb_conv_layers is None and resize is None)

    if nb_conv_layers is None:
        nb_conv_layers = len(resize)

    if resize is None:
        resize = [None] * nb_conv_layers

    if type(nb_units) == int:
        nb_units = [nb_units] * nb_conv_layers

    layers = []
    for i, (units, up_or_down) in enumerate(zip(nb_units, resize)):
        layers.extend(conv_block(units, up_or_down))
    return sequential(layers, ns=ns)(input)
Example #34
def get_preprocess(input, nb_units, nb_conv_layers=None, resize=None, ns=None):
    assert not (nb_conv_layers is None and resize is None)

    if nb_conv_layers is None:
        nb_conv_layers = len(resize)

    if resize is None:
        resize = [None] * nb_conv_layers

    if type(nb_units) == int:
        nb_units = [nb_units] * nb_conv_layers

    layers = []
    for i, (units, up_or_down) in enumerate(zip(nb_units, resize)):
        layers.extend(conv_block(units, up_or_down))
    return sequential(layers, ns=ns)(input)
Example #35
def get_label_generator(x, nb_units, nb_output_units):
    n = nb_units
    driver = sequential([
        Dense(n),
        batch_norm(),
        Dropout(0.25),
        Activation('relu'),
        Dense(n),
        batch_norm(),
        Dropout(0.25),
        Activation('relu'),
        Dense(nb_output_units),
        batch_norm(gamma_init=constant_init(0.25)),
        InBounds(-1, 1),
    ], ns='driver')
    return driver(x)
Example #36
def get_offset_front(inputs, nb_units):
    n = nb_units
    input = concat(inputs)

    return sequential(
        [
            Dense(8 * n * 4 * 4),
            batch_norm(),
            Activation('relu'),
            Reshape((8 * n, 4, 4)),
            UpSampling2D(),  # 8x8
            conv(4 * n, 3, 3),
            conv(4 * n, 3, 3),
            UpSampling2D(),  # 16x16
            conv(2 * n, 3, 3),
            conv(2 * n, 3, 3),
        ],
        ns='offset.front')(input)
Example #37
def render_gan_discriminator_resnet(x,
                                    n=32,
                                    conv_repeat=1,
                                    dense=[],
                                    out_activation='sigmoid'):
    def get_dense(nb):
        return [
            Dense(nb),
            batch_norm(),
            LeakyReLU(0.2),
        ]

    return sequential([
        Convolution2D(n, 3, 3, border_mode='same'),
        resnet(n, activation=LeakyReLU(0.3)),
        Convolution2D(n, 3, 3, subsample=(2, 2), border_mode='same'),
        LeakyReLU(0.2),
        batch_norm(),
        resnet(2 * n, activation=LeakyReLU(0.3)),
        resnet(2 * n, activation=LeakyReLU(0.3)),
        resnet(2 * n, activation=LeakyReLU(0.3)),
        resnet(2 * n, activation=LeakyReLU(0.3)),
        resnet(2 * n, activation=LeakyReLU(0.3)),
        Convolution2D(4 * n, 3, 3, subsample=(2, 2), border_mode='same'),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        Convolution2D(4 * n, 3, 3, subsample=(2, 2), border_mode='same'),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        resnet(4 * n, activation=LeakyReLU(0.3)),
        Flatten(), [get_dense(nb) for nb in dense],
        Dense(1, activation=out_activation)
    ], ns='dis')(concat(x, axis=0, name='concat_fake_real'))
Example #38
def get_decoder_model(
        input,
        nb_units,
        nb_output=NUM_MIDDLE_CELLS + NUM_CONFIGS,
        depth=1,
        dense=[]):
    def dense_bn(n):
        return [
            Dense(n),
            batch_norm(mode=1),
            Activation('relu')
        ]

    def conv(n):
        return [
            [Convolution2D(n, 3, 3),
             batch_norm(mode=1),
             Activation('relu')
             ]
            for _ in range(depth)
        ]
    n = nb_units
    return sequential([
        conv(n),
        MaxPooling2D(),  # 32x32
        conv(2*n),
        MaxPooling2D(),  # 16x16
        conv(4*n),
        MaxPooling2D(),  # 8x8
        conv(8*n),
        MaxPooling2D(),  # 4x4
        conv(16*n),
        Flatten(),
        [dense_bn(d) for d in dense],
        Dense(nb_output)
    ])(input)
Example #39
def decoder_baseline(label_sizes,
                     nb_bits=12,
                     data_shape=(1, 64, 64),
                     depth=1,
                     nb_filter=16,
                     optimizer='adam'):
    n = nb_filter
    input = Input(shape=data_shape)
    x = sequential([
        conv2d_block(n, depth=depth, pooling='max'),  # 32x32
        conv2d_block(2 * n, depth=depth, pooling='max'),  # 16x16
        conv2d_block(4 * n, depth=depth, pooling='max'),  # 8x8
        conv2d_block(8 * n, depth=depth, pooling='max'),  # 4x4
    ])(input)
    outputs, losses = decoder_end_block(x,
                                        label_sizes,
                                        nb_bits,
                                        activation=lambda: ELU())
    model = Model(input, list(outputs.values()))
    model.compile(
        optimizer,
        loss=list(losses.values()),
    )
    return model
Example #40
 def light_generator(ins):
     seq = sequential([Convolution2D(1, 3, 3,
                                     border_mode='same')])(concat(ins))
     return UpSampling2D((4, 4))(seq), UpSampling2D((4, 4))(seq), \
         UpSampling2D((4, 4))(seq),
Example #41
def simple_gan_generator(nb_units, z, labels, depth_map, tag3d, depth=2):
    n = nb_units
    depth_map_features = sequential([
        conv2d_block(n),
        conv2d_block(2 * n),
    ])(depth_map)

    tag3d_features = sequential([
        conv2d_block(n, subsample=2),
        conv2d_block(2 * n, subsample=2),
    ])(tag3d)

    x = sequential([
        Dense(5 * n),
        BatchNormalization(mode=2),
        Activation('relu'),
        Dense(5 * n),
        BatchNormalization(mode=2),
        Activation('relu'),
    ])(concat([z, labels]))

    blur = InBounds(0, 1, clip=True)(Dense(1)(x))

    x = sequential([
        Dense(8 * 4 * 4 * n),
        Activation('relu'),
        BatchNormalization(mode=2),
        Reshape((8 * n, 4, 4)),
    ])(x)

    x = sequential([
        conv2d_block(8 * n, filters=1, depth=1, up=True),  # 4x4 -> 8x8
        conv2d_block(8 * n, depth=depth, up=True),  # 8x8 -> 16x16
    ])(x)

    off_depth_map = sequential([
        conv2d_block(2 * n, depth=depth),
    ])(concat([x, depth_map_features]))

    light = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 -> 64x64
    ])(off_depth_map)

    def get_light(x):
        return sequential([
            conv2d_block(1, filters=1, batchnorm=False),
            GaussianBlur(sigma=4),
            InBounds(0, 1, clip=True),
        ])(x)

    light_sb = get_light(light)
    light_sw = get_light(light)
    light_t = get_light(light)

    background = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, batchnorm=False),
        InBounds(-1, 1, clip=True),
    ])(off_depth_map)

    details = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, depth=1, batchnorm=False),
        InBounds(-1, 1, clip=True)
    ])(concat(tag3d_features, off_depth_map))
    return blur, [light_sb, light_sw, light_t], background, details
Example #42
 def light_generator(ins):
     seq = sequential([
         Convolution2D(1, 3, 3, border_mode='same')
     ])(concat(ins))
     return UpSampling2D((4, 4))(seq), UpSampling2D((4, 4))(seq), \
         UpSampling2D((4, 4))(seq),
Example #43
 def f(nb_filter, subsample=1):
     return sequential([
         _bn_relu_conv(nb_filter, subsample=subsample),
         _bn_relu_conv(nb_filter),
     ])
Example #44
def tag3d_network_dense(input,
                        nb_units=64,
                        nb_dense_units=[512, 512],
                        depth=2,
                        nb_output_channels=1,
                        trainable=True):
    n = nb_units

    def conv(n, repeats=None):
        def normal(shape, name=None):
            return keras.initializations.normal(shape, scale=0.01, name=name)

        if repeats is None:
            repeats = depth
        return [[
            Convolution2D(n, 3, 3, border_mode='same', init='he_normal'),
            Activation('relu')
        ] for _ in range(repeats)]

    base = sequential(
        [
            [
                Dense(nb_dense, activation='relu')
                for nb_dense in nb_dense_units
            ],
            Dense(8 * n * 4 * 4),
            Activation('relu'),
            Reshape((
                8 * n,
                4,
                4,
            )),
            conv(8 * n),
            UpSampling2D(),  # 8x8
            conv(4 * n),
            UpSampling2D(),  # 16x16
            conv(2 * n),
        ],
        ns='tag3d_gen.base',
        trainable=trainable)(input)

    tag3d = sequential(
        [
            conv(2 * n),
            UpSampling2D(),  # 32x32
            conv(n),
            UpSampling2D(),  # 64x64
            conv(n, 1),
            Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
        ],
        ns='tag3d',
        trainable=trainable)(base)

    depth_map = sequential([
        conv(n // 2, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='depth_map', trainable=trainable)(base)

    return name_tensor(tag3d, 'tag3d'), name_tensor(depth_map, 'depth_map')
Example #45
 def discriminator_fn(x):
     return gan_outputs(sequential([
         Flatten(),
         Dense(1),
     ])(concat(x)), fake_for_gen=(0, 10), fake_for_dis=(0, 10),
                        real=(10, 20))
Example #46
 def get_light(x):
     return sequential([
         conv2d_block(1, filters=1, batchnorm=False),
         GaussianBlur(sigma=4),
         InBounds(0, 1, clip=True),
     ])(x)
Example #47
 def mask_post(x):
     return sequential([Convolution2D(1, 3, 3,
                                      border_mode='same')])(concat(x))
Example #48
 def offset_front(x):
     return sequential(
         [Dense(16), Reshape((1, 4, 4)),
          UpSampling2D((4, 4))])(concat(x))
Example #49
 def mask_post(x):
     return sequential([
         Convolution2D(1, 3, 3, border_mode='same')
     ])(concat(x))
Example #50
 def mask_weight_blending(x):
     return sequential([
         Flatten(),
         Dense(1),
     ])(x)
Example #51
 def offset_back(x):
     feature_map = sequential([
         UpSampling2D(),
     ])(concat(x))
     return feature_map, Convolution2D(1, 3, 3,
                                       border_mode='same')(feature_map)
Example #52
 def offset_back(x):
     feature_map = sequential([
         UpSampling2D(),
     ])(concat(x))
     return feature_map, Convolution2D(1, 3, 3,
                                       border_mode='same')(feature_map)
Example #53
 def mask_weight_blending(x):
     return sequential([
         Flatten(),
         Dense(1),
     ])(x)
Example #54
 def offset_front(x):
     return sequential([
         Dense(16),
         Reshape((1, 4, 4)),
         UpSampling2D((4, 4))
     ])(concat(x))
Example #55
def simple_gan_generator(nb_units, z, labels, depth_map,
                         tag3d, depth=2):
    n = nb_units
    depth_map_features = sequential([
        conv2d_block(n),
        conv2d_block(2*n),
    ])(depth_map)

    tag3d_features = sequential([
        conv2d_block(n, subsample=2),
        conv2d_block(2*n, subsample=2),
    ])(tag3d)

    x = sequential([
        Dense(5*n),
        BatchNormalization(mode=2),
        Activation('relu'),
        Dense(5*n),
        BatchNormalization(mode=2),
        Activation('relu'),
    ])(concat([z, labels]))

    blur = InBounds(0, 1, clip=True)(Dense(1)(x))

    x = sequential([
        Dense(8*4*4*n),
        Activation('relu'),
        BatchNormalization(mode=2),
        Reshape((8*n, 4, 4)),
    ])(x)

    x = sequential([
        conv2d_block(8*n, filters=1, depth=1, up=True),  # 4x4 -> 8x8
        conv2d_block(8*n, depth=depth, up=True),  # 8x8 -> 16x16
    ])(x)

    off_depth_map = sequential([
        conv2d_block(2*n, depth=depth),
    ])(concat([x, depth_map_features]))

    light = sequential([
        conv2d_block(2*n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 -> 64x64
    ])(off_depth_map)

    def get_light(x):
        return sequential([
            conv2d_block(1, filters=1, batchnorm=False),
            GaussianBlur(sigma=4),
            InBounds(0, 1, clip=True),
        ])(x)

    light_sb = get_light(light)
    light_sw = get_light(light)
    light_t = get_light(light)

    background = sequential([
        conv2d_block(2*n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, batchnorm=False),
        InBounds(-1, 1, clip=True),
    ])(off_depth_map)

    details = sequential([
        conv2d_block(2*n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, depth=1, batchnorm=False),
        InBounds(-1, 1, clip=True)
    ])(concat(tag3d_features, off_depth_map))
    return blur, [light_sb, light_sw, light_t], background, details
Example #56
 def get_light(x):
     return sequential([
         conv2d_block(1, filters=1, batchnorm=False),
         GaussianBlur(sigma=4),
         InBounds(0, 1, clip=True),
     ])(x)