Example #1
def test_linear_in_bounds_clip():
    layer = LinearInBounds(-1, 1, clip=True)
    shape = (1, 1)
    layer.build(shape)
    # A value inside [-1, 1] passes through unchanged.
    arr = np.array([[0]], dtype=np.float32)
    output = layer(theano.shared(arr)).eval()
    assert (output == arr).all()

    arr = np.array([[0]], dtype=np.float32)
    output = layer(theano.shared(arr)).eval()
    assert (output == arr).all()

    # A value above the upper bound is clipped to 1.
    arr = np.array([[2]], dtype=np.float32)
    output = layer(theano.shared(arr)).eval()
    assert float(output) == 1.
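The test above only pins down the clipping behaviour of LinearInBounds: inside [-1, 1] it acts as the identity, and with clip=True a value above the upper bound saturates at 1. A minimal sketch of a layer with exactly that behaviour is shown below, assuming the Keras 1.x custom-layer API with a Theano backend; the class name ClipToBounds and its implementation are assumptions, not the library's actual LinearInBounds code.

from keras import backend as K
from keras.engine.topology import Layer


class ClipToBounds(Layer):  # hypothetical stand-in for LinearInBounds
    def __init__(self, low, high, clip=False, **kwargs):
        self.low = low
        self.high = high
        self.clip = clip
        super(ClipToBounds, self).__init__(**kwargs)

    def call(self, x, mask=None):
        # Identity inside [low, high]; out-of-range values are clipped
        # only when clip=True.
        if self.clip:
            return K.clip(x, self.low, self.high)
        return x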
Example #3
def dcgan_generator_conv(n=32,
                         input_dim=50,
                         nb_output_channels=1,
                         init=normal(0.02)):
    def conv(nb_filter, h, w):
        model.add(Convolution2D(nb_filter, h, w, border_mode='same',
                                init=init))
        model.add(BatchNormalization(axis=1))
        model.add(Activation('relu'))

    def deconv(nb_filter, h, w):
        deconv_layer = Deconvolution2D(nb_filter,
                                       h,
                                       w,
                                       border_mode=(1, 1),
                                       init=init)
        model.add(deconv_layer)

        w = np.random.normal(0, 0.02, deconv_layer.W_shape).astype(np.float32)
        w *= np.random.uniform(0, 1, (1, w.shape[1], 1, 1))
        deconv_layer.W.set_value(w)
        model.add(BatchNormalization(axis=1))
        model.add(Activation('relu'))

    def up():
        model.add(UpSampling2D())

    z = Layer(input_shape=(input_dim, ))
    model = Sequential()
    model.add(z)
    model.add(Dense(8 * n * 4 * 4, init=init))
    model.add(batch_norm())
    model.add(Reshape((
        8 * n,
        4,
        4,
    )))
    model.add(Activation('relu'))

    up()  # 8
    conv(4 * n, 3, 3)
    up()  # 16
    conv(2 * n, 3, 3)
    conv(2 * n, 3, 3)
    up()  # 32
    conv(n, 3, 3)
    conv(n, 3, 3)
    up()  # 64
    conv(n, 3, 3)

    model.add(
        Deconvolution2D(nb_output_channels,
                        3,
                        3,
                        border_mode=(1, 1),
                        init=init))
    model.add(LinearInBounds(-1, 1))
    return model
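A hypothetical usage sketch for the generator above (not part of the original snippet): following the up() comments, the spatial resolution grows 4 -> 8 -> 16 -> 32 -> 64, so with Theano dimension ordering the model maps a 50-dimensional noise vector to a (nb_output_channels, 64, 64) image.

import numpy as np

generator = dcgan_generator_conv(n=32, input_dim=50, nb_output_channels=1)
z = np.random.normal(0, 1, (16, 50)).astype(np.float32)
fake = generator.predict(z)
print(fake.shape)  # expected: (16, 1, 64, 64) with channels-first ordering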
Example #4
def get_mask_postprocess(inputs, nb_units):
    n = nb_units
    return sequential([
        conv(n, 3, 3),
        conv(n, 3, 3),
        Deconvolution2D(1, 5, 5, border_mode=(2, 2)),
        LinearInBounds(-1, 1, clip=True),
    ], ns='mask_post')(concat(inputs))
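Several snippets in this collection use helpers defined elsewhere in the source repository: conv, sequential and concat. The sketch below is one plausible reading of them, stated as an assumption rather than the library's actual code: conv bundles a 'same'-padded Convolution2D with batch normalization and ReLU, sequential chains a list of layers into a single callable (ns presumably acts as a name prefix and is ignored here), and concat merges tensors along the channel axis.

from keras.layers import Activation, BatchNormalization, Convolution2D, merge


def conv(nb_filter, h, w):
    # Convolution + batch norm + ReLU, returned as a callable on a tensor.
    def call(x):
        x = Convolution2D(nb_filter, h, w, border_mode='same')(x)
        x = BatchNormalization(axis=1)(x)
        return Activation('relu')(x)
    return call


def sequential(layers, ns=None):
    # Chain a list of layers (or callables) into a single callable.
    def call(x):
        for layer in layers:
            x = layer(x)
        return x
    return call


def concat(tensors, **kwargs):
    # Concatenate along the channel axis (axis 1 with Theano ordering).
    if not isinstance(tensors, (list, tuple)):
        return tensors
    if len(tensors) == 1:
        return tensors[0]
    return merge(list(tensors), mode='concat', concat_axis=1, **kwargs)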
Example #5
def get_mask_weight_blending(inputs, min=0, max=2):
    input = concat(inputs)
    return sequential([
        Convolution2D(1, 3, 3),
        Flatten(),
        Dense(1),
        LinearInBounds(K.variable(min), K.variable(max), clip=True),
    ], ns='mask_weight_blending')(input)
Example #6
def test_linear_in_bounds_regularizer():
    model = Sequential()
    model.add(LinearInBounds(-1, 1, clip=True, input_shape=(1, )))
    model.compile('adam', 'mse')
    # An in-bounds input that already matches its target gives zero loss.
    loss = model.train_on_batch(np.array([[0]]), np.array([[0]]))
    assert float(loss) == 0

    # The clipped prediction for 2 equals the target 1, so the positive loss
    # must come from the layer's out-of-bounds regularization term.
    loss_on_2 = model.train_on_batch(np.array([[2]]), np.array([[1]]))
    assert float(loss_on_2) > 0

    # The penalty grows with the size of the violation.
    loss_on_100 = model.train_on_batch(np.array([[100]]), np.array([[1]]))
    assert float(loss_on_2) < float(loss_on_100)
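The test above implies that LinearInBounds also contributes a regularization loss that grows with how far an activation lies outside the bounds: the clipped prediction for both 2 and 100 equals the target 1, so the increasing loss cannot come from the MSE term. One way such a penalty could be expressed, purely as an assumption about the mechanism, is sketched below.

from keras import backend as K


def out_of_bounds_penalty(x, low=-1., high=1., weight=1.):
    # Zero inside [low, high], growing linearly with the overshoot outside.
    overshoot = K.relu(x - high) + K.relu(low - x)
    return weight * K.mean(overshoot)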
Example #7
def get_offset_back(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    back_feature_map = sequential([
        UpSampling2D(),  # 64x64
        conv(n, 3, 3),
        conv(n, 3, 3),
    ], ns='offset.back')(input)

    out = sequential([
        Convolution2D(1, 3, 3, border_mode='same'),
        LinearInBounds(-1, 1, clip=True),
    ], ns='offset.back_out')(back_feature_map)

    return back_feature_map, out
Example #8
def dcgan_generator(n=32,
                    input_dim=50,
                    nb_output_channels=1,
                    use_dct=False,
                    init=normal(0.02)):
    def deconv(nb_filter, h, w):
        return Deconvolution2D(nb_filter,
                               h,
                               w,
                               subsample=(2, 2),
                               border_mode=(2, 2),
                               init=init)

    model = Sequential()
    model.add(Dense(8 * n * 4 * 4, input_dim=input_dim, init=init))
    model.add(batch_norm())
    model.add(Reshape((
        8 * n,
        4,
        4,
    )))
    if use_dct:
        model.add(iDCT())
    model.add(Activation('relu'))

    model.add(deconv(4 * n, 5, 5))
    model.add(batch_norm())
    model.add(Activation('relu'))

    model.add(deconv(2 * n, 5, 5))
    model.add(batch_norm())
    model.add(Activation('relu'))

    model.add(deconv(n, 5, 5))
    model.add(batch_norm())
    model.add(Activation('relu'))

    model.add(
        Deconvolution2D(nb_output_channels,
                        5,
                        5,
                        subsample=(2, 2),
                        border_mode=(2, 2),
                        init=init))
    model.add(LinearInBounds(-1, 1))
    return model
Example #9
def get_mask_driver(x, nb_units, nb_output_units):
    n = nb_units
    driver = sequential([
        Dense(n),
        BatchNormalization(),
        Dropout(0.25),
        Activation('relu'),
        Dense(n),
        BatchNormalization(),
        Dropout(0.25),
        Activation('relu'),
        Dense(nb_output_units),
        BatchNormalization(),
        LinearInBounds(-1, 1),
    ], ns='driver')
    return driver(x)
Example #10
def get_lighting_generator(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    light_conv = sequential(
        [
            conv(n, 5, 5),
            conv(n, 5, 5),
            conv(n, 3, 3),
            UpSampling2D(),  # 32x32
            conv(n, 5, 5),
            Convolution2D(2, 1, 1, border_mode='same'),
            UpSampling2D(),  # 64x64
            LinearInBounds(-1, 1, clip=True),
            GaussianBlur(sigma=4),
        ],
        ns='lighting')(input)

    shift = Split(0, 1, axis=1)(light_conv)
    scale = Split(1, 2, axis=1)(light_conv)

    return shift, scale
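Split(0, 1, axis=1) and Split(1, 2, axis=1) above separate the two channels of the 64x64 lighting map into a shift and a scale tensor. A hypothetical stand-in for such a layer is sketched below; the real Split layer in the source library may be implemented differently.

from keras import backend as K
from keras.engine.topology import Layer


class SliceAxis(Layer):  # hypothetical stand-in for Split
    def __init__(self, start, stop, axis=1, **kwargs):
        self.start = start
        self.stop = stop
        self.axis = axis
        super(SliceAxis, self).__init__(**kwargs)

    def call(self, x, mask=None):
        # Keep indices [start, stop) along the chosen axis.
        index = [slice(None)] * K.ndim(x)
        index[self.axis] = slice(self.start, self.stop)
        return x[tuple(index)]

    def get_output_shape_for(self, input_shape):
        shape = list(input_shape)
        shape[self.axis] = self.stop - self.start
        return tuple(shape)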
Example #11
    def generator(inputs):
        z, = inputs
        # Split the noise vector into the slices consumed by the driver,
        # offset and bits branches.
        z_driver = Split(*z_for_driver, axis=1)(z)
        z_offset = Split(*z_for_offset, axis=1)(z)
        z_bits = Split(*z_for_bits, axis=1)(z)
        bits = get_bits(z_bits)
        driver = mask_driver(z_driver)
        driver_norm = NormSinCosAngle(0)(driver)
        mask_input = concat([bits, driver_norm], name='mask_gen_in')
        mask = mask_generator(mask_input)

        if mask_generator_weights:
            mask_layers = collect_layers(mask_input, mask)
            load_weights(mask_layers, mask_generator_weights)

        selection = with_regularizer(
            Selection(threshold=-0.08,
                      smooth_threshold=0.2,
                      sigma=1.5,
                      name='selection'), MinCoveredRegularizer())

        mask_down = PyramidReduce()(mask)
        mask_selection = selection(mask)
        mask_selection_down = PyramidReduce(scale=4)(mask_selection)
        out_offset_front = offset_front(
            [z_offset, ZeroGradient()(driver_norm)])

        light_scale64, light_shift64 = \
            lighting_generator([out_offset_front,
                                light_merge_mask16(mask_selection_down)])

        mask_with_lighting = AddLighting(scale_factor=0.5, shift_factor=0.75)(
            [mask, light_scale64, light_shift64])

        out_offset_middle = offset_middle([
            out_offset_front,
            offset_merge_mask16(mask_selection_down),
            offset_merge_light16(concat(light_scale64, light_shift64))
        ])

        offset_back_feature_map, out_offset_back = offset_back(
            [out_offset_middle,
             offset_merge_mask32(mask_down)])

        mask_weight32 = mask_weight_blending32(out_offset_middle)
        mask_weight64 = mask_weight_blending64(out_offset_middle)

        blending = PyramidBlending(offset_pyramid_layers=3,
                                   mask_pyramid_layers=3,
                                   mask_weights=['variable', 'variable', 1],
                                   offset_weights=[1, 1, 1],
                                   use_selection=[True, True, True],
                                   name='blending')([
                                       out_offset_back, mask_with_lighting,
                                       mask_selection, mask_weight32,
                                       mask_weight64
                                   ])

        mask_post = mask_postprocess([
            blending, mask_selection, light_scale64, light_shift64, mask,
            out_offset_back, offset_back_feature_map
        ])
        mask_post_high = HighFrequencies(4, nb_steps=5,
                                         name='mask_post_high')(mask_post)
        blending_post = merge([mask_post_high, blending],
                              mode='sum',
                              name='blending_post')
        return LinearInBounds(-1.2, 1.2)(blending_post)
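The generator above is a closure over sub-networks (mask_driver, mask_generator, offset_front, ...) that are built elsewhere in the surrounding function, so it is not runnable on its own. Purely as an illustration of how such a closure is typically wired into a model with the Keras 1.x functional API (input_dim is an assumed value):

from keras.layers import Input
from keras.models import Model

input_dim = 50  # assumed length of the noise vector z
z = Input(shape=(input_dim,), name='z')
fake = generator([z])
generator_model = Model(input=z, output=fake)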