Example #1
def get_lighting_generator(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    light_conv = sequential([
        conv(n, 3, 3),   # 16x16
        MaxPooling2D(),  # 8x8
        conv(n, 3, 3),
        conv(n, 3, 3),
        UpSampling2D(),  # 16x16
        conv(n, 3, 3),
        UpSampling2D(),  # 32x32
        conv(n, 3, 3),
        Convolution2D(3, 1, 1, border_mode='same'),
        UpSampling2D(),  # 64x64
        GaussianBlur(sigma=2.5),
    ], ns='lighting')(input)

    shift = Subtensor(0, 1, axis=1)(light_conv)
    shift = InBounds(-1, 1)(shift)
    in_bounds = InBounds(0, 2)

    scale_black = in_bounds(Subtensor(1, 2, axis=1)(light_conv))
    scale_white = in_bounds(Subtensor(2, 3, axis=1)(light_conv))

    return [scale_black, scale_white, shift]
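These examples come from a Keras 1.x codebase (hence `Convolution2D`, `border_mode`, `subsample`, and channels-first tensors) and lean on a few project helpers that are not shown. As a rough sketch of the semantics the call sites imply, `sequential`, `conv`, `concat`, and `batch_norm` could look like the following; this is an assumption for readability, not the project's actual implementation:

# Hedged sketch of the helper utilities assumed by these examples
# (Keras 1.x API; not the project's actual code).
from keras.layers import Activation, BatchNormalization, Convolution2D, merge


def batch_norm():
    # axis=1 assumes channels-first tensors, consistent with the
    # Reshape((8 * n, 4, 4)) calls in the examples below.
    return BatchNormalization(axis=1)


def conv(nb_filter, nb_row, nb_col):
    # Convenience block: same-padded convolution + batch norm + ReLU.
    return [
        Convolution2D(nb_filter, nb_row, nb_col, border_mode='same'),
        batch_norm(),
        Activation('relu'),
    ]


def concat(tensors, axis=1, name=None):
    # Pass single tensors (or one-element lists) through; concatenate
    # longer lists along `axis`.
    if not isinstance(tensors, list):
        return tensors
    if len(tensors) == 1:
        return tensors[0]
    return merge(tensors, mode='concat', concat_axis=axis, name=name)


def sequential(layers, ns=None):
    # Apply a (possibly nested) list of layers in order. In the real
    # code `ns` scopes layer names; it is ignored in this sketch.
    def flatten(ls):
        for layer in ls:
            if isinstance(layer, list):
                yield from flatten(layer)
            else:
                yield layer

    def call(x):
        for layer in flatten(layers):
            x = layer(x)
        return x

    return call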
Example #2
def render_gan_discriminator(x, n=32, conv_repeat=1, dense=[],
                             out_activation='sigmoid'):
    def conv(n):
        layers = [
            Convolution2D(n, 3, 3, subsample=(2, 2), border_mode='same'),
            batch_norm(),
            LeakyReLU(0.2),
        ]

        return layers + [[
            Convolution2D(n, 3, 3, border_mode='same'),
            batch_norm(),
            LeakyReLU(0.2),
        ] for _ in range(conv_repeat - 1)]

    def get_dense(nb):
        return [
            Dense(nb),
            batch_norm(),
            LeakyReLU(0.2),
        ]

    return sequential([
        Convolution2D(n, 5, 5, subsample=(2, 2), border_mode='same'),
        LeakyReLU(0.2),
        conv(2 * n),
        conv(4 * n),
        conv(8 * n),
        Flatten(),
        [get_dense(nb) for nb in dense],
        Dense(1, activation=out_activation)
    ], ns='dis')(concat(x, axis=0, name='concat_fake_real'))
Example #3
def render_gan_discriminator_resnet(x, n=32, conv_repeat=1, dense=[],
                                    out_activation='sigmoid'):
    def get_dense(nb):
        return [
            Dense(nb),
            batch_norm(),
            LeakyReLU(0.2),
        ]

    return sequential([
        Convolution2D(n, 3, 3, border_mode='same'),
        resnet(n, activation=LeakyReLU(0.3)),
        Convolution2D(n, 3, 3, subsample=(2, 2), border_mode='same'),
        LeakyReLU(0.2),
        batch_norm(),
        resnet(2*n, activation=LeakyReLU(0.3)),
        resnet(2*n, activation=LeakyReLU(0.3)),
        resnet(2*n, activation=LeakyReLU(0.3)),
        resnet(2*n, activation=LeakyReLU(0.3)),
        resnet(2*n, activation=LeakyReLU(0.3)),
        Convolution2D(4*n, 3, 3, subsample=(2, 2), border_mode='same'),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        Convolution2D(4*n, 3, 3, subsample=(2, 2), border_mode='same'),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        resnet(4*n, activation=LeakyReLU(0.3)),
        Flatten(),
        [get_dense(nb) for nb in dense],
        Dense(1, activation=out_activation)
    ], ns='dis')(concat(x, axis=0, name='concat_fake_real'))
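`resnet` above is another project helper. Judging from the call sites (a channel count plus an `activation` layer), it plausibly builds a standard two-convolution residual block; the sketch below, including the 1x1 shortcut projection when the channel count changes, is a guess, not the project's code:

# Hedged sketch of a residual block matching the resnet(n, activation=...)
# call sites (Keras 1.x, channels-first layout assumed).
from keras.layers import Activation, BatchNormalization, Convolution2D, merge


def resnet(n, activation=None):
    if activation is None:
        activation = Activation('relu')

    def block(x):
        y = Convolution2D(n, 3, 3, border_mode='same')(x)
        y = BatchNormalization(axis=1)(y)
        y = activation(y)
        y = Convolution2D(n, 3, 3, border_mode='same')(y)
        y = BatchNormalization(axis=1)(y)
        # Project the shortcut with a 1x1 convolution if the input
        # channel count differs from n.
        nb_in = x._keras_shape[1]
        shortcut = x if nb_in == n else Convolution2D(n, 1, 1)(x)
        return activation(merge([y, shortcut], mode='sum'))

    return block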
Example #4
    def _build_generator_given_z(self):
        z = Input(shape=(self.z_dim,), name='z')

        z_bits = Subtensor(*self.pos_z_bits, axis=1)(z)
        z_labels = Subtensor(*self.pos_z_labels, axis=1)(z)
        z_offset = Subtensor(*self.pos_z_offset, axis=1)(z)
        bits = ThresholdBits()(z_bits)
        nb_labels_without_bits = self.labels_shape[0] - self.nb_bits
        generated_labels = get_label_generator(
            z_labels, self.generator_units, nb_output_units=nb_labels_without_bits)

        labels_normed = NormSinCosAngle(0)(generated_labels)
        labels = concat([bits, labels_normed], name='labels')
        fake = self.generator_given_z_and_labels([z_offset, labels])
        self.generator_given_z = Model([z], [fake])

        sample_tensors = self.sample_generator_given_z_and_labels([z_offset, labels])
        sample_tensors = [name_tensor(t, n)
                          for t, n in zip(sample_tensors,
                                          self.sample_generator_given_z_and_labels.output_names)]
        self.sample_generator_given_z_output_names = ['labels'] + \
            self.sample_generator_given_z_and_labels_output_names
        self.sample_generator_given_z = Model([z], [labels] + sample_tensors)
Example #5
def get_blur_factor(inputs, min=0, max=2):
    input = concat(inputs)
    return sequential([
        Convolution2D(1, 3, 3),
        Flatten(),
        Dense(1),
        InBounds(min, max),
    ], ns='mask_weight_blending')(input)
Example #6
def discriminator_fn(x):
    # The index tuples appear to select row ranges of the concatenated
    # batch (axis 0): rows 0-9 are the fakes, rows 10-19 the real samples.
    realness = sequential([
        Flatten(),
        Dense(1),
    ])(concat(x))
    return gan_outputs(realness,
                       fake_for_gen=(0, 10),
                       fake_for_dis=(0, 10),
                       real=(10, 20))
Example #7
def get_details(inputs, nb_units):
    n = nb_units
    return sequential([
        conv(n, 3, 3),
        conv(n, 3, 3),
        conv(n, 3, 3),
        Convolution2D(1, 3, 3, border_mode='same', init='normal'),
        InBounds(-2, 2)
    ], ns='details')(concat(inputs))
Example #8
def get_offset_middle(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    return sequential([
        UpSampling2D(),  # 32x32
        conv(2*n, 3, 3),
        conv(2*n, 3, 3),
        conv(2*n, 3, 3),
    ], ns='offset.middle')(input)
Example #9
def get_offset_back(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    back_feature_map = sequential([
        UpSampling2D(),  # 64x64
        conv(n, 3, 3),
        conv(n, 3, 3),
        InBounds(-1, 1),
    ], ns='offset.back')(input)

    return back_feature_map, sequential([
        Convolution2D(1, 3, 3, border_mode='same'),
        InBounds(-1, 1),
    ], ns='offset.back_out')(back_feature_map)
Example #10
    def _build(self):
        # Run the generator graph to get the fake images plus any
        # regularization losses registered inside the generator.
        fake, _, _, g_additional_losses = self.g.run_internal_graph(
            self.g.inputs)

        real = self.d.inputs[0]
        # The discriminator sees fakes and reals stacked along the batch axis.
        data = concat([fake, real], axis=0)

        realness, _, _, d_additional_losses = self.d.run_internal_graph(
            [data] + self.d.inputs[1:])

        nb_fakes = fake.shape[0]
        fake_realness = realness[:nb_fakes]
        real_realness = realness[nb_fakes:]
        # Two thirds of the fake scores drive the generator loss, the
        # remaining third the discriminator loss.
        split = 2 * nb_fakes // 3
        g_fake_realness = fake_realness[:split]
        d_fake_realness = fake_realness[split:]

        outputs = OrderedDict()
        g_loss = K.mean(K.binary_crossentropy(g_fake_realness,
                                              K.ones_like(g_fake_realness)))
        outputs['g_loss'] = g_loss
        g_reg_loss = sum(g_additional_losses.values())
        if g_reg_loss != 0:
            outputs['g_reg_loss'] = g_reg_loss
        g_total_loss = g_loss + g_reg_loss

        d_loss = K.mean(K.binary_crossentropy(real_realness,
                                              K.ones_like(real_realness)))
        d_loss += K.mean(K.binary_crossentropy(d_fake_realness,
                                               K.zeros_like(d_fake_realness)))
        outputs['d_loss'] = d_loss
        d_reg_loss = sum(d_additional_losses.values())
        if d_reg_loss != 0:
            outputs['d_reg_loss'] = d_reg_loss
        d_total_loss = d_loss + d_reg_loss

        inputs = {i.name: i for i in self.g.inputs + self.d.inputs}
        inputs_list = [inputs[name] for name in self.input_names]

        g_updates = self.g_optimizer.get_updates(
            collect_trainable_weights(self.g), self.g.constraints, g_total_loss)
        d_updates = self.d_optimizer.get_updates(
            collect_trainable_weights(self.d), self.d.constraints, d_total_loss)

        if self.uses_learning_phase:
            lr_phase = [K.learning_phase()]
        else:
            lr_phase = []
        self.metrics_names = list(outputs.keys())
        # One function updates both players; the losses come back in the
        # order of `metrics_names`.
        self._train_function = K.function(inputs_list + lr_phase,
                                          list(outputs.values()),
                                          updates=g_updates + d_updates)
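For intuition, a NumPy stand-in for the symbolic slicing in `_build`: the discriminator scores the stacked `[fake; real]` batch, and the fake scores are then split two thirds for the generator loss and one third for the discriminator loss. Values and shapes here are illustrative only:

# Illustrative only: concrete version of the batch layout above.
import numpy as np

nb_fakes, nb_real = 12, 12
realness = np.random.rand(nb_fakes + nb_real, 1)  # stand-in for d's scores

fake_realness = realness[:nb_fakes]
real_realness = realness[nb_fakes:]
split = 2 * nb_fakes // 3                  # = 8
g_fake_realness = fake_realness[:split]    # 8 scores feed g_loss
d_fake_realness = fake_realness[split:]    # 4 scores feed d_loss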
Example #11
def get_offset_front(inputs, nb_units):
    n = nb_units
    input = concat(inputs)

    return sequential([
        Dense(8*n*4*4),
        batch_norm(),
        Activation('relu'),
        Reshape((8*n, 4, 4)),
        UpSampling2D(),  # 8x8
        conv(4*n, 3, 3),
        conv(4*n, 3, 3),
        UpSampling2D(),  # 16x16
        conv(2*n, 3, 3),
        conv(2*n, 3, 3),
    ], ns='offset.front')(input)
Example #12
def offset_front(x):
    return sequential([
        Dense(16),
        Reshape((1, 4, 4)),
        UpSampling2D((4, 4))
    ])(concat(x))
Example #13
def light_generator(ins):
    seq = sequential([
        Convolution2D(1, 3, 3, border_mode='same')
    ])(concat(ins))
    return UpSampling2D((4, 4))(seq), UpSampling2D((4, 4))(seq), \
        UpSampling2D((4, 4))(seq)
Example #14
def simple_gan_generator(nb_units, z, labels, depth_map, tag3d, depth=2):
    n = nb_units
    depth_map_features = sequential([
        conv2d_block(n),
        conv2d_block(2 * n),
    ])(depth_map)

    tag3d_features = sequential([
        conv2d_block(n, subsample=2),
        conv2d_block(2 * n, subsample=2),
    ])(tag3d)

    x = sequential([
        Dense(5 * n),
        BatchNormalization(mode=2),
        Activation('relu'),
        Dense(5 * n),
        BatchNormalization(mode=2),
        Activation('relu'),
    ])(concat([z, labels]))

    blur = InBounds(0, 1, clip=True)(Dense(1)(x))

    x = sequential([
        Dense(8 * 4 * 4 * n),
        Activation('relu'),
        BatchNormalization(mode=2),
        Reshape((8 * n, 4, 4)),
    ])(x)

    x = sequential([
        conv2d_block(8 * n, filters=1, depth=1, up=True),  # 4x4 -> 8x8
        conv2d_block(8 * n, depth=depth, up=True),  # 8x8 -> 16x16
    ])(x)

    off_depth_map = sequential([
        conv2d_block(2 * n, depth=depth),
    ])(concat([x, depth_map_features]))

    light = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 -> 64x64
    ])(off_depth_map)

    def get_light(x):
        return sequential([
            conv2d_block(1, filters=1, batchnorm=False),
            GaussianBlur(sigma=4),
            InBounds(0, 1, clip=True),
        ])(x)

    light_sb = get_light(light)
    light_sw = get_light(light)
    light_t = get_light(light)

    background = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, batchnorm=False),
        InBounds(-1, 1, clip=True),
    ])(off_depth_map)

    details = sequential([
        conv2d_block(2 * n, depth=depth, up=True),  # 16x16 -> 32x32
        conv2d_block(n, depth=depth, up=True),  # 32x32 ->  64x64
        conv2d_block(1, depth=1, batchnorm=False),
        InBounds(-1, 1, clip=True)
    ])(concat([tag3d_features, off_depth_map]))
    return blur, [light_sb, light_sw, light_t], background, details
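`conv2d_block` is also a project helper; from its keyword arguments (`filters`, `depth`, `subsample`, `up`, `batchnorm`) it plausibly stacks convolution/batch-norm/ReLU stages with optional up- or downsampling. A hedged sketch, with guessed defaults (the real helper may differ):

# Hedged sketch inferred from the conv2d_block(...) call sites
# (Keras 1.x, channels-first layout assumed).
from keras.layers import Activation, BatchNormalization, Convolution2D, UpSampling2D


def conv2d_block(n, filters=3, depth=1, subsample=1, up=False, batchnorm=True):
    # Optional 2x upsampling, then `depth` conv stages of `n` channels
    # with `filters`x`filters` kernels; only the first stage downsamples.
    layers = []
    if up:
        layers.append(UpSampling2D())
    for i in range(depth):
        stride = subsample if i == 0 else 1
        layers.append(Convolution2D(n, filters, filters,
                                    subsample=(stride, stride),
                                    border_mode='same'))
        if batchnorm:
            layers.append(BatchNormalization(axis=1))
        layers.append(Activation('relu'))
    return layers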
Example #15
def mask_post(x):
    return sequential([
        Convolution2D(1, 3, 3, border_mode='same')
    ])(concat(x))
Example #16
def offset_middle(x):
    return UpSampling2D()(concat(x))
Example #17
    def _build_generator_given_z_offset_and_labels(self):
        labels = Input(shape=self.labels_shape, name='input_labels')
        z_offset = Input(shape=(self.z_dim_offset,), name='input_z_offset')

        outputs = OrderedDict()
        labels_without_bits = Subtensor(self.nb_bits, self.labels_shape[0], axis=1)(labels)
        raw_tag3d, tag3d_depth_map = self.tag3d_network(labels)

        tag3d = ScaleUnitIntervalTo(-1, 1)(raw_tag3d)
        outputs['tag3d'] = tag3d
        outputs['tag3d_depth_map'] = tag3d_depth_map

        segmentation = Segmentation(threshold=-0.08, smooth_threshold=0.2,
                                    sigma=1.5, name='segmentation')

        tag3d_downsampled = PyramidReduce()(tag3d)
        tag3d_segmented = segmentation(raw_tag3d)
        outputs['tag3d_segmented'] = tag3d_segmented
        tag3d_segmented_blur = GaussianBlur(sigma=0.66)(tag3d_segmented)

        out_offset_front = get_offset_front([z_offset, ZeroGradient()(labels_without_bits)],
                                            self.generator_units)

        light_depth_map = get_preprocess(tag3d_depth_map, self.preprocess_units,
                                         nb_conv_layers=2)
        light_outs = get_lighting_generator([out_offset_front, light_depth_map],
                                            self.generator_units)
        offset_depth_map = get_preprocess(tag3d_depth_map, self.preprocess_units,
                                          nb_conv_layers=2)
        offset_middle_light = get_preprocess(concat(light_outs), self.preprocess_units,
                                             resize=['down', 'down'])

        offset_middle_tag3d = get_preprocess(tag3d_downsampled,
                                             self.preprocess_units // 2,
                                             resize=['down', ''],
                                             nb_conv_layers=2)
        out_offset_middle = get_offset_middle(
            [out_offset_front, offset_depth_map,
             offset_middle_light, offset_middle_tag3d],
            self.generator_units)

        offset_back_tag3d_downsampled = get_preprocess(tag3d_downsampled,
                                                       self.preprocess_units // 2,
                                                       nb_conv_layers=2)

        offset_back_feature_map, out_offset_back = get_offset_back(
            [out_offset_middle, offset_back_tag3d_downsampled], self.generator_units)

        blur_factor = get_blur_factor(out_offset_middle, min=0.25, max=1.)
        outputs['blur_factor'] = blur_factor

        tag3d_blur = BlendingBlur(sigma=2.0)([tag3d, blur_factor])
        outputs['tag3d_blur'] = tag3d_blur
        outputs['light_black'] = light_outs[0]
        outputs['light_white'] = light_outs[1]
        outputs['light_shift'] = light_outs[2]
        tag3d_lighten = AddLighting(
            scale_factor=0.90, shift_factor=0.90)([tag3d_blur] + light_outs)
        tag3d_lighten = InBounds(clip=True, weight=15)(tag3d_lighten)
        outputs['tag3d_lighten'] = tag3d_lighten

        outputs['background_offset'] = out_offset_back
        blending = Background(name='blending')([out_offset_back, tag3d_lighten,
                                                tag3d_segmented_blur])
        outputs['fake_without_noise'] = blending
        details = get_details(
            [blending, tag3d_segmented_blur, tag3d, out_offset_back,
             offset_back_feature_map] + light_outs, self.generator_units)
        outputs['details_offset'] = details
        details_high_pass = HighPass(3.5, nb_steps=3)(details)
        outputs['details_high_pass'] = details_high_pass
        fake = InBounds(-2.0, 2.0)(
            merge([details_high_pass, blending], mode='sum'))
        outputs['fake'] = fake
        for name in outputs.keys():
            outputs[name] = name_tensor(outputs[name], name)

        self.generator_given_z_and_labels = Model([z_offset, labels], [fake])
        self.sample_generator_given_z_and_labels_output_names = list(outputs.keys())
        self.sample_generator_given_z_and_labels = Model([z_offset, labels],
                                                         list(outputs.values()))
Example #18
def offset_back(x):
    feature_map = sequential([
        UpSampling2D(),
    ])(concat(x))
    return feature_map, Convolution2D(1, 3, 3,
                                      border_mode='same')(feature_map)