from keras.layers import Activation, AveragePooling2D, Lambda, UpSampling2D

# `scale`, `build_generator`, and `inpainting_attention` are project-level
# helpers (see ops.py and architectures.py in the repository)
def generator_outputs(inputs, sizes):
    latent, input_energy = inputs
    # multiply the (scaled) energy into the latent space
    h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    img_layer0 = build_generator(h, sizes[0], sizes[1])
    img_layer1 = build_generator(h, sizes[2], sizes[3])
    img_layer2 = build_generator(h, sizes[4], sizes[5])

    # resize layer 0 to layer 1's shape (upsample rows, pool columns),
    # then blend it in with the attention mechanism
    zero2one = AveragePooling2D(pool_size=(1, 8))(
        UpSampling2D(size=(4, 1))(img_layer0))
    img_layer1 = inpainting_attention(img_layer1, zero2one)

    # pool layer 1 down to layer 2's shape and blend it in as well
    one2two = AveragePooling2D(pool_size=(1, 2))(img_layer1)
    img_layer2 = inpainting_attention(img_layer2, one2two)

    outputs = [Activation('relu')(img_layer0),
               Activation('relu')(img_layer1),
               Activation('relu')(img_layer2)]

    return outputs
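
Example 1 relies on a small `scale` helper that none of these snippets define. A minimal sketch, assuming it simply divides the conditioning energy (given in GeV, per the comments below) by a constant so it sits in a unit-ish range before the element-wise product with the latent vector; the name matches the call sites above, but the body is an assumption:

from keras.layers import Lambda

def scale(x, factor):
    # Assumed behavior: normalize the requested energy (GeV) by `factor`
    # (100 at the call sites) before multiplying it into the latent space.
    return Lambda(lambda v: v / factor)(x)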
Example 2
    # requested energy comes in GeV
    h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    # each of these builds a LAGAN-inspired [arXiv/1701.05927] component with
    # linear last layer
    img_layer0 = build_generator(h, 3, 96)
    img_layer1 = build_generator(h, 12, 12)
    img_layer2 = build_generator(h, 12, 6)

    if not no_attn:

        logger.info('using attentional mechanism')

        # resizes from (3, 96) => (12, 12)
        zero2one = AveragePooling2D(pool_size=(1, 8))(
            UpSampling2D(size=(4, 1))(img_layer0))
        img_layer1 = inpainting_attention(img_layer1, zero2one)

        # resizes from (12, 12) => (12, 6)
        one2two = AveragePooling2D(pool_size=(1, 2))(img_layer1)
        img_layer2 = inpainting_attention(img_layer2, one2two)

    generator_outputs = [
        Activation('relu')(img_layer0),
        Activation('relu')(img_layer1),
        Activation('relu')(img_layer2)
    ]

    generator = Model(generator_inputs, generator_outputs)

    generator.compile(optimizer=Adam(lr=gen_lr, beta_1=adam_beta_1),
                      loss='binary_crossentropy')
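
`build_generator(h, nb_rows, nb_cols)` is defined in the repository's architectures.py and is only described in these snippets as a LAGAN-inspired [arXiv/1701.05927] component with a linear last layer. The sketch below illustrates one plausible such head; the filter counts and channels_last layout are assumptions, not the repository's actual architecture:

from keras.layers import Conv2D, Dense, LeakyReLU, Reshape

def build_generator_sketch(x, nb_rows, nb_cols):
    # Project the conditioned latent vector to a coarse (nb_rows, nb_cols)
    # image, refine it with convolutions, and finish with a linear layer,
    # matching the "linear last layer" note above.
    h = Dense(nb_rows * nb_cols * 8)(x)
    h = Reshape((nb_rows, nb_cols, 8))(h)
    h = Conv2D(16, (3, 3), padding='same')(h)
    h = LeakyReLU()(h)
    h = Conv2D(8, (3, 3), padding='same')(h)
    h = LeakyReLU()(h)
    return Conv2D(1, (3, 3), padding='same')(h)  # linear activation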
Example 3
generator_inputs = [latent, input_energy]

# multiply the (scaled) energy into the latent space
h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

# build three LAGAN-style generators (check out `build_generator` in architectures.py)
img_layer0 = build_generator(h, 3, 96)
img_layer1 = build_generator(h, 12, 12)
img_layer2 = build_generator(h, 12, 6)

# inpainting
# 0 --> 1
zero2one = AveragePooling2D(pool_size=(1, 8))(
    UpSampling2D(size=(4, 1))(img_layer0))
img_layer1 = inpainting_attention(img_layer1, zero2one)  # this function is in ops.py
# 1 --> 2
one2two = AveragePooling2D(pool_size=(1, 2))(img_layer1)
img_layer2 = inpainting_attention(img_layer2, one2two)
# ^^ pooling and upsampling are needed to reshape the images to the same dimensions

# outputs
generator_outputs = [
    Activation('relu')(img_layer0),
    Activation('relu')(img_layer1),
    Activation('relu')(img_layer2)
]

# build the actual model
generator = Model(generator_inputs, generator_outputs)
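
`inpainting_attention` lives in the repository's ops.py and its exact implementation is not shown in these examples. The sketch below only illustrates the blending idea, assuming a learned pixel-wise sigmoid gate; the real ops.py version may differ:

from keras.layers import Activation, Conv2D, add, concatenate, multiply

def inpainting_attention_sketch(primary, carryover):
    # Both tensors must already share a spatial shape (hence the pooling
    # and upsampling above). Stack them along the channel axis, learn a
    # per-pixel gate, and add the gated carryover onto the primary image.
    stacked = concatenate([primary, carryover], axis=-1)
    gate = Conv2D(1, (3, 3), padding='same')(stacked)
    gate = Activation('sigmoid')(gate)
    return add([primary, multiply([gate, carryover])])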
Example 4
        generator_inputs.append(image_class)
    else:
        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    # each of these builds a LAGAN-inspired [arXiv/1701.05927] component
    img_layer = [build_generator(h, 12, 12) for _ in range(5)]

    if not no_attn:

        logger.info('using attentional mechanism')
        # every layer is already (12, 12), so these 1x1 resamplings are
        # identity ops kept for symmetry with the three-layer variant above
        zero2one = AveragePooling2D(pool_size=(1, 1))(
            UpSampling2D(size=(1, 1))(img_layer[0]))
        img_layer[1] = inpainting_attention(img_layer[1], zero2one)
        # chain the attention through the remaining layers
        for j in range(1, 4):
            one2N = AveragePooling2D(pool_size=(1, 1))(img_layer[j])
            img_layer[j + 1] = inpainting_attention(img_layer[j + 1], one2N)

    generator_outputs = [Activation('relu')(img) for img in img_layer]

    generator = Model(generator_inputs, generator_outputs)

    generator.compile(optimizer=Adam(lr=gen_lr, beta_1=adam_beta_1),
                      loss='binary_crossentropy')

    discriminator.trainable = False
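
Once the model is built, sampling from it is a single predict call. A usage sketch, where `batch_size` and `latent_size` are assumptions (the latent dimensionality must match whatever `latent` was declared with) and the energies follow the GeV convention noted above:

import numpy as np

batch_size = 128      # assumption
latent_size = 1024    # assumption: must match the `latent` input's size
noise = np.random.normal(0, 1, (batch_size, latent_size))
sampled_energy = np.random.uniform(1, 100, (batch_size, 1))  # GeV

# one generated image per calorimeter layer
images = generator.predict([noise, sampled_energy])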