Example #1
        lookup_table = Embedding(nb_classes, latent_size, input_length=1,
                                 embeddings_initializer='glorot_normal')
        emb = Flatten()(lookup_table(image_class))

        # hadamard product between z-space and a class conditional embedding
        hc = multiply([latent, emb])

        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([hc, scale(input_energy, 100)])
        generator_inputs.append(image_class)
    else:
        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    # each of these builds a LAGAN-inspired [arXiv/1701.05927] component with
    # linear last layer
    img_layer0 = build_generator(h, 3, 96)
    img_layer1 = build_generator(h, 12, 12)
    img_layer2 = build_generator(h, 12, 6)

    if not no_attn:

        logger.info('using attentional mechanism')

        # resizes from (3, 96) => (12, 12)
        zero2one = AveragePooling2D(pool_size=(1, 8))(
            UpSampling2D(size=(4, 1))(img_layer0))
        img_layer1 = inpainting_attention(img_layer1, zero2one)

        # resizes from (12, 12) => (12, 6)
        one2two = AveragePooling2D(pool_size=(1, 2))(img_layer1)
        img_layer2 = inpainting_attention(img_layer2, one2two)
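
Every variant below conditions the latent vector on the requested incident energy via `scale(input_energy, 100)`, which is defined outside these excerpts. A minimal sketch of such a helper, assuming it simply divides the GeV value by the given factor inside a `Lambda` layer (an assumption, not taken from the source), could be:

from keras.layers import Lambda

def scale(x, factor):
    # Hypothetical helper: normalise the requested energy (in GeV) by a
    # constant factor so it is on a comparable scale to the latent noise.
    return Lambda(lambda v: v / factor)(x)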
Example #2
        emb = Flatten()(lookup_table(image_class))

        # hadamard product between z-space and a class conditional embedding
        hc = multiply([latent, emb])

        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([hc, scale(input_energy, 100)])
        generator_inputs.append(image_class)
    else:
        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    # each of these builds a LAGAN-inspired [arXiv/1701.05927] component
    img_layer = []
    for i in range(5):
        img_layer.append(build_generator(h, 12, 12))

    if not no_attn:

        logger.info('using attentional mechanism')
        zero2one = AveragePooling2D(pool_size=(1, 1))(
            UpSampling2D(size=(1, 1))(img_layer[0]))
        img_layer[1] = inpainting_attention(img_layer[1], zero2one)
        for j in range(1, 4):
            one2N = AveragePooling2D(pool_size=(1, 1))(img_layer[j])
            img_layer[j + 1] = inpainting_attention(img_layer[j + 1], one2N)

    generator_outputs = []

    for k in range(5):
        generator_outputs.append(Activation('relu')(img_layer[k]))
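
This variant chains the attention step over five identically sized (12, 12) layers, so the pooling/upsampling pair with size (1, 1) leaves the carried-over image unchanged. The `inpainting_attention` helper itself is not shown; a simplified, hypothetical gate that blends the primary layer with the resized previous layer through a learned per-pixel mask (not the original implementation) might look like:

from keras.layers import Conv2D, add, concatenate, multiply

def inpainting_attention(primary, carryover):
    # Illustrative sketch only: learn a sigmoid gate from both images and
    # add the gated carry-over image back into the primary image.
    stacked = concatenate([primary, carryover], axis=-1)
    gate = Conv2D(1, (1, 1), activation='sigmoid')(stacked)
    return add([primary, multiply([gate, carryover])])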
Example #3
        emb = Flatten()(lookup_table(image_class))

        # hadamard product between z-space and a class conditional embedding
        hc = multiply([latent, emb])

        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([hc, scale(input_energy, 100)])
        generator_inputs.append(image_class)
    else:
        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    # each of these builds a LAGAN-inspired [arXiv/1701.05927] component
    img_layer = []
    for i in range(5):
        img_layer.append(build_generator(h, 20, 20))

    if not no_attn:

        logger.info('using attentional mechanism')
        zero2one = AveragePooling2D(pool_size=(1, 1))(
            UpSampling2D(size=(1, 1))(img_layer[0]))
        img_layer[1] = inpainting_attention(img_layer[1], zero2one)
        for j in range(1, 4):
            one2N = AveragePooling2D(pool_size=(1, 1))(img_layer[j])
            img_layer[j + 1] = inpainting_attention(img_layer[j + 1], one2N)

    generator_outputs = []

    for k in range(5):
        generator_outputs.append(Activation('relu')(img_layer[k]))
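
Here all five `build_generator(h, 20, 20)` calls share the same (20, 20) output grid. The function is defined elsewhere in the repository; a rough, hypothetical stand-in that honours the "linear last layer" note from the other snippets could be:

from keras.layers import Conv2D, Dense, Reshape

def build_generator(x, rows, cols):
    # Hypothetical minimal component: project the conditioned latent onto
    # the target grid and refine it with convolutions; the final layer is
    # linear so the caller can apply attention and a ReLU afterwards.
    x = Dense(rows * cols)(x)
    x = Reshape((rows, cols, 1))(x)
    x = Conv2D(8, (3, 3), padding='same', activation='relu')(x)
    x = Conv2D(1, (3, 3), padding='same')(x)  # linear output
    return x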
Example #4
        lookup_table = Embedding(nb_classes, latent_size, input_length=1,
                                 embeddings_initializer='glorot_normal')
        emb = Flatten()(lookup_table(image_class))

        # hadamard product between z-space and a class conditional embedding
        hc = multiply([latent, emb])

        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([hc, scale(input_energy, 100)])
        generator_inputs.append(image_class)
    else:
        # requested energy comes in GeV
        h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    # each of these builds a LAGAN-inspired [arXiv/1701.05927] component with
    # linear last layer
    img_layer0 = build_generator(h, 20, 20)
    img_layer1 = build_generator(h, 20, 20)
    img_layer2 = build_generator(h, 20, 20)

    if not no_attn:

        logger.info('using attentional mechanism')

        # resizes from (20, 20) => (20, 20)
        zero2one = AveragePooling2D(pool_size=(1, 1))(
            UpSampling2D(size=(1, 1))(img_layer0))
        img_layer1 = inpainting_attention(img_layer1, zero2one)

        # resizes from (20, 20) => (20, 20)
        one2two = AveragePooling2D(pool_size=(1, 1))(img_layer1)
        img_layer2 = inpainting_attention(img_layer2, one2two)
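
Because all three layers are (20, 20) in this variant, the UpSampling2D/AveragePooling2D pair with size (1, 1) is effectively an identity resize, kept presumably so the wiring matches the mixed-geometry case of Example #1. The shape arithmetic of that mixed case can be checked with a throwaway input (hypothetical, single-channel):

from keras.layers import AveragePooling2D, Input, UpSampling2D

t0 = Input(shape=(3, 96, 1))
# rows x 4 = 12, cols / 8 = 12  =>  (3, 96) -> (12, 12)
t1 = AveragePooling2D(pool_size=(1, 8))(UpSampling2D(size=(4, 1))(t0))
# cols / 2 = 6  =>  (12, 12) -> (12, 6)
t2 = AveragePooling2D(pool_size=(1, 2))(t1)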
Example #5
        emb = Flatten()(Embedding(nb_classes, latent_size, input_length=1,
                                  embeddings_initializer='glorot_normal')(image_class))
        # hadamard product between z-space and a class conditional embedding
        hc = multiply([latent, emb])
        h = Lambda(lambda x: x[0] * x[1])([hc, scale(input_energy, 100)])
        generator_inputs.append(image_class)
    else:
        h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    # h = concatenate([latent, input_energy])

    # emb = Flatten()(Embedding(nb_classes, latent_size, input_length=1,
    #                           embeddings_initializer='glorot_normal')(image_class))
    # # hadamard product between z-space and a class conditional embedding
    # h = merge([latent, emb], mode='mul')

    img_layer0 = build_generator(h, 5, 5)
    img_layer1 = build_generator(h, 5, 5)
    # img_layer2 = build_generator(h, 12, 6)

    # if parse_args.in_paint:
    #
    #     # inpainting
    #     zero2one = AveragePooling2D(pool_size=(1, 8))(
    #         UpSampling2D(size=(4, 1))(img_layer0))
    #     # final_img_layer1 = add([zero2one, img_layer1])
    #     img_layer1 = inpainting_attention(img_layer1, zero2one)
    #
    #     one2two = AveragePooling2D(pool_size=(1, 2))(img_layer1)
    #     # final_img_layer2 = add([one2two, img_layer2])
    #     img_layer2 = inpainting_attention(img_layer2, one2two)
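
All five excerpts stop before the generator `Model` is put together. Assuming `generator_inputs` already holds `[latent, input_energy]` before `image_class` is optionally appended (an assumption, since that part is not shown), the assembly would plausibly be:

from keras.layers import Activation
from keras.models import Model

# Hypothetical assembly of this two-layer variant.
generator_outputs = [Activation('relu')(img_layer0),
                     Activation('relu')(img_layer1)]
generator = Model(inputs=generator_inputs, outputs=generator_outputs)
generator.summary()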