Example #1
def style_loss(x, targ): return metrics.mse(gram_matrix(x), gram_matrix(targ))


# In[157]:


loss = sum(style_loss(l1[0], l2[0]) for l1,l2 in zip(layers, targs))
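
Example #1 assumes a gram_matrix helper defined elsewhere in its source notebook (Example #18 below shows one variant). A minimal sketch, assuming a channels-last feature tensor and the Keras backend:

from keras import backend as K

def gram_matrix(x):
    # Flatten each channel of x into a row, then take inner products between
    # channels; the normalisation constant varies between implementations.
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features)) / K.cast(K.prod(K.shape(x)), K.floatx())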
Example #2
def content_recreate():
    '''
    returns the path to the recreated-content image
    '''
    model = VGG16_Avg(include_top=False)
    layer = model.get_layer('block5_conv1').output
    layer_model = Model(model.input, layer)
    targ = K.variable(layer_model.predict(img_arr))

    loss = metrics.mse(layer, targ)
    grads = K.gradients(loss, model.input)

    function_input = [model.input]
    function_output = ([loss] + grads)
    fn = K.function(function_input, function_output)
    evaluator = Evaluator(fn, img_arr.shape)

    x = rand_img(img_arr.shape)
    content_iterations = 10
    x_final, content_loss_history = solve_image(evaluator,
                                                content_iterations,
                                                x,
                                                path=content_result_path)
    # last frame written by solve_image
    c_path = content_result_path + '/res_at_iteration_{}.png'.format(content_iterations - 1)
    return c_path
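
content_recreate also leans on rand_img, solve_image, and Evaluator helpers that the snippet omits (Evaluator and solve_image variants appear in Examples #11, #15, and #23). A plausible rand_img sketch, in the style of the fast.ai style-transfer notebooks:

import numpy as np

def rand_img(shape):
    # low-amplitude uniform noise as the starting point for optimisation
    return np.random.uniform(-2.5, 2.5, shape) / 100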
Example #3
    def loss(y_true, y_pred):
        reconstruction_loss = mse(y_true, y_pred)
        reconstruction_loss *= original_dimension

        kl_loss = -0.5 * K.sum(
            1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(reconstruction_loss + kl_loss)
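
The z_mean and z_log_var tensors this loss closes over come from the encoder's reparameterisation step. A standard Keras sketch of that sampling layer (the name `sampling` is an assumption; compare the Lambda layers in Examples #19 and #31):

from keras import backend as K
from keras.layers import Lambda

def sampling(args):
    # draw epsilon ~ N(0, I) and shift/scale it so that gradients
    # flow through z_mean and z_log_var
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

# z = Lambda(sampling)([z_mean, z_log_var])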
Example #4
def merge():
    '''
    returns an image of the neural style transfer
    '''

    input_shape = style_arr.shape[1:]
    model = VGG16_Avg(include_top=False, input_shape=input_shape)
    outputs = {l.name: l.output for l in model.layers}

    style_layers = [outputs['block{}_conv1'.format(o)] for o in range(1, 6)]
    content_layer = outputs['block4_conv2']

    style_model = Model(model.input, style_layers)
    style_targs = [K.variable(o) for o in style_model.predict(style_arr)]
    content_model = Model(model.input, content_layer)
    content_targ = K.variable(content_model.predict(img_arr))

    alpha = 0.1
    beta = 0.00001
    gamma = 0.000001

    st_loss = sum(
        style_loss(l1[0], l2[0]) for l1, l2 in zip(style_layers, style_targs))
    loss = alpha * metrics.mse(
        content_layer, content_targ
    ) + beta * st_loss + gamma * total_variation_loss(model.input)
    grads = K.gradients(loss, model.input)
    transfer_fn = K.function([model.input], [loss] + grads)
    evaluator = Evaluator(transfer_fn, shp)
    merge_iterations = 10
    x = rand_img(shp)
    x, merge_loss_history = solve_image(evaluator, merge_iterations, x,
                                        merge_result_path)
    return x
Example #5
 def vae_loss(inp, out):
     repr_loss = self.k * mse(inp, out)
     kl_loss = (
         -0.5
         * self.beta
         * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     )
     return K.mean(repr_loss + kl_loss)
Example #6
def target_MSE(y_true, y_pred, val=False):
    tars = y_true[:, :, data_dim]
    preds = y_pred[:, :, data_dim]
    if not val:
        return metrics.mse(tars, preds)
    else:
        #This only works for a single example at a time
        #print(tars)
        #print(preds)
        tars = tars[0, 0]
        preds = preds[0, 0]
        #return mean_squared_error(tars, preds)
        return np.mean(np.square(tars - preds))
Example #7
def vae_loss(y_true, y_pred):
    ''' Negative variational lower bound used as loss function for training the variational auto-encoder. '''
    # Reconstruction loss
    rc_loss = metrics.mse(K.flatten(image), K.flatten(t_decoded)) #binary_crossentropy
    #rc_loss *= img_size * img_size

    if wandb.config.variational:
        # Regularization term (KL divergence)
        kl_loss = -0.5 * K.sum(1 + t_log_var
                               - K.square(t_mean)
                               - K.exp(t_log_var), axis=-1)
        # Average over mini-batch
        return K.mean(rc_loss + kl_loss)
    else:
        return K.mean(rc_loss)
Example #8
    def do_style_transfer(self):
        # Load source image
        im = self.load_image(self.original_image_path)
        img_arr = self.preproc(np.expand_dims(im, axis=0))
        shp = img_arr.shape

        # Load style image
        style = self.load_image(self.style_image_path)
        style = style.resize((shp[1], shp[2]), resample=Image.ANTIALIAS
                             )  # Make style and original image the same size
        style_arr = self.preproc(np.expand_dims(style, 0)[:, :, :, :3])

        self.content_layer = self.outputs[self.content_layer_name]
        self.style_layers = [
            self.outputs['block{}_conv2'.format(o)]
            for o in self.style_loss_conv_blocks
        ]

        # Content model
        content_model = Model(self.model.input, self.content_layer)
        content_targ = K.variable(content_model.predict(img_arr))

        # Style model # Single input but multiple outputs
        style_model = Model(self.model.input, self.style_layers)
        style_targs = [K.variable(o) for o in style_model.predict(style_arr)]

        # Compute style loss between actual and generated for all layers - use the style_wgts for each layer
        self.style_loss = sum(
            self.get_style_loss(l1[0], l2[0]) * w for l1, l2, w in zip(
                self.style_layers, style_targs, self.style_wgts))
        # Compute content loss
        self.content_loss = K.mean(
            metrics.mse(self.content_layer, content_targ))

        # Compute total loss
        self.total_loss = self.style_loss + self.lambda_coeff * self.content_loss
        grads = K.gradients(self.total_loss, self.model.input)
        transfer_fn = K.function([self.model.input], [self.total_loss] + grads)

        evaluator = Evaluator(transfer_fn, shp)
        x = self.rand_img(shp)
        x = self.solve_image(evaluator, x, shp)
Example #9
outputs = {l.name: l.output for l in vgg_model.layers}

style_layers = [outputs['block{}_conv2'.format(o)] for o in range(1,6)]
content_name = 'block4_conv2'
content_layer = outputs[content_name]

style_model = Model(vgg_model.input, style_layers)
style_targs = [K.variable(o) for o in style_model.predict(style_image_array)]

content_model = Model(vgg_model.input, content_layer)
content_targ = K.variable(content_model.predict(src))

style_wgts = [0.05, 0.2, 0.2, 0.25, 0.3]

loss = sum(style_loss(l1[0], l2[0])*w for l1, l2, w in zip(style_layers, style_targs, style_wgts))
loss += metrics.mse(content_layer, content_targ)/10

grads = tf.keras.backend.gradients(loss, vgg_model.input)
transfer_fn = tf.keras.backend.function([vgg_model.input], [loss]+grads)
evaluator = ConvexOptimiser(transfer_fn, style_image_shape)
generated_image = generate_rand_img(style_image_shape)
generated_image = optimise(evaluator, iterations, generated_image, style_image_shape)



# Content plus style transfer


style_width, style_height = style_image.size
content_image_array = content_image_array[:, :style_height, :style_width]
Example #10
def style_mse_loss(x, y):
    return metrics.mse(grammian_matrix(x), grammian_matrix(y))
Example #11
layer = vgg.get_layer('block5_conv1').output

# %% Calculate target activations. ie The output given a single image
layer_model = Model(vgg.input, layer)
pred = K.variable(layer_model.predict(img_arr))

# %% Define class that handles loss and gradient outputs
class Evaluator(object):
    def __init__(self, fnc, shape): self.fnc, self.shape = fnc, shape
    def loss(self, img):
        loss_, self.grads_ = self.fnc([img.reshape(self.shape)])
        return loss_.astype(np.float64)
    def grads(self, img): return self.grads_.flatten().astype(np.float64)

# %% Define loss, gradients
loss = K.mean(metrics.mse(layer, pred))  # Actual layer is y_true
grads = K.gradients(loss, vgg.input)
fnc = K.function([vgg.input], [loss]+grads)
evaluator = Evaluator(fnc, shape)

# %% Perform deterministic optimization on img
def solve_img(eval_obj, n_iter, x, out_shape, path):
    for i in range(n_iter):
        x, min_val, info = fmin_l_bfgs_b(eval_obj.loss,
                                           x.flatten(),
                                           fprime=eval_obj.grads,
                                           maxfun=20)
        x = np.clip(x, -127, 127)

        print('Current loss value %s/%s:' % (i+1, n_iter), min_val)
        imsave(f'{path}at_iteration_{i}.png', deproc(x.copy(), out_shape)[0])
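
solve_img writes each intermediate result with a deproc helper that is not shown. A hedged sketch, assuming deproc simply reverses the usual VGG mean subtraction (add [..., ::-1] as well if the preprocessing also swapped the channel order):

import numpy as np

rn_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)  # ImageNet channel means (assumed)

def deproc(x, shape):
    # undo the mean subtraction and clip back to the valid pixel range
    return np.clip(x.reshape(shape) + rn_mean, 0, 255)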
Example #12
 def lossFunction(y_true, y_pred):
     loss = mse(y_true, y_pred)
     # L1 penalty on the weights, scaled by val
     loss += val * K.sum(K.abs(weights))
     return loss
Example #13
# latent_rep = E(X)[0]
# output = G(latent_rep)
E_mean, E_logsigma, Z = E(X)

# Z = Input(shape=(512,))
# Z2 = Input(shape=(batch_size, 512))

output = G(Z)
G_dec = G(E_mean + E_logsigma)
D_fake, F_fake = D(output)
D_fromGen, F_fromGen = D(G_dec)
D_true, F_true = D(X)

VAE = Model(X, output)
kl = - 0.5 * K.sum(1 + E_logsigma - K.square(E_mean) - K.exp(E_logsigma), axis=-1)
crossent = 64 * metrics.mse(K.flatten(X), K.flatten(output))
VAEloss = K.mean(crossent + kl)
VAE.add_loss(VAEloss)
VAE.compile(optimizer=SGDop)

for epoch in range(epochs):
    latent_vect = E.predict(dataset)[0]
    encImg = G.predict(latent_vect)
    fakeImg = G.predict(noise)

    DlossTrue = D_true.train_on_batch(dataset, np.ones((batch_size, 1)))
    DlossEnc = D_fromGen.train_on_batch(encImg, np.ones((batch_size, 1)))
    DlossFake = D_fake.train_on_batch(fakeImg, np.zeros((batch_size, 1)))

    cnt = epoch
    while cnt > 3:
Example #14
File: go.py Project: dpellow/MLPROJ
    def build_go(self, gene_list, go2genes, genes2go, vertices, edges,
                 number_of_neurons, latent_dim, var_th_index):
        regex = re.compile(
            r"[\s, \,, \+, \:, \- ,\(,\, \), \' , \[ , \], \=, \<, \>]",
            re.IGNORECASE)

        count = 0
        gene_list = [x[:x.index(".")] for x in gene_list]
        genes2input = {}

        # print "prepare input layer"
        # for k,v in genes2go.iteritems():
        #     count+=1
        #     print count
        #     e2e_id = entrez2ensembl_convertor([k])
        #     if len(e2e_id)==0 or e2e_id[0] not in gene_list: continue
        #     genes2input[k]= Input(shape=(1,), name="{}_{}_{}".format(e2e_id[0],str(k),"input") ) # Dense(1, )(Input(shape=(1,)))
        input_ensembl_ids = []
        print "connect input layer to GO leafs"
        for k, v in vertices.iteritems():
            cur_layer = []
            if v["n_children"] == 0 or True:  # probably genes would be connect to upper layers
                for cur_entrez in go2genes[k]:
                    if not genes2input.has_key(cur_entrez):
                        e2e_id = entrez2ensembl_convertor([cur_entrez])
                        if len(e2e_id) != 0 and e2e_id[0] in gene_list:
                            input_ensembl_ids.append(e2e_id[0])
                            genes2input[cur_entrez] = Input(
                                shape=(1, ),
                                name="{}_{}_{}".format(e2e_id[0],
                                                       str(cur_entrez),
                                                       "input"))

                    if genes2input.has_key(cur_entrez):
                        cur_layer.append(cur_entrez)

                go_name = regex.sub(app_config["go_separator"],
                                    v["name"]) + "_converged"

                v["neuron_converged"] = Dense(
                    number_of_neurons,
                    activation=app_config['activation_function'],
                    name=go_name)
                v["neuron_converged_inputs"] = cur_layer

        print "save input layer as list " + os.path.join(
            constants.LIST_DIR, "{}_{}_{}_{}".format(
                var_th_index, number_of_neurons, latent_dim,
                app_config["actual_vae_input_genes_file_name"]))
        file(
            os.path.join(
                constants.LIST_DIR, "{}_{}_{}_{}".format(
                    var_th_index, number_of_neurons, latent_dim,
                    app_config["actual_vae_input_genes_file_name"])),
            'w+').write("\n".join(input_ensembl_ids))
        print "connect intermediate converged GO layers"

        for k, v in sorted([(k, v) for k, v in vertices.iteritems()],
                           key=lambda x: max(x[1]["depth"]),
                           reverse=True):
            if not v.has_key("neuron_converged"):
                go_name = regex.sub(app_config["go_separator"],
                                    v["name"] + "_converged")
                v["neuron_converged"] = Dense(
                    number_of_neurons,
                    activation=app_config['activation_function'],
                    name=go_name)
                v["neuron_converged_inputs"] = []

            inputs = [
                cur_child.id for cur_child in v["obj"].children
                if vertices[cur_child.id].has_key("neuron_converged")
            ]

            v["neuron_converged_inputs"] = v["neuron_converged_inputs"] + inputs

        for k, v in sorted([(k, v) for k, v in vertices.iteritems()],
                           key=lambda x: max(x[1]["depth"]),
                           reverse=True):
            if v.has_key("neuron_converged") and v.has_key(
                    "neuron_converged_inputs"):
                inputs = [genes2input[x] for x in v["neuron_converged_inputs"] if genes2input.has_key(x)] + \
                         [vertices[x]["neuron_converged"] for x in v["neuron_converged_inputs"] if vertices.has_key(x)]
                if len(inputs) == 0:
                    del vertices[k]

                if len(inputs) == 1:
                    v["neuron_converged"] = BatchNormalization()(
                        v["neuron_converged"](inputs[0]))

                if len(inputs) > 1:
                    v["neuron_converged"] = BatchNormalization()(
                        v["neuron_converged"](concatenate(inputs)))

        # print [min(x["depth"]) for x in [v for k, v in vertices.iteritems()]]
        roots = [
            vertices[x] for x in
            [k for k, v in vertices.iteritems() if min(v["depth"]) == 1]
        ]
        print "num of roots: {}".format(len(roots))
        if app_config["is_variational"]:
            print "root and sampling layers"
            inputs = [r["neuron_converged"] for r in roots]
            if len(inputs) > 1:
                z_mean = BatchNormalization()(Dense(latent_dim, name="z_mean")(
                    concatenate(inputs)))
                z_log_var = BatchNormalization()(Dense(
                    latent_dim, name="z_log_var")(concatenate(inputs)))
            else:
                z_mean = BatchNormalization()(Dense(latent_dim,
                                                    name="z_mean")(inputs[0]))
                z_log_var = BatchNormalization()(Dense(latent_dim,
                                                       name="z_log_var")(
                                                           inputs[0]))
            z = BatchNormalization()(Lambda(
                self.sampling,
                output_shape=(latent_dim, ),
                name='z',
                arguments={"latent_dim": latent_dim})([z_mean, z_log_var]))
            for r in roots:
                go_name = regex.sub(app_config["go_separator"],
                                    r["name"] + "_diverged")
                r['neuron_diverged'] = BatchNormalization()(Dense(
                    number_of_neurons,
                    activation=app_config['activation_function'],
                    name=go_name)(z))
        else:
            for r in roots:
                r['neuron_diverged'] = r['neuron_converged']

        print "connect intermediate diverged GO layers"
        neuron_count = 0
        is_converged = False
        while not is_converged:
            is_converged = True
            for k, v in sorted([(k, v) for k, v in vertices.iteritems()],
                               key=lambda x: max(x[1]["depth"]),
                               reverse=False):

                if v.has_key("neuron_diverged"):
                    continue
                inputs = [
                    vertices[cur_parent]["neuron_diverged"]
                    for cur_parent in v["obj"]._parents
                    if vertices.has_key(cur_parent)
                    and vertices[cur_parent].has_key("neuron_diverged")
                ]
                go_name = regex.sub(app_config["go_separator"],
                                    v["name"] + "_diverged")
                if len(inputs) == 1:
                    v["neuron_diverged"] = BatchNormalization()(Dense(
                        number_of_neurons,
                        activation=app_config['activation_function'],
                        name=go_name)(inputs[0]))
                    neuron_count += 1
                    is_converged = False
                if len(inputs) > 1:
                    v["neuron_diverged"] = BatchNormalization()(Dense(
                        number_of_neurons,
                        activation=app_config['activation_function'],
                        name=go_name)(concatenate(inputs)))
                    neuron_count += 1
                    is_converged = False
            print neuron_count
        print "intermediate layers have {} neurons".format(neuron_count)

        genes2output = {}
        print "connect input layer to output leafs"

        for k, v in genes2go.iteritems():
            count += 1
            #        print count
            e2e_id = entrez2ensembl_convertor([k])
            if len(e2e_id) == 0 or e2e_id[0] not in gene_list: continue
            neuron_parents = []
            for cur_go_term in genes2go[k]:
                if vertices.has_key(
                        cur_go_term
                ):  # and len(vertices[cur_go_term]["obj"].children) ==0:
                    neuron_parents.append(
                        vertices[cur_go_term]["neuron_diverged"])
            if len(neuron_parents) == 1:
                genes2output[k] = Dense(1,
                                        activation='sigmoid',
                                        name="{}_{}_{}".format(
                                            e2e_id[0], str(k),
                                            "output"))(neuron_parents[0])
            if len(neuron_parents) > 1:
                genes2output[k] = Dense(1,
                                        activation='sigmoid',
                                        name="{}_{}_{}".format(
                                            e2e_id[0], str(k), "output"))(
                                                concatenate(neuron_parents))

        # self.vae = Model([v for k,v in genes2input.iteritems()], root['neuron_converged'])

        model_inputs = []
        model_outputs = []
        for k in genes2output.keys():
            model_inputs.append(genes2input[k])
            model_outputs.append(genes2output[k])

        assert len(model_inputs) == len(
            model_outputs), "different # of inputs and outputs"

        concatenated_inputs = concatenate(model_inputs)
        concatenated_outputs = concatenate(model_outputs)

        # instantiate encoder model
        self.encoder = Model(model_inputs, [z_mean, z_log_var, z],
                             name='encoder')
        # self.encoder.summary()
        plot_model(self.encoder,
                   to_file=os.path.join(constants.OUTPUT_GLOBAL_DIR,
                                        "encoder_{}.svg".format(time.time())))

        # self.decoder = Model()

        # Overall VAE model, for reconstruction and training
        self.vae = Model(model_inputs, model_outputs)  # concatenated_outputs
        # print self.vae.summary()
        if app_config["loss_function"] == "mse":
            reconstruction_loss = len(model_inputs) * metrics.mse(
                concatenated_inputs, concatenated_outputs)
        if app_config["loss_function"] == "cross_ent":
            reconstruction_loss = len(
                model_inputs) * metrics.binary_crossentropy(
                    concatenated_inputs, concatenated_outputs)

        if app_config["is_variational"]:
            kl = -0.5 * K.sum(
                1 + z_log_var - K.exp(z_log_var) - K.square(z_mean), axis=1)
            self.vae.add_loss(K.mean(reconstruction_loss +
                                     kl))  # weighting? average?

        else:
            self.vae.add_loss(reconstruction_loss)
        #            for i in range(len(model_inputs)):
        #            self.vae.add_loss(metrics.binary_crossentropy(model_inputs[i], model_outputs[i]))

        # loss = metrics.binary_crossentropy(concatenated_inputs, concatenated_outputs)
        # loss = {}
        # for i in range(len(model_inputs)):
        #     loss[str(model_outputs[i].name[:model_outputs[i].name.index("/")])] = 'binary_crossentropy' # \
        # metrics.binary_crossentropy(model_inputs[i], model_outputs[i])
        # self.vae.add_loss(loss)

        self.vae.compile(optimizer='rmsprop')  # , loss=loss
        print "number of inputs: {}".format(len(model_inputs))
        print "number of outputs: {}".format(len(model_outputs))

        plot_model(self.vae,
                   to_file=os.path.join(constants.OUTPUT_GLOBAL_DIR,
                                        "model_{}.svg".format(time.time())))
Example #15
class Evaluator(object):
    def __init__(self, f, shp):
        self.f, self.shp = f, shp

    def loss(self, x):
        loss_, self.grad_values = self.f([x.reshape(self.shp)])
        return loss_.astype(np.float64)

    def grads(self, x):
        return self.grad_values.flatten().astype(np.float64)


# In[29]:

loss = metrics.mse(layer, target)
grads = K.gradients(loss, model.input)
fn = K.function([model.input], [loss] + grads)
evaluator = Evaluator(fn, shape_content)

# In[8]:


def solve_image(eval_obj, niter, x, path):
    for i in range(niter):
        x, min_val, info = fmin_l_bfgs_b(eval_obj.loss,
                                         x.flatten(),
                                         fprime=eval_obj.grads,
                                         maxfun=20)
        x = np.clip(x, -127, 127)
        print('Minimum Loss Value:', min_val)
Example #16
 def get_style_loss(self, x, targ):  # MSE between the 2 gram matrices
     return K.mean(metrics.mse(self.gram_matrix(x), self.gram_matrix(targ)))
Example #17
style_layers = [outputs['block{}_conv2'.format(o)] for o in range(1, 6)]
content_name = 'block4_conv2'
content_layer = outputs[content_name]

style_model = Model(vgg_model.input, style_layers)
style_targs = [K.variable(o) for o in style_model.predict(style_image_array)]

content_model = Model(vgg_model.input, content_layer)
content_targ = K.variable(content_model.predict(src))

style_wgts = [0.05, 0.2, 0.2, 0.25, 0.3]

loss = sum(
    style_loss(l1[0], l2[0]) * w
    for l1, l2, w in zip(style_layers, style_targs, style_wgts))
loss += metrics.mse(content_layer, content_targ) / 10

grads = tf.keras.backend.gradients(loss, vgg_model.input)
transfer_fn = tf.keras.backend.function([vgg_model.input], [loss] + grads)
evaluator = ConvexOptimiser(transfer_fn, style_image_shape)
generated_image = generate_rand_img(style_image_shape)
generated_image = optimise(evaluator, iterations, generated_image,
                           style_image_shape)

# Content plus style transfer

style_width, style_height = style_image.size
content_image_array = content_image_array[:, :style_height, :style_width]

style_layers_2 = [
    style_layers['block{}_conv2'.format(block_no)] for block_no in range(1, 6)
]
Example #18
content_layer = outputs['block4_conv2']

#a gram matrix is a matrix that collects the correlations of all of the vectors
#in a set. See the wiki (https://en.wikipedia.org/wiki/Gramian_matrix) for more details
def gram_matrix(x):
    #change height,width,depth to depth,height,width (2,1,0 would work too,
    #but 2,0,1 may be more efficient due to the underlying memory layout)
    features = K.permute_dimensions(x, (2,0,1))
    #batch flatten make features become 2D array
    features = K.batch_flatten(features)
    return K.dot(features, K.transpose(features)) / x.get_shape().num_elements()    

def style_loss(x, targ):
    return metrics.mse(gram_matrix(x), gram_matrix(targ))
    
content_loss = lambda base, gen: metrics.mse(gen, base)    

#l[1] is the output(activation) of style_base, l[2] is the output of gen_img
loss = sum([style_loss(l[1], l[2]) for l in style_layers]) #loss of style image and gen_img

#content_layer[0] is the output of content_base, content_layer[2] is the output of gen_img
loss += content_loss(content_layer[0], content_layer[2]) / 10. #loss of content image and gen_img

#The loss needs two variables but we only pass in one,
#because we only have one placeholder in the graph;
#the other variable is already fixed by K.variable
grad = K.gradients(loss, gen_img)
fn = K.function([gen_img], [loss] + grad)

#fn will return loss and grad, but fmin_l_bfgs_b needs them separately;
#that is why we need a class to split loss and gradient apart and store them
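
The class that comment refers to is the same Evaluator pattern used in Examples #11, #15, and #23; a minimal sketch:

import numpy as np

class Evaluator(object):
    # caches the gradients from a single fn call so that loss and grads
    # can be handed to fmin_l_bfgs_b as two separate callbacks
    def __init__(self, fn, shp):
        self.fn, self.shp = fn, shp

    def loss(self, x):
        loss_, self.grad_values = self.fn([x.reshape(self.shp)])
        return loss_.astype(np.float64)

    def grads(self, x):
        return self.grad_values.flatten().astype(np.float64)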
Example #19
def ConvolutionalVAE(img_rows, img_cols, img_chans, mse=True, external_model=None):
    filters             = 16
    kernel_size         = 3
    latent_dim          = 100
    original_img_size   = (img_rows, img_cols, img_chans)

    # ------------------------------------------------------------------------------------------------------------------
    # Encoder:
    inputs = Input(shape=original_img_size, name='encoder_input')
    x = inputs
    for i in range(2):
        filters *= 2
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   activation='relu',
                   strides=2,
                   padding='same')(x)

    encoder_shape = K.int_shape(x)
    x = Flatten()(x)
    # z_mean = z_log_var = z = (None, latent_dim)
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)
    ## use reparameterization trick to push the sampling out as input
    z = Lambda(var_sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

    #encoder = Model(inputs=inputs, outputs = [z_mean, z_log_var, z], name='encoder')
    encoder = Model(inputs=inputs, outputs=z, name='encoder')
    encoder.summary()

    # ------------------------------------------------------------------------------------------------------------------
    # Decoder:
    latent_inputs = Input(shape=(latent_dim, ), name='z_sampling')
    x = Dense(encoder_shape[1]*encoder_shape[2]*encoder_shape[3], activation='relu')(latent_inputs)
    x = Reshape((encoder_shape[1], encoder_shape[2], encoder_shape[3]))(x)
    for i in range(2):
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            strides=2,
                            padding='same')(x)
        filters //= 2
    outputs = Conv2DTranspose(filters=img_chans,
                              kernel_size=kernel_size,
                              activation='sigmoid',
                              padding='same',
                              name='decoder_output')(x)

    decoder = Model(inputs=latent_inputs, outputs=outputs, name='decoder')
    decoder.summary()

    pred_outputs = decoder(encoder(inputs))

    reconstruction_loss = metrics.mse(K.flatten(pred_outputs), K.flatten(inputs)) * img_rows * img_cols
    # KL divergence between the approximate posterior q(z|x) and the unit-Gaussian prior p(z)
    kl_loss             = K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) * -0.5
    #vae_loss            = K.mean(sigmoid(reconstruction_loss) + sigmoid(kl_loss))
    vae_loss = K.mean(reconstruction_loss + kl_loss)

    external_loss = 0
    if external_model:
        external_model.summary()

        # entropy_loss: our goal is to maximize entropy for gender prediction,
        # so we negate its original definition
        # p_x = external_model(pred_outputs)
        # entropy_gender_loss = -K.mean(-p_x * K.log(p_x))
        # vae_loss += entropy_gender_loss

        # penalize similarity between gender predictions for original and reconstructed images
        external_loss = K.mean(metrics.mse(external_model(pred_outputs), (1. - external_model(inputs))))

        #reranking loss as in Adversarial Transformation Networks
        #external_l = external_loss(external_model)

        vae_loss = vae_loss + 100.*external_loss

    vae = Model(inputs=inputs, outputs=pred_outputs, name='vae')
    vae.add_loss(vae_loss)
    vae.compile(optimizer='adam')
    return vae, encoder, decoder
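
A hedged usage sketch for ConvolutionalVAE (the 64x64 RGB shape and the training array are assumptions for illustration):

# hypothetical usage: 64x64 RGB images in x_train, values scaled to [0, 1]
vae, encoder, decoder = ConvolutionalVAE(img_rows=64, img_cols=64, img_chans=3)
vae.fit(x_train, epochs=10, batch_size=128)  # loss was attached via add_loss, so no targets are passed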
Example #20
 def vae_loss(x, x_decoded_mean):
     reconstruction_loss = metrics.mse(x,
                                       x_decoded_mean) * self.input_dim
     kl_loss = -0.5 * K.mean(
         1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return reconstruction_loss + kl_loss
Example #21
model = VGG16_Avg(include_top=False, input_shape=shp[1:])
outputs = {l.name: l.output for l in model.layers}


style_layers = [outputs['block{}_conv2'.format(o)] for o in range(1,6)]
content_layer = outputs['block4_conv2']

style_model = Model(model.input, style_layers)
style_targs = [K.variable(o) for o in style_model.predict(style_arr)]

content_model = Model(model.input, content_layer)
content_targ = K.variable(content_model.predict(img_arr))

style_wgts = [0.05,0.2,0.2,0.25,0.3]


loss = sum(style_loss(l1[0], l2[0])*w for l1,l2,w in zip(style_layers, style_targs, style_wgts))
loss += K.mean(metrics.mse(content_layer, content_targ))/10
grads = K.gradients(loss, model.input)
transfer_fn = K.function([model.input], [loss]+grads)

evaluator = Evaluator(transfer_fn, shp)

iterations=15
x = rand_img(shp)
x = solve_image(evaluator, iterations, x)



Example #22
# discriminator training model
discriminator_model = Model(disc_input, disc_out, name='discriminator_model')
discriminator_model.compile(loss='binary_crossentropy', optimizer=optimizer)
print("DISCRIMINATOR TRAINER:")
discriminator_model.summary()
# decoder training model (GAN)
discriminator.trainable = False
outputs = discriminator(decoder(latent_inputs))
generator_model = Model(latent_inputs, outputs, name='generator_model')
generator_model.compile(loss='binary_crossentropy', optimizer=optimizer)
print("DECODER TRAINER (GENERATOR):")
generator_model.summary()
# encoder & decoder training model (VAE)
outputs = decoder(encoder(inputs)[2])
vae_model = Model(inputs, outputs)
xent_loss = 64 * 64 * metrics.mse(K.flatten(inputs), K.flatten(outputs))
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                       axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
vae_model.add_loss(vae_loss)
vae_model.compile(optimizer=optimizer)
print("ENCODER&DECODER TRAINER (VAE):")
vae_model.summary()

if (len(sys.argv) == 1 or sys.argv[1] == 'continue'):
    if (len(sys.argv) > 1 and sys.argv[1] == 'continue'):
        encoder.load_weights('c_vae_gan_enc.h5')
        decoder.load_weights('c_vae_gan_dec.h5')
        discriminator.load_weights('c_vae_gan_disc.h5')

    X_train = []
Example #23
class Evaluator(object):
    def __init__(self, f, shp): self.f, self.shp = f, shp
        
    def loss(self, x):
        loss_, self.grad_values = self.f([x.reshape(self.shp)])
        return loss_.astype(np.float64)

    def grads(self, x): return self.grad_values.flatten().astype(np.float64)


# We'll define our loss function to calculate the mean squared error between the two outputs at the specified convolutional layer.

# In[32]:


loss = metrics.mse(layer, targ)
grads = K.gradients(loss, model.input)
fn = K.function([model.input], [loss]+grads)
evaluator = Evaluator(fn, shp)


# Now we're going to optimize this loss function with a deterministic approach to optimization that uses a line search, which we can implement with scipy's `fmin_l_bfgs_b` function.

# In[33]:


def solve_image(eval_obj, niter, x):
    for i in range(niter):
        x, min_val, info = fmin_l_bfgs_b(eval_obj.loss, x.flatten(),
                                         fprime=eval_obj.grads, maxfun=20)
        x = np.clip(x, -127,127)
Example #24
def encoder_loss(y_true, y_pred):
    recon_loss = 64 * 64 * metrics.mse(K.flatten(y_true), K.flatten(y_pred))
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                           axis=-1)
    return K.mean(recon_loss + kl_loss)
Example #25
def vae_loss(model_inputs, model_outputs):
    _, target_sample = model_inputs
    recon_sample, z_mean, z_log_var = model_outputs
    # K.shape returns a tensor, so cast it rather than calling float() on it
    sample_size = K.cast(K.shape(target_sample)[1] * K.shape(target_sample)[2], K.floatx())
    mse_loss = sample_size * metrics.mse(target_sample, recon_sample)
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return mse_loss + kl_loss
Example #26
def style_loss(x, targ):
    '''
    returns the style loss between the input image and the target image;
    the factor 1/(4 * N**2 * M**2) is computed inside gram_matrix to simplify style_loss
    '''
    return metrics.mse(gram_matrix(x), gram_matrix(targ))
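
The factor in the docstring matches the Gatys et al. normalisation. A gram_matrix sketch that folds it in (channels-last feature map assumed; N is the channel count, M the feature-map size):

from keras import backend as K

def gram_matrix(x):
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    n = K.cast(K.shape(features)[0], K.floatx())  # N: number of channels
    m = K.cast(K.shape(features)[1], K.floatx())  # M: height * width
    # dividing each Gram matrix by 2*N*M makes the MSE of two Gram
    # matrices carry the overall 1/(4*N^2*M^2) factor
    return K.dot(features, K.transpose(features)) / (2.0 * n * m)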
Example #27
def style_loss(x, targ): return K.mean(metrics.mse(gram_matrix(x), gram_matrix(targ)))

def gram_matrix(x):
Example #28
def style_mse_loss(x, y):
    return metrics.mse(grammian_matrix(x), grammian_matrix(y))
Example #29
def vanila_autoencoder_loss(model_inputs, model_outputs):
    _, target_sample = model_inputs
    recon_sample, _ = model_outputs
    # cast the symbolic shape product instead of calling float() on a tensor
    sample_size = K.cast(K.shape(target_sample)[1] * K.shape(target_sample)[2], K.floatx())
    return sample_size * metrics.mse(target_sample, recon_sample)
Example #30
def style_loss(x, targ):
    return metrics.mse(gram_matrix(x), gram_matrix(targ))
Example #31
def ConvVAE(num_filters, kernel_sizes, num_z, input_shape=kDefaultInputShape):

    assert (len(num_filters) == len(kernel_sizes))

    last_kernel_size = input_shape[0]
    for sk in kernel_sizes:
        last_kernel_size -= (sk - 1)
        assert(0 < last_kernel_size)

    num_layers = len(num_filters)

    # encoder
    encoder_settings = [(num_filters[i], kernel_sizes[i]) for i in range(1, num_layers)]

    encoder = Sequential()
    encoder.add(Conv2D(num_filters[0], (kernel_sizes[0], input_shape[1]), input_shape=input_shape))
    encoder.add(BatchNormalization())
    encoder.add(Activation('relu'))
    for (num_fs, sz_k) in encoder_settings:
        encoder.add(Conv2D(num_fs, (sz_k, 1)))
        encoder.add(BatchNormalization())
        encoder.add(Activation('relu'))

    # output layer of encoder
    encoder.add(Conv2D(num_z, (last_kernel_size, 1)))
    encoder.add(BatchNormalization())
    encoder.add(Activation('linear', name='latent'))

    encoder.summary()

    # latent space
    input_sample = Input(shape=input_shape)
    z_mean = encoder(input_sample)
    z_log_var = encoder(input_sample)
    z = Lambda(sampling)([z_mean, z_log_var])

    # for deconvolutional layers
    dec_num_filters = num_filters[::-1] + [input_shape[2]]
    dec_kernel_sizes = [last_kernel_size] + kernel_sizes[::-1]
    dec_channels = [1 if i < num_layers else input_shape[1] for i in range(num_layers + 1)]
    decoder_settings = [(dec_num_filters[i], dec_kernel_sizes[i], dec_channels[i]) for i in range(len(dec_num_filters))]

    # decoder
    decoder = Sequential()
    for i, (num_fs, sz_k, ch) in enumerate(decoder_settings):
        if i == 0:
            decoder.add(Conv2DTranspose(num_fs, (sz_k, ch), input_shape=(1, 1, num_z)))
        else:
            decoder.add(Conv2DTranspose(num_fs, (sz_k, ch)))
        if i + 1 == num_layers:
            decoder.add(Activation('linear'))
        else:
            decoder.add(Activation('relu'))

    # reconstruction
    recon_sample = decoder(z)
    target_sample = Input(shape=input_shape)

    decoder.summary()

    # instantiate VAE model
    vae = Model(inputs=[input_sample, target_sample], outputs=[recon_sample, z, z_mean, z_log_var])
    vae.summary()

    # Compute VAE loss
    mse_loss = float(input_shape[0]) * float(input_shape[1]) * metrics.mse(target_sample, recon_sample)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    vae_loss = K.mean(mse_loss + kl_loss)
    vae.add_loss(vae_loss)

    return vae
Example #32
def style_loss(x, targ):
    return metrics.mse(gram_matrix(x), gram_matrix(targ))
Example #33
def reconstruction_loss(x_input, x_decoded):
    return metrics.mse(x_input, x_decoded)