Ejemplo n.º 1
0
    def weighted_answer_probabilities(self, *args, **kwargs):
        """Return answer probabilities rescaled along ``self.scaling_sequence``.

        Each (slice, index) step multiplies the columns selected by the slice
        with the column at the index, which is wrapped in ``consider_constant``
        so the scaling factor is treated as a constant w.r.t. gradients.
        All *args/**kwargs are forwarded to ``answer_probabilities``.
        """
        probs = self.answer_probabilities(*args, **kwargs)

        # apply the rescaling steps in order (6 steps)
        for target_slice, weight_idx in self.scaling_sequence:
            # column of weights, broadcast across the sliced columns;
            # gradient does not flow through the weights
            weights = consider_constant(probs[:, weight_idx].dimshuffle(0, 'x'))
            probs = T.set_subtensor(probs[:, target_slice],
                                    probs[:, target_slice] * weights)

        return probs
Ejemplo n.º 2
0
    def weighted_answer_probabilities(self, *args, **kwargs):
        """Compute answer probabilities, then rescale them in place.

        Walks ``self.scaling_sequence`` in order; every (cols, idx) pair
        scales the ``cols`` columns of ``probs`` by column ``idx``. The
        scale column goes through ``consider_constant`` so it contributes
        no gradient. Arguments are passed straight to
        ``answer_probabilities``.
        """
        probs = self.answer_probabilities(*args, **kwargs)

        # go through the rescaling sequence in order (6 steps)
        for cols, idx in self.scaling_sequence:
            scale = probs[:, idx].dimshuffle(0, 'x')  # (batch, 1) broadcast
            probs = T.set_subtensor(
                probs[:, cols],
                probs[:, cols] * consider_constant(scale))

        return probs
Ejemplo n.º 3
0
    # NOTE(review): Python 2 fragment (print statement, list-concatenated
    # dict.values()); the enclosing function's signature lies above this chunk.

    # Merge encoder and decoder parameters into the shared parameter dict.
    params.update(params_enc)
    params.update(params_dec)

    # Debug print: dtype of every encoder/discriminator/decoder parameter.
    for pv in params_enc.values() + params_disc.values() + params_dec.values():
        print pv.dtype

    # Symbolic 2-D input minibatch (rows = examples).
    x = T.matrix()

    # Build the encoder/decoder graph on the normalized input.
    results_map = define_network(normalize(x), params, config)

    x_reconstructed = results_map['reconstruction']
    x_sampled = results_map['sample']

    # Three discriminator passes sharing params_disc:
    #   disc_real_D: real (normalized) input + latent z   -> discriminator loss
    #   disc_fake_D: reconstruction + latent z            -> discriminator loss
    #   disc_fake_G: reconstruction with z gradient-blocked via
    #                consider_constant                    -> generator-side loss
    disc_real_D = discriminator(normalize(x), results_map['z'], params_disc, mb_size = config['mb_size'], num_hidden = config['num_hidden'], num_latent = config['num_latent'])
    disc_fake_D = discriminator(x_reconstructed, results_map['z'], params_disc, mb_size = config['mb_size'], num_hidden = config['num_hidden'], num_latent = config['num_latent'])
    disc_fake_G = discriminator(x_reconstructed, consider_constant(results_map['z']), params_disc, mb_size = config['mb_size'], num_hidden = config['num_hidden'], num_latent = config['num_latent'])

    bce = T.nnet.binary_crossentropy

    # Discriminator loss with label smoothing: real targets 0.999, fake
    # targets 0.0001 (keeps the sigmoid away from saturation).
    LD_dD = bce(disc_real_D['c'], 0.999 * T.ones(disc_real_D['c'].shape)).mean() + bce(disc_fake_D['c'], 0.0001 + T.zeros(disc_fake_D['c'].shape)).mean()
    # Generator-side loss: push the discriminator toward "real" (target 1)
    # on reconstructions, with z held constant.
    LD_dG = bce(disc_fake_G['c'], T.ones(disc_fake_G['c'].shape)).mean()

    # Latent-space loss from the network (presumably a KL / prior term —
    # confirm against define_network) plus the reconstruction loss.
    vae_loss = results_map['z_loss']
    rec_loss = compute_loss(x_reconstructed, normalize(x))

    # NOTE(review): the adversarial terms LD_dD / LD_dG are built but not
    # added into `loss` here — presumably consumed by separate updates
    # outside this chunk; verify against the caller.
    loss = vae_loss + rec_loss

    inputs = [x]

    # Everything downstream code may want to fetch from the compiled
    # function; reconstructions/samples are mapped back to data space
    # via denormalize.
    outputs = {'loss' : loss, 'vae_loss' : vae_loss, 'rec_loss' : rec_loss, 'reconstruction' : denormalize(x_reconstructed), 'c_real' : disc_real_D['c'], 'c_fake' : disc_fake_D['c'], 'x' : x, 'sample' : denormalize(x_sampled), 'interp' : denormalize(results_map['interp'])}