Code Example #1
def radial_gaussians(batch_size, n_mixture=8, std=0.01, radius=1.0, add_far=False):
    thetas = np.linspace(0, 2 * np.pi, n_mixture + 1)[:-1]
    xs, ys = radius * np.cos(thetas), radius * np.sin(thetas)
    cat = ds.Categorical(tf.zeros(n_mixture))
    comps = [ds.MultivariateNormalDiag([xi, yi], [std, std]) for xi, yi in zip(xs.ravel(), ys.ravel())]
    data = ds.Mixture(cat, comps)
    return data.sample(batch_size)
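A minimal usage sketch (an assumption, not part of the original snippet): under TF 1.x with `ds = tf.contrib.distributions`, the returned op is evaluated in a session. Note that the `add_far` flag is accepted but unused in this variant.

import numpy as np
import tensorflow as tf
ds = tf.contrib.distributions

samples_op = radial_gaussians(batch_size=512, n_mixture=8, std=0.02)
with tf.Session() as sess:
    points = sess.run(samples_op)  # ndarray of shape [512, 2]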
Code Example #2
def GMM2Dslow(log_pis, mus, log_sigmas, corrs, clip_lo=-10, clip_hi=10):
    # shapes
    # log_pis:    [..., GMM_c]
    # mus:        [..., GMM_c * state_dim]
    # log_sigmas: [..., GMM_c * state_dim]
    # corrs:      [..., GMM_c]
    GMM_c = log_pis.shape[-1]

    mus_split = tf.split(mus, GMM_c, axis=-1)
    sigmas = tf.exp(tf.clip_by_value(log_sigmas, clip_lo, clip_hi))

    # Sigma = [s1^2    p*s1*s2      L = [s1   0
    #          p*s1*s2 s2^2 ]            p*s2 sqrt(1-p^2)*s2]
    sigmas_reshaped = tf.reshape(
        sigmas,
        [-1 if s.value is None else s.value
         for s in sigmas.shape[:-1]] + [GMM_c.value, 2])
    Ls = tf.stack(
        [
            (sigmas_reshaped *
             tf.stack([tf.ones_like(corrs), corrs], -1)),  # [s1, p*s2]
            (sigmas_reshaped *
             tf.stack([tf.zeros_like(corrs),
                       tf.sqrt(1 - corrs**2)], -1))
        ],  # [0, sqrt(1-p^2)*s2]
        axis=-1)
    Ls_split = tf.unstack(Ls, axis=-3)

    cat = distributions.Categorical(logits=log_pis)
    dists = [
        distributions.MultivariateNormalTriL(mu, L)
        for mu, L in zip(mus_split, Ls_split)
    ]
    return distributions.Mixture(cat, dists)
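A quick numpy sanity check (added here, not from the original repo) that the lower-triangular factor in the comment above reproduces the covariance:

import numpy as np

s1, s2, p = 0.7, 1.3, 0.4
L = np.array([[s1, 0.0],
              [p * s2, np.sqrt(1.0 - p**2) * s2]])
Sigma = np.array([[s1**2, p * s1 * s2],
                  [p * s1 * s2, s2**2]])
assert np.allclose(L @ L.T, Sigma)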
Code Example #3
def swiss(batch_size, size=1., std=0.01):
    x, _ = datasets.make_swiss_roll(1000)
    norm = x[:, ::2].max()
    xs = x[:, 0] * size / norm
    ys = x[:, 2] * size / norm
    cat = ds.Categorical(tf.zeros(len(x)))
    comps = [ds.MultivariateNormalDiag([xi, yi], [std, std]) for xi, yi in zip(xs.ravel(), ys.ravel())]
    data = ds.Mixture(cat, comps)
    return data.sample(batch_size)
Code Example #4
def radial_gaussians2(batch_size, n_mixture=8, std=0.01, r1=1.0, r2=2.0):
    thetas = np.linspace(0, 2 * np.pi, n_mixture + 1)[:-1]
    x1s, y1s = r1 * np.sin(thetas), r1 * np.cos(thetas)
    x2s, y2s = r2 * np.sin(thetas), r2 * np.cos(thetas)
    xs = np.vstack([x1s, x2s])
    ys = np.vstack([y1s, y2s])
    cat = ds.Categorical(tf.zeros(n_mixture * 2))
    comps = [ds.MultivariateNormalDiag([xi, yi], [std, std]) for xi, yi in zip(xs.ravel(), ys.ravel())]
    data = ds.Mixture(cat, comps)
    return data.sample(batch_size)
Code Example #5
def line_1d(batch_size, n_mixture=5, std=0.01, d=1.0, add_far=False):
    xs = np.linspace(-d, d, n_mixture, dtype=np.float32)
    p = [0.] * n_mixture
    if add_far:
        xs = np.concatenate([np.array([-10 * d]), xs, np.array([10 * d])], 0)
        p = [0.] + p + [0.]
    cat = ds.Categorical(tf.constant(p))
    comps = [ds.MultivariateNormalDiag([xi], [std]) for xi in xs.ravel()]
    data = ds.Mixture(cat, comps)
    return data.sample(batch_size)
Code Example #6
def ring_mog(batch_size, n_mixture=8, std=0.01, radius=1.0):
    thetas = np.linspace(0, 2 * np.pi, n_mixture, endpoint=False)
    xs = radius * np.sin(thetas, dtype=np.float32)
    ys = radius * np.cos(thetas, dtype=np.float32)
    cat = ds.Categorical(tf.zeros(n_mixture))
    comps = [
        ds.MultivariateNormalDiag([xi, yi], [std, std])
        for xi, yi in zip(xs.ravel(), ys.ravel())
    ]
    data = ds.Mixture(cat, comps)
    return data.sample(batch_size), np.stack([xs, ys], axis=1)
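`ring_mog` returns both the sampling op and the mode locations; a usage sketch (assumed, TF 1.x):

samples_op, modes = ring_mog(batch_size=256)
with tf.Session() as sess:
    samples = sess.run(samples_op)  # [256, 2] points on the ring
# `modes` is a plain [8, 2] numpy array, handy for mode-coverage metrics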
Code Example #7
def rect(batch_size, std=0.01, nx=5, ny=5, h=2, w=2):
    x = np.linspace(-h, h, nx)
    y = np.linspace(-w, w, ny)
    p = []
    for i in x:
        for j in y:
            p.append((i, j))
    cat = ds.Categorical(tf.zeros(len(p)))
    comps = [ds.MultivariateNormalDiag([xi, yi], [std, std]) for xi, yi in p]
    data = ds.Mixture(cat, comps)
    return data.sample(batch_size)
Code Example #8
def grid_mog(batch_size, n_mixture=25, std=0.05, space=2.0):
    grid_range = int(np.sqrt(n_mixture))
    modes = np.array([
        np.array([i, j])
        for i, j in itertools.product(range(-grid_range + 1, grid_range, 2),
                                      range(-grid_range + 1, grid_range, 2))
    ],
                     dtype=np.float32)
    modes = modes * space / 2.
    cat = ds.Categorical(tf.zeros(n_mixture))
    comps = [ds.MultivariateNormalDiag(mu, [std, std]) for mu in modes]
    data = ds.Mixture(cat, comps)
    return data.sample(batch_size), modes
Code Example #9
def input_tensor(batch_size, return_mixture=False):
    gaussians = [
        ds.MultivariateNormalDiag(loc=(5.0, 5.0), scale_diag=(0.5, 0.5)),
        ds.MultivariateNormalDiag(loc=(-5.0, 5.0), scale_diag=(0.5, 0.5)),
        ds.MultivariateNormalDiag(loc=(-5.0, -5.0), scale_diag=(0.5, 0.5)),
        ds.MultivariateNormalDiag(loc=(5.0, -5.0), scale_diag=(0.5, 0.5))
    ]
    uniform_mixture_probs = [1 / len(gaussians)] * len(gaussians)

    # Categorical's first positional argument is `logits`; pass probs by name
    mixture = ds.Mixture(cat=ds.Categorical(probs=uniform_mixture_probs),
                         components=gaussians)

    sampled = mixture.sample(batch_size)

    return (sampled, mixture) if return_mixture else sampled
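Because the function can also hand back the mixture itself, the same graph can score points under the true density; a sketch (assumed, TF 1.x):

sampled, mixture = input_tensor(batch_size=128, return_mixture=True)
log_density = mixture.log_prob(sampled)  # log p(x) for each sampled point
with tf.Session() as sess:
    pts, logp = sess.run([sampled, log_density])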
Code Example #10
def create_mixgaussian2D(num_components=8, std=0.05):
    cat = ds.Categorical(tf.zeros(num_components, dtype=tf.float32))
    #    mus = np.array([np.array([i, j]) for i, j in itertools.product(np.linspace(-1, 1, 5),
    #                                                           np.linspace(-1, 1, 5))],dtype=np.float32)
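    # np.linspace(0, 2*pi, num_components + 1) includes both endpoints, so the
    # last mode duplicates the first; zipping against `sigmas` (length
    # num_components) below silently drops that duplicate.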
    mus = np.array(
        [(np.cos(theta), np.sin(theta))
         for theta in np.linspace(0, 2 * np.pi, num_components + 1)],
        dtype=np.float32)
    #    mus = (mus + 2) / 4.

    sigmas = [
        np.array([std, std]).astype(np.float32) for i in range(num_components)
    ]
    components = list((ds.MultivariateNormalDiag(mu, sigma)
                       for (mu, sigma) in zip(mus, sigmas)))
    data = ds.Mixture(cat, components)
    return data
Code Example #11
def GMMdiag(log_pis, mus, log_sigmas, clip_lo=-10, clip_hi=10):
    # shapes
    # log_pis:    [..., GMM_c]
    # mus:        [..., GMM_c * state_dim]
    # log_sigmas: [..., GMM_c * state_dim]
    GMM_c = log_pis.shape[-1]
    ax = len(mus.shape) - 1

    mus_split = tf.split(mus, GMM_c, axis=ax)
    sigmas = tf.exp(tf.clip_by_value(log_sigmas, clip_lo, clip_hi))
    sigmas_split = tf.split(sigmas, GMM_c, axis=ax)

    cat = distributions.Categorical(logits=log_pis)
    dists = [
        distributions.MultivariateNormalDiag(mu, sigma)
        for mu, sigma in zip(mus_split, sigmas_split)
    ]
    return distributions.Mixture(cat, dists)
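A shape-only sketch of calling `GMMdiag` (toy tensors, assumed): with GMM_c = 3 components over a 2-D state, the trailing axes are [3], [6], and [6]:

log_pis = tf.zeros([32, 3])      # [batch, GMM_c]
mus = tf.zeros([32, 6])          # [batch, GMM_c * state_dim]
log_sigmas = tf.zeros([32, 6])   # [batch, GMM_c * state_dim]
gmm = GMMdiag(log_pis, mus, log_sigmas)
nll = -gmm.log_prob(tf.zeros([32, 2]))  # one value per batch row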
Code Example #12
File: decoders.py Project: shaun95/seq2seq-1
 def sample(self, time, outputs, state, name=None):
     """Returns `sample_ids`."""
     del time, state
     # return outputs
     with tf.variable_scope('mdn'):
         means = tf.reshape(
             tf.slice(
                 outputs, [0, 0],
                 [self._batch_size, self._n_features * self._n_mixtures]),
             [self._batch_size, self._n_features, self._n_mixtures],
             name='means')
         sigmas = tf.minimum(
             10000.0,
             tf.maximum(
                 1e-1,
                 tf.nn.softplus(
                     tf.reshape(
                         tf.slice(outputs,
                                  [0, self._n_features * self._n_mixtures],
                                  [
                                      self._batch_size,
                                      self._n_features * self._n_mixtures
                                  ],
                                  name='sigmas_pre_norm'), [
                                      self._batch_size, self._n_features,
                                      self._n_mixtures
                                  ]))))
         weights = tf.nn.softmax(tf.reshape(
             tf.slice(outputs, [0, 2 * self._n_features * self._n_mixtures],
                      [self._batch_size, self._n_mixtures],
                      name='weights_pre_norm'),
             [self._batch_size, self._n_mixtures]),
                                 name='weights')
         components = []
         for gauss_i in range(self._n_mixtures):
             mean_i = means[:, :, gauss_i]
             sigma_i = sigmas[:, :, gauss_i]
             components.append(
                 tfd.MultivariateNormalDiag(loc=mean_i, scale_diag=sigma_i))
         gauss = tfd.Mixture(cat=tfd.Categorical(probs=weights),
                             components=components)
         sample = gauss.sample()
     return sample
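Reading off the three slices above, the flat `outputs` vector must be laid out as [means | sigmas | weights] per batch row, so its expected width is (a derived check, not stated in the original; writing `n_features` for `self._n_features`, etc.):

# one block of n_features * n_mixtures means, one of sigmas, then n_mixtures
# unnormalized mixture weights
expected_width = 2 * n_features * n_mixtures + n_mixtures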
Code Example #13
    def get_mixture_model(self):
        """
        ds.Mixture in TensorFlow requires a Categorical dist. to determine which individual dist. is 
        used for generating a sample, 'components' is a list of different classes defined from 
        tf.contrib.distributions
        """
        prob = 1. / self.num_gaussians
        probs = [prob for i in range(self.num_gaussians)]

        mus = self.get_mus()
        scales = self.get_scale_matrices()
        gaussians = [
            ds.MultivariateNormalTriL(loc=mus[i], scale_tril=scales[i])
            for i in range(self.num_gaussians)
        ]

        mixture = ds.Mixture(cat=ds.Categorical(probs=probs),
                             components=gaussians)

        return mixture
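As the docstring says, `ds.Mixture` pairs one `Categorical` over component indices with a list of component distributions; a minimal standalone sketch (assumed, TF 1.x):

cat = ds.Categorical(probs=[0.3, 0.7])
components = [ds.Normal(loc=-1.0, scale=0.5),
              ds.Normal(loc=2.0, scale=1.0)]
mixture = ds.Mixture(cat=cat, components=components)
x = mixture.sample(10)  # picks a component per draw, then samples from it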
Code Example #14
def get_mixture(j, xoo, sx_defalut):
    # mi, ni = [ get_mix(0.33, xoo[i], 0.4, j, i) for i in range(len(xoo))]
    mms = []
    nns = []
    value = 1.0 / (1.0 * len(xoo))
    for m, n in [
            get_mix(value, xoo[i], sx_defalut, j, i) for i in range(len(xoo))
    ]:
        mms.append(m)
        nns.append(n)
    print(mms[:-1])
    mcomp = get_normalized_complement(mms[:-1])
    print(mcomp)
    mms = mms[:-1] + [mcomp]
    print(mms)
    # m2, n2 = get_mix(0.33, 1.3, 0.4, j, 2)
    # m3, n3 = get_mix(0.33, 1.5, 0.4, j, 3)

    xDist = tfd.Mixture(cat=tfd.Categorical(probs=mms), components=nns)
    return xDist
Code Example #15
def MixtureOfGaussians(batch_size,
                       components=7,
                       stddev=MOG_STDDEV,
                       radius=1.0,
                       RAT=True):
    with tf.name_scope('MixtureOfGaussians'):
        thetas = np.linspace(0, 2 * pi, components + 1)[:-1]
        xs, ys = radius * np.sin(thetas), radius * np.cos(thetas)
        cat = tfcds.Categorical(np.zeros(components))
        comps = [
            tfcds.MultivariateNormalDiag([xi, yi], [stddev, stddev])
            for xi, yi in zip(xs, ys)
        ]
        mixture = tfcds.Mixture(cat, comps)
        # Embedding 2D Mixture in 3D space
        E = tf.constant([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
        embedding = tf.matmul(mixture.sample(batch_size), E)
        if RAT:
            submanifold = RotateAndTranslate(embedding)
            return submanifold
        else:
            return embedding
Code Example #16
# Define data
t_x = tf.placeholder(tf.float32)
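# NOTE (assumption, not in the original snippet): the array `x` fed to `t_x`
# at the bottom of this script is defined elsewhere; a plausible stand-in is
# a two-component 1-D Gaussian sample, e.g.
#   x = np.concatenate([np.random.normal(0.0, 1.0, 500),
#                       np.random.normal(1.5, 0.5, 500)])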

# Define parameters
t_p1_ = tf.Variable(0.0, dtype=tf.float32)
# sigmoid keeps t_p1 in (0, 1); softplus is unbounded above, so
# [t_p1, 1 - t_p1] could leave the probability simplex during training
t_p1 = tf.sigmoid(t_p1_)
t_mu1 = tf.Variable(0.0, dtype=tf.float32)
t_mu2 = tf.Variable(1.0, dtype=tf.float32)
t_sigma1_ = tf.Variable(1.0, dtype=tf.float32)
t_sigma1 = tf.nn.softplus(t_sigma1_)
t_sigma2_ = tf.Variable(1.0, dtype=tf.float32)
t_sigma2 = tf.nn.softplus(t_sigma2_)

# Define model and objective function
t_gm = ds.Mixture(
    cat=ds.Categorical(probs=[t_p1, 1.0 - t_p1]),
    components=[ds.Normal(t_mu1, t_sigma1),
                ds.Normal(t_mu2, t_sigma2)])
t_ll = tf.reduce_mean(t_gm.log_prob(t_x))

# Optimization
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(-t_ll)

# Run
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(500):
    sess.run(train, {t_x: x})

print('Estimated values:', sess.run([t_p1, t_mu1, t_mu2, t_sigma1, t_sigma2]))
Code Example #17
    def build_policy_network_op(self, scope="policy", trainable=True):
        ac_means = []
        ac_log_stds = []
        ac_stds = []
        dists = []
        pi = {}
        with tf.variable_scope(scope):
            for i in range(c.num_models):
                with tf.variable_scope('policy_%s' % i):
                    if self.discrete:
                        raise NotImplementedError
                    else:
                        ac_means.append(dnn(input=self.obv_ph,
                                            output_size=self.act_dim,
                                            scope='dnn',
                                            n_layers=c.pg_pi_n_layers,
                                            size=c.pg_pi_hidden_fc_size,
                                            trainable=trainable,
                                            hid_init=normc_initializer(1.0),
                                            final_init=normc_initializer(1.0)))
                        ac_log_stds.append(tf.get_variable('act_log_std_%s' % i,
                                                           shape=[self.act_dim],
                                                           trainable=trainable,
                                                           initializer=tf.zeros_initializer()))
                        ac_stds.append(tf.exp(ac_log_stds[i]))
                        dists.append(tfd.MultivariateNormalDiag(loc=ac_means[i],
                                                                scale_diag=tf.zeros_like(ac_means[i]) + ac_stds[i]))
                        pi['sampled_action_sub_%s' % i] = tf.squeeze(dists[i].sample())
                        pi['logprob_s_%s' % i] = dists[i].log_prob(self.sub_act_ph)

            for i in range(c.num_models):
                for j in range(i + 1, c.num_models):
                    pi['kl_divergence_%s_%s_tr' % (
                        i, j)] = tf.reduce_mean(
                        tfd.kl_divergence(distribution_a=dists[i], distribution_b=dists[j], allow_nan_stats=False))

            oracle_master_tr = tf.tile(
                tf.expand_dims(self.oracle_master_ph, axis=1),
                (1, c.num_models))  # NOTE: [1] or [0]

            if c.num_models == 1:
                pi['sampled_action'] = dists[0].sample()
                pi['logprob'] = dists[0].log_prob(self.sub_act_ph)
                pi['action_std'] = dists[0].stddev()
                pi['mean'] = dists[0].mean()
                pi['act_log_std'] = tf.log(pi['action_std'])
                return pi

            if c.oracle_master:
                # NOTE: ORACLE one-hot labels where the oracle mask is on,
                # SOFT master probabilities elsewhere
                probs = tf.one_hot(self.oracle_m_label_ph,
                                   depth=c.num_models) * oracle_master_tr + (
                                       1 - oracle_master_tr) * self.master_prob
            else:
                probs = self.master_prob

            categorical = tfd.Categorical(probs=probs)
            pi['entropy'] = categorical.entropy()
            gaussian_mixture = tfd.Mixture(cat=categorical, components=dists)

            pi['sampled_action'] = tf.squeeze(gaussian_mixture.sample())
            pi['logprob'] = gaussian_mixture.log_prob(self.sub_act_ph)
            pi['action_std'] = gaussian_mixture.stddev()
            pi['act_log_std'] = tf.log(pi['action_std'])
            pi['mean'] = gaussian_mixture.mean()

        return pi
Code Example #18
def create_model(batch_size=50,
                 sequence_length=120,
                 n_features=72,
                 n_neurons=512,
                 n_layers=2,
                 n_gaussians=5,
                 use_mdn=False):
    # [batch_size, max_time, n_features]
    source = tf.placeholder(tf.float32,
                            shape=(batch_size, sequence_length, n_features),
                            name='source')
    lengths = tf.multiply(tf.ones((batch_size, ), tf.int32),
                          sequence_length,
                          name='source_lengths')
    initial_state = tf.placeholder_with_default(
        input=np.zeros((2 * n_layers, 2, batch_size, n_neurons),
                       dtype=np.float32),
        shape=[2 * n_layers, 2, batch_size, n_neurons],
        name='initial_state')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    with tf.variable_scope('target/slicing'):
        source_input = tf.slice(
            source, [0, 0, 0],
            [batch_size, max(1, sequence_length - 1), n_features])
        source_output = tf.slice(source, [0, 1, 0],
                                 [batch_size, sequence_length - 1, n_features])

    # Build the encoder
    with tf.variable_scope('encoder'):
        encoding, final_state = _create_encoder(source=source_input,
                                                lengths=lengths,
                                                batch_size=batch_size,
                                                n_enc_neurons=n_neurons,
                                                n_layers=n_layers,
                                                keep_prob=keep_prob,
                                                initial_state=initial_state)

    n_outputs = n_features * n_gaussians + n_features * n_gaussians + n_gaussians
    outputs = tfl.fully_connected(encoding, n_outputs, activation_fn=None)

    max_sequence_size = max(1, sequence_length - 1)
    with tf.variable_scope('mdn'):
        means = tf.reshape(
            tf.slice(
                outputs, [0, 0, 0],
                [batch_size, max_sequence_size, n_features * n_gaussians]),
            [batch_size, max_sequence_size, n_features, n_gaussians])
        sigmas = tf.maximum(
            1e-4,
            tf.nn.softplus(
                tf.reshape(
                    tf.slice(outputs, [0, 0, n_features * n_gaussians], [
                        batch_size, max_sequence_size, n_features * n_gaussians
                    ]),
                    [batch_size, max_sequence_size, n_features, n_gaussians])))
        weights = tf.nn.softmax(
            tf.reshape(
                tf.slice(outputs, [
                    0, 0, n_features * n_gaussians + n_features * n_gaussians
                ], [batch_size, max_sequence_size, n_gaussians]),
                [batch_size, max_sequence_size, n_gaussians]))
        components = []
        for gauss_i in range(n_gaussians):
            mean_i = means[:, :, :, gauss_i]
            sigma_i = sigmas[:, :, :, gauss_i]
            components.append(
                tfd.MultivariateNormalDiag(loc=mean_i, scale_diag=sigma_i))
        gauss = tfd.Mixture(cat=tfd.Categorical(probs=weights),
                            components=components)
        sample = gauss.sample()

    with tf.variable_scope('loss'):
        negloglike = -gauss.log_prob(source_output)
        mdn_loss = tf.reduce_mean(negloglike)
        weighted_reconstruction = tf.reduce_mean(
            tf.expand_dims(weights, 2) * means, 3)
        if sequence_length > 1:
            weighted_mse_loss = tf.losses.mean_squared_error(
                weighted_reconstruction, source_output)
            mse = tf.losses.mean_squared_error(sample, source_output)
        else:
            weighted_mse_loss = tf.constant(0.0)
            mse = tf.constant(0.0)
        loss = mdn_loss + weighted_mse_loss

    return {
        'source': source,
        'keep_prob': keep_prob,
        'outputs': outputs,
        'sample': sample,
        'loss': loss,
        'initial_state': initial_state,
        'final_state': final_state,
        'mse': mse,
        'weighted_mse': weighted_mse_loss,
        'weighted_reconstruction': weighted_reconstruction
    }
Code Example #19
def model_mixture(mix_cat, xx, sx ):
    """gera o modelo estatistico"""
    n = len(xx)
    comp_x = [tfd.Normal(loc=xx[j], scale=sx[j]) for j in range(n)]
    xDist = tfd.Mixture(cat=tfd.Categorical(probs=mix_cat), components=comp_x)
    return xDist
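A usage sketch with toy values (assumed): three equally weighted 1-D components:

mix = model_mixture(mix_cat=[1/3, 1/3, 1/3],
                    xx=[0.0, 1.0, 2.0],
                    sx=[0.1, 0.1, 0.1])
samples = mix.sample(100)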
Code Example #20
def create_model(batch_size=50,
                 sequence_length=120,
                 n_features=72,
                 n_neurons=512,
                 input_embed_size=None,
                 target_embed_size=None,
                 n_layers=2,
                 n_gaussians=5,
                 use_mdn=False,
                 use_attention=False):
    # [batch_size, max_time, n_features]
    source = tf.placeholder(
        tf.float32,
        shape=(batch_size, sequence_length, n_features),
        name='source')
    target = tf.placeholder(
        tf.float32,
        shape=(batch_size, sequence_length, n_features),
        name='target')
    lengths = tf.multiply(
        tf.ones((batch_size,), tf.int32),
        sequence_length,
        name='source_lengths')

    # Dropout
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    with tf.variable_scope('target/slicing'):
        source_last = tf.slice(source, [0, sequence_length - 1, 0], [batch_size, 1, n_features])
        decoder_input = tf.slice(target, [0, 0, 0],
                                 [batch_size, sequence_length - 1, n_features])
        decoder_input = tf.concat([source_last, decoder_input], axis=1)
        decoder_output = tf.slice(target, [0, 0, 0],
                                  [batch_size, sequence_length, n_features])

    if input_embed_size:
        with tf.variable_scope('source/embedding'):
            source_embed, source_embed_matrix = _create_embedding(
                x=source, embed_size=input_embed_size)
    else:
        source_embed = source

    # Build the encoder
    with tf.variable_scope('encoder'):
        encoder_outputs, encoder_state = _create_encoder(
            source=source_embed,
            lengths=lengths,
            batch_size=batch_size,
            n_enc_neurons=n_neurons,
            n_layers=n_layers,
            keep_prob=keep_prob)

    # TODO: Add (vq?) variational loss

    # Build the decoder
    with tf.variable_scope('decoder') as scope:
        outputs, infer_outputs = _create_decoder(
            n_dec_neurons=n_neurons,
            n_layers=n_layers,
            keep_prob=keep_prob,
            batch_size=batch_size,
            encoder_outputs=encoder_outputs,
            encoder_state=encoder_state,
            encoder_lengths=lengths,
            decoding_inputs=decoder_input,
            decoding_lengths=lengths,
            n_features=n_features,
            scope=scope,
            max_sequence_size=sequence_length,
            n_gaussians=n_gaussians,
            use_mdn=use_mdn)

    if use_mdn:
        max_sequence_size = sequence_length
        with tf.variable_scope('mdn'):
            means = tf.reshape(
                tf.slice(
                    outputs[0], [0, 0, 0],
                    [batch_size, max_sequence_size, n_features * n_gaussians]),
                [batch_size, max_sequence_size, n_features, n_gaussians])
            sigmas = tf.nn.softplus(
                tf.reshape(
                    tf.slice(outputs[0], [0, 0, n_features * n_gaussians], [
                        batch_size, max_sequence_size, n_features * n_gaussians
                    ]),
                    [batch_size, max_sequence_size, n_features, n_gaussians]))
            weights = tf.nn.softmax(
                tf.reshape(
                    tf.slice(outputs[0], [
                        0, 0,
                        n_features * n_gaussians + n_features * n_gaussians
                    ], [batch_size, max_sequence_size, n_gaussians]),
                    [batch_size, max_sequence_size, n_gaussians]))
            components = []
            for gauss_i in range(n_gaussians):
                mean_i = means[:, :, :, gauss_i]
                sigma_i = sigmas[:, :, :, gauss_i]
                components.append(
                    tfd.MultivariateNormalDiag(
                        loc=mean_i, scale_diag=sigma_i))
            gauss = tfd.Mixture(
                cat=tfd.Categorical(probs=weights), components=components)
            sample = gauss.sample()

        with tf.variable_scope('loss'):
            negloglike = -gauss.log_prob(decoder_output)
            weighted_reconstruction = tf.reduce_mean(
                tf.expand_dims(weights, 2) * means, 3)
            mdn_loss = tf.reduce_mean(negloglike)
            mse_loss = tf.losses.mean_squared_error(weighted_reconstruction,
                                                    decoder_output)
            loss = mdn_loss
    else:
        with tf.variable_scope('loss'):
            mdn_loss = tf.reduce_mean(tf.reduce_sum([[0.0]], 1))
            mse_loss = tf.losses.mean_squared_error(outputs[0], decoder_output)
            loss = mse_loss
            # No MDN head means no mixture to sample from; fall back to the raw
            # decoder outputs so every key in the return dict below is defined.
            sample = outputs[0]
            weighted_reconstruction = outputs[0]

    return {
        'source': source,
        'target': target,
        'keep_prob': keep_prob,
        'encoding': encoder_state,
        'decoding': infer_outputs,
        'sample': sample,
        'weighted': weighted_reconstruction,
        'loss': loss,
        'mdn_loss': mdn_loss,
        'mse_loss': mse_loss
    }