def _unvectorized_open_loop_prediction(self, open_loop_data):
        # Unvectorized implementation of open-loop prediction. It uses a plain for-loop with no vectorized
        # operations at all. It is meant primarily as a way to check the vectorized implementation.
        # Open Loop

        sequence_length = open_loop_data.input.sequence_length
        output_obs_encoding = open_loop_data.input.output_obs_encoding
        output_obs = open_loop_data.input.output_obs
        action = open_loop_data.input.action
        h_t = open_loop_data.input.h_t

        open_loop_data.output = []

        self.state_transition_model.set_state(h_t)

        start_time = time.time()
        for t in range(0, sequence_length):
            current_output_obs_encoding = output_obs_encoding[:, t, :]
            a_t = action[:, t, :]
            posterior = self.sample_zt_from_posterior(
                h=h_t, a=a_t, o=current_output_obs_encoding)
            z_t = posterior.z_t
            inp = torch.cat((z_t, a_t), dim=1)
            h_t = self.state_transition_model(inp.unsqueeze(1)).squeeze(1)

            likelihood_mu, likelihood_sigma = self.convolutional_decoder(
                torch.cat((h_t, z_t), dim=1))

            current_output_obs = output_obs[:, t, :]
            prior = self.sample_zt_from_prior(h=h_t, a=a_t)
            # Note that the prior's sampled z_t is not used; only prior.mu and
            # prior.sigma enter the ELBO below.
            elbo_prior = log_pdf(z_t, prior.mu, prior.sigma)
            elbo_q_likelihood = log_pdf(z_t, posterior.mu, posterior.sigma)
            elbo_likelihood = log_pdf(current_output_obs, likelihood_mu,
                                      likelihood_sigma)
            elbo = sum([
                torch.mean(x)
                for x in (elbo_likelihood, elbo_prior, -elbo_q_likelihood)
            ])
            open_loop_data.output.append((-elbo, torch.mean(elbo_likelihood)))

        print("Time taken in unvectorized version = {}".format(time.time() -
                                                               start_time))
        return open_loop_data
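
    @staticmethod
    def _reference_log_pdf(x, mu, sigma):
        # Hedged sketch (hypothetical helper, not part of the original module):
        # the `log_pdf` calls above are assumed to compute an element-wise
        # diagonal-Gaussian log-density of this form.
        return torch.distributions.Normal(loc=mu, scale=sigma).log_prob(x)
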
    def _vectorized_closed_loop_prediction(self, close_loop_data):
        # Vectorized implementation of closed-loop (imagination) prediction. It pulls some operations outside the
        # for-loop and vectorizes them. This is the primary function for doing closed-loop prediction.
        # Closed Loop

        sequence_length = close_loop_data.input.sequence_length
        imagination_length = close_loop_data.input.imagination_length
        output_obs = close_loop_data.input.output_obs
        action = close_loop_data.input.action.contiguous()
        true_h_t = close_loop_data.input.h_t \
            .view(action.shape[0], action.shape[1], -1)
        # Seed the rollout with the first `sequence_length` true hidden states, merged into the batch dimension.
        h_t = true_h_t[:, :sequence_length, :]
        h_t = merge_first_and_second_dim(h_t.contiguous())

        self.state_transition_model.set_state(h_t)
        elbo_likelihood = []
        consistency_loss = Dict()
        consistency_loss.discriminator = []
        consistency_loss.close_loop = []
        h_t_from_close_loop = None

        # Imagination: roll the model forward from the prior for `imagination_length` steps, without conditioning
        # on observations.
        for t in range(0, imagination_length):
            a_t = merge_first_and_second_dim(
                action[:, t:t + sequence_length, :].contiguous())
            prior = self.sample_zt_from_prior(h=h_t, a=a_t)
            z_t = prior.z_t
            inp = torch.cat((z_t, a_t), dim=1)
            h_t = self.state_transition_model(inp.unsqueeze(1)).squeeze(1)

            # The targets are the true (open-loop) hidden states one step ahead of the current imagination step.
            h_t_from_open_loop = true_h_t[:, t + 1:t + sequence_length + 1, :]
            h_t_from_close_loop = h_t

            if self.use_consistency_model:
                if self.is_consistency_model_euclidean:
                    h_t_from_open_loop = merge_first_and_second_dim(
                        h_t_from_open_loop.contiguous())
                else:
                    h_t_from_close_loop = unmerge_first_and_second_dim(
                        h_t_from_close_loop,
                        first_dim=-1,
                        second_dim=sequence_length)

                loss_close_loop, loss_discriminator = self.consistency_model(
                    (h_t_from_open_loop, h_t_from_close_loop))
                consistency_loss.close_loop.append(loss_close_loop)
                consistency_loss.discriminator.append(loss_discriminator)

            likelihood_mu, likelihood_sigma = self.convolutional_decoder(
                torch.cat((h_t, z_t), dim=1))

            elbo_likelihood.append(
                log_pdf(
                    merge_first_and_second_dim(
                        output_obs[:, t:t + sequence_length, :].contiguous()),
                    likelihood_mu, likelihood_sigma))

        elbo_likelihood = torch.mean(
            torch.stack([torch.mean(x) for x in elbo_likelihood]))

        for key in consistency_loss:
            # An empty list means the consistency model did not produce this loss.
            if consistency_loss[key]:
                consistency_loss[key] = torch.mean(
                    torch.cat(consistency_loss[key]))
            else:
                consistency_loss[key] = torch.tensor(0.0).to(
                    device=elbo_likelihood.device)

        close_loop_output = Dict()
        close_loop_output.loss = -elbo_likelihood
        close_loop_output.likelihood = elbo_likelihood
        close_loop_output.consistency_loss = consistency_loss.close_loop
        discriminator_output = Dict()
        discriminator_output.loss = consistency_loss.discriminator

        return close_loop_output, discriminator_output
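
    @staticmethod
    def _reference_merge_first_and_second_dim(batch):
        # Hedged sketch (hypothetical helper): `merge_first_and_second_dim`
        # is assumed to collapse (batch, time, ...) into (batch * time, ...),
        # which is what lets the vectorized paths run per-timestep ops as one
        # large batched op.
        shape = batch.shape
        return batch.view(shape[0] * shape[1], *shape[2:])

    @staticmethod
    def _reference_unmerge_first_and_second_dim(batch, first_dim, second_dim):
        # Hedged sketch of the assumed inverse: restores the time dimension.
        # Passing first_dim=-1 lets `view` infer the leading size, matching
        # the call in the closed-loop path above.
        return batch.view(first_dim, second_dim, *batch.shape[1:])
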
    def _vectorized_open_loop_prediction(self, open_loop_data):
        # Vectorized implementation of open-loop prediction. It pulls some operations outside the for-loop and
        # vectorizes them. This is the primary function for doing open-loop prediction.
        # Open Loop

        unroll_length = open_loop_data.input.unroll_length
        output_obs_encoding = open_loop_data.input.output_obs_encoding
        output_obs = open_loop_data.input.output_obs
        action = open_loop_data.input.action
        h_t = open_loop_data.input.h_t

        self.state_transition_model.set_state(h_t)

        # This datastructure is a container for the per-timestep variables we track. It avoids writing a separate
        # accumulator statement for each variable.
        temp_data = Dict()

        vars_to_track = ["h_t", "z_t", "posterior_mu", "posterior_sigma"]

        for name in vars_to_track:
            temp_data[name + "_list"] = []

        # Roll forward for `unroll_length` steps, conditioning each transition on a posterior sample.
        for t in range(0, unroll_length):
            current_output_obs_encoding = output_obs_encoding[:, t, :]
            a_t = action[:, t, :]
            posterior = self.sample_zt_from_posterior(
                h=h_t, a=a_t, o=current_output_obs_encoding)
            z_t = posterior.z_t
            inp = torch.cat((z_t, a_t), dim=1)
            h_t = self.state_transition_model(inp.unsqueeze(1)).squeeze(1)
            # Collect the tracked tensors explicitly (avoiding `eval`), adding
            # a time dimension so they can be concatenated along dim=1 later.
            tracked = {
                "h_t": h_t,
                "z_t": z_t,
                "posterior_mu": posterior.mu,
                "posterior_sigma": posterior.sigma,
            }
            for name in vars_to_track:
                temp_data[name + "_list"].append(tracked[name].unsqueeze(1))

        for name in vars_to_track:
            temp_data[name] = merge_first_and_second_dim(
                torch.cat(temp_data[name + "_list"], dim=1))

        temp_data.a_t = merge_first_and_second_dim(
            action[:, :unroll_length, :].contiguous())

        temp_data.prior = self.sample_zt_from_prior(h=temp_data.h_t,
                                                    a=temp_data.a_t)

        likelihood_mu, likelihood_sigma = self.convolutional_decoder(
            torch.cat((temp_data.h_t, temp_data.z_t), dim=1))

        elbo_prior = log_pdf(temp_data.z_t, temp_data.prior.mu,
                             temp_data.prior.sigma)
        elbo_q_likelihood = log_pdf(temp_data.z_t, temp_data.posterior_mu,
                                    temp_data.posterior_sigma)
        elbo_likelihood = log_pdf(
            merge_first_and_second_dim(output_obs.contiguous()), likelihood_mu,
            likelihood_sigma)
        elbo = sum([
            torch.mean(x)
            for x in (elbo_likelihood, elbo_prior, -elbo_q_likelihood)
        ])
        open_loop_data.output = Dict()
        open_loop_data.output.loss = -elbo
        open_loop_data.output.log_likelihood = torch.mean(elbo_likelihood)

        open_loop_data.h_t = temp_data.h_t.detach()

        return open_loop_data
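
    def _check_open_loop_implementations(self, open_loop_data):
        # Hedged sanity-check sketch (hypothetical helper, not part of the
        # original module): the unvectorized pass exists to validate the
        # vectorized one, so, assuming `open_loop_data.input` carries matching
        # `sequence_length` and `unroll_length` fields, the mean of the
        # per-timestep losses from the unvectorized pass should closely track
        # the single vectorized loss. Exact equality is not guaranteed even
        # with a fixed seed, because the two paths consume RNG draws for the
        # prior/posterior samples in a different order.
        torch.manual_seed(0)
        unvectorized = self._unvectorized_open_loop_prediction(open_loop_data)
        per_step_losses = [loss for loss, _ in unvectorized.output]
        mean_unvectorized_loss = torch.mean(torch.stack(per_step_losses))
        torch.manual_seed(0)
        vectorized = self._vectorized_open_loop_prediction(open_loop_data)
        print("unvectorized loss = {}, vectorized loss = {}".format(
            mean_unvectorized_loss.item(), vectorized.output.loss.item()))
        return vectorized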