def eval(self, values, grad=False, grad_loop=False):
        ''' Takes a map of variable names to variable values '''
        assert(isinstance(values, Variable))
        ################## Start FOPPL input ##########
        values = Variable(values.data, requires_grad=True)
        a = VariableCast(0.0)
        b = VariableCast(1.0)

        normal_obj = dis.Normal(a,b)
        c = VariableCast(3.0)
        i = 0
        logp_x  = normal_obj.logpdf(values)
        grad1 = self.calc_grad(logp_x, values)
        while i < 10:
            normal_obj_while = dis.Normal(values, c )
            values = Variable(normal_obj_while.sample().data, requires_grad = True)
            i = i + 1
        logp_x_g_x = normal_obj_while.logpdf(values)
        grad2 = self.calc_grad(logp_x_g_x, values)
        gradients = grad1 + grad2

        logjoint = Variable.add(logp_x, logp_x_g_x)
        if grad:
            return gradients
        elif grad_loop:
            return logjoint, gradients
        else:
            return logjoint, values
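    # ----------------------------------------------------------------------
    # Note (added): VariableCast and self.calc_grad are assumed to be helpers
    # provided elsewhere in the project. A minimal, hypothetical sketch of
    # what they presumably do (kept as comments so the class body stays valid):
    #
    #   def VariableCast(value):
    #       '''Wrap a float / tensor / Variable as a torch.autograd.Variable.'''
    #       if isinstance(value, Variable):
    #           return value
    #       if torch.is_tensor(value):
    #           return Variable(value)
    #       return Variable(torch.Tensor([value]))
    #
    #   def calc_grad(self, logjoint, values):
    #       '''Gradient of the scalar log-joint w.r.t. a single leaf Variable.'''
    #       return torch.autograd.grad(logjoint, values,
    #                                  grad_outputs=torch.ones(logjoint.size()))[0]
    # ----------------------------------------------------------------------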
    def eval(self, values, grad=False, grad_loop=False):
        ''' Takes a map of variable names to variable values '''
        assert (isinstance(values, Variable))
        ################## Start FOPPL input ##########
        values = Variable(values.data, requires_grad=True)
        a = VariableCast(0.0)
        b = VariableCast(2)
        normal_obj1 = dis.Normal(a, b)
        # log of prior p(x)
        logp_x = normal_obj1.logpdf(values)
        # else:
        #     x = normal_object.sample()
        #     x = Variable(x.data, requires_grad = True)
        if torch.gt(values.data, torch.zeros(values.size()))[0][0]:
            y = VariableCast(5)
            normal_obj2 = dis.Normal(values + b, b)
            logp_y_x = normal_obj2.logpdf(y)
        else:
            y = VariableCast(-5)
            normal_obj3 = dis.Normal(values-b, b)
            logp_y_x = normal_obj3.logpdf(y)

        logjoint = Variable.add(logp_x, logp_y_x)
        if grad:
            gradients = self.calc_grad(logjoint, values)
            return gradients
        elif grad_loop:
            gradients = self.calc_grad(logjoint, values)
            return logjoint, gradients
        else:
            return logjoint, values
    def __init__(self, p, M=None):

        if M is not None:
            if isinstance(M, Variable):
                self.M = VariableCast(torch.inverse(M.data))
            else:
                self.M = VariableCast(torch.inverse(M))
        else:
            self.M = VariableCast(torch.eye(
                p.size()[0]))  # inverse of identity is identity
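        # Note (added): self.M therefore holds the *inverse* mass matrix: a
        # supplied M is inverted here, and when M is omitted the identity (its
        # own inverse) is used, so gauss_ke below can compute
        # K(p) = 0.5 * p^T M^{-1} p directly with self.M.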
Example #4
    def generate(self):
        logp = []  # empty list to store logps of each variable
        a = VariableCast(0.0)
        b = VariableCast(1)
        c1 = VariableCast(-1)
        normal_obj1 = dis.Normal(a, b)
        x = Variable(normal_obj1.sample().data, requires_grad=True)
        logp_x = normal_obj1.logpdf(x)

        if torch.gt(x.data, torch.zeros(x.size()))[0][0]:
            y = VariableCast(1)
            normal_obj2 = dis.Normal(b, b)
            logp_y_x = normal_obj2.logpdf(y)
        else:
            y = VariableCast(1)
            normal_obj3 = dis.Normal(c1, b)
            logp_y_x = normal_obj3.logpdf(y)

        logp_x_y = logp_x + logp_y_x

        return logp_x_y, x, VariableCast(self.calc_grad(logp_x_y, x))
def test():
    prog_obj = program()
    logjointOrig, values_init, init_gradient  = prog_obj.generate()
    print(logjointOrig, values_init)
    print(init_gradient)
    ham_orig                   = fake_ham(logjointOrig)
    # In the future we would have to change this line so that,
    # if values is a dictionary, we can generate a
    # momentum with the right shape.
    p0         = VariableCast(torch.randn(values_init.size()))
    kinetic_obj = Kinetic(p0)
    values     = values_init
    print('******** Before ********')
    print(p0)
    # first half step
    print(type(p0))
    print(type(init_gradient))
    p = p0 + 0.5 *  init_gradient
    print('******* After first half step ******')
    print(values)
    print(p)
    print()
    for i in range(10-1):
        print('Iter :', i )
        p      = p + 0.5 * prog_obj.eval(values,grad=True)
        values = values + 0.5 *  kinetic_obj.gauss_ke(p, grad = True)
        print('**** Inter ****')
        print(p.data)
        print(values.data)
    print('******** After ********')
    print(values)
    print(p)
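# ----------------------------------------------------------------------------
# Reference sketch (added, hypothetical): test() above hand-rolls the leapfrog
# updates, with the first momentum half step taken outside the loop. For
# comparison, a generic leapfrog integrator in terms of the log-joint gradient
# grad_logp and the kinetic-energy gradient grad_ke (both names are
# placeholders) would look like:
#
#   def leapfrog(x, p, grad_logp, grad_ke, eps, num_steps):
#       p = p + 0.5 * eps * grad_logp(x)      # first half step for momentum
#       for _ in range(num_steps - 1):
#           x = x + eps * grad_ke(p)          # full step for position
#           p = p + eps * grad_logp(x)        # full step for momentum
#       x = x + eps * grad_ke(p)              # last full step for position
#       p = p + 0.5 * eps * grad_logp(x)      # final half step for momentum
#       return x, p
#
# Here grad_logp(x) plays the role of prog_obj.eval(values, grad=True) and
# grad_ke(p) the role of kinetic_obj.gauss_ke(p, grad=True).
# ----------------------------------------------------------------------------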
    def laplace_ke(self, p, grad=False):
        self.p = VariableCast(p)
        P = Variable(self.p.data, requires_grad=True)
        K = torch.sign(P).mm(self.M)
        if grad:
            return self.ke_gradients(P, K)
        else:
            return K
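        # Note (added): for a Laplace momentum the kinetic energy is usually
        # K(p) = sum_i |p_i| (per unit mass), whose gradient w.r.t. p is
        # sign(p). The value returned above, torch.sign(P).mm(self.M), appears
        # to correspond to that gradient scaled by the stored inverse mass
        # matrix rather than to the energy itself.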
Example #7
    def eval(self, values, grad=False, grad2=False):
        ''' Takes a map of variable names to variable values '''
        a = VariableCast(0.0)
        b = VariableCast(1)
        c1 = VariableCast(-1)
        normal_obj1 = dis.Normal(a, b)
        values = Variable(values.data, requires_grad=True)
        logp_x = normal_obj1.logpdf(values)
        # else:
        #     x = normal_object.sample()
        #     x = Variable(x.data, requires_grad = True)
        if torch.gt(values.data, torch.zeros(values.size()))[0][0]:
            y = VariableCast(1)
            normal_obj2 = dis.Normal(b, b)
            logp_y_x = normal_obj2.logpdf(y)
        else:
            y = VariableCast(1)
            normal_obj3 = dis.Normal(c1, b)
            logp_y_x = normal_obj3.logpdf(y)

        logjoint = Variable.add(logp_x, logp_y_x)
        if grad:
            gradients = self.calc_grad(logjoint, values)
            return VariableCast(gradients)
        elif grad2:
            gradients = self.calc_grad(logjoint, values)
            return logjoint, VariableCast(gradients)
        else:
            return logjoint, values
    def gauss_ke(self, p, grad=False):
        ''' (p dot p) / 2 with mass matrix M = \mathbb{I}_{dim,dim} '''
        self.p = VariableCast(p)
        P = Variable(self.p.data, requires_grad=True)
        K = 0.5 * P.t().mm(self.M).mm(P)

        if grad:
            return self.ke_gradients(P, K)
        else:
            return K
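    # ----------------------------------------------------------------------
    # Sanity check (added, hypothetical): for the Gaussian kinetic energy
    # K(p) = 0.5 * p^T M^{-1} p above (self.M stores the inverse mass matrix),
    # the gradient needed by the leapfrog position update is dK/dp = M^{-1} p.
    # A minimal autograd check with an identity mass matrix, assuming only torch:
    #
    #   import torch
    #   from torch.autograd import Variable
    #   p = Variable(torch.randn(3, 1), requires_grad=True)
    #   Minv = Variable(torch.eye(3))
    #   K = 0.5 * p.t().mm(Minv).mm(p)
    #   K.backward()
    #   print(p.grad)   # equals Minv.mm(p), i.e. p itself here (up to float error)
    # ----------------------------------------------------------------------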
    def eval(self, values, grad= False, grad_loop= False):
        ''' Takes a map of variable names to variable values. This will be called repeatedly
            within the leapfrog step.

        values      -       Type: python dict object
                            Size: len(self.params)
                            Description: dictionary of 'parameters of interest'
        grad        -       Type: bool
                            Size: -
                            Description: Flag to denote whether the gradients are needed or not
        '''
        logp = []  # empty list to store logps of each variable (in addition to the FOPPL input)
        assert (isinstance(values, Variable))
        ################## Start FOPPL input ##########
        values = Variable(values.data, requires_grad = True)
        a = VariableCast(0.0)
        b = VariableCast(2.236)
        normal_object = dis.Normal(a, b)

        std  = VariableCast(1.4142)
        obs2 = VariableCast(7.0)
        # Need a better way of dealing with values. As ideally we have a dictionary (hash map)
        # then we say if values['x']
        # values[0,:] = Variable(values[0,:].data, requires_grad  = True)
        p_y_g_x    = dis.Normal(values[0,:], std)

        logp.append(normal_object.logpdf(values[0,:]))
        logp.append(p_y_g_x.logpdf(obs2))

        ################# End FOPPL output ############
        logjoint = VariableCast(torch.zeros(1, 1))

        for logprob in logp:
            logjoint = logjoint + logprob
        # grad_loop is a hack so that we can call this at the start
        if grad:
            gradients = self.calc_grad(logjoint, values)
            return gradients
        elif grad_loop:
            gradients = self.calc_grad(logjoint, values)
            return logjoint, gradients
        else:
            return logjoint, values
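    # ----------------------------------------------------------------------
    # Sanity note (added): with b = 2.236 ~ sqrt(5) and std = 1.4142 ~ sqrt(2),
    # the model above is x ~ N(0, variance 5) with observation 7.0 ~ N(x, variance 2),
    # so, up to additive constants,
    #   log p(x, y=7) = -x^2 / (2 * 5) - (7 - x)^2 / (2 * 2)
    # and the gradient returned by calc_grad should be approximately
    #   d/dx log p = -x / 5 + (7 - x) / 2,
    # which corresponds to a Gaussian posterior with mean 5 and variance 1/0.7.
    # ----------------------------------------------------------------------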
    def generate(self):
        dim = 1
        logp = []
        params = []
        values = Variable(torch.FloatTensor(1, dim).zero_())
        a = VariableCast(0.0)
        c1 = VariableCast(10.0)
        b = VariableCast(1.0)
        c2 = VariableCast(2.0)
        normal_obj1 = dis.Normal(a, b)
        x1 = Variable(normal_obj1.sample().data, requires_grad=True)
        params.append(x1)
        logp_x1 = normal_obj1.logpdf(x1)
        logp.append(logp_x1)
        print(torch.gt(x1.data, 0.5*torch.ones(x1.size()))[0][0])
        if torch.gt(x1.data, 0.5*torch.ones(x1.size()))[0][0]:
            normal_obj2 = dis.Normal(c1, c2)
            x2          = Variable(normal_obj2.sample().data, requires_grad = True)
            params.append(x2)
            logp_x2_x1  = normal_obj2.logpdf(x2)
            logp.append(logp_x2_x1)


        logjoint = VariableCast(torch.zeros(1, 1))
        for logprob in logp:
            logjoint = logprob + logjoint

        for i in range(len(params)):
            if i == 0:
                values = params[i]
            else:
                values = torch.cat((values, params[i]), dim=0)

        grad = torch.autograd.grad(logjoint, params, grad_outputs=torch.ones(values.size()))
        gradients   = torch.zeros(1,1)
        for i in grad:
            gradients += 1/values.size()[0] * i.data
        gradients = Variable(gradients)


        if len(params) > 1:
            values = values[1,:].unsqueeze(-1)
        return logjoint,values, gradients, dim
    def eval(self, values, grad=False, grad_loop=False):
        ''' Takes a map of variable names to variable values.
        1: double x;
        2: x ~ Gaussian(0, 1);
        3: if (x > 0.5) then
        4:     x ~ Gaussian(10, 2);
        5: return x;
        https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/final.pdf
        '''
        logp = []
        assert (isinstance(values, Variable))
        values = Variable(values.data, requires_grad = True)
        ################## Start FOPPL input ##########
        a = VariableCast(0.0)
        c1 = VariableCast(10.0)
        b = VariableCast(1.0)
        c2 = VariableCast(1.41)
        normal_obj1 = dis.Normal(a, b)
        logp_x1 = normal_obj1.logpdf(values)
        logp.append(logp_x1)
        # else:
        #     x = normal_object.sample()
        #     x = Variable(x.data, requires_grad = True)
        if torch.gt(values.data, torch.Tensor([0.5]))[0]:
            normal_obj2 = dis.Normal(c1, c2)
            logp_x1_x1 = normal_obj2.logpdf(values)
            logp.append(logp_x1_x1)

        logjoint = VariableCast(torch.zeros(1, 1))
        for logprob in logp:
            logjoint = logprob + logjoint

        if grad:
            gradients = self.calc_grad(logjoint, values)
            return gradients
        elif grad_loop:
            gradients = self.calc_grad(logjoint, values)
            return logjoint, gradients
        else:
            return logjoint, values
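    # ----------------------------------------------------------------------
    # Sanity note (added): this eval scores the branching model from the
    # docstring, so up to constants
    #   log p(x) = log N(x; 0, 1) + [x > 0.5] * log N(x; 10, 1.41^2),
    # where c2 = 1.41 ~ sqrt(2) is presumably the std of the Gaussian(10, 2)
    # (i.e. variance 2) in the quoted program.
    # ----------------------------------------------------------------------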
Example #12
    def generate(self):
        ''' Generates the initial state and returns the samples and logjoint evaluated at initial samples  '''

        ################## Start FOPPL input ##########
        logp = []  # empty list to store logps of each variable
        a = VariableCast(0.0)
        b = VariableCast(2.236)
        normal_object = dis.Normal(a, b)
        x = Variable(normal_object.sample().data, requires_grad=True)

        std = VariableCast(1.4142)
        obs2 = VariableCast(7.0)
        p_y_g_x = dis.Normal(x, std)

        # TODO: Ask Yuan whether, once an '.logpdf' method has been initiated, we can do a
        # logp.append(<'variable upon which the .logpdf method was used'>)
        logp.append(normal_object.logpdf(x))
        logp.append(p_y_g_x.logpdf(obs2))
        # TODO: We will need to sum all the logs here.
        # Do I have them stored in a dictionary with the value,
        # or do we have a separate thing for the logs?
        ################# End FOPPL output ############

        # sum up all logs
        logp_x_y = VariableCast(torch.zeros(1, 1))
        for logprob in logp:
            logp_x_y = logp_x_y + logprob
        return logp_x_y, x, VariableCast(self.calc_grad(logp_x_y, x))
    def generate(self):
        ''' Generates the initial state and returns the samples and logjoint evaluated at initial samples  '''

        ################## Start FOPPL input ##########
        logp = []
        dim = 1
        params = Variable(torch.FloatTensor(1,dim).zero_())
        a = VariableCast(0.0)
        b = VariableCast(2.236)
        normal_object = dis.Normal(a, b)
        x = Variable(normal_object.sample().data, requires_grad = True)
        params = x

        std  = VariableCast(1.4142)
        obs2 = VariableCast(7.0)
        p_y_g_x    = dis.Normal(params[0,:], std)

        logp.append(normal_object.logpdf(params))
        logp.append(p_y_g_x.logpdf(obs2))

        ################# End FOPPL output ############
        dim_values = params.size()[0]

        # sum up all logs
        logp_x_y   = VariableCast(torch.zeros(1,1))
        for logprob in logp:
            logp_x_y = logp_x_y + logprob
        return logp_x_y, params, VariableCast(self.calc_grad(logp_x_y,params)), dim_values
    def generate(self):
        dim = 1
        params = Variable(torch.FloatTensor(1, dim).zero_())
        a = VariableCast(0)
        b = VariableCast(2)
        normal_obj1 = dis.Normal(a, b)
        x = Variable(normal_obj1.sample().data, requires_grad=True)
        params = x
        logp_x = normal_obj1.logpdf(x)

        if torch.gt(x.data, torch.zeros(x.size()))[0][0]:
            y = VariableCast(5)
            normal_obj2 = dis.Normal(x+b, b)
            logp_y_x = normal_obj2.logpdf(y)
        else:
            y = VariableCast(-5)
            normal_obj3 = dis.Normal(x-b, b)
            logp_y_x = normal_obj3.logpdf(y)

        logp_x_y = logp_x + logp_y_x

        return logp_x_y, params, self.calc_grad(logp_x_y, params), dim
    def generate(self):
        dim    = 1
        params = Variable(torch.FloatTensor(1, dim).zero_())
        a = VariableCast(0.0)
        b = VariableCast(1.0)
        normal_obj1 = dis.Normal(a, b)
        x = Variable(normal_obj1.sample().data, requires_grad=True)
        logp_x = normal_obj1.logpdf(x)
        c     = VariableCast(3.0)
        grad1 = self.calc_grad(logp_x, x)
        i = 0
        while i < 10:
            normal_obj_while = dis.Normal(x, c)
            x = Variable(normal_obj_while.sample().data, requires_grad=True)
            i = i + 1
        logp_x_g_x = normal_obj_while.logpdf(x)
        grad2  = self.calc_grad(logp_x_g_x, x)
        gradients = grad1 + grad2


        logp_x_y = logp_x + logp_x_g_x


        return logp_x_y, x, gradients, dim
    def generate(self):
        ''' Returns: log_joint - a tensor.float of size (1, 1)
                     params    - a Variable FloatTensor of size (#parameters of interest, dim)
                                 containing all variables
                     gradients - a Variable tensor of gradients w.r.t. the parameters
        '''
        # I will need Yuan to spit out the number of dimensions of  the parameters
        # of interest
        dim   = 1
        logp   = []
        params  = []
        values  = Variable(torch.FloatTensor(1,dim).zero_())
        c23582 = VariableCast(0.0).unsqueeze(-1)
        c23583 = VariableCast(10.0).unsqueeze(-1)
        normal_obj1 = dis.Normal(c23582, c23583)
        x23474 = Variable(normal_obj1.sample().data, requires_grad = True)  # sample
        # append first entry of params
        params.append(x23474)
        p23585 = normal_obj1.logpdf(x23474)  # prior
        logp.append(p23585)
        c23586 = VariableCast(0.0).unsqueeze(-1)
        c23587 = VariableCast(10.0).unsqueeze(-1)
        normal_obj2 = dis.Normal(c23586, c23587)
        x23471 = Variable(normal_obj2.sample().data, requires_grad = True)  # sample
        # append second entry to params
        params.append(x23471)
        p23589 = normal_obj2.logpdf(x23471)  # prior
        logp.append(p23589)
        c23590 = VariableCast(1.0).unsqueeze(-1)
        x23591 = x23471 * c23590 + x23474 # some problem on Variable, Variable.data

        # x23592 = Variable(x23591.data + x23474.data, requires_grad = True)

        c23593 = VariableCast(1.0).unsqueeze(-1)
        normal_obj2 = dis.Normal(x23591, c23593)

        c23595 = VariableCast(2.1).unsqueeze(-1)
        y23481 = c23595
        p23596 = normal_obj2.logpdf(y23481)  # obs, log likelihood
        logp.append(p23596)
        c23597 = VariableCast(2.0).unsqueeze(-1)

        # This is highly likely to be the next variable
        x23598 = x23471.mm(c23597) + x23474
        # x23599 = torch.add(x23598, x23474)
        c23600 = VariableCast(1.0).unsqueeze(-1)
        # x23601 = dis.Normal(x23599, c23600)

        normal_obj3 = dis.Normal(x23598, c23600)
        c23602 = VariableCast(3.9).unsqueeze(-1)
        y23502 = c23602
        p23603 = normal_obj3.logpdf(y23502)  # obs, log likelihood
        logp.append(p23603)
        c23604 = VariableCast(3.0).unsqueeze(-1)
        x23605 = x23471.mm(c23604)
        x23606 = torch.add(x23605, x23474)
        c23607 = VariableCast(1.0).unsqueeze(-1)
        normal_obj4 = dis.Normal(x23606, c23607)
        c23609 = VariableCast(5.3).unsqueeze(-1)
        y23527 = c23609
        p23610 = normal_obj4.logpdf(y23527)  # obs, log likelihood
        logp.append(p23610)
        p23611 = Variable(torch.zeros(1,1))
        for logprob in logp:
            p23611 = logprob + p23611
        for i in range(len(params)):
            if i == 0:
                values = params[i]
            else:
                values = torch.cat((values, params[i]), dim=0)
        dim_values = values.size()[0]
        # dim_values = values.size()[0]
        # return E from the model
        # Do I want the gradients of x23471 and x23474? and nothing else.
        grad = torch.autograd.grad(p23611, params, grad_outputs=torch.ones(values.size()))
        # For some reason all the gradients are d times bigger than they should be, where d is the dimension
        gradients = Variable(torch.Tensor(values.size()))
        for i in range(len(params)):
            gradients[i, :] = 1 / len(params) * grad[i][0].data.unsqueeze(0)  # ensures that each row of the grads represents a params grad
        return p23611,values, gradients, dim_values
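    # ----------------------------------------------------------------------
    # Note on the rescaling above (added): the log-joint p23611 is a (1, 1)
    # scalar, but grad_outputs=torch.ones(values.size()) supplies len(params)
    # ones, so the backward pass is presumably accumulated that many times;
    # dividing each row by len(params) would undo this, which is a plausible
    # explanation for the "d times bigger" gradients noted in the comment.
    # ----------------------------------------------------------------------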
    def generate(self):
        dim   = 1
        logp   = []
        params  = []
        values  = Variable(torch.FloatTensor(1,dim).zero_())
        x32131 = [0.5, 0.5]
        x32132 = Categorical(p=x32131)
        x31950 = x32132.sample()  # sample
        p32133 = x32132.log_pdf(x31950)  # from prior
        x32134 = [0.5, 0.5]
        x32135 = Categorical(p=x32134)
        x31918 = x32135.sample()  # sample
        p32136 = x32135.log_pdf(x31918)  # from prior
        x32137 = [0.5, 0.5]
        x32138 = Categorical(p=x32137)
        x31930 = x32138.sample()  # sample
        p32139 = x32138.log_pdf(x31930)  # from prior
        x32140 = [0.5, 0.5]
        x32141 = Categorical(p=x32140)
        x31990 = x32141.sample()  # sample
        p32142 = x32141.log_pdf(x31990)  # from prior
        x32143 = [0.5, 0.5]
        x32144 = Categorical(p=x32143)
        x31970 = x32144.sample()  # sample
        p32145 = x32144.log_pdf(x31970)  # from prior

        x32146 = Normal(mean=0, std=2)
        x31911 = Variable(x32146.sample().data, requires_grad = True)  # sample
        p32147 = x32146.log_pdf(x31911)  # from prior
        x32148 = Normal(mean=0, std=2)
        x31908 = x32148.sample()  # sample
        p32149 = x32148.log_pdf(x31908)  # from prior
        x32150 = [x31908, x31911]
        x32151 = x32150[int(x31930)]
        x32152 = Normal(mean=x32151, std=2)
        y31939 = -2.5
        p32153 = x32152.log_pdf(y31939)  # from observe
        x32154 = [x31908, x31911]
        x32155 = x32154[int(x31990)]
        x32156 = Normal(mean=x32155, std=2)
        y31999 = -2.2
        p32157 = x32156.log_pdf(y31999)  # from observe
        x32158 = [x31908, x31911]
        x32159 = x32158[int(x31918)]
        x32160 = Normal(mean=x32159, std=2)
        y31923 = -2.0
        p32161 = x32160.log_pdf(y31923)  # from observe
        x32162 = [x31908, x31911]
        x32163 = x32162[int(x31970)]
        x32164 = Normal(mean=x32163, std=2)
        y31979 = -1.9
        p32165 = x32164.log_pdf(y31979)  # from observe
        x32166 = [x31908, x31911]
        x32167 = x32166[int(x31950)]
        x32168 = Normal(mean=x32167, std=2)
        y31959 = VariableCast(-1.7)
        p32169 = x32168.log_pdf(y31959)  # from observe
        p32170 = p32169 + p32149 + p32147 + p32153 + p32145 + p32157 + p32142 + p32139 + p32136 + p32161 + p32165 + p32133  # total log joint
        # printing original E in foppl:
        x32171 = [x31908, x31911]
        print(x32171)
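    # ----------------------------------------------------------------------
    # Model summary (added): the block above is a two-component Gaussian
    # mixture. x31908 and x31911 are the component means (each ~ Normal(0, std=2)),
    # the Categorical([0.5, 0.5]) draws assign each of the five observations
    # (-2.5, -2.2, -2.0, -1.9, -1.7) to one of the two means, and p32170
    # accumulates the full log joint. The printed expression E is the pair of
    # component means.
    # ----------------------------------------------------------------------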
    def eval(self, values, grad=False, grad_loop=False):
        logp   = []
        assert(isinstance(values, Variable))
        values = Variable(values.data, requires_grad=True)
        c23582 = VariableCast(0.0).unsqueeze(-1)
        c23583 = VariableCast(10.0).unsqueeze(-1)
        normal_obj1 = dis.Normal(c23582, c23583)

        x23474 = values[0,:].unsqueeze(-1)# sample
        # append first entry of params
        p23585 = normal_obj1.logpdf(x23474)  # prior
        logp.append(p23585)
        c23586 = VariableCast(0.0).unsqueeze(-1)
        c23587 = VariableCast(10.0).unsqueeze(-1)
        normal_obj2 = dis.Normal(c23586, c23587)
        x23471 = values[1,:].unsqueeze(-1)# sample
        p23589 = normal_obj2.logpdf(x23471)  # prior
        logp.append(p23589)
        c23590 = VariableCast(1.0).unsqueeze(-1)
        x23591 = x23471 * c23590 + x23474  # some problem on Variable, Variable.data

        # x23592 = Variable(x23591.data + x23474.data, requires_grad = True)

        c23593 = VariableCast(1.0).unsqueeze(-1)
        normal_obj2 = dis.Normal(x23591, c23593)

        c23595 = VariableCast(2.1).unsqueeze(-1)
        y23481 = c23595
        p23596 = normal_obj2.logpdf(y23481)  # obs, log likelihood
        logp.append(p23596)
        c23597 = VariableCast(2.0).unsqueeze(-1)

        # This is highly likely to be the next variable
        x23598 = torch.mul(x23471, c23597) + x23474
        # x23599 = torch.add(x23598, x23474)
        c23600 = VariableCast(1.0).unsqueeze(-1)
        # x23601 = dis.Normal(x23599, c23600)

        normal_obj3 = dis.Normal(x23598, c23600)
        c23602 = VariableCast(3.9).unsqueeze(-1)
        y23502 = c23602
        p23603 = normal_obj3.logpdf(y23502)  # obs, log likelihood
        logp.append(p23603)
        c23604 = VariableCast(3.0).unsqueeze(-1)
        x23605 = torch.mul(x23471, c23604)
        x23606 = torch.add(x23605, x23474)
        c23607 = VariableCast(1.0).unsqueeze(-1)
        normal_obj4 = dis.Normal(x23606, c23607)
        c23609 = VariableCast(5.3).unsqueeze(-1)
        y23527 = c23609
        p23610 = normal_obj4.logpdf(y23527)  # obs, log likelihood
        logp.append(p23610)
        p23611 = Variable(torch.zeros(1, 1))
        for logprob in logp:
            p23611 = logprob + p23611
        if grad:
            gradients = 1 / values.size()[0]  * torch.autograd.grad(p23611, values, grad_outputs=torch.ones(values.size()))[0].data
            # For some reason all the gradients are d times bigger than they should be, where d is the dimension
            return Variable(gradients)
        elif grad_loop:
            gradients = 1 / values.size()[0] * \
                        torch.autograd.grad(p23611, values, grad_outputs=torch.ones(values.size()))[0].data
            # For some reason all the gradients are d times bigger than they should be, where d is the dimension
            return p23611, Variable(gradients)
        else:
            return p23611, values
Example #19
    def eval(self, values, grad=False, grad2=False):
        logp = []
        parms = []
        for value in values:
            if isinstance(value, Variable):
                temp = Variable(value.data, requires_grad=True)
            else:
                temp = VariableCast(value)
                temp = Variable(temp.data, requires_grad=True)
            parms.append(temp)
        c23582 = VariableCast(torch.Tensor([0.0]))
        c23583 = VariableCast(torch.Tensor([10.0]))
        normal_obj1 = dis.Normal(c23582, c23583)
        x23474 = parms[0]  # sample
        p23585 = normal_obj1.logpdf(x23474)  # prior
        logp.append(p23585)
        c23586 = VariableCast(torch.Tensor([0.0]))
        c23587 = VariableCast(torch.Tensor([10.0]))
        normal_obj2 = dis.Normal(c23586, c23587)
        x23471 = parms[1]  # sample
        p23589 = normal_obj2.logpdf(x23471)  # prior
        logp.append(p23589)
        c23590 = VariableCast(torch.Tensor([1.0]))
        # Do I cast this as a variable with requires_grad = True ???
        x23591 = x23471 * c23590 + x23474  # some problem on Variable, Variable.data

        # x23592 = Variable(x23591.data + x23474.data, requires_grad = True)

        c23593 = VariableCast(torch.Tensor([1.0]))
        normal_obj2 = dis.Normal(x23591, c23593)

        c23595 = VariableCast(torch.Tensor([2.1]))
        y23481 = c23595
        p23596 = normal_obj2.logpdf(y23481)  # obs, log likelihood
        logp.append(p23596)
        c23597 = VariableCast(torch.Tensor([2.0]))

        # This is highly likely to be the next variable
        x23598 = torch.mul(x23471, c23597) + x23474
        # x23599 = torch.add(x23598, x23474)
        c23600 = VariableCast(torch.Tensor([1.0]))
        # x23601 = dis.Normal(x23599, c23600)

        normal_obj3 = dis.Normal(x23598, c23600)
        c23602 = VariableCast(torch.Tensor([3.9]))
        y23502 = c23602
        p23603 = normal_obj3.logpdf(y23502)  # obs, log likelihood
        logp.append(p23603)
        c23604 = VariableCast(torch.Tensor([3.0]))
        x23605 = torch.mul(x23471, c23604)
        x23606 = torch.add(x23605, x23474)
        c23607 = VariableCast(torch.Tensor([1.0]))
        normal_obj4 = dis.Normal(x23606, c23607)
        c23609 = VariableCast(torch.Tensor([5.3]))
        y23527 = c23609
        p23610 = normal_obj4.logpdf(y23527)  # obs, log likelihood
        logp.append(p23610)
        p23611 = VariableCast(torch.zeros(1, 1))
        for logprob in logp:
            p23611 = p23611 + logprob
        # return E from the model
        # Do I want the gradients of x23471 and x23474? and nothing else.
        if grad:
            gradients = self.calc_grad(p23611, parms)
            return VariableCast(gradients)
        elif grad2:
            gradients = self.calc_grad(p23611, parms)
            return p23611, VariableCast(gradients)
        else:
            return p23611, values