Example #1
    def logp_partial_gradient(self, variable, calculation_set=None):
        """
        gets the logp gradient of this deterministic with respect to variable
        """
        if self.verbose > 0:
            print '\t' + self.__name__ + ': logp_partial_gradient accessed.'

        if not (datatypes.is_continuous(variable)
                and datatypes.is_continuous(self)):
            return zeros(shape(variable.value))

        # Add up the gradients of log p of every child with respect to this deterministic.
        gradient = __builtin__.sum([
            child.logp_partial_gradient(self, calculation_set)
            for child in self.children
        ])

        totalGradient = 0
        # Push the summed gradient through the Jacobian of each parent slot bound to the queried variable.
        for parameter, value in self.parents.iteritems():
            if value is variable:

                totalGradient += self.apply_jacobian(parameter, variable,
                                                     gradient)

        return np.reshape(totalGradient, shape(variable.value))
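
The Deterministic variant above is a chain rule: the children's gradients with respect to this node's value are summed, then pushed through the Jacobian of the parent-to-value mapping via apply_jacobian. A minimal NumPy sketch of the same arithmetic, with all names and numbers invented for illustration (it does not use PyMC itself):

import numpy as np

# Toy setup: a deterministic node y = coef * x feeding a Normal observation with
# known precision tau. Everything here is hypothetical, chosen only to mirror the
# sum-child-gradients-then-apply-Jacobian pattern of the method above.
x = np.array([1.0, 2.0, 3.0])
coef = 0.5
y = coef * x                        # value of the deterministic node
obs = np.array([0.4, 1.1, 1.6])     # observed child data
tau = 2.0                           # precision of the Normal child

# Child gradient with respect to the deterministic's value:
# d/dy log N(obs | mu=y, tau) = tau * (obs - y)
grad_wrt_self = tau * (obs - y)

# apply_jacobian step: y = coef * x, so dy/dcoef = x, and the gradient with
# respect to the parent is the Jacobian contracted with the child gradient.
total_gradient = np.sum(x * grad_wrt_self)

print(np.reshape(total_gradient, np.shape(coef)))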
Example #2
    def logp_partial_gradient(self, variable, calculation_set=None):
        """
        Calculates the partial gradient of the posterior of self with respect to variable.
        Returns zero if self is not in calculation_set.
        """
        if (calculation_set is None) or (self in calculation_set):

            if not datatypes.is_continuous(variable):
                return zeros(shape(variable.value))

            if variable is self:
                try:
                    gradient_func = self._logp_partial_gradients['value']

                except KeyError:
                    raise NotImplementedError(
                        repr(self) + " has no gradient function for 'value'")

                gradient = np.reshape(gradient_func.get(),
                                      np.shape(variable.value))
            else:
                gradient = __builtin__.sum([
                    self._pgradient(variable, parameter, value)
                    for parameter, value in self.parents.iteritems()
                ])

            return gradient
        else:
            return 0
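
In this Stochastic variant, the gradient with respect to the node's own value comes from the 'value' entry of _logp_partial_gradients, while gradients with respect to parents are summed via _pgradient. A standalone sketch of those two branches for a Normal log-density, with the gradient callables faked by a plain dict (hypothetical names, no PyMC dependency):

import numpy as np

# Hypothetical stand-in for self._logp_partial_gradients on a Normal node
# x ~ N(mu, tau): one callable per differentiable quantity.
value = np.array([0.2, -0.4])
mu, tau = 0.0, 1.5

logp_partial_gradients = {
    # d/dx log N(x | mu, tau) = -tau * (x - mu)  -> used when variable is self
    'value': lambda: -tau * (value - mu),
    # d/dmu log N(x | mu, tau) = tau * (x - mu)  -> used when variable is the parent mu
    'mu': lambda: tau * (value - mu),
}

# "variable is self" branch: reshape to the value's shape, as in the method above.
grad_wrt_value = np.reshape(logp_partial_gradients['value'](), np.shape(value))

# Parent branch: sum the per-parent contributions (only mu matches here).
grad_wrt_mu = np.sum(logp_partial_gradients['mu']())

print(grad_wrt_value)
print(grad_wrt_mu)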
Example #3
    def logp_partial_gradient(self, variable, calculation_set=None):
        """
        Calculates the partial gradient of the posterior of self with respect to variable.
        Returns zero if self is not in calculation_set.
        """
        if (calculation_set is None) or (self in calculation_set):

            if not datatypes.is_continuous(variable):
                return zeros(shape(variable.value))

            if variable is self:
                try:
                    gradient_func = self._logp_partial_gradients["value"]

                except KeyError:
                    raise NotImplementedError(repr(self) + " has no gradient function for 'value'")

                gradient = np.reshape(gradient_func.get(), np.shape(variable.value))
            else:
                gradient = __builtin__.sum(
                    [self._pgradient(variable, parameter, value) for parameter, value in self.parents.iteritems()]
                )

            return gradient
        else:
            return 0
Example #4
    def logp_partial_gradient(self, variable, calculation_set = None):
        """
        gets the logp gradient of this deterministic with respect to variable
        """
        if self.verbose > 0:
            print '\t' + self.__name__ + ': logp_partial_gradient accessed.'

        if not (datatypes.is_continuous(variable) and datatypes.is_continuous(self)):
            return zeros(shape(variable.value))

        # Add up the gradients of log p of every child with respect to this deterministic.
        gradient = __builtin__.sum([child.logp_partial_gradient(self, calculation_set) for child in self.children ])

        totalGradient = 0
        for parameter, value in self.parents.iteritems():
            if value is variable:
                totalGradient += self.apply_jacobian(parameter, variable, gradient)

        return np.reshape(totalGradient, shape(variable.value))
Example #5
    def logp_partial_gradient(self, variable, calculation_set=None):
        """
        Calculates the partial gradient of the log-probability of self with respect to variable.
        Returns zero if self is not in calculation_set.
        """
        gradient = 0
        if (calculation_set is None) or (self in calculation_set):

            if not datatypes.is_continuous(variable):
                return zeros(shape(variable.value))
            
            for parameter, value in self.parents.iteritems():

                if value is variable:
                    try:
                        grad_func = self._logp_partial_gradients[parameter]
                    except KeyError:
                        raise NotImplementedError(repr(self) + " has no gradient function for parameter " + parameter)
                    
                    gradient = gradient + grad_func.get()
        
        return np.reshape(gradient, np.shape(variable.value))
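
This variant keys the gradient functions by parent name and accumulates a contribution for every parent slot bound to the queried variable. A self-contained sketch of that lookup-and-accumulate loop, with a made-up node class and Normal-style gradients standing in for the real objects:

import numpy as np

class FakeNode(object):
    # Minimal stand-in for a PyMC variable: only a .value attribute is needed here.
    def __init__(self, value):
        self.value = value

mu_node, tau_node = FakeNode(0.5), FakeNode(2.0)
data = np.array([1.0, 2.0])
mu, tau = mu_node.value, tau_node.value

parents = {'mu': mu_node, 'tau': tau_node}
logp_partial_gradients = {
    'mu': lambda: np.sum(tau * (data - mu)),                    # d logp / d mu
    'tau': lambda: np.sum(0.5 / tau - 0.5 * (data - mu) ** 2),  # d logp / d tau
}

def partial_gradient(variable):
    # Accumulate the gradient over every parent slot bound to `variable`,
    # mirroring the KeyError -> NotImplementedError lookup in the example above.
    gradient = 0
    for parameter, parent in parents.items():
        if parent is variable:
            try:
                grad_func = logp_partial_gradients[parameter]
            except KeyError:
                raise NotImplementedError("no gradient function for " + parameter)
            gradient = gradient + grad_func()
    return np.reshape(gradient, np.shape(variable.value))

print(partial_gradient(mu_node))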
Example #6
    def logp_partial_gradient(self, variable, calculation_set=None):
        """
        Calculates the partial gradient of the log-probability of self with respect to variable.
        Returns zero if self is not in calculation_set.
        """
        gradient = 0
        if (calculation_set is None) or (self in calculation_set):

            if not datatypes.is_continuous(variable):
                return zeros(shape(variable.value))

            for parameter, value in self.parents.iteritems():

                if value is variable:
                    try:
                        grad_func = self._logp_partial_gradients[parameter]
                    except KeyError:
                        raise NotImplementedError(
                            repr(self) +
                            " has no gradient function for parameter " +
                            parameter)

                    gradient = gradient + grad_func.get()

        return np.reshape(gradient, np.shape(variable.value))
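
Taken together, a gradient-based step method can obtain the full d log p / d variable by summing these per-node partials over a calculation set. A toy sketch of that outer accumulation (ToyNode and its fixed contributions are invented; real nodes would dispatch on variable as in the examples above):

import numpy as np

class ToyNode(object):
    # Stand-in for a Stochastic/Deterministic: exposes .value and the same
    # logp_partial_gradient signature, but returns a fixed fake contribution.
    def __init__(self, value, grad_contribution):
        self.value = np.asarray(value)
        self._grad_contribution = np.asarray(grad_contribution)

    def logp_partial_gradient(self, variable, calculation_set=None):
        return self._grad_contribution

def total_logp_gradient(variable, calculation_set):
    # Sum the contributions of every node in the calculation set and reshape
    # the result to match the variable's value, as the examples above do.
    total = 0
    for node in calculation_set:
        total = total + node.logp_partial_gradient(variable, calculation_set)
    return np.reshape(total, np.shape(variable.value))

x = ToyNode(np.zeros(3), [0.1, -0.2, 0.3])      # prior contribution
child = ToyNode(np.zeros(3), [0.5, 0.5, -0.5])  # likelihood contribution
print(total_logp_gradient(x, [x, child]))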