Example #1
    def gen_lazy_function(self):
        """
        Will be called by Node at instantiation.
        """

        # If value argument to __init__ was None, draw value from random method.
        if self._value is None:

            # Use random function if provided
            if self._random is not None:
                self.value = self._random(**self._parents.value)

            # Otherwise raise an error: no initial value or random method was provided.
            else:
                raise ValueError, 'Stochastic ' + self.__name__ + "'s value initialized to None; no initial value or random method provided."

        arguments = {}
        arguments.update(self.parents)
        arguments['value'] = self
        arguments = DictContainer(arguments)

        self._logp = LazyFunction(fun = self._logp_fun,
                                    arguments = arguments,
                                    ultimate_args = self.extended_parents | set([self]),
                                    cache_depth = self._cache_depth)
        self._logp.force_compute()
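
A note on the pattern: every gen_lazy_function in these examples wraps a plain function in LazyFunction(fun, arguments, ultimate_args, cache_depth) and then calls force_compute(). The sketch below illustrates the caching contract those calls rely on; it is a hypothetical stand-in with invented names (ToyVariable, ToyLazyFunction), not PyMC's actual implementation.

class ToyVariable(object):
    """Hypothetical stand-in for a variable: a value plus a version
    counter that ticks on every assignment (cf. Counter.click below)."""
    def __init__(self, value):
        self._value, self.version = value, 0
    def get_value(self):
        return self._value
    def set_value(self, v):
        self._value = v
        self.version += 1
    value = property(get_value, set_value)

class ToyLazyFunction(object):
    """Hypothetical sketch of lazy, memoized evaluation: keep the last
    cache_depth results, keyed on the versions of the arguments."""
    def __init__(self, fun, arguments, cache_depth=2):
        self.fun, self.arguments, self.cache_depth = fun, arguments, cache_depth
        self._cache = []  # list of (key, result) pairs, most recent first
    def _key(self):
        return tuple(sorted((n, a.version) for n, a in self.arguments.items()))
    def force_compute(self):
        # Evaluate unconditionally and push the result onto the cache.
        values = dict((n, a.value) for n, a in self.arguments.items())
        result = self.fun(**values)
        self._cache.insert(0, (self._key(), result))
        del self._cache[self.cache_depth:]
        return result
    def get(self):
        # Return a cached result when no argument has changed since.
        key = self._key()
        for k, r in self._cache:
            if k == key:
                return r
        return self.force_compute()

mu = ToyVariable(0.)
lazy = ToyLazyFunction(lambda mu: -0.5 * mu ** 2, {'mu': mu})
lazy.force_compute()
print(lazy.get())  # cache hit: no recomputation
mu.value = 1.      # version ticks, so the cache key changes
print(lazy.get())  # recomputed for the new value
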
Example #2
    def gen_lazy_function(self):

        self._logp = LazyFunction(fun = self._logp_fun,
                                    arguments = self.parents,
                                    ultimate_args = self.extended_parents,
                                    cache_depth = self._cache_depth)
        self._logp.force_compute()
Example #3
    def gen_lazy_function(self):

        self._logp = LazyFunction(fun = self._logp_fun,
                                    arguments = self.parents,
                                    ultimate_args = self.extended_parents,
                                    cache_depth = self._cache_depth)
        self._logp.force_compute()

        self._logp_partial_gradients = {}
        for parameter, function in self._logp_partial_gradients_functions.iteritems():
            lazy_logp_partial_gradients = LazyFunction(fun = function,
                                            arguments = self.parents,
                                            ultimate_args = self.extended_parents,
                                            cache_depth = self._cache_depth)
            lazy_logp_partial_gradients.force_compute()
            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradients
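
Example #3 registers one lazy evaluator per entry of _logp_partial_gradients_functions, stored under the parameter's name. The consumer side of that table appears in _pgradient in Example #9; the helper below is a hedged sketch of the lookup (lookup_gradient is a hypothetical name, not part of the class).

import numpy as np

def lookup_gradient(stochastic, parameter, target_shape):
    # Fetch the lazy gradient registered for `parameter` and reshape it
    # to the shape of the variable being differentiated against.
    try:
        lazy = stochastic._logp_partial_gradients[parameter]
    except KeyError:
        raise NotImplementedError(
            "%r has no gradient function for parameter %s" % (stochastic, parameter))
    return np.reshape(lazy.get(), target_shape)
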
Example #4
    def gen_lazy_function(self):

        self._logp = LazyFunction(fun = self._logp_fun,
                                    arguments = self.parents,
                                    ultimate_args = self.extended_parents,
                                    cache_depth = self._cache_depth)
        self._logp.force_compute()
Example #5
    def gen_lazy_function(self):
        """
        Will be called by Node at instantiation.
        """

        # If value argument to __init__ was None, draw value from random method.
        if self._value is None:

            # Use random function if provided
            if self._random is not None:
                self.value = self._random(**self._parents.value)

            # Otherwise raise an error: no initial value or random method was provided.
            else:
                raise ValueError, 'Stochastic ' + self.__name__ + "'s value initialized to None; no initial value or random method provided."

        arguments = {}
        arguments.update(self.parents)
        arguments['value'] = self
        arguments = DictContainer(arguments)

        self._logp = LazyFunction(fun = self._logp_fun,
                                    arguments = arguments,
                                    ultimate_args = self.extended_parents | set([self]),
                                    cache_depth = self._cache_depth)
        self._logp.force_compute()
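
The first branch above (value is None and _random is provided) is what allows a Stochastic to be instantiated without an explicit initial value. A hedged sketch of that path, assuming the PyMC 2.x API (pymc.Stochastic, pymc.normal_like, pymc.rnormal):

import pymc

def y_logp(value, mu, tau):
    return pymc.normal_like(value, mu, tau)

def y_random(mu, tau):
    return pymc.rnormal(mu, tau)

# value=None plus a random function: gen_lazy_function draws the
# initial value from the parents at instantiation.
Y = pymc.Stochastic(logp=y_logp,
                    doc='A normal variable with a drawn initial value.',
                    name='Y',
                    parents={'mu': 0., 'tau': 1.},
                    random=y_random,
                    value=None)
print(Y.value)
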
Example #6
    def gen_lazy_function(self):

        self._value = LazyFunction(fun = self._eval_fun,
                                    arguments = self.parents,
                                    ultimate_args = self.extended_parents,
                                    cache_depth = self._cache_depth)

        self._value.force_compute()

        self._jacobians = {}
        for parameter, function in self._jacobian_functions.iteritems():
            lazy_jacobian = LazyFunction(fun = function,
                                            arguments = self.parents,
                                            ultimate_args = self.extended_parents,
                                            cache_depth = self._cache_depth)
            lazy_jacobian.force_compute()
            self._jacobians[parameter] = lazy_jacobian
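
Example #6 applies the same recipe to the Jacobians of a Deterministic: one lazy evaluator per parent. As a hedged illustration, a _jacobian_functions table for a hypothetical node computing z = x + y might look like the following (names and contents invented for illustration only).

import numpy as np

# One function per parent, each returning d(eval)/d(parent).
_jacobian_functions = {
    'x': lambda x, y: np.ones(np.shape(x)),
    'y': lambda x, y: np.ones(np.shape(y)),
}

print(_jacobian_functions['x'](np.zeros(3), np.zeros(3)))  # an array of ones
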
Example #7
    def gen_lazy_function(self):

        self._value = LazyFunction(fun=self._eval_fun,
                                   arguments=self.parents,
                                   ultimate_args=self.extended_parents,
                                   cache_depth=self._cache_depth)

        self._value.force_compute()

        self._jacobians = {}
        for parameter, function in self._jacobian_functions.iteritems():
            lazy_jacobian = LazyFunction(fun=function,
                                         arguments=self.parents,
                                         ultimate_args=self.extended_parents,
                                         cache_depth=self._cache_depth)
            lazy_jacobian.force_compute()
            self._jacobians[parameter] = lazy_jacobian
Example #8
    def gen_lazy_function(self):

        self._logp = LazyFunction(fun=self._logp_fun,
                                  arguments=self.parents,
                                  ultimate_args=self.extended_parents,
                                  cache_depth=self._cache_depth)
        self._logp.force_compute()

        self._logp_partial_gradients = {}
        for parameter, function in self._logp_partial_gradients_functions.iteritems():
            lazy_logp_partial_gradients = LazyFunction(
                fun=function,
                arguments=self.parents,
                ultimate_args=self.extended_parents,
                cache_depth=self._cache_depth)
            lazy_logp_partial_gradients.force_compute()
            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradients
Example #9
class Stochastic(StochasticBase):

    """
    A variable whose value is not determined by the values of its parents.


    Decorator instantiation:

    @stoch(trace=True)
    def X(value = 0., mu = B, tau = C):
        return Normal_like(value, mu, tau)

    @stoch(trace=True)
    def X(value=0., mu=B, tau=C):

        def logp(value, mu, tau):
            return Normal_like(value, mu, tau)

        def random(mu, tau):
            return Normal_r(mu, tau)

        rseed = 1.


    Direct instantiation:

    :Parameters:



    - logp : function
            The function that computes the variable's log-probability from
            its value and the values of its parents.

    - doc : string
            The docstring for this variable.

    - name : string
            The name of this variable.

    - parents: dict
            A dictionary containing the parents of this variable.

    - random (optional) : function
            A function that draws a new value for this
            variable given its parents' values.

    - trace (optional) : boolean
            A boolean indicating whether this variable's value
            should be traced (in MCMC).

    - value (optional) : number or array
            An initial value for this variable

    - dtype (optional) : type
            A type for this variable.

    - rseed (optional) : integer or rseed
            A seed for this variable's rng. Either value or rseed must
            be given.

    - observed (optional) :  boolean
            A flag indicating whether this variable is data; whether
            its value is known.

    - cache_depth (optional) : integer
            An integer indicating how many of this variable's
            log-probability computations should be 'memoized'.

    - plot (optional) : boolean
            A flag indicating whether this variable is to be plotted.

    - verbose (optional) : integer
            Level of output verbosity: 0=none, 1=low, 2=medium, 3=high


    Externally-accessible attributes:

    - value: any class
          Returns this variable's current value.

    - logp: float
          Returns the variable's log-probability given its value and its
          parents' values. Skips computation if possible.

    - last_value: any class
          Returns this variable's last value. Useful for rejecting
          Metropolis-Hastings jumps. See touch() and the warning below.

    Externally-accessible methods:

    random():   Draws a new value for this variable from its distribution and
                returns it.

    :SeeAlso: Deterministic, Node, LazyFunction, stoch, dtrm, data, Model, Container
    """

    __array_priority__ = 1000

    def __init__(
        self,
        logp,
        doc,
        name,
        parents,
        random=None,
        trace=True,
        value=None,
        dtype=None,
        rseed=False,
        observed=False,
        cache_depth=2,
        plot=None,
        verbose=None,
        isdata=None,
        check_logp=True,
        logp_partial_gradients={},
    ):

        self.counter = Counter()
        self.ParentDict = ParentDict

        # Support legacy 'isdata' for a while
        if isdata is not None:
            print "Deprecation Warning: the 'isdata' flag has been replaced by 'observed'. Please update your model accordingly."
            self.observed = isdata

        # A flag indicating whether self's value has been observed.
        self._observed = observed
        # Default value of None for mask
        self._mask = None
        if observed:

            if value is None:
                raise ValueError, "Stochastic %s must be given an initial value if observed=True." % name

            try:

                # If there are missing values, store mask to missing elements
                self._mask = value.mask

                # Set to value of mean of observed data
                value.fill_value = value.mean()
                value = value.filled()

                # Set observed flag to False, so that missing values will update
                self._observed = False

            except AttributeError:
                # Must not have missing values
                pass

        # This function will be used to evaluate self's log probability.
        self._logp_fun = logp

        # These functions will be used to evaluate the partial gradients of self's log probability.
        self._logp_partial_gradient_functions = logp_partial_gradients

        # This function will be used to draw values for self conditional on self's parents.
        self._random = random

        # A seed for self's rng. If provided, the initial value will be drawn;
        # otherwise the value passed to the constructor is used.
        self.rseed = rseed

        self.errmsg = (
            "Stochastic %s's value is outside its support,\n or it forbids its parents' current values." % name
        )

        dtype = np.dtype(dtype)

        # Initialize value, either from value provided or from random function.
        try:
            if dtype.kind != "O" and value is not None:
                self._value = asanyarray(value, dtype=dtype)
                self._value.flags["W"] = False
            else:
                self._value = value
        except:
            cls, inst, tb = sys.exc_info()
            new_inst = cls(
                "Stochastic %s: Failed to cast initial value to required dtype.\n\nOriginal error message:\n" % name
                + inst.message
            )
            raise cls, new_inst, tb

        # Store the shape of the stochastic value
        self._shape = np.shape(self._value)

        Variable.__init__(
            self,
            doc=doc,
            name=name,
            parents=parents,
            cache_depth=cache_depth,
            trace=trace,
            dtype=dtype,
            plot=plot,
            verbose=verbose,
        )

        # self._logp.force_compute()

        # Recompute the shape: a value may have been drawn in gen_lazy_function
        # during Variable.__init__ if none was provided.
        self._shape = np.shape(self._value)

        if isinstance(self._value, ndarray):
            self._value.flags["W"] = False

        if check_logp:
            # Check initial value
            if not isinstance(self.logp, float):
                raise ValueError, "Stochastic " + self.__name__ + "'s initial log-probability is %s, should be a float." % self.logp.__repr__()

    def gen_lazy_function(self):
        """
        Will be called by Node at instantiation.
        """

        # If value argument to __init__ was None, draw value from random method.
        if self._value is None:

            # Use random function if provided
            if self._random is not None:
                self.value = self._random(**self._parents.value)

            # Otherwise raise an error: no initial value or random method was provided.
            else:
                raise ValueError, "Stochastic " + self.__name__ + "'s value initialized to None; no initial value or random method provided."

        arguments = {}
        arguments.update(self.parents)
        arguments["value"] = self
        arguments = DictContainer(arguments)

        self._logp = LazyFunction(
            fun=self._logp_fun,
            arguments=arguments,
            ultimate_args=self.extended_parents | set([self]),
            cache_depth=self._cache_depth,
        )
        self._logp.force_compute()

        self._logp_partial_gradients = {}
        for parameter, function in self._logp_partial_gradient_functions.iteritems():
            lazy_logp_partial_gradient = LazyFunction(
                fun=function,
                arguments=arguments,
                ultimate_args=self.extended_parents | set([self]),
                cache_depth=self._cache_depth,
            )
            lazy_logp_partial_gradient.force_compute()
            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradient

    def get_value(self):
        # Define value attribute
        if self.verbose > 1:
            print "\t" + self.__name__ + ": value accessed."
        return self._value

    def get_stoch_value(self):
        if self.verbose > 1:
            print "\t" + self.__name__ + ": stoch_value accessed."
        return self._value[self.mask]

    def set_value(self, value, force=False):
        # Record new value and increment counter

        # Value can't be updated if observed=True
        if self.observed and not force:
            raise AttributeError, "Stochastic " + self.__name__ + "'s value cannot be updated if observed flag is set"

        if self.verbose > 0:
            print "\t" + self.__name__ + ": value set to ", value

        # Save current value as last_value
        # Don't copy because caching depends on the object's reference.
        self.last_value = self._value

        if self.mask is None:

            if self.dtype.kind != "O":
                self._value = asanyarray(value, dtype=self.dtype)
                self._value.flags["W"] = False
            else:
                self._value = value

        else:

            new_value = self.value.copy()

            new_value[self.mask] = asanyarray(value, dtype=self.dtype)[self.mask]
            self._value = new_value

        self.counter.click()

    value = property(fget=get_value, fset=set_value, doc="Self's current value.")

    def mask():
        doc = "Returns the mask for missing values"

        def fget(self):
            return self._mask

        return locals()

    mask = property(**mask())

    def shape():
        doc = "The shape of the value of self."

        def fget(self):
            if self.verbose > 1:
                print "\t" + self.__name__ + ": shape accessed."
            return self._shape

        return locals()

    shape = property(**shape())

    def revert(self):
        """
        Sets self's value to self's last value. Bypasses the data cleaning in
        the set_value method.
        """
        self.counter.unclick()
        self._value = self.last_value

    def get_logp(self):

        if self.verbose > 0:
            print "\t" + self.__name__ + ": logp accessed."
        logp = self._logp.get()

        try:
            logp = float(logp)
        except:
            raise TypeError, self.__name__ + ": computed log-probability " + str(logp) + " cannot be cast to float"

        # A NaN log-probability (logp != logp) is treated as minus infinity.
        if logp != logp:
            return -np.inf

        if self.verbose > 0:
            print "\t" + self.__name__ + ": Returning log-probability ", logp

        # Check if the value is smaller than a double precision infinity:
        if logp <= d_neg_inf:
            if self.verbose > 0:
                raise ZeroProbability, self.errmsg + "\nValue: %s\nParents' values:%s" % (
                    self._value,
                    self._parents.value,
                )
            else:
                raise ZeroProbability, self.errmsg

        return logp

    def set_logp(self, new_logp):
        raise AttributeError, "Stochastic " + self.__name__ + "'s logp attribute cannot be set"

    logp = property(
        fget=get_logp,
        fset=set_logp,
        doc="Log-probability or log-density of self's current value\n given values of parents.",
    )

    def logp_gradient_contribution(self, calculation_set=None):
        """
        Calculates the gradient of the joint log posterior with respect to self. 
        Calculation of the log posterior is restricted to the variables in calculation_set. 
        """
        # NEED some sort of check to see if the log p calculation has recently failed, in which case not to continue

        return self.logp_partial_gradient(self, calculation_set) + __builtin__.sum(
            [child.logp_partial_gradient(self, calculation_set) for child in self.children]
        )

    def logp_partial_gradient(self, variable, calculation_set=None):
        """
        Calculates the partial gradient of the posterior of self with respect to variable.
        Returns zero if self is not in calculation_set.
        """
        if (calculation_set is None) or (self in calculation_set):

            if not datatypes.is_continuous(variable):
                return zeros(shape(variable.value))

            if variable is self:
                try:
                    gradient_func = self._logp_partial_gradients["value"]

                except KeyError:
                    raise NotImplementedError(repr(self) + " has no gradient function for 'value'")

                gradient = np.reshape(gradient_func.get(), np.shape(variable.value))
            else:
                gradient = __builtin__.sum(
                    [self._pgradient(variable, parameter, value) for parameter, value in self.parents.iteritems()]
                )

            return gradient
        else:
            return 0

    def _pgradient(self, variable, parameter, value):
        if value is variable:
            try:
                return np.reshape(self._logp_partial_gradients[parameter].get(), np.shape(variable.value))
            except KeyError:
                raise NotImplementedError(repr(self) + " has no gradient function for parameter " + parameter)
        else:
            return 0

    # Sample self's value conditional on parents.
    def random(self):
        """
        Draws a new value for a stoch conditional on its parents
        and returns it.

        Raises an error if no 'random' argument was passed to __init__.
        """

        if self._random:
            # Get current values of parents for use as arguments for _random()
            r = self._random(**self.parents.value)
        else:
            raise AttributeError, "Stochastic " + self.__name__ + " does not know how to draw its value, see documentation"

        if self.shape:
            r = np.reshape(r, self.shape)

        # Set Stochastic's value to drawn value
        if not self.observed:
            self.value = r

        return r

    # Shortcut alias to random
    rand = random

    def _get_isdata(self):
        import warnings

        warnings.warn('"isdata" is deprecated, please use "observed" instead.')
        return self._observed

    def _set_isdata(self, isdata):
        raise ValueError, 'Stochastic %s: "observed" flag cannot be changed.' % self.__name__

    isdata = property(_get_isdata, _set_isdata)

    def _get_observed(self):
        return self._observed

    def _set_observed(self, observed):
        raise ValueError, 'Stochastic %s: "observed" flag cannot be changed.' % self.__name__

    observed = property(_get_observed, _set_observed)

    def _get_coparents(self):
        coparents = set()
        for child in self.extended_children:
            coparents |= child.extended_parents
        coparents.add(self)
        return coparents

    coparents = property(_get_coparents, doc="All the variables whose extended children intersect with self's.")

    def _get_moral_neighbors(self):
        moral_neighbors = self.coparents | self.extended_parents | self.extended_children
        for neighbor in copy(moral_neighbors):
            if isinstance(neighbor, PotentialBase):
                moral_neighbors.remove(neighbor)
        return moral_neighbors

    moral_neighbors = property(
        _get_moral_neighbors, doc="Self's neighbors in the moral graph: self's Markov blanket with self removed."
    )

    def _get_markov_blanket(self):
        return self.moral_neighbors | set([self])

    markov_blanket = property(
        _get_markov_blanket, doc="Self's coparents, self's extended parents, self's children and self."
    )
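
The 'Direct instantiation' section of the docstring maps onto the constructor above. A hedged usage sketch, assuming the PyMC 2.x API (pymc.Normal, pymc.Gamma, pymc.normal_like, pymc.rnormal):

import pymc

B = pymc.Normal('B', mu=0., tau=1.)
C = pymc.Gamma('C', alpha=1., beta=1.)

def x_logp(value, mu, tau):
    return pymc.normal_like(value, mu, tau)

def x_random(mu, tau):
    return pymc.rnormal(mu, tau)

X = pymc.Stochastic(logp=x_logp,
                    doc='A normally distributed variable.',
                    name='X',
                    parents={'mu': B, 'tau': C},
                    random=x_random,
                    value=0.,
                    dtype=float)
print(X.logp)  # verified to be a float at instantiation when check_logp=True
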
Example #10
class Deterministic(DeterministicBase):
    """
    A variable whose value is determined by the values of its parents.

    Decorator instantiation:

    @dtrm(trace=True)
    def A(x = B, y = C):
        return sqrt(x ** 2 + y ** 2)

    :Parameters:
      eval : function
        The function that computes the variable's value from the values
        of its parents.
      doc : string
        The docstring for this variable.
      name: string
        The name of this variable.
      parents: dictionary
        A dictionary containing the parents of this variable.
      trace (optional): boolean
        A boolean indicating whether this variable's value
        should be traced (in MCMC).
      cache_depth (optional): integer
        An integer indicating how many of this variable's
        value computations should be 'memoized'.
      plot (optional) : boolean
        A flag indicating whether this variable is to be plotted.
      verbose (optional) : integer
        Level of output verbosity: 0=none, 1=low, 2=medium, 3=high

    :Attributes:
      value : any object
        Returns the variable's value given its parents' values. Skips
        computation if possible.

    :SeeAlso:
      Stochastic, Potential, deterministic, MCMC, Lambda,
      LinearCombination, Index
    """
    def __init__(self, eval,  doc, name, parents, dtype=None, trace=True, cache_depth=2, plot=None, verbose=None):
        self.ParentDict = ParentDict

        # This function gets used to evaluate self's value.
        self._eval_fun = eval

        Variable.__init__(  self,
                        doc=doc,
                        name=name,
                        parents=parents,
                        cache_depth = cache_depth,
                        dtype=dtype,
                        trace=trace,
                        plot=plot,
                        verbose=verbose)

        # self._value.force_compute()

    def gen_lazy_function(self):

        self._value = LazyFunction(fun = self._eval_fun,
                                    arguments = self.parents,
                                    ultimate_args = self.extended_parents,
                                    cache_depth = self._cache_depth)

        self._value.force_compute()

    def get_value(self):
        if self.verbose > 1:
            print '\t' + self.__name__ + ': value accessed.'
        _value = self._value.get()
        if isinstance(_value, ndarray):
            _value.flags['W'] = False
        if self.verbose > 1:
            print '\t' + self.__name__ + ': Returning value ',_value
        return _value

    def set_value(self,value):
        raise AttributeError, 'Deterministic '+self.__name__+'\'s value cannot be set.'

    value = property(fget = get_value, fset=set_value, doc="Self's value computed from current values of parents.")
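
A hedged sketch of the decorator instantiation described in the docstring, assuming the PyMC 2.x API (pymc.deterministic is the long spelling of dtrm):

import pymc
from numpy import sqrt

B = pymc.Normal('B', mu=0., tau=1.)
C = pymc.Normal('C', mu=0., tau=1.)

@pymc.deterministic(trace=True)
def A(x=B, y=C):
    return sqrt(x ** 2 + y ** 2)

# A.value is computed lazily via the LazyFunction built in
# gen_lazy_function, and recomputed only when B or C changes.
print(A.value)
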
Example #11
class Stochastic(StochasticBase):

    """
    A variable whose value is not determined by the values of its parents.


    Decorator instantiation:

    @stoch(trace=True)
    def X(value = 0., mu = B, tau = C):
        return Normal_like(value, mu, tau)

    @stoch(trace=True)
    def X(value=0., mu=B, tau=C):

        def logp(value, mu, tau):
            return Normal_like(value, mu, tau)

        def random(mu, tau):
            return Normal_r(mu, tau)

        rseed = 1.


    Direct instantiation:

    :Parameters:



    - logp : function
            The function that computes the variable's log-probability from
            its value and the values of its parents.

    - doc : string
            The docstring for this variable.

    - name : string
            The name of this variable.

    - parents: dict
            A dictionary containing the parents of this variable.

    - random (optional) : function
            A function that draws a new value for this
            variable given its parents' values.

    - trace (optional) : boolean
            A boolean indicating whether this variable's value
            should be traced (in MCMC).

    - value (optional) : number or array
            An initial value for this variable

    - dtype (optional) : type
            A type for this variable.

    - rseed (optional) : integer or rseed
            A seed for this variable's rng. Either value or rseed must
            be given.

    - observed (optional) :  boolean
            A flag indicating whether this variable is data; whether
            its value is known.

    - cache_depth (optional) : integer
            An integer indicating how many of this variable's
            log-probability computations should be 'memoized'.

    - plot (optional) : boolean
            A flag indicating whether this variable is to be plotted.

    - verbose (optional) : integer
            Level of output verbosity: 0=none, 1=low, 2=medium, 3=high


    Externally-accessible attributes:

    - value: any class
          Returns this variable's current value.

    - logp: float
          Returns the variable's log-probability given its value and its
          parents' values. Skips computation if possible.

    - last_value: any class
          Returns this variable's last value. Useful for rejecting
          Metropolis-Hastings jumps. See touch() and the warning below.

    Externally-accessible methods:

    random():   Draws a new value for this variable from its distribution and
                returns it.

    :SeeAlso: Deterministic, Node, LazyFunction, stoch, dtrm, data, Model, Container
    """

    def __init__(   self,
                    logp,
                    doc,
                    name,
                    parents,
                    random = None,
                    trace=True,
                    value=None,
                    dtype=None,
                    rseed=False,
                    observed=False,
                    cache_depth=2,
                    plot=None,
                    verbose = None,
                    isdata=None):

        self.counter = Counter()
        self.ParentDict = ParentDict

        # Support legacy 'isdata' for a while
        if isdata is not None:
            print "Deprecation Warning: the 'isdata' flag has been replaced by 'observed'. Please update your model accordingly."
            self.observed = isdata

        # A flag indicating whether self's value has been observed.
        self._observed = observed
        if observed and value is None:
            raise ValueError, 'Stochastic %s must be given an initial value if observed=True.'%name

        # This function will be used to evaluate self's log probability.
        self._logp_fun = logp

        # This function will be used to draw values for self conditional on self's parents.
        self._random = random

        # A seed for self's rng. If provided, the initial value will be drawn;
        # otherwise the value passed to the constructor is used.
        self.rseed = rseed

        self.errmsg = "Stochastic %s's value is outside its support,\n or it forbids its parents' current values."%name

        dtype = np.dtype(dtype)

        # Initialize value, either from value provided or from random function.
        try:
            if dtype.kind != 'O' and value is not None:
                self._value = asanyarray(value, dtype=dtype)
                self._value.flags['W']=False
            else:
                self._value = value
        except:
            cls, inst, tb = sys.exc_info()
            new_inst = cls('Stochastic %s: Failed to cast initial value to required dtype.\n\nOriginal error message:\n'%name + inst.message)
            raise cls, new_inst, tb

        # Store the shape of the stochastic value
        self._shape = np.shape(self._value)

        Variable.__init__(  self,
                        doc=doc,
                        name=name,
                        parents=parents,
                        cache_depth=cache_depth,
                        trace=trace,
                        dtype=dtype,
                        plot=plot,
                        verbose=verbose)

        # self._logp.force_compute()

        # Recompute the shape: a value may have been drawn in gen_lazy_function
        # during Variable.__init__ if none was provided.
        self._shape = np.shape(self._value)

        if isinstance(self._value, ndarray):
            self._value.flags['W'] = False

        # Check initial value
        if not isinstance(self.logp, float):
            raise ValueError, "Stochastic " + self.__name__ + "'s initial log-probability is %s, should be a float." %self.logp.__repr__()


    def gen_lazy_function(self):
        """
        Will be called by Node at instantiation.
        """

        # If value argument to __init__ was None, draw value from random method.
        if self._value is None:

            # Use random function if provided
            if self._random is not None:
                self.value = self._random(**self._parents.value)

            # Otherwise raise an error: no initial value or random method was provided.
            else:
                raise ValueError, 'Stochastic ' + self.__name__ + "'s value initialized to None; no initial value or random method provided."

        arguments = {}
        arguments.update(self.parents)
        arguments['value'] = self
        arguments = DictContainer(arguments)

        self._logp = LazyFunction(fun = self._logp_fun,
                                    arguments = arguments,
                                    ultimate_args = self.extended_parents | set([self]),
                                    cache_depth = self._cache_depth)
        self._logp.force_compute()

    def get_value(self):
        # Define value attribute
        if self.verbose > 1:
            print '\t' + self.__name__ + ': value accessed.'
        return self._value


    def set_value(self, value, force=False):
        # Record new value and increment counter

        # Value can't be updated if observed=True
        if self.observed and not force:
            raise AttributeError, 'Stochastic '+self.__name__+'\'s value cannot be updated if observed flag is set'

        if self.verbose > 0:
            print '\t' + self.__name__ + ': value set to ', value

        # Save current value as last_value
        # Don't copy because caching depends on the object's reference.
        self.last_value = self._value

        if self.dtype.kind != 'O':
            self._value = asanyarray(value, dtype=self.dtype)
            self._value.flags['W']=False
        else:
            self._value = value

        self.counter.click()

    value = property(fget=get_value, fset=set_value, doc="Self's current value.")

    def shape():
        doc = "The shape of the value of self."
        def fget(self):
            if self.verbose > 1:
                print '\t' + self.__name__ + ': shape accessed.'
            return self._shape
        return locals()
    shape = property(**shape())

    def revert(self):
        """
        Sets self's value to self's last value. Bypasses the data cleaning in
        the set_value method.
        """
        self.counter.unclick()
        self._value = self.last_value


    def get_logp(self):

        if self.verbose > 0:
            print '\t' + self.__name__ + ': logp accessed.'
        logp = self._logp.get()

        try:
            logp = float(logp)
        except:
            raise TypeError, self.__name__ + ': computed log-probability ' + str(logp) + ' cannot be cast to float'

        # logp != logp is True only when logp is NaN.
        if logp != logp:
            raise ValueError, self.__name__ + ': computed log-probability is NaN'

        if self.verbose > 0:
            print '\t' + self.__name__ + ': Returning log-probability ', logp

        # Check if the value is smaller than a double precision infinity:
        if logp <= d_neg_inf:
            if self.verbose > 0:
                raise ZeroProbability, self.errmsg + "\nValue: %s\nParents' values:%s" % (self._value, self._parents.value)
            else:
                raise ZeroProbability, self.errmsg

        return logp

    def set_logp(self, new_logp):
        raise AttributeError, 'Stochastic '+self.__name__+'\'s logp attribute cannot be set'

    logp = property(fget = get_logp, fset=set_logp, doc="Log-probability or log-density of self's current value\n given values of parents.")


    # Sample self's value conditional on parents.
    def random(self):
        """
        Draws a new value for a stoch conditional on its parents
        and returns it.

        Raises an error if no 'random' argument was passed to __init__.
        """

        if self._random:
            # Get current values of parents for use as arguments for _random()
            r = self._random(**self.parents.value)
        else:
            raise AttributeError, 'Stochastic '+self.__name__+' does not know how to draw its value, see documentation'

        if self.shape:
            r = np.reshape(r, self.shape)

        # Set Stochastic's value to drawn value
        if not self.observed:
            self.value = r

        return r

    # Shortcut alias to random
    rand = random

    def _get_isdata(self):
        import warnings
        warnings.warn('"isdata" is deprecated, please use "observed" instead.')
        return self._observed
    def _set_isdata(self, isdata):
        raise ValueError, 'Stochastic %s: "observed" flag cannot be changed.'%self.__name__
    isdata = property(_get_isdata, _set_isdata)

    def _get_observed(self):
        return self._observed
    def _set_observed(self, observed):
        raise ValueError, 'Stochastic %s: "observed" flag cannot be changed.'%self.__name__
    observed = property(_get_observed, _set_observed)


    def _get_coparents(self):
        coparents = set()
        for child in self.extended_children:
            coparents |= child.extended_parents
        coparents.add(self)
        return coparents
    coparents = property(_get_coparents, doc="All the variables whose extended children intersect with self's.")

    def _get_moral_neighbors(self):
        moral_neighbors = self.coparents | self.extended_parents | self.extended_children
        for neighbor in copy(moral_neighbors):
            if isinstance(neighbor, PotentialBase):
                moral_neighbors.remove(neighbor)
        return moral_neighbors
    moral_neighbors = property(_get_moral_neighbors, doc="Self's neighbors in the moral graph: self's Markov blanket with self removed.")

    def _get_markov_blanket(self):
        return self.moral_neighbors | set([self])
    markov_blanket = property(_get_markov_blanket, doc="Self's coparents, self's extended parents, self's children and self.")
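
The last_value/revert machinery above exists so that Metropolis-Hastings rejection can restore the previous state without copying values. A hedged sketch of that pattern follows; propose and accept are hypothetical helpers, not part of the class.

import pymc

def metropolis_step(X, propose, accept):
    # Setting X.value stores the old value in last_value and clicks the
    # counter; revert() undoes both.
    X.value = propose(X.value)
    try:
        logp_new = X.logp  # may raise ZeroProbability
    except pymc.ZeroProbability:
        X.revert()
        return False
    if accept(logp_new):
        return True
    X.revert()
    return False
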
Example #12
class Potential(PotentialBase):
    """
    Not a variable; just an arbitrary log-probability term to multiply into the
    joint distribution. Useful for expressing models that aren't directed, such as
    Markov random fields.

    Decorator instantiation:

    @potential(trace = True)
    def A(x = B, y = C):
        return -.5 * (x-y)**2 / 3.

    Direct instantiation:

    :Parameters:

        - logp: function
              The function that computes the potential's value from the values
              of its parents.

        - doc: string
              The docstring for this potential.

        - name: string
              The name of this potential.

        - parents: dictionary
              A dictionary containing the parents of this potential.

        - cache_depth (optional): integer
              An integer indicating how many of this potential's value computations
              should be 'memoized'.

        - plot (optional) : boolean
            A flag indicating whether this variable is to be plotted.

        - verbose (optional) : integer
              Level of output verbosity: 0=none, 1=low, 2=medium, 3=high


    Externally-accessible attribute:

        - logp: float
              Returns the potential's log-probability given its parents' values. Skips
              computation if possible.

    No methods.

    :SeeAlso: Stochastic, Node, LazyFunction, stoch, dtrm, data, Model, Container
    """
    def __init__(self, logp,  doc, name, parents, cache_depth=2, plot=None, verbose=None):

        self.ParentDict = ParentDict

        # This function gets used to evaluate self's value.
        self._logp_fun = logp

        self.errmsg = "Potential %s forbids its parents' current values"%name

        Node.__init__(  self,
                        doc=doc,
                        name=name,
                        parents=parents,
                        cache_depth = cache_depth,
                        verbose=verbose)

        self._plot = plot

        # self._logp.force_compute()

        # Check initial value
        if not isinstance(self.logp, float):
            raise ValueError, "Potential " + self.__name__ + "'s initial log-probability is %s, should be a float." %self.logp.__repr__()

    def gen_lazy_function(self):

        self._logp = LazyFunction(fun = self._logp_fun,
                                    arguments = self.parents,
                                    ultimate_args = self.extended_parents,
                                    cache_depth = self._cache_depth)
        self._logp.force_compute()

    def get_logp(self):
        if self.verbose > 1:
            print '\t' + self.__name__ + ': log-probability accessed.'
        logp = self._logp.get()
        if self.verbose > 1:
            print '\t' + self.__name__ + ': Returning log-probability ', logp

        try:
            logp = float(logp)
        except:
            raise TypeError, self.__name__ + ': computed log-probability ' + str(logp) + ' cannot be cast to float'

        # logp != logp is True only when logp is NaN.
        if logp != logp:
            raise ValueError, self.__name__ + ': computed log-probability is NaN'

        # Check if the value is smaller than a double precision infinity:
        if logp <= d_neg_inf:
            if self.verbose > 0:
                raise ZeroProbability, self.errmsg + ": %s" %self._parents.value
            else:
                raise ZeroProbability, self.errmsg

        return logp

    def set_logp(self,value):
        raise AttributeError, 'Potential '+self.__name__+'\'s log-probability cannot be set.'

    logp = property(fget = get_logp, fset=set_logp, doc="Self's log-probability value conditional on parents.")
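
A hedged sketch of the decorator instantiation shown in the docstring above, assuming the PyMC 2.x API (pymc.potential):

import pymc

B = pymc.Normal('B', mu=0., tau=1.)
C = pymc.Normal('C', mu=0., tau=1.)

@pymc.potential
def A(x=B, y=C):
    # An undirected log-probability term coupling B and C.
    return -.5 * (x - y) ** 2 / 3.

print(A.logp)  # evaluated lazily from the current parent values
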
Example #13
class Deterministic(DeterministicBase):
    """
    A variable whose value is determined by the values of its parents.

    Decorator instantiation:

    @dtrm(trace=True)
    def A(x = B, y = C):
        return sqrt(x ** 2 + y ** 2)

    :Parameters:
      eval : function
        The function that computes the variable's value from the values
        of its parents.
      doc : string
        The docstring for this variable.
      name: string
        The name of this variable.
      parents: dictionary
        A dictionary containing the parents of this variable.
      trace (optional): boolean
        A boolean indicating whether this variable's value
        should be traced (in MCMC).
      cache_depth (optional): integer
        An integer indicating how many of this variable's
        value computations should be 'memoized'.
      plot (optional) : boolean
        A flag indicating whether this variable is to be plotted.
      verbose (optional) : integer
        Level of output verbosity: 0=none, 1=low, 2=medium, 3=high

    :Attributes:
      value : any object
        Returns the variable's value given its parents' values. Skips
        computation if possible.

    :SeeAlso:
      Stochastic, Potential, deterministic, MCMC, Lambda,
      LinearCombination, Index
    """
    def __init__(self, eval,  doc, name, parents, dtype=None, trace=True, cache_depth=2, plot=None, verbose=None):
        self.ParentDict = ParentDict

        # This function gets used to evaluate self's value.
        self._eval_fun = eval

        Variable.__init__(  self,
                        doc=doc,
                        name=name,
                        parents=parents,
                        cache_depth = cache_depth,
                        dtype=dtype,
                        trace=trace,
                        plot=plot,
                        verbose=verbose)

        # self._value.force_compute()

    def gen_lazy_function(self):

        self._value = LazyFunction(fun = self._eval_fun,
                                    arguments = self.parents,
                                    ultimate_args = self.extended_parents,
                                    cache_depth = self._cache_depth)

        self._value.force_compute()

    def get_value(self):
        if self.verbose > 1:
            print '\t' + self.__name__ + ': value accessed.'
        _value = self._value.get()
        if isinstance(_value, ndarray):
            _value.flags['W'] = False
        if self.verbose > 1:
            print '\t' + self.__name__ + ': Returning value ',_value
        return _value

    def set_value(self,value):
        raise AttributeError, 'Deterministic '+self.__name__+'\'s value cannot be set.'

    value = property(fget = get_value, fset=set_value, doc="Self's value computed from current values of parents.")
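
For a one-line eval function like this, the Lambda wrapper named in the :SeeAlso: list builds the Deterministic directly. A hedged sketch, assuming the PyMC 2.x API:

import pymc
from numpy import sqrt

B = pymc.Normal('B', mu=0., tau=1.)
C = pymc.Normal('C', mu=0., tau=1.)

# Lambda reads name, parents and eval function off the lambda's keyword
# defaults, then instantiates a Deterministic as above.
A = pymc.Lambda('A', lambda x=B, y=C: sqrt(x ** 2 + y ** 2))
print(A.value)
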
Example #14
class Stochastic(StochasticBase):
    """
    A variable whose value is not determined by the values of its parents.


    Decorator instantiation:

    @stoch(trace=True)
    def X(value = 0., mu = B, tau = C):
        return Normal_like(value, mu, tau)

    @stoch(trace=True)
    def X(value=0., mu=B, tau=C):

        def logp(value, mu, tau):
            return Normal_like(value, mu, tau)

        def random(mu, tau):
            return Normal_r(mu, tau)

        rseed = 1.


    Direct instantiation:

    :Parameters:



    - logp : function
            The function that computes the variable's log-probability from
            its value and the values of its parents.

    - doc : string
            The docstring for this variable.

    - name : string
            The name of this variable.

    - parents: dict
            A dictionary containing the parents of this variable.

    - random (optional) : function
            A function that draws a new value for this
            variable given its parents' values.

    - trace (optional) : boolean
            A boolean indicating whether this variable's value
            should be traced (in MCMC).

    - value (optional) : number or array
            An initial value for this variable

    - dtype (optional) : type
            A type for this variable.

    - rseed (optional) : integer or rseed
            A seed for this variable's rng. Either value or rseed must
            be given.

    - observed (optional) :  boolean
            A flag indicating whether this variable is data; whether
            its value is known.

    - cache_depth (optional) : integer
            An integer indicating how many of this variable's
            log-probability computations should be 'memoized'.

    - plot (optional) : boolean
            A flag indicating whether this variable is to be plotted.

    - verbose (optional) : integer
            Level of output verbosity: 0=none, 1=low, 2=medium, 3=high


    Externally-accessible attributes:

    - value: any class
          Returns this variable's current value.

    - logp: float
          Returns the variable's log-probability given its value and its
          parents' values. Skips computation if possible.

    - last_value: any class
          Returns this variable's last value. Useful for rejecting
          Metropolis-Hastings jumps. See touch() and the warning below.

    Externally-accessible methods:

    random():   Draws a new value for this variable from its distribution and
                returns it.

    :SeeAlso: Deterministic, Node, LazyFunction, stoch, dtrm, data, Model, Container
    """
    __array_priority__ = 1000

    def __init__(self,
                 logp,
                 doc,
                 name,
                 parents,
                 random=None,
                 trace=True,
                 value=None,
                 dtype=None,
                 rseed=False,
                 observed=False,
                 cache_depth=2,
                 plot=None,
                 verbose=None,
                 isdata=None,
                 check_logp=True,
                 logp_partial_gradients={}):

        self.counter = Counter()
        self.ParentDict = ParentDict

        # Support legacy 'isdata' for a while
        if isdata is not None:
            print "Deprecation Warning: the 'isdata' flag has been replaced by 'observed'. Please update your model accordingly."
            self.observed = isdata

        # A flag indicating whether self's value has been observed.
        self._observed = observed
        # Default value of None for mask
        self._mask = None
        if observed:

            if value is None:
                raise ValueError, 'Stochastic %s must be given an initial value if observed=True.' % name

            try:

                # If there are missing values, store mask to missing elements
                self._mask = value.mask

                # Set to value of mean of observed data
                value.fill_value = value.mean()
                value = value.filled()

                # Set observed flag to False, so that missing values will update
                self._observed = False

            except AttributeError:
                # Must not have missing values
                pass

        # This function will be used to evaluate self's log probability.
        self._logp_fun = logp

        # These functions will be used to evaluate the partial gradients of
        # self's log probability.
        self._logp_partial_gradient_functions = logp_partial_gradients

        # This function will be used to draw values for self conditional on self's parents.
        self._random = random

        # A seed for self's rng. If provided, the initial value will be drawn;
        # otherwise the value passed to the constructor is used.
        self.rseed = rseed

        self.errmsg = "Stochastic %s's value is outside its support,\n or it forbids its parents' current values." % name

        dtype = np.dtype(dtype)

        # Initialize value, either from value provided or from random function.
        try:
            if dtype.kind != 'O' and value is not None:
                self._value = asanyarray(value, dtype=dtype)
                self._value.flags['W'] = False
            else:
                self._value = value
        except:
            cls, inst, tb = sys.exc_info()
            new_inst = cls(
                'Stochastic %s: Failed to cast initial value to required dtype.\n\nOriginal error message:\n'
                % name + inst.message)
            raise cls, new_inst, tb

        # Store the shape of the stochastic value
        self._shape = np.shape(self._value)

        Variable.__init__(self,
                          doc=doc,
                          name=name,
                          parents=parents,
                          cache_depth=cache_depth,
                          trace=trace,
                          dtype=dtype,
                          plot=plot,
                          verbose=verbose)

        # self._logp.force_compute()

        # Recompute the shape: a value may have been drawn in gen_lazy_function
        # during Variable.__init__ if none was provided.
        self._shape = np.shape(self._value)

        if isinstance(self._value, ndarray):
            self._value.flags['W'] = False

        if check_logp:
            # Check initial value
            if not isinstance(self.logp, float):
                raise ValueError, "Stochastic " + self.__name__ + "'s initial log-probability is %s, should be a float." % self.logp.__repr__(
                )

    def gen_lazy_function(self):
        """
        Will be called by Node at instantiation.
        """

        # If value argument to __init__ was None, draw value from random method.
        if self._value is None:

            # Use random function if provided
            if self._random is not None:
                self.value = self._random(**self._parents.value)

            # Otherwise raise an error: no initial value or random method was provided.
            else:
                raise ValueError, 'Stochastic ' + self.__name__ + "'s value initialized to None; no initial value or random method provided."

        arguments = {}
        arguments.update(self.parents)
        arguments['value'] = self
        arguments = DictContainer(arguments)

        self._logp = LazyFunction(fun=self._logp_fun,
                                  arguments=arguments,
                                  ultimate_args=self.extended_parents | set([self]),
                                  cache_depth=self._cache_depth)
        self._logp.force_compute()

        self._logp_partial_gradients = {}
        for parameter, function in self._logp_partial_gradient_functions.iteritems():
            lazy_logp_partial_gradient = LazyFunction(
                fun=function,
                arguments=arguments,
                ultimate_args=self.extended_parents | set([self]),
                cache_depth=self._cache_depth)
            lazy_logp_partial_gradient.force_compute()
            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradient

    def get_value(self):
        # Define value attribute
        if self.verbose > 1:
            print '\t' + self.__name__ + ': value accessed.'
        return self._value

    def get_stoch_value(self):
        if self.verbose > 1:
            print '\t' + self.__name__ + ': stoch_value accessed.'
        return self._value[self.mask]

    def set_value(self, value, force=False):
        # Record new value and increment counter

        # Value can't be updated if observed=True
        if self.observed and not force:
            raise AttributeError, 'Stochastic ' + self.__name__ + '\'s value cannot be updated if observed flag is set'

        if self.verbose > 0:
            print '\t' + self.__name__ + ': value set to ', value

        # Save current value as last_value
        # Don't copy because caching depends on the object's reference.
        self.last_value = self._value

        if self.mask is None:

            if self.dtype.kind != 'O':
                self._value = asanyarray(value, dtype=self.dtype)
                self._value.flags['W'] = False
            else:
                self._value = value

        else:

            new_value = self.value.copy()

            new_value[self.mask] = asanyarray(value,
                                              dtype=self.dtype)[self.mask]
            self._value = new_value

        self.counter.click()

    value = property(fget=get_value,
                     fset=set_value,
                     doc="Self's current value.")

    def mask():
        doc = "Returns the mask for missing values"

        def fget(self):
            return self._mask

        return locals()

    mask = property(**mask())

    def shape():
        doc = "The shape of the value of self."

        def fget(self):
            if self.verbose > 1:
                print '\t' + self.__name__ + ': shape accessed.'
            return self._shape

        return locals()

    shape = property(**shape())

    def revert(self):
        """
        Sets self's value to self's last value. Bypasses the data cleaning in
        the set_value method.
        """
        self.counter.unclick()
        self._value = self.last_value

    def get_logp(self):

        if self.verbose > 0:
            print '\t' + self.__name__ + ': logp accessed.'
        logp = self._logp.get()

        try:
            logp = float(logp)
        except:
            raise TypeError, self.__name__ + ': computed log-probability ' + str(logp) + ' cannot be cast to float'

        # A NaN log-probability (logp != logp) is treated as minus infinity.
        if logp != logp:
            return -np.inf

        if self.verbose > 0:
            print '\t' + self.__name__ + ': Returning log-probability ', logp

        # Check if the value is smaller than a double precision infinity:
        if logp <= d_neg_inf:
            if self.verbose > 0:
                raise ZeroProbability, self.errmsg + "\nValue: %s\nParents' values:%s" % (
                    self._value, self._parents.value)
            else:
                raise ZeroProbability, self.errmsg

        return logp

    def set_logp(self, new_logp):
        raise AttributeError, 'Stochastic ' + self.__name__ + '\'s logp attribute cannot be set'

    logp = property(
        fget=get_logp,
        fset=set_logp,
        doc=
        "Log-probability or log-density of self's current value\n given values of parents."
    )

    def logp_gradient_contribution(self, calculation_set=None):
        """
        Calculates the gradient of the joint log posterior with respect to self. 
        Calculation of the log posterior is restricted to the variables in calculation_set. 
        """
        # NEED some sort of check to see if the log p calculation has recently failed, in which case not to continue

        return self.logp_partial_gradient(
            self, calculation_set) + __builtin__.sum([
                child.logp_partial_gradient(self, calculation_set)
                for child in self.children
            ])

    def logp_partial_gradient(self, variable, calculation_set=None):
        """
        Calculates the partial gradient of the posterior of self with respect to variable.
        Returns zero if self is not in calculation_set.
        """
        if (calculation_set is None) or (self in calculation_set):

            if not datatypes.is_continuous(variable):
                return zeros(shape(variable.value))

            if variable is self:
                try:
                    gradient_func = self._logp_partial_gradients['value']

                except KeyError:
                    raise NotImplementedError(
                        repr(self) + " has no gradient function for 'value'")

                gradient = np.reshape(gradient_func.get(),
                                      np.shape(variable.value))
            else:
                gradient = __builtin__.sum([
                    self._pgradient(variable, parameter, value)
                    for parameter, value in self.parents.iteritems()
                ])

            return gradient
        else:
            return 0

    def _pgradient(self, variable, parameter, value):
        if value is variable:
            try:
                return np.reshape(
                    self._logp_partial_gradients[parameter].get(),
                    np.shape(variable.value))
            except KeyError:
                raise NotImplementedError(
                    repr(self) + " has no gradient function for parameter " +
                    parameter)
        else:
            return 0

    # Sample self's value conditional on parents.
    def random(self):
        """
        Draws a new value for a stoch conditional on its parents
        and returns it.

        Raises an error if no 'random' argument was passed to __init__.
        """

        if self._random:
            # Get current values of parents for use as arguments for _random()
            r = self._random(**self.parents.value)
        else:
            raise AttributeError, 'Stochastic ' + self.__name__ + ' does not know how to draw its value, see documentation'

        if self.shape:
            r = np.reshape(r, self.shape)

        # Set Stochastic's value to drawn value
        if not self.observed:
            self.value = r

        return r

    # Shortcut alias to random
    rand = random

    def _get_isdata(self):
        import warnings
        warnings.warn('"isdata" is deprecated, please use "observed" instead.')
        return self._observed

    def _set_isdata(self, isdata):
        raise ValueError, 'Stochastic %s: "observed" flag cannot be changed.' % self.__name__

    isdata = property(_get_isdata, _set_isdata)

    def _get_observed(self):
        return self._observed

    def _set_observed(self, observed):
        raise ValueError, 'Stochastic %s: "observed" flag cannot be changed.' % self.__name__

    observed = property(_get_observed, _set_observed)

    def _get_coparents(self):
        coparents = set()
        for child in self.extended_children:
            coparents |= child.extended_parents
        coparents.add(self)
        return coparents

    coparents = property(
        _get_coparents,
        doc="All the variables whose extended children intersect with self's.")

    def _get_moral_neighbors(self):
        moral_neighbors = self.coparents | self.extended_parents | self.extended_children
        for neighbor in copy(moral_neighbors):
            if isinstance(neighbor, PotentialBase):
                moral_neighbors.remove(neighbor)
        return moral_neighbors

    moral_neighbors = property(
        _get_moral_neighbors,
        doc=
        "Self's neighbors in the moral graph: self's Markov blanket with self removed."
    )

    def _get_markov_blanket(self):
        return self.moral_neighbors | set([self])

    markov_blanket = property(
        _get_markov_blanket,
        doc=
        "Self's coparents, self's extended parents, self's children and self.")
Example #15
0
class Stochastic(StochasticBase):

    """
    A variable whose value is not determined by the values of its parents.


    Decorator instantiation:

    @stoch(trace=True)
    def X(value = 0., mu = B, tau = C):
        return Normal_like(value, mu, tau)

    @stoch(trace=True)
    def X(value=0., mu=B, tau=C):

        def logp(value, mu, tau):
            return Normal_like(value, mu, tau)

        def random(mu, tau):
            return Normal_r(mu, tau)

        rseed = 1.


    Direct instantiation:

    :Parameters:

    - logp : function
            The function that computes the variable's log-probability from
            its value and the values of its parents.

    - doc : string
            The docstring for this variable.

    - name : string
            The name of this variable.

    - parents: dict
            A dictionary containing the parents of this variable.

    - random (optional) : function
            A function that draws a new value for this
            variable given its parents' values.

    - trace (optional) : boolean
            A boolean indicating whether this variable's value
            should be traced (in MCMC).

    - value (optional) : number or array
            An initial value for this variable

    - dtype (optional) : type
            A type for this variable.

    - rseed (optional) : integer or rseed
            A seed for this variable's rng. Either value or rseed must
            be given.

    - observed (optional) :  boolean
            A flag indicating whether this variable is data; whether
            its value is known.

    - cache_depth (optional) : integer
            An integer indicating how many of this variable's
            log-probability computations should be 'memoized'.

    - plot (optional) : boolean
            A flag indicating whether this variable is to be plotted.

    - verbose (optional) : integer
            Level of output verbosity: 0=none, 1=low, 2=medium, 3=high


    Externally-accessible attributes:

    - value: any class
          Returns this variable's current value.

    - logp: float
          Returns the variable's log-probability given its value and its
          parents' values. Skips computation if possible.

    - last_value: any class
          Returns this variable's last value. Useful for rejecting
          Metropolis-Hastings jumps. See touch() and the warning below.

    Externally-accessible methods:

    random():   Draws a new value for this variable from its distribution and
                returns it.

    :SeeAlso: Deterministic, Node, LazyFunction, stoch, dtrm, data, Model, Container
    """
    __array_priority__ = 1000

    def __init__(   self,
                    logp,
                    doc,
                    name,
                    parents,
                    random=None,
                    trace=True,
                    value=None,
                    dtype=None,
                    rseed=False,
                    observed=False,
                    cache_depth=2,
                    plot=None,
                    verbose=None,
                    isdata=None, 
                    check_logp=True,
                    logp_partial_gradients=None):

        if logp_partial_gradients is None:
            logp_partial_gradients = {}
            
        self.counter = Counter()
        self.ParentDict = ParentDict
        
        # Support legacy 'isdata' for a while
        if isdata is not None:
            print "Deprecation Warning: the 'isdata' flag has been replaced by 'observed'. Please update your model accordingly."
            # 'observed' is a read-only property, so update the local argument
            # rather than assigning to self.observed.
            observed = isdata

        # A flag indicating whether self's value has been observed.
        self._observed = observed
        # Default value of None for mask
        self._mask = None
        if observed:

            if value is None:
                raise ValueError, 'Stochastic %s must be given an initial value if observed=True.'%name
                
            try:
                
                # If there are missing values, store mask to missing elements
                self._mask = value.mask
                
                # Set to value of mean of observed data
                value.fill_value = value.mean()
                value = value.filled()
                
                # Set observed flag to False, so that missing values will update
                self._observed = False
                
            except AttributeError:
                # Must not have missing values
                pass

        # This function will be used to evaluate self's log probability.
        self._logp_fun = logp

        # This function will be used to evaluate the gradients of self's log-probability.
        self._logp_partial_gradient_functions = logp_partial_gradients

        # This function will be used to draw values for self conditional on self's parents.
        self._random = random

        # A seed for self's rng. If provided, the initial value will be drawn
        # using it; otherwise the value passed to the constructor is used.
        self.rseed = rseed

        self.errmsg = "Stochastic %s's value is outside its support,\n or it forbids its parents' current values."%name

        dtype = np.dtype(dtype)

        # Initialize value, either from value provided or from random function.
        try:
            if dtype.kind != 'O' and value is not None:
                self._value = asanyarray(value, dtype=dtype)
                self._value.flags['W']=False
            else:
                self._value = value
        except:
            cls, inst, tb = sys.exc_info()
            new_inst = cls('Stochastic %s: Failed to cast initial value to required dtype.\n\nOriginal error message:\n'%name + inst.message)
            raise cls, new_inst, tb

        # Store the shape of the stochastic value
        self._shape = np.shape(self._value)
        
        Variable.__init__(  self,
                        doc=doc,
                        name=name,
                        parents=parents,
                        cache_depth=cache_depth,
                        trace=trace,
                        dtype=dtype,
                        plot=plot,
                        verbose=verbose)

        # self._logp.force_compute()
        
        self._shape = np.shape(self._value)

        if isinstance(self._value, ndarray):
            self._value.flags['W'] = False

        if check_logp:
            # Check initial value
            if not isinstance(self.logp, float):
                raise ValueError, "Stochastic " + self.__name__ + "'s initial log-probability is %s, should be a float." %self.logp.__repr__()


    def gen_lazy_function(self):
        """
        Will be called by Node at instantiation.
        """

        # If value argument to __init__ was None, draw value from random method.
        if self._value is None:

            # Use random function if provided
            if self._random is not None:
                self.value = self._random(**self._parents.value)

            # Otherwise leave initial value at None and warn.
            else:
                raise ValueError, 'Stochastic ' + self.__name__ + "'s value initialized to None; no initial value or random method provided."

        arguments = {}
        arguments.update(self.parents)
        arguments['value'] = self
        arguments = DictContainer(arguments)
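        # 'value' maps to self so the lazy logp is recomputed when self's own
        # value changes; self joins ultimate_args below for the same reason.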

        self._logp = LazyFunction(fun = self._logp_fun,
                                    arguments = arguments,
                                    ultimate_args = self.extended_parents | set([self]),
                                    cache_depth = self._cache_depth)
        self._logp.force_compute()

        
        self._logp_partial_gradients = {}
        for parameter, function in self._logp_partial_gradient_functions.iteritems():
            lazy_logp_partial_gradient = LazyFunction(fun = function,
                                            arguments = arguments,
                                            ultimate_args = self.extended_parents | set([self]),
                                            cache_depth = self._cache_depth)
            lazy_logp_partial_gradient.force_compute()
            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradient
        
    def get_value(self):
        # Define value attribute
        if self.verbose > 1:
            print '\t' + self.__name__ + ': value accessed.'    
        return self._value

    def get_stoch_value(self):
        if self.verbose > 1:
            print '\t' + self.__name__ + ': stoch_value accessed.'
        return self._value[self.mask]

    def set_value(self, value, force=False):
        # Record new value and increment counter

        # Value can't be updated if observed=True
        if self.observed and not force:
            raise AttributeError, 'Stochastic '+self.__name__+'\'s value cannot be updated if observed flag is set'

        if self.verbose > 0:
            print '\t' + self.__name__ + ': value set to ', value

        # Save current value as last_value
        # Don't copy because caching depends on the object's reference.
        self.last_value = self._value
        
        if self.mask is None:
            
            if self.dtype.kind != 'O':
                self._value = asanyarray(value, dtype=self.dtype)
                self._value.flags['W']=False
            else:
                self._value = value
        
        else:
            
            new_value = self.value.copy()
        
            new_value[self.mask] = asanyarray(value, dtype=self.dtype)[self.mask]
            self._value = new_value

        self.counter.click()

    value = property(fget=get_value, fset=set_value, doc="Self's current value.")
    
    def mask():
        doc = "Returns the mask for missing values"
        def fget(self):
            return self._mask
        return locals()
    mask = property(**mask())

    def shape():
        doc = "The shape of the value of self."
        def fget(self):
            if self.verbose > 1:
                print '\t' + self.__name__ + ': shape accessed.'
            return self._shape
        return locals()
    shape = property(**shape())

    def revert(self):
        """
        Sets self's value to self's last value. Bypasses the data cleaning in
        the set_value method.
        """
        self.counter.unclick()
        self._value = self.last_value


    def get_logp(self):

        if self.verbose > 0:
            print '\t' + self.__name__ + ': logp accessed.'
        logp = self._logp.get()

        try:
            logp = float(logp)
        except:
            raise TypeError, self.__name__ + ': computed log-probability ' + str(logp) + ' cannot be cast to float'

        if logp != logp:
            return -np.inf

        if self.verbose > 0:
            print '\t' + self.__name__ + ': Returning log-probability ', logp

        # Check if the value is smaller than a double precision infinity:
        if logp <= d_neg_inf:
            if self.verbose > 0:
                raise ZeroProbability, self.errmsg + "\nValue: %s\nParents' values:%s" % (
                    self._value, self._parents.value)
            else:
                raise ZeroProbability, self.errmsg

        return logp
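
# A hedged sketch of direct instantiation against the __init__ signature above;
# the unnormalized normal logp and the parent values are illustrative
# assumptions, not part of the class itself.
import numpy as np


def x_logp(value, mu, tau):
    # Unnormalized normal log-density.
    return -0.5 * tau * (value - mu) ** 2


def x_random(mu, tau):
    # Draw a value conditional on the parents' current values.
    return np.random.normal(mu, 1.0 / np.sqrt(tau))


X = Stochastic(logp=x_logp,
               doc='A normally-distributed variable.',
               name='X',
               parents={'mu': 0.0, 'tau': 1.0},
               random=x_random)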
Example #16
0
class Deterministic(DeterministicBase):
    """
    A variable whose value is determined by the values of its parents.

    Decorator instantiation:

    @dtrm(trace=True)
    def A(x = B, y = C):
        return sqrt(x ** 2 + y ** 2)

    :Parameters:
      eval : function
        The function that computes the variable's value from the values
        of its parents.
      doc : string
        The docstring for this variable.
      name: string
        The name of this variable.
      parents: dictionary
        A dictionary containing the parents of this variable.
      trace (optional): boolean
        A boolean indicating whether this variable's value
        should be traced (in MCMC).
      cache_depth (optional): integer
        An integer indicating how many of this variable's
        value computations should be 'memoized'.
      plot (optional) : boolean
        A flag indicating whether this variable is to be plotted.
      verbose (optional) : integer
        Level of output verbosity: 0=none, 1=low, 2=medium, 3=high
      jacobians (optional) : dict <string, function>
        analytical jacobian functions for the deterministic with respect to
        each named parameter; each function takes the same arguments as eval
      jacobian_formats (optional) : dict <string, string>
        formats of the jacobians returned by the jacobian functions for each parameter:
            'full' : the function returns the full jacobian
            'transformation_operation' : the function returns the elementwise jacobian of a one-to-one transformation
            'broadcast_operation' : the function returns the jacobian for an operation where the argument arrays are broadcast to each other
            'accumulation_operation' : the function returns the jacobian for an operation where the number of dimensions is reduced
            'index_operation' : the function returns the index used to select from the parameter
        the default is 'full'

    :Attributes:
      value : any object
        Returns the variable's value given its parents' values. Skips
        computation if possible.

    :SeeAlso:
      Stochastic, Potential, deterministic, MCMC, Lambda,
      LinearCombination, Index
    """
    __array_priority__ = 1000

    def __init__(self,
                 eval,
                 doc,
                 name,
                 parents,
                 dtype=None,
                 trace=True,
                 cache_depth=2,
                 plot=None,
                 verbose=None,
                 jacobians={},
                 jacobian_formats={}):
        self.ParentDict = ParentDict

        # This function gets used to evaluate self's value.
        self._eval_fun = eval
        self._jacobian_functions = jacobians
        self._jacobian_formats = jacobian_formats

        Variable.__init__(self,
                          doc=doc,
                          name=name,
                          parents=parents,
                          cache_depth=cache_depth,
                          dtype=dtype,
                          trace=trace,
                          plot=plot,
                          verbose=verbose)

        # self._value.force_compute()

    def gen_lazy_function(self):

        self._value = LazyFunction(fun=self._eval_fun,
                                   arguments=self.parents,
                                   ultimate_args=self.extended_parents,
                                   cache_depth=self._cache_depth)

        self._value.force_compute()

        self._jacobians = {}
        for parameter, function in self._jacobian_functions.iteritems():
            lazy_jacobian = LazyFunction(fun=function,
                                         arguments=self.parents,
                                         ultimate_args=self.extended_parents,
                                         cache_depth=self._cache_depth)
            lazy_jacobian.force_compute()
            self._jacobians[parameter] = lazy_jacobian

    def get_value(self):
        if self.verbose > 1:
            print '\t' + self.__name__ + ': value accessed.'
        _value = self._value.get()
        if isinstance(_value, ndarray):
            _value.flags['W'] = False
        if self.verbose > 1:
            print '\t' + self.__name__ + ': Returning value ', _value
        return _value

    def set_value(self, value):
        raise AttributeError, 'Deterministic ' + self.__name__ + '\'s value cannot be set.'

    value = property(
        fget=get_value,
        fset=set_value,
        doc="Self's value computed from current values of parents.")

    def apply_jacobian(self, parameter, variable, gradient):
        try:
            jacobian_func = self._jacobians[parameter]
        except KeyError:
            raise NotImplementedError(
                repr(self) + " has no jacobian function for parameter " +
                parameter)

        jacobian = jacobian_func.get()

        mapping = self._jacobian_formats.get(parameter, 'full')

        p = self._format_mapping[mapping](self, variable, jacobian, gradient)
        return p

    def logp_partial_gradient(self, variable, calculation_set=None):
        """
        Gets the gradient of the log-probability with respect to variable,
        propagated through this deterministic.
        """
        if self.verbose > 0:
            print '\t' + self.__name__ + ': logp_partial_gradient accessed.'

        if not (datatypes.is_continuous(variable)
                and datatypes.is_continuous(self)):
            return zeros(shape(variable.value))

        # Sum the children's gradients of log p with respect to self, then
        # push the total through the jacobian of each appropriate parameter.
        gradient = __builtin__.sum([
            child.logp_partial_gradient(self, calculation_set)
            for child in self.children
        ])

        total_gradient = 0
        for parameter, value in self.parents.iteritems():
            if value is variable:
                total_gradient += self.apply_jacobian(parameter, variable,
                                                      gradient)

        return np.reshape(total_gradient, shape(variable.value))

    def full_jacobian(self, variable, jacobian, gradient):
        return dot(np.transpose(jacobian), np.ravel(gradient)[:, np.newaxis])

    def transformation_operation_jacobian(self, variable, jacobian, gradient):
        return jacobian * gradient

    def broadcast_operation_jacobian(self, variable, jacobian, gradient):
        return calc_utils.sum_to_shape(id(variable), id(self),
                                       jacobian * gradient,
                                       shape(variable.value))

    def accumulation_operation_jacobian(self, variable, jacobian, gradient):
        for i in range(ndim(jacobian)):
            if i >= ndim(gradient) or shape(gradient)[i] != shape(
                    variable.value)[i]:
                # np.expand_dims returns a new array; rebind or the inserted
                # axis is lost.
                gradient = expand_dims(gradient, i)
        return gradient * jacobian

    def index_operation_jacobian(self, variable, jacobian, gradient):
        derivative = zeros(shape(variable.value))
        derivative[jacobian] = gradient
        return derivative

    _format_mapping = {
        'full': full_jacobian,
        'transformation_operation': transformation_operation_jacobian,
        'broadcast_operation': broadcast_operation_jacobian,
        'accumulation_operation': accumulation_operation_jacobian,
        'index_operation': index_operation_jacobian
    }
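
# A hedged sketch of direct instantiation with an analytic jacobian, matching
# the __init__ signature above; the scaling function and the
# 'transformation_operation' format choice are illustrative assumptions.
def scale(x):
    return 2.0 * x


def scale_jacobian_x(x):
    # d(2x)/dx, elementwise, as expected by the 'transformation_operation'
    # format (jacobian * gradient).
    return 2.0


D = Deterministic(eval=scale,
                  doc='Twice x.',
                  name='D',
                  parents={'x': 3.0},
                  jacobians={'x': scale_jacobian_x},
                  jacobian_formats={'x': 'transformation_operation'})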
Example #18
0
class Potential(PotentialBase):
    """
    Not a variable; just an arbitrary log-probability term to multiply into the
    joint distribution (equivalently, a term added to the joint log-probability).
    Useful for expressing models that aren't directed, such as Markov random fields.

    Decorator instantiation:

    @potential(trace = True)
    def A(x = B, y = C):
        return -.5 * (x-y)**2 / 3.

    Direct instantiation:

    :Parameters:

        -logp: function
              The function that computes the potential's log-probability from
              the values of its parents.

        -doc: string
              The docstring for this potential.

        -name: string
              The name of this potential.

        -parents: dictionary
              A dictionary containing the parents of this potential.

        -cache_depth (optional): integer
              An integer indicating how many of this potential's value computations
              should be 'memoized'.

        - plot (optional) : boolean
            A flag indicating whether this variable is to be plotted.

        - verbose (optional) : integer
              Level of output verbosity: 0=none, 1=low, 2=medium, 3=high


    Externally-accessible attribute:

        -logp: float
              Returns the potential's log-probability given its parents' values. Skips
              computation if possible.

    No methods.

    :SeeAlso: Stochastic, Node, LazyFunction, stoch, dtrm, data, Model, Container
    """
    def __init__(self,
                 logp,
                 doc,
                 name,
                 parents,
                 cache_depth=2,
                 plot=None,
                 verbose=None,
                 logp_partial_gradients={}):

        self.ParentDict = ParentDict

        # This function gets used to evaluate self's log-probability.
        self._logp_fun = logp
        self._logp_partial_gradients_functions = logp_partial_gradients

        self.errmsg = "Potential %s forbids its parents' current values" % name

        Node.__init__(self,
                      doc=doc,
                      name=name,
                      parents=parents,
                      cache_depth=cache_depth,
                      verbose=verbose)

        self._plot = plot

        # self._logp.force_compute()

        # Check initial value
        if not isinstance(self.logp, float):
            raise ValueError, "Potential " + self.__name__ + "'s initial log-probability is %s, should be a float." % self.logp.__repr__(
            )

    def gen_lazy_function(self):

        self._logp = LazyFunction(fun=self._logp_fun,
                                  arguments=self.parents,
                                  ultimate_args=self.extended_parents,
                                  cache_depth=self._cache_depth)
        self._logp.force_compute()

        self._logp_partial_gradients = {}
        for parameter, function in self._logp_partial_gradients_functions.iteritems():
            lazy_logp_partial_gradients = LazyFunction(
                fun=function,
                arguments=self.parents,
                ultimate_args=self.extended_parents,
                cache_depth=self._cache_depth)
            lazy_logp_partial_gradients.force_compute()
            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradients

    def get_logp(self):
        if self.verbose > 1:
            print '\t' + self.__name__ + ': log-probability accessed.'
        logp = self._logp.get()
        if self.verbose > 1:
            print '\t' + self.__name__ + ': Returning log-probability ', logp

        try:
            logp = float(logp)
        except:
            raise TypeError, self.__name__ + ': computed log-probability ' + str(logp) + ' cannot be cast to float'

        if logp != logp:
            raise ValueError, self.__name__ + ': computed log-probability is NaN'

        # Check if the value is smaller than a double precision infinity:
        if logp <= d_neg_inf:
            if self.verbose > 0:
                raise ZeroProbability, self.errmsg + ": %s" % self._parents.value
            else:
                raise ZeroProbability, self.errmsg

        return logp

    def set_logp(self, value):
        raise AttributeError, 'Potential ' + self.__name__ + '\'s log-probability cannot be set.'

    logp = property(fget=get_logp,
                    fset=set_logp,
                    doc="Self's log-probability value conditional on parents.")

    def logp_partial_gradient(self, variable, calculation_set=None):
        gradient = 0
        # Guard against calculation_set=None, mirroring Stochastic.logp_partial_gradient.
        if (calculation_set is None) or (self in calculation_set):

            if not datatypes.is_continuous(variable):
                return zeros(shape(variable.value))

            for parameter, value in self.parents.iteritems():

                if value is variable:
                    try:
                        grad_func = self._logp_partial_gradients[parameter]
                    except KeyError:
                        raise NotImplementedError(
                            repr(self) +
                            " has no gradient function for parameter " +
                            parameter)

                    gradient = gradient + grad_func.get()

        return np.reshape(gradient, np.shape(variable.value))
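
# A hedged sketch of direct instantiation mirroring the decorator example in
# the docstring above; the parent values here are illustrative assumptions.
def closeness(x, y):
    # A soft, undirected constraint tying x and y together.
    return -0.5 * (x - y) ** 2 / 3.0


psi = Potential(logp=closeness,
                doc='Soft constraint tying x to y.',
                name='psi',
                parents={'x': 1.0, 'y': 1.2})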