Example #1
def hastings_scores(opqr, pscale=None):
    pred, succ, prop, revp = opqr.o, opqr.p, opqr.q, opqr.r
    message = "No valid scalar probability distribution found"
    assert succ is not None, message
    assert isscalar(succ.prob), message
    if pred is None:
        return None
    assert isscalar(pred.prob), "Preceding probability non-scalar"
    if prop is None:
        return None
    else:
        assert isscalar(prop.prob), "Proposal probability non-scalar"
        prop = rescale(prop.prob, pscale, 1.)
        if prop <= 0.:
            return None
        if revp is None:
            return min(
                1., div_prob(succ.prob, pred.prob, pscale, pscale, pscale=1.))
        else:
            assert isscalar(
                revp.prob), "Reverse proposal probability non-scalar"
            revp = rescale(revp.prob, pscale, 1.)
            if revp <= 0.:
                return 1.
            return min(
                1.,
                div_prob(succ.prob * prop,
                         pred.prob * revp,
                         pscale,
                         pscale,
                         pscale=1.))
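With pscale=None (linear probabilities), the score above reduces to min(1, (succ·prop)/(pred·revp)). A minimal standalone check of that arithmetic, using made-up probabilities rather than the library's opqr container:

# Hedged sketch: the raw Hastings ratio on a linear probability scale.
succ_p, pred_p = 0.3, 0.2   # succ.prob and pred.prob
prop_p, revp_p = 0.5, 0.4   # forward and reverse proposal probabilities
score = min(1., (succ_p * prop_p) / (pred_p * revp_p))
print(score)                # min(1, 0.15 / 0.08) = 1.0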
Example #2
    def set_func(self, func=None, *args, **kwds):
        """ Set the Func instance's function object.

    :param func: an uncallable object, callable function, or tuple of functions
    :param *args: arguments to pass onto callables
    :param **kwds: keywords to pass onto callables

    Note that the following two reserved keywords are disallowed:

    'order': which instead denotes a dictionary of remappings.
    'delta': which instead denotes a mapping of differences.

    A special case is made for func instances that are Scipy multivariate objects.
    """

        self._func = func
        self._args = tuple(args)
        self._kwds = dict(kwds)
        self.__order = None
        self.__delta = None
        self.__callable = None
        self.__scipyobj = None
        self.__isscipy = False

        # Sanity check func
        if self._func is None:
            assert not args and not kwds, "No optional args without a function"
        self.__ismulti = isinstance(self._func, tuple)
        self.__isscalar = False
        if not self.__ismulti:
            self.__callable = callable(self._func)
            if not self.__callable:
                assert not args and not kwds, "No optional args with uncallable function"
                self.__isscalar = isscalar(self._func)
        else:
            func_callable = [callable(func) for func in self._func]
            func_isscalar = [isscalar(func) for func in self._func]
            assert len(set(func_callable)) < 2, \
                "Cannot mix callable and uncallable functions"
            assert len(set(func_isscalar)) < 2, \
                "Cannot mix scalars and nonscalars"
            if len(func_callable):
                self.__callable = func_callable[0]
                self.__isscalar = func_isscalar[0]
            if not self.__callable:
                assert not args and not kwds, "No optional args with uncallable function"
        if is_scipy_stats_mvar(self._func):
            self.__isscipy = True
            self.__scipyobj = self._func(*args, **kwds)
            self.__scipycalls = {
                0: self.__scipyobj.pdf,
                1: self.__scipyobj.logpdf,
                2: self.__scipyobj.cdf,
                3: self.__scipyobj.logcdf,
                4: self.__scipyobj.rvs,
            }
        if 'order' in self._kwds:
            self.set_order(self._kwds.pop('order'))
        if 'delta' in self._kwds:
            self.set_delta(self._kwds.pop('delta'))
Example #3
def metropolis_scores(opqr, pscale=None):
    pred, succ = opqr.o, opqr.p
    message = "No valid scalar probability distribution found"
    assert succ is not None, message
    assert isscalar(succ.prob), message
    if pred is None:
        return None
    assert isscalar(pred.prob), "Preceding probability distribution non-scalar"
    return min(1., div_prob(succ.prob, pred.prob, pscale, pscale, pscale=1.))
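On the same linear scale, the Metropolis score is simply min(1, succ/pred); a one-line numeric check with illustrative values:

# Hedged sketch of the Metropolis ratio with pscale=None.
succ_p, pred_p = 0.3, 0.2
print(min(1., succ_p / pred_p))   # 1.0, i.e. the move is always accepted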
Example #4
def matrix_cond_sample(pred_vals, succ_vals, prob, vset=None):
    """ Returns succ_vals with sampling """
    if not isunitset(succ_vals):
        return succ_vals
    assert isscalar(pred_vals), \
        "Can only cumulatively sample from a single predecessor"
    assert prob.ndim == 2 and len(set(prob.shape)) == 1, \
        "Transition matrix must be square"
    support = prob.shape[0]
    if vset is None:
        vset = set(range(support))
    else:
        assert len(vset) == support, \
            "Transition matrix size {} incommensurate with set support {}".\
            format(support, len(vset))
    vset = sorted(vset)
    pred_idx = vset.index(pred_vals)
    cmf = np.cumsum(prob[:, pred_idx], axis=0)
    succ_cmf = list(succ_vals)[0]
    if type(succ_cmf) in VTYPES[int]:
        succ_cmf = uniform(0., 1., succ_cmf)
    else:
        succ_cmf = np.atleast_1d(succ_cmf)
    succ_idx = np.maximum(0, np.minimum(support - 1,
                                        np.digitize(succ_cmf, cmf)))
    return vset[succ_idx], pred_idx, succ_idx
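The cumulative sampling step can be reproduced without the library: take the predecessor's column of a column-stochastic transition matrix, form its CMF, and invert a uniform draw with np.digitize. A self-contained sketch with illustrative values:

import numpy as np

prob = np.array([[0.7, 0.2, 0.1],
                 [0.2, 0.5, 0.4],
                 [0.1, 0.3, 0.5]])    # prob[i, j] = P(succ = i | pred = j)
pred_idx = 0                          # scalar predecessor state
cmf = np.cumsum(prob[:, pred_idx])    # CMF down the predecessor's column
u = np.random.uniform(0., 1.)
succ_idx = int(np.clip(np.digitize(u, cmf), 0, len(cmf) - 1))
print(succ_idx)                       # e.g. 0 with probability 0.7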
Example #5
  def __call__(self, values=None):
    """ Return a probability distribution for the quantities in values. """
    dist_name = self.eval_dist_name(values)
    vals = self.evaluate(values)
    prob = self.eval_prob(vals)
    dims = {self._name: None} if isscalar(vals[self.name]) else {self._name: 0}
    return PD(dist_name, vals, dims=dims, prob=prob, pscale=self._pscale)
Example #6
def lookup_square_matrix(col_vals,
                         row_vals,
                         sq_matrix,
                         vset=None,
                         col_idx=None,
                         row_idx=None):
    assert sq_matrix.ndim == 2 and len(set(sq_matrix.shape)) == 1, \
        "Transition matrix must be square"
    support = sq_matrix.shape[0]
    if vset is None:
        vset = list(range(support))
    else:
        assert len(vset) == support, \
            "Transition matrix size {} incommensurate with set support {}".\
            format(support, len(vset))
        vset = sorted(vset)
    rc_scalar = False
    if row_idx is None:
        if isscalar(row_vals):
            rc_scalar = True
            row_idx = vset.index(row_vals)
        else:
            if isinstance(row_vals, np.ndarray):
                row_vals = np.ravel(row_vals).tolist()
            row_idx = [vset.index(row_val) for row_val in row_vals]
    elif isscalar(row_idx):
        rc_scalar = True
    if col_idx is None:
        if isscalar(col_vals):
            rc_scalar = True
            col_idx = vset.index(col_vals)
        else:
            if isinstance(col_vals, np.ndarray):
                col_vals = np.ravel(col_vals).tolist()
            col_idx = [vset.index(col_val) for col_val in col_vals]
    elif isscalar(col_idx):
        rc_scalar = True
    if rc_scalar:
        return sq_matrix[row_idx, col_idx]
    mat = np.empty([len(row_idx), len(col_idx)], dtype=sq_matrix.dtype)
    for i, row in enumerate(row_idx):
        mat[i] = sq_matrix[row][col_idx]
    return mat
Example #7
def dims_from_vals(vals_dict):
    if not isinstance(vals_dict, dict):
        raise TypeError("Dictionary type expected")
    dims = collections.OrderedDict()
    run_dim = 0
    for key, val in vals_dict.items():
        if isscalar(val):
            dims.update({key: None})
        else:
            assert val.size == np.max(val.shape), \
                "Multiple non-singleton dimensions: {}".format(val.size)
            if val.size > 1:
                run_dim = np.argmax(val.shape)
            dims.update({key: run_dim})
            run_dim += 1
    return dims
Example #8
    def eval_step(self, pred_vals, succ_vals, reverse=False):
        """ Returns adjusted succ_vals """

        # Evaluate deltas if required
        if succ_vals is None:
            if self._delta is None:
                pred_values = list(pred_vals.values())
                if all([isscalar(pred_value) for pred_value in pred_values]):
                    raise ValueError(
                        "Stochastic step sampling not supported for Field; use RF"
                    )
                else:
                    succ_vals = pred_vals
            else:
                succ_vals = self.eval_delta()
        elif isinstance(succ_vals, Expression) or \
            isinstance(succ_vals, (tuple, self._delta_type)):
            succ_vals = self.eval_delta(succ_vals)

        # Apply deltas
        cond = None
        if isinstance(succ_vals, self._delta_type):
            succ_vals = self.apply_delta(pred_vals, succ_vals)
        elif isunitsetint(succ_vals):
            raise ValueError(
                "Stochastic step sampling not supported for Field; use RF")

        # Initialise outputs with predecessor values
        dims = {}
        kwargs = {'reverse': reverse}
        if cond is not None:
            kwargs = {'cond': cond}
        vals = collections.OrderedDict()
        for key in self._keylist:
            vals.update({key: pred_vals[key]})
        if succ_vals is None:
            return vals, dims, kwargs

        # If stepping, add successor values
        for key in self._keylist:
            mod_key = key + "'"
            succ_key = key if mod_key not in succ_vals else mod_key
            vals.update({key + "'": succ_vals[succ_key]})

        return vals, dims, kwargs
Example #9
    def eval_step(self, pred_vals, succ_vals, reverse=False):
        """ Returns adjusted succ_vals """
        if succ_vals is None:
            if self._delta is None:
                if all([isscalar(pred_value)
                        for pred_value in pred_vals.values()]):
                    succ_vals = {0}

        # If not sampling succeeding values, use deterministic call
        if not isunitsetint(succ_vals):
            return super().eval_step(pred_vals, succ_vals, reverse=reverse)

        if self._tfun is not None and self._tfun.callable:
            succ_vals = self.eval_tfun(pred_vals)
        elif self._nvars == 1:
            var = self._varlist[0]
            tran = var.tran
            tfun = var.tfun
            if (tran is not None and not tran.callable) or \
                (tfun is not None and tfun.callable):
                vals, dims, kwargs = var.eval_step(pred_vals[var.name],
                                                   succ_vals,
                                                   reverse=reverse)
                return vals, dims, kwargs
            raise ValueError("Transitional CDF calling requires callable tfun")
        else:
            raise ValueError("Transitional CDF calling requires callable tfun")

        # Initialise outputs with predecessor values
        dims = {}
        kwargs = {'reverse': reverse}
        vals = collections.OrderedDict()
        for key in self._keylist:
            vals.update({key: pred_vals[key]})
        if succ_vals is None and self._tran is None:
            return vals, dims, kwargs

        # If stepping or have a transition function, add successor values
        for key in self._keylist:
            mod_key = key + "'"
            succ_key = key if mod_key not in succ_vals else mod_key
            vals.update({key + "'": succ_vals[succ_key]})

        return vals, dims, kwargs
Example #10
    def step(self, *args, **kwds):
        """ Returns a proposal distribution p(args[1]) given args[0], depending on
    whether using self._prop, that denotes a simple proposal distribution,
    or self._tran, that denotes a transitional distirbution. """

        reverse = False if 'reverse' not in kwds else kwds.pop('reverse')
        pred_vals, succ_vals = None, None
        if len(args) == 1:
            if isinstance(args[0], (list, tuple)) and len(args[0]) == 2:
                pred_vals, succ_vals = args[0][0], args[0][1]
            else:
                pred_vals = args[0]
        elif len(args) == 2:
            pred_vals, succ_vals = args[0], args[1]

        # Evaluate predecessor values
        if not isinstance(pred_vals, dict):
            pred_vals = {key: pred_vals for key in self._keyset}
        pred_vals = self.parse_args(pred_vals, pass_all=True)
        dist_pred_name = self.eval_dist_name(pred_vals)
        pred_vals, pred_dims = self.evaluate(pred_vals)

        # Default successor values if None and delta is None
        if succ_vals is None and self._delta is None:
            pred_values = list(pred_vals.values())
            if all([isscalar(pred_value) for pred_value in pred_values]):
                succ_vals = {0}
            else:
                succ_vals = pred_vals

        # Evaluate successor values
        vals, dims, kwargs = self.eval_step(pred_vals,
                                            succ_vals,
                                            reverse=reverse)
        succ_vals = {
            key[:-1]: val
            for key, val in vals.items() if key[-1] == "'"
        }
        cond = self.eval_tran(vals, **kwargs)
        dist_succ_name = self.eval_dist_name(succ_vals, "'")
        dist_name = '|'.join([dist_succ_name, dist_pred_name])

        return PD(dist_name, vals, dims=dims, prob=cond, pscale=self._pscale)
Example #11
def uniform_prob(*args, prob=None, inside=None, pscale=1.):
    """ Uniform probability function for discrete and continuous vtypes. """

    # Detect pscale, default to prob if no values, otherwise detect vtype
    assert len(args) >= 1, "Minimum of a single positional argument"
    pscale = eval_pscale(pscale)
    use_logs = iscomplex(pscale)
    if prob is None:
        prob = 0. if use_logs else 1.
    vals = args[0]
    if vals is None:
        return prob
    vtype = eval_vtype(vals) if callable(inside) else eval_vtype(inside)

    # Set inside function by vtype if not specified
    if not callable(inside):
        lims = inside  # capture the support before rebinding the name
        if vtype in VTYPES[float]:
            inside = lambda x: np.logical_and(x >= min(lims), x <= max(lims))
        else:
            inside = lambda x: np.isin(x, lims)

    # If scalar, check within variable set
    p_zero = NEARLY_NEGATIVE_INF if use_logs else 0.
    if isscalar(vals):
        prob = prob if inside(vals) else p_zero

    # Otherwise treat as uniform within range
    else:
        p_true = prob
        prob = np.tile(p_zero, vals.shape)
        prob[inside(vals)] = p_true

    # This section below is there just to play nicely with conditionals
    if len(args) > 1:
        for arg in args[1:]:
            if use_logs:
                prob = prob + uniform_prob(arg, inside=inside, pscale=0.j)
            else:
                prob = prob * uniform_prob(arg, inside=inside)
    return prob
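Stripped of pscale handling, the masking logic above amounts to: points inside the support keep prob, everything else gets the zero probability. A reduced sketch assuming a float vtype and an interval support:

import numpy as np

def uniform_mask(vals, lims, prob=1.):
    # Reduced stand-in for uniform_prob with pscale=1. and interval support.
    inside = np.logical_and(vals >= min(lims), vals <= max(lims))
    return np.where(inside, prob, 0.)

print(uniform_mask(np.array([-1., 0.5, 2.]), (0., 1.)))   # [0. 1. 0.]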
Example #12
    def eval_delta(self, delta=None):
        """ Evaluates the value(s) of a delta operation without applying them.

    :param delta: delta value(s) to offset (see Variable.apply_delta).
    :return: the evaluated delta offset values.
    :rtype: Variable.delta

    If delta is not entered, then the default set by Variable.set_delta() is used.
    """
        delta = delta or self._delta
        if delta is None:
            return None
        if isinstance(delta, Expression):
            if delta.ret_callable():
                return delta
            delta = delta()
        if isinstance(delta, self._Delta):
            delta = delta[0]
        orand = isinstance(delta, tuple)
        urand = isinstance(delta, list)
        if orand:
            assert len(delta) == 1, "Tuple delta must contain one element"
            delta = delta[0]
            if self._vtype not in VTYPES[bool]:
                delta = delta if np.random.uniform() > 0.5 else -delta
        elif urand:
            assert len(delta) == 1, "List delta must contain one element"
            delta = delta[0]
            if self._vtype in VTYPES[bool]:
                pass
            elif self._vtype in VTYPES[int]:
                delta = np.random.randint(-delta, delta)
            else:
                delta = np.random.uniform(-delta, delta)
        assert isscalar(delta), "Unrecognised delta type: {}".format(delta)
        if delta == self._delta and self._delta_kwds['scale']:
            assert np.isfinite(self._length), "Cannot scale by infinite length"
            delta *= self._length
        return self._Delta(delta)
Example #13
    def evaluate(self, *args, _skip_parsing=False, min_dim=0, **kwds):
        """ 
    Keep args and kwds since could be called externally. This ignores self._prob.
    """
        values = self.parse_args(*args, **
                                 kwds) if not _skip_parsing else args[0]
        dims = collections.OrderedDict()

        # Don't reshape if all scalars (and therefore by definition no shared keys)
        if all([np.isscalar(value) for value in values.values()]):
            return values, dims

        # Create reference mapping for shared keys across vars
        values_ref = collections.OrderedDict(
            {key: [key, None]
             for key in self._keylist})
        for key in values.keys():
            if ',' in key:
                subkeys = key.split(',')
                for i, subkey in enumerate(subkeys):
                    values_ref[subkey] = [key, i]

        # Share dimensions for joint variables and do not dimensionalise scalars
        ndim = min_dim
        dims = collections.OrderedDict({key: None for key in self._keylist})
        seen_keys = set()
        for i, key in enumerate(self._keylist):
            new_dim = False
            if values_ref[key][1] is None:  # i.e. not shared
                if not isdimensionless(values[key]):
                    dims[key] = ndim
                    new_dim = True
                seen_keys.add(key)
            elif key not in seen_keys:
                val_ref = values_ref[key]
                subkeys = val_ref[0].split(',')
                for subkey in subkeys:
                    dims[subkey] = ndim
                    seen_keys.add(subkey)
                if not isdimensionless(values[val_ref[0]][val_ref[1]]):
                    new_dim = True
            if new_dim:
                ndim += 1

        # Reshape
        vdims = [dim for dim in dims.values() if dim is not None]
        ndims = max(vdims) + 1 if len(vdims) else 0
        ones_ndims = np.ones(ndims, dtype=int)
        vals = collections.OrderedDict()
        for i, var in enumerate(self._varlist):
            key = var.name
            reshape = True
            if key in values.keys():
                vals.update({key: values[key]})
                reshape = not np.isscalar(vals[key])
                if vals[key] is None or isinstance(vals[key], set):
                    vals.update(var.evaluate(vals[key]))
            else:
                val_ref = values_ref[key]
                vals_val = values[val_ref[0]][val_ref[1]]
                if vals_val is None or isinstance(vals_val, set):
                    vals.update(var.evaluate(vals_val))
                else:
                    vals.update({key: vals_val})
            if reshape and not isscalar(vals[key]):
                re_shape = np.copy(ones_ndims)
                re_dim = dims[key]
                re_shape[re_dim] = vals[key].size
                vals[key] = vals[key].reshape(re_shape)

        # Remove dimensionality for singletons
        for key in self._keylist:
            if issingleton(vals[key]):
                dims[key] = None
        return vals, dims
Example #14
    def set_expr(self, expr=None, *args, **kwds):
        """ Set the Func instance's function object.

    :param expr: an uncallable object, callable function, or tuple of functions
    :param *args: arguments to pass onto callables
    :param **kwds: keywords to pass onto callables

    Note that the following two reserved keywords are disallowed:

    'order': which instead denotes a dictionary of remappings.
    'delta': which instead denotes a mapping of differences.
    """
        self._expr = expr
        self._args = tuple(args)
        self._kwds = dict(kwds)
        self._callable = None
        self._islambda = None
        self._ismulti = None
        self._isscalar = None
        self._isiconic = None
        self.__order = None
        self.__delta = None
        self.__inverse = None
        self.__invexpr = None
        self.__invderi = None
        self.__invertible = False if 'invertible' not in kwds else \
                            self._kwds.pop('invertible')
        self._exprs = collections.OrderedDict()
        self._symbols = collections.OrderedDict()

        # Sanity check func
        if self._expr is None:
            assert not args and not kwds, "No optional args without a function"
        self._isscalar = isscalar(self._expr)
        self._isiconic = isiconic(self._expr)
        self._ismulti = isinstance(self._expr, (dict, tuple))
        if self.__invertible:
            assert not self._ismulti,\
                "Cannot invert for multiple expressions"

        # Non-multi iconic
        if self._isiconic:
            self._exprs.update(
                {None: Expr(self._expr, *self._args, **self._kwds)})
            self._symbols.update(self._exprs[None].symbols)
            self._ismulti = self.__invertible and \
                            len(self._exprs[None].symbols) == 1
            self._callable = True
            self._islambda = False

        # Unitary
        elif not self._ismulti:
            self._callable = callable(self._expr)
            self._islambda = islambda(self._expr)
            if not self._callable:
                assert not args and not kwds, \
                    "No optional arguments with uncallable expressions"
                self._isscalar = isscalar(self._expr)

        # Multi
        else:
            exprs = self._expr if isinstance(self._expr, tuple) else \
                    self._expr.values()
            self._callable = False
            self._islambda = False
            self._isscalar = False
            self._isiconic = False
            each_callable = [callable(expr) for expr in exprs]
            each_islambda = [islambda(expr) for expr in exprs]
            each_isscalar = [isscalar(expr) for expr in exprs]
            each_isiconic = [isiconic(expr) for expr in exprs]
            assert len(set(each_callable)) < 2, \
                "Cannot mix callable and uncallable expressions"
            assert len(set(each_islambda)) < 2, \
                "Cannot mix lambda and non-lambda expressions"
            assert len(set(each_isscalar)) < 2, \
                "Cannot mix scalars and nonscalars"
            assert len(set(each_isiconic)) < 2, \
                "Cannot mix iconics and non-iconics"
            if len(self._expr):
                self._callable = each_callable[0]
                self._islambda = each_islambda[0]
                self._isscalar = each_isscalar[0]
                self._isiconic = each_isiconic[0]
            if not self._callable:
                assert not args and not kwds, "No optional args with uncallable function"
            if self._isiconic:
                assert not args and not kwds, "No optional args with iconic function"
                if isinstance(self._expr, tuple):
                    for i, expr in enumerate(self._expr):
                        self._exprs.update(
                            {i: Expr(expr, *self._args, **self._kwds)})
                        self._symbols.update(self._exprs[i].symbols)
                elif isinstance(self._expr, dict):
                    for key, val in self._expr.items():
                        self._exprs.update(
                            {key: Expr(val, *self._args, **self._kwds)})
                        self._symbols.update(self._exprs[key].symbols)
        if 'order' in self._kwds:
            self.set_order(self._kwds.pop('order'))
        if 'delta' in self._kwds:
            self.set_delta(self._kwds.pop('delta'))
        self._set_partials()
        self._keys = list(self._partials.keys())
Example #15
    def set_prob(self, prob=None, *args, **kwds):
        """ Sets the probability and pscale with optional arguments and keywords.

    :param prob: may be a scalar, array, or callable function.
    :param *args: optional arguments to pass if prob is callable.
    :param **kwds: optional keywords to pass if prob is callable.

    'pscale' is a reserved keyword. See set_pscale() for explanation of how 
    pscale is used.
    """
        pscale = None if 'pscale' not in kwds else kwds.pop('pscale')
        self.pscale = pscale or self._pscale
        self.__isscipy = is_scipy_stats_dist(prob)
        self.__issympy = is_sympy_stats_dist(prob)
        self.__issmvar = is_scipy_stats_mvar(prob)

        # Probabilities can be defined as ordinary expressions, but iconise scalars
        if not self.__isscipy and not self.__issympy:
            prob_scalar = isscalar(prob)
            self._prob = prob if not prob_scalar else sympy.Float(prob)
            self.set_expr(self._prob, *args, **kwds)

            # Set probability callers to expression
            if not prob_scalar:
                self._prob = self._expr
            return  # Expression() takes care of all partials

        # Scipy/SymPy expressions (self._expr set by _set_partials() later)
        if self.__issmvar or self.__issympy:
            self._expr = prob
            self._ismulti = True
            self._callable = True
            self._islambda = False
            self._isscalar = False
        else:
            self.set_expr(prob, *args, **kwds)
        self._args = tuple(args)
        self._kwds = dict(kwds)
        self._prob = self._expr
        if 'order' in self._kwds:
            self.set_order(self._kwds.pop('order'))
        if 'delta' in self._kwds:
            self.set_delta(self._kwds.pop('delta'))
        self._set_partials()

        # Scipy dist - set pfun and sfun calls - self._prob is updated but not used
        if self.__isscipy:
            self._ismulti = True
            self._prob = self._partials['logp'] if self._logp \
                         else self._partials['prob']
            if 'cdf' in self._keys and 'ppf' in self._keys:
                self.set_pfun((self._expr.cdf, self._expr.ppf), *self._args,
                              **self._kwds)
            if 'rvs' in self._keys and hasattr(self._expr, 'rvs'):
                self.set_sfun(self._expr.rvs, *self._args, **self._kwds)

        # Sympy dist - set pfun and sfun calls - self._prob is updated but not used
        elif self.__issympy:
            self._prob = self._partials['logp'].expr if self._logp \
                         else self._partials['prob'].expr
            if 'cdf' in self._keys and 'icdf' in self._keys:
                self.set_pfun((self._partials['cdf'], self._partials['icdf']))
            if 'sfun' in self._keys:
                self._partials.update({
                    'sfun':
                    functools.partial(sympy_sfun, self._distr, dtype=float)
                })
            self.set_sfun(sympy_sfun, self._expr)
Example #16
  def eval_step(self, pred_vals, succ_vals, reverse=False):
    """ Evaluates a successive values from previous values with an optional
    direction reversal flag, outputting a three-length tuple that includes the
    successive values in the first argument.

    :param pred_vals: predecessor values (NumPy array).
    :param succ_vals: succecessor values (see step()).
    :param reverse: boolean flag (default False) to reverse direction.

    :return vals: a dictionary including both predecessor and successor values.
    :return dims: a dictionary with dimension indices for the values in vals.
    :return kwargs: a dictionary that includes optional keywords for eval_tran()
    """

    if succ_vals is None:
      assert self._tran is not None, "No transitional function specified"
    if isinstance(pred_vals, dict):
      pred_vals = pred_vals[self.name]
    kwargs = dict() # to pass over to eval_tran()
    if succ_vals is None:
      if self._delta is None:
        succ_vals = {0} if isscalar(pred_vals) else pred_vals
      else:
        delta = self.eval_delta()
        succ_vals = self.apply_delta(pred_vals, delta)

    #---------------------------------------------------------------------------
    def _reshape_vals(pred, succ):
      dims = {}
      ndim = 0

      # Now reshape the values according to succ > prev dimensionality
      if issingleton(succ):
        dims.update({self._name+"'": None})
      else:
        dims.update({self._name+"'": ndim})
        ndim += 1
      if issingleton(pred):
        dims.update({self._name: None})
      else:
        dims.update({self._name: ndim})
        ndim += 1

      if ndim == 2: # pred_vals distributed along inner dimension:
        pred = pred.reshape([1, pred.size])
        succ = succ.reshape([succ.size, 1])
      return pred, succ, dims

    #---------------------------------------------------------------------------
    # Scalar treatment is the most trivial and ignores reverse
    if self._tran is None or self._tran.isscalar:
      if isunitsetint(succ_vals):
        succ_vals = self.evaluate(succ_vals, use_pfun=False)[self._name]
      elif isunitsetfloat(succ_vals):
        assert self._vtype in VTYPES[float], \
            "Inverse CDF sampling for scalar probabilities unavailable for " + \
            "{} data type".format(self._vtype)
        cdf_val = list(succ_vals)[0]
        lo, hi = min(self._limits), max(self._limits)
        succ_vals = lo*(1.-cdf_val) + hi*cdf_val
        if self._ufun is not None:
          succ_vals = self.ufun[-1](succ_vals)

      prob = self._tran() if self._tran is not None else None
      pred_vals, succ_vals, dims = _reshape_vals(pred_vals, succ_vals)
                  
    # Handle discrete non-callables
    elif not self._tran.callable:
      if reverse and not self._tran.ismulti and not self.__sym_tran:
        warnings.warn("Reverse direction called from asymmetric transitional")
      prob = self._tran() if not self._tran.ismulti else \
             self._tran[int(reverse)]()
      if isunitset(succ_vals):
        succ_vals, pred_idx, succ_idx = matrix_cond_sample(pred_vals, 
                                                           succ_vals, 
                                                           prob=prob, 
                                                           vset=self._vset) 
        kwargs.update({'pred_idx': pred_idx, 'succ_idx': succ_idx})
      pred_vals, succ_vals, dims = _reshape_vals(pred_vals, succ_vals)

    # That just leaves callables
    else:
      kwds = {self._name: pred_vals}
      if isunitset(succ_vals):
        assert self._tfun is not None, \
            "Conditional sampling requires setting CDF and ICDF " + \
            "conditional functions using rv.set.tfun()"
        assert isscalar(pred_vals), \
            "Successor sampling only possible with scalar predecessors"
        succ_vals = list(succ_vals)[0]
        if type(succ_vals) in VTYPES[int] or type(succ_vals) in VTYPES[np.uint]:
          lo, hi = min(self._ulims), max(self._ulims)
          kwds.update({self._name+"'": np.array([lo, hi], dtype=float)})
          lohi = self._tfun[0](**kwds)
          lo, hi = float(min(lohi)), float(max(lohi))
          succ_vals = uniform(lo, hi, succ_vals,
                              isinstance(self._vset[0], tuple),
                              isinstance(self._vset[1], tuple))
        else:
          succ_vals = np.atleast_1d(succ_vals)
        kwds.update({self._name: pred_vals,
                     self._name+"'": succ_vals})
        succ_vals = self._tfun[1](**kwds)
      elif not isscalar(succ_vals):
        succ_vals = np.atleast_1d(succ_vals)
      pred_vals, succ_vals, dims = _reshape_vals(pred_vals, succ_vals)

    vals = collections.OrderedDict({self._name+"'": succ_vals,
                                    self._name: pred_vals})
    kwargs.update({'reverse': reverse})
    return vals, dims, kwargs
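The _reshape_vals helper encodes a broadcasting convention worth making explicit: successor values run along the outer (first) axis and predecessor values along the inner (second) axis, so a transition array indexes as prob[succ, pred]. A tiny NumPy illustration:

import numpy as np

pred = np.array([1., 2., 3.]).reshape(1, 3)   # predecessors: inner dimension
succ = np.array([4., 5.]).reshape(2, 1)       # successors: outer dimension
print((succ - pred).shape)                    # (2, 3): rows index successors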
Example #17
def slice_by_keyvals(spec, vals, prob, vals_dims=None, spec_dims=None):
    """ Slices prob by values of spec in vals.

  :param spec: dictionary of {key:val} to match with vals.
  :param vals: dictionary of {key:val} describing prob.
  :param prob: a multidimensional NumPy array to slice.
  :param vals_dims: dictionary of dimensional description for vals.
  :param spec_dims: dictionary of dimensional description for spec.

  If vals_dims and/or spec_dims are not entered, they are 'guessed' from vals
  and spec respectively, but correct guessing is not assured. Non-none dimensions
  for vals_dims and spec_dims must be mutually ordered monotonically by key.
  """

    # Check for consistent keys
    keys = list(spec.keys())
    assert set(keys) == set(vals.keys()), "Keys for spec and vals unmatched"

    # Function to default dimensions if not given
    def dims_from_vals(vals_dict):
        if not isinstance(vals_dict, dict):
            raise TypeError("Dictionary type expected")
        dims = collections.OrderedDict()
        run_dim = 0
        for key, val in vals_dict.items():
            if isscalar(val):
                dims.update({key: None})
            else:
                assert val.size == np.max(val.shape), \
                    "Multiple non-singleton dimensions: {}".format(val.size)
                if val.size > 1:
                    run_dim = np.argmax(val.shape)
                dims.update({key: run_dim})
                run_dim += 1
        return dims

    # Default spec_dims and vals_dims if not given
    if spec_dims is None:
        spec_dims = dims_from_vals(spec)
    else:
        assert set(spec.keys()) == set(spec_dims.keys()), \
            "Keys for spec and spec_dims unmatched"
    if vals_dims is None:
        vals_dims = dims_from_vals(vals)
    else:
        assert set(vals.keys()) == set(vals_dims.keys()), \
            "Keys for vals and vals_dims unmatched"

    # Determine maximum dimensionality of input
    vals_ndim = 0
    for dim in vals_dims.values():
        if dim:
            vals_ndim = max(vals_ndim, dim)

    # Determine maximum dimensionality of output from spec and spec_dims
    spec_ndim = 0
    for key, dim in spec_dims.items():
        if dim:
            spec_ndim = max(spec_ndim, dim)
        if not isscalar(spec[key]):
            spec_ndim = max(spec_ndim, spec[key].ndim)

    # Check for monotonic ordering of dimensions
    dims = [dim for dim in vals_dims.values() if dim is not None]
    if len(dims) > 1:
        assert np.min(
            np.diff(dims)) > 0, "Dimensionality not monotonically ordered"
    dims = [dim for dim in spec_dims.values() if dim is not None]
    if len(dims) > 1:
        assert np.min(
            np.diff(dims)) > 0, "Dimensionality not monotonically ordered"

    # Evaluate reshape and slices from matches between spec and vals
    reshape = [1] * spec_ndim
    slices = [slice(None) for _ in range(vals_ndim + 1)]
    for i, key in enumerate(keys):
        if vals_dims[key] is None:  # If source is scalar
            if not isscalar(spec[key]):
                assert np.all(spec[key] == vals[key]), \
                  "Cannot slice by multiple values"
            elif spec[key] != vals[key]:
                return np.empty(np.zeros(len(dims), dtype=int), dtype=float)
            else:
                pass
        if spec_dims[key] is None:  # If target is scalar
            dim = vals_dims[key]
            match = np.ravel(vals[key]) == spec[key]
            n_matches = match.sum()
            if n_matches == 0:
                slices[dim] = slice(0, 0)
            elif n_matches == 1:
                slices[dim] = np.nonzero(match)[0]
            else:
                raise ValueError("Non-unique matches found")
        else:
            assert np.all(np.ravel(vals[key]) == np.ravel(spec[key])), \
                "Ambiguous specification with values mismatch"
            dim = spec_dims[key]
            reshape[dim] = vals[key].size
    return prob[tuple(slices)].reshape(reshape)
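A hypothetical call, assuming slice_by_keyvals and its probayes helpers are importable: slice a 3x2 prob array at x == 2 while re-laying y's values along the output's leading axis:

import numpy as np

x = np.array([1, 2, 3]).reshape(3, 1)    # varies along dim 0 of prob
y = np.array([10., 20.]).reshape(1, 2)   # varies along dim 1 of prob
prob = np.arange(6, dtype=float).reshape(3, 2)
out = slice_by_keyvals(spec={'x': 2, 'y': y},
                       vals={'x': x, 'y': y},
                       prob=prob,
                       vals_dims={'x': 0, 'y': 1},
                       spec_dims={'x': None, 'y': 0})
print(out.ravel())                       # [2. 3.]: the prob row where x == 2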
Example #18
    def apply_delta(self, values, delta=None, bound=None):
        """ Applies delta operation  to values optionally contrained by bounds.

    :param values: Numpy array values to apply.
    :param delta: delta value(s) to offset to the values
    :param bound: optional argument to contrain outputs.

    :return: Returns the values following the delta operation.

    If delta is not entered, then the default set by Variable.set_delta() is used.
    Delta may be a scalar or a single scalar value contained in a tuple or list.

    1. A scalar value: is summated to values (transformed if ufun is specified).
    2. A tuple: the polarity of the scalar value is randomised for the delta.
    3. A list: the delta is uniformly sampled in the range [0, scalar].
    """

        # If dictionary input type, values are keyed by variable name
        if isinstance(values, dict):
            values = values[self.name]

        # Evaluate list/tuple deltas and return values unchanged if delta is None
        delta = delta or self._delta
        if isinstance(delta, Expression):
            if delta.ret_callable():
                return delta(values)
            delta = delta()
        elif self._vtype not in VTYPES[bool]:
            if isinstance(delta, (list, tuple)):
                delta = self.eval_delta(delta)
        if isinstance(delta, self._Delta):
            delta = delta[0]
        if delta is None:
            return values

        # Apply the delta, treating bool as a special case
        if self._vtype in VTYPES[bool]:
            orand = isinstance(delta, tuple)
            urand = isinstance(delta, list)
            if orand or urand:
                assert len(
                    delta) == 1, "Tuple/list delta must contain one element"
                delta = delta[0]
                if isscalar(values) or orand:
                    vals = values if delta > np.random.uniform() > 0.5 \
                           else np.logical_not(values)
                else:
                    flip = delta > np.random.uniform(size=values.shape)
                    vals = np.copy(values)
                    vals[flip] = np.logical_not(vals[flip])
            else:
                vals = np.array(values, dtype=int) + np.array(delta, dtype=int)
                vals = np.array(np.mod(vals, 2), dtype=bool)
        elif self._ufun is None or self.__no_ucov:
            vals = values + delta
        else:
            transformed_vals = self.ufun[0](values) + delta
            vals = self.ufun[1](transformed_vals)
        vals = revtype(vals, self._vtype)

        # Apply bounds
        if bound is None:
            bound = False if 'bound' not in self._delta_kwds \
                   else self._delta_kwds['bound']
        if not bound:
            return vals
        maybe_bounce = [False] if self._vtype not in VTYPES[float] else \
                       [isinstance(self._vset[0], tuple),
                        isinstance(self._vset[1], tuple)]
        if not any(maybe_bounce):
            return np.maximum(self._vlims[0], np.minimum(self._vlims[1], vals))

        # Bouncing scalars and arrays without and with boolean indexing respectively
        if isscalar(vals):
            if all(maybe_bounce):
                if not self._inside(vals):
                    vals = values
            elif maybe_bounce[0]:
                if vals < self._vlims[0]:
                    vals = values
                else:
                    vals = np.minimum(self._vlims[1], vals)
            else:
                if vals > self._vlims[1]:
                    vals = values
                else:
                    vals = np.maximum(self._vlims[0], vals)
        else:
            if all(maybe_bounce):
                outside = np.logical_not(self._inside(vals))
                vals[outside] = values[outside]
            elif maybe_bounce[0]:
                outside = vals <= self._vlims[0]
                vals[outside] = values[outside]
                vals = np.minimum(self._vlims[1], vals)
            else:
                outside = vals >= self._vlims[1]
                vals[outside] = values[outside]
                vals = np.maximum(self._vlims[0], vals)
        return vals
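The three delta forms described in the docstring can be mimicked standalone; a minimal sketch of the semantics with illustrative names:

import numpy as np

def toy_delta(delta):
    # Mirrors the scalar/tuple/list semantics documented above.
    if isinstance(delta, tuple):    # tuple: polarity is randomised
        return delta[0] if np.random.uniform() > 0.5 else -delta[0]
    if isinstance(delta, list):     # list: uniformly sampled in [-d, d]
        return np.random.uniform(-delta[0], delta[0])
    return delta                    # scalar: applied as-is

print(toy_delta(0.5), toy_delta((0.5,)), toy_delta([0.5]))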
Example #19
def product(*args, **kwds):
  """ Multiplies two or more PDs subject to the following:
  1. They must not share the same marginal variables. 
  2. Conditional variables must be identical unless contained as marginal from
     another distribution.
  """
  from probayes.pd import PD

  # Check pscales, scalars, possible fasttrack
  if not len(args):
    return None
  kwds = dict(kwds)
  pscales = [arg.pscale for arg in args]
  pscale = kwds.get('pscale', None) or prod_pscale(pscales)
  aresingleton = [arg.issingleton for arg in args]
  maybe_fasttrack = all(aresingleton) and \
                    np.all(pscale == np.array(pscales)) and \
                    pscale in [0, 1.]


  # Collate vals, probs, marg_names, and cond_names as lists
  vals = [collections.OrderedDict(arg) for arg in args]
  probs = [arg.prob for arg in args]
  marg_names = [list(arg.marg.values()) for arg in args]
  cond_names = [list(arg.cond.values()) for arg in args]

  # Detect uniqueness in marginal keys and identical conditionals
  all_marg_keys = []
  for arg in args:
    all_marg_keys.extend(list(arg.marg.keys()))
  marg_sets = None
  if len(all_marg_keys) != len(set(all_marg_keys)):
    marg_keys, cond_keys, marg_sets = None, None, None
    for arg in args:
      if marg_keys is None:
        marg_keys = list(arg.marg.keys())
      elif marg_keys != list(arg.marg.keys()):
        marg_keys = None
        break
      if cond_keys is None:
        cond_keys = list(arg.cond.keys())
      elif cond_keys != list(arg.cond.keys()):
        marg_keys = None
        break
      if marg_keys:  
        are_marg_sets = np.array([isunitsetint(arg[marg_key]) for
                                  marg_key in marg_keys])
        if marg_sets is None:
          if np.any(are_marg_sets):
            marg_sets = are_marg_sets
          else:
            marg_keys = None
            break
        elif not np.all(marg_sets == are_marg_sets):
          marg_keys = None
          break
    assert marg_keys is not None and marg_sets is not None, \
      "Non-unique marginal variables currently not supported: {}".\
      format(all_marg_keys)
    maybe_fasttrack = True

  # Maybe fast-track identical conditionals
  if maybe_fasttrack:
    marg_same = True
    cond_same = True
    if marg_sets is None: # no need to recheck if not None (I think)
      marg_same = True
      for name in marg_names[1:]:
        if marg_names[0] != name:
          marg_same = False
          break
      cond_same = not any(cond_names)
      if not cond_same:
        cond_same = True
        for name in cond_names[1:]:
          if cond_names[0] != name:
            cond_same = False
            break
    if marg_same and cond_same:
      marg_names = marg_names[0]
      cond_names = cond_names[0]
      prod_marg_name = ','.join(marg_names)
      prod_cond_name = ','.join(cond_names)
      prod_name = '|'.join([prod_marg_name, prod_cond_name])
      prod_vals = collections.OrderedDict()
      for i, val in enumerate(vals):
        areunitsetints = np.array([isunitsetint(_val) 
                                   for _val in val.values()])
        if not np.any(areunitsetints):
          prod_vals.update(val)
        else:
          assert marg_sets is not None, "Variable mismatch"
          assert np.all(marg_sets == areunitsetints[:len(marg_sets)]), \
              "Variable mismatch"
          if not len(prod_vals):
            prod_vals.update(collections.OrderedDict(val))
          else:
            for j, key in enumerate(prod_vals.keys()):
              if areunitsetints[j]:
                prod_vals.update({key: {list(prod_vals[key])[0] + \
                                        list(val[key])[0]}})
      if marg_sets is not None:
        prob, pscale = prod_rule(*tuple(probs), pscales=pscales, pscale=pscale)
        return PD(prod_name, prod_vals, dims=args[0].dims, prob=prob, pscale=pscale)
      else:
        prod_prob = float(sum(probs)) if iscomplex(pscale) else float(np.prod(probs))
        return PD(prod_name, prod_vals, prob=prod_prob, pscale=pscale)

  # Check cond->marg accounts for all differences between conditionals
  prod_marg = [name for dist_marg_names in marg_names \
                          for name in dist_marg_names]
  prod_marg_name = ','.join(prod_marg)
  flat_cond_names = [name for dist_cond_names in cond_names \
                          for name in dist_cond_names]
  cond2marg = [cond_name for cond_name in flat_cond_names \
                         if cond_name in prod_marg]
  prod_cond = [cond_name for cond_name in flat_cond_names \
                         if cond_name not in cond2marg]
  cond2marg_set = set(cond2marg)

  # Check conditionals compatible
  prod_cond_set = set(prod_cond)
  cond2marg_dict = {name: None for name in cond2marg}
  for i, arg in enumerate(args):
    cond_set = set(cond_names[i]) - cond2marg_set
    if cond_set:
      assert prod_cond_set == cond_set, \
          "Incompatible product conditional {} for conditional set {}: ".format(
              prod_cond_set, cond_set)
    for name in cond2marg:
      if name in arg.keys():
        values = arg[name]
        if not isscalar(values):
          values = np.ravel(values)
        if cond2marg_dict[name] is None:
          cond2marg_dict[name] = values
        elif not np.allclose(cond2marg_dict[name], values):
          raise ValueError("Mismatch in values for condition {}".format(name))

  # Establish product name, values, and dimensions
  prod_keys = str2key(prod_marg + prod_cond)
  prod_nkeys = len(prod_keys)
  prod_aresingleton = np.zeros(prod_nkeys, dtype=bool)
  prod_areunitsetints = np.zeros(prod_nkeys, dtype=bool)
  prod_cond_name = ','.join(prod_cond)
  prod_name = prod_marg_name if not len(prod_cond_name) \
              else '|'.join([prod_marg_name, prod_cond_name])
  prod_vals = collections.OrderedDict()
  for i, key in enumerate(prod_keys):
    values = None
    for val in vals:
      if key in val.keys():
        values = val[key]
        prod_areunitsetints[i] = isunitsetint(val[key])
        if prod_areunitsetints[i]:
          values = {0}
        break
    assert values is not None, "Values for key {} not found".format(key)
    prod_aresingleton[i] = issingleton(values)
    prod_vals.update({key: values})
  if np.any(prod_areunitsetints):
    for i, key in enumerate(prod_keys):
      if prod_areunitsetints[i]:
        for val in vals:
          if key in val:
            assert isunitsetint(val[key]), "Mismatch in variables {} vs {}".\
                format(prod_vals, val)
            prod_vals.update({key: {list(prod_vals[key])[0] + list(val[key])[0]}})
  prod_newdims = np.array(np.logical_not(prod_aresingleton))
  dims_shared = False
  for arg in args:
    argdims = [dim for dim in arg.dims.values() if dim is not None]
    if len(argdims) != len(set(argdims)):
      dims_shared = True

  # Shared dimensions limit product dimensionality
  if dims_shared:
    seen_keys = set()
    for i, key in enumerate(prod_keys):
      if prod_newdims[i] and key not in seen_keys:
        for arg in args:
          if key in arg.dims:
            dim = arg.dims[key]
            seen_keys.add(key)
            for argkey, argdim in arg.dims.items():
              seen_keys.add(argkey)
              if argkey != key and argdim is not None:
                if dim == argdim:
                  index = prod_keys.index(argkey)
                  prod_newdims[index] = False

  prod_cdims = np.cumsum(prod_newdims)
  prod_ndims = prod_cdims[-1]

  # Fast-track scalar products
  if maybe_fasttrack and prod_ndims == 0:
    prob = float(sum(probs)) if iscomplex(pscale) else float(np.prod(probs))
    return PD(prod_name, prod_vals, prob=prob, pscale=pscale)

  # Reshape values - they require no axes swapping
  ones_ndims = np.ones(prod_ndims, dtype=int)
  prod_shape = np.ones(prod_ndims, dtype=int)
  scalarset = set()
  prod_dims = collections.OrderedDict()
  for i, key in enumerate(prod_keys):
    if prod_aresingleton[i]:
      scalarset.add(key)
    else:
      values = prod_vals[key]
      re_shape = np.copy(ones_ndims)
      dim = prod_cdims[i]-1
      prod_dims.update({key: dim})
      re_shape[dim] = values.size
      prod_shape[dim] = values.size
      prod_vals.update({key: values.reshape(re_shape)})
  
  # Match probability axes and shapes with axes swapping then reshaping
  for i in range(len(args)):
    prob = probs[i]
    if not isscalar(prob):
      dims = collections.OrderedDict()
      for key, val in args[i].dims.items():
        if val is not None:
          dims.update({val: prod_dims[key]})
      old_dims = []
      new_dims = []
      for key, val in dims.items():
        if key not in old_dims:
          old_dims.append(key)
          new_dims.append(val)
      if len(old_dims) > 1 and not old_dims == new_dims:
        max_dims_inc = max(new_dims) + 1
        while prob.ndim < max_dims_inc:
          prob = np.expand_dims(prob, -1)
        prob = np.moveaxis(prob, old_dims, new_dims)
      re_shape = np.copy(ones_ndims)
      for dim in new_dims:
        re_shape[dim] = prod_shape[dim]
      probs[i] = prob.reshape(re_shape)

  # Multiply the probabilities and output the result as a distribution instance
  prob, pscale = prod_rule(*tuple(probs), pscales=pscales, pscale=pscale)

  return PD(prod_name, prod_vals, dims=prod_dims, prob=prob, pscale=pscale)
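For intuition on the fast-track branches: on a linear pscale probabilities multiply, while on a log (complex) pscale they add, which is all the scalar shortcut does:

import numpy as np

probs = [0.5, 0.4]
print(float(np.prod(probs)))       # linear pscale: 0.2
print(float(sum(np.log(probs))))   # log pscale: log(0.5) + log(0.4)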