Example #1
        def __init__(self, *args, **kwds):
            (dtype, name, parent_names, parents_default, docstr, logp, random,
             mv, logp_partial_gradients) = new_class_args
            parents = parents_default

            # Figure out what argument names are needed.
            arg_keys = [
                'name', 'parents', 'value', 'observed', 'size', 'trace',
                'rseed', 'doc', 'debug', 'plot', 'verbose'
            ]
            arg_vals = [
                None, parents, None, False, None, True, True, None, False,
                None, -1
            ]
            if 'isdata' in kwds:
                warnings.warn(
                    '"isdata" is deprecated, please use "observed" instead.')
                # Consume the deprecated alias so it does not later trip the
                # unrecognized-keyword check below.
                kwds['observed'] = kwds.pop('isdata')

            # No size argument allowed for multivariate distributions.
            if mv:
                arg_keys.pop(4)
                arg_vals.pop(4)

            arg_dict_out = dict(zip(arg_keys, arg_vals))
            args_needed = ['name'] + parent_names + arg_keys[2:]

            # Sort positional arguments
            for i in xrange(len(args)):
                try:
                    k = args_needed.pop(0)
                    if k in parent_names:
                        parents[k] = args[i]
                    else:
                        arg_dict_out[k] = args[i]
                except IndexError:
                    raise ValueError(
                        'Too many positional arguments provided. Arguments for class '
                        + self.__class__.__name__ + ' are: ' +
                        str(args_needed))

            # Sort keyword arguments
            for k in args_needed:
                if k in parent_names:
                    try:
                        parents[k] = kwds.pop(k)
                    except KeyError:
                        if k in parents_default:
                            parents[k] = parents_default[k]
                        else:
                            raise ValueError('No value given for parent ' + k)
                elif k in arg_dict_out.keys():
                    try:
                        arg_dict_out[k] = kwds.pop(k)
                    except KeyError:
                        pass

            # Remaining unrecognized arguments raise an error.
            if len(kwds) > 0:
                raise TypeError('Keywords ' + str(kwds.keys()) +
                                ' not recognized. Arguments recognized are ' +
                                str(args_needed))

        # Determine size desired for scalar variables.
        # Notes
        # -----
        # Case | init_val     | parents       | size | value.shape | bind size
        # ------------------------------------------------------------------
        # 1.1  | None         | scalars       | None | 1           | 1
        # 1.2  | None         | scalars       | n    | n           | n
        # 1.3  | None         | n             | None | n           | 1
        # 1.4  | None         | n             | n(m) | n (Error)   | 1 (-)
        # 2.1  | scalar       | scalars       | None | 1           | 1
        # 2.2  | scalar       | scalars       | n    | n           | n
        # 2.3  | scalar       | n             | None | n           | 1
        # 2.4  | scalar       | n             | n(m) | n (Error)   | 1 (-)
        # 3.1  | n            | scalars       | None | n           | n
        # 3.2  | n            | scalars       | n(m) | n (Error)   | n (-)
        # 3.3  | n            | n             | None | n           | 1
        # 3.4  | n            | n             | n(m) | n (Error)   | 1 (-)

            if not mv:

                shape = arg_dict_out.pop('size')
                shape = None if shape is None else tuple(np.atleast_1d(shape))

                init_val = arg_dict_out['value']
                init_val_shape = None if init_val is None else np.shape(
                    init_val)

                if len(parents) > 0:
                    pv = [np.shape(utils.value(v)) for v in parents.values()]
                    biggest_parent = np.argmax([(np.prod(v) if v else 0)
                                                for v in pv])
                    parents_shape = pv[biggest_parent]

                    # Scalar parents can support any shape.
                    if np.prod(parents_shape) < 1:
                        parents_shape = None

                else:
                    parents_shape = None

                def shape_error():
                    raise ValueError(
                        'Shapes are incompatible: value %s, largest parent %s, shape argument %s'
                        % (init_val_shape, parents_shape, shape))

                if init_val_shape is not None and shape is not None and init_val_shape != shape:
                    shape_error()

                given_shape = init_val_shape or shape
                bindshape = given_shape or parents_shape

                # Check consistency of bindshape and parents_shape
                if parents_shape is not None:
                    # Uncomment to leave broadcasting completely up to NumPy's random functions
                    # if bindshape[-np.alen(parents_shape):]!=parents_shape:
                    # Uncomment to limit broadcasting flexibility to what the Fortran likelihoods can handle.
                    if bindshape < parents_shape:
                        shape_error()

                if random is not None:
                    random = bind_size(random, bindshape)

            elif 'size' in kwds.keys():
                raise ValueError(
                    'No size argument allowed for multivariate stochastic variables.'
                )

            # Wrap logp and random for debugging if requested, then call the
            # base class initialization method.
            if arg_dict_out.pop('debug'):
                logp = debug_wrapper(logp)
                random = debug_wrapper(random)
            Stochastic.__init__(self,
                                logp=logp,
                                random=random,
                                dtype=dtype,
                                **arg_dict_out)
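
This initializer appears to be the one PyMC 2's distribution factory attaches to auto-generated `Stochastic` subclasses; the `size` handling above is what lets a scalar distribution produce an array-valued variable, following the case table in the comments. A minimal usage sketch, assuming PyMC 2.x is installed and the generated class is exposed as `pymc.Normal` (cases 1.2 and 1.3 from the table):

    import numpy as np
    import pymc

    # Case 1.2: no initial value, scalar parents, size=5
    # -> the stochastic's value should be drawn with shape (5,).
    x = pymc.Normal('x', mu=0.0, tau=1.0, size=5)
    print(np.shape(x.value))   # expected: (5,)

    # Case 1.3: no initial value, a length-3 parent, no size argument
    # -> the value takes its shape from the largest parent.
    y = pymc.Normal('y', mu=np.zeros(3), tau=1.0)
    print(np.shape(y.value))   # expected: (3,)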
Example #2
 def __init__(self, *args, **kwds):
     new_class.__init__(self, *args, **kwds)
     self.args, self.kwds = separate_shape_args(self.parents, shape_args)
     self.frozen_rv = self.rv(self.args, self.kwds)
     self._random = bind_size(self._random, self.shape)
Example #3
 def __init__(self, *args, **kwds):
     new_class.__init__(self, *args, **kwds)
     self.args, self.kwds = separate_shape_args(self.parents,
                                                shape_args)
     self.frozen_rv = self.rv(self.args, self.kwds)
     self._random = bind_size(self._random, self.shape)
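
Examples #2 and #3 show the thin wrapper used when the distribution is built around a frozen SciPy-style rv: the generated `__init__` runs first, then the parents are split into the positional shape arguments the rv expects and the remaining keyword arguments, and the draw function is bound to the resolved shape. The helpers themselves are not shown here; the sketch below only illustrates what a `separate_shape_args`-style split could look like (the name `separate_shape_args_sketch` and its exact behavior are assumptions, not the library's implementation):

    def separate_shape_args_sketch(parents, shape_args):
        # Hypothetical helper, for illustration only: pull the named
        # positional parameters out of the parents dict and return them
        # alongside whatever is left as keyword arguments.
        args = [parents[name] for name in shape_args if name in parents]
        kwds = {k: v for k, v in parents.items() if k not in shape_args}
        return args, kwds

    parents = {'loc': 0.0, 'scale': 2.0, 'df': 4}
    args, kwds = separate_shape_args_sketch(parents, shape_args=['df'])
    print(args, kwds)   # [4] {'loc': 0.0, 'scale': 2.0}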
Example #4
        def __init__(self, *args, **kwds):
            (dtype, name, parent_names, parents_default, docstr, logp, random, mv, logp_partial_gradients) = new_class_args
            parents = parents_default

            # Figure out what argument names are needed.
            arg_keys = ['name', 'parents', 'value', 'observed', 'size', 'trace', 'rseed', 'doc', 'debug', 'plot', 'verbose']
            arg_vals = [None, parents, None, False, None, True, True, None, False, None, -1]
            if 'isdata' in kwds:
                warnings.warn('"isdata" is deprecated, please use "observed" instead.')
                # Consume the deprecated alias so it does not later trip the
                # unrecognized-keyword check below.
                kwds['observed'] = kwds.pop('isdata')


            # No size argument allowed for multivariate distributions.
            if mv:
                arg_keys.pop(4)
                arg_vals.pop(4)

            arg_dict_out = dict(list(zip(arg_keys, arg_vals)))
            args_needed = ['name'] + parent_names + arg_keys[2:]

            # Sort positional arguments
            for i in range(len(args)):
                try:
                    k = args_needed.pop(0)
                    if k in parent_names:
                        parents[k] = args[i]
                    else:
                        arg_dict_out[k] = args[i]
                except IndexError:
                    raise ValueError('Too many positional arguments provided. Arguments for class ' + self.__class__.__name__ + ' are: ' + str(args_needed))


            # Sort keyword arguments
            for k in args_needed:
                if k in parent_names:
                    try:
                        parents[k] = kwds.pop(k)
                    except KeyError:
                        if k in parents_default:
                            parents[k] = parents_default[k]
                        else:
                            raise ValueError('No value given for parent ' + k)
                elif k in list(arg_dict_out.keys()):
                    try:
                        arg_dict_out[k] = kwds.pop(k)
                    except KeyError:
                        pass

            # Remaining unrecognized arguments raise an error.
            if len(kwds) > 0:
                raise TypeError('Keywords ' + str(list(kwds.keys())) + ' not recognized. Arguments recognized are ' + str(args_needed))

        # Determine size desired for scalar variables.
        # Notes
        # -----
        # Case | init_val     | parents       | size | value.shape | bind size
        # ------------------------------------------------------------------
        # 1.1  | None         | scalars       | None | 1           | 1
        # 1.2  | None         | scalars       | n    | n           | n
        # 1.3  | None         | n             | None | n           | 1
        # 1.4  | None         | n             | n(m) | n (Error)   | 1 (-)
        # 2.1  | scalar       | scalars       | None | 1           | 1
        # 2.2  | scalar       | scalars       | n    | n           | n
        # 2.3  | scalar       | n             | None | n           | 1
        # 2.4  | scalar       | n             | n(m) | n (Error)   | 1 (-)
        # 3.1  | n            | scalars       | None | n           | n
        # 3.2  | n            | scalars       | n(m) | n (Error)   | n (-)
        # 3.3  | n            | n             | None | n           | 1
        # 3.4  | n            | n             | n(m) | n (Error)   | 1 (-)

            if not mv:

                shape = arg_dict_out.pop('size')
                shape = None if shape is None else tuple(np.atleast_1d(shape))

                init_val = arg_dict_out['value']
                init_val_shape = None if init_val is None else np.shape(init_val)

                if len(parents) > 0:
                    pv = [np.shape(utils.value(v)) for v in list(parents.values())]
                    biggest_parent = np.argmax([(np.prod(v) if v else 0) for v in pv])
                    parents_shape = pv[biggest_parent]

                    # Scalar parents can support any shape.
                    if np.prod(parents_shape) < 1:
                        parents_shape = None

                else:
                    parents_shape = None

                def shape_error():
                    raise ValueError('Shapes are incompatible: value %s, largest parent %s, shape argument %s' % (init_val_shape, parents_shape, shape))

                if init_val_shape is not None and shape is not None and init_val_shape != shape:
                    shape_error()

                given_shape = init_val_shape or shape
                bindshape = given_shape or parents_shape

                # Check consistency of bindshape and parents_shape
                if parents_shape is not None:
                    # Uncomment to leave broadcasting completely up to NumPy's random functions
                    # if bindshape[-np.alen(parents_shape):]!=parents_shape:
                    # Uncomment to limit broadcasting flexibility to what the Fortran likelihoods can handle.
                    if bindshape < parents_shape:
                        shape_error()

                if random is not None:
                    random = bind_size(random, bindshape)


            elif 'size' in list(kwds.keys()):
                raise ValueError('No size argument allowed for multivariate stochastic variables.')


            # Wrap logp and random for debugging if requested, then call the
            # base class initialization method.
            if arg_dict_out.pop('debug'):
                logp = debug_wrapper(logp)
                random = debug_wrapper(random)
            Stochastic.__init__(self, logp=logp, random=random, dtype=dtype, **arg_dict_out)
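
All four examples finish by passing the draw function through `bind_size` so that later calls sample arrays of the resolved shape. That helper is also not shown; a minimal sketch of the idea, assuming the wrapped function accepts a `size` keyword the way NumPy-style samplers do (the implementation and the name `bind_size_sketch` are illustrative, not the library's):

    import numpy as np

    def bind_size_sketch(random_fn, shape):
        # Hypothetical wrapper, for illustration only: fix the draw shape
        # once so every subsequent call returns an array of that shape.
        def wrapped(*args, **kwds):
            kwds.setdefault('size', shape)
            return random_fn(*args, **kwds)
        return wrapped

    draw = bind_size_sketch(np.random.normal, shape=(2, 3))
    sample = draw(loc=0.0, scale=1.0)
    print(sample.shape)   # (2, 3)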