Example #1
    def _generate_sample_chain(self, data):

        # check if the model should be expanded before getting the initial state
        local_hidden = [
            n for n, v in self.pmodel.vars.items()
            if v.is_datamodel and n not in data.keys()
        ]
        if len(local_hidden) > 0:
            init_vars, _ = self.pmodel.expand_model(self.plate_size)
        else:
            init_vars = self.pmodel.vars

        # sample the initial state
        self.hiddenvars_name = []
        initial_state = []
        for name, var in init_vars.items():
            if name not in data:
                # sample vars to use them as initial state
                initial_state.append(var)
                self.hiddenvars_name.append(name)
        initial_state = util.get_session().run(initial_state)

        # initialize MCMC
        hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=self._target_log_prob_fn,
            step_size=self.step_size,
            num_leapfrog_steps=self.num_leapfrog_steps)

        self._states_tensor, self._kernel_results_tensor = tfp.mcmc.sample_chain(
            num_results=self.num_results,
            current_state=initial_state,
            kernel=hmc_kernel,
            num_burnin_steps=self.num_burnin_steps)
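For context, here is a minimal standalone sketch of the same TFP calls (plain TensorFlow Probability, no InferPy internals): sampling a 2-dimensional standard normal with the kernel and chain configured as above. In TF1 graph mode both returned objects are tensors, which is why the method stores them and evaluates them in a session later.

import tensorflow as tf
import tensorflow_probability as tfp

def target_log_prob_fn(x):
    # unnormalized log-density of a standard normal
    return -0.5 * tf.reduce_sum(tf.square(x))

hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=target_log_prob_fn,
    step_size=0.1,
    num_leapfrog_steps=3)

states_tensor, kernel_results_tensor = tfp.mcmc.sample_chain(
    num_results=1000,
    current_state=tf.zeros([2]),
    kernel=hmc_kernel,
    num_burnin_steps=500)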
Example #2
    def update(self, data):
        # data must be a sample dictionary
        sample_dict = build_sample_dict(data)
        # ensure that the size of the data matches self.plate_size
        data_size = util.iterables.get_plate_size(self.pmodel.vars,
                                                  sample_dict)
        if data_size != self.plate_size:
            raise ValueError(
                "The size of the data must be equal to the plate size: {}".
                format(self.plate_size))

        sess = util.get_session()

        with util.interceptor.disallow_conditions():
            with ed.interception(util.interceptor.set_values(**sample_dict)):
                # create the hmc kernel
                self._generate_sample_chain(sample_dict)

                variables_states, _ = sess.run(
                    [self._states_tensor, self._kernel_results_tensor])

        # event_ndims is the number of dims of the states minus 1, excluding the leading dimension that indexes the samples
        self.states = {
            name: models.Empirical(states,
                                   event_ndims=len(states.shape) - 1,
                                   name=name)
            for name, states in zip(self.hiddenvars_name, variables_states)
        }
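As a follow-up sketch (the variable names and shapes below are illustrative, not taken from the method above): since each entry of the chain output carries the sample index as its leading axis, posterior summaries reduce to plain NumPy reductions over axis 0.

import numpy as np

hidden_names = ["theta", "z"]                                    # hypothetical names
chain_states = [np.random.rand(1000), np.random.rand(1000, 5)]   # stand-ins for HMC output

posterior_means = {
    name: np.mean(samples, axis=0)
    for name, samples in zip(hidden_names, chain_states)
}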
Example #3
def try_run(obj):
    try:
        ev_obj = util.get_session().run(obj)
        return ev_obj
    except (RuntimeError, TypeError, ValueError):
        # cannot evaluate the result, return the obj
        return obj
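The same pattern isolated from InferPy, as a runnable sketch (try_run_with is a hypothetical stand-in that takes the session explicitly instead of calling util.get_session()):

import tensorflow as tf

def try_run_with(obj, sess):
    try:
        return sess.run(obj)   # works for tensors, ops and nested fetches
    except (RuntimeError, TypeError, ValueError):
        return obj             # not fetchable: return it unchanged

sess = tf.Session()
print(try_run_with(tf.constant([1.0, 2.0]), sess))  # -> [1. 2.]
print(try_run_with(42, sess))                       # sess.run raises TypeError -> 42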
Example #4
    def update(self, data):

        # create the input_data tensor
        data_loader = build_data_loader(data)
        input_data = self.create_input_data_tensor(data_loader)

        t = []
        sess = util.get_session()
        for i in range(self.epochs):
            for j in range(self.batches):
                # evaluate the data tensor to obtain concrete values that can be used to observe variables
                local_input_data = sess.run(input_data)
                # reshape data in case it does not exactly match the shape used when building the random variable,
                # i.e. a trailing (..., 1) dimension
                clean_local_input_data = {k: np.reshape(v, self.expanded_variables["p"][k].observed_value.shape.as_list())
                                          for k, v in local_input_data.items()}
                with contextmanager.observe(self.expanded_variables["p"], clean_local_input_data):
                    with contextmanager.observe(self.expanded_variables["q"], clean_local_input_data):
                        sess.run(self.train_tensor)

                        t.append(sess.run(self.debug.loss_tensor))
                        if j == 0 and i % 200 == 0:
                            print("\n {} epochs\t {}".format(i, t[-1]), end="", flush=True)
                        if j == 0 and i % 20 == 0:
                            print(".", end="", flush=True)

        # set the protected _losses attribute for the losses property
        self.debug.losses += t
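A minimal sketch of the reshape step above (the shapes are assumed for illustration): a minibatch of 100 scalar observations delivered as shape (100,) is reshaped to the (100, 1) shape declared on the random variable.

import numpy as np

batch = np.random.rand(100)   # minibatch as delivered by the data loader
declared_shape = [100, 1]     # observed_value.shape recorded on the variable
clean_batch = np.reshape(batch, declared_shape)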
Example #5
def _tensor_conversion_function(rv, dtype=None, name=None, as_ref=False):
    """
        Function that converts the inferpy variable into a Tensor.
        This will enable the use of enable tf.convert_to_tensor(rv)

        If the variable needs to be broadcast_to, do it right now
    """
    # return the last snapshot of the tf.Variable if the rv is observed,
    # and the underlying ed2 random variable (its value) otherwise
    is_observed = util.get_session().run(rv.is_observed)
    return tf.convert_to_tensor(rv.observed_value.value() if is_observed else rv.var)
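Such a converter only takes effect once it is registered with TensorFlow. The registration call below is standard TensorFlow API; assuming InferPy hooks the converter up for its RandomVariable class, the call would look like this:

tf.register_tensor_conversion_function(RandomVariable, _tensor_conversion_function)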
Example #6
    def _build_model(self):
        # get the global variables defined before building the model
        _before_global_variables = tf.global_variables()

        with contextmanager.randvar_registry.init(self.graph):
            # use edward2 model tape to capture RandomVariable declarations
            with ed.tape() as model_tape:
                self.builder()

            # get variables from parameters
            var_parameters = contextmanager.randvar_registry.get_var_parameters()

            # wrap captured edward2 RVs into inferpy RVs
            model_vars = OrderedDict()
            for k, v in model_tape.items():
                registered_rv = contextmanager.randvar_registry.get_variable(k)
                if registered_rv is None:
                    # an ed2 RandomVariable not present in the registry: create an
                    # inferpy RandomVariable and assign the var directly. The args and
                    # kwargs used to build the ed2 variable are unknown, so use None.
                    model_vars[k] = RandomVariable(v,
                                                   name=k,
                                                   is_datamodel=False,
                                                   ed_cls=None,
                                                   var_args=None,
                                                   var_kwargs=None,
                                                   sample_shape=())
                else:
                    model_vars[k] = registered_rv

        # get the global variables defined after building the model
        _after_global_variables = tf.global_variables()
        # compute the new global variables defined when building the model
        created_vars = [
            v for v in _after_global_variables
            if v not in _before_global_variables
        ]
        util.get_session().run(tf.variables_initializer(created_vars))

        return model_vars, var_parameters
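A standalone sketch of the edward2 tape mechanism used above (plain edward2, no InferPy): every random variable created inside the context is recorded, keyed by name, in an OrderedDict.

from tensorflow_probability import edward2 as ed

def toy_model():
    x = ed.Normal(loc=0., scale=1., name="x")
    y = ed.Normal(loc=x, scale=1., name="y")
    return y

with ed.tape() as model_tape:
    toy_model()

print(list(model_tape.keys()))  # -> ['x', 'y']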
Example #7
    def build_in_session(self, sess):
        """
        Allow to build a copy of the random variable but running previously each parameter in the tf session.
        This way, it uses the value of each tf variable or placeholder as a tensor, not as a tf variable or placeholder.
        If this random variable is a ed random variable directly assigned to .var, we cannot re-create it. In this
        case, return self.
        :param sess: tf session used to run each parameter used to build this random variable.
        :returns: the random variable object
        """
        # Cannot re-create the random variable. Return this var itself
        if self._ed_cls is None:
            return self

        # create the ed random variable evaluating each parameter in a tf session
        ed_random_var = self._ed_cls(*[_try_sess_run(a, sess) for a in self._var_args],
                                     **{k: _try_sess_run(v, sess) for k, v in self._var_kwargs.items()},
                                     sample_shape=self._sample_shape)

        initial_value = util.get_session().run(self.observed_value_var)
        is_observed, is_observed_var, observed_value_var = _make_predictable_variables(initial_value, self.name)
        # build the random variable by using the ed random var
        rv = RandomVariable(
            var=ed_random_var,
            name=self.name,
            is_datamodel=self.is_datamodel,
            ed_cls=self._ed_cls,
            var_args=self._var_args,
            var_kwargs=self._var_kwargs,
            sample_shape=self._sample_shape,
            # use the freshly created predictable variables seeded from the current value
            is_observed=is_observed,
            is_observed_var=is_observed_var,
            observed_value_var=observed_value_var
        )

        # set the docstring and the name, in the same way as the _make_random_variable function
        docs = RandomVariable.__doc__ + '\n Random Variable information:\n' + \
            ('-' * 30) + '\n' + self._ed_cls.__doc__
        name = self._ed_cls.__name__

        rv.__doc__ += docs
        rv.__name__ = name

        return rv
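The helper _try_sess_run is not shown in this example; a plausible minimal version (an assumption, mirroring try_run from Example #3) would be:

def _try_sess_run(param, sess):
    try:
        return sess.run(param)   # tf variables/placeholders become concrete values
    except (RuntimeError, TypeError, ValueError):
        return param             # plain Python values pass through unchanged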
Example #8
    def update(self, data):

        # data must be a sample dictionary
        sample_dict = build_sample_dict(data)
        sample_dict["x"].shape
        # ensure that the size of the data matches self.plate_size
        data_size = util.iterables.get_plate_size(self.pmodel.vars,
                                                  sample_dict)
        if data_size != self.plate_size:
            raise ValueError(
                "The size of the data must be equal to the plate size: {}".
                format(self.plate_size))

        t = []
        sess = util.get_session()
        # reshape data in case it does not exactly match the shape used when building the random variable,
        # i.e. a trailing (..., 1) dimension
        clean_sample_dict = {
            k: np.reshape(
                v,
                self.expanded_variables["p"][k].observed_value.shape.as_list())
            for k, v in sample_dict.items()
        }
        with contextmanager.observe(self.expanded_variables["p"],
                                    clean_sample_dict):
            with contextmanager.observe(self.expanded_variables["q"],
                                        clean_sample_dict):
                for i in range(self.epochs):
                    sess.run(self.train_tensor)

                    t.append(sess.run(self.debug.loss_tensor))
                    if i % 200 == 0:
                        print("\n {} epochs\t {}".format(i, t[-1]),
                              end="",
                              flush=True)
                    if i % 10 == 0:
                        print(".", end="", flush=True)

        # set the protected _losses attribute for the losses property
        self.debug.losses += t
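A minimal sketch of the plate-size check performed at the top of this method (get_plate_size_like and the leading-axis convention are assumptions for illustration): every observed variable must agree on its leading "plate" dimension.

import numpy as np

def get_plate_size_like(sample_dict):
    sizes = {np.shape(v)[0] for v in sample_dict.values()}
    if len(sizes) != 1:
        raise ValueError("observed variables disagree on the sample size")
    return sizes.pop()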
Example #9
    def wrapper(*args, **kwargs):
        # first obtain tf_run, a bool telling whether the output should be evaluated in a session
        tf_run = kwargs.pop("tf_run", __tf_run_default)

        # use this context to keep track of the decorated functions calls (recursive depth level)
        with runner_scope():
            # now execute the function
            obj = f(*args, **kwargs)
            if tf_run and runner_context['runner_recursive_depth'] == 1:
                # first recursive depth, and tf_run is True: we can eval the function
                try:
                    ev_obj = util.get_session().run(obj)
                    return ev_obj
                except (TypeError, ValueError):
                    # cannot evaluate the result, return the obj
                    return obj
            else:
                # tf_run is False or we are at a runner depth greater than 1 (do not eval the result yet)
                return obj
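A self-contained sketch of how such a wrapper is typically used (tf_run_allowed_like is a simplified stand-in: it opens a fresh session and skips the recursion tracking done by runner_scope):

import tensorflow as tf

def tf_run_allowed_like(f):
    def wrapper(*args, **kwargs):
        tf_run = kwargs.pop("tf_run", True)   # the keyword never reaches f
        obj = f(*args, **kwargs)
        if tf_run:
            try:
                return tf.Session().run(obj)
            except (TypeError, ValueError):
                return obj
        return obj
    return wrapper

@tf_run_allowed_like
def double(x):
    return tf.constant(x) * 2.0

print(double(3.0))                 # evaluated -> 6.0
print(double(3.0, tf_run=False))   # the tensor itself, left unevaluated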
Example #10
    def type(self):
        first_part = 'LOCAL' if self.is_datamodel else 'GLOBAL'
        second_part = 'OBSERVED' if util.get_session().run(self.is_observed) else 'HIDDEN'

        return getattr(Kind, "{}_{}".format(first_part, second_part))
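The Kind enum is not shown in this example; a minimal sketch consistent with the getattr lookup above (the member values are an assumption) would be:

from enum import Enum

class Kind(Enum):
    GLOBAL_HIDDEN = 0
    GLOBAL_OBSERVED = 1
    LOCAL_HIDDEN = 2
    LOCAL_OBSERVED = 3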