def test_tensor_register():
    # This allows running an inferpy RandomVariable directly in a tf session.

    x = inf.Normal(5., 0., name='foo')

    assert inf.get_session().run(x) == 5.
    assert isinstance(tf.convert_to_tensor(x), tf.Tensor)
    assert inf.get_session().run(tf.convert_to_tensor(x)) == 5.
    assert inf.get_session().run(tf.constant(5.) + x) == 10.
    assert inf.get_session().run(x + tf.constant(5.)) == 10.
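The behaviour exercised above relies on TensorFlow's tensor-conversion registry, which presumably is how InferPy registers its RandomVariable class. A minimal sketch of that mechanism, using a hypothetical Wrapped class rather than InferPy's actual code:

import tensorflow as tf

class Wrapped:
    def __init__(self, value):
        self.value = tf.constant(value)

# registering a conversion function lets tf.convert_to_tensor, and any op that
# converts its arguments (such as the overloaded + below), accept Wrapped objects
tf.register_tensor_conversion_function(
    Wrapped, lambda obj, dtype=None, name=None, as_ref=False: obj.value)

with tf.Session() as sess:
    w = Wrapped(5.)
    assert sess.run(tf.convert_to_tensor(w)) == 5.
    assert sess.run(tf.constant(5.) + w) == 10.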
Example #2
    def _generate_train_tensor(self, extra_loss_tensor, **kwargs):
        """ This function expand the p and q models. Then, it uses the  loss function to create the loss tensor
            and store it into the debug object as a new attribute.
            Then, uses the optimizer to create the train tensor used in the gradient descent iterative process.
            It store the expanded random variables and parameters from the p and q models in self.expanded_variables
            and self.expanded_parameters dicts.

            Returns:
                The `tf.Tensor` train tensor used in the gradient descent iterative process.

        """
        # expand the p and q models
        # expand the qmodel
        qvars, qparams = self.qmodel.expand_model(self.plate_size)

        # expand the pmodel, using the util.interceptor.set_values function to inject the expanded qvars
        # the interception makes it possible to observe RandomVariables by modifying a tf.Variable value via tf.cond
        with ed.interception(util.interceptor.set_values(**qvars)):
            pvars, pparams = self.pmodel.expand_model(self.plate_size)

        # create the loss tensor and trainable tensor for the gradient descent process
        loss_tensor = self.loss_fn(pvars, qvars, **kwargs)
        # if extra_loss_tensor is not None, it must be a tensor with the inf.layers.Sequential losses
        if extra_loss_tensor is not None:
            loss_tensor += extra_loss_tensor
        # use the optimizer to create the train tensor
        train = self.optimizer.minimize(loss_tensor)

        # save the expanded variables and parameters
        self.expanded_variables = {"p": pvars, "q": qvars}
        self.expanded_parameters = {"p": pparams, "q": qparams}
        # save the loss tensor for debug purposes
        self.debug.loss_tensor = loss_tensor

        # Initialize all variables except the model parameters, which have already been initialized
        model_variables = [
            v for v in itertools.chain(
                self.pmodel.params.values(),  # do not re-initialize prior p model parameters
                pparams.values(),             # do not re-initialize expanded p model parameters
                self.qmodel.params.values(),  # do not re-initialize prior q model parameters
                qparams.values(),             # do not re-initialize expanded q model parameters
            )
        ]
        inf.get_session().run(tf.variables_initializer([
            v for v in tf.global_variables()
            if v not in model_variables and not v.name.startswith("inferpy-")
        ]))

        return train
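The util.interceptor.set_values interception used above follows the standard Edward2 interceptor idiom. A minimal sketch of that pattern (Edward2's documented recipe, not necessarily InferPy's exact implementation):

from tensorflow_probability import edward2 as ed

def set_values(**model_kwargs):
    """Interceptor that fixes each named random variable to the given value."""
    def interceptor(rv_constructor, *args, **kwargs):
        name = kwargs.get("name")
        if name in model_kwargs:
            kwargs["value"] = model_kwargs[name]
        # ed.interceptable re-dispatches the constructor so that any other
        # active interceptors still apply
        return ed.interceptable(rv_constructor)(*args, **kwargs)
    return interceptor

With this interceptor active, each p-model variable whose name appears in the dict takes the corresponding q-model value instead of sampling a fresh one.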
Example #3
    def run(self, pmodel, sample_dict):
        # NOTE: right now we use a session inside a with context, so it is opened and closed here.
        # To run consecutive inference, the same session would have to be reused so that variables are shared.
        # In that case, the build_in_session function from RandomVariables should not be used.

        # get the plate size
        plate_size = util.iterables.get_plate_size(pmodel.vars, sample_dict)
        # Create the loss function tensor
        loss_tensor = self.loss_fn(pmodel, self.qmodel, plate_size=plate_size)

        train = self.optimizer.minimize(loss_tensor)

        t = []

        sess = inf.get_session()
        # Initialize all variables except the model parameters, which have already been initialized
        model_variables = [
            v for v in itertools.chain(
                pmodel.params.values(),
                (pmodel._last_expanded_params or {}).values(),
                (pmodel._last_fitted_params or {}).values(),
                self.qmodel.params.values(),
                (self.qmodel._last_expanded_params or {}).values(),
                (self.qmodel._last_fitted_params or {}).values(),
            )
        ]

        sess.run(tf.variables_initializer([
            v for v in tf.global_variables()
            if v not in model_variables and not v.name.startswith("inferpy-")
        ]))

        with contextmanager.observe(pmodel._last_expanded_vars, sample_dict):
            with contextmanager.observe(self.qmodel._last_expanded_vars,
                                        sample_dict):
                for i in range(self.epochs):
                    sess.run(train)

                    t.append(sess.run(loss_tensor))
                    if i % 200 == 0:
                        print("\n {} epochs\t {}".format(i, t[-1]),
                              end="",
                              flush=True)
                    if i % 10 == 0:
                        print(".", end="", flush=True)

        # set the private __losses attribute for the losses property
        self.__losses = t

        return self.qmodel._last_expanded_vars, self.qmodel._last_expanded_params
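A hypothetical end-to-end usage of a VI loop like the one above, following InferPy's documented probmodel API (the model, qmodel, and data here are made up for illustration):

import numpy as np
import tensorflow as tf
import inferpy as inf

@inf.probmodel
def model():
    theta = inf.Normal(loc=tf.zeros(1), scale=1., name="theta")
    with inf.datamodel():
        x = inf.Normal(theta, 1., name="x")

@inf.probmodel
def qmodel():
    loc = inf.Parameter(tf.zeros(1), name="loc")
    scale = tf.math.softplus(inf.Parameter(tf.ones(1), name="scale"))
    theta = inf.Normal(loc, scale, name="theta")

x_train = np.random.normal(5., 1., size=(500, 1)).astype(np.float32)
vi = inf.inference.VI(qmodel(), epochs=1000)
m = model()
m.fit({"x": x_train}, vi)   # fit dispatches to the inference method's run()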
Example #4
def test_convert_random_variables_to_tensors():
    # element without RVs
    element = 1
    result = sanitize_input_arg(element)
    assert result == element

    # list of elements different from RVs
    element = [1, 1]
    result = sanitize_input_arg(element)
    assert result == element

    # numpy array of elements different from RVs
    element = np.ones((3, 2))
    result = inf.get_session().run(sanitize_input_arg(element))
    assert (result == element).all()

    # A single Random Variable
    element = inf.Normal(0, 1)
    result = sanitize_input_arg(element)
    assert isinstance(element, random_variable.RandomVariable)
    assert not isinstance(result, random_variable.RandomVariable)

    # A list with some Random Variables
    element = [inf.Normal(0, 1), 1, inf.Normal(0, 1), 2]
    result = sanitize_input_arg(element)
    assert all([
        isinstance(element[i], random_variable.RandomVariable)
        and not isinstance(result[i], random_variable.RandomVariable)
        for i in [0, 2]
    ])

    # A list with some nested Random Variables
    element = [[inf.Normal(0, 1), 1, inf.Normal(0, 1), 2]]
    result = sanitize_input_arg(element)
    assert all([
        isinstance(element[0][i], random_variable.RandomVariable)
        and not isinstance(result[0][i], random_variable.RandomVariable)
        for i in [0, 2]
    ])
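The assertions above only pin down sanitize_input_arg's observable behaviour. A minimal sketch of what such a function presumably does, assuming it recursively replaces RandomVariables (and numpy arrays) with tensors; this is an illustration, not the actual InferPy implementation:

import numpy as np
import tensorflow as tf
from inferpy.models import random_variable

def sanitize_input_arg_sketch(arg):
    # hypothetical re-implementation for illustration only
    if isinstance(arg, random_variable.RandomVariable):
        return tf.convert_to_tensor(arg)
    if isinstance(arg, np.ndarray):
        return tf.constant(arg)
    if isinstance(arg, list):
        return [sanitize_input_arg_sketch(a) for a in arg]
    return arg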
Example #5
def test_operations(tensor, expected):
    result = inf.get_session().run(eval(tensor))
    # assert that it is equal to expected
    assert np.array_equal(result, expected)
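test_operations expects a (tensor, expected) parametrization supplied elsewhere. A hypothetical pytest parametrization, exploiting the fact that a zero-scale Normal evaluates deterministically to its location:

import numpy as np
import tensorflow as tf
import inferpy as inf
import pytest

@pytest.mark.parametrize("tensor, expected", [
    ("inf.Normal(2., 0.) + inf.Normal(3., 0.)", 5.),
    ("tf.constant(2.) * inf.Normal(3., 0.)", 6.),
])
def test_operations(tensor, expected):
    # eval builds the tensor expression from its string form
    result = inf.get_session().run(eval(tensor))
    assert np.array_equal(result, expected)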
Example #6
def test_run_in_session():
    inf.get_session()
    x = inf.Normal(1, 0)
    assert inf.get_session().run(x) == 1
Example #7

# Plot the evolution of the loss

L = inf_method.losses
plt.plot(range(len(L)), L)

plt.xlabel('epochs')
plt.ylabel('Loss')
plt.title('Loss evolution')
plt.grid(True)
plt.show()


# posterior sample from the hidden variable z, given the training data
sess = inf.get_session()
postz = m.posterior("z", data={"x": x_train}).sample()


# for each input instance, plot the hidden encoding coloured by the number that it represents
markers = ["x", "+", "o"]
colors = [plt.get_cmap("gist_rainbow")(0.05),
          plt.get_cmap("gnuplot2")(0.08),
          plt.get_cmap("gist_rainbow")(0.33)]
transp = [0.9, 0.9, 0.5]

fig = plt.figure()

for c in range(0, len(DIG)):
    col = colors[c]
    plt.scatter(postz[y_train == DIG[c], 0], postz[y_train == DIG[c], 1], color=col,
                marker=markers[c], alpha=transp[c], label=str(DIG[c]))

plt.legend()
plt.show()
Example #8
def test_run_in_session():
    x = inf.Parameter(0)
    assert inf.get_session().run(x) == 0
Example #9
    def run(self, pmodel, sample_dict):
        # create a tf dataset and an iterator, specifying the batch size
        plate_size = util.iterables.get_plate_size(pmodel.vars, sample_dict)
        batches = int(plate_size / self.batch_size)  # M/N
        batch_weight = self.batch_size / plate_size  # N/M

        tfdataset = (
            tf.data.Dataset.from_tensor_slices(sample_dict)
            # use the size of the complete dataset as the shuffle buffer, so the shuffle is perfect
            .shuffle(plate_size)
            # discard the last batch if it has fewer elements than batch_size
            .batch(self.batch_size, drop_remainder=True)
            .repeat())
        iterator = tfdataset.make_one_shot_iterator()
        # each time this tensor is evaluated in a session it yields a new batch
        input_data = iterator.get_next()

        # Create the loss function tensor
        loss_tensor = self.loss_fn(pmodel,
                                   self.qmodel,
                                   plate_size=self.batch_size,
                                   batch_weight=batch_weight)

        train = self.optimizer.minimize(loss_tensor)

        t = []

        sess = inf.get_session()
        # Initialize all variables except the model parameters, which have already been initialized
        model_variables = set(
            itertools.chain(
                pmodel.params.values(),
                (pmodel._last_expanded_params or {}).values(),
                (pmodel._last_fitted_params or {}).values(),
                self.qmodel.params.values(),
                (self.qmodel._last_expanded_params or {}).values(),
                (self.qmodel._last_fitted_params or {}).values(),
            ))
        sess.run(tf.variables_initializer([
            v for v in tf.global_variables()
            if v not in model_variables and not v.name.startswith("inferpy-")
        ]))

        for i in range(self.epochs):
            for j in range(batches):
                # evaluate the data tensor to get a concrete batch which can be used to observe variables
                local_input_data = sess.run(input_data)
                with contextmanager.observe(pmodel._last_expanded_vars,
                                            local_input_data):
                    with contextmanager.observe(
                            self.qmodel._last_expanded_vars, local_input_data):
                        sess.run(train)

                        t.append(sess.run(loss_tensor))
                        if i % 200 == 0:
                            print("\n {} epochs\t {}".format(i, t[-1]),
                                  end="",
                                  flush=True)
                        if i % 20 == 0:
                            print(".", end="", flush=True)

        # set the private __losses attribute for the losses property
        self.__losses = t

        return self.qmodel._last_expanded_vars, self.qmodel._last_expanded_params
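The batch_weight = N/M handed to the loss function is the standard stochastic-VI correction: a likelihood term computed over only N points must be scaled up by M/N to remain an unbiased estimate of the full-data objective (the exact placement of the factor lives inside loss_fn). A small numeric sketch with hypothetical log-likelihood values:

import tensorflow as tf

M, N = 1000, 100                      # dataset size and batch size
batch_weight = N / M                  # the factor handed to loss_fn above

per_point_log_lik = tf.fill([N], -1.3)            # hypothetical log p(x_i | z)
batch_term = tf.reduce_sum(per_point_log_lik)     # covers only N points
full_data_estimate = batch_term / batch_weight    # rescaled to all M points

with tf.Session() as sess:
    print(sess.run(full_data_estimate))           # ~ -1300.0 == M * (-1.3)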