Example #1
def test_build_data_loader(data, expected_size, expected_type, exp_keys):
    # build the data loader object
    data_loader = build_data_loader(data)

    assert data_loader.size == expected_size
    assert type(data_loader).__name__ == expected_type
    assert set(data_loader.variables) == set(exp_keys)
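
This test takes its arguments from a pytest parametrization that is not shown in the excerpt. A minimal hypothetical sketch of how it might be driven follows; the import path, the loader class name SampleDictLoader, and the concrete values are assumptions for illustration, not taken from the source:

import pytest
from inferpy.data.loaders import build_data_loader

# hypothetical parametrization: a plain sample dict is expected to be wrapped
# in a dict-backed loader that reports its size and mapped variable names
@pytest.mark.parametrize("data, expected_size, expected_type, exp_keys", [
    ({"x": [1.0, 2.0, 3.0]}, 3, "SampleDictLoader", ["x"]),
])
def test_build_data_loader(data, expected_size, expected_type, exp_keys):
    data_loader = build_data_loader(data)
    assert data_loader.size == expected_size
    assert type(data_loader).__name__ == expected_type
    assert set(data_loader.variables) == set(exp_keys)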
Example #2
    def fit(self, data, inference_method):
        # Parameter checks:
        # `data` must be a non-empty Python dict or a DataLoader
        data_loader = build_data_loader(data)
        plate_size = data_loader.size

        if len(data_loader.variables) == 0:
            raise ValueError(
                'The number of mapped variables must be at least 1.')

        # if fit was called before, warn that it restarts everything
        if self.inference_method:
            warnings.warn(
                "Fit was called before. This will restart the inference "
                "method and re-build the expanded model.")

        # set the inference method
        self.inference_method = inference_method

        # compile the inference method. If it needs to intercept random
        # variables, enable that context via a boolean tf.Variable defined
        # on the inference-method object.
        with util.interceptor.enable_interceptor(
                *self.inference_method.get_interceptable_condition_variables()):
            inference_method.compile(self, plate_size, self.layer_losses)
            # and run the update method with the data
            inference_method.update(data_loader)

        # if inference succeeds, record the observed variables
        self.observed_vars = data_loader.variables
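
For context, a call site for this fit method typically follows InferPy's documented pattern: define a model and a variational q-model, then fit with a sample dict. The sketch below is a hedged reconstruction of that pattern; the model definitions, names, and hyperparameters are illustrative, and exact API details may differ between InferPy versions:

import numpy as np
import tensorflow as tf
import inferpy as inf

@inf.probmodel
def model():
    theta = inf.Normal(loc=0., scale=1., name="theta")
    with inf.datamodel():
        x = inf.Normal(loc=theta, scale=1., name="x")

@inf.probmodel
def qmodel():
    loc = inf.Parameter(0., name="qtheta_loc")
    scale = tf.math.softplus(inf.Parameter(1., name="qtheta_scale"))
    theta = inf.Normal(loc, scale, name="theta")

m = model()
vi = inf.inference.VI(qmodel(), epochs=1000)
# the sample dict maps the observed variable "x"; fit builds a data loader,
# compiles the inference method, and runs the update loop shown below
m.fit({"x": np.random.normal(0., 1., size=1000)}, vi)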
Example #3
    def update(self, data):

        # create the input_data tensor
        data_loader = build_data_loader(data)
        input_data = self.create_input_data_tensor(data_loader)

        t = []
        sess = util.get_session()
        for i in range(self.epochs):
            for j in range(self.batches):
                # evaluate the input-data tensor so the resulting values can be used to observe variables
                local_input_data = sess.run(input_data)
                # reshape the data in case its shape does not exactly match
                # the shape used when building the random variable,
                # e.g. a trailing (..., 1) dimension
                clean_local_input_data = {k: np.reshape(v, self.expanded_variables["p"][k].observed_value.shape.as_list())
                                          for k, v in local_input_data.items()}
                with contextmanager.observe(self.expanded_variables["p"], clean_local_input_data):
                    with contextmanager.observe(self.expanded_variables["q"], clean_local_input_data):
                        sess.run(self.train_tensor)

                        t.append(sess.run(self.debug.loss_tensor))
                        # progress output: report the loss every 200 epochs
                        # and print a dot every 20 epochs
                        if j == 0 and i % 200 == 0:
                            print("\n {} epochs\t {}".format(i, t[-1]), end="", flush=True)
                        if j == 0 and i % 20 == 0:
                            print(".", end="", flush=True)

        # set the protected _losses attribute for the losses property
        self.debug.losses += t
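
The reshape step in the inner loop normalizes each evaluated batch to the exact shape of the corresponding variable's observed-value tensor. A standalone numpy illustration of the trailing (..., 1) case mentioned in the comment, with an assumed target shape:

import numpy as np

# a batch of 4 scalar observations arrives with shape (4,), while the random
# variable was built with a trailing singleton axis, i.e. shape (4, 1)
batch = np.arange(4.0)            # shape (4,)
target_shape = [4, 1]             # e.g. observed_value.shape.as_list()
clean = np.reshape(batch, target_shape)
assert clean.shape == (4, 1)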