Code example #1
    def predict(self, test_inputs, batch_size=None):
        """
        Predict outputs given inputs.

        Parameters
        ----------
        test_inputs : ndarray
            Points on which we wish to make predictions. Dimensions: num_test * input_dim.
        batch_size : int, optional
            The size of the batches we make predictions on. If batch_size is
            None, predict on the entire test set at once.

        Returns
        -------
        ndarray
            The predicted mean of the test inputs. Dimensions: num_test * output_dim.
        ndarray
            The predicted variance of the test inputs. Dimensions: num_test * output_dim.
        """
        if batch_size is None:
            num_batches = 1
        else:
            num_batches = util.ceil_divide(test_inputs.shape[0], batch_size)

        test_inputs = np.array_split(test_inputs, num_batches)
        pred_means = util.init_list(0.0, [num_batches])
        pred_vars = util.init_list(0.0, [num_batches])
        for i in range(num_batches):
            pred_means[i], pred_vars[i] = self.session.run(
                self.predictions, feed_dict={self.test_inputs: test_inputs[i]})

        pred_means = np.concatenate(pred_means, axis=0)
        pred_vars = np.concatenate(pred_vars, axis=0)
        return pred_means, pred_vars
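A hedged usage sketch for predict, assuming a trained instance of the class above (the wrapper function and data shapes here are placeholders, not from the source):

    import numpy as np

    def batched_predict(model, test_inputs):
        # model is assumed to be a trained object exposing predict() as above.
        pred_means, pred_vars = model.predict(test_inputs, batch_size=128)
        assert pred_means.shape[0] == test_inputs.shape[0]
        return pred_means, pred_vars

    # e.g. batched_predict(trained_model, np.random.randn(1000, 5))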
Code example #2
    def _build_interim_vals(self, kernel_chol, inducing_inputs, train_inputs):
        kern_prods = util.init_list(0.0, [self.num_latent])
        kern_sums = util.init_list(0.0, [self.num_latent])
        for i in range(self.num_latent):
            ind_train_kern = self.kernels[i].kernel(inducing_inputs[i, :, :],
                                                    train_inputs)
            # Compute A = Kxz.Kzz^(-1) = (Kzz^(-1).Kzx)^T.
            kern_prods[i] = tf.transpose(
                tf.cholesky_solve(kernel_chol[i, :, :], ind_train_kern))
            # We only need the diagonal components.
            kern_sums[i] = (self.kernels[i].diag_kernel(train_inputs) -
                            util.diag_mul(kern_prods[i], ind_train_kern))

        kern_prods = tf.stack(kern_prods, 0)
        kern_sums = tf.stack(kern_sums, 0)
        return kern_prods, kern_sums
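In matrix form, each iteration computes A = Kxz.Kzz^(-1) by a Cholesky solve and the diagonal residual diag(Kxx) - diag(A.Kzx). A minimal NumPy/SciPy sketch of the same algebra for a single latent process (illustrative names, not the library's util helpers):

    import numpy as np
    from scipy.linalg import cho_factor, cho_solve

    def interim_vals(Kzz, Kzx, kxx_diag):
        # A = Kxz @ inv(Kzz), computed as (inv(Kzz) @ Kzx)^T via Cholesky.
        A = cho_solve(cho_factor(Kzz, lower=True), Kzx).T
        # Only the diagonal of Kxx - A @ Kzx is needed.
        kern_sums = kxx_diag - np.einsum('ij,ji->i', A, Kzx)
        return A, kern_sums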
Code example #3
    def _build_sample_info(self, kern_prods, kern_sums, means, covars):
        sample_means = util.init_list(0.0, [self.num_latent])
        sample_vars = util.init_list(0.0, [self.num_latent])
        for i in range(self.num_latent):
            # quad_form is diag(A S A^T), where S is the posterior covariance
            # over the inducing outputs and A = kern_prods.
            if self.diag_post:
                quad_form = util.diag_mul(kern_prods[i, :, :] * covars[i, :],
                                          tf.transpose(kern_prods[i, :, :]))
            else:
                # In the full case covars holds Cholesky factors, so S = L.L^T.
                full_covar = tf.matmul(covars[i, :, :],
                                       tf.transpose(covars[i, :, :]))
                quad_form = util.diag_mul(
                    tf.matmul(kern_prods[i, :, :], full_covar),
                    tf.transpose(kern_prods[i, :, :]))
            sample_means[i] = tf.matmul(kern_prods[i, :, :],
                                        tf.expand_dims(means[i, :], 1))
            sample_vars[i] = tf.expand_dims(kern_sums[i, :] + quad_form, 1)

        sample_means = tf.concat(sample_means, 1)
        sample_vars = tf.concat(sample_vars, 1)
        return sample_means, sample_vars
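Per latent process this computes the marginal moments: mean A.m and variance kern_sums + diag(A S A^T). A NumPy sketch of the full-covariance branch (illustrative only; S is rebuilt from its Cholesky factor):

    import numpy as np

    def sample_info(A, kern_sums, mean, chol_covar):
        S = chol_covar @ chol_covar.T                  # S = L L^T
        sample_means = A @ mean                        # A m
        quad_form = np.einsum('ij,jk,ik->i', A, S, A)  # diag(A S A^T)
        sample_vars = kern_sums + quad_form
        return sample_means, sample_vars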
Code example #4
    def _build_entropy(self, weights, means, covars):
        # First build half a square matrix of normals. This avoids re-computing symmetric normals.
        log_normal_probs = util.init_list(
            0.0, [self.num_components, self.num_components])
        for i in range(self.num_components):
            for j in range(i, self.num_components):
                for k in range(self.num_latent):
                    if self.diag_post:
                        normal = util.DiagNormal(
                            means[i, k, :], covars[i, k, :] + covars[j, k, :])
                    else:
                        if i == j:
                            # Compute chol(2S) = sqrt(2)*chol(S).
                            covars_sum = tf.sqrt(2.0) * covars[i, k, :, :]
                        else:
                            # TODO(karl): Can we just stay in cholesky space somehow?
                            covars_sum = tf.cholesky(
                                util.mat_square(covars[i, k, :, :]) +
                                util.mat_square(covars[j, k, :, :]))
                        normal = util.CholNormal(means[i, k, :], covars_sum)
                    log_normal_probs[i][j] += normal.log_prob(means[j, k, :])

        # Now compute the entropy.
        entropy = 0.0
        for i in range(self.num_components):
            weighted_log_probs = util.init_list(0.0, [self.num_components])
            for j in range(self.num_components):
                if i <= j:
                    weighted_log_probs[j] = tf.log(
                        weights[j]) + log_normal_probs[i][j]
                else:
                    weighted_log_probs[j] = tf.log(
                        weights[j]) + log_normal_probs[j][i]

            entropy -= weights[i] * util.logsumexp(
                tf.stack(weighted_log_probs))

        return entropy
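This is Jensen's lower bound on the entropy of a Gaussian mixture, -sum_i w_i log sum_j w_j N(m_i | m_j, S_i + S_j), with the normal densities factorizing over latent processes; the half-matrix of log_normal_probs exploits the symmetry N(m_i | m_j, S_i + S_j) = N(m_j | m_i, S_i + S_j). A NumPy sketch of the bound for the diagonal case with one latent process (illustrative, not the library's util helpers):

    import numpy as np
    from scipy.special import logsumexp
    from scipy.stats import multivariate_normal

    def entropy_bound(weights, means, diag_covars):
        entropy = 0.0
        for i in range(len(weights)):
            log_probs = [np.log(weights[j]) + multivariate_normal.logpdf(
                             means[i], means[j],
                             np.diag(diag_covars[i] + diag_covars[j]))
                         for j in range(len(weights))]
            entropy -= weights[i] * logsumexp(log_probs)
        return entropy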
Code example #5
    def _build_predict(self, weights, means, covars, inducing_inputs,
                       kernel_chol, test_inputs):
        kern_prods, kern_sums = self._build_interim_vals(
            kernel_chol, inducing_inputs, test_inputs)
        pred_means = util.init_list(0.0, [self.num_components])
        pred_vars = util.init_list(0.0, [self.num_components])
        for i in range(self.num_components):
            covar_input = (covars[i, :, :] if self.diag_post
                           else covars[i, :, :, :])
            sample_means, sample_vars = self._build_sample_info(
                kern_prods, kern_sums, means[i, :, :], covar_input)
            pred_means[i], pred_vars[i] = self.likelihood.predict(
                sample_means, sample_vars)

        pred_means = tf.stack(pred_means, 0)
        pred_vars = tf.stack(pred_vars, 0)

        # Compute the mean and variance of the Gaussian mixture from its components.
        weights = tf.expand_dims(tf.expand_dims(weights, 1), 1)
        weighted_means = tf.reduce_sum(weights * pred_means, 0)
        weighted_vars = (tf.reduce_sum(weights *
                                       (pred_means**2 + pred_vars), 0) -
                         tf.reduce_sum(weights * pred_means, 0)**2)
        return weighted_means, weighted_vars
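The closing lines apply the standard moment identities for a mixture: E[f] = sum_i w_i mu_i and Var[f] = sum_i w_i (mu_i^2 + sigma_i^2) - E[f]^2. A small NumPy check of the identity (values are arbitrary):

    import numpy as np

    weights = np.array([0.3, 0.7])
    pred_means = np.array([[1.0], [2.0]])   # per-component means
    pred_vars = np.array([[0.5], [0.25]])   # per-component variances

    w = weights[:, None]
    mix_mean = np.sum(w * pred_means, 0)
    mix_var = np.sum(w * (pred_means**2 + pred_vars), 0) - mix_mean**2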
Code example #6
    def load_data(self):
        loaded_data = load_json(self.get_path(), dict, "Storage")
        self.swaps = (init_list(loaded_data["trades"], SwapTrade)
                      if "trades" in loaded_data else [])
        self.locks = (init_list(loaded_data["locks"], dict)
                      if "locks" in loaded_data else [])
        self.history = (init_list(loaded_data["history"], SwapTransaction)
                        if "history" in loaded_data else [])
        self.addresses = (init_list(loaded_data["addresses"], dict)
                          if "addresses" in loaded_data
                          else [{"name": "default", "addresses": []}])
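The conditionals above guard against missing keys in the loaded JSON; note that init_list here is a deserialization helper mapping loaded records onto a class, unlike the list-preallocating util.init_list in the earlier examples. Assuming init_list([], cls) returns an empty list (an assumption, not verified from the source), the first three fallbacks could be written more compactly with dict.get:

        # Hypothetical refactor; assumes init_list([], cls) == [].
        self.swaps = init_list(loaded_data.get("trades", []), SwapTrade)
        self.locks = init_list(loaded_data.get("locks", []), dict)
        self.history = init_list(loaded_data.get("history", []), SwapTransaction)
        # self.addresses keeps its non-empty default, so the explicit
        # conditional remains the clearer form for it.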