Example #1
  def testValidateArgs(self):
    k = psd_kernels.MaternOneHalf(
        amplitude=-1., length_scale=-1., validate_args=True)
    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(k.amplitude)

    with self.assertRaises(tf.errors.InvalidArgumentError):
      self.evaluate(k.length_scale)

    # But `None` parameters are OK: validation only rejects non-positive values.
    k = psd_kernels.MaternOneHalf(
        amplitude=None, length_scale=None, validate_args=True)
    self.evaluate(k.apply([1.], [1.]))
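Note (a sketch added for contrast, not part of the original test): with positive parameters the same constructor validates cleanly, so the kernel evaluates without raising.

# Sketch (assumption, not from the test suite): positive parameters pass
# validation, so apply() succeeds with validate_args=True.
k = psd_kernels.MaternOneHalf(amplitude=1., length_scale=1., validate_args=True)
k.apply([1.], [1.])  # no InvalidArgumentError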
Example #2
  def testBatchShape(self):
    amplitude = np.random.uniform(2, 3., size=[3, 1, 2]).astype(np.float32)
    length_scale = np.random.uniform(
        2, 3., size=[1, 3, 1]).astype(np.float32)
    k = psd_kernels.MaternOneHalf(amplitude, length_scale)
    self.assertAllEqual(tf.TensorShape([3, 3, 2]), k.batch_shape)
    self.assertAllEqual([3, 3, 2], self.evaluate(k.batch_shape_tensor()))
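The expected [3, 3, 2] is just NumPy-style broadcasting of the two parameter shapes; a quick illustration (standard broadcasting semantics, not project code):

import numpy as np
# amplitude [3, 1, 2] and length_scale [1, 3, 1] broadcast elementwise:
np.broadcast_shapes((3, 1, 2), (1, 3, 1))  # -> (3, 3, 2), i.e. k.batch_shape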
Example #3
def graph_GP(t_norm_,
             w_pred_linsp_Var,
             e_xyztp_s,
             amplitude_init=np.array([0.1, 0.1]),
             length_scale_init=np.array([.001, .001]),
             obs_noise_var_init=1e-3,
             LEARNING_RATE=.1,
             NUM_SAMPLES=8
             ):

    with tf.name_scope("GP"):
        # ===================================================================
        # AMP LENSC
        # ===================================================================
        with tf.name_scope("amplitude_lengthscale"):
            (amp, amp_assign, amp_p,
             lensc, lensc_assign, lensc_p,
             emb, emb_assign, emb_p,
             obs_noise_var) = gpf.tf_Placeholder_assign_test(
                 amplitude_init, length_scale_init, obs_noise_var_init)

        # ===================================================================
        # KERNEL
        # ===================================================================
        with tf.name_scope("kernel"):
            kernel = tfkern.MaternOneHalf(amp, lensc)  # alternative: tfkern.ExponentiatedQuadratic

        # ===================================================================
        # GP_FIT
        # ===================================================================
        with tf.name_scope("GP_fit"):
            gp = tfd.GaussianProcess(
                kernel=kernel,  # batch of two kernels: amplitude/length_scale each have shape [2]
                index_points=e_xyztp_s,
                observation_noise_variance=obs_noise_var,
                validate_args=True)

            log_likelihood = gp.log_prob(tf.transpose(tf.cast(t_norm_, dtype=tf.float64)))
            tf.summary.scalar("log_likelihood_0", log_likelihood[0])
            tf.summary.scalar("log_likelihood_1", log_likelihood[1])  # 2D GP input case
            tf.summary.scalar("length_scale", lensc[0])
            tf.summary.scalar("amplitude", amp[0])
            train_op = gpf.tf_train_gp_adam(log_likelihood, LEARNING_RATE)

        # ===================================================================
        # GP REGRESSION MODEL
        # ===================================================================
        with tf.name_scope("GP_regression_model"):
            gprm = gpf.tf_gp_regression_model(kernel,
                                              w_pred_linsp_Var,  # pred_idx_pts 1D_emb:(400,1), 2D_emb:(200,2)
                                              e_xyztp_s, # e_xyztp_s # obs_idx_pts(15,1) (15,2)
                                              t_norm_,  # obs (15,) (15,)
                                              obs_noise_var, 0.)
            samples_1d = gprm.sample(NUM_SAMPLES)

    return amp, amp_assign, amp_p, lensc, lensc_assign, lensc_p, log_likelihood, samples_1d, train_op, obs_noise_var
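A hypothetical driver for the ops graph_GP returns; the session loop and step count are assumptions about how the project runs training, not code from the repository:

# Hypothetical TF1-style training loop over graph_GP's outputs.
# t_norm_, w_pred_linsp_Var and e_xyztp_s are tensors defined elsewhere.
(amp, amp_assign, amp_p, lensc, lensc_assign, lensc_p,
 log_likelihood, samples_1d, train_op, obs_noise_var) = graph_GP(
     t_norm_, w_pred_linsp_Var, e_xyztp_s)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(100):  # iteration count is an arbitrary choice
        _, ll = sess.run([train_op, log_likelihood])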
Example #4
  def testShapesAreCorrect(self):
    k = psd_kernels.MaternOneHalf(amplitude=1., length_scale=1.)

    x = np.ones([4, 3], np.float32)
    y = np.ones([5, 3], np.float32)

    self.assertAllEqual(k.matrix(x, y).shape, [4, 5])
    self.assertAllEqual(
        k.matrix(tf.stack([x] * 2), tf.stack([y] * 2)).shape, [2, 4, 5])

    k = psd_kernels.MaternOneHalf(
        amplitude=np.ones([2, 1, 1], np.float32),
        length_scale=np.ones([1, 3, 1], np.float32))
    self.assertAllEqual(
        k.matrix(
            tf.stack([x] * 2),  # shape [2, 4, 3]
            tf.stack([y] * 2)  # shape [2, 5, 3]
        ).shape,
        [2, 3, 2, 4, 5])
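Why [2, 3, 2, 4, 5]: the kernel's parameter batch shape broadcasts to [2, 3, 1], the stacked inputs add an example batch of [2], and the matrix over 4 x-points and 5 y-points appends [4, 5]. Sketched as shape arithmetic (standard broadcasting, stated informally):

# amplitude [2, 1, 1]  (x)  length_scale [1, 3, 1]  -> kernel batch [2, 3, 1]
# kernel batch [2, 3, 1]  (x)  input batch [2]      -> batch [2, 3, 2]
# matrix over x (4 points) and y (5 points)         -> final [2, 3, 2, 4, 5]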
Example #5
  def testValuesAreCorrect(self, feature_ndims, dtype, dims):
    amplitude = np.array(5., dtype=dtype)
    length_scale = np.array(.2, dtype=dtype)

    np.random.seed(42)
    k = psd_kernels.MaternOneHalf(amplitude, length_scale, feature_ndims)
    shape = [dims] * feature_ndims
    for _ in range(5):
      x = np.random.uniform(-1, 1, size=shape).astype(dtype)
      y = np.random.uniform(-1, 1, size=shape).astype(dtype)
      self.assertAllClose(
          self.evaluate(k.apply(x, y)),
          self._matern_one_half(amplitude, length_scale, x, y))
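The `_matern_one_half` helper is not shown in this excerpt; a plausible NumPy reference, assuming TFP's parameterization k(x, y) = amplitude**2 * exp(-||x - y|| / length_scale):

  def _matern_one_half(self, amplitude, length_scale, x, y):
    # Matern-1/2 (exponential) kernel, reducing over all feature dimensions.
    norm = np.sqrt(np.sum((x - y) ** 2))
    return amplitude ** 2 * np.exp(-norm / length_scale)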
Example #6
  def testMismatchedFloatTypesAreBad(self):
    with self.assertRaises(ValueError):
      psd_kernels.MaternOneHalf(np.float32(1.), np.float64(1.))
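The remedy, sketched: keep both parameters in a single float type (either dtype works, they just have to match).

psd_kernels.MaternOneHalf(np.float64(1.), np.float64(1.))  # OK
psd_kernels.MaternOneHalf(np.float32(1.), np.float32(1.))  # also OK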
Example #7
def generate_noiseless_data(X, d_true, data_type,
                            random_seed=100):
    """A Linear Function to generates toy data for training.

    Args:
        X: (np.ndarray) A matrix of input features.
        d_true: (int)  Number of real input features.
        data_type: (str)  Types of data to generate.
        random_seed: (int) Random seed to set for data generation.

    Returns:
        y: (np.ndarray of NP_DTYPE) A vector of noiseless responses,
            shape (n, ).
        variable_importance: (np.ndarray of NP_DTYPE) A vector of variable
            importance for each input feature, shape (d, ).

    Raises:
        (ValueError) If data_type does not belong to AVAIL_DATA_TYPE.
    """
    if data_type not in AVAIL_DATA_TYPE:
        raise ValueError("data type '{}' not available.".format(data_type))

    np.random.seed(random_seed)

    n, d = X.shape
    x = tf.Variable(initial_value=X, dtype=dtype_util.TF_DTYPE)

    if data_type == "linear":
        # produce coefficient
        linear_coef = np.zeros(shape=d).astype(dtype_util.NP_DTYPE)
        linear_coef[:d_true] = np.random.normal(loc=1., scale=.25,
                                                size=d_true)
        # produce function
        f = tf.tensordot(x, linear_coef, axes=1)
    elif data_type == "barron":
        if d_true != 5:
            raise ValueError("Barron class function only supports d_true=5.")

        f = (5 * tf.sin(tf.reduce_max(x[:, :2], axis=1)) +
             tf.atan(x[:, 1])) / (1 + (x[:, 0] + x[:, 4]) ** 2) + \
            tf.sin(0.5 * x[:, 2]) * (1 + tf.exp(x[:, 3] - 0.5 * x[:, 2])) + \
            x[:, 2] ** 2 + 2 * tf.sin(x[:, 3]) + 2 * x[:, 4]

    elif data_type == "sobolev":
        sobolev_kernel = tfk.MaternOneHalf(amplitude=1.,
                                           length_scale=.8)
        x2 = tf.stop_gradient(x[:, :d_true])
        sobolev_mat = sobolev_kernel.matrix(x[:, :d_true], x2)

        alpha = np.random.normal(size=n).astype(dtype_util.NP_DTYPE)
        f = tf.tensordot(sobolev_mat, alpha, axes=1)

    else:
        raise ValueError("data type {} not available.".format(data_type))

    # produce variable importance
    var_imp = tf.reduce_mean(tf.gradients(f, x)[0] ** 2, axis=0)

    # evaluate
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        f_val, var_imp_val = sess.run([f, var_imp])

    return f_val, var_imp_val
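A hypothetical call, assuming AVAIL_DATA_TYPE contains "linear" and that dtype_util defines the NP_DTYPE/TF_DTYPE used above (both live elsewhere in the module):

X = np.random.normal(size=(100, 10)).astype(np.float32)  # toy feature matrix
y, var_imp = generate_noiseless_data(X, d_true=5, data_type="linear")
# y.shape == (100,); var_imp.shape == (10,), near zero past the first 5 features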
Example #8
def create_cov_kernel(amp, lensc):
    # Build the GP covariance kernel; tfkern.ExponentiatedQuadratic is the
    # alternative the original comment hints at.
    kernel = tfkern.MaternOneHalf(amp, lensc)
    return kernel
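Usage is a one-liner; the parameter values below are placeholders, not from the project:

kernel = create_cov_kernel(amp=1.0, lensc=0.5)  # hypothetical values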
Example #9
File: main.py  Project: roxarcucci/VGPosp
def graph_GP(et_Var,
             t_norm_,
             w_pred_linsp_Var,
             e_xyztp_s,
             amplitude_init=np.array([0.1, 0.1]),
             length_scale_init=np.array([.001, .001]),
             obs_noise_var_init=1e-3,
             LEARNING_RATE=.1,
             NUM_SAMPLES=8):

    with tf.name_scope("GP"):
        # ===================================================================
        # AMP LENSC
        # ===================================================================
        with tf.name_scope("amplitude_lengthscale"):
            (amp, amp_assign, amp_p,
             lensc, lensc_assign, lensc_p,
             emb, emb_assign, emb_p,
             obs_noise_var) = gpf.tf_Placeholder_assign_test(
                 amplitude_init, length_scale_init, obs_noise_var_init)

        # ===================================================================
        # KERNEL
        # ===================================================================
        with tf.name_scope("kernel"):
            kernel = tfkern.MaternOneHalf(amp, lensc)  # alternative: tfkern.ExponentiatedQuadratic

        # ===================================================================
        # GP_FIT
        # ===================================================================
        with tf.name_scope("GP_fit"):
            # gp = gpf.fit_gp(kernel, np.array(enc_df).reshape(-1, enc_df.shape[1]).astype(np.float64), obs_noise_var)  # GP fit to 3D XYZ, where Z is sinusoid of XY
            gp = gpf.fit_gp(
                kernel,
                # np.array(enc_df).reshape(-1, enc_df.shape[1]).astype(np.float64),
                e_xyztp_s,  # et_Var
                obs_noise_var)  # GP fit to 3D XYZ, where Z is sinusoid of XY
            log_likelihood = gp.log_prob(
                tf.transpose(tf.cast(t_norm_, dtype=tf.float64)))
            tf.summary.scalar("log_likelihood_0", log_likelihood[0])
            tf.summary.scalar("log_likelihood_1",
                              log_likelihood[1])  # 2D GP input case
            tf.summary.scalar("length_scale", lensc[0])
            tf.summary.scalar("amplitude", amp[0])
            train_op = gpf.tf_train_gp_adam(log_likelihood, LEARNING_RATE)

        # ===================================================================
        # GP REGRESSION MODEL
        # ===================================================================
        with tf.name_scope("GP_regression_model"):
            gprm = gpf.tf_gp_regression_model(
                kernel,
                w_pred_linsp_Var,  # pred_idx_pts 1D_emb:(400,1), 2D_emb:(200,2)
                e_xyztp_s,  # e_xyztp_s # obs_idx_pts(15,1) (15,2)
                t_norm_,  # obs (15,) (15,)
                obs_noise_var,
                0.)
            samples_1d = gprm.sample(NUM_SAMPLES)

    return amp, amp_assign, amp_p, lensc, lensc_assign, lensc_p, log_likelihood, samples_1d, train_op, obs_noise_var