Example #1
def fit_gp(kernel, obs_idx_pts, obs_noise_var):
    """Build a GP prior over the given observation index points."""
    gp = tfd.GaussianProcess(
        kernel=kernel,
        index_points=obs_idx_pts,
        observation_noise_variance=obs_noise_var,
        validate_args=True)
    return gp
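A minimal usage sketch (assuming the usual aliases tfd = tfp.distributions and tfk = tfp.math.psd_kernels; the toy index points and kernel parameters below are illustrative):

import numpy as np
import tensorflow_probability as tfp

tfd = tfp.distributions
tfk = tfp.math.psd_kernels

# Toy 1-D index points of shape [num_points, 1]; everything is float64 so the
# kernel parameters and index points share a dtype.
obs_idx_pts = np.linspace(-1., 1., 20, dtype=np.float64)[:, np.newaxis]

kernel = tfk.ExponentiatedQuadratic(amplitude=np.float64(1.),
                                    length_scale=np.float64(0.5))
gp = fit_gp(kernel, obs_idx_pts, obs_noise_var=np.float64(1e-3))

# Draw a few prior function samples at the index points; shape [3, 20].
prior_samples = gp.sample(3)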
Example #2
def gp_priordistrib(krl,ipts,onv):
    ''' Create the GP prior distribution,
    which we will use to train the model parameters.'''
    gp = tfd.GaussianProcess(
        kernel=krl,
        index_points=ipts,
        observation_noise_variance=onv)
    return gp
Example #3
def train_gpr_tfp(l=None):
    if l is None:
        l = get_data()
    amplitude = (np.finfo(np.float64).tiny + tf.nn.softplus(
        tf.Variable(initial_value=1., name='amplitude', dtype=np.float64)))
    length_scale = (np.finfo(np.float64).tiny + tf.nn.softplus(
        tf.Variable(initial_value=1., name='length_scale', dtype=np.float64)))
    observation_noise_variance = (np.finfo(np.float64).tiny + tf.nn.softplus(
        tf.Variable(initial_value=1e-6,
                    name='observation_noise_variance',
                    dtype=np.float64)))
    kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)
    model_train = tfd.GaussianProcess(
        kernel=kernel,
        index_points=l.X_train.values,
        observation_noise_variance=observation_noise_variance)
    log_likelihood = model_train.log_prob(l.y_train.values.squeeze())
    optimizer = tf.train.AdamOptimizer(learning_rate=.01)
    train_op = optimizer.minimize(-log_likelihood)

    # training
    num_iters = 2000
    # Store the likelihood values during training, so we can plot the progress
    lls_ = np.zeros(num_iters, np.float64)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for i in range(num_iters):
        _, lls_[i] = sess.run([train_op, log_likelihood])
    [amplitude_, length_scale_, observation_noise_variance_
     ] = sess.run([amplitude, length_scale, observation_noise_variance])
    print('Trained parameters:')
    print('amplitude: {}'.format(amplitude_))
    print('length_scale: {}'.format(length_scale_))
    print('observation_noise_variance: {}'.format(observation_noise_variance_))

    # Plot the loss evolution
    plt.figure(1, figsize=(12, 4))
    plt.clf()
    plt.plot(lls_)
    plt.xlabel("Training iteration")
    plt.ylabel("Log marginal likelihood")
    plt.show()

    # For inference, TFP uses a separate object that represents the posterior
    # predictive distribution rather than reusing the training model, and it
    # takes more arguments (see the sketch after this example).

    model_infer = TFP_GRP_Wrapper(model_train, l.y_train.values.squeeze())

    num_samples = 50
    samples = model_infer.sample(num_samples)

    return attributedict_from_locals('model_train,model_infer,samples')
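TFP_GRP_Wrapper is a project-specific helper that is not shown here. A minimal sketch of the posterior predictive step it presumably wraps, reusing the names from the function above and a hypothetical predictive_index_points_ grid, uses tfd.GaussianProcessRegressionModel:

# Hypothetical prediction grid of shape [num_predictive_points, num_features];
# adjust the range and feature count to the actual training data.
predictive_index_points_ = np.linspace(-1., 1., 200,
                                       dtype=np.float64)[:, np.newaxis]

# Posterior predictive GP conditioned on the training data, sharing the kernel
# and observation noise variance learned above.
gprm = tfd.GaussianProcessRegressionModel(
    kernel=kernel,
    index_points=predictive_index_points_,
    observation_index_points=l.X_train.values,
    observations=l.y_train.values.squeeze(),
    observation_noise_variance=observation_noise_variance)

samples = gprm.sample(50)  # shape [50, 200]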
Example #4
    sigma = tf.Variable(initial_value=config.amplitude,
                        name='sigma',
                        dtype=np.float64)
    lambda_ = tf.Variable(initial_value=config.length_scale,
                          name='lambda',
                          dtype=np.float64)
    amplitude = (np.finfo(np.float64).tiny + tf.nn.softplus(sigma))
    length_scale = (np.finfo(np.float64).tiny + tf.nn.softplus(lambda_))

with tf.name_scope('function_process'):
    with tf.name_scope('kernel'):
        kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)
    with tf.name_scope('neg-loglike'):
        function_GP = tfd.GaussianProcess(
            kernel=kernel,
            mean_fn=lambda x: tf.reduce_mean(y, keep_dims=True),
            index_points=Xt,
            observation_noise_variance=0.,
            jitter=1e-10)
        log_ll = function_GP.log_prob(y)
        tf.summary.scalar('log_likelihood', log_ll)
    with tf.name_scope('train'):
        optimizer = tf.train.AdamOptimizer(config.learning_rate)
        train_step = optimizer.minimize(-log_ll)

merged = tf.summary.merge_all()
saver = tf.train.Saver({
    'hyperparameter/amplitude': sigma,
    'hyperparameter/length_scale': lambda_
})

with tf.Session() as sess:
Example #5
def gp_priordistrib(k, i, o):
    gp = tfd.GaussianProcess(
        kernel=k,
        index_points=i,
        observation_noise_variance=o)
    return gp
Example #6
                        dtype=tf.float64,
                        initializer=np.float64(1.)))

    observations_ = small_x_train.reshape(N, -1).transpose()

    init_ = np.random.normal(size=(N, 2))
    latent_index_points = tf.get_variable(name='latent_index_points',
                                          dtype=tf.float64,
                                          initializer=init_)

    kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)
    kernel_ = Probability_box(100, 10, amplitude,
                              length_scale).kernel  # some errors...!

    gp_ = tfd.GaussianProcess(
        kernel=kernel_,
        index_points=latent_index_points,
        observation_noise_variance=observation_noise_variance)

    log_probs = gp_.log_prob(observations_, name='log_prob')

    loss = -tf.reduce_mean(log_probs)
    optimizer = tf.train.AdamOptimizer(learning_rate=.1)
    train_op = optimizer.minimize(loss)

    # Initialize variables and train!
    sess.run(tf.global_variables_initializer())
    num_iters = 100
    log_interval = 20
    lips_ = np.zeros((num_iters, N, 2), np.float64)
    for i in range(num_iters):
        _, loss_, lips_[i] = sess.run([train_op, loss, latent_index_points])
Example #7
                name='observation_noise_variance',
                dtype=np.float64)))


# Create the covariance kernel, which will be shared between the prior (which we
# use for maximum likelihood training) and the posterior (which we use for
# posterior predictive sampling)
kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)


# Create the GP prior distribution, which we will use to train the model
# parameters.
gp = tfd.GaussianProcess(kernel=kernel,
                         index_points=observation_index_points_,
                         observation_noise_variance=observation_noise_variance)

# This lets us compute the log likelihood of the observed data. Then we can
# maximize this quantity to find optimal model parameters.
log_likelihood = gp.log_prob(observations_)  # + gp.log_prob(observations_1)


# Define the optimization ops for maximizing likelihood (minimizing neg
# log-likelihood!)
optimizer = tf.train.AdamOptimizer(learning_rate=.01)
train_op = optimizer.minimize(-log_likelihood)

Example #8
# $$
# k(d) = \alpha^2 \cdot \exp\left\{-\frac{d^2}{2\rho^2}\right\}
# $$
#
# where $\alpha$ is the amplitude of the covariance, $\rho$ is the length scale, which controls how slowly information decays with distance (a larger $\rho$ means information about a point can be used for data farther away), and $d$ is the distance.
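#
# A quick numeric sanity check of this kernel (a sketch; the parameter values
# below are illustrative):
_alpha, _rho, _d = 2.0, 1.5, 0.7
manual_k = _alpha**2 * np.exp(-_d**2 / (2. * _rho**2))
check_kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(
    amplitude=_alpha, length_scale=_rho)
# apply() evaluates k(x1, x2) pointwise; for two scalar-feature points a
# distance _d apart it should agree with manual_k.
tfp_k = check_kernel.apply([[0.]], [[_d]])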


# Specify GP model
gp_model = tfd.JointDistributionNamed(
    dict(
        amplitude=tfd.LogNormal(dtype(0), dtype(0.1)),  # amplitude
        length_scale=tfd.LogNormal(dtype(0), dtype(1)),  # length scale
        v=tfd.LogNormal(dtype(0), dtype(1)),  # observation noise variance
        obs=lambda length_scale, amplitude, v: tfd.GaussianProcess(
            kernel=tfp.math.psd_kernels.ExponentiatedQuadratic(
                amplitude, length_scale),
            index_points=X[..., np.newaxis],
            observation_noise_variance=v)))

# Run graph to make sure it works.
_ = gp_model.sample()

# Initial values.
initial_state = [
    1e-1 * tf.ones([], dtype=np.float64, name='amplitude'),
    1e-1 * tf.ones([], dtype=np.float64, name='length_scale'),
    1e-1 * tf.ones([], dtype=np.float64, name='v')
]

# Bijectors (from unconstrained to constrained space)
bijectors = [