Example #1
    x_mu = tf.placeholder(shape=[None, 2], dtype=tf.float32)
    x_theta = tf.placeholder(shape=[None, 2, N_GEN], dtype=tf.float32)
    inds_gen = tf.placeholder(
        shape=[None, 2, N_GEN],
        dtype=tf.float32)
    # inds_gen determines which generator is chosen for each of the samples:
    # it should be constant along the second dimension, with entries either
    # 0 or 1, so that inds_gen[i, j, :] sums to one.
    y = 0  # mixture of the N_GEN generator outputs, selected per sample by inds_gen
    for i in range(N_GEN):
        y += inds_gen[:, :, i] * ff_net(x_theta[:, :, i],
                                        'T_' + str(i),
                                        input_dim=2,
                                        output_dim=2,
                                        activation=ACT_T,
                                        n_layers=LAYERS_T,
                                        hidden_dim=HIDDEN_T)
    T_theta = y

    h_mu = 0  # sum over h evaluated at samples of mu
    h_T_theta = 0  # sum over h evaluated at samples of theta
    for i in range(2):
        h_mu += ff_net(x_mu[:, i:(i + 1)],
                       'h' + str(i),
                       input_dim=1,
                       output_dim=1,
                       activation=ACT_H,
                       n_layers=LAYERS_H,
                       hidden_dim=HIDDEN_H)
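
As the comments note, inds_gen has to be fed as a one-hot selection over the N_GEN generators, repeated along the second dimension. A minimal NumPy sketch of building such a feed array (the random generator choice per sample and the batch size are illustrative assumptions, not part of the original code):

import numpy as np

def make_inds_gen(batch_size, n_gen):
    # pick one generator index per sample
    choice = np.random.randint(n_gen, size=batch_size)
    # one-hot encode the choice -> shape [batch_size, n_gen]
    one_hot = np.eye(n_gen, dtype=np.float32)[choice]
    # repeat along the second (coordinate) dimension so that
    # inds_gen[i, j, :] is the same one-hot vector for j = 0, 1
    return np.repeat(one_hot[:, None, :], 2, axis=1)

# e.g. feed_dict[inds_gen] = make_inds_gen(256, N_GEN)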
Example #2
    return -tf.reduce_sum(tf.square(u[:, 0:D_EACH] - u[:, D_EACH:D]), axis=1)


# build tensorflow graph
for run_K in range(N_RUNS):
    t0 = time.time()
    tf.reset_default_graph()
    #
    x_mu = tf.placeholder(shape=[None, D], dtype=tf.float32)  # samples from mu
    x_theta = tf.placeholder(shape=[None, D_THETA],
                             dtype=tf.float32)  # samples from theta
    #
    T_theta = ff_net(x_theta,
                     'T',
                     input_dim=D_THETA,
                     output_dim=D,
                     activation=ACT_T,
                     n_layers=LAYERS_T,
                     hidden_dim=HIDDEN_T)
    #
    h_mu = 0  # sum over h evaluated at samples of mu
    h_T_theta = 0  # sum over h evaluated at samples of theta
    for i in range(2):
        h_mu_h = ff_net(x_mu[:, i * D_EACH:(i + 1) * D_EACH],
                        'h_' + str(i),
                        input_dim=D_EACH,
                        output_dim=1,
                        activation=ACT_H,
                        n_layers=LAYERS_H,
                        hidden_dim=HIDDEN_H)
        h_mu += h_mu_h  # + psi_star(h_mu_h)
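
All of the examples call a helper ff_net that is not shown in these snippets. Its actual definition lives in the source repository; a plausible minimal sketch, assuming a plain fully connected network in TensorFlow 1.x whose variables are shared across calls with the same name (the examples evaluate e.g. 'h_kappa' and 'h_' + str(i) on several inputs and clearly rely on weight sharing):

import tensorflow as tf

def ff_net(x, name, input_dim, output_dim, activation=tf.nn.relu,
           n_layers=3, hidden_dim=64):
    # input_dim is unused here (dense layers infer it from x);
    # it is kept only to mirror the call sites above.
    # reuse=tf.AUTO_REUSE so a second call with the same name evaluates
    # the same network (same weights) on a different input tensor.
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        h = x
        for layer in range(n_layers):
            h = tf.layers.dense(h, hidden_dim, activation=activation,
                                name='dense_' + str(layer))
        return tf.layers.dense(h, output_dim, name='out')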
Example #3
# build tensorflow graph
for run_K in range(N_RUNS):
    t0 = time.time()
    tf.reset_default_graph()

    x_mu = tf.placeholder(shape=[None, 2], dtype=tf.float32)  # samples from mu
    x_theta = tf.placeholder(shape=[None, 2],
                             dtype=tf.float32)  # samples from theta
    x_kappa = tf.placeholder(shape=[None, 1],
                             dtype=tf.float32)  # samples from kappa

    T_theta = ff_net(x_theta,
                     'T',
                     input_dim=2,
                     output_dim=2,
                     activation=ACT_T,
                     n_layers=LAYERS_T,
                     hidden_dim=HIDDEN_T)

    h_mu = 0  # sum over h evaluated at samples of mu
    h_T_theta = 0  # sum over h evaluated at samples of theta
    for i in range(2):
        h_mu += ff_net(x_mu[:, i:(i + 1)],
                       'h_' + str(i),
                       input_dim=1,
                       output_dim=1,
                       activation=ACT_H,
                       n_layers=LAYERS_H,
                       hidden_dim=HIDDEN_H)
        h_T_theta += ff_net(T_theta[:, i:(i + 1)],
                            'h_' + str(i),
                            input_dim=1,
                            output_dim=1,
                            activation=ACT_H,
                            n_layers=LAYERS_H,
                            hidden_dim=HIDDEN_H)
Example #4

def f_objective(u):
    return tf.nn.relu(tf.reduce_sum(u, axis=1))


# build tensorflow graph
for run_K in range(N_RUNS):
    t0 = time.time()
    tf.reset_default_graph()

    x_mu = tf.placeholder(shape=[None, 2], dtype=tf.float32)  # samples from mu
    x_theta = tf.placeholder(shape=[None, 2], dtype=tf.float32)  # samples from theta
    x_kappa = tf.placeholder(shape=[None, 1], dtype=tf.float32)  # samples from kappa

    T_theta = ff_net(x_theta, 'T', input_dim=2, output_dim=2, activation=ACT_T, n_layers=LAYERS_T, hidden_dim=HIDDEN_T)

    h_mu = 0  # sum over h evaluated at samples of mu
    h_T_theta = 0  # sum over h evaluated at samples of theta
    for i in range(2):
        h_mu_h = ff_net(x_mu[:, i:(i + 1)], 'h_' + str(i), input_dim=1, output_dim=1, activation=ACT_H,
                        n_layers=LAYERS_H, hidden_dim=HIDDEN_H)
        h_mu += h_mu_h + psi_star(h_mu_h)
        h_T_theta += ff_net(T_theta[:, i:(i + 1)], 'h_' + str(i), input_dim=1, output_dim=1, activation=ACT_H,
                            n_layers=LAYERS_H, hidden_dim=HIDDEN_H)

    h_kappa_mu_h = ff_net(x_kappa, 'h_kappa', input_dim=1, output_dim=1, activation=ACT_H, n_layers=LAYERS_H,
                          hidden_dim=HIDDEN_H)
    h_kappa_mu = h_kappa_mu_h + psi_star(h_kappa_mu_h)
    h_kappa_T_theta = ff_net(T_theta[:, 0:1] - T_theta[:, 1:2], 'h_kappa', input_dim=1, output_dim=1,
                             activation=ACT_H, n_layers=LAYERS_H, hidden_dim=HIDDEN_H)
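
The snippets end before any of the placeholders are fed. A minimal sketch of evaluating the terms built in Example #4, assuming simple NumPy samplers for mu, theta, and kappa (the distributions and batch size here are placeholders for illustration, not the original experiment setup):

import numpy as np

BATCH = 256

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {
        x_mu: np.random.standard_normal((BATCH, 2)).astype(np.float32),
        x_theta: np.random.uniform(-1.0, 1.0, (BATCH, 2)).astype(np.float32),
        x_kappa: np.random.standard_normal((BATCH, 1)).astype(np.float32),
    }
    # evaluate batch averages of the tensors built above
    mean_h_mu, mean_h_kappa = sess.run(
        [tf.reduce_mean(h_mu), tf.reduce_mean(h_kappa_mu)], feed_dict=feed)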