Example #1
def main():
    with tf.device(config.device):
        t = build_graph(is_test=True)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:

        logger.info(config.ckpt_path)
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(config.ckpt_path))
        logger.info("Loading model completely")
        z_latent = sampler_switch(config)
        d_q = sess.run(t.p_o,
                       feed_dict={
                           t.z_e: dp.test.e,
                           t.x_c: dp.test.c,
                           t.z_l: z_latent,
                           t.p_in: dp.test.rd,
                       })
        r_p = sess.run(t.p_i,
                       feed_dict={
                           t.x_c: dp.test.c,
                           t.z_l: z_latent,
                           t.z_e: dp.test.e,
                           t.p_in: dp.test.rd
                       })

        # invert the scaled outputs
        qm, qr, rdm, rdr = dp.out.qm, dp.out.qr, dp.out.rdm, dp.out.rdr
        actual_Q = anti_norm(dp.test.q, qm, qr)
        result_Q = anti_norm(d_q, qm, qr)
        actual_r = anti_norm(dp.test.rd, rdm, rdr)
        result_r = anti_norm(r_p, rdm, rdr)

        # save the result
        ensemble = {
            'actual_Q': actual_Q,
            'result_Q': result_Q,
            'actual_r': actual_r,
            'result_r': result_r
        }

        path = os.path.join(config.logs_path, config.description + '-test.pkl')
        pickle_save(ensemble, 'test_result', path)
        copy_file(path, config.history_test_path)

        # visualize the process
        vis.cplot(actual_Q[:, 0], result_Q[:, 0], ['Q1', 'origin', 'modify'],
                  config.t_p)
        vis.cplot(actual_Q[:, 1], result_Q[:, 1], ['Q2', 'origin', 'modify'],
                  config.t_p)
        for num in range(6):
            vis.cplot(actual_r[:, num], result_r[:, num],
                      ['R{}'.format(num + 1), 'origin', 'modify'], config.t_p)
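
# `anti_norm` is not defined in this snippet. A minimal sketch, assuming the
# forward scaling was (x - mean) / range, with qm/rdm the means and qr/rdr
# the ranges (hypothetical semantics inferred from the call sites above):
def anti_norm(x, mean, rng):
    # Invert the scaling so Q and r come back in their original units.
    return x * rng + mean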
Example #2
def log(out):
    logger.info(out.get_shape())
Example #3
    out = flatten(out)
    out = mlp(out, 256, 'leaky', is_training, norm=True, name='mlp_1')
    if latent == 'dm':
        out = mlp(out, 2, 'tanh', is_training, norm=False, name='mlp_2')
    elif latent == 'gs':
        mu = mlp(out, 2, None, is_training, norm=False, name='mlp_2')
        sg = mlp(out, 2, None, is_training, norm=False, name='mlp_3')
        es = tf.random_normal(shape=tf.shape(mu), mean=0, stddev=0.3)
        out = mu + es * sg
    else:
        raise ValueError("latent must be 'dm' or 'gs'")
    return out


if __name__ == "__main__":

    def log(out):
        logger.info(out.get_shape())

    img = tf.placeholder(dtype=tf.float32, shape=[32, 1, 15, 1])
    logger.info(img.get_shape())
    process(img, is_training=True, latent='gs')

    for var in tf.trainable_variables():
        logger.info(var)
    # f= flatten(img)
    # log(f)
    # import numpy as np
    # print(np.prod(img.get_shape().as_list()[1:]))
    # log(tf.reshape(img, [-1, np.prod(img.get_shape().as_list()[1:])]))
    # logger.info(f.get_shape())
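
    # The 'gs' branch above implements the reparameterization trick:
    # z = mu + eps * sg with eps ~ N(0, I), so gradients flow through the
    # mean and scale heads. A minimal sketch of the usual formulation
    # (assumed here; it predicts log-sigma to keep the scale positive):
    mu_s = tf.zeros([32, 2])                         # stand-in mean head
    log_sigma_s = tf.zeros([32, 2])                  # stand-in log-std head
    eps_s = tf.random_normal(shape=tf.shape(mu_s))   # eps ~ N(0, I)
    z_s = mu_s + eps_s * tf.exp(log_sigma_s)         # differentiable sample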
Example #4
    if latent == 'dm':
        out = mlp(out, 2, None, is_training, norm=False, name='mlp_2')
    elif latent == 'gs':
        mu = mlp(out, 2, None, is_training, norm=False, name='mlp_2')
        sg = mlp(out, 2, None, is_training, norm=False, name='mlp_3')
        es = tf.random_normal(shape=tf.shape(mu), mean=0, stddev=0.3)
        out = mu + es * sg
    else:
        raise ValueError("latent must be 'dm' or 'gs'")
    return out


if __name__ == "__main__":

    def log(out):
        logger.info(out.get_shape())

    img = tf.placeholder(dtype=tf.float32, shape=[32, 1, 15, 1])
    logger.info(img.get_shape())
    logger.info(type(img))
    logger.info('{}'.format(isinstance(img, tf.Tensor)))
    # encoder(img, is_training=True, latent='gs')

    # [logger.info(var) for var in tf.trainable_variables()]
    # f= flatten(img)
    # log(f)
    # import numpy as np
    # print(np.prod(img.get_shape().as_list()[1:]))
    # log(tf.reshape(img, [-1, np.prod(img.get_shape().as_list()[1:])]))
    # logger.info(f.get_shape())
Example #5
                  config.t_p)
        vis.cplot(actual_Q[:, 1], result_Q[:, 1], ['Q2', 'origin', 'modify'],
                  config.t_p)
        for num in range(6):
            vis.cplot(actual_r[:, num], result_r[:, num],
                      ['R{}'.format(num + 1), 'origin', 'modify'], config.t_p)


if __name__ == "__main__":
    # test result
    main()

    # process loss
    path = os.path.join(config.logs_path, config.description + '-train.pkl')

    logger.info("{}".format(path))
    hist_value, hist_head = pickle_load(path, use_pd=True)
    for loss_name in [
            'R_err',
            'GE_err',
            'EG_err',
            'GPt_err',
            'GP_err',
            'Pt_err',
            'P_err',
    ]:
        vis.tsplot(hist_value[loss_name], loss_name, config.loss_path)

    vis.dyplot(hist_value['Dz_err'], hist_value['Ez_err'], 'z',
               config.loss_path)
    vis.dyplot(hist_value['Di_err'], hist_value['Gi_err'], 'img',
Example #6
def build_graph(is_test=False):
    logger.info("Set a placeholder")
    img = tf.placeholder(dtype=tf.float32,
                         shape=[config.batch_size, 1, config.ndim_x, 1])
    z_prior = tf.placeholder(dtype=tf.float32,
                             shape=[config.batch_size, config.ndim_z])
    z_ept = tf.placeholder(dtype=tf.float32,
                           shape=[config.batch_size, config.ndim_z])
    z_lat = tf.placeholder(dtype=tf.float32,
                           shape=[config.batch_size, config.ndim_z])
    img_prior = tf.placeholder(dtype=tf.float32,
                               shape=[config.batch_size, 1, config.ndim_x, 1])
    img_cond = tf.placeholder(dtype=tf.float32,
                              shape=[config.batch_size, 1, config.ndim_x, 1])
    lr = tf.placeholder(dtype=tf.float32, shape=[])
    # process
    pt = tf.placeholder(dtype=tf.float32,
                        shape=[config.batch_size, config.ndim_z])
    pi_int = tf.placeholder(dtype=tf.float32,
                            shape=[config.batch_size, 1, config.ndim_x, 1])

    logger.info("A model is being built")
    # conditional AAE: B-> z -> B'
    z_img = encoder_x_z(img)
    img_z = decoder_z_x(img_cond, z_img, z_ept)
    # conditional Latent Regressor-GAN: z -> B' -> z'
    img_latent = decoder_z_x(img_cond, z_lat, z_ept, reuse=True)
    z_let = encoder_x_z(img_latent, reuse=True)

    # optimal_adjustment: r = r' + dr
    with tf.variable_scope('optimal_adjustment'):
        r, d = tf.split(img_latent, [config.ndim_r, config.ndim_d], axis=2)
        rd = tf.concat([r, tf.zeros_like(d)], axis=2)
        pi = pi_int + rd

    # Process Validation: r -> Q
    po = process_x(pi)
    dq = po - pt

    # Test Phase
    if is_test:
        test_handle = Object()
        test_handle.z_e = z_ept
        test_handle.z_l = z_lat
        test_handle.z_img = z_img
        test_handle.x = img
        test_handle.x_c = img_cond
        test_handle.x_r = img_z
        test_handle.x_lat = img_latent
        test_handle.p_in = pi_int
        test_handle.p_i = pi
        test_handle.p_o = po
        test_handle.p_t = pt
        test_handle.dq = dq
        return test_handle

    # Discriminator on z
    D_z = discriminator_z(z_img)
    D_z_prior = discriminator_z(z_prior, reuse=True)

    # Discriminator on img
    D_img = discriminator_img(img_cond, img_latent, z_ept)
    D_img_prior = discriminator_img(img_cond, img_prior, z_ept, reuse=True)

    logger.info("The model has been built")

    logger.info("Start define loss function")
    class_true = tf.ones(shape=(config.batch_size, config.ndim_z / 2),
                         dtype=tf.int32)
    class_fake = tf.zeros(shape=(config.batch_size, config.ndim_z / 2),
                          dtype=tf.int32)
    loss_discriminator_z = Loss.softmax_cross_entropy(D_z, D_z_prior,
                                                      class_fake, class_true)
    loss_encoder_z = Loss.softmax_cross_entropy(D_z,
                                                D_z_prior,
                                                class_fake,
                                                class_true,
                                                for_generator=True)
    loss_discriminator_img = Loss.softmax_cross_entropy(
        D_img, D_img_prior, class_fake, class_true)
    loss_decoder_img = Loss.softmax_cross_entropy(D_img,
                                                  D_img_prior,
                                                  class_fake,
                                                  class_true,
                                                  for_generator=True)
    logger.info('L2 latent loss function')
    loss_latent = Loss.euclidean_distance(z_lat, z_let)
    loss_r = Loss.euclidean_distance(img, img_z)
    # process
    loss_process = Loss.euclidean_distance(po, pt)
    # additional loss function
    loss_dq = Loss.euclidean_distance(z_ept, dq)
    loss_tv = Loss.euclidean_distance(r, tf.zeros_like(r))
    logger.info('Summing all the loss terms')
    loss_EG = loss_r * config.coeff_rest + \
              loss_decoder_img + \
              loss_encoder_z * config.coeff_z + \
              loss_latent * config.coeff_lat
    loss_Dz = loss_discriminator_z
    loss_Di = loss_discriminator_img
    loss_GP = loss_dq + \
              loss_tv * config.coeff_tv
    loss_P = loss_process

    logger.info('Collecting variables')
    variables_encoder = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope='encoder')
    variables_decoder = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope='decoder')
    variables_discriminator_z = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator_z')
    variables_discriminator_img = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator_img')
    variables_process = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope='process')

    for var in tf.trainable_variables():
        logger.info(var)
    logger.info('Optimizer')
    global_step = tf.Variable(trainable=False,
                              initial_value=0,
                              dtype=tf.float32)

    opt_EG = optimize(loss_EG,
                      variables_decoder + variables_encoder,
                      global_step=global_step,
                      learning_rate=lr)
    opt_Dz = optimize(loss_Dz,
                      variables_discriminator_z,
                      learning_rate=lr,
                      global_step=None)
    opt_Di = optimize(loss_Di,
                      variables_discriminator_img,
                      learning_rate=lr,
                      global_step=None)
    opt_P = optimize(loss_P,
                     variables_process,
                     learning_rate=lr,
                     global_step=None)

    opt_GP = optimize(
        loss_GP,
        variables_decoder,  # variables_process + variables_decoder
        learning_rate=lr,
        global_step=None)

    # output what we want
    graph_handle = Object()
    graph_handle.x = img
    graph_handle.z_r = z_img
    graph_handle.z_p = z_prior
    graph_handle.z_l = z_lat
    graph_handle.z_e = z_ept
    graph_handle.x_c = img_cond
    graph_handle.x_s = img_prior
    graph_handle.x_ = img_z
    graph_handle.p_in = pi_int
    graph_handle.p_i = pi
    graph_handle.p_o = po
    graph_handle.p_ot = pt

    graph_handle.opt_r = opt_EG
    graph_handle.opt_dz = opt_Dz
    graph_handle.opt_dimg = opt_Di
    graph_handle.opt_p = opt_P
    graph_handle.opt_q = opt_GP

    graph_handle.loss_r = loss_r
    graph_handle.loss_e = loss_encoder_z
    graph_handle.loss_d = loss_decoder_img
    graph_handle.loss_l = loss_latent
    graph_handle.loss_eg = loss_EG

    graph_handle.loss_dz = loss_discriminator_z
    graph_handle.loss_dimg = loss_discriminator_img
    graph_handle.loss_p = loss_process

    graph_handle.loss_q = loss_dq
    graph_handle.loss_tv = loss_tv
    graph_handle.loss_gp = loss_GP
    graph_handle.lr = lr

    return graph_handle
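
# `Object` is not defined in this snippet; it serves as a bare attribute
# container for bundling tensors and ops. A minimal stand-in (assumed):
class Object(object):
    """Empty namespace; attributes are attached dynamically."""
    pass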
Example #7

def bias_variable(shape, name='bias'):
    initial = tf.constant(0.0, shape=shape)
    return tf.get_variable(name=name, initializer=initial)
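
# A matching weight initializer is not shown here; a typical TF1-style
# companion sketch (hypothetical, mirroring bias_variable's pattern):
def weight_variable(shape, name='weight', stddev=0.02):
    initial = tf.truncated_normal(shape, stddev=stddev)
    return tf.get_variable(name=name, initializer=initial)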

if __name__ == '__main__':
    from sequential.utils import logger
    def log(out):
        logger.info(out.get_shape())

    _input = tf.placeholder(dtype=tf.float32, shape=[64, 1, 15, 1])
    z = tf.placeholder(dtype=tf.float32, shape=[64, 2])
    # z = concat_label(_input, _input)
    # z = concat_label(z, z)
    x = _input
    label = z
    x_shape = x.get_shape().as_list()
    label_shape = label.get_shape().as_list()
    dnum_x = len(x_shape)
    dnum_l = len(label_shape)
    logger.info('{}, {}'.format(dnum_x, dnum_l))
    z = concat_label(_input, z)
    log(z)
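
    # `concat_label` is not shown in this snippet. Given the shapes above
    # ([64, 1, 15, 1] features, [64, 2] labels), a common cGAN-style version
    # tiles the label across the spatial dims and concatenates it on the
    # channel axis. A hypothetical sketch:
    def concat_label_sketch(x, label):
        n, h, w, _ = x.get_shape().as_list()   # NHWC feature map
        c = label.get_shape().as_list()[-1]
        lab = tf.reshape(label, [n, 1, 1, c])
        lab = tf.tile(lab, [1, h, w, 1])       # broadcast label spatially
        return tf.concat([x, lab], axis=3)

    log(concat_label_sketch(_input, label))    # -> (64, 1, 15, 3)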
Example #8
    # [logger.info(var) for var in tf.trainable_variables()]
    #
    # img_latent = decoder_z_x(img_cond, z_lat, z_ept, reuse=True)
    # z_let = encoder_x_z(img_latent, reuse=True)
    # process
    # with tf.variable_scope('get_the_optimal_adjustment'):
    #     r, d = tf.split(
    #         img, [config.ndim_r, config.ndim_d], axis=2)
    #     rd = tf.concat(
    #         [r, tf.zeros_like(d)], axis=2)
    #     pi = pi_int + rd
    # logger.info(r.get_shape)
    # with tf.variable_scope('process'):
    po = process_x(img)

    for var in tf.trainable_variables():
        logger.info(var)
    # dq = po - z_lat
    # logger.info(z_prior.get_shape())
    # D_z = discriminator_z(
    #     z_prior
    # )
    # [logger.info(var) for var in tf.trainable_variables()]
    # tf.trainable_variables()
    # logger.info(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator_z'))
    # D_z_prior = discriminator_z(
    #     z_prior,
    #     reuse=True
    # )
    # D_img = discriminator_img(
    #     img_cond, z_lat, z_ept
    # )
Example #9
if __name__ == "__main__":
    # main
    main()
    # path of the metrics saved by main()
    file_path = '{}/{}-metric.pkl'.format(config.logs_path, config.description)
    # load the metrics
    [q_errors, r_adjs, z_adjs], name = pickle_load(file_path)

    # MSE evaluation
    q_errors_ = np.sum(
        q_errors, axis=-1
    )  # * (np.square(np.array(dp.out.qr)).reshape(1, 1, config.ndim_y))
    mse = np.mean(q_errors_, axis=-1)
    mean_mse = np.mean(mse)
    std_mse = np.std(mse)
    logger.info("{}- MSE - mean and std: {:0.4f}+/- {:0.4f}".format(
        config.description, mean_mse, std_mse))

    # Energy evaluation
    r = np.reshape(r_adjs,
                   [test_times, config.batch_size, -1])[:, :, :config.ndim_r]
    eng = np.mean(np.sum(np.square(r), axis=-1), axis=-1)
    mean_eng = np.mean(eng)
    std_eng = np.std(eng)
    logger.info("{}- Eng - mean and std: {:0.4f}+/- {:0.4f}".format(
        config.description, mean_eng, std_eng))

    # MI evaluation
    train_set = dp.out.eg
    train_set = np.squeeze(train_set, axis=[1, 3])[:, :config.ndim_r]
    test_set = r_adjs[:, :config.ndim_r]
    sigmas = np.linspace(1e-4, 2, 100)