Example #1
def test_restore_chkpt_in_graph():

    ######################################################################
    # CONSTANTS ##########################################################

    # Switches #
    PYTHON_COV_CALC = False

    # Files #
    SAVED_MODEL_PATH = "./vae_training/model.ckpt"
    SAVED_MODEL_DIR = "./vae_model/"

    CHECKPOINT_PATH = "./vae_training/model.ckpt"
    print('checkpoint: ', CHECKPOINT_PATH, '\n')
    chkp.print_tensors_in_checkpoint_file(CHECKPOINT_PATH, tensor_name='', all_tensors=True)
    print('\n')

    LOGDIR = "./log_dir_/"+datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H:%M:%S')+"_gp"
    LOGCHECKPT = "model.ckpt"
    NORMCHECKPT = 'normalize.ckpt'
    CSV = 'roomselection_1800.csv'
    dim1 = 'Points:0'
    dim2 = 'Points:1'
    dim3 = 'Points:2'
    dim4 = 'Temperature'
    dim5 = 'Pressure'
    dim6 = 'Tracer'
    FEATURES = [dim1, dim2, dim3, dim4, dim5, dim6]
    df = pd.read_csv(CSV, encoding='utf-8', engine='c')

    # Initializations #
    AMPLITUDE_INIT = np.array([.1, .1])
    LENGTHSCALE_INIT = np.array([.001, .001])  # alternative: [0.1, 0.1]
    K_SENSORS = 7
    SPATIAL_COVER = 7
    SPATIAL_COVER_PRESSURE_TEMP = 7

    # Hyperparameters #
    ENCODED_SIZE = 1
    SELECT_ROW_NUM = 8000
    INIT_OBSNOISEVAR_ = 0.001
    INIT_OBSNOISEVAR = 1e-6
    LEARNING_RATE = .1  # alternative: .01
    NUM_ITERS = 10000  # iterations to optimize the log-likelihood (alternative: 1000)
    PRED_FRACTION = 50
    NUM_SAMPLES = 8  # alternative: 50
    LINSPACE_GP_INDEX_SAMPLE = 300  # index resolution for plotting GP samples
    XEDGES = 160  # grid resolution for plotting amplitude/length-scale optimization
    YEDGES = 160
    ROW_REDUCED = 100  # fraction of the encoded and tracer rows to select
    ROWS_FOR_EACH_COORD = 100


    ######################################################################
    # DATA ###############################################################

    # Alternative: vae = tf.saved_model.load(SAVED_MODEL_DIR)
    # VAE
    # vae, prior, encoder, decoder = gpf.create_model(ENCODED_SIZE, 5)
    # encoder_saver = tf.train.Saver()  # created here, the default graph would hold only the VAE_ variables

    with tf.name_scope("data_preprocessing"):
        # VAE : at this point we have mean and stdev as extra
        vae, prior, encoder, decoder = gpf.create_model(ENCODED_SIZE, 5)
        saverdef_dict = get_variables_for_tfk_sequential(encoder, 'data_preprocessing/')
        encoder_saver = tf.train.Saver(saverdef_dict)  # inside the name scope the default var_list no longer matches the checkpoint names, so an explicit mapping is needed

        vae.summary()


        col_len = len(FEATURES)
        sample_len = SELECT_ROW_NUM // 2
        values, normal, mean_var, stdev_var = gpf.graph_normalization_factors_from_training_data(sample_len, col_len)
        xyztp_norm = tf.slice(normal, begin=[0, 0], size=[25, 5], name="xyztp_norm")
        t_norm_ = tf.cast(tf.slice(normal, begin=[0, 5], size=[25, 1]), dtype=tf.float64, name="t_norm_")
        t_norm = tf.reshape(t_norm_, shape=[-1], name="t_norm")

        #TODO what are these checkpoints
        # checkpoint = tf.train.Checkpoint(x=vae)
        # checkpoint.restore(tf.train.latest_checkpoint(CHECKPOINT_PATH))

        # Encoded data
        e_xyztp = encoder(xyztp_norm)
        assert isinstance(e_xyztp, tfd.Distribution)
        e_xyztp_s = e_xyztp.sample()
        e_xyztp_s = tf.cast(e_xyztp_s, dtype=tf.float64, name="encoded_sample")
        stack2 = tf.stack([e_xyztp_s, t_norm_])
        stack2 = tf.cast(stack2, dtype=tf.float64, name="enc_tr_")

        et_Var = tf.cast(stack2, dtype=tf.float64)
        w_pred_linsp_Var = tf.linspace(tf.reduce_min(e_xyztp_s), tf.reduce_max(e_xyztp_s), LINSPACE_GP_INDEX_SAMPLE,
                                       name="e_pred_linspace")
        w_pred_linsp_Var = tf.reshape(w_pred_linsp_Var, [-1, ENCODED_SIZE], name="reshape_")

        # Sample decoder
        z = prior.sample(SELECT_ROW_NUM)
        d_xyztp = decoder(z)
        assert isinstance(d_xyztp, tfd.Distribution)
        d_xyztp_s = d_xyztp.sample()


    ######################################################################
    # GRAPH CALLS ########################################################

    amp, amp_assign, amp_p, lensc, lensc_assign, lensc_p, \
        log_likelihood, samples_1d, train_op, obs_noise_var = \
        main.graph_GP(et_Var,
                      t_norm,
                      w_pred_linsp_Var,
                      e_xyztp_s,
                      amplitude_init=AMPLITUDE_INIT,
                      length_scale_init=LENGTHSCALE_INIT,
                      obs_noise_var_init=INIT_OBSNOISEVAR,
                      LEARNING_RATE=LEARNING_RATE,
                      NUM_SAMPLES=NUM_SAMPLES)

    [cov_vv, last_cov_vv, while_i0_idx, while_i0_end] = main.graph_cov(SPATIAL_COVER, SPATIAL_COVER_PRESSURE_TEMP, encoder)

    [sel_idx, _, _, _] = snps2.sparse_placement_algorithm_2(cov_vv, K_SENSORS)

    sel_idx_ = main.graph_output(sel_idx)

    ######################################################################
    # GRAPH SAVER ########################################################
    print("LOGDIR",LOGDIR)
    # for i, var in enumerate(saver._var_list):
    #     print('Var {}: {}'.format(i, var))
    summ = tf.summary.merge_all()
    writer = tf.summary.FileWriter(LOGDIR)
    saver = tf.train.Saver()  # maps every variable currently in the graph

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())  # initialize first; restores below overwrite these values
    # allmodel_saved_path = saver.save(sess, './saved_variable')
    # print('model saved in {}'.format(allmodel_saved_path))
    writer.add_graph(sess.graph)

    checkpoint = tf.train.Checkpoint(x=vae)
    # tf.train.latest_checkpoint expects a directory, not a checkpoint prefix
    checkpoint.restore(tf.train.latest_checkpoint("./vae_training"))


    ######################################################################
    # SESS RUNS ##########################################################
    # The encoder output should change once the checkpoint weights are
    # restored on top of the random initialization.
    e_test = encoder(tf.reshape(tf.constant([1., 1., 1., 1., 1.]), [-1, 5]))
    assert isinstance(e_test, tfd.Distribution)
    e_test_s = e_test.mean()
    print('encoder([1,1,1,1,1]) before restore', sess.run(e_test_s))

    print(CHECKPOINT_PATH)
    encoder_saver.restore(sess, CHECKPOINT_PATH)
    # alternative: checkpoint.restore(tf.train.latest_checkpoint("./vae_training"))

    e_test = encoder(tf.reshape(tf.constant([1., 1., 1., 1., 1.]), [-1, 5]))
    assert isinstance(e_test, tfd.Distribution)
    e_test_s = e_test.mean()
    print('encoder([1,1,1,1,1]) after restore', sess.run(e_test_s))
    pass
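
A note on the saverdef_dict above: the VAE is built inside tf.name_scope("data_preprocessing"), so its variable names carry a scope prefix that the checkpoint (written without that scope) does not contain, and a default tf.train.Saver() cannot match them. The following is a minimal sketch of what a helper like get_variables_for_tfk_sequential presumably does; the helper itself is not shown in these examples, so its exact behavior and signature are assumptions.

def strip_scope_prefix(model, scope_prefix):
    # Map checkpoint keys (names without the scope prefix) to the in-graph
    # variables (names with the prefix), usable as tf.train.Saver(var_list).
    mapping = {}
    for var in model.variables:
        name = var.op.name  # e.g. 'data_preprocessing/VAE_/encoder_/e_dense_/kernel'
        if name.startswith(scope_prefix):
            name = name[len(scope_prefix):]  # key as stored in the checkpoint
        mapping[name] = var
    return mapping

# hypothetical usage: tf.train.Saver(strip_scope_prefix(encoder, 'data_preprocessing/'))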
Example #2
def TEST_VAE():
    '''
    Phases of the VAE test
    - Train a NN with 1 neuron in the middle layer
        1. import the dataset for 1 timestep
        2. randomize, reduce size to (100, 6)
        3. pause after training is complete
        4. save the NN model
    - Create the reduced-space dataset using the encoder
        5. save the new red_ord_dataset (100, 1)
    - Plot the new data
        6. 2D plot, to see whether a GP can be fitted to it
    - Fit a GP to the new data
        7. fit a GP to the 2D points
    - Sample from the GP
        8. check whether a sampled point, transformed by the decoder, lies within epsilon of another given point; inspect the error
    '''
    SAVED_MODEL_PATH = "./vae_training/model.ckpt"
    SAVED_MODEL_DIR = "./vae_model/"
    CHECKPOINT_PATH = "./vae_training/model.ckpt"
    INPUT_SHAPE = 5
    ENCODED_SIZE = 1
    CSV = 'roomselection_1800.csv'
    SELECT_ROW_NUM = 8000
    dim1 = 'Points:0'
    dim2 = 'Points:1'
    dim3 = 'Points:2'
    dim4 = 'Temperature'
    dim5 = 'Pressure'
    dim6 = 'Tracer'

    # vae = tf.saved_model.load(SAVED_MODEL_DIR)
    ds = drs.load_csv(CSV)
    xyzt_idx = np.array(ds.loc[:, [dim1, dim2, dim3, dim4, dim5]])
    # xyzt_idx, train_dataset, test_dataset = gpf.load_randomize_select_train_test(CSV, SELECT_ROW_NUM, dim1, dim2, dim3,
    #                                                                              dim4, dim5, dim6)
    train_dataset, test_dataset = gpf.load_randomize_select_train_test(
        xyzt_idx)  # the tracer column is left out: it is the observation, not an encoder input
    assert train_dataset.shape[1] == INPUT_SHAPE

    vae, prior, encoder, decoder = gpf.create_model(ENCODED_SIZE, INPUT_SHAPE)
    vae.summary()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    checkpoint = tf.train.Checkpoint(x=vae)
    # tf.train.latest_checkpoint expects a directory, not a checkpoint prefix
    checkpoint.restore(tf.train.latest_checkpoint("./vae_training"))

    # sample decoder
    z = prior.sample(SELECT_ROW_NUM)
    xtilde = decoder(z)
    assert isinstance(xtilde, tfd.Distribution)
    xtilde_s = xtilde.sample()
    xtilde_s_val = sess.run(xtilde_s)

    etilde = encoder(test_dataset)
    assert isinstance(etilde, tfd.Distribution)
    etilde_s = etilde.sample()
    etilde_s_val = sess.run(etilde_s)

    plts.plot_encoder_output_distribution(12, 12, test_dataset[:, 0])
    plts.plot_encoder_output_distribution(12, 12, etilde_s_val)
    plts.plot_decoder_output_distribution(12, 12, xtilde_s_val[:, 0, 0, :])

    pass
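
gpf.create_model itself is not shown in these examples. Below is a plausible sketch, assuming the standard TensorFlow Probability VAE pattern (a MultivariateNormalTriL head on the encoder, an IndependentNormal head on the decoder), with layer names taken from the checkpoint listing in Example #6; the layer widths are assumptions.

import tensorflow as tf
import tensorflow_probability as tfp

tfk, tfkl = tf.keras, tf.keras.layers
tfd, tfpl = tfp.distributions, tfp.layers

def create_model(encoded_size, input_shape):
    # Standard-normal prior over the latent code.
    prior = tfd.Independent(tfd.Normal(loc=tf.zeros(encoded_size), scale=1.),
                            reinterpreted_batch_ndims=1)
    encoder = tfk.Sequential([
        tfkl.InputLayer(input_shape=[input_shape]),
        tfkl.Dense(10, activation='relu', name='e_dense_'),
        tfkl.Dense(10, activation='relu', name='e_dense_10'),
        tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size),
                   name='e_mvn_dense_'),
        tfpl.MultivariateNormalTriL(
            encoded_size,
            activity_regularizer=tfpl.KLDivergenceRegularizer(prior)),
    ], name='encoder_')
    decoder = tfk.Sequential([
        tfkl.InputLayer(input_shape=[encoded_size]),
        tfkl.Dense(10, activation='relu', name='d_dense_'),
        tfkl.Dense(tfpl.IndependentNormal.params_size([input_shape]),
                   name='d_dense_10'),
        tfpl.IndependentNormal([input_shape]),
    ], name='decoder_')
    vae = tfk.Model(inputs=encoder.inputs,
                    outputs=decoder(encoder.outputs[0]), name='VAE_')
    return vae, prior, encoder, decoder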
Example #3
def tb_graph_GP(LOGDIR='./tb_graph_GP/'):

    CSV = 'roomselection_1800.csv'
    dim1 = 'Points:0'
    dim2 = 'Points:1'
    dim3 = 'Points:2'
    dim4 = 'Temperature'
    dim5 = 'Pressure'
    dim6 = 'Tracer'
    FEATURES = [dim1, dim2, dim3, dim4, dim5, dim6]
    df = pd.read_csv(CSV, encoding='utf-8', engine='c')

    # Initializations
    AMPLITUDE_INIT = np.array([.1, .1])
    LENGTHSCALE_INIT = np.array([.001, .001])  # alternative: [0.1, 0.1]
    K_SENSORS = 1
    SPATIAL_COVER = 2
    SPATIAL_COVER_PRESSURE_TEMP = 2

    # Hyperparameters
    ENCODED_SIZE = 1
    GP_INPUT_IDX_DIM = ENCODED_SIZE
    SELECT_ROW_NUM = 8000
    INIT_OBSNOISEVAR_ = 0.001
    INIT_OBSNOISEVAR = 1e-6
    LEARNING_RATE = .1  # alternative: .01
    NUM_ITERS = 10000  # iterations to optimize the log-likelihood (alternative: 1000)
    PRED_FRACTION = 50
    NUM_SAMPLES = 8  # alternative: 50
    LINSPACE_GP_INDEX_SAMPLE = 300  # index resolution for plotting GP samples
    XEDGES = 60  # grid resolution for plotting amplitude/length-scale optimization
    YEDGES = 60
    ROW_REDUCED = 100  # fraction of the encoded and tracer rows to select
    ROWS_FOR_EACH_COORD = 100


    # DATA ###############################################################

    # vae = tf.saved_model.load(SAVED_MODEL_DIR)
    col_len = len(FEATURES)
    sample_len = SELECT_ROW_NUM // 2
    values, normal, mean_var, stdev_var = gpf.graph_normalization_factors_from_training_data(sample_len, col_len)
    xyztp_norm = tf.slice(normal, begin=[0, 0], size=[25, 5], name="xyztp_norm")
    t_norm_ = tf.slice(normal, begin=[0, 5], size=[25, 1], name="t_norm_")
    t_norm = tf.reshape(t_norm_, shape=[-1], name="t_norm")
    # VAE
    vae, prior, encoder, decoder = gpf.create_model(ENCODED_SIZE, 5)
    vae.summary()
    # Object-based checkpoint (see the note after this example); it is
    # created here but never restored, and CHECKPOINT_PATH is not defined
    # in this function:
    checkpoint = tf.train.Checkpoint(x=vae)
    # checkpoint.restore(tf.train.latest_checkpoint(CHECKPOINT_PATH))

    # Encoded data
    e_xyztp = encoder(xyztp_norm)
    assert isinstance(e_xyztp, tfd.Distribution)
    e_xyztp_s = e_xyztp.sample()
    stack2 = tf.stack([e_xyztp_s, t_norm_])

    et_Var = tf.cast(stack2, dtype=tf.float64)
    w_pred_linsp_Var = tf.linspace(tf.reduce_min(e_xyztp_s), tf.reduce_max(e_xyztp_s), LINSPACE_GP_INDEX_SAMPLE)
    w_pred_linsp_Var = tf.reshape(w_pred_linsp_Var,[-1,GP_INPUT_IDX_DIM], name="reshape_")

    # Exercise the tf.logging levels
    tf.logging.warn('warn')
    tf.logging.error('error')
    tf.logging.fatal('fatal')
    # Sample decoder
    z = prior.sample(SELECT_ROW_NUM)
    d_xyztp = decoder(z)
    assert isinstance(d_xyztp, tfd.Distribution)
    d_xyztp_s = d_xyztp.sample()


    amp, amp_assign, amp_p, lensc, lensc_assign, lensc_p, log_likelihood, samples_1d, train_op, obs_noise_var = \
        main.graph_GP(et_Var,
                      t_norm_,
                      w_pred_linsp_Var,
                      e_xyztp_s)

    summ = tf.summary.merge_all()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        writer = tf.summary.FileWriter(LOGDIR, sess.graph)

        sess.run(tf.global_variables_initializer())
        writer.flush()
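
On the recurring "what are these checkpoints" TODO: the examples mix two checkpoint APIs. A minimal disambiguation sketch, with the paths taken from the constants above:

import os
import tensorflow as tf

CHECKPOINT_PATH = "./vae_training/model.ckpt"  # a checkpoint *prefix*, not a directory

# 1) Name-based tf.train.Saver: restore takes the prefix itself.
#    saver.restore(sess, CHECKPOINT_PATH)

# 2) Object-based tf.train.Checkpoint: tf.train.latest_checkpoint scans a
#    *directory* and returns the newest prefix there (or None), so it must
#    be given the directory rather than the prefix:
latest = tf.train.latest_checkpoint(os.path.dirname(CHECKPOINT_PATH))
# checkpoint.restore(latest)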
Example #4
from tensorflow.python.training import saver as saver_util

from tensorflow.python.training import checkpoint_management

from tensorflow.python.tools import inspect_checkpoint as chkp
# print all tensors in checkpoint file
CHECKPOINT_PATH = "./vae_training/model.ckpt"
chkp.print_tensors_in_checkpoint_file(CHECKPOINT_PATH,
                                      tensor_name='',
                                      all_tensors=True)

ENCODED_SIZE = 1
LOGDIR = "./log_dir_/" + datetime.datetime.fromtimestamp(
    time.time()).strftime('%Y-%m-%d-%H:%M:%S') + "_gp"

vae, prior, encoder, decoder = gpf.create_model(ENCODED_SIZE, 5)
vae.summary()

######################################################################
# GRAPH SAVER ########################################################
print("LOGDIR", LOGDIR)
# for i, var in enumerate(saver._var_list):
#     print('Var {}: {}'.format(i, var))
summ = tf.summary.merge_all()
writer = tf.summary.FileWriter(LOGDIR)

saveable_names =\
    ['VAE_/decoder_/d_dense_/bias',
     'VAE_/decoder_/d_dense_/kernel',
     'VAE_/decoder_/d_dense_10/bias',
     'VAE_/decoder_/d_dense_10/kernel',
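
The saveable_names listing is cut off above. Assuming it enumerates checkpoint keys like the ones shown, this sketch turns such a name list into a Saver var_list; the filtering step is an illustration, not code from the project:

# Build {checkpoint_key: variable} for the names present in the graph.
name_to_var = {v.op.name: v for v in tf.global_variables()}
var_list = {name: name_to_var[name]
            for name in saveable_names if name in name_to_var}
restore_saver = tf.train.Saver(var_list)
# restore_saver.restore(sess, CHECKPOINT_PATH)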
Example #5
xyztp_test, tr_test =  gpf.separate_to_encode_dataset_and_tracer_dataset(test_dataset)

INPUT_SHAPE = xyztp_train.shape[1]  # the tracer is not encoded; it is the observation

h = pd.DataFrame(xyztp_train)
plts.pairplots(h)

#========================================================
# Create Graph
#========================================================

tf.reset_default_graph()
tf.keras.backend.clear_session()
sess = gpf.reset_session()

vae, prior, encoder, decoder = gpf.create_model(encoded_size, INPUT_SHAPE)


#========================================================
# Instantiation
#========================================================
saver = tf.train.Saver()
summ = tf.summary.merge_all()
sess.run(tf.global_variables_initializer())  # initialize first; any restore must come after this
writer = tf.summary.FileWriter(LOGDIR)
writer.add_graph(sess.graph)


#========================================================
# Graph calls
#========================================================
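
The "after the init" comment is about ordering: the Saver must be created once the variables it should track exist, and a restore must run after the initializer so that restored values overwrite the random init. A condensed sketch of that ordering (ckpt_prefix is a placeholder):

vae, prior, encoder, decoder = gpf.create_model(encoded_size, INPUT_SHAPE)
saver = tf.train.Saver()                     # snapshots the variables that exist now
sess = tf.Session()
sess.run(tf.global_variables_initializer())  # random init
# saver.restore(sess, ckpt_prefix)           # restore last, overwriting the init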
Example #6
def TEST_encode_to_GP_fn():

    #===================================================================
    # CONSTANTS
    #===================================================================
    # Files #
    CHECKPOINT_PATH = "./vae_training/model.ckpt"
    LOGDIR = "./log_dir_/" + datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d-%H:%M:%S') + "_gp"
    LOGCHECKPT = "model.ckpt"
    CSV = 'roomselection_1800.csv'
    dim1 = 'Points:0'
    dim2 = 'Points:1'
    dim3 = 'Points:2'
    dim4 = 'Temperature'
    dim5 = 'Pressure'
    dim6 = 'Tracer'
    FEATURES = [dim1, dim2, dim3, dim4, dim5, dim6]
    df = pd.read_csv(CSV, encoding='utf-8', engine='c')

    # Initializations #
    AMPLITUDE_INIT = np.array([.1, .1])
    LENGTHSCALE_INIT = np.array([.001, .001])  # alternative: [0.1, 0.1]
    K_SENSORS = 7
    SPATIAL_COVER = 7
    SPATIAL_COVER_PRESSURE_TEMP = 7

    # Hyperparameters #
    ENCODED_SIZE = 1
    SELECT_ROW_NUM = 8000
    INIT_OBSNOISEVAR_ = 0.001
    INIT_OBSNOISEVAR = 1e-6
    LEARNING_RATE = .1  # alternative: .01
    NUM_ITERS = 10000  # iterations to optimize the log-likelihood (alternative: 1000)
    PRED_FRACTION = 50
    NUM_SAMPLES = 8  # alternative: 50
    LINSPACE_GP_INDEX_SAMPLE = 300  # index resolution for plotting GP samples
    XEDGES = 160  # grid resolution for plotting amplitude/length-scale optimization
    YEDGES = 160
    ROW_REDUCED = 100  # fraction of the encoded and tracer rows to select
    ROWS_FOR_EACH_COORD = 100

    #===================================================================
    # DATA
    #===================================================================
    # Alternative: vae = tf.saved_model.load(SAVED_MODEL_DIR)
    # VAE
    vae, prior, encoder, decoder = gpf.create_model(ENCODED_SIZE, 5)
    encoder_saver = tf.train.Saver()  # created now, the default graph holds only the VAE_ variables, so the default var_list matches the checkpoint

    with tf.name_scope("data_preprocessing"):
        col_len = len(FEATURES)
        sample_len = SELECT_ROW_NUM // 2
        values, normal, mean_var, stdev_var = gpf.graph_normalization_factors_from_training_data(
            sample_len, col_len)
        xyztp_norm = tf.slice(normal,
                              begin=[0, 0],
                              size=[25, 5],
                              name="xyztp_norm")
        t_norm_ = tf.cast(tf.slice(normal, begin=[0, 5], size=[25, 1]),
                          dtype=tf.float64,
                          name="t_norm_")
        t_norm = tf.reshape(t_norm_, shape=[-1], name="t_norm")

        # VAE: by this point the graph also holds the normalization variables
        # (mean, stdev), so creating the model and Saver here would be too
        # late; 'mean' would be missing from the checkpoint mapping:
        # vae, prior, encoder, decoder = gpf.create_model(ENCODED_SIZE, 5)
        # encoder_saver = tf.train.Saver()
        # Variables found in the checkpoint, a 'trick' to figure out the
        # list of variables to restore later:
        """ [
            'VAE_/decoder_/d_dense_/bias',
            'VAE_/decoder_/d_dense_/kernel',
            'VAE_/decoder_/d_dense_10/bias',
            'VAE_/decoder_/d_dense_10/kernel',
            'VAE_/encoder_/e_dense_/bias',
            'VAE_/encoder_/e_dense_/kernel',
            'VAE_/encoder_/e_dense_10/bias',
            'VAE_/encoder_/e_dense_10/kernel',
            'VAE_/encoder_/e_mvn_dense_/bias',
            'VAE_/encoder_/e_mvn_dense_/kernel',
            'mean',
            'stdev'
        ] """

        vae.summary()

        #TODO what are these checkpoints
        # checkpoint = tf.train.Checkpoint(x=vae)
        # checkpoint.restore(tf.train.latest_checkpoint(CHECKPOINT_PATH))

        # Encoded data
        e_xyztp = encoder(xyztp_norm)
        assert isinstance(e_xyztp, tfd.Distribution)
        e_xyztp_s = e_xyztp.sample()
        e_xyztp_s = tf.cast(e_xyztp_s, dtype=tf.float64, name="encoded_sample")
        stack2 = tf.stack([e_xyztp_s, t_norm_])
        stack2 = tf.cast(stack2, dtype=tf.float64, name="enc_tr_")

        et_Var = tf.cast(stack2, dtype=tf.float64)
        w_pred_linsp_Var = tf.linspace(tf.reduce_min(e_xyztp_s),
                                       tf.reduce_max(e_xyztp_s),
                                       LINSPACE_GP_INDEX_SAMPLE,
                                       name="e_pred_linspace")
        w_pred_linsp_Var = tf.reshape(w_pred_linsp_Var, [-1, ENCODED_SIZE],
                                      name="reshape_")

        # Sample decoder
        z = prior.sample(SELECT_ROW_NUM)
        d_xyztp = decoder(z)
        assert isinstance(d_xyztp, tfd.Distribution)
        d_xyztp_s = d_xyztp.sample()

    #===================================================================
    # GRAPH CALLS
    #===================================================================
    # I.3 - GP training
    amp, amp_assign, amp_p, lensc, lensc_assign, lensc_p, \
        log_likelihood, samples_1d, train_op, obs_noise_var \
        = graph_GP(et_Var,
                 t_norm,
                 w_pred_linsp_Var,
                 e_xyztp_s,
                 amplitude_init=AMPLITUDE_INIT,
                 length_scale_init=LENGTHSCALE_INIT,
                 obs_noise_var_init=INIT_OBSNOISEVAR,
                 LEARNING_RATE=LEARNING_RATE,
                 NUM_SAMPLES=NUM_SAMPLES
                 )

    #===================================================================
    # II.1 - COV calc
    #===================================================================
    [cov_vv, last_cov_vv, while_i0_idx, while_i0_end, xyz_cov_idxs] \
        = graph_cov(SPATIAL_COVER, SPATIAL_COVER_PRESSURE_TEMP, encoder)

    #===================================================================
    # II.2 - PLACEMENT
    #===================================================================
    [sel_idx, _, _, _] = snps2.sparse_placement_algorithm_2(cov_vv, K_SENSORS)

    # sel_coord = graph_output(sel_idx)  # doesn't work yet

    #===================================================================
    # GRAPH SAVER
    #===================================================================
    print("LOGDIR", LOGDIR)
    # for i, var in enumerate(saver._var_list):
    #     print('Var {}: {}'.format(i, var))
    summ = tf.summary.merge_all()
    writer = tf.summary.FileWriter(LOGDIR)
    saver = tf.train.Saver()  # maps every variable currently in the graph
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())  # initialize first; restores below overwrite these values
    # allmodel_saved_path = saver.save(sess, './saved_variable')
    # print('model saved in {}'.format(allmodel_saved_path))
    writer.add_graph(sess.graph)

    checkpoint = tf.train.Checkpoint(x=vae)
    # tf.train.latest_checkpoint expects a directory, not a checkpoint prefix
    checkpoint.restore(tf.train.latest_checkpoint("./vae_training"))

    #===================================================================
    # ASSERTS
    #===================================================================
    # The encoder output should change once the checkpoint weights are
    # restored on top of the random initialization.
    e_test = encoder(tf.reshape(tf.constant([1., 1., 1., 1., 1.]), [-1, 5]))
    assert isinstance(e_test, tfd.Distribution)
    e_test_s = e_test.mean()
    print('encoder([1,1,1,1,1]) before restore', sess.run(e_test_s))

    print(CHECKPOINT_PATH)
    encoder_saver.restore(sess, CHECKPOINT_PATH)
    # alternative: checkpoint.restore(tf.train.latest_checkpoint("./vae_training"))

    e_test = encoder(tf.reshape(tf.constant([1., 1., 1., 1., 1.]), [-1, 5]))
    assert isinstance(e_test, tfd.Distribution)
    e_test_s = e_test.mean()
    print('encoder([1,1,1,1,1]) after restore', sess.run(e_test_s))

    #===================================================================
    # DATA - LOAD RANDOMIZE SELECT SPLIT TO TRAIN, TEST
    #===================================================================
    xyztpt_idx_df = gpf.randomize_df(df, SELECT_ROW_NUM, dim1, dim2, dim3,
                                     dim4, dim5, dim6)
    xyztpt_idx = xyztpt_idx_df.to_numpy()
    minmax_x = [np.min(xyztpt_idx[:, 0]), np.max(xyztpt_idx[:, 0])]
    minmax_y = [np.min(xyztpt_idx[:, 1]), np.max(xyztpt_idx[:, 1])]
    minmax_z = [np.min(xyztpt_idx[:, 2]), np.max(xyztpt_idx[:, 2])]
    minmax_temperature = [np.min(xyztpt_idx[:, 3]), np.max(xyztpt_idx[:, 3])]
    minmax_pressure = [np.min(xyztpt_idx[:, 4]), np.max(xyztpt_idx[:, 4])]
    # INPUT_SHAPE = xyztpt_idx.shape[1]-1
    train_dataset, test_dataset = gpf.load_randomize_select_train_test(
        xyztpt_idx)

    #===================================================================
    # SESS RUNS
    #===================================================================
    normal_v = sess.run([normal], feed_dict={values: train_dataset})
    [obs_noise_var_] = sess.run([obs_noise_var])  # run session graph
    lls = gpf.tf_optimize_model_params(
        sess,
        NUM_ITERS,
        train_op,
        log_likelihood,  # (2,0)
        summ,
        writer,
        saver,
        LOGDIR,
        LOGCHECKPT,
        train_dataset,
        values)  # takes long!
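    # gpf.tf_optimize_model_params presumably loops roughly like this
    # (a sketch; the real helper is not shown in these examples):
    #     for i in range(NUM_ITERS):
    #         _, ll, s = sess.run([train_op, log_likelihood, summ],
    #                             feed_dict={values: train_dataset})
    #         writer.add_summary(s, i)
    #         if i % 1000 == 0:
    #             saver.save(sess, os.path.join(LOGDIR, LOGCHECKPT), global_step=i)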
    [amp, lensc, obs_noise_var] = sess.run([amp, lensc, obs_noise_var])  # rebinds the names from tensors to their evaluated values

    [samples_1d_] = sess.run([samples_1d],
                             feed_dict={values: train_dataset})  # 1D_emb: (5, ?, ?), 2D_emb: (5, 2, 200)

    H = gpf.calc_H(XEDGES, YEDGES, lensc, lensc_assign, lensc_p, amp,
                   amp_assign, amp_p, log_likelihood, sess, values,
                   train_dataset)

    e_xyztp_s_v = sess.run(e_xyztp_s, feed_dict={values: train_dataset})
    d_xyztp_s_v = sess.run(d_xyztp_s, feed_dict={values: train_dataset})
    w_pred_idx_linsp = sess.run(w_pred_linsp_Var,
                                feed_dict={values: train_dataset})
    e_reduced = sess.run(e_xyztp_s, feed_dict={
        values: test_dataset
    }).astype(np.float64)[:ROW_REDUCED, :]
    e_v = sess.run(et_Var, feed_dict={values: train_dataset})
    enc_df = pd.DataFrame(e_v[:, :, 0].T)
    tr_idx = sess.run(t_norm_, feed_dict={values: train_dataset})
    tr_idx_reduced = np.array(tr_idx[:ROW_REDUCED]).flatten()
    xyztp_idx = sess.run(xyztp_norm, feed_dict={values: train_dataset})
    print(xyztp_idx)

    [w0, w1, e] = sess.run([while_i0_idx, while_i0_end, cov_vv],
                           feed_dict={values: test_dataset})
    elast = sess.run(last_cov_vv, feed_dict={values: train_dataset})

    print("cov_vv, after running while_op")
    print(w0, w1)
    print(e)
    print("last_cov_vv_ij")
    print(elast)
    cov_vv_ = sess.run(cov_vv)
    print(cov_vv_)
    df = pd.DataFrame(cov_vv_)  # note: rebinds df, shadowing the CSV DataFrame loaded above
    df.to_csv('cov_vv.csv')

    # np_algo1 = alg2.placement_algorithm_1(cov_vv_, 7)
    # print("np algorithm 1",np_algo1)

    np_algo2 = alg2.placement_algorithm_2(cov_vv_, 7)
    print("np algorithm 2", np_algo2)

    select_placement_points = sess.run(sel_idx.values)  # after the print node
    print('tf algorithm 2, select_placement_points: ', select_placement_points)
    xyz_idxs = sess.run(xyz_cov_idxs)
    sel_norm_coord = gpf.py_get_coord_idxs(select_placement_points, xyz_idxs)
    print("xyz_coordinates", sel_norm_coord)
    sel_coord = gpf.denormalize_coord(sel_norm_coord)
    print("denormalized xyz", sel_coord)
    # p = sess.run(force['x'], feed_dict={values: test_dataset})
    # print("p",p)

    # [_, assigned_val] = sess.run([assign_op, cov_vv_ij], feed_dict={assign_placeholder:. val})

    # bigwhileloop = tf.map_fn(simple_args_test, vec3)  #  xyz = tf.constant
    ######################################################################
    #### TEST: 3 sensors from 8 indices ####
    # if GRAPH_COV_CALC==True:

    # if PYTHON_COV_CALC == True:
    #     cov_vv_python = gpf.create_cov_matrix_while_loops(minmax_x, minmax_y, minmax_z, minmax_pressure, minmax_temperature, SPATIAL_COVER, SPATIAL_COVER_PRESSURE_TEMP, encoder, sess)
    #     print(cov_vv_python)
    #     A = snps.placement_algorithm_2(cov_vv_python, k=K_SENSORS)
    #     print("A: ",A)


    #===================================================================
    # PLOTS
    #===================================================================
    plts.pairplots(enc_df)  # e_t_df
    plts.plot_encoder_output_distribution(12, 12, xyztp_idx[:, 0])
    plts.plot_encoder_output_distribution(12, 12, e_xyztp_s_v[:, 0])
    plts.plot_decoder_output_distribution(12, 12, d_xyztp_s_v[:, 0, 0, :])

    plts.plot_loss_evolution(12, 4, lls)
    plts.plot_marginal_likelihood3D(XEDGES, H)
    plts.plot_gp_linesamples(
        12,
        4,
        e_reduced,  # (15,1)
        tr_idx_reduced,  # (15,)
        w_pred_idx_linsp,  # (100,1)
        samples_1d_,  # (8,2,100)
        NUM_SAMPLES)  # (8)

    plts.plot_range_of_values(12, 4, e_xyztp_s_v, tr_idx)
    # plts.plot_placement_xyz(12,12,select_placement_points)
    ''' DIMENSIONS 
    plts.plot2d_sinusoid_samples_section(12, 4, ext_sel_pts,  # (12,4)
                                         line_idx,  # (200,1)
                                         obs_proj_idx_pts,  # (60,2)
                                         line_obs,  # (200,)
                                         samples_section_,  # (50,2,200)
                                         NUM_SAMPLES,  # (50)
                                         BEGIN, END)  # (3,)
                                         '''
    print("==============")