#     os.symlink(os.path.abspath(temp_dir), os.path.abspath(global_args.exp_dir))
#     global_args.exp_dir = temp_dir

# Pull one batch up front so the graph below can be built against its shapes.
_, _, batch = next(data_loader)
try:
    # Keep an immutable snapshot of the observed images (presumably for
    # consistent visualization across training steps — TODO confirm usage).
    fixed_batch_data = batch['observed']['data']['image'].copy()
except Exception:
    # Best-effort: some batch layouts lack the 'image' entry; skip silently.
    # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
    pass
# Build the adversarial/transport training graph in a fresh TF graph.
with tf.Graph().as_default():
    tf.set_random_seed(global_args.seed)
    model = Model(vars(global_args))

    # Float global step (trainable=False) — float type suggests it feeds
    # continuous schedules rather than integer step counting; TODO confirm.
    global_step = tf.Variable(0.0, name='global_step', trainable=False)
    with tf.variable_scope("training"):
        # Seed is set again inside the scope; appears redundant with the
        # graph-level call above — NOTE(review): verify this is intentional.
        tf.set_random_seed(global_args.seed)

        # Two scalar side inputs fed at run time; their semantics are defined
        # by Model.inference (not visible here).
        additional_inputs_tf = tf.placeholder(tf.float32, [2])
        batch_tf, input_dict_func = helper.tf_batch_and_input_dict(batch, additional_inputs_tf)
        train_outs_dict, test_outs_dict = model.inference(batch_tf, additional_inputs_tf)
        generative_dict = model.generative_model(batch_tf)
        # Distributions exposed by the model after inference/generation.
        inference_obs_dist = model.obs_dist
        transport_dist = model.transport_dist
        rec_dist = model.rec_dist

        # Partition trainable variables by name-scope substring so the
        # discriminator, generator and transport networks can get separate
        # optimizer updates.
        discriminator_vars = [v for v in tf.trainable_variables() if 'Discriminator' in v.name]
        generator_vars = [v for v in tf.trainable_variables() if 'Decoder' in v.name] 
        transport_vars = [v for v in tf.trainable_variables() if 'TransportPlan' in v.name or 'EncodingPlan' in v.name or 'MixingPlan' in v.name] 

        # Weight clipping
        # Flatten all discriminator weights to track their max magnitude
        # (useful for monitoring the clipping below).
        discriminator_vars_flat_concat = tf.concat([tf.reshape(e, [-1]) for e in discriminator_vars], axis=0)
        max_abs_discriminator_vars = tf.reduce_max(tf.abs(discriminator_vars_flat_concat))
        clip_op_list = []
        # NOTE(review): the body of this clip loop is not visible in this
        # chunk — the file appears truncated here.
        for e in discriminator_vars:
# Derive a per-experiment TensorBoard port from the 3-char hex suffix of
# exp_dir (exp_dir is assumed to end with a separator — last char skipped),
# then snapshot the model sources into the experiment directory.
tb_port = 20000 + int(global_args.exp_dir[-4:-1], 16)
print("TENSORBOARD: Mac:\nhttp://0.0.0.0:" + str(tb_port))
print("\n\n\n")

for model_file in ('SLVM.py', 'ModelGTM.py'):
    shutil.copyfile('./models/' + model_file, global_args.exp_dir + model_file)

# Fetch one batch so the SLVM graph can be constructed against its shapes.
_, _, batch = next(data_loader)
with tf.Graph().as_default():
    tf.set_random_seed(global_args.seed)
    model = SLVM(vars(global_args))

    # Float global step (trainable=False); passed to model.inference below,
    # presumably to drive training schedules — TODO confirm.
    global_step = tf.Variable(0.0, name='global_step', trainable=False)
    with tf.variable_scope("training"):
        # Redundant with the graph-level seed above — NOTE(review): verify.
        tf.set_random_seed(global_args.seed)
        batch_tf, input_dict_func = helper.tf_batch_and_input_dict(batch)

        # First element of the train outputs is the scalar training loss.
        train_out_list, test_out_list = model.inference(batch_tf, global_step)
        batch_loss_tf = train_out_list[0]
        obs_dist = model.obs_dist
        sample_obs_dist, obs_sample_out_tf, latent_sample_out_tf = model.generative_model(
            batch_tf)

    # Select the optimizer from config; both branches minimize the same loss
    # and advance global_step.
    if global_args.optimizer_class == 'RmsProp':
        train_step_tf = tf.train.RMSPropOptimizer(
            learning_rate=global_args.learning_rate,
            momentum=0.9).minimize(batch_loss_tf, global_step=global_step)
    elif global_args.optimizer_class == 'Adam':
        # NOTE(review): this call is truncated at the chunk boundary — the
        # remaining optimizer arguments and .minimize(...) are not visible.
        train_step_tf = tf.train.AdamOptimizer(
            learning_rate=global_args.learning_rate,
            beta1=0.9,