Code example #1
import torch
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
# `models`, `uts`, and `pr` below are project-local modules (model definitions,
# image-loading utilities, and plotting helpers); import them as in the original
# project before running this snippet.

device = torch.device("cuda")
image_size = 240
dimz = 50
# Test code: evaluate a pretrained autoencoder on image sequences.
model = models.AE(dimz, 3).to(device)
# Load pretrained parameters into the model.
# model.load_weights_from_file('../Disentangling/params/ClothV2_env_all_dimZ_100alpha_0.05')
# BlocksV3_env_all_dimZ_100alpha_0.03
model.load_weights_from_file('params/AE_Cloth_prev_human12_dimZ_50')
mixedHR = uts.load_images_sequence(
    '../../Experiments/Dataset/Cloth_prev/mixedHR', 200, image_size)
finalImages = uts.load_images_sequence(
    '../../Experiments/Dataset/Cloth_prev/final', 20, image_size)
plot_r = pr.plot_task_map('../../Experiments/Dataset/Cloth_prev', None, device)

plot_r.save_results_41(model, "AE_mixedHR", mixedHR, "mixedHR")
plot_r.save_results_41(model, "AE_final_states", finalImages, "final states")

# plot_r.save_results_4(model, 'save_test')
# plot_r.compare_exp(model, 'another_test', 144)
input("Press Enter to continue...")
# plot_r.animation_exp(model, zt_marks, '../Experiments/Dataset/Blocks/results/raw_12.jpg', image_size,
#                      '../Experiments/Dataset/Blocks/results/animation', 20, 1)

# sequence = uts.load_images_sequence('../Experiments/Dataset/ClothV2/human2', 135, image_size)
# plot_r = pr.plot_task_map('../Experiments/Dataset/ClothV2', dimz, 0.05, 172.8, sequence, device)
# plot_r.save_results_4(model, 'save_test')
# zt_marks = [0,1]
#
Code example #2
# Fragment from the AE training script: assumes `args`, `dim_Zs`, `task_name`,
# `data_set_name`, `sample_sequence`, `kwargs`, `device`, and the `train()`
# function are defined by the surrounding script.
image_folder = '../../Experiments/Dataset/' + task_name + '/' + data_set_name
# train_loader, test_loader = uts.wrap_data_loader_images(image_folder, args.image_size, kwargs, 0, args.batch_size)
train_loader = uts.wrap_data_loader_images(image_folder,
                                           args.image_size, kwargs,
                                           0, args.batch_size)
for m in range(len(dim_Zs)):  # dim Z level
    dim_Zt = dim_Zs[m]
    torch.manual_seed(
        args.seed
    )  # manually set the random seed for CPU random number generation.
    # construct the model and optimizer
    model = models.AE(dim_Zt, args.input_channel).to(device)
    # initialize an optimizer
    optimizer = optim.Adam(model.parameters(), lr=5e-4)
    plot_r = pr.plot_task_map(
        '../../Experiments/Dataset/' + task_name, sample_sequence,
        device)
    train_losss = []
    for epoch in range(1, args.epochs + 1):
        trainLoss = train(epoch, train_loader, model, optimizer,
                          task_name, data_set_name, dim_Zt, plot_r)
        train_losss.append(trainLoss)
        # test(epoch, test_loader, model, alpha, beta, task_name, data_set_name, dim_Zt, plot_r)
    # save the trained model and its loss history
    model.save("AE_" + task_name + "_" + data_set_name + "_dimZ_" +
               str(dim_Zt))
    np.save(
        "train_loss_AE_" + task_name + "_" + data_set_name +
        "_dimZ_" + str(dim_Zt), train_losss)
    # free the model, optimizer, and plotter before the next configuration
    del model, optimizer, plot_r
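
For reference, the checkpoint name written by model.save(...) above is the same name that code example #1 loads from its params/ folder. A minimal sketch of the naming, assuming the hypothetical values task_name = 'Cloth_prev', data_set_name = 'human12', and dim_Zt = 50:

# Hypothetical values chosen to match the checkpoint loaded in code example #1.
task_name, data_set_name, dim_Zt = 'Cloth_prev', 'human12', 50
checkpoint_name = "AE_" + task_name + "_" + data_set_name + "_dimZ_" + str(dim_Zt)
print(checkpoint_name)  # -> AE_Cloth_prev_human12_dimZ_50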
Code example #3
import torch
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
# `models`, `uts`, and `pr` below are project-local modules (model definitions,
# image-loading utilities, and plotting helpers); import them as in the original
# project before running this snippet.

device = torch.device("cuda")
image_size = 240
dimz = 50
# Test code: evaluate a pretrained VAE on image sequences.
model = models.VAE(dimz, 3).to(device)
# Load pretrained parameters into the model.
# model.load_weights_from_file('../Disentangling/params/ClothV2_env_all_dimZ_100alpha_0.05')
# BlocksV3_env_all_dimZ_100alpha_0.03
model.load_weights_from_file('params/VAE_BlocksV2_human12_dimZ_50')
mixedHR = uts.load_images_sequence(
    '../../Experiments/Dataset/BlocksV2/mixedHR', 200, image_size)
finalImages = uts.load_images_sequence(
    '../../Experiments/Dataset/BlocksV2/final', 20, image_size)
plot_r = pr.plot_task_map(dimz, '../../Experiments/Dataset/BlocksV2', None,
                          device)

plot_r.save_results_41(model, "VAE_mixedHR", mixedHR, "mixedHR")
# plot_r.save_results_41(model, "VAE_final_states", finalImages, "final states")

# plot_r.save_results_4(model, 'save_test')
# plot_r.compare_exp(model, 'another_test', 144)
#input("Press Enter to continue...")
# plot_r.animation_exp(model, zt_marks, '../Experiments/Dataset/Blocks/results/raw_12.jpg', image_size,
#                      '../Experiments/Dataset/Blocks/results/animation', 20, 1)

# sequence = uts.load_images_sequence('../Experiments/Dataset/ClothV2/human2', 135, image_size)
# plot_r = pr.plot_task_map('../Experiments/Dataset/ClothV2', dimz, 0.05, 172.8, sequence, device)
# plot_r.save_results_4(model, 'save_test')
# zt_marks = [0,1]
#
Code example #4
import gc

import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# `models`, `uts`, and `pr` below are project-local modules (model definitions,
# image-loading utilities, and plotting helpers); import them as in the original
# project before running this snippet.

device = torch.device("cuda")

dimz = 300
alpha = 0.05

model_params_path = '../Disentangling/params/BlocksV2_h1r1_dimZ_' + str(
    dimz) + 'alpha_' + str(alpha)
final_states_folder = '../Experiments/Dataset/BlocksV2/final_states'
image_size = 240
final_states_num = 40
# Test code: evaluate a pretrained beta-VAE on the final-state images.
model = models.beta_VAE(dimz, 3).to(device)
# Load pretrained parameters into the model.
model.load_weights_from_file(model_params_path)
save_folder = '../Experiments/Dataset/BlocksV2'

all_inputs = uts.load_images_sequence(final_states_folder, final_states_num,
                                      image_size)
# plot_r = pr.plot_task_map('../Experiments/Dataset/Toy_Example', 100, 0.2, 345.6, sample_sequence, device)
plot_r = pr.plot_task_map(save_folder, dimz, alpha, 172.8, all_inputs, device)

plot_r.save_results_4(model,
                      'eva_final_states_dim' + str(dimz) + '_' + str(alpha),
                      False)
Code example #5
# Fragment from the beta-VAE training script: assumes `args`, `dim_Zs`, `alphas`,
# `train_loader`, `task_name`, `data_set_name`, `sample_sequence`, `device`, and
# the `train()` function are defined by the surrounding script.
for m in range(len(dim_Zs)):  # dim Z level
    dim_Zt = dim_Zs[m]
    for n in range(len(alphas)):  # alpha level, beta level
        alpha = alphas[n]
        beta = alpha * args.image_size * args.image_size * args.input_channel / dim_Zt
        torch.manual_seed(
            args.seed
        )  # manually set the random seed for CPU random number generation.
        # construct the model and optimizer
        model = models.beta_VAE(dim_Zt,
                                args.input_channel).to(device)
        # initialize an optimizer
        optimizer = optim.Adam(model.parameters(), lr=5e-4)
        train_losss = []
        plot_r = pr.plot_task_map(
            '../Experiments/Dataset/' + task_name, dim_Zt, alpha,
            beta, sample_sequence, device)
        for epoch in range(1, args.epochs + 1):
            trainLoss = train(epoch, train_loader, model,
                              optimizer, alpha, beta, task_name,
                              data_set_name, dim_Zt, plot_r)
            train_losss.append(trainLoss)
            # test(epoch, test_loader, model, alpha, beta, task_name, data_set_name, dim_Zt, plot_r)
        # save the trained model and its loss history
        model.save("betaVAE_" + task_name + "_" + data_set_name +
                   "_dimZ_" + str(dim_Zt) + "alpha_" + str(alpha))
        np.save(
            "train_loss_betaVAE_" + task_name + "_" +
            data_set_name + "_dimZ_" + str(dim_Zt), train_losss)
        # free the model, optimizer, and plotter before the next alpha
        del model, optimizer, plot_r
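
The beta used above scales alpha by the number of input values per image (image_size * image_size * input_channel) and divides by the latent dimension. A quick worked check, assuming the image_size = 240, input_channel = 3, alpha = 0.05, and dim_Zt = 50 values seen in the other snippets:

# Hypothetical values taken from the other snippets in this collection.
alpha, image_size, input_channel, dim_Zt = 0.05, 240, 3, 50
beta = alpha * image_size * image_size * input_channel / dim_Zt
print(beta)  # -> 172.8, the same value hard-coded in the plot_task_map call of code example #4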