def random_eval():
    """Load a trained Scence2Image checkpoint and save an interpolation grid.

    Side effects only: reads the dataset via ``util.load_data()``, loads a
    hard-coded checkpoint, interpolates between the latent vectors of two
    samples, and writes ``g_im_changing_8_(0-2).png`` to the working
    directory.  Returns ``None``.
    """
    (ss, ls), ims = util.load_data()
    test_index = 50
    r_l = 10  # number of consecutive samples to slice out
    end_index = test_index + r_l
    # NOTE(review): the image slice (ims[...]) from the original was never
    # used anywhere in this function, so it is no longer materialized.
    sample_s = ss[test_index:end_index]
    sample_l = ls[test_index:end_index]

    obj_vec_d = 300
    model = Scence2Image(encoding_d=9, obj_vec_d=obj_vec_d)
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model.cuda()

    SAVE_PATH = "/media/easonnie/Seagate Expansion Drive/RL_net/m_32999_4.3951619817199825"
    # map_location lets a checkpoint saved from a CUDA model load on a
    # CPU-only machine, matching the cuda-availability branching below.
    device = "cuda" if use_cuda else "cpu"
    model.load_state_dict(torch.load(SAVE_PATH, map_location=device))
    model.eval()

    sample_s_v = Variable(sample_s)
    sample_l_v = Variable(sample_l)
    print(sample_s_v)
    if use_cuda:
        sample_s_v = sample_s_v.cuda()
        sample_l_v = sample_l_v.cuda()

    vecs = model.scence2vec(sample_s_v, sample_l_v)
    # Interpolate between the latent vectors of sample 0 and sample 2.
    ims = util.abstract_changing(vecs[0], vecs[2], model)
    g_im = vutil.make_grid(ims.data, nrow=8, padding=15)
    Image.fromarray(util.tensor2im(g_im)).save("g_im_changing_8_(0-2).png")
def produce_dev_images():
    """Load a trained AutoEncoder and save a grid of latent interpolations.

    Encodes a slice of dev-set images, interpolates between six hand-picked
    pairs of latent vectors, and writes ``vector_shifting_3_auto.png`` to
    the working directory.  Side effects only; returns ``None``.
    """
    import util

    (ss, ls), ims = util.load_data(mode='dev')
    test_index = 50
    r_l = 30  # slice size; must cover the largest index in `pairs` below
    end_index = test_index + r_l
    # NOTE(review): the original also sliced/wrapped ss and ls, but only the
    # images are consumed (via model.im2vec), so those are dropped here.
    sample_i = ims[test_index:end_index]

    obj_vec_d = 2400
    model = AutoEncoder(obj_vec_d=obj_vec_d)
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model.cuda()

    SAVE_PATH = "/media/easonnie/Seagate Expansion Drive/RL_net/m_82999_0.2122452172755806_d:2400_auto_encoder"
    # map_location lets a CUDA-saved checkpoint load on a CPU-only machine.
    device = "cuda" if use_cuda else "cpu"
    model.load_state_dict(torch.load(SAVE_PATH, map_location=device))
    model.eval()

    sample_i_v = Variable(sample_i)
    if use_cuda:
        sample_i_v = sample_i_v.cuda()

    vecs = model.im2vec(sample_i_v)
    # Hand-picked pairs of sample indices to interpolate between; one row of
    # the output grid per pair (same six pairs as the original copy-pasted
    # stanzas, in the same order).
    pairs = [(0, 2), (1, 3), (4, 5), (12, 6), (21, 9), (14, 20)]
    im_list = [util.abstract_changing(vecs[a], vecs[b], model) for a, b in pairs]
    ims = torch.cat(im_list, dim=0)

    g_im = vutil.make_grid(ims.data, nrow=8, padding=15)
    Image.fromarray(util.tensor2im(g_im)).save("vector_shifting_3_auto.png")
# loss = l1_loss(ims, sample_i_v) optimizer.zero_grad() loss.backward() # print([param for param in model.fc_dcgan._obj2vec.parameters()]) # print(loss) optimizer.step() model.eval() ims = model(sample_s_v, sample_l_v) vecs = model.scence2vec(sample_s_v, sample_l_v) vec1 = vecs[0] vec2 = vecs[2] ims = util.abstract_changing(vec1, vec2, model) # print(ims) g_im = vutil.make_grid(ims.data, nrow=8, padding=15) Image.fromarray(util.tensor2im(g_im)).save("g_im_changing_8.png") # print(sample_s_v) # print(ims[0]) # g_im = vutil.make_grid([ims[0].data, ims[1].data, ims[2].data], nrow=3, padding=15) # # Image.fromarray(util.tensor2im(g_im)).save("g_im1_l1.png") # t_im = Image.fromarray(util.tensor2im(ims[0].data)) # t_im.save("t_im_0.png") # t_im = Image.fromarray(util.tensor2im(ims[1].data))