def get_test_result(a_jpg_path, b_jpg_path, model, opt):
    """Run joint-model inference for one (source, target) image pair.

    Builds the network input from the source/target images, the target
    pose-keypoint label map, the pre-generated fake target parsing map and
    the affine / affine+TPS warp grids, then calls ``model.inference``.

    Args:
        a_jpg_path: path to the source (condition) image, ``.../img/...jpg``.
        b_jpg_path: path to the target image, ``.../img/...jpg``.
        model: generator exposing ``inference``; wrapped in ``DataParallel``
            when ``opt.isTrain`` is set, hence ``model.module``.
        opt: options object; reads ``joint_test_data_dir`` and ``isTrain``.

    Returns:
        ``(fake_b, a_image_tensor, b_image_tensor, b_label_show_tensor,
        b_parsing_tensor)``.

    NOTE(review): sibling-file paths are derived from the image path by
    string substitution; assumes the dataset keeps its standard layout
    (``img/``, ``img_parsing_all/``, ``img_keypoint_json/``) — confirm.
    """
    # TODO: also cache the per-sample JSON entries for the val files in
    # separate small files; faster than re-reading one big JSON every call.
    b_json_path = b_jpg_path.replace('.jpg', '_keypoints.json').replace(
        'img/', 'img_keypoint_json/')

    # Pair key like "<id>_<name>_vis.png=<id>_<name>_vis.png" indexes the
    # precomputed theta JSON and the fake-parsing filenames.
    src = a_jpg_path.replace('.jpg', '_vis.png').replace(
        'img/', 'img_parsing_all/').split(os.sep)
    dst = b_jpg_path.replace('.jpg', '_vis.png').replace(
        'img/', 'img_parsing_all/').split(os.sep)
    theta_pair_key = src[-2] + '_' + src[-1] + "=" + dst[-2] + '_' + dst[-1]

    # e.g. "id_00000006_01_1_front_TO_id_00000006_01_2_side__fake_b_parsing.png"
    b_parsing_label_filename = theta_pair_key.replace('=', '_TO_')
    b_parsing_label_filename = b_parsing_label_filename.replace(
        '_vis.png', '') + '__fake_b_parsing.png'
    b_parsing_path = os.path.join(opt.joint_test_data_dir,
                                  b_parsing_label_filename)

    b_parsing_tensor = get_parsing_label_tensor(b_parsing_path, opt)
    b_label_tensor, b_label_show_tensor = get_label_tensor(
        b_json_path, b_jpg_path, opt)
    a_image_tensor = get_image_tensor(a_jpg_path, opt)
    b_image_tensor = get_image_tensor(b_jpg_path, opt)

    # [1,18] theta parameters expanded to 1*256*256 sampling grids.
    theta_aff_tensor, theta_aff_tps_tensor = get_thetas_affgrid_tensor(
        data_loader.dataset.affTnf, data_loader.dataset.tpsTnf,
        data_loader.dataset.theta_json_data, theta_pair_key)

    input_tensor = torch.cat(
        [a_image_tensor, b_image_tensor, b_label_tensor, b_parsing_tensor,
         theta_aff_tensor, theta_aff_tps_tensor], dim=0)
    input_var = Variable(
        input_tensor[None, :, :, :].type(torch.cuda.FloatTensor))

    # In training mode the model is wrapped in DataParallel.
    if opt.isTrain:
        fake_b = model.module.inference(input_var)
    else:
        fake_b = model.inference(input_var)
    return (fake_b, a_image_tensor, b_image_tensor, b_label_show_tensor,
            b_parsing_tensor)
def main():
    """Generate a pose-sequence strip of fake images for each test sample
    and save an HTML results page.

    Reads module-level globals: ``opt``, ``dataset``, ``visualizer`` and the
    helpers ``get_pose_seq_list`` / ``get_label_tensor`` / ``generate_fake_B``.
    """
    web_dir = os.path.join(
        opt.results_dir, opt.name,
        '%s_%s_%s' % (opt.phase, opt.which_epoch, "I_and_II_pose_seq"))
    webpage = html.HTML(
        web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
        (opt.name, opt.phase, opt.which_epoch))

    for i, data in enumerate(dataset):
        if i >= opt.how_many:
            break
        a_image_tensor = data['a_image_tensor']      # 3 channels
        b_image_tensor = data['b_image_tensor']      # 3 channels
        a_parsing_tensor = data['a_parsing_tensor']  # 1 channel
        a_jpg_path = data['a_jpg_path']
        b_jpg_path = data['b_jpg_path']

        # Image strip: source image followed by one fake image per pose.
        show_image_tensor_1 = a_image_tensor

        test_list = []
        for b_json_path in get_pose_seq_list(opt):
            b_label_tensor, b_label_show_tensor = get_label_tensor(
                b_json_path, b_jpg_path[0], opt)
            b_label_tensor = b_label_tensor.unsqueeze_(0)
            b_label_show_tensor = b_label_show_tensor.unsqueeze_(0)
            fake_b, fake_b_parsing_label_tensor = generate_fake_B(
                a_image_tensor, b_image_tensor, b_label_tensor,
                a_parsing_tensor)
            show_image_tensor_1 = torch.cat(
                (show_image_tensor_1, fake_b.data[0:1, :, :, :].cpu()),
                dim=3)

        # Save the concatenated strip as one image.
        test_list.append(
            ('fake_image_seq', util.tensor2im(show_image_tensor_1[0])))
        visuals = OrderedDict(test_list)
        visualizer.save_images(webpage, visuals, a_jpg_path[0], b_jpg_path[0])
        print('[%s]process image... %s' % (i, a_jpg_path[0]))

    webpage.save()
    image_dir = webpage.get_image_dir()
    # BUG FIX: was the Python-2 statement "print image_dir", a syntax error
    # under Python 3 and inconsistent with the print() call above.
    print(image_dir)
def get_test_result(a_jpg_path, b_jpg_path, model, opt):
    """Run joint-model inference feeding both parsing maps and all three
    warp grids (affine, TPS, affine+TPS) to the generator.

    Args:
        a_jpg_path: path to the source (condition) image, ``.../img/...jpg``.
        b_jpg_path: path to the target image, ``.../img/...jpg``.
        model: generator exposing ``inference``; wrapped in ``DataParallel``
            when ``opt.isTrain`` is set, hence ``model.module``.
        opt: options object; reads ``joint_test_data_dir`` and ``isTrain``.

    Returns:
        ``(fake_b, a_image_tensor, b_image_tensor, b_label_show_tensor)``.
    """
    # Derive sibling-file paths from the image path by string substitution.
    a_parsing_path = a_jpg_path.replace('.jpg', '.png').replace(
        'img/', 'img_parsing_all/')
    b_json_path = b_jpg_path.replace('.jpg', '_keypoints.json').replace(
        'img/', 'img_keypoint_json/')

    # Pair key indexing the precomputed theta JSON and fake-parsing files.
    src = a_jpg_path.replace('.jpg', '_vis.png').replace(
        'img/', 'img_parsing_all/').split(os.sep)
    dst = b_jpg_path.replace('.jpg', '_vis.png').replace(
        'img/', 'img_parsing_all/').split(os.sep)
    theta_pair_key = src[-2] + '_' + src[-1] + "=" + dst[-2] + '_' + dst[-1]

    # Fake target parsing produced by stage I, stored under
    # opt.joint_test_data_dir as "<src>_TO_<dst>__fake_b_parsing.png".
    b_parsing_label_filename = theta_pair_key.replace('=', '_TO_')
    b_parsing_label_filename = b_parsing_label_filename.replace(
        '_vis.png', '') + '__fake_b_parsing.png'
    b_parsing_path = os.path.join(opt.joint_test_data_dir,
                                  b_parsing_label_filename)

    a_parsing_tensor = get_parsing_label_tensor(a_parsing_path, opt)
    b_parsing_tensor = get_parsing_label_tensor(b_parsing_path, opt)
    b_label_tensor, b_label_show_tensor = get_label_tensor(
        b_json_path, b_jpg_path, opt)
    a_image_tensor = get_image_tensor(a_jpg_path, opt)
    b_image_tensor = get_image_tensor(b_jpg_path, opt)

    # NOTE: the original also built a_parsing_rgb_tensor/b_parsing_rgb_tensor
    # via parsingim_2_tensor, but never used or returned them — dead work,
    # removed.

    theta_aff_tensor, theta_tps_tensor, theta_aff_tps_tensor = \
        get_thetas_affgrid_tensor(
            data_loader.dataset.affTnf, data_loader.dataset.tpsTnf,
            data_loader.dataset.theta_json_data, theta_pair_key)

    input_tensor = torch.cat(
        [a_image_tensor, b_image_tensor, b_label_tensor, a_parsing_tensor,
         b_parsing_tensor, theta_aff_tensor, theta_tps_tensor,
         theta_aff_tps_tensor], dim=0)
    input_var = Variable(
        input_tensor[None, :, :, :].type(torch.cuda.FloatTensor))

    model.eval()
    # In training mode the model is wrapped in DataParallel.
    if opt.isTrain:
        fake_b = model.module.inference(input_var)
    else:
        fake_b = model.inference(input_var)
    return fake_b, a_image_tensor, b_image_tensor, b_label_show_tensor
def get_test_result(a_jpg_path, b_jpg_path, model, opt):
    """Run pose-transfer inference from a keypoint label map only.

    Stacks the source image, target image and target pose label map into a
    single tensor and calls ``model.inference`` on it.

    Returns:
        ``(fake_b, a_image_tensor, b_image_tensor, b_label_show_tensor)``.
    """
    # Keypoint JSON lives next to the image under img_keypoint_json/.
    b_json_path = b_jpg_path.replace('.jpg', '_keypoints.json').replace(
        'img/', 'img_keypoint_json/')

    b_label_tensor, b_label_show_tensor = get_label_tensor(
        b_json_path, b_jpg_path, opt)
    a_image_tensor = get_image_tensor(a_jpg_path, opt)
    b_image_tensor = get_image_tensor(b_jpg_path, opt)

    stacked = torch.cat([a_image_tensor, b_image_tensor, b_label_tensor],
                        dim=0)
    input_var = Variable(stacked[None, :, :, :].type(torch.cuda.FloatTensor))

    model.eval()
    # The DataParallel wrapper is only present in training mode.
    net = model.module if opt.isTrain else model
    fake_b = net.inference(input_var)
    return fake_b, a_image_tensor, b_image_tensor, b_label_show_tensor
def get_test_result(a_jpg_path, b_jpg_path, model, opt):
    """Run stage-I parsing-generation inference for one (source, target) pair.

    Loads the source/target parsing maps (20- or 10-channel dataset layout)
    and the target pose label map, then asks the parsing generator for a
    fake target parsing.

    Args:
        a_jpg_path: path to the source image, ``.../img/...jpg``.
        b_jpg_path: path to the target image, ``.../img/...jpg``.
        model: parsing generator exposing ``inference``; wrapped in
            ``DataParallel`` when ``opt.isTrain`` is set.
        opt: options object; reads ``parsing_label_nc`` and ``isTrain``.

    Returns:
        ``(a_parsing_tensor, b_parsing_tensor, fake_b_parsing,
        b_label_show_tensor)``.

    Raises:
        ValueError: if ``opt.parsing_label_nc`` is neither 20 nor 10.
    """
    if 20 == opt.parsing_label_nc:
        a_parsing_path = a_jpg_path.replace('.jpg', '.png').replace(
            'img/', 'img_parsing_all/')
        b_parsing_path = b_jpg_path.replace('.jpg', '.png').replace(
            'img/', 'img_parsing_all/')
    elif 10 == opt.parsing_label_nc:
        a_parsing_path = a_jpg_path.replace('.jpg', '.png').replace(
            'img/', 'img_parsing_all_10channel/')
        b_parsing_path = b_jpg_path.replace('.jpg', '.png').replace(
            'img/', 'img_parsing_all_10channel/')
    else:
        # Previously fell through with the paths undefined (NameError).
        raise ValueError(
            'unsupported parsing_label_nc: %s' % opt.parsing_label_nc)

    b_json_path = b_jpg_path.replace('.jpg', '_keypoints.json').replace(
        'img/', 'img_keypoint_json/')

    a_parsing_tensor = get_parsing_label_tensor(a_parsing_path, opt)
    b_parsing_tensor = get_parsing_label_tensor(b_parsing_path, opt)
    b_label_tensor, b_label_show_tensor, _ = get_label_tensor(
        b_json_path, b_jpg_path, opt)

    input_dict = {
        'b_label_tensor': b_label_tensor,
        'a_parsing_tensor': a_parsing_tensor,
        'b_parsing_tensor': b_parsing_tensor,
        'b_label_show_tensor': b_label_show_tensor,
    }

    model.eval()
    if opt.isTrain:
        fake_b_parsing = model.module.inference(input_dict)
    else:
        # BUG FIX: was model.inference(input_var) — input_var was only
        # created by commented-out code, so this branch raised NameError.
        fake_b_parsing = model.inference(input_dict)

    return (a_parsing_tensor, b_parsing_tensor, fake_b_parsing,
            b_label_show_tensor)