def SPMCS_dataset_onlyHR2AB(dataset_path, ABpath):
    """
    :param dataset_path: e.g. ./datasets/SPMCS/test_set
    :param ABpath: the path to place A, B, e.g. ./datasets/SPMCS
    :return: None
    """
    factor = 4
    phase = "test"
    Apath = os.path.join(ABpath, phase, "A")
    Bpath = os.path.join(ABpath, phase, "B")
    assert (not os.path.exists(Apath)) and (not os.path.exists(Bpath)), \
        "{} or {} already exist, if you want to generate new AB, " \
        "please delete them first".format(Apath, Bpath)
    for video_name in tqdm(os.listdir(dataset_path)):
        Bdir = os.path.join(dataset_path, video_name, "truth")
        HRdir = os.path.join(Bpath, video_name)
        shutil.copytree(src=Bdir, dst=HRdir, symlinks=False)
        # iterate over the high-resolution frames to generate the low-resolution ones
        imagepath_list = sorted(make_images_dataset(HRdir))
        mkdir(os.path.join(Apath, video_name))
        for ith, img_path in enumerate(imagepath_list):
            img = Image.open(img_path)
            imgname = get_file_name(img_path)
            imgpathA = os.path.join(Apath, video_name, imgname + ".png")
            save_image(img, imgpathA, factor=factor, inverse=True)
def step(engine, batch):
    generated_imgs = generate(batch)
    condition_names = batch["P1_path"]
    target_names = batch["P2_path"]
    for i in range(generated_imgs.size(0)):
        # image height and width
        image_size = (generated_imgs.size(2), generated_imgs.size(3))
        image = np.zeros((image_size[0], image_size[1] * 3, 3)).astype(np.uint8)
        image[:, 0 * image_size[1]:1 * image_size[1], :] = util.tensor2image(batch["P1"].data[i])
        image[:, 1 * image_size[1]:2 * image_size[1], :] = util.tensor2image(batch["P2"].data[i])
        image[:, 2 * image_size[1]:3 * image_size[1], :] = util.tensor2image(generated_imgs.data[i])
        if limit < 0:
            image_path = os.path.join(output_dir, "{}___{}_vis.jpg".format(condition_names[i], target_names[i]))
        else:
            image_path = os.path.join(output_dir, "{}.png".format(engine.state.idx))
            engine.state.idx += 1
        util.save_image(image, image_path)
    return
def check_transformation():
    args = [
        '--dataroot', '/data/mri/data/pdd_sliced',
        '--fineSize', '128',
        '--input_nc', '1',
        '--input_channels', '0',
        '--data_suffix', 'npy',
        '--T', '1'
    ]
    opt = TestOptions().parse(args)
    opt.same_hemisphere = True
    # opt.nThreads = 1           # test code only supports nThreads = 1
    # opt.batchSize = 1          # test code only supports batchSize = 1
    # opt.serial_batches = True  # no shuffle
    dataset = SliceDataset(opt)
    for i, d in enumerate(dataset):
        # print(d['A'].shape, d['B'].shape, d['A_original'].shape, d['B_original'].shape)
        print('B:', d['B'].min(), d['B'].max(),
              'B_original:', d['B_original'].min(), d['B_original'].max())
        # np_B = util.tensor2im(d['B'], undo_norm=False)
        # np_B_original = util.tensor2im(d['B_original'], undo_norm=False)
        np_B = util.tensor2np(d['B'])
        np_B_original = util.tensor2np(d['B_original'])
        print('np_B:', np_B.min(), np_B.max(),
              'np_B_original:', np_B_original.min(), np_B_original.max())
        util.save_image(np_B, 'plots/%d_B_t.png' % i)
        util.save_image(np_B_original, 'plots/%d_B_real.png' % i)
        print('angular error')
        print(np.equal(np_B, np_B_original).sum(), np.equal(np_B, np_B).sum())
        np_B = np_B * 2 - 1
        np_B_original = np_B_original * 2 - 1
        angular_errors(np_B, np_B)
        print('-----------------')
        angular_errors(np_B, np_B_original)
        if i == 0:
            break
def image_dataset_onlyHR2AB(HRpath, ABpath, factor=4, phase="train"):
    """
    given only HR images, convert them to domain A (LR) and domain B (HR) and write them to Apath and Bpath
    :param HRpath: the path to the HR images
    :param ABpath: the path to place AB
    :param factor: downsampling factor
    :param phase: train or test
    :return: None
    """
    Apath = os.path.join(ABpath, phase, "A")
    Bpath = os.path.join(ABpath, phase, "B")
    assert (not os.path.exists(Apath)) and (not os.path.exists(Bpath)), \
        "{} or {} already exist, if you want to generate new AB, " \
        "please delete them first".format(Apath, Bpath)
    assert check_whether_last_dir(HRpath), \
        'when only HR images are given, HRpath should be a directory containing only image files'
    imagepath_list = make_images_dataset(HRpath)
    mkdir(Apath)
    mkdir(Bpath)
    for i in tqdm(range(len(imagepath_list))):
        img = Image.open(imagepath_list[i])
        imgname = os.path.basename(imagepath_list[i])
        img.save(os.path.join(Bpath, imgname))
        save_image(img, os.path.join(Apath, imgname), factor=factor, inverse=True)
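# --- hedged usage sketch (not from the original project; paths are hypothetical placeholders) ---
# Assuming the signature of image_dataset_onlyHR2AB above, a call like the following would
# create <ABpath>/train/B with untouched HR copies and <ABpath>/train/A with 4x-downsampled
# LR counterparts.
# image_dataset_onlyHR2AB(HRpath="./datasets/DIV2K/HR",
#                         ABpath="./datasets/DIV2K",
#                         factor=4, phase="train")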
def compute_fid(self, n_epoch, n_iter):
    dims = 2048
    batch = 1
    pathA = self.save_dir + '/fakeA/' + str(n_iter) + '_' + str(n_epoch)
    if not os.path.exists(pathA):
        os.mkdir(pathA)
    for i, temp_fake_A in enumerate(self.fake_A_pool.get_all()):
        save_image(tensor2im(temp_fake_A), pathA + '/' + str(i) + '.png', aspect_ratio=1.0)
    self.fakemA, self.fakesA = _compute_statistics_of_path(pathA, self.netFid, batch, dims, self.gpu_ids[0])

    pathB = self.save_dir + '/fakeB/' + str(n_iter) + '_' + str(n_epoch)
    if not os.path.exists(pathB):
        os.mkdir(pathB)
    for j, temp_fake_B in enumerate(self.fake_B_pool.get_all()):
        save_image(tensor2im(temp_fake_B), pathB + '/' + str(j) + '.png', aspect_ratio=1.0)
    self.fakemB, self.fakesB = _compute_statistics_of_path(pathB, self.netFid, batch, dims, self.gpu_ids[0])

    self.fidA = calculate_frechet_distance(self.realmA, self.realsA, self.fakemA, self.fakesA)
    self.fidB = calculate_frechet_distance(self.realmB, self.realsB, self.fakemB, self.fakesB)
def optimize_parameters(self):
    # self.forward()
    fake_B = self.forward()
    # print('fake_B dtype: ', fake_B.type())
    fake_im = util.tensor2im(fake_B)
    img_name = 'image_%s_%s.png' % (self.opt.epoch, self.count)
    img_loc = os.path.join('/root/AFS/pairwise_xray_augmentation/fake_image/', img_name)
    util.save_image(fake_im, img_loc)
    # real_data =
    # fid_loss = fid_loss.calculate_fid_given_paths(dataset, fake_B, 50, cuda:0, 2048)
    self.count += 1
    # update D
    self.set_requires_grad(self.netD, True)
    self.optimizer_D.zero_grad()
    self.backward_D()
    self.optimizer_D.step()
    # update G
    self.set_requires_grad(self.netD, False)
    self.optimizer_G.zero_grad()
    self.backward_G()
    self.optimizer_G.step()
def store_prediction(self, sess, batch_x, batch_y, name):
    """
    calculate stats on verification data
    :param sess: current TensorFlow session
    :param batch_x: test_x
    :param batch_y: test_y
    :param name: save the prediction result under this name
    :return: shape of the prediction
    """
    prediction = sess.run(self.net.predicter,
                          feed_dict={self.net.x: batch_x,
                                     self.net.y: batch_y,
                                     self.net.keep_prob: 1.})
    pred_shape = prediction.shape
    loss = sess.run(self.net.cost,
                    feed_dict={self.net.x: batch_x,
                               self.net.y: util.crop_to_shape(batch_y, pred_shape),
                               self.net.keep_prob: 1.})
    logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
        error_rate(prediction, util.crop_to_shape(batch_y, prediction.shape)), loss))
    img = util.combine_img_prediction(batch_x, batch_y, prediction)
    util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))
    return pred_shape
def cycle_GAN_interface(image_path, output_path, style):
    print(style)
    name = image_path.split('/')[-1][:-4]  # output image name
    input_image = Image.open(image_path).convert('RGB')
    input_nc = 3
    data = {}
    transform = base_dataset.get_transform(opt, grayscale=(input_nc == 1))
    visual_data = transform(input_image)
    visual_data = visual_data.unsqueeze(0)
    data['A'] = visual_data
    data['A_paths'] = image_path
    if style == 'horse2zebra':
        model = model1
    elif style == 'monet2photo':
        model = model2
    model.set_input(data)                   # unpack data from data loader
    model.test()                            # run inference
    visuals = model.get_current_visuals()   # get image results
    img_path = model.get_image_paths()      # get image paths
    for label, im_data in visuals.items():
        im = util.tensor2im(im_data)
        image_name = '%s_%s.png' % (name, label)
        # save_path = os.path.join(output_image_dir, image_name)
        util.save_image(im, output_path)
def save_images(webpage, visuals, image_number, aspect_ratio=1.0, width=256):
    """Save images to the disk.

    Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
        visuals (OrderedDict)    -- an ordered dictionary that stores (name, images (either tensor or numpy)) pairs
        image_number (str)       -- the string used to create image paths
        aspect_ratio (float)     -- the aspect ratio of saved images
        width (int)              -- the images will be resized to width x width

    This function saves the images stored in 'visuals' to the HTML file specified by 'webpage'.
    """
    import util.util
    image_dir = webpage.get_image_dir()
    name = image_number
    webpage.add_header(name)
    ims, txts, links = [], [], []
    postfix = {'real_A': segmap_postfix,
               'fake_B': photo_postfix,
               'real_B': photo_postfix[:-4] + '_real.png'}

    for label, im_data in visuals.items():
        im = util.tensor2im(im_data)
        image_name = name + postfix[label]
        save_path = os.path.join(image_dir, image_name)
        util.save_image(im, save_path, aspect_ratio=aspect_ratio)
        ims.append(image_name)
        txts.append(postfix[label])
        links.append(image_name)
    webpage.add_images(ims, txts, links, width=width)
def generate(self):
    data = self.get_network_input()
    model.set_input(data)
    visuals = model.get_latent_noise_visualization()
    image_dir = './imgs'
    for label, image_numpy in visuals.items():
        image_name = 'test_%s.png' % (label)
        save_path = os.path.join(image_dir, image_name)
        util.save_image(image_numpy, save_path)
    # convert back from PIL image to cv2 image
    cv2_img = cv2.imread('imgs/test_fake_B_shadow.png')
    cv2_img = cv2.resize(cv2_img, (self.img_size, self.img_size))
    img_gray = cv2.imread('imgs/test_0_L_fake_B_inter.png', cv2.IMREAD_GRAYSCALE)
    img_gray = cv2.resize(img_gray, (self.img_size, self.img_size))
    (thresh, im_bw) = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    self.drawWidget.setShadowImage(cv2_img)
    self.visWidget.update_vis('imgs/fake_B_gallery.png')
async def run_network(self):
    await asyncio.sleep(0.1)
    dataset = create_dataset(self.opt)  # create a dataset given opt.dataset_mode and other options
    await asyncio.sleep(0.1)
    model = create_model(self.opt)      # create a model given opt.model and other options
    await asyncio.sleep(0.1)
    model.setup(self.opt)
    if self.opt.eval:
        model.eval()
    for i, data in enumerate(dataset):
        if i >= self.opt.num_test:      # only apply our model to opt.num_test images
            break
        model.set_input(data)           # unpack data from data loader
        model.test()                    # run inference
        await asyncio.sleep(0.1)
        visuals = model.get_current_visuals()  # get image results
        im = util.tensor2im(visuals['fake'])
        await asyncio.sleep(0.1)
        util.save_image(im, self.output_path_file_name, aspect_ratio=self.opt.aspect_ratio)
        await asyncio.sleep(0.1)
def brighten_frame(img, frame_no, opt):
    print("Frame Number:", frame_no)
    input_path = '/notebooks/test_dataset/testA/input' + str(frame_no) + '.png'
    cv.imwrite(input_path, img)
    print("Writing image to test dataset location testA")
    print("Creating Data Loader")
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    # For some reason I am getting 46 images in the dataset each time I load in one image
    print("Number of images in dataset:", len(dataset))
    for i, data in enumerate(dataset):
        if i > 0:
            break
        model.set_input(data)
        visuals = model.predict()
        img_path = model.get_image_paths()
        print('process image... %s' % img_path)
        # NK added
        image_dir = webpage.get_image_dir()
        short_path = os.path.basename(img_path[0])
        name = os.path.splitext(short_path)[0]
        for label, image_numpy in visuals.items():
            image_name = '%s.jpg' % ('frame' + str(frame_no))
            save_path = os.path.join("../test_dataset/testB/", image_name)
            util.save_image(image_numpy, save_path)
    print("all done! cleaning up file", input_path)
    os.remove(input_path)
    return save_path
def draw_correspondence(A, B, correspondence, radius, save_dir, level=0, k_final=None):
    A_marked = util.tensor2im(A)
    B_marked = util.tensor2im(B)
    for i in range(len(correspondence[0])):
        color = color_map(i)
        center_1 = [correspondence[0][i][0], correspondence[0][i][1]]
        center_2 = [correspondence[1][i][0], correspondence[1][i][1]]
        if level < 3:
            A_marked = draw_circle(A_marked, center_1, color)
            B_marked = draw_circle(B_marked, center_2, color)
        else:
            # levels 3, 4, 5: draw squares
            A_marked = draw_square(A_marked, [center_1[0] + radius, center_1[1] + radius], color, radius=radius)
            B_marked = draw_square(B_marked, [center_2[0] + radius, center_2[1] + radius], color, radius=radius)
    if k_final:
        A_name, B_name = f'A_top_{k_final}.png', f'B_top_{k_final}.png'
    else:
        A_name, B_name = f'A_level_{level}.png', f'B_level_{level}.png'
    util.save_image(A_marked, os.path.join(save_dir, A_name))
    util.save_image(B_marked, os.path.join(save_dir, B_name))
def generate(self):
    data = self.get_network_input()
    model.set_input(data)
    visuals = model.get_latent_noise_visualization()
    image_dir = './imgs'
    for label, image_numpy in visuals.items():
        image_name = 'test_%s.png' % (label)
        save_path = os.path.join(image_dir, image_name)
        util.save_image(image_numpy, save_path)
    # convert back from PIL image to cv2 image
    if self.enable_shadow:
        cv2_img = cv2.imread('imgs/test_fake_B_shadow.png')
    else:
        cv2_img = cv2.imread('imgs/test_%d_L_fake_B_inter.png' % (self.which_shadow_img))
    cv2_img = cv2.resize(cv2_img, (self.img_size, self.img_size))
    self.drawWidget.setShadowImage(cv2_img)

    data = self.get_network_input_color()
    model_color.set_input(data)
    model_color.test()
    visuals = model_color.get_current_visuals()
    image_dir = './imgs'
    for label, image_numpy in visuals.items():
        image_name = 'test_color_%s.png' % (label)
        save_path = os.path.join(image_dir, image_name)
        util.save_image(image_numpy, save_path)
    self.visWidget_color.update_vis('imgs/test_color_fake_B.png')
def rw_solve(request):
    if request.method == 'POST':
        image = request.FILES.get('image')
        dir = request.POST.get('dir')
        util.save_image(image)
        util.remove_water(r'G:/Transfer/tmp/src.jpg', dir)
        return HttpResponse(':8000/images/after_rm_water.jpg')
def swap_face_solve(request):
    if request.method == 'POST':
        image = request.FILES.get('image')
        dest_face = request.POST.get('dest')
        util.save_image(image)
        util.swap_face(r'G:/Transfer/tmp/src.jpg', r'G:/Transfer/tmp/' + dest_face + ".jpg")
        return HttpResponse(':8000/images/after_swap_face.jpg')
def gen():
    if 'file' not in request.files:
        return error('file form-data not existed'), 412
    if 'base64' in request.args:
        use_base64 = True if request.args.get('base64') == 'true' else False
    else:
        use_base64 = False
    image = request.files['file']

    # Submit taylor.jpg ---> taylor_1234567.jpg (name + timestamp)
    image_name, ext = image.filename.rsplit('.', 1)
    image_name = image_name + '_' + str(int(time.time())) + '.' + ext

    # Save image to /upload
    image_path = os.path.join(app.config['UPLOAD_FOLDER'], image_name)
    image.save(image_path)
    print('hello', image_name)

    ## Crop here
    # image = cv2.imread(image_path)
    # image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    # image = image[image[:, :, 3] == 255, :3]
    # cv2.imwrite(image_path, image)
    # image = resize(image, 256, 256)
    # cv2.imwrite(image_path, image)

    ## Load image and begin generating
    real = Image.open(image_path)
    preprocess = transforms.Compose([
        transforms.Scale(opt.loadSize),
        transforms.RandomCrop(opt.fineSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    # Load input
    input_A = preprocess(real).unsqueeze_(0)
    model.input_A.resize_(input_A.size()).copy_(input_A)
    # Forward (model.real_A) through G and produce output (model.fake_B)
    model.test()
    # Convert image to numpy array
    fake = util.tensor2im(model.fake_B.data)
    output_path = os.path.join(app.config['GAN_FOLDER'], image_name)
    # Save image
    util.save_image(fake, output_path)
    # if not use_base64:
    #     return send_file(output_path)
    # image = open(output_path, 'rb').read()
    # encoded = 'data:image/jpeg;base64,' + base64.b64encode(image).decode('utf-8')
    # return encoded
    print('hi', image_name)
    return '/cgan/' + image_name
def display_current_results(self, visuals, epoch, save_result):
    """Display current results on visdom.

    Parameters:
        visuals (OrderedDict) -- dictionary of images to display or save.
        epoch (int)           -- the current epoch.
        save_result (bool)    -- whether to save the results to an HTML file.
    """
    if int(self.opt('display_id')) > 0:
        ncols = len(visuals)
        h, w = next(iter(visuals.values())).shape[:2]
        table_css = """<style>
            table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
            table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
            </style>""" % (w, h)
        title = self.opt('name')
        label_html = ''
        label_html_row = ''
        images = []
        idx = 0
        for label, image in visuals.items():
            image_tensor, image_numpy = util.tensor2im(image)
            label_html_row += '<td>%s<td>' % label
            images.append(image_numpy)
            idx += 1
            if idx % ncols == 0:
                label_html += '<tr>%s<tr>' % label_html_row
                label_html_row = ''
        self.vis.images(images, nrow=ncols, padding=2, opts=dict(title=title + ' images'))
        label_html = '<table>%s</table>' % label_html
        # self.vis.text(table_css + label_html, opts=dict(title=title + ' labels'))
    if save_result:
        for label, image in visuals.items():
            img_tensor, image_numpy = util.tensor2im(image)
            image_path = os.path.join(self.img_dir, 'epoch%.3f_%s.png' % (epoch, label))
            util.save_image(img_tensor, image_path)
        webpage = html.HTML(self.opt, self.web_dir, 'Experiment name=%s' % self.opt('name'), refresh=1)
        for n in range(epoch, 0, -1):
            webpage.add_header('epoch [%d]' % n)
            ims, txts, links = [], [], []
            for label, image in visuals.items():
                img_path = 'epoch%.3f_%s.png' % (n, label)
                ims.append(img_path)
                txts.append(label)
                links.append(img_path)
            webpage.add_images(ims, txts, links)
        webpage.save()
def save_images(img, aspect_ratio=1.0):
    im = tensor2im(img)
    image_name = 'output.png'
    save_path = os.path.join('./', image_name)
    h, w, _ = im.shape
    if aspect_ratio > 1.0:
        im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
    if aspect_ratio < 1.0:
        im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')
    save_image(im, save_path)
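# --- hedged sketch (assumption, not part of the original code) ---------------
# scipy.misc.imresize has been removed from recent SciPy releases; an equivalent
# bicubic resize of an HxWx3 uint8 array can be done with Pillow. This helper is
# only an illustrative alternative to the imresize calls used above.
def _resize_bicubic(im, size_hw):
    from PIL import Image
    import numpy as np
    h, w = size_hw
    # PIL expects (width, height); convert back to a numpy array afterwards
    return np.array(Image.fromarray(im).resize((w, h), Image.BICUBIC))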
def display_current_results(self, visuals, epoch, nrows=None, res=256):
    if self.display_id > 0:  # show images in the browser
        title = self.name
        if nrows is None:
            nrows = int(math.ceil(len(visuals.items()) / 2.0))
        images = []
        idx = 0
        for label, image_numpy in visuals.items():
            title += " | " if idx % nrows == 0 else ", "
            title += label
            img = image_numpy.transpose([2, 0, 1])
            img = zoom_to_res(img, res=res, order=0)
            images.append(img)
            idx += 1
        if len(visuals.items()) % 2 != 0:
            white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
            white_image = zoom_to_res(white_image, res=res, order=0)
            images.append(white_image)
        self.vis.images(images, nrow=nrows, win=self.display_id + 1, opts=dict(title=title))
    if self.use_html:  # save images to an html file
        for label, image_numpy in visuals.items():
            img_path = os.path.join(self.img_dir, 'epoch%.3d_cnt%.6d_%s.png' % (epoch, self.display_cnt, label))
            util.save_image(zoom_to_res(image_numpy, res=res, axis=2), img_path)
        self.display_cnt += 1
        self.display_cnt_high = np.maximum(self.display_cnt_high, self.display_cnt)
        # update website
        webpage = ownHTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
        for n in range(epoch, 0, -1):
            webpage.add_header('epoch [%d]' % n)
            if n == epoch:
                high = self.display_cnt
            else:
                high = self.display_cnt_high
            for c in range(high - 1, -1, -1):
                ims = []
                txts = []
                links = []
                for label, image_numpy in visuals.items():
                    img_path = 'epoch%.3d_cnt%.6d_%s.png' % (n, c, label)
                    ims.append(os.path.join('images', img_path))
                    txts.append(label)
                    links.append(os.path.join('images', img_path))
                webpage.add_images(ims, txts, links, width=self.win_size)
        webpage.save()
def test(self):
    output_skeleton = self.net_G(self.input_skeleton)
    if self.opt.write_image:
        current_map = self.convert2skeleton(self.current_skeleton, kp_form='COCO_17')
        obtained_map = self.convert2skeleton(output_skeleton, kp_form='human36m_17')
        for j in range(len(current_map)):
            short_path = ntpath.basename(self.image_paths[j][0])
            name = os.path.splitext(short_path)[0]
            util.mkdir(self.results_dir_base)

            gt = self.gen_images[0, j * 3:j * 3 + 3, ...].detach().cpu().numpy()
            gt = np.transpose(((gt + 1) / 2 * 255), (1, 2, 0))

            in_img_name = '%s_%s.%s' % (name, 'skeleton_in', 'png')
            img_path = os.path.join(self.results_dir_base, in_img_name)
            gt_ = np.copy(gt)
            gt_[current_map[j] != 0] = 0
            skeleton_in = (current_map[j] + gt_).astype(np.uint8)
            util.save_image(skeleton_in, img_path)
            print(img_path)

            out_img_name = '%s_%s.%s' % (name, 'skeleton_out', 'png')
            img_path = os.path.join(self.results_dir_base, out_img_name)
            gt_ = np.copy(gt)
            gt_[obtained_map[j] != 0] = 0
            skeleton_out = (obtained_map[j] + gt_).astype(np.uint8)
            util.save_image(skeleton_out, img_path)

    output_skeleton = (output_skeleton + 1) / 2
    skeleton_pad = torch.ones(1, 17).type_as(output_skeleton)
    for j in range(output_skeleton.shape[-1]):
        skeleton = output_skeleton[0, :, j].view(2, 17)
        skeleton[0, :] = skeleton[0, :] * self.shape[-1][0]
        skeleton[1, :] = skeleton[1, :] * self.shape[0][0]
        skeleton = torch.cat((skeleton, skeleton_pad), 0)
        skeleton = skeleton[[1, 0, 2], :]
        skeleton = skeleton.transpose(1, 0).contiguous().view(-1).detach().cpu().numpy()

        people = dict()
        people['pose_keypoints_2d'] = skeleton.tolist()
        output_dict = dict()
        output_dict["version"] = "Video to Pose 2D"
        output_dict["people"] = [people]

        short_path = ntpath.basename(self.image_paths[j][0])
        name = os.path.splitext(short_path)[0]
        util.mkdir(self.results_dir_base)
        name = '%s.%s' % (name, 'json')
        name = os.path.join(self.results_dir_base, name)
        with open(name, 'w') as f:
            json.dump(output_dict, f)
def cal_sample():
    predict_fname = '/data/mri/convrnn/results/t1_pdd_cosine_L1_unet128_2d/test_lowest_val/gaussian_0/numpy/IXI392-Guys_0030_fake_B.npy'
    gt_fname = '/data/mri/convrnn/results/t1_pdd_cosine_L1_unet128_2d/test_lowest_val/gaussian_0/numpy/IXI392-Guys_0030_real_B.npy'
    predict = np.load(predict_fname) * 255
    gt = np.load(gt_fname) * 255
    print(predict.mean(), predict.std())
    print(gt.mean(), gt.std())
    print(predict.shape)
    util.save_image(predict, 'predict.png')
    util.save_image(gt, 'gt.png')
    angular_errors(predict, gt)
def gen_base64():
    if 'image' not in request.json:
        return error('Stupid request'), 412
    if 'base64' in request.args:
        use_base64 = True if request.args.get('base64') == 'true' else False
    else:
        use_base64 = False

    image_data = str.encode(request.json['image'])
    image_data = image_data[23:]
    ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
    timestamp = datetime.datetime.now().isoformat()
    image_name = ip + '_' + timestamp + '.png'
    image_path = os.path.join(app.config['UPLOAD_FOLDER'], image_name)
    with open(image_path, "wb") as f:
        f.write(base64.decodebytes(image_data))

    image = cv2.imread(image_path)
    image = resize(image, 256, 256)
    cv2.imwrite(image_path, image)

    ## Load image and begin generating
    real = Image.open(image_path)
    preprocess = transforms.Compose([
        transforms.Scale(opt.loadSize),
        transforms.RandomCrop(opt.fineSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    # Load input
    input_A = preprocess(real).unsqueeze_(0)
    model.input_A.resize_(input_A.size()).copy_(input_A)
    # Forward (model.real_A) through G and produce output (model.fake_B)
    model.test()
    # Convert image to numpy array
    fake = util.tensor2im(model.fake_B.data)
    output_path = os.path.join(app.config['GAN_FOLDER'], image_name)
    # Save image
    util.save_image(fake, output_path)
    # if not use_base64:
    #     return send_file(output_path)
    # image = open(output_path, 'rb').read()
    # encoded = 'data:image/jpeg;base64,' + base64.b64encode(image).decode('utf-8')
    # return encoded
    return '/cgan/' + image_name
def vimeo90K_dataset_onlyHR2AB(dataset_path, ABpath, phase="train", factor=4, can_continue=False):
    """
    pre-process the vimeo90K dataset to make it suitable for this project
    link: http://toflow.csail.mit.edu/
    notice! the dir 00055/0896 only has one frame...... /(ㄒoㄒ)/~~!
    usage example:
        vimeo90K_dataset_onlyHR2AB(dataset_path="/opt/data/private/datasets/vimeo_septuplet/vimeo_septuplet",
                                   ABpath="/opt/data/private/datasets/vimeo_septuplet",
                                   phase="train", factor=4)
    :param dataset_path: the path to the dataset dir; should contain sep_testlist.txt, sep_trainlist.txt and the sequences dir
    :param ABpath: the path to place AB
    :param phase: train or test
    :return: None
    """
    Apath = os.path.join(ABpath, phase, "A")
    Bpath = os.path.join(ABpath, phase, "B")
    # assert (not os.path.exists(Apath)) and (not os.path.exists(Bpath)), \
    #     "{} or {} already exist, if you want to generate new AB, please delete them first".format(Apath, Bpath)
    video_dir = os.path.join(dataset_path, "sequences")
    txt_path = os.path.join(dataset_path, "sep_{}list.txt".format(phase))
    assert os.path.isdir(video_dir)
    assert os.path.isfile(txt_path)
    with open(txt_path) as f:
        for two_level_path in tqdm(f.readlines()):
            HR_dir_path = os.path.join(video_dir, two_level_path.strip())  # e.g. dataset_path/sequences/00010/0558
            if not os.path.isdir(HR_dir_path):
                print("illegal path: {} is not a dir, continue!".format(HR_dir_path))
                continue
            new_path = two_level_path.strip()[0:10].replace('/', '_')
            if can_continue and os.path.exists(os.path.join(Apath, new_path)) \
                    and os.path.exists(os.path.join(Bpath, new_path)):
                print("{} already dealt with, continue!".format(new_path))
                continue
            mkdir(os.path.join(Bpath, new_path))  # e.g. Bpath/00010_0558
            mkdir(os.path.join(Apath, new_path))
            imagepath_list = make_images_dataset(HR_dir_path)
            for ith, img_path in enumerate(imagepath_list):
                img = Image.open(img_path)
                imgpathB = os.path.join(Bpath, new_path, "frame_{:05d}".format(ith) + ".png")
                save_image(img, imgpathB)
                imgpathA = os.path.join(Apath, new_path, "frame_{:05d}".format(ith) + ".png")
                save_image(img, imgpathA, factor=factor, inverse=True)
def save_output(real, fake, real_dir, fake_dir, paths):
    num = real.size(0)
    for i in range(0, num):
        p = os.path.basename(paths[i])
        im = real[i]
        util.save_image(util.tensor2im(im), os.path.join(real_dir, p.replace('.', '_real_A.')))
        im = fake[i]
        util.save_image(util.tensor2im(im), os.path.join(fake_dir, p.replace('.', '_fake_B.')))
def image_pre_crop(path2img, crop_size, path2placeblocks):
    img = Image.open(path2img).convert('RGB')
    w, h = img.size
    w_list = list(range(0, w + 1, crop_size))
    h_list = list(range(0, h + 1, crop_size))
    id = 0
    for i in range(len(w_list) - 1):
        for j in range(len(h_list) - 1):
            box = (w_list[i], h_list[j], w_list[i + 1], h_list[j + 1])
            cropped_img = img.crop(box)
            save_image(cropped_img, os.path.join(path2placeblocks, "{}.png".format(id)))
            id += 1
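# --- hedged usage sketch (illustrative only; the file names are hypothetical) ---
# With image_pre_crop above, a 512x512 input and crop_size=256 would yield four
# non-overlapping 256x256 blocks saved as 0.png ... 3.png inside path2placeblocks.
# Any remainder smaller than crop_size at the right/bottom edges is discarded.
# image_pre_crop("./example_512x512.png", 256, "./blocks")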
def save_images(image_dir, visuals, image_path, aspect_ratio=1.0):
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]
    im = visuals
    image_name = '%s.jpg' % (name)
    save_path = os.path.join(image_dir, image_name)
    h, w, _ = im.shape
    if aspect_ratio > 1.0:
        im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
    if aspect_ratio < 1.0:
        im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')
    util.save_image(im, save_path)
def save_result(self, results, **kwargs):
    visuals_np = Visualizer.convert_visuals_to_numpy(results, batchSize=1, label_nc=self.opt.label_nc)
    # We only run the demo with batch size 1, so remove the first dimension.
    visuals_np = OrderedDict([(k, v[0]) for k, v in visuals_np.items()])
    filename = self._get_filename(kwargs)
    save_path = os.path.join(self.save_dir, filename)
    save_image(visuals_np["fake_image"], save_path, create_dir=True)
    save_style_matrix(results["encoded_style"][0], "{}.csv".format(save_path[:-4]))
    return self.save_dir
def getitem(A_path, B_path, inst_path, feat_path):
    ### input A (label maps)
    A = Image.open(A_path)
    params = get_params(opt, A.size)
    if opt.label_nc == 0:
        transform_A = get_transform(opt, params)
        A_tensor = transform_A(A.convert('RGB'))
    else:
        transform_A = get_transform(opt, params, method=Image.NEAREST, normalize=False)
        A_tensor = transform_A(A) * 255.0

    B_tensor = inst_tensor = feat_tensor = 0
    ### input B (real images)
    B = Image.open(B_path).convert('RGB')
    transform_B = get_transform(opt, params)
    B_tensor = transform_B(B)

    ### if using instance maps
    inst = Image.open(inst_path)
    inst_tensor = transform_A(inst)

    # get feat
    netE = networks.define_G(opt.output_nc, opt.feat_num, opt.nef, 'encoder',
                             opt.n_downsample_E, norm=opt.norm, gpu_ids=opt.gpu_ids)
    feat_map = netE.forward(Variable(B_tensor[np.newaxis, :].cuda(), volatile=True),
                            inst_tensor[np.newaxis, :].cuda())
    feat_map = nn.Upsample(scale_factor=2, mode='nearest')(feat_map)
    image_numpy = util.tensor2im(feat_map.data[0])
    util.save_image(image_numpy, feat_path)
    feat = Image.open(feat_path).convert('RGB')
    norm = normalize()
    feat_tensor = norm(transform_A(feat))

    input_dict = {
        'label': A_tensor,
        'inst': inst_tensor,
        'image': B_tensor,
        'feat': feat_tensor,
        'path': A_path
    }
    return get_features(input_dict['inst'], input_dict['feat'])
def save_results(self, save_data, data_name='none', data_ext='jpg'):
    """Save the training or testing results to disk"""
    img_paths = self.get_image_paths()
    for i in range(save_data.size(0)):
        print('process image ...... %s' % img_paths[i])
        short_path = ntpath.basename(img_paths[i])  # get image path
        name = os.path.splitext(short_path)[0]
        img_name = '%s_%s.%s' % (name, data_name, data_ext)
        util.mkdir(self.opt.results_dir)
        img_path = os.path.join(self.opt.results_dir, img_name)
        img_numpy = util.tensor2im(save_data[i].data)
        util.save_image(img_numpy, img_path)
# test
# Create or clear dir for saving generated samples
if os.path.exists(opt.testing_path):
    shutil.rmtree(opt.testing_path)
os.makedirs(opt.testing_path)

img_comb = {}
img_comb_row = {}
for j, data in enumerate(dataset):
    model.set_input(data)
    model.test()
    visuals = model.get_current_visuals()
    # Combine 100 images into one (10 rows of 10)
    for label, image_numpy in visuals.items():
        if (j + 1) % 10 == 1:
            img_comb_row[label] = image_numpy
        else:
            img_comb_row[label] = np.concatenate([img_comb_row[label], image_numpy], 1)
        if j == 9:
            img_comb[label] = img_comb_row[label]
        elif (j + 1) % 10 == 0:
            img_comb[label] = np.concatenate([img_comb[label], img_comb_row[label]], 0)

# Save
for label, image_numpy in img_comb.items():
    image_name = '%s_%s.png' % (opt.which_epoch, label)
    save_path = os.path.join(opt.testing_path, image_name)
    util.save_image(image_numpy, save_path)