def evaluate(self, loaders):
    args = self.args
    nets_ema = self.nets_ema
    self._load_checkpoint(90000)
    # output_dir1 = '/mnt/home/reid_stargan30_iter8000/DATA/Market1501/train_final'
    # output_dir2 = '/mnt/home/reid_stargan30_iter8000/DATA/Market1501/train_final_whole'
    # output_dir1 = '/home/xiaocaibi/reid_stargan/DATA/Market1501/train_final'
    output_dir2 = '/home/xiaocaibi/reid_stargan/DATA/Market1501/train_final_whole'
    # os.makedirs(output_dir, exist_ok=True)

    # build the output filename for every (source, reference) pair
    fname_list = []
    for j in range(len(loaders.src)):
        fname1 = loaders.src.dataset.samples[j][0].parts[3]
        fname2 = loaders.src.dataset.samples[j][1].parts[3]
        compose1 = fname1.split('.')[0]
        compose2 = compose1.split('_')[1][1]
        compose3 = fname2.split('.')[0].split('_')[1][1]
        fname = "%s_fake_%sto%s.jpg" % (compose1, compose2, compose3)
        fname_list.append(fname)

    # assumes batch_size == 1 so the batch index i matches the sample index
    for i, batch in enumerate(tqdm(loaders.src, total=len(loaders.src))):
        x_real = batch[0].cuda()
        x_ref = batch[1].cuda()
        y_ref = batch[2].cuda()
        s_trg = nets_ema.style_encoder(x_ref, y_ref)
        x_fake = nets_ema.generator(x_real, s_trg, masks=None)
        # save generated images to calculate FID later
        filename2 = os.path.join(output_dir2, fname_list[i])
        utils.save_image(x_fake, ncol=1, filename=filename2)
def telegram_hook(request):
    update = json.loads(request.body)
    message = update.get('message')
    text = None
    if message is None:
        return HttpResponse('OK')
    user_id = message['from']['id']
    if 'voice' in message:
        file_id = message['voice']['file_id']
        file_path = get_telegram_file_path(file_id)
        voice_message = download_file(file_path)
        file_src = write_audio_file(voice_message, user_id)
        convert_audio_file(file_src, user_id)
        text = 'Your voice message saved'
    elif 'photo' in message:
        # Telegram sends several sizes; the last entry is the largest photo.
        file_id = message['photo'][-1]['file_id']
        file_path = get_telegram_file_path(file_id)
        img = download_file(file_path)
        count_faces = found_faces_on_image(img)
        if count_faces > 0:
            save_image(img, user_id)
            text = f'Found {count_faces} faces' if count_faces > 1 else f'Found {count_faces} face'
        else:
            text = 'Face not found'
    invoke_telegram('sendMessage', chat_id=message['chat']['id'], text=text)
    return HttpResponse('OK')
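# A minimal sketch of the file-download helpers used above, assuming a plain
# requests-based Telegram Bot API client. TELEGRAM_TOKEN and the helper names
# are hypothetical; only the getFile / file-download endpoints are standard.
import requests

TELEGRAM_TOKEN = '...'  # assumed to come from project settings


def get_telegram_file_path(file_id):
    # Bot API getFile returns a relative file_path for a given file_id.
    resp = requests.get(
        f'https://api.telegram.org/bot{TELEGRAM_TOKEN}/getFile',
        params={'file_id': file_id})
    return resp.json()['result']['file_path']


def download_file(file_path):
    # Files are served from the file endpoint using the returned file_path.
    resp = requests.get(
        f'https://api.telegram.org/file/bot{TELEGRAM_TOKEN}/{file_path}')
    return resp.content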
def eval(self, loaders):
    args = self.args
    nets = self.nets
    self._load_checkpoint(step="", fname=args.chkpt_path)

    # define the fetcher for data loading
    fetcher_eval = InputFetcher(loaders.eval, 'eval')

    gen_frame_list = []
    gt_frame_list = []
    for i in range(args.resume_iter, len(fetcher_eval)):  # args.total_iters
        # get the next evaluation batch from the fetcher
        inputs = next(fetcher_eval)
        gt_land, gt, gt_mask, _ = inputs.gt_land, inputs.gt, inputs.gt_mask, inputs.prior
        # gt.shape:    (batch, sync_t, c, h, w)
        # prior.shape: (batch, sync_t, c*2, h, w)
        gt_land = gt_land.flatten(0, 1)  # (batch*sync_t, c*3, h, w)
        gt = gt.flatten(0, 1)
        gt_mask = gt_mask.flatten(0, 1)

        s_gt = nets.style_encoder(gt)  # (batch*sync_t, 512)
        gen_frames = nets.generator(torch.cat((gt_land, gt_mask), dim=1), s_gt, gt)
        gen_frame_list.append(gen_frames)  # (5, 3, 128, 128)
        gt_frame_list.append(gt)

    result = torch.cat(gen_frame_list, dim=0)
    result_gt = torch.cat(gt_frame_list, dim=0)
    utils.save_image(result, filename="./result_fake.jpg")
    utils.save_image(result_gt, filename="./result_gt.jpg")

    # write the generated frames to a video (RGB -> BGR for OpenCV)
    fourcc = cv2.VideoWriter_fourcc(*'FMP4')
    out = cv2.VideoWriter('result.mp4', fourcc, 25.0, (128, 128))
    for gen_frames in gen_frame_list:
        for gen_frame in gen_frames:  # gen_frame: (3, 128, 128)
            gen_frame = utils.denormalize(gen_frame)
            frame = np.uint8(gen_frame.permute(1, 2, 0).detach().cpu().numpy() * 255.0)
            out.write(frame[:, :, ::-1])
    out.release()

    # write the ground-truth frames to a video
    fourcc = cv2.VideoWriter_fourcc(*'FMP4')
    out = cv2.VideoWriter('result_gt.mp4', fourcc, 25.0, (128, 128))
    for gt_frames in gt_frame_list:
        for gt_frame in gt_frames:  # gt_frame: (3, 128, 128)
            gt_frame = utils.denormalize(gt_frame)
            frame = np.uint8(gt_frame.permute(1, 2, 0).detach().cpu().numpy() * 255.0)
            out.write(frame[:, :, ::-1])
    out.release()
    return 0
def profile_edit_avatar():
    try:
        user = User.objects.get(id=current_user.id)
        avatars_path = app.config.get('AVATARS_PATH')
        save_image(request.files['avatar'], f'{avatars_path}/{user.id}', (200, 200))
    except User.DoesNotExist:
        abort(404)
def calculate_metrics(nets, args, step, mode):
    print('Calculating evaluation metrics...')
    # assert mode in ['latent', 'reference']
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    domains = list(range(args.num_domains))
    domains.sort()
    num_domains = len(domains)
    print('Number of domains: %d' % num_domains)

    for trg_idx, trg_domain in enumerate(domains):
        task = '%s' % trg_domain
        path_fake = os.path.join(args.eval_dir, task)
        shutil.rmtree(path_fake, ignore_errors=True)
        os.makedirs(path_fake)

        loader = get_sample_loader(root=args.val_img_dir,
                                   img_size=args.img_size,
                                   batch_size=args.val_batch_size,
                                   shuffle=False,
                                   num_workers=args.num_workers,
                                   drop_last=False,
                                   trg_domain=trg_domain,
                                   mode=mode,
                                   dataset_dir=args.dataset_dir,
                                   threshold=args.num_sample)
        fetcher = InputFetcher(loader, None, args.latent_dim, 'test')

        print('Generating images for %s...' % task)
        for i in tqdm(range(len(loader))):
            # fetch images and labels
            inputs = next(fetcher)
            x_src, x_ref, y = inputs.src, inputs.trg, inputs.y
            N = x_src.size(0)
            x_src = x_src.to(device)
            x_ref = x_ref.to(device)
            y_trg = torch.tensor([trg_idx] * N).to(device)
            masks = None

            s_trg = nets.style_encoder(x_ref, y_trg)
            x_fake = nets.generator(x_src, s_trg, masks=masks)

            # save generated images to calculate FID later
            for k in range(N):
                filename = os.path.join(
                    path_fake,
                    '%.4i.png' % (i * args.val_batch_size + (k + 1)))
                utils.save_image(x_fake[k], ncol=1, filename=filename)

    # calculate and report fid values
    fid_values, fid_mean = calculate_fid_for_all_tasks(
        args, domains, step=step, mode=mode, dataset_dir=args.dataset_dir)
    return fid_values, fid_mean
def form_valid(self, form):
    # code = Invite.objects.filter(code__iexact=form.cleaned_data.get('code'))
    # if code.exists():
    #     code = code[0]
    #     if not code.use:
    super(UserThirdRegisterView, self).form_valid(form)
    t_type = form.cleaned_data.get('type')
    if t_type == 1:
        self.object.wechat_openid = form.cleaned_data.get('openid')
        self.object.wechat_bind = True
    elif t_type == 2:
        self.object.weibo_openid = form.cleaned_data.get('openid')
        self.object.weibo_bind = True
    elif t_type == 3:
        self.object.qq_openid = form.cleaned_data.get('openid')
        self.object.qq_bind = True
    status, path = save_image(
        form.cleaned_data.get('avatar'),
        name='avatar{0}.jpg'.format(unicode(time.time()).replace('.', '')))
    if status:
        self.object.avatar = path
    self.token = self.create_token()
    self.object.token = self.token
    self.object.set_password(form.cleaned_data.get('password'))
    self.object.save()
    # code.use = True
    # code.belong = self.object
    # code.save()
    return self.render_to_response(self.object)
def profile_save():
    data_fields = ['username', 'about']
    user = User.objects.get(id=current_user.id)
    for field in data_fields:
        value = request.form.get(field)
        setattr(user, field, value)
    if not user.socials:
        user.socials = {}
    for field, value in request.form.items():
        if field.startswith('socials.'):
            user.socials[field[len('socials.'):]] = value
    if request.form.get('delete'):
        Path(f"{app.config.get('AVATARS_PATH')}/{user.id}").unlink()
        user.image_path = None
    if request.files.get('avatar'):
        path = f"{app.config.get('AVATARS_PATH')}/{user.id}"
        save_image(request.files['avatar'], path, (200, 200))
        user.image_path = path
    user.save()
    return redirect(url_for_user(user))
def align_faces(args, input_dir, output_dir):
    import os
    from torchvision import transforms
    from PIL import Image
    from core.utils import save_image

    aligner = FaceAligner(args.wing_path, args.lm_path, args.img_size)
    transform = transforms.Compose([
        transforms.Resize((args.img_size, args.img_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5],
                             std=[0.5, 0.5, 0.5]),
    ])

    fnames = os.listdir(input_dir)
    os.makedirs(output_dir, exist_ok=True)
    fnames.sort()
    for fname in fnames:
        image = Image.open(os.path.join(input_dir, fname)).convert('RGB')
        x = transform(image).unsqueeze(0)
        x_aligned = aligner.align(x)
        save_image(x_aligned, 1, filename=os.path.join(output_dir, fname))
        print('Saved the aligned image to %s...' % fname)
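# Example invocation of align_faces, assuming StarGAN-v2-style CLI arguments
# (wing_path, lm_path, img_size). The checkpoint paths and directories below
# are illustrative defaults, not values taken from the snippet above.
from argparse import Namespace

args = Namespace(wing_path='expr/checkpoints/wing.ckpt',
                 lm_path='expr/checkpoints/celeba_lm_mean.npz',
                 img_size=256)
align_faces(args, input_dir='assets/src', output_dir='assets/src_aligned')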
def get(self, request, *args, **kwargs):
    code = request.GET.get('code')
    state = request.GET.get('state')
    if code and state:
        access_token = get_access_token(code, state)
        email, nick, avatar = get_user_info(access_token)
        guest = Guest.objects.filter(email=email)
        token = self.create_token()
        if guest.exists():
            guest = guest[0]
            guest.token = token
            guest.save()
        else:
            status, avatar_path = save_image(
                avatar,
                '{0}{1}.png'.format(nick, unicode(time.time()).split('.')[0]))
            guest = Guest(email=email, nick=nick, token=token)
            guest.set_password('123456q_+|')
            if status:
                guest.avatar = avatar_path
            guest.save()
        comment = Comment.objects.filter(state=state)
        aid = 0
        if not comment.exists():
            comment = CommentReply.objects.filter(state=state)
        else:
            aid = comment[0].belong.id
        # "New comment" notification: "You have a new comment, log in at
        # www.rapospectre.com to view it"
        send_mail('新评论', '你有一条新评论, 登陆查看 www.rapospectre.com',
                  '*****@*****.**', ['*****@*****.**'], fail_silently=True)
        if comment.exists():
            comment = comment[0]
            comment.author = guest
            comment.review = True
            comment.save()
            if aid == 0:
                # the matched object is a CommentReply; notify the original commenter
                aid = comment.comment.belong.id
                # "Comment reply" notification email
                send_html_mail('评论回复', comment.to, comment.comment.belong,
                               [comment.to.email])
        request.session['token'] = token
        return HttpResponseRedirect('/blog/{0}'.format(aid))
def upload_image(request):
    """
    @api {post} /upload_image Upload an image
    @apiVersion 1.0.0
    @apiGroup common
    @apiName Upload an image
    @apiParam {Object} file
    @apiSuccess {String} url image URL
    """
    if request.method == 'POST':
        try:
            file = request.FILES["file"]
        except Exception:
            # "Please select an image to upload"
            return json_failed(400, "请选择上传图片")
        image_path, name = save_image(file)
        url = os.path.join(settings.BASE_URL, "media", image_path, name)
        return json_success({"url": url})
    return HttpResponseNotAllowed(["POST"])
def calculate_metrics(nets, args, step, mode):
    print('Calculating evaluation metrics...')
    assert mode in ['latent', 'reference']
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    domains = os.listdir(args.val_img_dir)
    domains.sort()
    num_domains = len(domains)
    print('Number of domains: %d' % num_domains)

    lpips_dict = OrderedDict()
    for trg_idx, trg_domain in enumerate(domains):
        src_domains = [x for x in domains if x != trg_domain]

        if mode == 'reference':
            path_ref = os.path.join(args.val_img_dir, trg_domain)
            loader_ref = get_eval_loader(root=path_ref,
                                         img_size=args.img_size,
                                         batch_size=args.val_batch_size,
                                         imagenet_normalize=False,
                                         drop_last=True)

        for src_idx, src_domain in enumerate(src_domains):
            path_src = os.path.join(args.val_img_dir, src_domain)
            loader_src = get_eval_loader(root=path_src,
                                         img_size=args.img_size,
                                         batch_size=args.val_batch_size,
                                         imagenet_normalize=False)

            task = '%s2%s' % (src_domain, trg_domain)
            path_fake = os.path.join(args.eval_dir, task)
            shutil.rmtree(path_fake, ignore_errors=True)
            os.makedirs(path_fake)

            lpips_values = []
            if mode == 'reference':  # loader_ref only exists in reference mode
                iter_ref = iter(loader_ref)
            print('Generating images and calculating LPIPS for %s...' % task)
            for i, x_src in enumerate(tqdm(loader_src, total=len(loader_src))):
                N = x_src.size(0)
                x_src = x_src.to(device)
                y_trg = torch.tensor([trg_idx] * N).to(device)
                masks = nets.fan.get_heatmap(x_src) if args.w_hpf > 0 else None

                # generate 10 outputs from the same input
                group_of_images = []
                for j in range(args.num_outs_per_domain):
                    if mode == 'latent':
                        z_trg = torch.randn(N, args.latent_dim).to(device)
                        s_trg = nets.mapping_network(z_trg, y_trg)
                    else:
                        try:
                            x_ref = next(iter_ref).to(device)
                        except (NameError, StopIteration):
                            iter_ref = iter(loader_ref)
                            x_ref = next(iter_ref).to(device)
                        if x_ref.size(0) > N:
                            x_ref = x_ref[:N]
                        s_trg = nets.style_encoder(x_ref, y_trg)

                    x_fake = nets.generator(x_src, s_trg, masks=masks)
                    group_of_images.append(x_fake)

                    # save generated images to calculate FID later
                    for k in range(N):
                        filename = os.path.join(
                            path_fake,
                            '%.4i_%.2i.png' % (i * args.val_batch_size + (k + 1), j + 1))
                        utils.save_image(x_fake[k], ncol=1, filename=filename)

                lpips_value = calculate_lpips_given_images(group_of_images)
                lpips_values.append(lpips_value)

            # calculate LPIPS for each task (e.g. cat2dog, dog2cat)
            lpips_mean = np.array(lpips_values).mean()
            lpips_dict['LPIPS_%s/%s' % (mode, task)] = lpips_mean

        # delete dataloaders
        del loader_src
        if mode == 'reference':
            del loader_ref
            del iter_ref

    # calculate the average LPIPS for all tasks
    lpips_mean = 0
    for _, value in lpips_dict.items():
        lpips_mean += value / len(lpips_dict)
    lpips_dict['LPIPS_%s/mean' % mode] = lpips_mean

    # report LPIPS values
    filename = os.path.join(args.eval_dir, 'LPIPS_%.5i_%s.json' % (step, mode))
    utils.save_json(lpips_dict, filename)

    # calculate and report fid values
    calculate_fid_for_all_tasks(args, domains, step=step, mode=mode)
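# For context, calculate_lpips_given_images above scores diversity as the mean
# pairwise LPIPS distance among the num_outs_per_domain outputs generated from
# the same source image. A minimal sketch of that idea using the standalone
# `lpips` package (the repo ships its own implementation; this is only an
# illustrative stand-in, not the project's code):
import itertools
import lpips
import torch


@torch.no_grad()
def pairwise_lpips(group_of_images):
    # group_of_images: list of (N, 3, H, W) tensors scaled to [-1, 1]
    loss_fn = lpips.LPIPS(net='alex').to(group_of_images[0].device)
    dists = [loss_fn(a, b).mean()
             for a, b in itertools.combinations(group_of_images, 2)]
    return torch.stack(dists).mean().item()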
def calculate_metrics(nets, args, step, mode, eval_loader):
    print('Calculating evaluation metrics...')
    assert mode in ['latent', 'reference']
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    domains = os.listdir(args.style_dir)
    domains.sort()
    num_domains = len(domains)
    print('Number of domains: %d' % num_domains)

    # Optionally the generation step could be skipped when args.eval_dir already
    # holds the expected number of files; that check is disabled here and the
    # images are always regenerated.
    generate_new = True
    tcl_dict = {}

    # prepare task folders style0<->styleN and TCL accumulators
    for d in range(1, num_domains):
        src_domain = "style0"
        trg_domain = "style" + str(d)
        t1 = '%s2%s' % (src_domain, trg_domain)
        t2 = '%s2%s' % (trg_domain, src_domain)
        tcl_dict[t1] = []
        tcl_dict[t2] = []
        if generate_new:
            create_task_folders(args, t1)
            create_task_folders(args, t2)

    # generate
    for i, x_src_all in enumerate(tqdm(eval_loader, total=len(eval_loader))):
        x_real, x_real2, y_org, x_ref, y_trg, mask, flow = x_src_all
        x_real = x_real.to(device)
        x_real2 = x_real2.to(device)
        y_org = y_org.to(device)
        x_ref = x_ref.to(device)
        y_trg = y_trg.to(device)
        mask = mask.to(device)
        flow = flow.to(device)

        N = x_real.size(0)
        masks = nets.fan.get_heatmap(x_real) if args.w_hpf > 0 else None
        for j in range(args.num_outs_per_domain):
            if mode == 'latent':
                z_trg = torch.randn(N, args.latent_dim).to(device)
                s_trg = nets.mapping_network(z_trg, y_trg)
            else:
                s_trg = nets.style_encoder(x_ref, y_trg)

            x_fake = nets.generator(x_real, s_trg, masks=masks)
            x_fake2 = nets.generator(x_real2, s_trg, masks=masks)
            # temporal consistency: warp the first output with the flow and
            # compare it to the second-frame output inside the valid mask
            x_warp = warp(x_fake, flow)
            tcl_err = ((mask * (x_fake2 - x_warp)) ** 2).mean(dim=(1, 2, 3)) ** 0.5

            for k in range(N):
                src_domain = "style" + str(y_org[k].cpu().numpy())
                trg_domain = "style" + str(y_trg[k].cpu().numpy())
                if src_domain == trg_domain:
                    continue
                task = '%s2%s' % (src_domain, trg_domain)
                tcl_dict[task].append(tcl_err[k].cpu().numpy())

                path_ref = os.path.join(args.eval_dir, task + "/ref")
                path_fake = os.path.join(args.eval_dir, task + "/fake")
                if generate_new:
                    filename = os.path.join(
                        path_ref, '%.4i_%.2i.png' % (i * args.val_batch_size + (k + 1), j + 1))
                    utils.save_image(x_ref[k], ncol=1, filename=filename)
                    filename = os.path.join(
                        path_fake, '%.4i_%.2i.png' % (i * args.val_batch_size + (k + 1), j + 1))
                    utils.save_image(x_fake[k], ncol=1, filename=filename)

    # evaluate
    print("computing fid, lpips and tcl")
    tasks = [name for name in os.listdir(args.eval_dir)
             if os.path.isdir(os.path.join(args.eval_dir, name))]
    tasks.sort()

    # fid, lpips and tcl per task
    fid_values = OrderedDict()
    lpips_dict = OrderedDict()
    tcl_values = OrderedDict()
    for task in tasks:
        print(task)
        path_ref = os.path.join(args.eval_dir, task + "/ref")
        path_fake = os.path.join(args.eval_dir, task + "/fake")
        fake_group = load_images(path_fake)

        tcl_data = tcl_dict[task]
        tcl_mean = np.array(tcl_data).mean()
        tcl_values['TCL_%s/%s' % (mode, task)] = float(tcl_mean)

        # calculate LPIPS for each task (e.g. cat2dog, dog2cat)
        lpips_values = []
        fake_chunks = chunks(fake_group, args.num_outs_per_domain)
        for cidx in range(len(fake_chunks)):
            lpips_value = calculate_lpips_given_images(fake_chunks[cidx])
            lpips_values.append(lpips_value)
        lpips_mean = np.array(lpips_values).mean()
        lpips_dict['LPIPS_%s/%s' % (mode, task)] = lpips_mean

        fid_value = calculate_fid_given_paths(paths=[path_ref, path_fake],
                                              img_size=args.img_size,
                                              batch_size=args.val_batch_size)
        fid_values['FID_%s/%s' % (mode, task)] = fid_value

    # calculate the average LPIPS for all tasks
    lpips_mean = 0
    for _, value in lpips_dict.items():
        lpips_mean += value / len(lpips_dict)
    lpips_dict['LPIPS_%s/mean' % mode] = lpips_mean

    # report LPIPS values
    filename = os.path.join(args.eval_dir, 'LPIPS_%.5i_%s.json' % (step, mode))
    utils.save_json(lpips_dict, filename)

    # calculate the average FID for all tasks
    fid_mean = 0
    for _, value in fid_values.items():
        fid_mean += value / len(fid_values)
    fid_values['FID_%s/mean' % mode] = fid_mean

    # report FID values
    filename = os.path.join(args.eval_dir, 'FID_%.5i_%s.json' % (step, mode))
    utils.save_json(fid_values, filename)

    # calculate the average TCL for all tasks
    tcl_mean = 0
    for _, value in tcl_values.items():
        tcl_mean += value / len(tcl_values)
    tcl_values['TCL_%s/mean' % mode] = float(tcl_mean)

    # report TCL values
    filename = os.path.join(args.eval_dir, 'TCL_%.5i_%s.json' % (step, mode))
    utils.save_json(tcl_values, filename)
image_fake = tensor_to_image(image_tensor_fake)
plt.imshow(image_fake, cmap='gray')
plt.show()


#%% interpolation
@torch.no_grad()
def interplation(net, image_content, image_style, content_label, style_label):
    s_con = net.style_encoder(image_content, content_label)
    s_ref = net.style_encoder(image_style, style_label)
    alphas = torch.linspace(0, 1, 10).cuda()
    # alphas = torch.FloatTensor(get_alphas(start=-1, end=1, step=0.5, len_tail=5)).cuda()
    x_fakes = []
    for alpha in alphas:
        # linearly interpolate between the content and reference style codes
        s_inter_ref = torch.lerp(s_con, s_ref, alpha)
        x_fake = net.generator(image_content, s_inter_ref, masks=None)
        x_fakes.append(x_fake)
    x_fakes = torch.cat(x_fakes, dim=0)
    return x_fakes


image_inters = interplation(net, image_content, image_style, content_label, style_label)
save_image(image_inters, image_inters.shape[0],
           ospj('expr', 'checkpoints', 'ccvdg_00', 'result', 'xxx.jpg'))