def collect_video_frames(config):
    """Sample frames from the first input video, align each sampled frame,
    and re-encode the aligned frames as an MJPG video.

    Side effects:
      - writes one PNG per sampled frame under config['output_dir'],
      - overwrites config['input_files'] and config['output_files'] with
        the list of saved frame paths,
      - writes '<output_dir>/frames.avi'.
    """
    cap = cv2.VideoCapture(config['input_files'][0])
    base_path = config['output_dir']
    counter = 0
    files = []
    # (removed the unused `images` list: it re-read every saved PNG into
    # memory and was never consumed)
    while True:
        ret, frame = cap.read()
        if not ret:  # end of stream
            break
        if counter % config['sampling_freq'] == 0:
            image_name = f'{base_path}/frame_{counter}.png'
            cv2.imwrite(image_name, frame)
            # Here align_face is given a *path* and its result is saved as a
            # PIL-style image; other call sites in this file pass arrays and
            # unpack (img, found) -- TODO confirm which overload is correct.
            aligned = align_face(image_name)
            aligned.save(image_name)
            files.append(image_name)
        counter += 1
    config['input_files'] = files
    config['output_files'] = files
    cap.release()
    # NOTE(review): output size is hard-coded to 1024x1024; frames of any
    # other size will produce a broken video -- confirm the aligned size.
    out = cv2.VideoWriter(f'{base_path}/frames.avi',
                          cv2.VideoWriter_fourcc(*'MJPG'),
                          config['video_freq'], (1024, 1024))
    for file in files:
        out.write(cv2.imread(file))
    out.release()
def get_aligned(img_paths):
    """Load each image, align the detected face, and stack the results.

    Returns one array concatenated along axis 0; images in which no face
    is found are skipped.
    """
    aligned = []
    for path in img_paths:
        # convert() normalizes RGBA / grayscale / palette inputs to RGB.
        arr = np.array(Image.open(path).convert('RGB'))
        face, found = align_face(arr)
        if found:
            aligned.append(face)
    return np.concatenate(aligned, axis=0)
def get_aligned(img_paths):
    """Align the face in every image and concatenate the aligned arrays.

    Images in which no face is detected contribute nothing to the output.
    """
    batches = []
    for img_path in img_paths:
        img = Image.open(img_path)
        # Normalize RGBA / grayscale images to 3-channel RGB first.
        rgb = np.array(img.convert('RGB'))
        face, face_found = align_face(rgb)
        if face_found:
            batches.append(face)
    return np.concatenate(batches, axis=0)
def get_aligned(img_paths):
    """Resize each image, align the face, and stack the aligned arrays.

    NOTE(review): relies on module-level globals `path`, `width` and
    `height` -- confirm they are defined before this is called.
    """
    xs = []
    for count, img_path in enumerate(img_paths, start=1):
        print(count)  # progress indicator
        img = Image.open(path + '/' + img_path)
        img = img.resize((width, height), Image.NEAREST)
        # convert() handles RGBA / grayscale inputs before the array cast.
        img = np.array(img.convert('RGB'))
        x, face_found = align_face(img)
        if face_found:
            xs.append(x)
    return np.concatenate(xs, axis=0)
def align_encode():
    """Endpoint: deserialize the posted image, align the face, encode it.

    Returns the projection payload on success, or JSON with
    face_found=False when no face is detected.
    """
    req = request
    raw = get(req, 'img')
    # raw is a serialized array; parse_img would be needed for JPEG/PNG input
    img = deserialise_img(raw)
    img, face_found = align_face(img)
    if not face_found:
        return jsonify(face_found=False)
    img = np.reshape(img, [1, 256, 256, 3])
    print(img.shape)
    z = model.encode(img)
    proj = model.project(z)  # projections; not used downstream
    result = img, z
    return send_proj(result, proj)
def align_encode():
    """Endpoint: deserialize the posted image, align the face, and encode it.

    Responds with the model projection payload, or face_found=False JSON
    when alignment finds no face.
    """
    r = request
    img = get(r, 'img')
    # (removed leftover debug `print(img)` -- it dumped the entire
    # serialized payload to stdout on every request)
    # img = parse_img(img) would be needed if the payload were JPEG/PNG
    img = deserialise_img(img)
    img, face_found = align_face(img)
    if face_found:
        img = np.reshape(img, [1, 256, 256, 3])
        print(img.shape)
        z = model.encode(img)
        proj = model.project(z)  # projections; not used by callers
        result = img, z
        return send_proj(result, proj)
    else:
        return jsonify(face_found=False)
import time
import dlib  # requires cmake
import numpy as np

from align_face import align_face
from Library.Spout import Spout

# Shared-texture bridge: receive frames from a Spout sender named 'input',
# align the face in each frame, and send the result out on 'output'.
spout = Spout(silent = False, width = 1044, height = 1088)
spout.createReceiver('input')
spout.createSender('output')

# python projector.py --target=out/seed0002.png --project-in-wplus --save-video --num-steps=1000 --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl

while True :
    # check on close window
    spout.check()
    # receive data
    data = spout.receive()
    print(data.shape)
    # NOTE(review): align_face is called on an array and its result used
    # directly -- other variants in this file return (img, found); confirm
    # which signature this build of align_face has.
    data = align_face(data)
    print(data.shape)
    spout.send(data)
from pathlib import Path

orig_img_path = Path('imgs')
aligned_imgs_path = Path('aligned_imgs')
if not aligned_imgs_path.exists():
    aligned_imgs_path.mkdir()

import dlib
from align_face import align_face

# Align every image in imgs/ using the landmark-detection model and save
# each result to aligned_imgs/ with an 'aligned_' filename prefix.
# (removed the unnecessary list() materialization of iterdir(), and the
# dead commented-out Image.open code that followed the loop)
for img in orig_img_path.iterdir():
    print(f'aligning {img}')
    try:
        align_face(str(img)).save(aligned_imgs_path / ('aligned_' + img.name))
    except Exception as err:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) and report what actually failed
        # instead of silently assuming "no face".
        print('no face:', err)
def align_images(config):
    """Align the face in every input file, overwriting each file in place."""
    for path in config['input_files']:
        align_face(path).save(path)