# Align every face found in the input images with a dlib shape predictor,
# optionally bicubic-downsample each 1024x1024 aligned crop to
# ``args.output_size``, and write results as ``<stem>_<face_index>.png``.
args = parser.parse_args()

cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1xtxqSWYHADTEO9ptPG5174DbV3FYJPev",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Hoist loop-invariant setup out of the per-face loop: args.output_size never
# changes, so the factor check, the downsampler, and the tensor<->PIL
# transforms only need to be built once instead of once per detected face.
downsampler = None
if args.output_size:
    factor = 1024 // args.output_size
    if args.output_size * factor != 1024:
        # 'assert' is stripped under -O; validate explicitly instead.
        raise ValueError(
            f"output_size must evenly divide 1024, got {args.output_size}")
    downsampler = BicubicDownSample(factor=factor)
    to_tensor = torchvision.transforms.ToTensor()
    to_pil = torchvision.transforms.ToPILImage()

for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)
    for i, face in enumerate(faces):
        if downsampler is not None:
            # NOTE(review): assumes a CUDA device is available — same
            # assumption as the original .cuda() call; confirm for CPU-only use.
            face_tensor = to_tensor(face).unsqueeze(0).cuda()
            face_tensor_lr = downsampler(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = to_pil(face_tensor_lr)
        face.save(output_dir / (im.stem + f"_{i}.png"))
# Variant of the alignment pipeline that only processes ``.png`` inputs and
# writes each aligned (optionally downsampled) face back under its original
# filename in ``args.output_dir``.
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1huhv8PYpNNKbGCLOaYUjOgR1pY5pmbJx",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Hoist loop-invariant setup: args.output_size never changes, so the factor
# check, the downsampler, and the transforms are built once, not per face.
downsampler = None
if args.output_size:
    factor = 1024 // args.output_size
    if args.output_size * factor != 1024:
        # 'assert' is stripped under -O; validate explicitly instead.
        raise ValueError(
            f"output_size must evenly divide 1024, got {args.output_size}")
    downsampler = BicubicDownSample(factor=factor)
    to_tensor = torchvision.transforms.ToTensor()
    to_pil = torchvision.transforms.ToPILImage()

for original_im in os.listdir(args.input_dir):
    if not original_im.endswith('.png'):
        continue
    im_path = os.path.join(args.input_dir, original_im)
    faces = align_face(im_path, predictor)
    for i, face in enumerate(faces):
        if downsampler is not None:
            # NOTE(review): assumes a CUDA device is available — same
            # assumption as the original .cuda() call; confirm for CPU-only use.
            face_tensor = to_tensor(face).unsqueeze(0).cuda()
            face_tensor_lr = downsampler(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = to_pil(face_tensor_lr)
        # BUG FIX: the original saved every face of a multi-face image to the
        # same path, so only the last face survived. Keep the original name
        # for the first face (backward compatible for single-face images) and
        # suffix additional faces with their index.
        if i == 0:
            out_name = original_im
        else:
            stem, ext = os.path.splitext(original_im)
            out_name = f"{stem}_{i}{ext}"
        face.save(output_dir / out_name)