Esempio n. 1
0
    def __init__(self, cache_dir, verbose=True):
        """Load the pretrained synthesis network and the latent Gaussian fit.

        Args:
            cache_dir: directory used to cache downloaded model weights.
            verbose: print progress messages when True.
        """
        super(PULSE, self).__init__()

        # NOTE(review): hard CUDA requirement — .cuda() raises without a GPU.
        self.synthesis = G_synthesis().cuda()
        self.verbose = verbose

        cache_dir = Path(cache_dir)
        cache_dir.mkdir(parents=True, exist_ok=True)
        if self.verbose: print("Loading Synthesis Network")
        with open_url(
                "https://drive.google.com/uc?id=1hBFEf4ffqXZTtPVCsKjW1_esnWtn-suZ",
                cache_dir=cache_dir,
                verbose=verbose) as f:
            self.synthesis.load_state_dict(torch.load(f))

        # The synthesis network acts as a fixed prior; freeze its weights.
        for param in self.synthesis.parameters():
            param.requires_grad = False

        self.lrelu = torch.nn.LeakyReLU(negative_slope=0.2)

        # Reuse cached latent statistics if available; otherwise compute them
        # once from the mapping network and cache to the working directory.
        if Path("gaussian_fit.pt").exists():
            self.gaussian_fit = torch.load("gaussian_fit.pt")
        else:
            if self.verbose: print("\tLoading Mapping Network")
            mapping = G_mapping().cuda()

            with open_url(
                    "https://drive.google.com/uc?id=1JojqBMKiahS5qGPXK7KFcngt8pzaczem",
                    cache_dir=cache_dir,
                    verbose=verbose) as f:
                mapping.load_state_dict(torch.load(f))

            if self.verbose: print("\tRunning Mapping Network")
            with torch.no_grad():
                # Fixed seed so the fitted mean/std are reproducible.
                torch.manual_seed(0)
                latent = torch.randn((1000000, 512),
                                     dtype=torch.float32,
                                     device="cuda")
                # LeakyReLU(5) — presumably the inverse of the lrelu(0.2)
                # above (slope 1/0.2 == 5); confirm against PULSE paper.
                latent_out = torch.nn.LeakyReLU(5)(mapping(latent))
                self.gaussian_fit = {
                    "mean": latent_out.mean(0),
                    "std": latent_out.std(0)
                }
                torch.save(self.gaussian_fit, "gaussian_fit.pt")
                if self.verbose: print("\tSaved \"gaussian_fit.pt\"")
Esempio n. 2
0
    def __init__(self, cache_dir, verbose=True):
        """Set up the frozen synthesis network and the Gaussian latent fit.

        Downloads pretrained weights into ``cache_dir`` (created if missing)
        and either loads a cached ``gaussian_fit.pt`` or computes the mean and
        std of the mapping network's transformed output and caches it to disk.
        """
        super(PULSE, self).__init__()

        self.verbose = verbose
        self.synthesis = G_synthesis().cuda()

        cache_dir = Path(cache_dir)
        cache_dir.mkdir(parents=True, exist_ok=True)

        if self.verbose:
            print("Loading Synthesis Network")
        with open_url(
                "https://drive.google.com/uc?id=1rze7m8jqj9qT6SHLdKw98jlDJv5tgOpf",
                cache_dir=cache_dir,
                verbose=verbose) as weights_file:
            self.synthesis.load_state_dict(torch.load(weights_file))

        # The synthesis network is a fixed prior: no gradients needed.
        for p in self.synthesis.parameters():
            p.requires_grad = False

        self.lrelu = torch.nn.LeakyReLU(negative_slope=0.2)

        if Path("gaussian_fit.pt").exists():
            self.gaussian_fit = torch.load("gaussian_fit.pt")
        else:
            if self.verbose:
                print("\tLoading Mapping Network")
            mapping_net = G_mapping().cuda()

            with open_url(
                    "https://drive.google.com/uc?id=14R6iHGf5iuVx3DMNsACAl7eBr7Vdpd0k",
                    cache_dir=cache_dir,
                    verbose=verbose) as weights_file:
                mapping_net.load_state_dict(torch.load(weights_file))

            if self.verbose:
                print("\tRunning Mapping Network")
            with torch.no_grad():
                torch.manual_seed(0)  # reproducible statistics
                samples = torch.randn((1000000, 512),
                                      dtype=torch.float32,
                                      device="cuda")
                transformed = torch.nn.LeakyReLU(5)(mapping_net(samples))
                self.gaussian_fit = {
                    "mean": transformed.mean(0),
                    "std": transformed.std(0),
                }
                torch.save(self.gaussian_fit, "gaussian_fit.pt")
                if self.verbose:
                    print("\tSaved \"gaussian_fit.pt\"")
Esempio n. 3
0
    def __init__(self, cache_dir, verbose=True):
        """Load the pretrained synthesis network and the latent Gaussian fit.

        Args:
            cache_dir: directory used to cache downloaded model weights.
            verbose: print progress messages when True.
        """
        super(PULSE, self).__init__()

        self.synthesis = G_synthesis().cuda()
        self.verbose = verbose

        cache_dir = Path(cache_dir)
        cache_dir.mkdir(parents=True, exist_ok=True)
        if self.verbose: print("Loading Synthesis Network")
        with open_url(
                "https://drive.google.com/uc?id=1qIjaYfppKbgjMIdwB47dySmQNSycrmPD",
                cache_dir=cache_dir,
                verbose=verbose) as f:
            self.synthesis.load_state_dict(torch.load(f))

        # The synthesis network acts as a fixed prior; freeze its weights.
        for param in self.synthesis.parameters():
            param.requires_grad = False

        self.lrelu = torch.nn.LeakyReLU(negative_slope=0.2)

        # Reuse cached latent statistics if available; otherwise compute them
        # once from the mapping network and cache to the working directory.
        if Path("gaussian_fit.pt").exists():
            self.gaussian_fit = torch.load("gaussian_fit.pt")
        else:
            if self.verbose: print("\tLoading Mapping Network")
            mapping = G_mapping().cuda()

            with open_url(
                    "https://drive.google.com/uc?id=14g0Jh0hAFaLj7BmKgHTDVcobwOqC778v",
                    cache_dir=cache_dir,
                    verbose=verbose) as f:
                mapping.load_state_dict(torch.load(f))

            if self.verbose: print("\tRunning Mapping Network")
            with torch.no_grad():
                # Fixed seed so the fitted mean/std are reproducible.
                torch.manual_seed(0)
                latent = torch.randn((1000000, 512),
                                     dtype=torch.float32,
                                     device="cuda")
                # LeakyReLU(5) — presumably the inverse of the lrelu(0.2)
                # above (slope 1/0.2 == 5); confirm against PULSE paper.
                latent_out = torch.nn.LeakyReLU(5)(mapping(latent))
                self.gaussian_fit = {
                    "mean": latent_out.mean(0),
                    "std": latent_out.std(0)
                }
                torch.save(self.gaussian_fit, "gaussian_fit.pt")
                if self.verbose: print("\tSaved \"gaussian_fit.pt\"")
Esempio n. 4
0
    def __init__(self, cache_dir, verbose=True):
        """Load the pretrained synthesis network and the latent Gaussian fit.

        Args:
            cache_dir: directory used to cache downloaded model weights.
            verbose: print progress messages when True.
        """
        super(PULSE, self).__init__()

        self.synthesis = G_synthesis().cuda()
        self.verbose = verbose

        cache_dir = Path(cache_dir)
        cache_dir.mkdir(parents=True, exist_ok=True)
        if self.verbose: print("Loading Synthesis Network")
        with open_url(
                "https://drive.google.com/uc?id=1TQSkJkoVdp8G_9Pyy7h3aBGxSY8rNau0",
                cache_dir=cache_dir,
                verbose=verbose) as f:
            self.synthesis.load_state_dict(torch.load(f))

        # The synthesis network acts as a fixed prior; freeze its weights.
        for param in self.synthesis.parameters():
            param.requires_grad = False

        self.lrelu = torch.nn.LeakyReLU(negative_slope=0.2)

        # Reuse cached latent statistics if available; otherwise compute them
        # once from the mapping network and cache to the working directory.
        if Path("gaussian_fit.pt").exists():
            self.gaussian_fit = torch.load("gaussian_fit.pt")
        else:
            if self.verbose: print("\tLoading Mapping Network")
            mapping = G_mapping().cuda()

            with open_url(
                    "https://drive.google.com/uc?id=1ZBUs3rQjIZu2LZl_-q8cJWXhjSofxyrC",
                    cache_dir=cache_dir,
                    verbose=verbose) as f:
                mapping.load_state_dict(torch.load(f))

            if self.verbose: print("\tRunning Mapping Network")
            with torch.no_grad():
                # Fixed seed so the fitted mean/std are reproducible.
                torch.manual_seed(0)
                latent = torch.randn((1000000, 512),
                                     dtype=torch.float32,
                                     device="cuda")
                # LeakyReLU(5) — presumably the inverse of the lrelu(0.2)
                # above (slope 1/0.2 == 5); confirm against PULSE paper.
                latent_out = torch.nn.LeakyReLU(5)(mapping(latent))
                self.gaussian_fit = {
                    "mean": latent_out.mean(0),
                    "std": latent_out.std(0)
                }
                torch.save(self.gaussian_fit, "gaussian_fit.pt")
                if self.verbose: print("\tSaved \"gaussian_fit.pt\"")
Esempio n. 5
0
    def __init__(self, cache_dir, verbose=True):
        """Load the pretrained synthesis network and the latent Gaussian fit.

        Args:
            cache_dir: directory used to cache downloaded model weights.
            verbose: print progress messages when True.
        """
        super(PULSE, self).__init__()

        self.synthesis = G_synthesis().cuda()
        self.verbose = verbose

        cache_dir = Path(cache_dir)
        cache_dir.mkdir(parents=True, exist_ok=True)
        if self.verbose: print("Loading Synthesis Network")
        with open_url(
                "https://drive.google.com/uc?id=1VRuKSuYK8eOIW_5Mhm2LEUG0qF5hBC2S",
                cache_dir=cache_dir,
                verbose=verbose) as f:
            self.synthesis.load_state_dict(torch.load(f))

        # The synthesis network acts as a fixed prior; freeze its weights.
        for param in self.synthesis.parameters():
            param.requires_grad = False

        self.lrelu = torch.nn.LeakyReLU(negative_slope=0.2)

        # Reuse cached latent statistics if available; otherwise compute them
        # once from the mapping network and cache to the working directory.
        if Path("gaussian_fit.pt").exists():
            self.gaussian_fit = torch.load("gaussian_fit.pt")
        else:
            if self.verbose: print("\tLoading Mapping Network")
            mapping = G_mapping().cuda()

            with open_url(
                    "https://drive.google.com/uc?id=1Kpj4AoF9fNEH77sTEOSbp_wodZz5M6qy",
                    cache_dir=cache_dir,
                    verbose=verbose) as f:
                mapping.load_state_dict(torch.load(f))

            if self.verbose: print("\tRunning Mapping Network")
            with torch.no_grad():
                # Fixed seed so the fitted mean/std are reproducible.
                torch.manual_seed(0)
                latent = torch.randn((1000000, 512),
                                     dtype=torch.float32,
                                     device="cuda")
                # LeakyReLU(5) — presumably the inverse of the lrelu(0.2)
                # above (slope 1/0.2 == 5); confirm against PULSE paper.
                latent_out = torch.nn.LeakyReLU(5)(mapping(latent))
                self.gaussian_fit = {
                    "mean": latent_out.mean(0),
                    "std": latent_out.std(0)
                }
                torch.save(self.gaussian_fit, "gaussian_fit.pt")
                if self.verbose: print("\tSaved \"gaussian_fit.pt\"")
Esempio n. 6
0
    def __init__(self, cache_dir, verbose=True):
        """Load the pretrained synthesis network and the latent Gaussian fit.

        Args:
            cache_dir: directory used to cache downloaded model weights.
            verbose: print progress messages when True.
        """
        super(PULSE, self).__init__()

        self.synthesis = G_synthesis().cuda()
        self.verbose = verbose

        cache_dir = Path(cache_dir)
        cache_dir.mkdir(parents=True, exist_ok=True)
        if self.verbose: print("Loading Synthesis Network")
        with open_url(
                "https://drive.google.com/uc?id=1en2t4bObHdT1RqDi5QBT34ETQGqGBDYn",
                cache_dir=cache_dir,
                verbose=verbose) as f:
            self.synthesis.load_state_dict(torch.load(f))

        # The synthesis network acts as a fixed prior; freeze its weights.
        for param in self.synthesis.parameters():
            param.requires_grad = False

        self.lrelu = torch.nn.LeakyReLU(negative_slope=0.2)

        # Reuse cached latent statistics if available; otherwise compute them
        # once from the mapping network and cache to the working directory.
        if Path("gaussian_fit.pt").exists():
            self.gaussian_fit = torch.load("gaussian_fit.pt")
        else:
            if self.verbose: print("\tLoading Mapping Network")
            mapping = G_mapping().cuda()
            with open_url(
                    "https://drive.google.com/uc?id=1qFZNa2BvTMduy2tMLtuDzhP5EPIkzXDz",
                    cache_dir=cache_dir,
                    verbose=verbose) as f:
                mapping.load_state_dict(torch.load(f))

            if self.verbose: print("\tRunning Mapping Network")
            with torch.no_grad():
                # Fixed seed so the fitted mean/std are reproducible.
                torch.manual_seed(0)
                latent = torch.randn((1000000, 512),
                                     dtype=torch.float32,
                                     device="cuda")
                # LeakyReLU(5) — presumably the inverse of the lrelu(0.2)
                # above (slope 1/0.2 == 5); confirm against PULSE paper.
                latent_out = torch.nn.LeakyReLU(5)(mapping(latent))
                self.gaussian_fit = {
                    "mean": latent_out.mean(0),
                    "std": latent_out.std(0)
                }
                torch.save(self.gaussian_fit, "gaussian_fit.pt")
                if self.verbose: print("\tSaved \"gaussian_fit.pt\"")
Esempio n. 7
0
    def __init__(self, cache_dir, verbose=True):
        """Load the pretrained synthesis network and the latent Gaussian fit.

        Args:
            cache_dir: directory used to cache downloaded model weights.
            verbose: print progress messages when True.
        """
        super(PULSE, self).__init__()

        self.synthesis = G_synthesis().cuda()
        self.verbose = verbose

        cache_dir = Path(cache_dir)
        cache_dir.mkdir(parents=True, exist_ok=True)
        if self.verbose: print("Loading Synthesis Network")
        with open_url(
                "https://drive.google.com/uc?id=1FFo8iI59jMx7z5Xhsd1F3pkZk_953vbx",
                cache_dir=cache_dir,
                verbose=verbose) as f:
            self.synthesis.load_state_dict(torch.load(f))

        # The synthesis network acts as a fixed prior; freeze its weights.
        for param in self.synthesis.parameters():
            param.requires_grad = False

        self.lrelu = torch.nn.LeakyReLU(negative_slope=0.2)

        # Reuse cached latent statistics if available; otherwise compute them
        # once from the mapping network and cache to the working directory.
        if Path("gaussian_fit.pt").exists():
            self.gaussian_fit = torch.load("gaussian_fit.pt")
        else:
            if self.verbose: print("\tLoading Mapping Network")
            mapping = G_mapping().cuda()

            # BUG FIX: the URL scheme was misspelled "hhttps://", which would
            # make the mapping-network weight download fail at runtime.
            with open_url(
                    "https://drive.google.com/uc?id=18chtBAQXndzR86V6gvMZ8DW_xn_akYzf",
                    cache_dir=cache_dir,
                    verbose=verbose) as f:
                mapping.load_state_dict(torch.load(f))

            if self.verbose: print("\tRunning Mapping Network")
            with torch.no_grad():
                # Fixed seed so the fitted mean/std are reproducible.
                torch.manual_seed(0)
                latent = torch.randn((1000000, 512),
                                     dtype=torch.float32,
                                     device="cuda")
                # LeakyReLU(5) — presumably the inverse of the lrelu(0.2)
                # above (slope 1/0.2 == 5); confirm against PULSE paper.
                latent_out = torch.nn.LeakyReLU(5)(mapping(latent))
                self.gaussian_fit = {
                    "mean": latent_out.mean(0),
                    "std": latent_out.std(0)
                }
                torch.save(self.gaussian_fit, "gaussian_fit.pt")
                if self.verbose: print("\tSaved \"gaussian_fit.pt\"")
Esempio n. 8
0
# CLI flag: directory used to cache downloaded model weights between runs.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=11Ix7AQ_Ct2kiEdPDz-OJCV53e27vbncJ",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Esempio n. 9
0
# CLI flag: directory used to cache downloaded model weights between runs.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1h0P0Cdxqbe6u_FSu2fXulrONw5uOECn6",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Esempio n. 10
0
# CLI flag: directory used to cache downloaded model weights between runs.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1hVMlehdzDyrxnkyof3sw9h4SfWDl_d9l",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Esempio n. 11
0
# CLI flags: downscale size, RNG seed, weight cache, and predictor source.
parser.add_argument('-output_size', type=int, default=32, help='size to downscale the input images to, must be power of 2')
parser.add_argument('-seed', type=int, help='manual seed to use')
parser.add_argument('-cache_dir', type=str, default='cache', help='cache directory for model weights')
parser.add_argument('-shape_predictor_path', type=str, default=SHAPE_PREDICTOR_PATH, help='SHAPE_PREDICTOR_PATH')


args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True,exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f=open_url(args.shape_predictor_path, cache_dir=cache_dir, return_path=True)
predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
# list() gives tqdm a known length for its progress bar.
for im in tqdm(list(Path(args.input_dir).glob("*.*"))):
    faces = align_face(str(im),predictor)
    for i,face in enumerate(faces):
        if(args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024//args.output_size
            assert args.output_size*factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)

        # One output file per detected face: <image-stem>_<face-index>.png
        face.save(Path(args.output_dir) / (im.stem+f"_{i}.png"))
Esempio n. 12
0
# CLI flag: directory used to cache downloaded model weights between runs.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1hZDxpPP63Ioqw_XfibsJZ4Qg0zbl8Ah1",
    cache_dir=cache_dir,
    return_path=True)
# Alternative (offline): reuse a previously downloaded predictor file, e.g.
#f=glob.glob(os.path.join('cache', 'shape_predictor_68_face_landmarks.dat'))[0]

predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
Esempio n. 13
0
# Command-line interface for preprocessing raw photos into aligned inputs.
parser.add_argument('-input_dir', type=str, default='realpics', help='directory with unprocessed images')
parser.add_argument('-output_dir', type=str, default='input', help='output directory')
parser.add_argument('-output_size', type=int, default=32, help='size to downscale the input images to, must be power of 2')
parser.add_argument('-seed', type=int, help='manual seed to use')
parser.add_argument('-cache_dir', type=str, default='cache', help='cache directory for model weights')

args = parser.parse_args()

# Create the cache and output directories up front.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

print("Downloading Shape Predictor")
predictor_path = open_url("https://drive.google.com/uc?id=1huhv8PYpNNKbGCLOaYUjOgR1pY5pmbJx", cache_dir=cache_dir, return_path=True)
predictor = dlib.shape_predictor(predictor_path)

# Align every face found in each input image, optionally downscaling it.
for image_path in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(image_path), predictor)

    for face_index, face in enumerate(faces):
        if args.output_size:
            # Integer downscale factor; output_size must divide 1024 exactly.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            downsampler = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(0).cuda()
            face_tensor_lr = downsampler(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)

        # One output file per detected face: <image-stem>_<face-index>.png
        face.save(Path(args.output_dir) / (image_path.stem + f"_{face_index}.png"))
Esempio n. 14
0
# CLI flag: directory used to cache downloaded model weights between runs.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1EKMAfEzhbbluTAG3s7pSQXYdJwqU7KuU",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Esempio n. 15
0
# CLI flag: directory used to cache downloaded model weights between runs.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1HoWjH10Z3aj9F2bwZ_BLEjs0doEX54Oa",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Esempio n. 16
0
# CLI flag: directory used to cache downloaded model weights between runs.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1gPj914bUjbNSrVHcp2VFHsTkhUc8qlSt",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Esempio n. 17
0
# CLI flag: directory used to cache downloaded model weights between runs.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1YJay0lwJm1wSwgTc1pWW3AR-4KluXyj2",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Esempio n. 18
0
# CLI flag: directory used to cache downloaded model weights between runs.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Fetch the dlib face-landmark shape predictor (cached after first download).
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1MCwBMkB1tQTD0BsfvKes4ZChaTNVpXo7",
    cache_dir=cache_dir,
    return_path=True)

predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
Esempio n. 19
0
# CLI flags for preprocessing raw photos into aligned, downscaled inputs.
parser.add_argument('-input_dir', type=str, default='realpics', help='directory with unprocessed images')
parser.add_argument('-output_dir', type=str, default='input', help='output directory')
parser.add_argument('-output_size', type=int, default=32, help='size to downscale the input images to, must be power of 2')
parser.add_argument('-seed', type=int, help='manual seed to use')
parser.add_argument('-cache_dir', type=str, default='cache', help='cache directory for model weights')

args = parser.parse_args()

# Make sure the cache and output directories exist before downloading/writing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True,exist_ok=True)
# Downloads shape_predictor_68_face_landmarks.dat (dlib landmark model).
print("Downloading Shape Predictor")
f=open_url("https://drive.google.com/uc?id=10lhZigON47XliK_yNN0E7GSMChQzSvOD", cache_dir=cache_dir, return_path=True)
predictor = dlib.shape_predictor(f)

# Align each detected face; optionally bicubic-downscale to output_size.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im),predictor)

    for i,face in enumerate(faces):
        if(args.output_size):
            # Assumes aligned faces are 1024px; output_size must divide 1024.
            factor = 1024//args.output_size
            assert args.output_size*factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)

        # One output file per detected face: <image-stem>_<face-index>.png
        face.save(Path(args.output_dir) / (im.stem+f"_{i}.png"))
Example 20
# NOTE(review): scraped snippet — `parser` is constructed earlier in the
# original script, and the final `face.save(...)` step is cut off here;
# code kept byte-identical.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib facial-landmark model file.
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1jLDPtyKEUfIoGPWxoCy1hDSNxZnS_woj",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align every face found in each input image; when -output_size is set,
# bicubic-downsample the aligned crop to that resolution.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            factor = 1024 // args.output_size
            # assumes aligned crops are 1024x1024 — TODO confirm in align_face
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Example 21
# NOTE(review): scraped snippet — `parser` is constructed earlier in the
# original script, and the final `face.save(...)` step is cut off here;
# code kept byte-identical.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib facial-landmark model file.
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1GXkQ-S6ccn07aNbygsqt6ErT6k6Yyfr9",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align every face found in each input image; when -output_size is set,
# bicubic-downsample the aligned crop to that resolution.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            factor = 1024 // args.output_size
            # assumes aligned crops are 1024x1024 — TODO confirm in align_face
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Example 22
# NOTE(review): scraped snippet — `parser` is constructed earlier in the
# original script, and the final `face.save(...)` step is cut off here;
# code kept byte-identical.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib facial-landmark model file.
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1i_pdRRm88tuJ1NJ1m1ARvt-W5lob4aJO",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align every face found in each input image; when -output_size is set,
# bicubic-downsample the aligned crop to that resolution.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            factor = 1024 // args.output_size
            # assumes aligned crops are 1024x1024 — TODO confirm in align_face
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Example 23
# NOTE(review): scraped snippet — `parser` is constructed earlier in the
# original script, and the final `face.save(...)` step is cut off here;
# code kept byte-identical.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib facial-landmark model file.
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1-dP98XN0kr9WP6qej7uXiLHZ4-1Se0D_",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align every face found in each input image; when -output_size is set,
# bicubic-downsample the aligned crop to that resolution.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            factor = 1024 // args.output_size
            # assumes aligned crops are 1024x1024 — TODO confirm in align_face
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Example 24
# NOTE(review): scraped snippet — `parser` is constructed earlier in the
# original script, and the final `face.save(...)` step is cut off here;
# code kept byte-identical.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib facial-landmark model file.
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1pp6d2bMxnq33Plyo5d013TFRrojcPHeL",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align every face found in each input image; when -output_size is set,
# bicubic-downsample the aligned crop to that resolution.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            factor = 1024 // args.output_size
            # assumes aligned crops are 1024x1024 — TODO confirm in align_face
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Example 25
# NOTE(review): scraped snippet — `parser` is constructed earlier in the
# original script, and the final `face.save(...)` step is cut off here;
# code kept byte-identical.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib facial-landmark model file.
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1zSa6yFfAUkF3PwSqFkJIpU4vXxKFbXQz",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align every face found in each input image; when -output_size is set,
# bicubic-downsample the aligned crop to that resolution.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            factor = 1024 // args.output_size
            # assumes aligned crops are 1024x1024 — TODO confirm in align_face
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Example 26
# Face-alignment preprocessing: detect and align faces in every image under
# -input_dir, optionally downscale each aligned crop, write PNGs to -output_dir.
# NOTE(review): `parser` and the helper names (open_url, align_face, dlib,
# BicubicDownSample, torchvision) are defined earlier in the original script.
parser.add_argument('-input_dir', type=str, default='realpics', help='directory with unprocessed images')
parser.add_argument('-output_dir', type=str, default='input', help='output directory')
parser.add_argument('-output_size', type=int, default=32, help='size to downscale the input images to, must be power of 2')
parser.add_argument('-seed', type=int, help='manual seed to use')
parser.add_argument('-cache_dir', type=str, default='cache', help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib landmark model used by align_face.
print("Downloading Shape Predictor")
f = open_url("https://drive.google.com/uc?id=1tFBiWw9JQZLLHQFhJW3R5MTkKZZc59mo", cache_dir=cache_dir, return_path=True)
predictor = dlib.shape_predictor(f)

for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if args.output_size:
            # Aligned crops are downsampled from 1024x1024 to the requested
            # size via bicubic filtering on the GPU.
            factor = 1024 // args.output_size
            if args.output_size * factor != 1024:
                # Explicit validation instead of `assert`, which is stripped
                # when Python runs with -O.
                raise ValueError("-output_size must evenly divide 1024")
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)

        # Reuse the already-built `output_dir` Path instead of re-wrapping
        # args.output_dir.
        face.save(output_dir / (im.stem + f"_{i}.png"))
Example 27
# NOTE(review): scraped snippet — `parser` is constructed earlier in the
# original script, and the final `face.save(...)` step is cut off here;
# code kept byte-identical.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib facial-landmark model file.
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1oJRF5oYaOBrAMx9V6WoezhcinaAOo0v3",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align every face found in each input image; when -output_size is set,
# bicubic-downsample the aligned crop to that resolution.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            factor = 1024 // args.output_size
            # assumes aligned crops are 1024x1024 — TODO confirm in align_face
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Example 28
# Face-alignment preprocessing: detect and align faces in every image under
# -input_dir, optionally downscale each aligned crop, write PNGs to -output_dir.
# NOTE(review): `parser` and the helper names (open_url, align_face, dlib,
# BicubicDownSample, torchvision) are defined earlier in the original script.
parser.add_argument('-input_dir', type=str, default='realpics', help='directory with unprocessed images')
parser.add_argument('-output_dir', type=str, default='input', help='output directory')
parser.add_argument('-output_size', type=int, default=32, help='size to downscale the input images to, must be power of 2')
parser.add_argument('-seed', type=int, help='manual seed to use')
parser.add_argument('-cache_dir', type=str, default='cache', help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib landmark model used by align_face.
print("Downloading Shape Predictor")
f = open_url("https://drive.google.com/uc?id=1wbgY2OH4uUFwUotRaV_s7md586dqEBt2", cache_dir=cache_dir, return_path=True)
predictor = dlib.shape_predictor(f)

for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if args.output_size:
            # Aligned crops are downsampled from 1024x1024 to the requested
            # size via bicubic filtering on the GPU.
            factor = 1024 // args.output_size
            if args.output_size * factor != 1024:
                # Explicit validation instead of `assert`, which is stripped
                # when Python runs with -O.
                raise ValueError("-output_size must evenly divide 1024")
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)

        # Reuse the already-built `output_dir` Path instead of re-wrapping
        # args.output_dir.
        face.save(output_dir / (im.stem + f"_{i}.png"))
Example 29
# NOTE(review): scraped snippet — `parser` is constructed earlier in the
# original script, and the final `face.save(...)` step is cut off here;
# code kept byte-identical.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib facial-landmark model file.
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=1xtxqSWYHADTEO9ptPG5174DbV3FYJPev",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align every face found in each input image; when -output_size is set,
# bicubic-downsample the aligned crop to that resolution.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            factor = 1024 // args.output_size
            # assumes aligned crops are 1024x1024 — TODO confirm in align_face
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
Example 30
# NOTE(review): scraped snippet — `parser` is constructed earlier in the
# original script, and the final `face.save(...)` step is cut off here;
# code kept byte-identical.
parser.add_argument('-cache_dir',
                    type=str,
                    default='cache',
                    help='cache directory for model weights')

args = parser.parse_args()

# Create the model-weight cache and the output directory if missing.
cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib facial-landmark model file.
print("Downloading Shape Predictor")
f = open_url(
    "https://drive.google.com/uc?id=15lynLDEsiOruiaWvYlLUbr7UigTTbycP",
    cache_dir=cache_dir,
    return_path=True)
predictor = dlib.shape_predictor(f)

# Align every face found in each input image; when -output_size is set,
# bicubic-downsample the aligned crop to that resolution.
for im in Path(args.input_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if (args.output_size):
            factor = 1024 // args.output_size
            # assumes aligned crops are 1024x1024 — TODO confirm in align_face
            assert args.output_size * factor == 1024
            D = BicubicDownSample(factor=factor)
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(
                0).cuda()
            face_tensor_lr = D(face_tensor)[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)