def read_config(file):
    """Read the user configuration from *file* as JSON.

    Falls back to launching the interactive setup when the file is
    missing or does not contain valid JSON.

    Parameters:
        file: path to the JSON configuration file.

    Returns:
        The parsed configuration object, or None when reading failed
        and the setup flow was started instead.
    """
    try:
        # Keep the try body minimal: only the open/parse can fail here.
        with open(file, "r") as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit and hide unrelated bugs.
        setup.setup_config()
        print("Error: Please configure the config file now or start the setup if it is your first time.")
        return None
def get_landmark(filepath, args, face_alignment_path, face_detector_path):
    """Detect 68 facial landmarks in the image at *filepath*.

    Uses dlib when ``args.use_dlib`` is true, otherwise the PSGAN
    face-alignment preprocessor.

    Parameters:
        filepath: path to the input image.
        args: parsed CLI arguments; only ``use_dlib`` is read here.
        face_alignment_path: model path forwarded to PreProcess.
        face_detector_path: model path forwarded to PreProcess.

    Returns:
        np.ndarray of shape (68, 2) with (x, y) landmark coordinates
        (on the dlib path; the PreProcess path returns whatever the
        preprocessor produces — presumably the same shape, confirm).

    Raises:
        ValueError: on the dlib path when no face is detected.
    """
    if args.use_dlib:
        import dlib
        model_path = './shape_predictor_68_face_landmarks.dat'
        detector = dlib.get_frontal_face_detector()
        if not os.path.exists(model_path):
            # One-time download of the dlib landmark model. Fixed URL,
            # no untrusted input reaches the shell.
            print('Downloading files for aligning face image...')
            os.system(
                'wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
            )
            os.system('bzip2 -dk shape_predictor_68_face_landmarks.dat.bz2')
            os.system('rm shape_predictor_68_face_landmarks.dat.bz2')
        predictor = dlib.shape_predictor(model_path)
        img = dlib.load_rgb_image(filepath)
        dets = detector(img, 1)
        shape = None
        for d in dets:
            # Keep the last detection, matching the original behavior
            # when several faces are present.
            shape = predictor(img, d)
        if shape is None:
            # Bug fix: the original fell through with `lm` unbound and
            # crashed with UnboundLocalError; fail with a clear error.
            raise ValueError("No face detected in {}".format(filepath))
        # 68 landmark points -> (68, 2) array of pixel coordinates.
        lm = np.array([[pt.x, pt.y] for pt in shape.parts()])
    else:
        from psgan.preprocess import PreProcess
        from setup import setup_config
        config = setup_config(args)
        preprocess = PreProcess(config, args, None, face_alignment_path,
                                face_detector_path, need_parser=False,
                                return_landmarks=True)
        image = PIL.Image.open(filepath).convert("RGB")
        # PreProcess expects a fixed 256x256 input.
        image = image.resize((256, 256))
        lm = preprocess(image)
    return lm
def main(save_path='transferred_image.png'):
    """Run makeup transfer from every reference image onto one source image.

    Builds the PSGAN inference pipeline from CLI arguments, then loops
    over all files in ``--reference_dir`` in random order and transfers
    each reference's makeup onto ``--source_path``.

    Parameters:
        save_path: output file for the transferred image. NOTE(review):
            every loop iteration overwrites this same path, so only the
            last processed reference survives on disk — confirm intended.
    """
    parser = setup_argparser()
    parser.add_argument(
        "--source_path",
        default=
        "E:/meizhuanghouduan/PSGAN/assets/images/non-makeup/xfsy_0106.png",
        metavar="FILE",
        help="path to source image")
    parser.add_argument(
        "--reference_dir",
        default="E:/meizhuanghouduan/PSGAN/assets/images/makeup",
        help="path to reference images")
    parser.add_argument("--speed", action="store_true", help="test speed")
    parser.add_argument("--device", default="cpu",
                        help="device used for inference")
    parser.add_argument(
        "--model_path",
        default="E:/meizhuanghouduan/PSGAN/assets/models/G.pth",
        help="model for loading")
    args = parser.parse_args()
    config = setup_config(args)

    # Build the generator and the post-processing step once, up front.
    inference = Inference(config, args.device, args.model_path)
    postprocess = PostProcess(config)

    source = Image.open(args.source_path).convert("RGB")
    reference_paths = list(Path(args.reference_dir).glob("*"))
    # Visit the reference images in random order.
    np.random.shuffle(reference_paths)
    for reference_path in reference_paths:
        if not reference_path.is_file():
            print(reference_path, "is not a valid file.")
            continue
        reference = Image.open(reference_path).convert("RGB")

        # Transfer the makeup from reference to source; `face` is the
        # detected face region in the source image.
        image, face = inference.transfer(source, reference, with_face=True)
        source_crop = source.crop(
            (face.left(), face.top(), face.right(), face.bottom()))
        image = postprocess(source_crop, image)
        # Overwrites save_path on every iteration (see docstring note).
        image.save(save_path)

        if args.speed:
            # Rough throughput benchmark. NOTE(review): this sits inside
            # the loop, so with --speed it reruns 100 transfers for every
            # reference image — confirm that is intended.
            import time
            start = time.time()
            for _ in range(100):
                inference.transfer(source, reference)
            print("Time cost for 100 iters: ", time.time() - start)
"""Training entry point for PSGAN."""
from torch.backends import cudnn
from dataloder import get_loader
from psgan.solver import Solver
from setup import setup_config, setup_argparser


def train_net(config):
    """Build the data loader and run the PSGAN training loop."""
    # Let cuDNN auto-tune convolution algorithms; pays off when input
    # sizes stay fixed across training iterations.
    cudnn.benchmark = True
    loader = get_loader(config)
    trainer = Solver(config, data_loader=loader, device=config.device)
    trainer.train()


if __name__ == '__main__':
    cli_args = setup_argparser().parse_args()
    cfg = setup_config(cli_args)
    print("Call with args:")
    print(cfg)
    train_net(cfg)

# Required third-party packages (pip install):
#   requests, matplotlib, fvcore, dlib==19.6.1
# Server bootstrap (Python 2 — note the print statement): start the
# Flash policy daemon, load configuration, set up logging, then verify
# the listen address/port is usable before continuing.
import os
import sys
import socket_engine
from setup import setup_config, mark_pid_on_fs
from liblogging import init_logger
import liblogging
from flashpolicyd import policy_server

if __name__ == '__main__':
    print "Trying to start the flash policy daemon"
    # Flash socket policy server on the standard policy port 843.
    server = policy_server(843, './flashpolicy.xml')
    server.start()

    config = setup_config("konext")
    port = int(config['port'])          # port to listen on
    address = config['server_address']  # address to bind
    app_name = config['app_name']       # application name used in log output

    # Initialize the process-wide logger shared via the liblogging module.
    liblogging.logger = init_logger(config['format'], app_name,
                                    config['log_file'],
                                    config['max_logfile_size'],
                                    config['nb_logfile'])

    # Abort early when the endpoint cannot be used.
    # NOTE(review): assumes check_server returns truthy when the
    # address/port is available — confirm against socket_engine.
    if not socket_engine.check_server(address, port):
        liblogging.log("Port or address is busy. Please refer to logs to know more about the failure", liblogging.ERROR)
        sys.exit(1)  # exiting due to error
# This is server.py file import os import sys import socket_engine from setup import setup_config, mark_pid_on_fs from liblogging import init_logger import liblogging from flashpolicyd import policy_server if __name__ == '__main__': print "Trying to start the flash policy daemon" server = policy_server(843, './flashpolicy.xml') server.start() config = setup_config("konext") port = int(config['port']) # Port to listening on address = config['server_address'] # Address to listening on app_name = config['app_name'] # Name of the application # initialize the logger liblogging.logger = init_logger(config['format'], app_name, config['log_file'], config['max_logfile_size'], config['nb_logfile']) # checking availability ... if not socket_engine.check_server(address, port): liblogging.log( "Port or address is busy. Please refer to logs to know more about the failure", liblogging.ERROR)