def load_checkpoints(config_path, checkpoint_path, cpu=False):
    """Build the generator and keypoint detector and restore their weights.

    Args:
        config_path: Path to the YAML experiment config; expects a
            ``model_params`` section with ``generator_params``,
            ``kp_detector_params`` and ``common_params`` sub-sections.
        checkpoint_path: Path to a torch checkpoint containing ``generator``
            and ``kp_detector`` state dicts.
        cpu: If True, keep both modules on CPU and skip DataParallel wrapping.

    Returns:
        Tuple ``(generator, kp_detector)``, both in eval mode.
    """
    with open(config_path) as f:
        # Explicit Loader: bare yaml.load(f) is deprecated since PyYAML 5.1
        # and raises TypeError on PyYAML >= 6.
        config = yaml.load(f, Loader=yaml.FullLoader)

    generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
                                        **config['model_params']['common_params'])
    kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
                             **config['model_params']['common_params'])
    if not cpu:
        generator.cuda()
        kp_detector.cuda()

    # map_location lets checkpoints saved on CUDA load on CPU-only machines;
    # None keeps torch.load's default behavior for the CUDA path.
    checkpoint = torch.load(checkpoint_path,
                            map_location=torch.device('cpu') if cpu else None)
    generator.load_state_dict(checkpoint['generator'])
    kp_detector.load_state_dict(checkpoint['kp_detector'])

    if not cpu:
        # DataParallel requires CUDA devices, so it is skipped for CPU inference.
        generator = DataParallelWithCallback(generator)
        kp_detector = DataParallelWithCallback(kp_detector)

    generator.eval()
    kp_detector.eval()
    return generator, kp_detector
def load_checkpoints(config_path, checkpoint_path, cpu=False):
    """Build the generator and keypoint detector and restore their weights.

    Generalized backward-compatibly: the new ``cpu`` keyword defaults to
    False, preserving the original CUDA-only behavior for existing callers,
    while allowing CPU-only inference like the sibling loaders in this file.

    Args:
        config_path: Path to the YAML experiment config; expects a
            ``model_params`` section with ``generator_params``,
            ``kp_detector_params`` and ``common_params`` sub-sections.
        checkpoint_path: Path to a torch checkpoint containing ``generator``
            and ``kp_detector`` state dicts.
        cpu: If True, keep both modules on CPU and skip DataParallel wrapping.

    Returns:
        Tuple ``(generator, kp_detector)``, both in eval mode.
    """
    with open(config_path) as f:
        # Explicit Loader: bare yaml.load(f) is deprecated since PyYAML 5.1
        # and raises TypeError on PyYAML >= 6.
        config = yaml.load(f, Loader=yaml.FullLoader)

    generator = OcclusionAwareGenerator(
        **config['model_params']['generator_params'],
        **config['model_params']['common_params'])
    kp_detector = KPDetector(
        **config['model_params']['kp_detector_params'],
        **config['model_params']['common_params'])
    if not cpu:
        generator.cuda()
        kp_detector.cuda()

    # map_location lets CUDA-trained checkpoints load on CPU-only machines.
    checkpoint = torch.load(checkpoint_path,
                            map_location=torch.device('cpu') if cpu else None)
    generator.load_state_dict(checkpoint['generator'])
    kp_detector.load_state_dict(checkpoint['kp_detector'])

    if not cpu:
        # DataParallel requires CUDA devices; skip it on the CPU path.
        generator = DataParallelWithCallback(generator)
        kp_detector = DataParallelWithCallback(kp_detector)

    generator.eval()
    kp_detector.eval()
    return generator, kp_detector
def load_checkpoints(config_path, checkpoint_path, cpu=False):
    """Build the generator and keypoint detector and restore their weights.

    Args:
        config_path: Path to the YAML experiment config; expects a
            ``model_params`` section with ``generator_params``,
            ``kp_detector_params`` and ``common_params`` sub-sections.
        checkpoint_path: Path to a torch checkpoint containing ``generator``
            and ``kp_detector`` state dicts.
        cpu: If True, keep both modules on CPU and skip DataParallel wrapping.

    Returns:
        Tuple ``(generator, kp_detector)``, both in eval mode.
    """
    with open(config_path) as f:
        # Explicit Loader: bare yaml.load(f) is deprecated since PyYAML 5.1
        # and raises TypeError on PyYAML >= 6.
        config = yaml.load(f, Loader=yaml.FullLoader)

    generator = OcclusionAwareGenerator(
        **config["model_params"]["generator_params"],
        **config["model_params"]["common_params"],
    )
    kp_detector = KPDetector(
        **config["model_params"]["kp_detector_params"],
        **config["model_params"]["common_params"],
    )
    if cpu:
        generator.cpu()
        kp_detector.cpu()
    else:
        generator.cuda()
        kp_detector.cuda()

    checkpoint = torch.load(checkpoint_path, map_location="cpu" if cpu else None)
    generator.load_state_dict(checkpoint["generator"])
    kp_detector.load_state_dict(checkpoint["kp_detector"])

    if not cpu:
        # Bug fix: the original wrapped in DataParallelWithCallback even when
        # cpu=True, but DataParallel requires CUDA devices. Guarding the wrap
        # matches the sibling load_checkpoints variant in this file.
        generator = DataParallelWithCallback(generator)
        kp_detector = DataParallelWithCallback(kp_detector)

    generator.eval()
    kp_detector.eval()
    return generator, kp_detector
# Script entry: parse CLI args, load the experiment config, construct the
# generator and keypoint detector, and restore their weights from a checkpoint.
opt = parser.parse_args()

with open(opt.config) as f:
    # NOTE(review): bare yaml.load(f) without a Loader is deprecated since
    # PyYAML 5.1 and raises TypeError on PyYAML >= 6 — left unchanged here.
    config = yaml.load(f)

# The reconstruction loss carries one weight per discriminator block plus one
# extra entry, so the config must provide exactly num_blocks + 1 weights.
blocks_discriminator = config['model_params']['discriminator_params']['num_blocks']
assert len(config['train_params']['loss_weights']['reconstruction']) == blocks_discriminator + 1

generator = MotionTransferGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params'])
if not opt.cpu:
    generator.cuda()
kp_detector = KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params'])
if not opt.cpu:
    kp_detector = kp_detector.cuda()

# Checkpoint weights are restored with use_cpu=True regardless of opt.cpu;
# the modules themselves were already moved to CUDA above when requested.
Logger.load_cpk(opt.checkpoint, generator=generator, kp_detector=kp_detector, use_cpu=True)
vis = Visualizer()

if not opt.cpu:
    # assumes both wraps belong under this guard (the source arrived flattened
    # onto one line) — mirrors load_checkpoints above; TODO confirm
    generator = DataParallelWithCallback(generator)
    kp_detector = DataParallelWithCallback(kp_detector)

generator.eval()
kp_detector.eval()

''' Logic: The goal of this module is to essentially loop through all of the GIFs in a directory and then extract the pose points for the first frame of the GIF for each GIF. This allows for an alignment based