def main(args):
    checkpoint = torch.load(args.model_path, map_location=device)
    pfld_backbone = PFLDInference().to(device)
    pfld_backbone.load_state_dict(checkpoint['plfd_backbone'])  # key spelling as saved by train.py
    pfld_backbone.eval()
    transform = transforms.Compose([transforms.ToTensor()])

    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        if not ret:
            break
        height, width = img.shape[:2]
        bounding_boxes, landmarks = detect_faces(img)
        for box in bounding_boxes:
            score = box[4]
            x1, y1, x2, y2 = (box[:4] + 0.5).astype(np.int32)
            w = x2 - x1 + 1
            h = y2 - y1 + 1
            # Expand the detection box to a square with a 10% margin.
            size = int(max(w, h) * 1.1)
            cx = x1 + w // 2
            cy = y1 + h // 2
            x1 = cx - size // 2
            x2 = x1 + size
            y1 = cy - size // 2
            y2 = y1 + size

            # How far the square box overhangs each image edge,
            # measured before clamping.
            dx = max(0, -x1)
            dy = max(0, -y1)
            x1 = max(0, x1)
            y1 = max(0, y1)
            edx = max(0, x2 - width)
            edy = max(0, y2 - height)
            x2 = min(width, x2)
            y2 = min(height, y2)

            cropped = img[y1:y2, x1:x2]
            if dx > 0 or dy > 0 or edx > 0 or edy > 0:
                # Pad with black so the crop stays square at image borders.
                cropped = cv2.copyMakeBorder(cropped, dy, edy, dx, edx,
                                             cv2.BORDER_CONSTANT, 0)

            input = cv2.resize(cropped, (112, 112))
            input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
            input = transform(input).unsqueeze(0).to(device)
            _, landmarks = pfld_backbone(input)
            pre_landmark = landmarks[0]
            # Landmarks are predicted in [0, 1]; scale back to crop size.
            pre_landmark = pre_landmark.cpu().detach().numpy().reshape(
                -1, 2) * [size, size]

            for (x, y) in pre_landmark.astype(np.int32):
                cv2.circle(img, (x1 + x, y1 + y), 1, (0, 0, 255))

        cv2.imshow('0', img)
        if cv2.waitKey(10) == 27:  # Esc to quit
            break
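# The scripts in this section share a common preamble: imports and a
# module-level `device`. A minimal sketch of that preamble, assuming the
# `PFLDInference` backbone and MTCNN-style `detect_faces` helper live in
# this repo; the two module paths below are assumptions, not confirmed
# by the source:
import os
import time

import cv2
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image, ImageDraw

from models.pfld import PFLDInference    # landmark backbone (assumed path)
from mtcnn.detector import detect_faces  # face detector (assumed path)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")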
def main(args):
    checkpoint = torch.load(args.model_path, map_location=device)
    pfld_backbone = PFLDInference().to(device)
    pfld_backbone.load_state_dict(checkpoint['plfd_backbone'])  # key spelling as saved by train.py
    pfld_backbone.eval()
    transform = transforms.Compose([transforms.ToTensor()])

    im = Image.open(args.image_path)
    img = np.array(im)
    height, width = img.shape[:2]
    draw = ImageDraw.Draw(im)

    bounding_boxes, landmarks = detect_faces(img)
    print(bounding_boxes)
    for box in bounding_boxes:
        score = box[4]
        x1, y1, x2, y2 = (box[:4] + 0.5).astype(np.int32)
        w = x2 - x1 + 1
        h = y2 - y1 + 1
        # Expand the detection box to a square with a 10% margin.
        size = int(max(w, h) * 1.1)
        cx = x1 + w // 2
        cy = y1 + h // 2
        x1 = cx - size // 2
        x2 = x1 + size
        y1 = cy - size // 2
        y2 = y1 + size

        dx = max(0, -x1)
        dy = max(0, -y1)
        x1 = max(0, x1)
        y1 = max(0, y1)
        edx = max(0, x2 - width)
        edy = max(0, y2 - height)
        x2 = min(width, x2)
        y2 = min(height, y2)

        cropped = img[y1:y2, x1:x2]
        if dx > 0 or dy > 0 or edx > 0 or edy > 0:
            cropped = cv2.copyMakeBorder(cropped, dy, edy, dx, edx,
                                         cv2.BORDER_CONSTANT, 0)

        # PIL already yields RGB, so no BGR->RGB conversion is needed here.
        input = cv2.resize(cropped, (112, 112))
        input = transform(input).unsqueeze(0).to(device)
        # The backbone returns (features, landmarks), as in the other scripts.
        _, landmarks = pfld_backbone(input)
        pre_landmark = landmarks[0]
        pre_landmark = pre_landmark.cpu().detach().numpy().reshape(
            -1, 2) * [size, size]
        print(pre_landmark)

        for (x, y) in pre_landmark.astype(np.int32):
            draw.ellipse((x1 + x - 1, y1 + y - 1, x1 + x + 1, y1 + y + 1),
                         fill=(0, 0, 255))
    im.show()
def extract_keypoints(img_path, model_path):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(model_path, map_location=device)
    pfld_backbone = PFLDInference().to(device)
    pfld_backbone.load_state_dict(checkpoint['pfld_backbone'])
    pfld_backbone.eval()
    transform = torchvision.transforms.Compose(
        [torchvision.transforms.ToTensor()])

    img = cv2.imread(img_path, 1)
    height, width = img.shape[:2]
    bounding_boxes, landmarks = detect_faces(img)
    keypoints = []  # collect landmarks across all detected faces
    for box in bounding_boxes:
        x1, y1, x2, y2 = (box[:4] + 0.5).astype(np.int32)
        w = x2 - x1 + 1  # box width
        h = y2 - y1 + 1  # box height
        cx = x1 + w // 2  # box center x
        cy = y1 + h // 2  # box center y
        size = int(max(w, h) * 1.1)
        x1 = cx - size // 2
        x2 = x1 + size
        y1 = cy - size // 2
        y2 = y1 + size

        # Overhang past each image edge must be measured *before*
        # clamping, otherwise the padding amounts are always zero.
        edx1 = max(0, -x1)
        edy1 = max(0, -y1)
        edx2 = max(0, x2 - width)
        edy2 = max(0, y2 - height)
        x1 = max(0, x1)
        y1 = max(0, y1)
        x2 = min(width, x2)
        y2 = min(height, y2)

        cropped = img[y1:y2, x1:x2]
        if edx1 > 0 or edy1 > 0 or edx2 > 0 or edy2 > 0:
            cropped = cv2.copyMakeBorder(cropped, edy1, edy2, edx1, edx2,
                                         cv2.BORDER_CONSTANT, 0)

        input = cv2.resize(cropped, (112, 112))
        cv2.imwrite(img_path.replace(".png", "_1.png"), input)
        input = transform(input).unsqueeze(0).to(device)
        _, landmarks = pfld_backbone(input)
        pre_landmark = landmarks[0]
        # Scale back to crop size, then shift out of the padded region.
        key_points = pre_landmark.cpu().detach().numpy().reshape(
            -1, 2) * [size, size] - [edx1, edy1]

        for (x, y) in key_points:
            cv2.circle(img, (x1 + int(x), y1 + int(y)), 2, (0, 255, 0), -1)
            keypoints.append([x1 + int(x), y1 + int(y)])

    cv2.imshow('face_landmark_68', img)
    cv2.waitKey(1000)
    return keypoints
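# A minimal usage sketch for extract_keypoints; both paths below are
# placeholders, not files shipped with the repo:
if __name__ == '__main__':
    points = extract_keypoints('sample_face.png',         # hypothetical image
                               'checkpoint_min_nme.pth')  # checkpoint from train.py
    print(f'{len(points)} landmarks:', points[:5])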
def __init__(self):
    self.device = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")
    model_path = os.path.join(PTH_DIR, 'checkpoint.pth.tar')
    checkpoint = torch.load(model_path, map_location=self.device)
    pfld_backbone = PFLDInference().to(self.device)
    pfld_backbone.load_state_dict(checkpoint['plfd_backbone'])  # key spelling as saved by train.py
    pfld_backbone.eval()
    self.pfld_backbone = pfld_backbone
    self.transform = transforms.Compose([transforms.ToTensor()])
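# A plausible companion method for this wrapper class -- not part of the
# original source. It condenses the square-crop logic from the scripts
# above (border padding omitted for brevity); `detect_faces` and the
# 112x112 input size are the same assumptions:
def get_landmarks(self, img):
    """Return one (N, 2) landmark array per detected face in a BGR image."""
    height, width = img.shape[:2]
    bounding_boxes, _ = detect_faces(img)
    results = []
    for box in bounding_boxes:
        x1, y1, x2, y2 = (box[:4] + 0.5).astype(np.int32)
        size = int(max(x2 - x1 + 1, y2 - y1 + 1) * 1.1)
        cx = x1 + (x2 - x1 + 1) // 2
        cy = y1 + (y2 - y1 + 1) // 2
        x1, y1 = max(0, cx - size // 2), max(0, cy - size // 2)
        x2, y2 = min(width, x1 + size), min(height, y1 + size)
        crop = cv2.resize(img[y1:y2, x1:x2], (112, 112))
        crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
        tensor = self.transform(crop).unsqueeze(0).to(self.device)
        _, landmarks = self.pfld_backbone(tensor)
        # Scale from [0, 1] to crop size, then offset into image space.
        results.append(landmarks[0].cpu().detach().numpy().reshape(-1, 2)
                       * [size, size] + [x1, y1])
    return results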
def main(args):
    # Face detection model (mmdetection-style).
    model = init_detector(args.config, args.face_model)

    # Landmark model.
    checkpoint = torch.load(args.mark_model, map_location=device)
    pfld_backbone = PFLDInference().to(device)
    pfld_backbone.load_state_dict(checkpoint['plfd_backbone'])
    pfld_backbone.eval()
    transform = transforms.Compose([transforms.ToTensor()])

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    save_path = '/home/yang/mark.mp4'
    # Frames passed to writer.write() must match this size.
    writer = cv2.VideoWriter(save_path, fourcc, 30.0, (1280, 720), True)

    if args.video_path:
        cap = cv2.VideoCapture(args.video_path)
    else:
        cap = cv2.VideoCapture(0)

    while True:
        ret, img = cap.read()
        if not ret:
            break
        height, width = img.shape[:2]
        results = inference_detector(model, img)
        bboxs = decode_detections(results[0], args.d_thresh)
        for i, bbox in enumerate(bboxs):
            w = bbox[2] - bbox[0]
            h = bbox[3] - bbox[1]
            # Pad the whole frame so an enlarged box never leaves the image.
            add = int(max(w, h))
            bimg = cv2.copyMakeBorder(img, add, add, add, add,
                                      borderType=cv2.BORDER_CONSTANT,
                                      value=[127., 127., 127.])
            bbox += add

            # Enlarge the box by 40% around its center.
            face_width = (1 + 0.4) * w
            center = [(bbox[0] + bbox[2]) // 2, (bbox[1] + bbox[3]) // 2]
            bbox[0] = center[0] - face_width // 2
            bbox[1] = center[1] - face_width // 2
            bbox[2] = center[0] + face_width // 2
            bbox[3] = center[1] + face_width // 2
            bbox = bbox.astype(np.int32)  # np.int is removed in NumPy >= 1.24

            crop_image = bimg[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
            height, width, _ = crop_image.shape
            crop_image = cv2.resize(crop_image, (112, 112))
            cv2.imshow('cropped face %d ' % i, crop_image)

            input = cv2.cvtColor(crop_image, cv2.COLOR_BGR2RGB)
            input = transform(input).unsqueeze(0).to(device)
            _, landmarks = pfld_backbone(input)
            pre_landmark = landmarks[0]
            # Scale back to the pre-resize crop dimensions.
            pre_landmark = pre_landmark.cpu().detach().numpy().reshape(
                -1, 2) * [width, height]

            for (x, y) in pre_landmark.astype(np.int32):
                # Shift from padded-image coordinates back to the frame.
                cv2.circle(img, (bbox[0] + x - add, bbox[1] + y - add), 1,
                           (0, 255, 0), -1)

        cv2.imshow('0', img)
        writer.write(img)
        if cv2.waitKey(1) == 27:
            break
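# `decode_detections` is not shown in this section. A minimal sketch of
# what it is assumed to do with mmdetection-style output -- each row of
# `results[0]` being [x1, y1, x2, y2, score] -- keeping boxes above the
# confidence threshold; this is an assumption, not the repo's helper:
def decode_detections(dets, thresh):
    dets = np.asarray(dets)
    if dets.size == 0:
        return dets.reshape(0, 5)
    return dets[dets[:, 4] >= thresh]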
def main(args):
    # Step 1: parse args config
    logging.basicConfig(
        format='[%(asctime)s] [p%(process)s] [%(pathname)s:%(lineno)d] '
               '[%(levelname)s] %(message)s',
        level=logging.INFO,
        handlers=[
            logging.FileHandler(args.log_file, mode='w'),
            logging.StreamHandler()
        ])
    print_args(args)

    if args.backbone == "v2":
        from models.pfld import PFLDInference, AuxiliaryNet
    elif args.backbone == "v3":
        from models.mobilev3_pfld import PFLDInference, AuxiliaryNet
    elif args.backbone == "ghost":
        from models.ghost_pfld import PFLDInference, AuxiliaryNet
    elif args.backbone == "lite":
        from models.lite import PFLDInference, AuxiliaryNet
    else:
        raise ValueError("backbone is not implemented")

    plfd_backbone = PFLDInference()
    auxiliarynet = AuxiliaryNet()
    if os.path.exists(args.resume) and args.resume.endswith('.pth'):
        logging.info("loading the checkpoint from {}".format(args.resume))
        check = torch.load(args.resume, map_location=torch.device('cpu'))
        plfd_backbone.load_state_dict(check["plfd_backbone"])
        auxiliarynet.load_state_dict(check["auxiliarynet"])
        args.start_epoch = check["epoch"]

    # Step 2: model, criterion, optimizer, scheduler
    plfd_backbone = plfd_backbone.to(device)
    auxiliarynet = auxiliarynet.to(device)
    criterion = LandMarkLoss()
    optimizer = torch.optim.Adam(
        [{'params': plfd_backbone.parameters()},
         {'params': auxiliarynet.parameters()}],
        lr=args.base_lr,
        weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', patience=args.lr_patience, verbose=True)

    # Step 3: data augmentation and loaders
    transform = transforms.Compose([transforms.ToTensor()])
    wlfwdataset = PFLDDatasets(args.dataroot,
                               transform,
                               img_root=os.path.realpath('./data'),
                               img_size=args.img_size)
    dataloader = DataLoader(wlfwdataset,
                            batch_size=args.train_batchsize,
                            shuffle=True,
                            num_workers=args.workers,
                            drop_last=False)
    wlfw_val_dataset = PFLDDatasets(args.val_dataroot,
                                    transform,
                                    img_root=os.path.realpath('./data'),
                                    img_size=args.img_size)
    wlfw_val_dataloader = DataLoader(wlfw_val_dataset,
                                     batch_size=args.val_batchsize,
                                     shuffle=False,
                                     num_workers=args.workers)

    # Step 4: run the train/validate loop
    weighted_losses = []
    train_losses = []
    val_losses = []
    val_nme = 1e6
    for epoch in range(args.start_epoch, args.end_epoch + 1):
        weighted_train_loss, train_loss = train(dataloader, plfd_backbone,
                                                auxiliarynet, criterion,
                                                optimizer, epoch)
        if epoch % args.epoch_interval == 0:
            filename = os.path.join(
                str(args.snapshot),
                "checkpoint_epoch_" + str(epoch) + '.pth')
            save_checkpoint(
                {
                    'epoch': epoch,
                    'plfd_backbone': plfd_backbone.state_dict(),
                    'auxiliarynet': auxiliarynet.state_dict()
                }, filename)

        val_loss, cur_val_nme = validate(wlfw_val_dataloader, plfd_backbone,
                                         auxiliarynet, criterion)
        # Keep a separate snapshot of the checkpoint with the lowest NME.
        if cur_val_nme < val_nme:
            filename = os.path.join(str(args.snapshot),
                                    "checkpoint_min_nme.pth")
            save_checkpoint(
                {
                    'epoch': epoch,
                    'plfd_backbone': plfd_backbone.state_dict(),
                    'auxiliarynet': auxiliarynet.state_dict()
                }, filename)
            val_nme = cur_val_nme

        scheduler.step(val_loss)
        weighted_losses.append(weighted_train_loss.item())
        train_losses.append(train_loss.item())
        val_losses.append(val_loss.item())
        logging.info(
            "epoch: {}, weighted_train_loss: {:.4f}, trainset loss: {:.4f} "
            "valset loss: {:.4f} best val nme: {:.4f}\n".format(
                epoch, weighted_train_loss, train_loss, val_loss, val_nme))

    logging.info(" ".join(map(str, weighted_losses)))
    logging.info(" ".join(map(str, train_losses)))
    logging.info(" ".join(map(str, val_losses)))
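# A minimal entry-point sketch for this training script. The argument
# names match those read by main() above; every default value here is an
# assumption, not taken from the original source:
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='PFLD training')
    parser.add_argument('--backbone', default='v2',
                        choices=['v2', 'v3', 'ghost', 'lite'])
    parser.add_argument('--log_file', default='train.log')
    parser.add_argument('--resume', default='')
    parser.add_argument('--base_lr', type=float, default=1e-4)
    parser.add_argument('--weight_decay', type=float, default=1e-6)
    parser.add_argument('--lr_patience', type=int, default=40)
    parser.add_argument('--dataroot', default='./data/train_data/list.txt')
    parser.add_argument('--val_dataroot', default='./data/test_data/list.txt')
    parser.add_argument('--img_size', type=int, default=112)
    parser.add_argument('--train_batchsize', type=int, default=256)
    parser.add_argument('--val_batchsize', type=int, default=8)
    parser.add_argument('--workers', type=int, default=4)
    parser.add_argument('--start_epoch', type=int, default=1)
    parser.add_argument('--end_epoch', type=int, default=500)
    parser.add_argument('--epoch_interval', type=int, default=10)
    parser.add_argument('--snapshot', default='./checkpoint')
    main(parser.parse_args())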
def main(args):
    checkpoint = torch.load(args.model_path, map_location=device)
    pfld_backbone = PFLDInference().to(device)
    pfld_backbone.load_state_dict(checkpoint['plfd_backbone'])
    pfld_backbone.eval()
    transform = transforms.Compose([transforms.ToTensor()])

    if not os.path.exists(args.video_path):
        print('Video not found.')
        exit()
    res_dir_path = os.path.splitext(os.path.basename(
        args.video_path))[0] if args.res_dir is None else args.res_dir
    os.makedirs(res_dir_path, exist_ok=True)

    cap = cv2.VideoCapture(args.video_path)
    frame_index = 0
    start_time = time.time()
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV reads BGR; convert once to RGB for PIL and the model.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        im = Image.fromarray(frame)
        img = np.array(im)
        height, width = img.shape[:2]
        draw = ImageDraw.Draw(im)

        bounding_boxes, landmarks = detect_faces(img)
        for box in bounding_boxes:
            score = box[4]
            x1, y1, x2, y2 = (box[:4] + 0.5).astype(np.int32)
            w = x2 - x1 + 1
            h = y2 - y1 + 1
            size = int(max(w, h) * 1.1)
            cx = x1 + w // 2
            cy = y1 + h // 2
            x1 = cx - size // 2
            x2 = x1 + size
            y1 = cy - size // 2
            y2 = y1 + size

            dx = max(0, -x1)
            dy = max(0, -y1)
            x1 = max(0, x1)
            y1 = max(0, y1)
            edx = max(0, x2 - width)
            edy = max(0, y2 - height)
            x2 = min(width, x2)
            y2 = min(height, y2)

            cropped = img[y1:y2, x1:x2]
            if dx > 0 or dy > 0 or edx > 0 or edy > 0:
                cropped = cv2.copyMakeBorder(cropped, dy, edy, dx, edx,
                                             cv2.BORDER_CONSTANT, 0)

            # The frame is already RGB, so no second BGR->RGB swap is needed.
            input = cv2.resize(cropped, (112, 112))
            input = transform(input).unsqueeze(0).to(device)
            # The backbone returns (features, landmarks), as in the other scripts.
            _, landmarks = pfld_backbone(input)
            pre_landmark = landmarks[0]
            pre_landmark = pre_landmark.cpu().detach().numpy().reshape(
                -1, 2) * [size, size]

            for (x, y) in pre_landmark.astype(np.int32):
                draw.ellipse((x1 + x - 1, y1 + y - 1, x1 + x + 1, y1 + y + 1),
                             fill=(0, 0, 255))

        im.save(f'{res_dir_path}{os.sep}{frame_index:05}.jpg')
        if (frame_index + 1) == 80:
            print(f'{frame_index / (time.time() - start_time)} FPS')
        frame_index += 1

    cap.release()
    print(f'{frame_index / (time.time() - start_time)} FPS')
    print('Done.')
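# The loop above writes one JPEG per frame. A small sketch for stitching
# them back into a video with OpenCV; the 30 FPS default is an assumption,
# not a value from the original source:
def frames_to_video(frame_dir, out_path, fps=30.0):
    names = sorted(n for n in os.listdir(frame_dir) if n.endswith('.jpg'))
    first = cv2.imread(os.path.join(frame_dir, names[0]))
    h, w = first.shape[:2]
    writer = cv2.VideoWriter(out_path,
                             cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    for name in names:
        writer.write(cv2.imread(os.path.join(frame_dir, name)))
    writer.release()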