count = count + 1
print("====================================")
print("image number:", count)
img_frame = cv.resize(img_frame, dsize=(0, 0), fx=1, fy=1)  # no-op at fx=fy=1; adjust to rescale
# if wanted == "June":
#     matrix = cv.getRotationMatrix2D((width / 2, height / 2), 90, 1)
#     img_frame = cv.warpAffine(img_frame, matrix, (width, height))
#     img_frame = cv.flip(img_frame, 0)  # flip vertically

gray = cv.cvtColor(img_frame, cv.COLOR_BGR2GRAY)
dets = detector(gray, 1)
print("number of faces: {}".format(len(dets)))

for face in dets:
    fa = FaceAligner(predictor, desiredLeftEye=(0.3, 0.3), desiredFaceWidth=112)
    faceAligned = fa.align(img_frame, gray, face.rect)
    cut = copy.deepcopy(faceAligned)
    x = face.rect.left()
    y = face.rect.top()
    w = face.rect.right() - x
    h = face.rect.bottom() - y
    # the detector's bounding box is too small, so the landmark (shape) endpoints are used instead
    cv.rectangle(img_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    print("x:", x, "y:", y, "w:", w, "h:", h)
    # faceAligned = cv.resize(faceAligned, dsize=(112, 112), fx=1, fy=1)
    cv.imwrite('face.jpg', faceAligned)
    faceAligned = Image.open('face.jpg')
    # faceAligned = Image.fromarray(faceAligned)
    faceAligned = transforms(faceAligned).unsqueeze(0)
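Side note: the cv.imwrite('face.jpg') followed by Image.open('face.jpg') above round-trips every face through disk. A minimal in-memory sketch of the alternative hinted at by the commented-out Image.fromarray line (it assumes the same faceAligned ndarray and transforms pipeline as above; OpenCV arrays are BGR while PIL expects RGB, hence the conversion):

aligned_rgb = cv.cvtColor(faceAligned, cv.COLOR_BGR2RGB)              # BGR ndarray -> RGB
face_tensor = transforms(Image.fromarray(aligned_rgb)).unsqueeze(0)   # no temp file needed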
from TinyFacesDetector import TinyFacesDetector
import dlib
from Utils import Utils
from FaceAligner import FaceAligner

faces_out_folder = "./output/"
image_path = "sample.jpg"
model_pkl = "weights.pkl"

Utils.mkdir_if_not_exist(faces_out_folder)

tiny_faces_detector = TinyFacesDetector(model_pkl, use_gpu=True)

# init the face landmarks detector
predictor_5_face_landmarks = dlib.shape_predictor("shape_predictor_5_face_landmarks.dat")

# tight face aligner: padding = 0.2
aligner_tight = FaceAligner(face_size=112, face_padding=0.2,
                            predictor_5_face_landmarks=predictor_5_face_landmarks)

face_rects = tiny_faces_detector.detect(image_path, nms_thresh=0.1,
                                        prob_thresh=0.5, min_conf=0.9)

aligner_tight.out_dir = faces_out_folder  # constant for every face, so set once

face_indx = 0
for rect in face_rects:
    face_indx += 1
    aligner_tight.align_face(image_path, rect, str(face_indx) + '.jpg')
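The FaceAligner here is driven entirely by face_size and face_padding. For readers who want to see what such a 5-point aligner does internally, here is a minimal sketch of the underlying similarity transform; the landmark index layout, the 0.35 eye-line height, and the name align_by_eyes are illustrative assumptions, not the class's actual internals:

import cv2
import dlib
import numpy as np

def align_by_eyes(img, rect, predictor, out_size=112, padding=0.2):
    """Level the eye line, then scale/translate it into a fixed crop."""
    shape = predictor(img, rect)
    pts = np.array([(p.x, p.y) for p in shape.parts()], dtype="float64")
    # 5-point model: two corners per eye (indices 0-1 and 2-3), nose base at 4
    eye_a, eye_b = pts[0:2].mean(axis=0), pts[2:4].mean(axis=0)
    left, right = sorted([eye_a, eye_b], key=lambda p: p[0])  # order by x
    # rotation that makes the eye line horizontal
    angle = np.degrees(np.arctan2(right[1] - left[1], right[0] - left[0]))
    # scale so the inter-eye span fills the unpadded fraction of the crop
    desired_dist = (1.0 - 2.0 * padding) * out_size
    scale = desired_dist / np.linalg.norm(right - left)
    eyes_mid = (left + right) / 2.0
    M = cv2.getRotationMatrix2D((float(eyes_mid[0]), float(eyes_mid[1])), angle, scale)
    # translate the eye midpoint to a fixed spot in the output crop
    M[0, 2] += out_size * 0.5 - eyes_mid[0]
    M[1, 2] += out_size * 0.35 - eyes_mid[1]   # 0.35: assumed eye height
    return cv2.warpAffine(img, M, (out_size, out_size), flags=cv2.INTER_CUBIC)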
print("DDDDDDDDDDDD") image = cv2.imread(args.img_path) path = '/home/daehyeon/DepthNets/pipeline' # image = cv2.resize(image, None, fx=1, fy=1, interpolation=cv2.INTER_AREA) # Create a HOG face detector using the built-in dlib class # Load the image into an array start = time.time() try: faces_cnn = face_detector(image, 1) except: pass gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # kdkd 원래는 0.375 였음 for face in faces_cnn: fa = FaceAligner(predictor, desiredLeftEye=(0.3, 0.3), desiredFaceWidth=256) faceAligned = fa.align(image, gray, face.rect) cv2.imwrite(path + '/face_alignmented/{}.png'.format("source"), faceAligned) # cv2.imshow("Aligned", faceAligned) end = time.time() cv2.waitKey() cv2.destroyAllWindows() break # dir(train_data.root) # train_data = torchvision.datasets.ImageFolder(root='/home/daehyeon/hdd/deepfake_1st/fake/',) # count = 0 # path = '/home/daehyeon/DepthNets/pipeline'
weight = './mmod_human_face_detector.dat'
face_detector = dlib.cnn_face_detection_model_v1(weight)
ALL = list(range(0, 5))

data_dir = '/home/daehyeon/hdd/deepfake_1st/fake/'  # renamed from `dir`, which shadows the builtin
train_data = torchvision.datasets.ImageFolder(root=data_dir)
# alternatively, the file list could be built with os.listdir(data_dir)

count = 0
for i in range(len(train_data.imgs)):
    path = train_data.imgs[i][0]
    image = cv2.imread(path)
    start = time.time()
    try:
        faces_cnn = face_detector(image, 1)
    except Exception:
        continue
    count += 1
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    for face in faces_cnn:
        # in the aligned crop, inter-eye span : width = (1 - 0.25 - 0.25) : 1 = 0.5 : 1
        fa = FaceAligner(predictor, desiredLeftEye=(0.25, 0.25), desiredFaceWidth=256)
        faceAligned = fa.align(image, gray, face.rect)
        cv2.imwrite('/home/daehyeon/hdd/processed/fake_256/{}.jpg'.format(count), faceAligned)
        # cv2.imshow("Aligned", faceAligned)
    end = time.time()
    print('image {} of {}'.format(count, len(train_data.imgs)),
          'elapsed:', format(end - start, '.2f'))
    cv2.waitKey()
    cv2.destroyAllWindows()
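The ratio in the comment above can be sanity-checked with plain arithmetic, assuming the imutils FaceAligner convention where the right-eye x target is mirrored as 1 - desiredLeftEye[0] (eye_geometry is a hypothetical helper for illustration):

def eye_geometry(desired_left_eye_x, face_width):
    """Fraction of the crop width spanned by the eyes, and its pixel size."""
    span_frac = 1.0 - 2.0 * desired_left_eye_x
    return span_frac, span_frac * face_width

print(eye_geometry(0.25, 256))  # (0.5, 128.0) -> eyes 128 px apart in a 256 px crop
print(eye_geometry(0.3, 112))   # (0.4, 44.8)  -> the (0.3, 0.3) / 112 setting used earlier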
if use_cnn_face_detector:  # flag name assumed; the opening of this branch was cut off
    # slower but more accurate CNN face detector
    face_detector = dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat")
else:
    # faster face detector
    face_detector = dlib.get_frontal_face_detector()

# init the face landmarks detector
predictor_5_face_landmarks = dlib.shape_predictor("shape_predictor_5_face_landmarks.dat")

# init the object tracker
object_tracker = dlib.correlation_tracker()

# tight face aligner: padding = 0.2
aligner_tight = FaceAligner(face_size=112, face_padding=0.2,
                            predictor_5_face_landmarks=predictor_5_face_landmarks)

# loose face aligner: padding = 0.4
aligner_loose = FaceAligner(face_size=112, face_padding=0.4,
                            predictor_5_face_landmarks=predictor_5_face_landmarks)

# init the face tracker
face_tracker = FaceTracker(face_detector, object_tracker)

aligners = []
for video_path in input_videos:
    video_key = os.path.basename(video_path)
    # set the out folder of the tight aligner
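For reference, the dlib primitives behind the FaceTracker pairing above follow a detect-once, track-afterwards pattern. A minimal sketch using only stock dlib and OpenCV calls; FaceTracker's actual interface is not shown in this snippet, video_path and face_detector reuse the names above, and the [0] indexing assumes the CNN detector found at least one face:

import cv2
import dlib

tracker = dlib.correlation_tracker()
cap = cv2.VideoCapture(video_path)

ok, frame = cap.read()
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
det = face_detector(rgb, 1)[0]          # CNN detections carry the box in .rect
tracker.start_track(rgb, det.rect)      # seed the tracker with one detection

while True:
    ok, frame = cap.read()
    if not ok:
        break
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    psr = tracker.update(rgb)           # confidence (peak-to-sidelobe ratio)
    pos = tracker.get_position()        # dlib.drectangle with float coords
    box = dlib.rectangle(int(pos.left()), int(pos.top()),
                         int(pos.right()), int(pos.bottom()))
    # `box` can be handed to an aligner without re-running the detector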