Example #1
    def __getitem__(self, idx):
        image_name = os.path.join(self.root_dir,
                                  self.key_pts_frame.iloc[idx, 0])

        image = mpimg.imread(image_name)

        # if the image has an alpha channel, drop it and keep RGB only
        if image.ndim == 3 and image.shape[2] == 4:
            image = image[:, :, 0:3]

        # keypoint columns follow the image name; reshape to (num_points, 2)
        key_pts = self.key_pts_frame.iloc[idx, 1:].to_numpy()
        key_pts = key_pts.astype('float').reshape(-1, 2)

        # align the face and transform the keypoints to match
        image, key_pts = face_alignment(image, key_pts)

        # build the trainable ground truth from the aligned keypoints
        ground_truth = generate_trainable_gt(key_pts)

        sample = {'image_data': image, 'image': image,
                  'keypoints': key_pts, 'ground_truth': ground_truth}

        if self.transform:
            sample = self.transform(sample)

        return sample
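
A minimal usage sketch for a dataset exposing this __getitem__; the class name FacialKeypointsDataset, the constructor signature, and the file paths are assumptions, not from the source:

# hypothetical usage -- class name, constructor signature, and paths are assumptions
from torch.utils.data import DataLoader

dataset = FacialKeypointsDataset(csv_file='data/training_frames_keypoints.csv',
                                 root_dir='data/training/')
sample = dataset[0]
print(sample['image'].shape, sample['keypoints'].shape)

# batching requires a transform that makes every sample the same size
loader = DataLoader(dataset, batch_size=8, shuffle=True)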
Example #2
def extract_face_alignment(video_path, output_dir):
    detector = dlib.get_frontal_face_detector()
    predictor_1 = dlib.shape_predictor(
        '.\\Models\\shape_predictor_5_face_landmarks.dat')
    predictor_2 = dlib.shape_predictor(
        '.\\Models\\shape_predictor_68_face_landmarks.dat')

    cap = cv2.VideoCapture(video_path)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    scale = 1280 / width
    # note: this writer is created but never written to in this excerpt
    Write_frames = cv2.VideoWriter('Videos/tmp/frames.avi', fourcc, fps,
                                   (int(width * scale), int(height * scale)))
    falign = face_alignment()
    for i in range(frames):
        flag, frame = cap.read()
        if flag:
            frame = cv2.resize(frame,
                               (int(width * scale), int(height * scale)))

            # detect faces on the grayscale frame and keep the widest one
            dets = detector(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), 0)
            if len(dets) > 0:
                big_face = 0
                for j, d in enumerate(dets):
                    if d.right() - d.left() > big_face:
                        big_face = d.right() - d.left()
                        face_location = dets[j]
                shape_1 = predictor_1(frame, face_location)  # 5-point shape (unused below)
                shape_2 = predictor_2(frame, face_location)
                aligned_face, norm_land = falign.face_align_by_5(
                    frame, shape_2, 255, 1.0)
                # str_idx is a zero-padding index helper from the surrounding module
                filename = 'img' + str_idx(i) + '.jpg'
                filename = os.path.join(output_dir, filename)
                cv2.imwrite(filename, aligned_face)
    cap.release()
    Write_frames.release()
    return 1
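
A usage sketch for the extractor above; the paths are illustrative, and note the function expects Videos/tmp/ to exist for its temporary writer:

# hypothetical driver -- paths are illustrative, not from the source
import os

os.makedirs('Faces/aligned', exist_ok=True)
os.makedirs('Videos/tmp', exist_ok=True)  # frames.avi is created here
extract_face_alignment('Videos/input.avi', 'Faces/aligned')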
Example #3
                # loop over the subset of facial landmarks, drawing the specific face part
                for (x, y) in shape[i:j]:
                    if name == 'right eye':
                        eye_landmarks[0].append(x)
                        eye_landmarks[1].append(y)
                    elif name == 'left eye':
                        eye_landmarks[2].append(x)
                        eye_landmarks[3].append(y)

                #     cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)
                #
                # cv2.imshow("Image", clone)

            # FACE ALIGNMENT & GET ANGLE, EYES CENTER
            output, angle, eyes_center = face_alignment(
                image=image, eye_landmarks=eye_landmarks)
            # cv2.imshow("output", output)

            # EYE PATCH EXTRACTION
            gray2 = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
            # detect faces in the grayscale image
            rect2 = detector(gray2, 1)
            if len(rect2) == 0:
                break
            rect2 = rect2[0]
            shape2 = predictor(gray2, rect2)
            shape2 = face_utils.shape_to_np(shape2)
            roi = []
            # dlib 68-point indexing: points 36-41 are the right eye, 42-47 the left
            for (m, (name, (i, j))) in enumerate([('right eye', (36, 42)),
                                                  ('left eye', (42, 48))]):
                # extract the ROI of the face region as a separate image
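The listing cuts off at the ROI step; below is a minimal sketch of how it typically continues with cv2.boundingRect (numpy imported as np is assumed, and the fixed 60x36 patch size is an assumption, not from the source):

                # hypothetical continuation: crop each eye with a bounding
                # rectangle (the fixed patch size is an assumption)
                pts = np.array(shape2[i:j], dtype=np.int32)
                (x, y, w, h) = cv2.boundingRect(pts)
                eye_patch = output[y:y + h, x:x + w]
                eye_patch = cv2.resize(eye_patch, (60, 36))
                roi.append(eye_patch)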
Example #4
        folder_name = f.split('.')[0]
        new_dir_f = new_dir + '/' + folder_name
        try:
            os.mkdir(new_dir_f)
        except Exception as e:
            print(str(e))

        vidcap = cv2.VideoCapture(video_path)
        success, image = vidcap.read()
        face_cascade = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')

        count = 0
        while success:
            # Align face
            image = face_alignment(image)

            copy_image = image.copy()

            # Convert the image to RGB colorspace
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Convert the image to gray
            gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

            # Detect faces in the image using the pre-trained face detector
            faces = face_cascade.detectMultiScale(gray_image, 1.25, 6)

            print('Number of faces detected:', len(faces))

            # Get the bounding box for each detected face
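The excerpt ends before the bounding boxes are used; a hypothetical continuation would crop each face from the BGR copy, save it, and read the next frame:

            # hypothetical continuation -- crop, save, advance to the next frame
            for (x, y, w, h) in faces:
                face_crop = copy_image[y:y + h, x:x + w]  # copy_image is still BGR
                cv2.imwrite(os.path.join(new_dir_f, 'frame%d.jpg' % count), face_crop)
            count += 1
            success, image = vidcap.read()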
Example #5
from helper import *
from face_alignment import face_alignment

if __name__ == '__main__':
    # suppress TensorFlow's C++ log output
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    args = parse_args()
    face_alignment(args)
Example #6
def detect_micro_LBP_Webcam():
    print(' Processing Micro-expression by webcam ')

    detector = dlib.get_frontal_face_detector()
    predictor_2 = dlib.shape_predictor('.//Models//shape_predictor_68_face_landmarks.dat')

    cap = cv2.VideoCapture(0)
    window_len = int(30 / 2)  # half of an assumed 30 fps capture rate
    processing_list = []
    falign = face_alignment()
    frames = 2000  # capacity of the score buffers below
    Farr = np.zeros(frames)
    micro_array = np.zeros(frames)
    i = 0
    face_location = None
    shape_2 = None
    res = 0
    while True:
        flag, frame = cap.read()
        if flag:
            dets = detector(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), 0)
            if len(dets) > 0:
                # keep the widest detected face
                big_face = 0
                for j, d in enumerate(dets):
                    if d.right() - d.left() > big_face:
                        big_face = d.right() - d.left()
                        face_location = dets[j]
                shape_2 = predictor_2(frame, face_location)

                # align the face by 5 reference landmarks
                # alternative: falign.face_registration(frame, shape_2, 225, 1.0)
                aligned_face, norm_landmark = falign.face_align_by_5(frame, shape_2, 255, 1.0)
                processing_list.append(aligned_face)

                # keep a sliding window of the last window_len aligned frames
                if len(processing_list) > window_len:
                    processing_list.pop(0)

                idx = i
                i = i + 1

                if len(processing_list) == window_len:
                    # compare the middle (candidate apex) frame against the
                    # head and tail of the window
                    hframe = processing_list[0]
                    tframe = processing_list[-1]
                    curr_frame = processing_list[int(window_len / 2)]

                    dist, idx_sorted = calculate_distance_block(curr_frame, hframe, tframe, norm_landmark)

                    pos = idx - int(window_len / 2)
                    Farr[pos] = dist
                    res = calculate_curr_thr(Farr, i + 1, window_len)
                    if res == 1:
                        print(pos)

                if i >= frames:
                    break  # Farr / micro_array buffers are full

            if shape_2 is not None:
                # draw a box around the nose tip, scaled by the eye distance,
                # whenever a micro-expression is flagged
                shape_np = falign.shape_to_np(shape_2)
                d = shape_np[45, 0] - shape_np[36, 0]
                if res == 1:
                    frame = cv2.rectangle(frame,
                                          (shape_np[30, 0] - d, shape_np[30, 1] - d),
                                          (shape_np[30, 0] + d, shape_np[30, 1] + d),
                                          (0, 255, 0), 1)
            cv2.imshow('Face Micro expression demo ', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    cap.release()
    cv2.destroyAllWindows()
    return 1
Example #7
def detect_micro_LBP(video_file_path):
    print(' Processing Micro-expression on: ', video_file_path)

    detector = dlib.get_frontal_face_detector()
    predictor_1 = dlib.shape_predictor('.//Models//shape_predictor_5_face_landmarks.dat')  # unused in this excerpt
    predictor_2 = dlib.shape_predictor('.//Models//shape_predictor_68_face_landmarks.dat')

    cap = cv2.VideoCapture(video_file_path)

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    print(fps)
    frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    face_location = None
    window_len = int(fps / 2)  # half-second sliding window
    processing_list = []
    falign = face_alignment()
    Farr = np.zeros(frames)
    micro_array = np.zeros(frames)
    for i in range(frames):
        flag, frame = cap.read()
        if flag:
            start_time = time.time()
            dets = detector(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), 0)
            if len(dets) > 0:
                # keep the widest detected face
                big_face = 0
                for j, d in enumerate(dets):
                    if d.right() - d.left() > big_face:
                        big_face = d.right() - d.left()
                        face_location = dets[j]
                shape_2 = predictor_2(frame, face_location)

                # align the face by 5 reference landmarks
                # alternative: falign.face_registration(frame, shape_2, 225, 1.0)
                aligned_face, norm_landmark = falign.face_align_by_5(frame, shape_2, 255, 1.0)
                processing_list.append(aligned_face)

                # keep a sliding window of the last window_len aligned frames
                if len(processing_list) > window_len:
                    processing_list.pop(0)

                if len(processing_list) == window_len:
                    # compare the middle (candidate apex) frame against the
                    # head and tail of the window
                    hframe = processing_list[0]
                    tframe = processing_list[-1]
                    curr_frame = processing_list[int(window_len / 2)]

                    dist, idx_sorted = calculate_distance_block(curr_frame, hframe, tframe, norm_landmark)

                    pos = i - int(window_len / 2)
                    Farr[pos] = dist
                    res = calculate_curr_thr(Farr, i + 1, window_len)
                    if res == 1:
                        print(pos)

    cap.release()
    return 1
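
A usage sketch for the two detectors above; the video path is illustrative, not from the source:

# hypothetical driver -- the video path is illustrative
detect_micro_LBP('Videos/sample.avi')  # offline analysis of a video file
detect_micro_LBP_Webcam()              # live analysis from the default camera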
Example #8
import cv2
from face_alignment import face_alignment
from face_base import find_face
from face_base import license_detection
from smooth_sharpen import smooth
from smooth_sharpen import sharpen
from face_base import face_wipeoff
import pytesseract

img = cv2.imread(
    '/Users/qinfeiyu/PycharmProjects/image_recognition/valuable/Obama_1.jpg')

img = face_alignment(img)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face, face_plus = find_face(img_gray)

face_plus, license_img = license_detection(img, face_plus)

# smooth then sharpen the license region before display / OCR
license_img = smooth(license_img)
license_img = sharpen(license_img)
cv2.imshow('license', license_img)
cv2.waitKey(0)

license_gray = cv2.cvtColor(license_img, cv2.COLOR_BGR2GRAY)

# face, face_plus = find_face(license_gray)
# license_gray_noface = face_wipeoff(license_gray, face_plus)
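
pytesseract is imported above but never used in the excerpt; a plausible OCR step on the grayscale license region would look like this (a sketch, not the author's code):

# hypothetical OCR step on the grayscale license region
text = pytesseract.image_to_string(license_gray)
print(text)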
Example #9
from helper import *
from face_alignment import face_alignment

face_alignment("frame0.jpg", "aframe0.jpg")