Example #1
def overlay_heart_eyes(input_img,landmark_files):
  detector,predictor = landmark_files
  msg = None

  # Load the heart overlay; it will be warped onto each eye.
  heart = cv2.imread("./helper/heart.jpg")
  detections = detector(input_img,1)
  if len(detections) == 0:
    msg = "Face not found. Try re-aligning your face."
    return input_img,msg

  for rect in detections:
    landmarks = predictor(input_img,rect)
    landmarks = points2array(landmarks.parts())
    # Each eye gets a quadrilateral roughly halfway between the eye landmarks
    # and the surrounding brow/nose landmarks.
    le_upper_y = (landmarks[37][1] + landmarks[19][1]) // 2
    le_lower_y = (landmarks[41][1] + landmarks[30][1]) // 2
    le_left_x = (landmarks[17][0] + landmarks[36][0]) // 2
    le_right_x = (landmarks[21][0] + landmarks[39][0]) // 2

    re_upper_y = (landmarks[44][1] + landmarks[24][1]) // 2
    re_lower_y = (landmarks[46][1] + landmarks[30][1]) // 2
    re_left_x = (landmarks[26][0] + landmarks[45][0]) // 2
    re_right_x = (landmarks[22][0] + landmarks[42][0]) // 2

    l_ul = (le_left_x,le_upper_y)
    l_ur = (le_right_x,le_upper_y)
    l_lr = (le_right_x,le_lower_y)
    l_ll = (le_left_x,le_lower_y)

    r_ul = (re_left_x,re_upper_y)
    r_ur = (re_right_x,re_upper_y)
    r_lr = (re_right_x,re_lower_y)
    r_ll = (re_left_x,re_lower_y)

    # Map the full heart image onto each eye quadrilateral with a homography.
    pts_src = np.asarray([(0,0),(heart.shape[1],0),(heart.shape[1],heart.shape[0]),(0,heart.shape[0])])

    pts_dst = np.asarray([l_ul,l_ur,l_lr,l_ll])
    H = cv2.findHomography(pts_src,pts_dst,cv2.RANSAC)[0]
    le_mask_out = cv2.warpPerspective(heart,H,(input_img.shape[1],input_img.shape[0]))

    pts_dst = np.asarray([r_ul,r_ur,r_lr,r_ll])
    H = cv2.findHomography(pts_src,pts_dst,cv2.RANSAC)[0]
    re_mask_out = cv2.warpPerspective(heart,H,(input_img.shape[1],input_img.shape[0]))

    # Merge the two warped hearts and composite them onto the input image.
    final_mask = le_mask_out + re_mask_out
    input_img_mask = cv2.inRange(final_mask,(0,0,0),(0,0,255))
    input_img = cv2.bitwise_and(input_img,input_img,mask = input_img_mask) + final_mask

  return input_img,msg
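All five examples share the same setup: OpenCV, NumPy, dlib, and a small points2array helper that turns dlib landmark objects into plain (x, y) pairs. That helper and the detector/predictor pair are not shown on this page; a minimal sketch of what they are assumed to look like (the model path and the helper body are illustrative, not taken from the original project):

import math

import cv2
import dlib
import numpy as np

def points2array(points):
    # Assumed helper: convert dlib landmark points into a list of (x, y) tuples
    # so they can be indexed as landmarks[i][0] / landmarks[i][1].
    return [(p.x, p.y) for p in points]

# The (detector, predictor) pair is what every example receives as landmark_files.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("./helper/shape_predictor_68_face_landmarks.dat")
landmark_files = (detector, predictor)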
Example #2
def blur_all_faces(input_img, landmark_files):
    detector, predictor = landmark_files
    msg = None
    detections = detector(input_img, 1)
    if len(detections) == 0:
        msg = "Face not found. Try re-aligning your face."
        return input_img, msg

    for rect in detections:
        landmarks = predictor(input_img, rect)
        landmarks = points2array(landmarks.parts())

        # Paint a white polygon over the face (jaw line plus eyebrows) on a
        # fresh copy of the current image, then threshold it to get a binary
        # face mask. The copy is taken per face so multiple faces work.
        canvas = input_img.copy()
        indices = list(range(16)) + [26, 25, 24, 19, 18, 17, 0]
        pts = np.array(landmarks)[indices].reshape(-1, 1, 2)
        canvas = cv2.fillPoly(canvas, [pts], (255, 255, 255))
        canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
        face_mask = cv2.threshold(canvas, 254, 255, cv2.THRESH_BINARY)[1]
        face_mask_inv = cv2.bitwise_not(face_mask)

        # Blur the whole frame, keep the blurred pixels only inside the face
        # mask, and keep the original pixels everywhere else.
        blur_face = cv2.GaussianBlur(input_img, (37, 37), 150)
        blur_face = cv2.bitwise_and(blur_face, blur_face, mask=face_mask)
        bg = cv2.bitwise_and(input_img, input_img, mask=face_mask_inv)
        input_img = cv2.bitwise_or(bg, blur_face)

    return input_img, msg
Example #3
def overlay_moustache(input_img,landmark_files,index):
  detector,predictor = landmark_files
  msg = None

  if index == 1:
    moustache = cv2.imread("./helper/moustache1.jpg")
    moustache = moustache[185:310,140:500,:3]
  elif index == 2:
    moustache = cv2.imread("./helper/moustache2.jpg")
    moustache = moustache[68:215,:,:3]
  moustache_inv = cv2.bitwise_not(moustache)

  detections = detector(input_img,1)
  if len(detections) == 0:
    msg = "Face not found. Try re-aligning your face."
    return input_img,msg

  for rect in detections:
    landmarks = predictor(input_img,rect)
    landmarks = points2array(landmarks.parts())

    lower_left = [(landmarks[48][0] + landmarks[3][0])//2,(landmarks[48][1] + landmarks[3][1])//2]
    lower_right = [(landmarks[54][0] + landmarks[13][0])//2,(landmarks[54][1] + landmarks[13][1])//2]
    lower_slope = ((lower_left[1]-lower_right[1])/( lower_right[0] - lower_left[0]))

    if lower_slope != 0:
      perpendicular_slope = -1/lower_slope
    else:
      # A horizontal base line means the perpendicular is (nearly) vertical.
      perpendicular_slope = 1e10

    r = int(math.sqrt((landmarks[33][0]-landmarks[62][0])**2 + (landmarks[33][1]-landmarks[62][1])**2))
    theta = math.atan(perpendicular_slope)

    # y decreases as we move up the image and |theta| < 90 degrees, so the
    # signs in the parametric equations depend on the sign of theta:
    #   theta >= 0: x uses "+", y uses "-"
    #   theta <  0: x uses "-", y uses "+"
    if (theta >= 0):
      upper_left_x = int(lower_left[0] + r * math.cos(theta))
      upper_left_y = int( lower_left[1] - r * math.sin(theta))
      upper_right_x = int(lower_right[0] + r * math.cos(theta) )
      upper_right_y = int(lower_right[1] - r * math.sin(theta))
    else:
      upper_left_x = int(lower_left[0] - r * math.cos(theta))
      upper_left_y = int( lower_left[1] + r * math.sin(theta))
      upper_right_x = int(lower_right[0] - r * math.cos(theta) )
      upper_right_y = int(lower_right[1] + r * math.sin(theta))

    left_upper = [upper_left_x,upper_left_y]
    right_upper = [upper_right_x,upper_right_y]
    right_lower = lower_right
    left_lower = lower_left

    pts_src = np.asarray(([(0,0),(moustache.shape[1],0),(moustache.shape[1],moustache.shape[0]),(0,moustache.shape[0])]))
    pts_dst = np.array([left_upper,right_upper,right_lower,left_lower])

    H = cv2.findHomography(pts_src,pts_dst,cv2.RANSAC)[0]
    wrapped_overlay_inv = cv2.warpPerspective(moustache_inv,H,(input_img.shape[1],input_img.shape[0]))
    wrapped_overlay = cv2.bitwise_not(wrapped_overlay_inv)
    input_img = cv2.bitwise_and(wrapped_overlay,input_img)

  return input_img,msg
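Examples #3 and #5 reuse the same geometric step: the two missing corners of the destination quadrilateral are found by walking a distance r from the base points along the direction perpendicular to the base line, flipping signs because image y grows downward. A stand-alone sketch of that step, with a hypothetical helper name chosen here for illustration:

import math

def perpendicular_offset(point, base_slope, r, upward=True):
    # Step r pixels from point, perpendicular to a line with slope base_slope.
    # Image y grows downward, so an "upward" step subtracts r*sin(theta) when
    # theta >= 0 and adds it otherwise, mirroring the sign handling above.
    perp_slope = -1 / base_slope if base_slope != 0 else 1e10
    theta = math.atan(perp_slope)
    sign = 1 if theta >= 0 else -1
    if not upward:
        sign = -sign
    x = int(point[0] + sign * r * math.cos(theta))
    y = int(point[1] - sign * r * math.sin(theta))
    return [x, y]

# For example, the moustache corners in overlay_moustache would be
# perpendicular_offset(lower_left, lower_slope, r) and
# perpendicular_offset(lower_right, lower_slope, r).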
Example #4
def overlay_thuglife(input_img, landmark_files):

    detector, predictor = landmark_files
    msg = None

    # Loads the spectacles image and crops it
    specs = np.array(cv2.imread("helper/specs.jpg"), dtype=np.uint8)
    specs_crop = specs[420:498, 179:567, :3]
    inv_specs_crop = cv2.bitwise_not(specs_crop)

    # Loads the cigar image
    cigar = np.array(cv2.imread("helper/cigar.jpg"), dtype=np.uint8)

    # Initializes the face detector and landmark predictor
    detections = detector(input_img, 1)

    if len(detections) == 0:
        msg = "Face not found. Try re-aligning your face."
        return input_img, msg

    for rect in detections:
        # Facial landmarks are identified and reformatted into an array type
        landmarks = predictor(input_img, rect)
        landmarks = points2array(landmarks.parts())

        # Calculating spectacles landmarks locations
        specs_left = landmarks[0]
        specs_right = landmarks[16]

        # The eye opening height is used to tweak the spectacle landmark
        # positions calculated below.
        eyewidth = max(landmarks[40][1] - landmarks[38][1],
                       landmarks[46][1] - landmarks[44][1])

        specs_leftup = [specs_left[0], int(specs_left[1] - 1.5 * eyewidth)]
        specs_rightup = [specs_right[0], int(specs_right[1] - 1.5 * eyewidth)]

        specs_leftdown = [specs_left[0], int(specs_left[1] + 1.5 * eyewidth)]
        specs_rightdown = [
            specs_right[0],
            int(specs_right[1] + 1.5 * eyewidth)
        ]

        pts_src = np.array([(0, 0), (specs_crop.shape[1] - 1, 0),
                            (specs_crop.shape[1] - 1, specs_crop.shape[0] - 1),
                            (0, specs_crop.shape[0] - 1)])
        pts_dst = np.asarray(
            [specs_leftup, specs_rightup, specs_rightdown, specs_leftdown])
        H = cv2.findHomography(pts_src, pts_dst, cv2.RANSAC)[0]

        # Spectacles are warped onto the input image
        specs_mask_inv = cv2.warpPerspective(
            inv_specs_crop, H, (input_img.shape[1], input_img.shape[0]))
        specs_mask = cv2.bitwise_not(specs_mask_inv)
        input_img = cv2.bitwise_and(specs_mask, input_img)

        # Calculating cigar landmark locations
        cigar_leftup = landmarks[62]
        cigar_leftdown = landmarks[57]
        cigar_rightup = [landmarks[13][0], cigar_leftup[1]]
        cigar_rightdown = [landmarks[13][0], cigar_leftdown[1]]

        pts_src = np.array([(0, 0), (cigar.shape[1] - 1, 0),
                            (cigar.shape[1] - 1, cigar.shape[0] - 1),
                            (0, cigar.shape[0] - 1)])
        pts_dst = np.array(
            [cigar_leftup, cigar_rightup, cigar_rightdown, cigar_leftdown])
        H = cv2.findHomography(pts_src, pts_dst, cv2.RANSAC)[0]

        # Cigar is warped onto the input image
        cigar_out = cv2.warpPerspective(
            cigar, H, (input_img.shape[1], input_img.shape[0]))
        cigar_out_gray = cv2.cvtColor(cigar_out, cv2.COLOR_BGR2GRAY)
        cigar_mask_inv = cv2.threshold(cigar_out_gray, 0, 255,
                                       cv2.THRESH_BINARY)[1]
        cigar_mask = cv2.bitwise_not(cigar_mask_inv)

        # Final output image is calculated
        mask = cv2.bitwise_and(input_img, input_img, mask=cigar_mask)
        input_img = cv2.bitwise_or(mask, cigar_out)

    return input_img, msg
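All of these filters repeat the same compositing recipe: compute a homography from the overlay's corners to the destination quadrilateral, warp the overlay onto a black canvas the size of the input, derive a binary mask from the warped pixels, and blend. A hedged sketch of that recurring pattern as one helper (the function name and the simple non-black masking rule are illustrative; the individual examples mask in slightly different ways):

import cv2
import numpy as np

def paste_overlay(input_img, overlay, pts_dst):
    # pts_dst lists the destination corners as upper-left, upper-right,
    # lower-right, lower-left, matching the order used in the examples above.
    h, w = overlay.shape[:2]
    pts_src = np.array([(0, 0), (w - 1, 0), (w - 1, h - 1), (0, h - 1)])
    H = cv2.findHomography(pts_src, np.asarray(pts_dst), cv2.RANSAC)[0]
    warped = cv2.warpPerspective(overlay, H, (input_img.shape[1], input_img.shape[0]))

    # Everything the warp left black counts as background; the rest is overlay.
    warped_gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    bg_mask = cv2.threshold(warped_gray, 1, 255, cv2.THRESH_BINARY_INV)[1]
    background = cv2.bitwise_and(input_img, input_img, mask=bg_mask)
    return cv2.bitwise_or(background, warped)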
Example #5
def horns_nd_fangs_overlay(input_img,landmark_files):
  msg = None
  detector,predictor = landmark_files
  detections = detector(input_img,1)

  if len(detections) == 0:
    msg = "Face not found. Try re-aligning your face."
    return input_img,msg

  for rect in detections:
    # Warping the horns
    horns = cv2.imread("./helper/horns.jpg")
    horns = horns[:,:,:3]
    landmarks = predictor(input_img,rect)
    landmarks = points2array(landmarks.parts())

    lower_left = landmarks[1]
    lower_right = landmarks[15]
    lower_slope = ((lower_left[1]-lower_right[1])/( lower_right[0] - lower_left[0]))

    if lower_slope != 0:
      perpendicular_slope = -1/lower_slope
    else:
      # A horizontal base line means the perpendicular is (nearly) vertical.
      perpendicular_slope = 1e10

    r = int(math.sqrt((landmarks[30][0]-landmarks[8][0])**2 + (landmarks[30][1]-landmarks[8][1])**2))
    theta = math.atan(perpendicular_slope)

    # y decreases as we move up the image and |theta| < 90 degrees, so the
    # signs in the parametric equations depend on the sign of theta:
    #   theta >= 0: x uses "+", y uses "-"
    #   theta <  0: x uses "-", y uses "+"
    if (theta >= 0):
      upper_left_x = int(lower_left[0] + r * math.cos(theta))
      upper_left_y = int( lower_left[1] - r * math.sin(theta))
      upper_right_x = int(lower_right[0] + r * math.cos(theta) )
      upper_right_y = int(lower_right[1] - r * math.sin(theta))
    else:
      upper_left_x = int(lower_left[0] - r * math.cos(theta))
      upper_left_y = int( lower_left[1] + r * math.sin(theta))
      upper_right_x = int(lower_right[0] - r * math.cos(theta) )
      upper_right_y = int(lower_right[1] + r * math.sin(theta))

    left_upper = [upper_left_x,upper_left_y]
    right_upper = [upper_right_x,upper_right_y]
    right_lower = lower_right
    left_lower = lower_left

    pts_dst = np.array([left_upper,right_upper,right_lower,left_lower])
    y = 800
    pts_src = np.array([(250,395),(780,395),(780,y),(250,y)])

    H = cv2.findHomography(pts_src,pts_dst,cv2.RANSAC)[0]
    horns_warpped = cv2.warpPerspective(horns,H,(input_img.shape[1],input_img.shape[0]))
    horns_bw = cv2.cvtColor(horns_warpped,cv2.COLOR_BGR2GRAY)
    input_img_mask = cv2.threshold(horns_bw,1,255,cv2.THRESH_BINARY_INV)[1]
    masked_input_img = cv2.bitwise_and(input_img,input_img,mask = input_img_mask)

    input_img = cv2.bitwise_or(masked_input_img,horns_warpped)
    #-----------------------------------------------------------------------------------------#
    # Warping the fangs
    fangs = cv2.imread("./helper/fangs.jpg")
    fangs = fangs[130:520,105:730,:3]

    upper_left = landmarks[60]
    upper_right = landmarks[64]
    upper_slope = ((upper_left[1]-upper_right[1])/( upper_right[0] - upper_left[0]))

    if upper_slope != 0:
      perpendicular_slope = -1/upper_slope
    else:
      # A horizontal mouth line means the perpendicular is (nearly) vertical.
      perpendicular_slope = 1e10

    r = int(math.sqrt((landmarks[62][0]-landmarks[33][0])**2 + (landmarks[62][1]-landmarks[33][1])**2))
    theta = math.atan(perpendicular_slope)

    # y increases as we move down the image and |theta| < 90 degrees; the fangs
    # hang below the mouth, so the signs are the mirror of the horns case:
    #   theta >= 0: x uses "-", y uses "+"
    #   theta <  0: x uses "+", y uses "-"
    if (theta >= 0):
      lower_left_x = int(upper_left[0] - r * math.cos(theta))
      lower_left_y = int(upper_left[1] + r * math.sin(theta))
      lower_right_x = int(upper_right[0] - r * math.cos(theta))
      lower_right_y = int(upper_right[1] + r * math.sin(theta))
    else:
      lower_left_x = int(upper_left[0] + r * math.cos(theta))
      lower_left_y = int(upper_left[1] - r * math.sin(theta))
      lower_right_x = int(upper_right[0] + r * math.cos(theta))
      lower_right_y = int(upper_right[1] - r * math.sin(theta))

    left_lower = [lower_left_x,lower_left_y]
    right_lower = [lower_right_x,lower_right_y]
    right_upper = upper_right
    left_upper = upper_left

    pts_dst = np.array([left_upper,right_upper,right_lower,left_lower])
    pts_src = np.asarray(([(0,0),(fangs.shape[1],0),(fangs.shape[1],fangs.shape[0]),(0,fangs.shape[0])]))

    H = cv2.findHomography(pts_src,pts_dst,cv2.RANSAC)[0]
    fangs_warpped = cv2.warpPerspective(fangs,H,(input_img.shape[1],input_img.shape[0]))
    fangs_mask = cv2.threshold(fangs_warpped,1,255,cv2.THRESH_BINARY_INV)[1]
    masked_input_img = cv2.bitwise_and(input_img,fangs_mask)
    input_img = cv2.bitwise_or(fangs_warpped,masked_input_img)

  return input_img,msg
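A minimal driver for any of these functions could look like the sketch below; the model path, image paths, and the chosen effect are placeholders:

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("./helper/shape_predictor_68_face_landmarks.dat")

img = cv2.imread("input.jpg")
out, msg = overlay_thuglife(img, (detector, predictor))
if msg is not None:
    print(msg)  # no face was found; out is the unmodified input
cv2.imwrite("output.jpg", out)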