Example 1
def upload():
	target = os.path.join(APP_ROOT, 'static/images/')
	print("TARGET: ", target)

	if not os.path.isdir(target):
		os.mkdir(target)

	file = request.files['file']
	print("File: ", file)
	# sanitize the filename once and reuse it for saving and serving
	filename = secure_filename(file.filename)
	destination = os.path.join(target, filename)
	file.save(destination)
	print("Location: " + destination)
	img = cv2.imread(destination)
	n_faces, faces_detected_img = detect_faces(haar_face_cascade, img)
	faces = "No"
	AK = "No"
	NM = "No"
	if n_faces > 0:
		faces = "Yes"
		with graph.as_default():
			if check_ak_or_namo(destination, model_ak):
				AK = "Yes"
			if check_ak_or_namo(destination, model_nm):
				NM = "Yes"
	cv2.imwrite(destination, faces_detected_img)
	source = '/static/images/' + filename
	print(source)
	return render_template("upload.html", faces=faces, AK=AK, NM=NM, source=source)
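Example 1 assumes a detect_faces helper that runs a Haar cascade and returns the number of faces together with an annotated copy of the image. A minimal sketch of such a helper, with the signature inferred from the call above and the detectMultiScale parameters as assumptions:

import cv2

def detect_faces(cascade, img):
    # Haar cascades work on single-channel images
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rects = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    # draw a rectangle around each detection on a copy of the input
    out = img.copy()
    for (x, y, w, h) in rects:
        cv2.rectangle(out, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return len(rects), out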
Example 2
def of_dataset(folder="testset", model=None, view=False):
    '''measure the average error across the given dataset:
    it compares the measured points with the annotated ground truth;
    optionally you can [view] the results'''
    assert model is not None

    # load face and landmark detectors
    utils.load_shape_predictor(model)
    # utils.init_face_detector(True, 150)

    # init average-error
    err = 0
    num = 0

    for img, lmarks, path in utils.ibug_dataset(folder):
        # detections
        face = utils.prominent_face(utils.detect_faces(img, detector="dlib"))
        measured = utils.detect_landmarks(img, face)

        # get error
        num += 1
        err += normalized_root_mean_square(lmarks, measured)

        # results:
        if view:
            utils.draw_rect(img, face, color=Colors.yellow)
            utils.draw_points(img, lmarks, color=Colors.green)
            utils.draw_points(img, measured, color=Colors.red)
            utils.show_image(utils.show_properly(utils.crop_image(img, face)))

    print(err, num, err / num)
    print("average NRMS Error for {} is {}".format(folder, err / num))
Example 3
	def start(self):
		cv2.namedWindow("camera")
		vc = cv2.VideoCapture(0)

		detect_flag = False

		videoflag = 0
		rval, frame = vc.read()
		s = time.time()
		a = time.time()
		tuples = [] # list of (image_ID, face, tag)
		self.faces = []
		while True:
			if frame is not None:
				pframe = cv2.flip(cv2.resize(frame, (960, 720)), 1)
				dframe = cv2.resize(pframe, (480, 360)) # downscale for detection; linear size ratio = 2
				ratio = 2

			
				p = Photo()
				faces = detect_faces(self.detector, dframe)
				faces = sorted(faces, key=lambda tup: tup[0])
				for (r_x, r_y, r_w, r_h) in faces:
					x, y, w, h = ratio*r_x, ratio*r_y, ratio*r_w, ratio*r_h
					p.add_face(x, y, w, h)
					# Draw a rectangle around the faces
					cv2.rectangle(pframe, (x, y), (x+w, y+h), (255, 0, 0), 2)
				p.image = pframe
				p.extract_features()
				self.faces = []
				if len(p.faces) != 0:
					X = np.array([f.feature for f in p.faces])
					predY = self.model.predict(X)

					for (i, f) in enumerate(p.faces):
						if inv_label_maps[predY[i]] == 'T':
							cv2.rectangle(pframe, (f.x, f.y), (f.x+f.w, f.y+f.h), (0, 255, 0), 2)
						self.faces += [(f.x, f.y, f.w, f.h, inv_label_maps[predY[i]], None)]


				cv2.imshow("camera", pframe)

			
			rval, frame = vc.read()

			oper = cv2.waitKey(1) & 0xFF
			if oper == ord('p'):
				t = time.time()
				iname = datetime.datetime.fromtimestamp(t).strftime('%Y-%m-%d %H.%M.%S')
				print(iname)
				#iname = "/Users/GDog/Desktop/" + iname + ".png"
				self.images = pframe
				vc.release()
				cv2.destroyAllWindows()
				break
Example 4
def draw_face(image):
    '''append the first detected face to [boxes] and draw its rectangle'''
    global boxes

    # detect faces
    faces = utils.detect_faces(image)

    if len(faces) > 0:
        face = faces[0]
        utils.draw_rect(image, face)
        boxes.append(face)
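A minimal usage sketch for draw_face, assuming a standard OpenCV capture loop (the window name and the module-level boxes list are illustrative):

import cv2

boxes = []
cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    draw_face(frame)
    cv2.imshow("faces", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()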
Example 5
def main():
	# start testing
	for file in dirs:
		image_path = os.path.join(text_floder, os.path.basename(file))
		#print(os.path.basename(file))
		#print(image_path)
		# load the RGB image and the grayscale image
		rgb_image = load_image(image_path, grayscale=False, color_mode="rgb")
		gray_image = load_image(image_path, grayscale=True, color_mode="grayscale")
		# squeeze out the singleton channel dimension (keep only height and width)
		gray_image = np.squeeze(gray_image)
		gray_image = gray_image.astype("uint8")
		faces = detect_faces(face_detection, gray_image)
		#print("-----")
		#print(len(faces))
		for face_coordinates in faces:
			# get the two diagonal corners of the face rectangle in the image
			x1, x2, y1, y2 = get_coordinates(face_coordinates)
			#print(x1, x2, y1, y2)
			# crop the face pixel array
			gray_face = gray_image[y1:y2, x1:x2]

			try:
				# resize the face to the input size the model expects
				gray_face = cv2.resize(gray_face, emotion_target_size)
			except cv2.error:
				continue

			gray_face = preprocessing_input(gray_face)
			gray_face = np.expand_dims(gray_face, 0)
			gray_face = np.expand_dims(gray_face, -1)
			# print(gray_face.shape)

			# predict the emotion class
			emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
			emotion_text = emotion_labels[emotion_label_arg]

			color = (255, 0, 0)
			# draw the bounding box
			draw_bounding_box(face_coordinates, rgb_image, color)
			# draw the emotion label
			draw_text(face_coordinates, rgb_image, emotion_text, color, 0, face_coordinates[3]+30, 1, 2)

			# convert the image to BGR before saving with OpenCV
			bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
			cv2.imwrite("./pic_test/" + "predict" + os.path.basename(file), bgr_image)

			cv2.waitKey(1)
			cv2.destroyAllWindows()

	print("已识别%d张图片" % int(len(dirs)))
Example 6
	def start(self):
		cv2.namedWindow("preview")
		vc = cv2.VideoCapture(0)

		videoflag = 0
		rval, frame = vc.read()
		s = time.time()
		a = time.time()

		while True:
			if frame is not None:
				pframe = cv2.flip(cv2.resize(frame, (960, 720)), 1)
				p = Photo()
				faces = detect_faces(detector, pframe)
				faces = sorted(faces, key=lambda tup: tup[0])
				for (x, y, w, h) in faces:
					p.add_face(x, y, w, h)
					# Draw a rectangle around the faces
					cv2.rectangle(pframe, (x, y), (x+w, y+h), (255, 0, 0), 2)
				p.image = pframe
				p.extract_features()
				if len(p.faces) != 0:
					X = np.array([f.feature for f in p.faces])
					predY = model.predict(X)

					for (i, f) in enumerate(p.faces):
						if inv_label_maps[predY[i]] == 'T':
							cv2.rectangle(pframe, (f.x, f.y), (f.x+f.w, f.y+f.h), (0, 255, 0), 2)

					cv2.imshow("image", pframe)

			rval, frame = vc.read()
			oper = cv2.waitKey(1) & 0xFF
			if oper == ord('q'):
				break
			if oper == ord('p'):
				t = time.time()
				iname = datetime.datetime.fromtimestamp(t).strftime('%Y-%m-%d %H.%M.%S')
				print(iname)
				#iname = "/Users/GDog/Desktop/" + iname + ".png"
				self.images += [pframe]
				vc.release()
				break
Example 7
def save_image(im):
    faces = utils.detect_faces(im)
    save_path = PATH_SAVE
    for x, y, w, h in faces:
        face = im[y: y + h, x: x + w, :]
        file_name = '%s.jpg' % time.time()
        if CLASSIFY:
            face_data = imutils.resize(face, 128, 128)
            if face_data.shape[0] == 128 and face_data.shape[1] == 128:
                prediction = classifier.predict(np.array([face_data]))
                if np.max(prediction[0]) < FACE_MIN or np.max(prediction[0]) > FACE_MAX:
                    continue
                index = np.argmax(prediction[0])
                subdir = utils.NAMES_EN[index]
                save_path = os.path.join(CLASSIFY_PATH, subdir)
        utils.ensure_dir(save_path)
        final_path = os.path.join(save_path, file_name)
        cv2.imwrite(final_path, face)
        # print('File %s saved.' % file_name)
    return im
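utils.ensure_dir is assumed to create the target directory on demand; a minimal sketch:

import os

def ensure_dir(path):
    # create the directory (and any missing parents) if it does not exist yet
    os.makedirs(path, exist_ok=True)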
Example 8
def another_test():
    utils.load_shape_predictor("dlib/shape_predictor_68_face_landmarks.dat")

    for img, p in utils.images_inside("uffa"):
        fast = cv2.FastFeatureDetector_create()
        # kp = fast.detect(img, None)

        # draws:
        face = utils.detect_faces(img, detector="dlib")[0]
        # utils.draw_rect(img, face, color=Colors.green)

        pts = utils.detect_landmarks(img, face)
        pts = [pts[0], pts[3], pts[6], pts[10], pts[20], pts[22], pts[35]]
        # utils.draw_points(img, pts)

        # img = cv2.drawKeypoints(img, kp, None, color=Colors.cyan)

        keypoints = []

        for p in pts:
            roi = Region(p[0] - 10, p[1] - 10, 20, 20)
            patch = utils.crop_image(img, roi)
            keypoints.append(fast.detect(patch, None))

        # for kp in keypoints[2]:
        #     print(kp)
        #     # img = cv2.drawKeypoints(img, kp, None, color=Colors.cyan)

        # map each patch's keypoints back to image coordinates
        # (each patch's origin is the landmark offset by -10 in x and y)
        for p, kp in zip(pts, keypoints):
            for k in kp:
                x = int(k.pt[0] + p[0] - 10)
                y = int(k.pt[1] + p[1] - 10)
                utils.draw_point(img, x, y, radius=1)

        while True:
            cv2.imshow("window", show_properly(img))
            key = cv2.waitKey(20) & 0xFF

            if key == Keys.ESC:
                break
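Example 8 crops a 20x20 patch around selected landmarks and runs FAST inside each patch. The Region and crop_image helpers come from the project's utilities; a minimal sketch of what they are assumed to do:

class Region:
    def __init__(self, x, y, w, h):
        self.x, self.y, self.w, self.h = int(x), int(y), int(w), int(h)

def crop_image(img, region):
    # clamp the region to the image bounds, then slice the patch
    h, w = img.shape[:2]
    x0, y0 = max(region.x, 0), max(region.y, 0)
    x1, y1 = min(region.x + region.w, w), min(region.y + region.h, h)
    return img[y0:y1, x0:x1]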
Example 9
        exit()

    # resize the captured frame for face detection to increase processing speed
    RESIZED_FRAME = cv2.resize(
        FRAME, FRAME_SCALE, interpolation=cv2.INTER_AREA)
    RESIZED_FRAME = cv2.flip(RESIZED_FRAME, 1)

    PROCESSED_FRAME = RESIZED_FRAME
    # Skip a frame if no face was found in the last frame
    if FRAME_SKIP_RATE == 0:
        FACEFOUND = False
        for r in CURRENT_ROTATION_MAP:
            ROTATED_FRAME = ndimage.rotate(RESIZED_FRAME, r)
            GRAY_FRAME = cv2.cvtColor(ROTATED_FRAME, cv2.COLOR_BGR2GRAY)

            faces = utils.detect_faces(GRAY_FRAME)

            if len(faces):
                for f in faces:
                    # Crop out the face
                    x, y, w, h = f
                    CROPPED_FACE = GRAY_FRAME[y:y + h, x:x + w]
                    CROPPED_FACE = cv2.flip(CROPPED_FACE, 1)

                    name_to_display = predict(CROPPED_FACE)

                    cv2.rectangle(ROTATED_FRAME, (x, y), (x + w, y + h),
                                  (0, 255, 0))
                    cv2.putText(ROTATED_FRAME, name_to_display, (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0))
Example 10
    size = min(h, w)
    origin_y = 0
    origin_x = (w - h) // 2
    if h > w:
        origin_y = (h - w) // 2
        origin_x = 0

    pix = pix[origin_y:origin_y + size, origin_x:origin_x + size, :]
    pix = cv2.resize(pix, (ishape[1], ishape[0]))
    pix = pix / np.max(pix)
    pix = pix * 255

    # print('Start: {}'.format(datetime.now().time()))
    imgpix = tf.constant(value=pix, dtype='float32')
    bbox2d = detect_faces(interpreter=interpreter,
                          input_details=input_details,
                          output_details=output_details,
                          pix=imgpix)
    # print('End:   {}'.format(datetime.now().time()))

    bboxes = []
    for y1, x1, y2, x2, _ in bbox2d:
        # expand each detection to a square box centered on the face
        h, w = y2 - y1, x2 - x1
        y, x = y1 + 0.5 * h, x1 + 0.5 * w
        edge = 1.0 * max(h, w)
        y1, x1, y2, x2 = int(y - 0.5 * edge), int(x - 0.5 * edge), int(
            y + 0.5 * edge), int(x + 0.5 * edge)

        # skip boxes that fall outside the image bounds
        if y1 < 0 or x1 < 0 or y2 > ishape[0] or x2 > ishape[1]:
            continue

        iobject = pix[y1:y2, x1:x2, :]
Example 11
import imutils
import numpy as np
from PIL import Image

import utils
from v1.classifier import FaceClassifier

IMAGE_PATH = utils.root_path('doc/images/test2.jpg')
PREDICTION_POSTFIX = '_prediction'
SAVE_PREDICTION = True
PROB_THRESHOLD = 0.5

if __name__ == '__main__':
    classifier = FaceClassifier()
    target_W = utils.IM_WIDTH
    target_H = utils.IM_HEIGHT
    im = Image.open(IMAGE_PATH)
    im = np.array(im)
    faces = utils.detect_faces(im)

    if len(faces):
        xs, ls = [], []
        for (x, y, w, h) in faces:
            face = im[y: y + h, x: x + w, :]
            face = imutils.resize(face, target_W, target_H)
            if face.shape[0] == target_H and face.shape[1] == target_W:
                xs.append(face)
                ls.append((x, y + 5 if y < 20 else y - 5))
        prediction = classifier.predict(np.array(xs))
        result = utils.parse_predict(prediction, utils.NAMES_EN, unique=False)
        result = filter(lambda r: r[1] > PROB_THRESHOLD, result)
        result = ['%s: %.2f' % (n, p) for n, p in result]
        if result:
            for location, text, (x, y, w, h) in zip(ls, result, faces):
Example 12
# ----------------------------------------------------------------------

parser = argparse.ArgumentParser(prog='score',
                                 parents=[option_parser],
                                 description='Detect faces in an image.')

parser.add_argument('image', type=str, help='image path or URL')

args = parser.parse_args()

# Wrap face detection parameters.

face_params = FaceParams(args.scaleFactor, args.minNeighbors, args.minSize)

# ----------------------------------------------------------------------
# Face detection
# ----------------------------------------------------------------------

image = read_cv_image_from(
    args.image if is_url(args.image) else get_abspath(args.image))

faces = detect_faces(image, face_params=face_params)

print("Found {0} faces!".format(len(faces)))

result = mark_faces(image, faces)

image, result = convert_cv2matplot(image, result)

plot_side_by_side_comparison(image, result, rightlabel="Detected Faces")
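FaceParams simply bundles the three cv2.CascadeClassifier.detectMultiScale parameters used downstream; a plausible definition:

from collections import namedtuple

# scaleFactor, minNeighbors and minSize map directly onto detectMultiScale arguments
FaceParams = namedtuple('FaceParams', ['scaleFactor', 'minNeighbors', 'minSize'])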
Example 13
	rval, frame = vc.read()
	s = time.time()
	a = time.time()

	while True:

		if frame is not None:
			#print(frame.shape)
			print("image extract:", time.time() - a)
			a = time.time()
			pframe = cv2.flip(cv2.resize(frame, (960, 720)), 1)
			print("flip+resize: ", time.time() - a)
			a = time.time()
			#detector part
			p = Photo()
			faces = detect_faces(detector, pframe)
			faces = sorted(faces, key=lambda tup: tup[0])
			for (x, y, w, h) in faces:
				p.add_face(x, y, w, h)
				# Draw a rectangle around the faces
				cv2.rectangle(pframe, (x, y), (x+w, y+h), (255, 0, 0), 2)
			print "detect: ", time.time()-a
			a = time.time()
			#filter part
			p.image = pframe
			p.extract_features()
			if len(p.faces) != 0:
				X = np.array([f.feature for f in p.faces])
				predY = model.predict(X)

				for (i, f) in enumerate(p.faces):
Example 14
IMG_PATH = 'images'

cwd = os.getcwd()
print("Demonstrate face detection using images found in\n{}\n".format(
    os.path.join(cwd, IMG_PATH)))

print(
    "Please close each image (Ctrl-w) to proceed through the demonstration.\n")

imagePaths = glob.glob(os.path.join(IMG_PATH, "*"))
imagePaths.sort()

# ----------------------------------------------------------------------
# Face detection
# ----------------------------------------------------------------------

for imagePath in imagePaths:

    image = read_cv_image_from(imagePath)

    faces = detect_faces(image)

    print("Found {0} faces!".format(len(faces)))

    result = mark_faces(image, faces)

    image, result = convert_cv2matplot(image, result)

    plot_side_by_side_comparison(image, result, rightlabel="Detected Faces")
Example 15

# ----------------------------------------------------------------------
# Image demo
# ----------------------------------------------------------------------

print("\nDemonstrate face recognition using images found in\n{}\n".format(IMG_PATH))
print("Please close each image (Ctrl-w) to proceed through the demonstration.")
for imagePath in list(list_files(IMG_PATH)):

    print("\nRecognising faces in the image:\n  {}".format(imagePath))
    image = read_cv_image_from(imagePath)
    result = image.copy()
    rgb = convert_cv2matplot(image)
    print("    Detecting faces in the image ...")
    boxes = detect_faces(rgb)
    cnt = len(boxes)
    print("        {} face{} found!".format(cnt, 's' if cnt > 1 else ''))
    print("    Calculating the face encodings ...")
    encodings = encode_faces(boxes, rgb)
    print("    Comparing found faces with known faces ...")
    for (box, encoding) in zip(boxes, encodings):
        name = match_face(encoding, candidate_encodings, candidate_names, cnt_dict)
        if name is not None:
            mark_face(result, box, name)

    image, result = convert_cv2matplot(image, result)

    plot_side_by_side_comparison(image, result, rightlabel="Recognised Faces")
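The detect_faces/encode_faces pair in this example matches the API of the face_recognition package. A sketch of how these helpers might wrap it (an assumption; the original utilities are not shown):

import face_recognition

def detect_faces(rgb):
    # returns a list of (top, right, bottom, left) boxes
    return face_recognition.face_locations(rgb)

def encode_faces(boxes, rgb):
    # one 128-dimensional encoding per detected box
    return face_recognition.face_encodings(rgb, known_face_locations=boxes)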

Example 16
        image_list = [image_path for image_path in sorted(glob.glob(image_prefix + "/*.[Jj][Pp][Gg]"))]
    else:
        image_list = [sys.argv[2]]
    with open(model_path, "rb") as f:
        model = pickle.load(f)

    for image_path in image_list:
        p = Photo()
        # before applying the filter
        image1 = cv2.imread(image_path)
        # check for a failed read before resizing
        if image1 is None:
            print("no image", image_path)
            continue
        image1 = cv2.resize(image1, (960, 720))
        detector = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
        faces = detect_faces(detector, image1)
        print "find %d faces in image %s " % (len(faces), image_path)
        faces = sorted(faces, key=lambda tup: tup[0])
        for (x, y, w, h) in faces:
            p.add_face(x, y, w, h)
            # Draw a rectangle around the faces
            cv2.rectangle(image1, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # after applying the filter
        # read image again to remove rectangles
        image2 = cv2.imread(image_path)
        image2 = cv2.resize(image2, (960, 720))
        p.read_image(image_path)
        p.extract_features()
        X = np.array([f.feature for f in p.faces])
        predY = model.predict(X)
Example 17
def build_trainset_auto(src="dataset", dst="trainset", debug=False):
    '''build a trainset automatically from an ibug-like dataset;
    the images are taken from the [src] folder and saved to the [dst] folder'''
    utils.init_face_detector(True, 150)
    quality = [int(cv2.IMWRITE_JPEG_QUALITY), 50]

    # file count for naming
    count = int(utils.count_files_inside(dst) / 8)

    for img, lmarks, path in utils.ibug_dataset(src):
        h, w = img.shape[:2]

        # crop a bigger region around landmarks
        region = utils.points_region(lmarks)
        scaled = region.scale(1.8, 1.8).ensure(w, h)

        img = utils.crop_image(img, scaled)

        # detect faces
        face = utils.prominent_face(utils.detect_faces(img))

        # if the cnn fails, try with dlib
        if face is None:
            faces = utils.detect_faces(img, detector="dlib")

            # ..if dlib also fails, take the region around the landmarks
            if not faces:
                face = region.copy()
            else:
                face = utils.prominent_face(faces)

        # edit landmarks according to scaled region
        lmarks = adjust_landmarks(scaled, lmarks)

        # augmentations
        i = 0
        for image, landmarks, box in augment_data(img, face, lmarks):
            i = i + 1

            if debug:
                utils.draw_rect(image, box, color=Colors.yellow)
                utils.draw_points(image, landmarks, color=Colors.purple)
                name = f"image{i}"
                utils.show_image(show_properly(image), window=name)
            else:
                # save annotation and image
                ipath = os.path.join(dst, f"face{count}_{i}.jpg")
                apath = os.path.join(dst, f"face{count}_{i}.ann")
                cv2.imwrite(ipath, image, quality)
                Annotation(apath, box.as_list()[:4], landmarks).save()

        if debug:
            utils.draw_rect(img, face, color=Colors.red)
            utils.draw_points(img, lmarks, color=Colors.green)
            utils.show_image(show_properly(img))
        else:
            # save image and annotation
            ipath = os.path.join(dst, f"face{count}.jpg")
            apath = os.path.join(dst, f"face{count}.ann")
            cv2.imwrite(ipath, img, quality)
            Annotation(apath, face.as_list()[:4], lmarks).save()

        count = count + 1

        # progress info
        print("{} processed: {}".format(count, path))
Example 18
    height = int(cap.get(4))
    fps = 24  # video frame rate

    # specify the codec used to write video: I420 for avi, MJPG for mp4
    videoWriter = None

    classifier = None

    target_W = utils.IM_WIDTH
    target_H = utils.IM_HEIGHT

    while cap.isOpened():
        ret, frame = cap.read()
        if not isinstance(frame, np.ndarray):
            continue
        faces = utils.detect_faces(frame)
        if len(faces):
            xs, ls = [], []
            for (x, y, w, h) in faces:
                face = frame[y:y + h, x:x + w, :]
                face = imutils.resize(face, target_W, target_H)
                if face.shape[0] == target_H and face.shape[1] == target_W:
                    xs.append(face)
                    ls.append((x + 3, y + 15 if y < 20 else y - 5))
            if xs:
                if CLASSIFY:
                    classifier = classifier or FaceClassifier()
                    prediction = classifier.predict(np.array(xs))
                    result = utils.parse_predict(prediction, utils.NAMES_EN)
                    result = filter(
                        lambda r: r[1] > PROB_THRESHOLD and r[0] != 'unknown',