Code example #1
File: main.py Project: Flin42/CryptoPi-Hardware
def getTap():  #this function will loop
    reader, tag = NFCReader.readCard()

    #Take picture to verify
    displayText("Taking Picture", "")
    GPIO.cleanup()

    camera.takePicture()
    displayText("Picture Taken", "")

    #if no face, take picture again
    hasface = face.hasFace()
    while not hasface:
        displayText("Please have", "Face in Picture")
        camera.takePicture()
        hasface = face.hasFace()

    #Choose the amount to send through button presses
    money = getMoney()
    dic = {
        "tag": str(tag),
        "reader": str(reader),
        "money": money,
        "file": pictureUrl
    }
    print("sending: " + str(dic))
    # requests serializes the dict to JSON itself, so no dumps/loads round trip is needed
    response = requests.post(url, json=dic)

    print("Status code: ", response.status_code)
Code example #2
def Add_User(sound_enable=True):
    print('add user')
    detect.Release_Cam()                    # free the camera so dataset capture can use it
    fd.Create_Dataset(sound_enable)         # record face images for the new user
    ft.Train_LBPH()                         # retrain the LBPH recognizer on the updated dataset
    detect.Reset_Cam()
    recognizer.read('trainer/trainer.yml')  # reload the freshly trained model
Code example #3
File: main.py Project: nguyentrung0904/faceunlocker
def main():

    # sensor = Sensor(17)
    config = ConfigLoader("config//development.ini")
    print ("start face detector")
    fd = FaceDetection(patternIds=config.imageid, patternUrls=config.imagesurl)
    print ("starting done")
    # lcd = Lcd()
    mqtt = MQTTCenter(host=config.mqtt_host, pubid=config.mqtt_id)
    while True:

        # sensor.waitFor(GPIO.RISING);
        # print "Sensor is %d" % (sensor.getState())

        # lcd.clear()
        # lcd.display_string("Capturing in: ", 1)
        # lcd.display_string("seconds", 3)
        # i = 1
        # for i in range(1, 5, 1):
        #     time.sleep(1)
        #     lcd.display_string(i, 2)

        # Capture picture function returns path to the new picture

        # Ankan: write your function and use it here, assign the return path into the filePath variable
        filePath = input("Type in the file name: ")
        # filePath = "images//ramsey.jpg"
        key = filePath
        # Upload to S3
        filePath = "images//%s" % (filePath)
        file = open(filePath, 'rb')  # binary mode: the file is an image being uploaded

        # key = file.name.split("//")[1]

        imageUpload = ImageUpload(aws_access_key_id=config.AWS_ACCESS_KEY_ID,
                                  aws_secret_access_key=config.AWS_SECRET_ACCESS_KEY,
                                  bucketname=config.bucketname)
        print ("Uploading file")
        newImageURL = imageUpload.upload_to_s3(file, key)
        if newImageURL != "":
            print 'File uploaded'
        else:
            print 'File upload failed...'
        # Using MS API to compare
        confidence = fd.identifyFace(newImageURL)
        print ("Confidence: " + str(confidence))

        # MQTT Notify
        if confidence < 0:
            print ("fake images")
        elif confidence < 1:
            print (time.time())
            mqtt.publish(topic="askforpermission", message=newImageURL)
            while mqtt.currentKey != "":
                mqtt.client.loop()
                # print(time.time())
        else:
            print ("Door opened")
Code example #4
def draw_all_results(self, image_number):
    showing_image = cv.imread(self.full_results[image_number].image_name)
    for face in self.full_results[image_number].faces:
        face.draw_face(showing_image, (0, 0, 0))
    for mask in self.full_results[image_number].mask_results:
        mask.draw_mask(showing_image, (0, 255, 0))
    cv.imshow("Image Detections", showing_image)
    cv.waitKey(0)
    return showing_image
Code example #5
def start(self):
    self.add_text("Converting video to frames...")
    self.eval_id = video_to_frames_with_db.convert_video_to_frames(
        self.user_name, self.video_name, "", self.video_type)
    self.add_text("FaceDetection...")
    results_folder = self.user_name + "_" + str(
        self.video_type) + "_" + str(self.eval_id)
    FaceDetection.face_detect_main(self.eval_id, results_folder,
                                   results_folder)
    self.add_text("UpperBodyDetection...")
    UpperBodyDetection.upperbody_detect_main(self.eval_id, results_folder,
                                             results_folder)
    self.add_text("LowerBodyDetection...")
    LowerBodyDetection.lowerbody_detect_main(self.eval_id, results_folder,
                                             results_folder)
    self.report_card = Scoring.score(self.user_name, self.video_type)
Code example #6
def newUser():
    name = input("Enter Your Name: ")
    dirs = os.listdir(training_folder)
    os.makedirs(training_folder + '/' + name + '@' + str(len(dirs) + 1))
    cap = cv2.VideoCapture(0)
    i = 0
    while True:
        ret, frame = cap.read()
        test = frame.copy()
        frame, frame_crop, rect = FaceDetection.detect_faces(
            FaceDetection.lbp_face_cascade, frame)
        cv2.imshow('Smile :) with different moods', frame)
        cv2.waitKey(50)
        if frame_crop != "None" and i < 100:
            print(training_folder + "/" + name + '@' + str(len(dirs) + 1) +
                  '/' + str(i) + '.jpg')
            cv2.imwrite(
                training_folder + "/" + name + '@' + str(len(dirs) + 1) + '/' +
                str(i) + '.jpg', frame_crop)
            #cv2.imwrite("sample.jpg",test)
            i += 1
        elif i >= 100:
            break

    cap.release()
    cv2.destroyAllWindows()
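FaceDetection.detect_faces and its lbp_face_cascade are project code that is not shown. A plausible sketch of the contract the loop assumes (annotated frame, face crop or the string "None", and the face rectangle); an LBP cascade would be loaded the same way as the stock Haar cascade used here as a stand-in:

import cv2

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

def detect_faces(cascade, frame, scale_factor=1.1):
    # Sketch: return (annotated frame, face crop or "None", rect or None).
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=scale_factor,
                                     minNeighbors=5)
    if len(faces) == 0:
        return frame, "None", None
    x, y, w, h = faces[0]
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return frame, frame[y:y + h, x:x + w], (x, y, w, h)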
Code example #7
def follow_gaze(cam, GazeNet):
	center_of_face = []
	while len(center_of_face) == 0:
		img = get_remote_image(cam)
		fd = FD.FaceDetector(img, True)
		center_of_face = fd.detectCenterFaces()
		if len(center_of_face) > 0:
			# get gaze directions
			gaze_coords = GazeNet.getGaze(center_of_face, img)
			cv2.circle(img, (gaze_coords[0], gaze_coords[1]), 10, (0,0,255))
			cv2.imshow("Image", img)
			cv2.waitKey(0)
			cv2.destroyAllWindows()
			# use gaze directions to look and point in that direction
			# gaze predicted location
			look_at_gazed(gaze_coords)
			time.sleep(1.5)
			# detect object
			closest_ball = find_closest_object(cam)
			if closest_ball is None:
				# TODO: let it say it, and maybe look back at the parent and/or try again
				print("I don't see what you are looking at")
				tts_p.say("I don't see what you are looking at")
			# if detected:
			# TODO: set a maximum distance.
			if closest_ball is not None:
				look_at_gazed(closest_ball)
				point_at_gazed(gaze_coords, cam)
Code example #8
def run():
    fd = FD.FaceDetection()
    fr = VR.VggRecogniser()

    fout = createOutFile(VIDEO_PATH, OUT_DIR)
    if not os.path.exists(VIDEO_PATH):
        print('video does not exist...')
        return
    video_capture = cv2.VideoCapture(VIDEO_PATH)
    if not video_capture.isOpened():
        print('video reading error...')
        return
    fps = int(video_capture.get(cv2.CAP_PROP_FPS))
    print('fps:', fps)
    num_frame = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
    print('frame count:', num_frame)

    for count in range(num_frame):
        ret, frame = video_capture.read()
        if not ret or count % fps != 0:
            continue

        _, scores, bboxes = fd.dectectFace(frame)
        imgs = []
        coors = []
        for i in range(len(scores)):
            if scores[i] >= 0.8:
                y1, x1, y2, x2 = bboxes[i]
                y1 = int(y1 * frame.shape[0])
                y2 = int(y2 * frame.shape[0])
                x1 = int(x1 * frame.shape[1])
                x2 = int(x2 * frame.shape[1])
                w = x2 - x1
                h = y2 - y1
                if (w > frame.shape[1] * THRES_SIZE
                        and h > frame.shape[0] * THRES_SIZE):
                    face = frame[y1:y2, x1:x2, :]
                    imgs.append(face)
                    coors.append((x1, y1, x2 - x1, y2 - y1))
        if len(imgs) > 0:
            num, res = fr.recognise(imgs)
            ss = count // fps   # integer division: whole seconds into the video
            mm = ss // 60
            ss = ss % 60
            for k in range(num):
                uid, scr = res[k]
                if scr >= THRES_REG:
                    coor = coors[k]
                    fout.write(
                        '%02d:%02d\t%d %d %d %d\t%s\n' %
                        (mm, ss, coor[0], coor[1], coor[2], coor[3], uid))
                    print('%02d:%02d\t%d %d %d %d\t%s' %
                          (mm, ss, coor[0], coor[1], coor[2], coor[3], uid))
        print('.')

    fout.close()
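createOutFile is referenced but not defined in the excerpt. A minimal sketch, assuming it derives a text-file name from the video path inside the output directory:

import os

def createOutFile(video_path, out_dir):
    # Sketch: one .txt per video, named after the video file.
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    base = os.path.splitext(os.path.basename(video_path))[0]
    return open(os.path.join(out_dir, base + '.txt'), 'w')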
Code example #9
    def initUI(self, n):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)

        icn = QtGui.QIcon()
        icn.addPixmap(QtGui.QPixmap("alert.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.setWindowIcon(icn)


        buttonReply = QMessageBox.question(
            self, 'ALERT!!!', "Would you like to open the webcam?",
            QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if buttonReply == QMessageBox.Yes:
            if n == 1:
                FaceDetection.faceDetection()
            elif n == 2:
                dataSetGenerator.DSGen()
                trainer.train()
            else:
                detector.predictor()
        self.show()
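For context, a widget like this is normally launched through a QApplication. A minimal sketch, assuming PyQt5 and using AlertWindow as a hypothetical name for the class that defines initUI above:

import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
window = AlertWindow()   # hypothetical QWidget subclass containing initUI
window.initUI(1)         # n=1 -> run face detection on confirmation
sys.exit(app.exec_())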
Code example #10
def on_pb_facedetection(self):
    names = FaceDetection.predict(self.fileName)
    w = self.gv_facedetection_source.width()
    h = self.gv_facedetection_source.height()
    self.gv_facedetection_source.setScene(
        self.show_image("./resource/face_detection.jpg", w, h))
    self.list_ogrenci.clear()
    for n in names:
        item = QListWidgetItem(n)
        self.list_ogrenci.addItem(item)
Code example #11
File: run.py Project: ChenmjSysu/Detection
def run(capture, captured_img_save_folder, face_img_save_folder):
    print(datetime.datetime.now(), end=' ')
    captured_img_filepath, img = VideoCapture.WriteImage(
        capture, captured_img_save_folder)
    face_detect_result = FaceDetection.FaceDetect(
        img, os.path.basename(captured_img_filepath.split(".jpg")[0]),
        face_img_save_folder)
    if face_detect_result is None:
        print "OK"
        return

    print "%d faces detected" % len(face_detect_result)
Code example #12
def emotionDetection():
    global vs, outputFrame, lock
    while True:
        frame = vs.read()

        #frame , objectSet = ObjectDetection.objectDetection(frame)
        frame, emotionList = FaceDetection.getFaceROI(frame)
        SentimentAnalysis.storeSentimentScore(emotionList)

        frame = imutils.resize(frame, width=700)

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
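The outputFrame/lock pair is the usual handoff to a streaming endpoint. A sketch of the matching MJPEG generator (Flask-style multipart streaming is assumed; it is not part of the excerpt):

import cv2

def generate():
    # Sketch: encode the latest frame under the lock and yield it as a
    # multipart JPEG chunk for an HTTP motion-JPEG stream.
    global outputFrame, lock
    while True:
        with lock:
            if outputFrame is None:
                continue
            ok, encoded = cv2.imencode(".jpg", outputFrame)
        if not ok:
            continue
        yield (b"--frame\r\n"
               b"Content-Type: image/jpeg\r\n\r\n" +
               bytearray(encoded) + b"\r\n")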
Code example #13
def RecognizeFace(image, faceCascade, eyeCascade, faceSize, threshold,
                  recognizer):
    found_faces = []

    gray, faces = FaceDetection.detectFaces(image,
                                            faceCascade,
                                            eyeCascade,
                                            returnGray=1)

    # If faces are found, try to recognize them
    for ((x, y, w, h), eyedim) in faces:
        label, confidence = recognizer.predict(
            cv2.resize(FaceDetection.levelFace(gray, ((x, y, w, h), eyedim)),
                       faceSize))
        print(label)
        print(confidence)
        # note that for some distributions of python-opencv, the predict function
        # returns the label only.
        #label = recognizer.predict(cv2.resize(detect.levelFace(gray, ((x, y, w, h), eyedim)), faceSize))
        #confidence = -1
        if confidence < threshold:
            found_faces.append((label, confidence, (x, y, w, h)))

    return found_faces
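A hypothetical call site for RecognizeFace. The cascade files ship with opencv-python, the LBPH recognizer requires opencv-contrib-python, and the model path, face size, and threshold here are assumptions:

import cv2

faceCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
eyeCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_eye.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer.yml")   # assumed path to a previously trained model

image = cv2.imread("test.jpg")
found = RecognizeFace(image, faceCascade, eyeCascade,
                      faceSize=(100, 100), threshold=80, recognizer=recognizer)
print(found)   # [(label, confidence, (x, y, w, h)), ...]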
Code example #14
def detect_face_with_range(start_index, end_index):
    for now_index in range(start_index, end_index + 1):
        file_name = str(now_index).zfill(5) + ".jpg"
        if not os.path.exists(os.path.join(path, file_name)):
            continue

        pic = cv2.imread(os.path.join(path, file_name))

        find_results = []
        gray = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)

        img = to_rgb(gray)
        height, width = gray.shape  # numpy shape is (rows, cols)

        bounding_boxes, _ = FaceDetection.detect_face(img, minsize, pnet, rnet,
                                                      onet, threshold, factor)
        print("Detect {} face in {}".format(len(bounding_boxes), file_name))

        number_of_faces = bounding_boxes.shape[0]  # number of faces

        index = 0
        img_dir = os.path.join(save_path, file_name)
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        face_positions = []
        for face_position in bounding_boxes:
            face_position = face_position.astype(int)
            # Clamp the box to the image: indices 1/3 are y (rows), 0/2 are x (cols).
            face_position[1] = 0 if face_position[1] < 0 else face_position[1]
            face_position[3] = height - 1 if face_position[3] > height else face_position[3]
            face_position[0] = 0 if face_position[0] < 0 else face_position[0]
            face_position[2] = width - 1 if face_position[2] > width else face_position[2]

            cv2.rectangle(pic, (face_position[0], face_position[1]),
                          (face_position[2], face_position[3]), (0, 255, 0), 2)

            crop = img[face_position[1]:face_position[3],
                       face_position[0]:face_position[2], ]
            crop = cv2.resize(crop, (96, 96), interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(os.path.join(img_dir, str(index) + ".png"), crop)
            face_positions.append((0.8, face_position[0], face_position[1],
                                   face_position[2], face_position[3]))
            index += 1

        all_face_positions[file_name] = face_positions
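to_rgb is used above but not defined in the excerpt. A minimal sketch, assuming it simply replicates the grayscale plane so the MTCNN detector receives a 3-channel image:

import numpy as np

def to_rgb(gray):
    # Sketch: (H, W) grayscale -> (H, W, 3) by stacking the single channel.
    return np.stack([gray, gray, gray], axis=-1)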
Code example #15
def predict(test_img):
    img = test_img
    img, face, rect = FaceDetection.detect_faces(
        FaceDetection.haar_face_cascade, img, 1.1)
    if face == "None":
        pass
    else:
        face = cv2.cvtColor(np.array(face, dtype=np.uint16),
                            cv2.COLOR_BGR2GRAY)
        label, conf = face_recognizer.predict(np.array(face, dtype=np.uint16))
        if label == -1:
            label_text = "unknown"
        else:
            label_text = names[label]
    #print(face)
        draw_rectangle(img, rect)
        draw_text(img, label_text, rect[0], rect[1] - 5)

# print(face)
    return img
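draw_rectangle and draw_text are project helpers that are not shown. Plausible OpenCV-based sketches, with color and font chosen arbitrarily:

import cv2

def draw_rectangle(img, rect):
    x, y, w, h = rect
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

def draw_text(img, text, x, y):
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)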
Code example #16
def newUserTest():
    cap = cv2.VideoCapture(0)
    os.system('cls')
    previous_label = ""
    while True:
        ret, frame = cap.read()
        #test = frame.copy()
        frame, frame_crop, rect = FaceDetection.detect_faces(
            FaceDetection.haar_face_cascade, frame, 1.1)
        if frame_crop == "None":
            pass
        else:

            frame_crop = cv2.cvtColor(np.array(frame_crop, dtype=np.uint16),
                                      cv2.COLOR_BGR2GRAY)
            label, conf = face_recognizer.predict(
                np.array(frame_crop, dtype=np.uint16))
            if label == -1:
                label_text = "unknown"
            else:
                label_text = names[label]
            #label_text = names[label]
            # print(face)
            draw_rectangle(frame, rect)
            global pass_name
            if previous_label != label_text:
                os.system('cls')
                previous_label = label_text
                print(label_text)
                if label_text == pass_name and pass_name != '':
                    sys.exit()
            draw_text(frame, label_text, rect[0], rect[1] - 5)
        cv2.imshow('Smile :) with different moods', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            #cv2.imwrite("sample.jpg",test)
            break

    cap.release()
    cv2.destroyAllWindows()
Code example #17
File: views.py Project: nguyenthanhan1001/FaceAPIs
def SSD_detect(filename):
    try:
        img = skimage.io.imread(filename)
        _ssd_fd = FaceDetection.FaceDetection()
        cc, scores, bboxes = _ssd_fd.dectectFace(img)
        res = {}
        if len(scores) < 1:
            res = {'code':config.CODE_NON_FACE}
        else:
            #visualization.bboxes_draw_on_img(img, cc, scores, bboxes, visualization.colors, class_names=['none-face', 'face'])
            #skimage.io.imsave(tmp_filename, img)
            bboxes = normalizeBBoxes(bboxes, img.shape[1], img.shape[0])

            res['code'] = config.CODE_SUCCESS
            res['num'] = len(scores)
            res['coordinates'] = []
            for ii in range(len(scores)):
                if scores[ii] >= config.SCORE_THRES:
                    y1, x1, y2, x2 = bboxes[ii]
                    res['coordinates'].append("%d,%d,%d,%d"%(x1, y1, x2 - x1, y2 - y1))
            res["url"] = filename
    except Exception:
        res = {'code':config.CODE_SYS_ERR}
    return res
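normalizeBBoxes is not shown. Since the detector returns relative [y1, x1, y2, x2] boxes (code example #8 scales them by the frame shape), a plausible sketch converts them to pixel coordinates:

def normalizeBBoxes(bboxes, img_w, img_h):
    # Sketch: relative [y1, x1, y2, x2] -> absolute pixel coordinates.
    result = []
    for y1, x1, y2, x2 in bboxes:
        result.append((int(y1 * img_h), int(x1 * img_w),
                       int(y2 * img_h), int(x2 * img_w)))
    return result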
Code example #18
    def data_storage_run(self):
        self.image_names = CommonComponents.load_image_names(
            config.WORKING_FOLDER)
        self.activeComponents = [True, True, True, True]
        if self.activeComponents[0]:
            print('Starting yolo detector')
            self.yoloDetector = yolo.ObjectDetector()
        if self.activeComponents[1]:
            print('Starting mask detector')
            self.maskDetector = mask.MaskRCNN()
        if self.activeComponents[2]:
            print('Starting face detector')
            self.faceDetector = face.FaceDetection()
        if self.activeComponents[3]:
            print('Starting pose detector')
            self.poseDetector = pose.PoseDetection()

        #Creates the full set of results in an array style setup
        self.full_results = []
        # Doing a loop over all images in the folder to find all of our results
        for name in self.image_names:
            new_entry = DataStorage.DataStorage(name.name)
            #Adding the yolo results for our entry
            if config.YOLO_OBJECT_COMPARISSON:
                yolo_results = self.yoloDetector.read_image(name.name)
                for obj in yolo_results:
                    new_entry.yolo.append(DataStorage.Yolo(obj))
            #Adding the mask results for our entry
            if config.FACE_COLOUR_COMPARISSON or config.MASK_OBJECT_COMPARISSON:
                self.maskDetector = mask.MaskRCNN()
                mask_entries = self.maskDetector.read_image(name.name)
                for masking in range(len(mask_entries[0])):
                    new_entry.mask_results.append(
                        DataStorage.Mask(mask_entries[0][masking],
                                         mask_entries[1][masking],
                                         mask_entries[2][masking],
                                         mask_entries[3][masking],
                                         mask_entries[4][masking],
                                         mask_entries[5][masking]))
            if config.FACE_COLOUR_COMPARISSON:
                if config.FACE_DETECTION_TYPE == "Cascade":
                    face_entry = self.faceDetector.read_image(name.name)
                    for front_face in face_entry[0]:
                        new_entry.faces.append(
                            DataStorage.Face(front_face, "Front"))
                    for side_face in face_entry[1]:
                        new_entry.faces.append(
                            DataStorage.Face(side_face, "Side"))
                elif config.FACE_DETECTION_TYPE == "dlib":
                    face_entries = self.faceDetector.facial_recognition_library_read_image(
                        name.name)
                    for x in face_entries:
                        new_entry.faces.append(
                            DataStorage.Face(x, "Unconfirmed"))
            if self.activeComponents[3]:
                # In here is where pose detection is done
                poses_detected = self.poseDetector.read_image(name.name)
                new_entry.poses = self.pose_processing(name.name,
                                                       poses_detected)
            self.full_results.append(new_entry)
        # Showing off all the face results here
        if config.DRAW_DISPLAY_IMAGES:
            for entry in self.full_results:
                testing_image = cv.imread(entry.image_name)
                if config.DRAW_FACE_RESULTS:
                    for face_entry in entry.faces:
                        face_entry.draw_face(testing_image, [255, 255, 255])
                if config.DRAW_MASK_RESULTS:
                    for mask_entry in entry.mask_results:
                        mask_entry.draw_mask(testing_image, [255, 255, 255])
                if config.DRAW_POSE_RESULTS:
                    for pose_entry in entry.poses:
                        pose_entry.draw_pose(testing_image)
                if config.FACE_SHADOW_REMOVAL:
                    testing_image = CommonComponents.retinex_shadow_removal(
                        testing_image, config.FACE_SHADOW_REMOVAL_TYPE)
                cv.imshow("Testing image", testing_image)
                cv.waitKey(0)

        # Object similarity scoring methods
        # Yolo scoring
        if config.YOLO_OBJECT_COMPARISSON:
            self.yolo_object_comparisson()
        # Mask scoring
        if config.MASK_OBJECT_COMPARISSON:
            self.mask_object_comparisson()

        # Running a maskless face-colour analysis
        if config.FACE_COLOUR_COMPARISSON:
            self.maskless_face_colour_analysis()
            # Calculating average maskless hsv face colours
            self.maskless_hsv_average_calculation()
            # Running a mask-based face_colour analysis
            self.face_average_colour_detection()
            # Making the comparisons of the face-based colour detections
            self.face_average_colour_comparissons()

        # Running a mask-based clothing colour analysis
        if config.CLOTHING_COLOUR_COMPARISSON:
            self.pose_average_colour_detection_open_pose()
            # Running a face-sized base clothing colour analysis
            self.average_clothes_detection_results_comparing()
Code example #19
                                                     ROIStartY):(endY +
                                                                 ROIStartY),
                                                    (startX +
                                                     ROIStartX):(endX +
                                                                 ROIStartX)]
                                cv2.imshow("FaceFrame", roiDetected)
                                key = cv2.waitKey(1) & 0xFF

                                #Save Image to Disk
                                cv2.imwrite(Image_Path, roiDetected)
                                #Upload image to dropbox and generate a public sharing URL
                                URL = UploadToDropBox(Image_Name)
                                print("DropBox Image Public Share URL: " +
                                      str(URL))

                                FaceFound = FaceDetection.FaceDetection(
                                    roiDetected)
                                if (FaceFound == 'true'):
                                    FaceDetectionDelay = SMSAlertDelay

                        else:
                            print("")
                            print(
                                "False Detection Occurred, Detection Percentage: "
                                + str(DectectionPercentage))
                            print("")
                    else:
                        if (ValidObjectDetected == 'true'):
                            #if camera is in cool down period after a detection and the person remains in frame extend the cool down to prevent further alarms
                            SMSAlertDelay = SMSAlertDelay + datetime.timedelta(
                                seconds=0.5)
                            print("Object still in frame, Extending Cool Down")
Code example #20
aqua_btn.pack()

oil_painting_btn = Button(
    root,
    text="Oil Painting",
    command=lambda: OilPainting.do_oil_painting(pathString.get()),
    width=15)
oil_painting_btn.pack()

molten_btn = Button(root,
                    text="Molten",
                    command=lambda: Molten.do_molten(pathString.get()),
                    width=15)
molten_btn.pack()

cartoonize_btn = Button(
    root,
    text="Cartoonize",
    command=lambda: Cartoonize.do_cartoonize(pathString.get()),
    width=15)
cartoonize_btn.pack()

face_detection_btn = Button(
    root,
    text="Face Detection",
    command=lambda: FaceDetection.do_face_detection(pathString.get()),
    width=15)
face_detection_btn.pack()

root.mainloop()
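The excerpt assumes earlier setup of root and pathString. A minimal sketch of what that setup might look like (the default path is an assumption):

from tkinter import Tk, Button, StringVar

root = Tk()
pathString = StringVar(root)   # holds the path of the image to process
pathString.set("input.jpg")    # assumed default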
Code example #21
import FaceDetection
import cv2
import glob
import os, sys, time
import visualization
faceDect = FaceDetection.FaceDetection()

INPUT = '/home/nptai/WIDER_val/images'
OUTPUT = './output-widerface'

paths = glob.glob(os.path.join(INPUT, '*/*.jpg'))
paths.sort()

if not os.path.isdir(OUTPUT):
    os.mkdir(OUTPUT)

total = len(paths)
curr = 0
xxtime = time.time()
for path in paths:
    xtime = time.time()
    curr += 1

    img = cv2.imread(path)
    h, w, _ = img.shape  # numpy shape is (rows, cols, channels)

    classes, scores, bboxes = faceDect.dectectFace(img)

    visualization.bboxes_draw_on_img(img,
                                     classes,
                                     scores,
Code example #22
def detect_face(camera):
	img = get_remote_image(camera)
	fd = FD.FaceDetector(img, True)
	return fd.detectCenterFaces()
Code example #23
File: main.py Project: sgrether/Smile-Detector
print("                          |___/            |_|                       ")
print("       __     __   _                                                 ")
print("__ __ /  \   /  \ / |                                                ")
print("\ V /| () |_| () || |                                                ")
print(" \_/  \__/(_)\__/ |_|                                                ")
print('\n Press space to capture a photo, and escape to analyze it')

import FaceDetection
import EyeTrack
from ImageGetter import getImage

noGoodPicture = False  #Used for program flow

while not noGoodPicture:  #Continue until good picture is found

    face = FaceDetection.Face('webcam.jpg')
    smile = FaceDetection.findSmile(face)
    eyes = EyeTrack.trackEyes(face)

    if smile and not eyes:
        print('It looks like you were blinking')
        print('Press space to try taking the picture again')
        getImage()

    if not smile and eyes:
        print('Why so serious? Try smiling :D')
        print('Press space to try taking the picture again')
        getImage()

    if not smile and not eyes:
        print('Wake up sleepy face! (No smile or eyes detected)')
Code example #24
File: main.py Project: ankur8931/face_anonymizer
import cv2

import FaceDetection

cap = cv2.VideoCapture(
    '/home/neha/workspace/face_anonymization/face-demographics-walking.mp4')
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # OpenCV >= 3 constant names
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

img_array = []
count = 0
while True:
    ret, frame = cap.read()
    if not ret:  # stop cleanly when the video ends
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = FaceDetection.Detect(gray, frame)
    out_img = FaceDetection.Blur(rects, frame)

    cv2.imshow('out_img', out_img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    filename = "results/out_img%d.jpg" % count
    cv2.imwrite(filename, out_img)

    count = count + 1
    img_array.append(out_img)

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('results/output_video.avi', fourcc, 1, (width, height))
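The excerpt stops right after the writer is created. A plausible continuation (an assumption, since the original is truncated here) flushes the collected frames and releases the resources:

for img in img_array:
    out.write(img)
out.release()
cap.release()
cv2.destroyAllWindows()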
Code example #25
    cv2.imwrite(Image_Pth, image)
    Send_Mail(Image_Pth, "Test Email", "Label")

if (args["dropbox"]):
    print("")
    print("DropBox Test")
    print("")

    link = UploadToDropBox("img.jpg")
    print("File Uploaded, Viewing Link is: " + link)

if (args["mms"]):
    print("")
    print("This test requires the dropbox test is also run using -d")
    print("MMS Test using the file uploaded in the dropbox test")
    print("")
    Send_MMS("Test MMS", ConfigValues.ReturnAlertPhoneDestination(), link)

if (args["facedetection"]):
    print("")
    print("FaceDetection Test")
    print("")
    image = cv2.imread("C:\RT_OD\Couple.jpg")
    #next two lines are only needed to view the test image to validate that it loaded correctly from disk
    #cv2.imshow("Image", image)
    #cv2.waitKey(0)
    FaceDetection.FaceDetection(image)

print("")
print("Test Completed!")
Code example #26
minsize = 20  # minimum size of face
threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
factor = 0.709  # scale factor

# restore mtcnn model
print('Creating networks and loading parameters')
gpu_memory_fraction = 1.0
with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=gpu_memory_fraction)
    config = tf.ConfigProto(gpu_options=gpu_options,
                            log_device_placement=False)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    with sess.as_default():
        pnet, rnet, onet = FaceDetection.create_mtcnn(sess,
                                                      './model_check_point/')

path = "/share/dataset/val/1_2_04_1/prob/dongnanmenwest_16_1920x1080_30/"
save_path = "/mnt/disk/faces_test_west/"
# path = "."
# save_path = "./test/"
all_face_positions = {}


def detect_face_with_range(start_index, end_index):
    for now_index in range(start_index, end_index + 1):
        file_name = str(now_index).zfill(5) + ".jpg"
        if not os.path.exists(os.path.join(path, file_name)):
            continue

        pic = cv2.imread(os.path.join(path, file_name))
Code example #27
import os
import cv2

import FaceDetection as FD

OUT_DIR = '/home/mmhci_hcmus/cropped-avatars'
IMAGES_FILE = '/home/mmhci_hcmus/avatars.txt'

if __name__ == '__main__':
	fi = open(IMAGES_FILE, 'r')
	if not os.path.exists(OUT_DIR):
		os.makedirs(OUT_DIR)
	
	fd = FD.FaceDetection()
	fout = open(OUT_DIR + '.txt', 'w')
	for path in fi:
		path = path.strip()
		#print path
		name = os.path.basename(path).split('.')[0]
		_fd_ =  ''
		outpath = os.path.join(OUT_DIR, _fd_)
		#if os.path.exists(outpath):
		#	continue
		if not os.path.exists(outpath):
			os.mkdir(outpath)
		
		frame = cv2.imread(path)
		try:
			assert(len(frame.shape) == 3)
		except:
			fout.write('%s\t%d\n'%(os.path.basename(path), -1))
Code example #28
import FaceDetection

x = FaceDetection.FaceDetection('test.png')
x.startStream()
Code example #29
File: views.py Project: nguyenthanhan1001/FaceAPIs
import random
import string

from django.http import HttpResponse                    # used by hello()
from django.views.decorators.csrf import csrf_exempt    # used by detect() below

import skimage.io
import numpy as np
import urllib

import config

import sys
sys.path.insert(0, config.SSD300_DIR)
import FaceDetection
import visualization

gb_detector = FaceDetection.FaceDetection()  # module-level detector shared by the views

def hello(request):
    return HttpResponse('mmHCI Face Detection Webservice')

def handle_uploaded_file(f, filename):
    with open(filename, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)

@csrf_exempt
def detect(request):
    tmp_filename = 'media/' + ''.join(random.choice(string.ascii_uppercase 
                        + string.digits) for _ in range(64)) + '.jpg'
    if request.method == 'POST':
        handle_uploaded_file(request.FILES['image'], tmp_filename)
Code example #30
import sys
import time
sys.path.insert(0, "../arduino/")
import python2arduino
import cv2

# Load the controlled arm
motorSettings = settings.MotorSettings()
motorControler = motorControl.MotorControl(motorSettings.get())


# Load the joystick
joystickSettings = settings.MotorSettings()
joystickControler = motorControl.Control(joystickSettings.get())

# Enable the camera to get visual feedback from the robot
faceStream = FaceDetection.FaceStream(0)

# Camera parameters
precision = 0.1
res = [360,240]
apertureAngle = [50.,30.]

exit = False

while not exit:
    # Read the joystick state
    joystickControler.readAllMotor()
    # Copy the values onto the lamp
    motorControler.setMotorsByControler(joystickControler)
    # Wait for a key press
    key = cv2.waitKey(10)
Code example #31
import cv2
import os
import numpy as np
import FaceDetection as t

test_img = cv2.imread('Test/m.jpg')
faces_detected, gray_img = t.faceDetection(test_img)
print("faces_detected:", faces_detected)

faces, faceID = t.labels_for_training_data('trainingimages')
face_recognizer = t.train_classifier(faces, faceID)
name = {0: "Bill Gates", 1: "Mark"}
cap = cv2.VideoCapture(0)  # webcam capture (unused in this excerpt)

for face in faces_detected:
    (x, y, w, h) = face
    roi_gray = gray_img[y:y + h, x:x + w]  # crop the face region from the grayscale image
    label, confidence = face_recognizer.predict(roi_gray)
    print("label", label)
    t.draw_rect(test_img, face)
    predicted_name = name[label]
    t.put_text(test_img, predicted_name, x, y)

resized_img = cv2.resize(test_img, (700, 500))
cv2.imshow("face detection", resized_img)
cv2.waitKey(0)
cv2.destroyAllWindows()