Example No. 1
def main(args):

    df_profile = pd.read_excel('/mnt/data/cmnd_tima/blacklist.xlsx',
                               sheet_name='Profile')
    df_image = pd.read_excel('/mnt/data/cmnd_tima/blacklist.xlsx',
                             sheet_name='Image')
    face_detect = face.Detection()
    personIDs = [
        f for f in listdir(args.rawpath) if isdir(join(args.rawpath, f))
    ]
    for personID in tqdm(personIDs):
        cmnd = df_profile[df_profile['CustomerCreditId'] == int(
            personID)]['CardNumber'].values[0]
        i = 0
        personpath = args.rawpath + personID
        #filteredpath = args.pscard_dir + personID
        #if os.path.isdir(filteredpath):
        #continue
        #os.system('mkdir ' + filteredpath)
        files = [f for f in listdir(personpath) if isfile(join(personpath, f))]
        for file in files:
            try:
                filepath = personpath + '/' + file
                img = cv2.imread(filepath)
                img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                faces, images = face_detect.find_faces(img_rgb)
                if (len(faces) > 0):
                    i += 1
                    os.system('cp ' + filepath + ' ' + args.pscard_dir + cmnd +
                              '_' + str(i) + '.' + file.split('.')[-1])
            except Exception:
                pass  # skip unreadable images and other per-file errors
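
The shell-based copy above (os.system('cp ...')) fails silently and breaks on file names containing spaces; a minimal sketch of the same copy step using shutil, assuming the same args.pscard_dir layout and naming scheme, could look like this:

import shutil

# copy the accepted photo into the filtered directory as <CardNumber>_<index>.<extension>
ext = file.split('.')[-1]
dst = os.path.join(args.pscard_dir, '{}_{}.{}'.format(cmnd, i, ext))
shutil.copy2(filepath, dst)  # raises an exception on failure instead of failing silently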
Example No. 2
    def main(args):
        frame_interval = 3  # Number of frames after which to run face detection
        fps_display_interval = 5  # seconds
        frame_rate = 0
        frame_count = 0
        count = 0
        save_path = str('/work/MachineLearning/my_dataset/train_aligned/' +
                        args.name)

        if not os.path.exists(save_path):
            os.mkdir(save_path)

        print("Saving images into " + save_path)
        video_capture = cv2.VideoCapture(0)
        face_detection = face.Detection()
        #   face_recognition = face.Recognition()
        start_time = time.time()

        while True:
            # Capture frame-by-frame
            ret, frame = video_capture.read()

            if (frame_count % frame_interval) == 0:
                faces = face_detection.find_faces(frame)

                # Check our current fps
                end_time = time.time()
                if (end_time - start_time) > fps_display_interval:
                    frame_rate = int(frame_count / (end_time - start_time))
                    start_time = time.time()
                    frame_count = 0

    #        add_overlays(frame, faces, frame_rate)

            frame_count += 1
            if len(faces) == 1:
                frame = faces[0].image
                cv2.imshow('Enrolling', frame)
                cv2.setWindowTitle('Enrolling',
                                   str(args.name) + " " + str(count + 1))
                #cv2.putText(faces[0].image, 'Image: ' + str(frame_count+1), (0, 0), cv2.FONT_HERSHEY_SIMPLEX, 1,
                #            (255, 0, 0), thickness=2, lineType=2)
                rgb_frame = frame[:, :, ::-1]
                img = Image.fromarray(rgb_frame, "RGB")
                if img is not None:
                    img.save(os.path.join(save_path, str(count) + ".jpg"))
                count += 1

                #if frame_count > 100:
                #   break

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # When everything is done, release the capture
        video_capture.release()
        cv2.destroyAllWindows()
Example No. 3
def main(args):

    #face detector
    face_recognition = f.Detection()

    #read webcam video
    cap = cv2.VideoCapture(0)

    #take name args
    face_name = args.face_name

    path = 'images'

    directory = os.path.join(path, face_name)
    print(directory)

    #check if already have --name folder
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)

    number_of_images = 0
    MAX_NUMBER_OF_IMAGES = 50
    count = 0

    while number_of_images < MAX_NUMBER_OF_IMAGES:
        ret, frame = cap.read()

        faces = face_recognition.find_faces(frame)

        # loop over the detected faces
        for face in faces:
            face_bb = face.bounding_box.astype(int)

            cv2.rectangle(frame, (face_bb[0] - 10, face_bb[1] - 10),
                          (face_bb[2] + 10, face_bb[3] + 10), (0, 255, 0), 2)

            sub_face = frame[face_bb[1]:face_bb[3], face_bb[0]:face_bb[2]]
            dim = (160, 160)
            resized = cv2.resize(sub_face, dim, interpolation=cv2.INTER_AREA)

            # save image in --name folder
            if count == 5:
                FaceFileName = str(path) + "/" + str(face_name) + "/" + str(
                    number_of_images) + ".jpg"
                cv2.imwrite(FaceFileName, resized)
                number_of_images += 1
                count = 0
            count += 1

        cv2.imshow('add new data', frame)

        # Exit program
        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break

    cap.release()
    cv2.destroyAllWindows()
Example No. 4
def main(args):
    frame_interval = 2  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0
    threshold = 0.77
    # detector = dlib.get_frontal_face_detector()

    codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    video_capture = cv2.VideoCapture(0)
    video_capture.set(cv2.CAP_PROP_FOURCC, codec)
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 768)
    # face_recognition = face.Recognition()
    face_detection = face.Detection()
    DetectEnable = 1
    start = time.time()  # initialized here so the elapsed-time check below works before the first detection

    while True:
        # Capture frame-by-frame
        # mypath = '/home/hoanviettran/Pictures/Face/TranTuanHung/facebook/TranTuanHung/'
        # files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
        # for file in files:
        #     file_path = mypath + file
        #     print(file)
        #     frame = cv2.imread(file_path)
        #     frame_count += 1
        ret, frame = video_capture.read()
        if (ret):
            if (DetectEnable):
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                # bbs = detector(frame_rgb, 0)
                # display = detect.bb_face(frame_rgb,bbs)
                # faces = face_recognition.identify(frame_rgb, threshold)
                faces, images = face_detection.find_faces(frame_rgb)
                if (len(faces) > 0):
                    DetectEnable = 0
                    start = time.time()
            # Check our current fps

            # add_overlays(frame, faces, frame_rate)

            # cv2.imshow('Video', frame)
            if (DetectEnable == 0):
                cv2.imwrite(
                    '/mnt/data/Face/Frames/' + str(datetime.now()) + '.jpg',
                    frame)

            end = time.time()
            if ((end - start) > 5):
                DetectEnable = 1

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example No. 5
    def __init__(self, facenet_model_checkpoint):
        self.batch_size = 64
        self.image_size = 160
        self.detect = None
        self.facenet_model_checkpoint = facenet_model_checkpoint
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3,
                                    allow_growth=True)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        #self.sess = tf.Session()
        with self.sess.as_default():
            facenet.load_model(self.facenet_model_checkpoint)
            self.detect = face.Detection()
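
Example No. 5 targets the TensorFlow 1.x session API. If it has to run under TensorFlow 2.x, the usual route is the compat layer; a sketch of the equivalent session setup, assuming the rest of the class (and facenet.load_model) still works through that layer:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# same GPU memory cap and growth policy as above, expressed through the compat API
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3,
                            allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))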
Example No. 6
    def __init__(self, parent=None):
        super(Ui_MainWindow, self).__init__(parent)

        # self.face_recognition = face.Recognition()
        self.face_detection = Detection()
        self.face_detection_capture = face.Detection()
        self.timer_camera = QtCore.QTimer()
        self.timer_camera_capture = QtCore.QTimer()
        self.cap = cv2.VideoCapture()
        self.CAM_NUM = 0
        self.set_ui()
        self.slot_init()
        self.__flag_work = 0
        self.x = 0
Example No. 7
def main():
    # print(name)
    frame_interval = 3  # Number of frames after which to run face detection (sampled-frame detection)
    fps_display_interval = 0.1  # seconds
    frame_rate = 0
    frame_count = 0
    # video_capture = cv2.VideoCapture("rtsp://*****:*****@192.168.2.166:554/video1")
    video_capture = cv2.VideoCapture(0)
    width, height = video_capture.get(3), video_capture.get(4)
    print('Resolution:')
    print(width, height)
    face_detection = face.Detection()
    start_time = time.time()

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        if (frame_count % frame_interval) == 0:
            faces = face_detection.find_faces(frame)
            # cv2.imwrite("3.jpg",frame)
            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, faces, frame_rate)

        frame_count += 1
        # img_zo = cv2.resize(frame, (int(width) // 3, int(height) // 3), interpolation=cv2.INTER_AREA)
        img_zo = cv2.resize(frame, (640, 480), interpolation=cv2.INTER_AREA)
        # cv2.namedWindow("Video", cv2.CV_WINDOW_NORMAL)  # create the display window
        srcImg = cv2.imread("2.png")
        # cv2.moveWindow("[ROIImg]", 100, 100)
        # cv2.imshow("[ROIImg]", srcImg)
        srcImg = cv2.resize(srcImg, (100, 100), interpolation=cv2.INTER_AREA)
        img_zo[0:100, 0:100] = srcImg
        # cv2.moveWindow("Video", 100, 100)
        cv2.imshow('Video', img_zo)


        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
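
The magic numbers 3 and 4 passed to video_capture.get in Example No. 7 are the OpenCV property IDs for frame width and height; the named constants express the same thing more readably:

width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)   # property id 3
height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # property id 4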
Example No. 8
def main():

    face_recognition = face.Recognition()
    face_tracking = face.Detection()

    path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    path = os.path.join(path, "uploads")

    json_file = os.path.join(path, "output.json")

    with open(json_file) as json_data:
        json_decoded = json.load(json_data)

    while True:
        #wait half a second between iterations
        time.sleep(.5)
        #get the file names of all frames in /uploads
        files = [
            f for f in listdir(path) if os.path.isfile(os.path.join(path, f))
        ]

        for img in files:
            if (img != "output.json" and img not in json_decoded):
                img_path = os.path.join(path, img)
                # check if it was an accidental frame (0 bytes)
                if os.stat(img_path).st_size != 0:
                    # Grab a frame from filepath and preprocess
                    frame = misc.imread(img_path)
                    if frame.ndim == 2:
                        frame = facenet.to_rgb(frame)
                    frame = frame[:, :, 0:3]

                    faces = face_tracking.find_faces(frame)

                    if len(faces) == 1:
                        the_face = face_recognition.identify_name(faces)
                        print(img, the_face[0].name)
                        json_decoded[img] = the_face[0].name
                    else:
                        json_decoded[img] = 'None'

                    with open(json_file, 'w') as json_out:
                        json.dump(json_decoded, json_out)
Example No. 9
def catch(perName, maxNum, source):
    frame_interval = 1
    frame_count = 0
    video_capture = None
    if source == "camera":
        video_capture = cv2.VideoCapture(0)
    else:
        video_capture = cv2.VideoCapture(source)
    face_detect = faceLib.Detection()
    workpath = os.getcwd() + "/dataset/orignal/" + perName
    if not os.path.exists(workpath):
        os.makedirs(workpath)
    num = 1

    while True:
        ret, frame = video_capture.read()
        frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
        if (frame_count % frame_interval) == 0:
            faces = face_detect.find_faces(frame)
            for face in faces:
                if num > maxNum:
                    break

                face_bb = face.bounding_box.astype(int)
                left = face_bb[0]
                top = face_bb[1]
                right = face_bb[2]
                bottom = face_bb[3]
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0),
                              2)
                img_name = '%s/%d.jpg' % (workpath, num)
                cv2.imwrite(img_name, face.image,
                            [int(cv2.IMWRITE_JPEG_QUALITY), 100])
                num += 1
            cv2.imshow('video', frame)
        frame_count += 1
        if num > maxNum:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
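
A typical call to catch (the person name, image limit, and video path below are placeholders) would be:

# capture up to 50 face crops of "alice" from the default webcam
catch("alice", 50, "camera")

# or pull frames from a video file instead
catch("alice", 50, "/path/to/video.mp4")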
Example No. 10
def process_img(imgpath):
    img = misc.imread(imgpath)
    detector = face.Detection()

    faces = detector.find_faces(img)

    name = ntpath.basename(os.path.dirname(imgpath))
    #group = str(int(name.split("_")[1][:2]))
    frame = re.search('image-(.*).jpg', ntpath.basename(imgpath)).group(1)

    rows = []

    if len(faces) == 0:
        rows.append([name, frame, False, None, None, None, None])
    else:
        for f in faces:
            arr = f.bounding_box
            x, y, w, h = arr[0], arr[1], arr[2], arr[3]
            rows.append([name, frame, True, x, y, w, h])

    return rows
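
Each row returned by process_img is [name, frame, face_found, x, y, w, h] as labelled above. A small driver that gathers the rows for a directory of frames into a CSV, with the glob pattern and output path as assumptions, might look like this:

import csv
import glob

rows = []
for imgpath in glob.glob('/data/frames/person_01/image-*.jpg'):  # assumed frame layout
    rows.extend(process_img(imgpath))

with open('detections.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['name', 'frame', 'face_found', 'x', 'y', 'w', 'h'])
    writer.writerows(rows)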
Example No. 11
def main(face_recognition):
    face_detection = face.Detection()

    sub_dirs = next(os.walk(sub_dir_full))[1]
    for sub_dir in sub_dirs:
        dir_full = os.path.join(sub_dir_full, sub_dir)
        img_basenames = os.listdir(dir_full)

        for im_name in img_basenames:
            full_imname = os.path.join(dir_full, str(im_name))
            img = cv2.imread(full_imname, cv2.IMREAD_COLOR)
            frame = img[..., ::-1]  # BGR -> RGB
            image = copy.copy(img)
            try:
                faces = face_detection.find_faces(frame)
                for face in faces:
                    face_bb = face.bounding_box.astype(int)
                    cv2.rectangle(image, (face_bb[0], face_bb[1]),
                                  (face_bb[2], face_bb[3]), (0, 255, 0), 2)
                    cv2.putText(image, str(face.euler[0]),
                                (face_bb[0], face_bb[3]),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
                                thickness=2, lineType=2)
                    cv2.imwrite(save_dir + '/' + str(im_name), image)
                    print('Yes!')
            except Exception:
                print("error!")
Example No. 12
    def __init__(self, mode, specific_formats=None, specific_folder=None):

        self.mode = mode
        self.lst_of = {}
        self.doc_ext = []
        self.img_ext = []
        self.vid_ext = []
        self.sound_ext = []
        self.zip_ext = []
        self.code_ext = []
        self.media_ext = []
        self.data_ext = []
        self.app_ext = []
        self.font_ext = []
        self.sys_ext = []
        self.flags = []
        self.specifics = []
        self.all_files = {}
        self.errors = []
        self.file_structure = {}
        self.load_ext()
        self.now = datetime.now()
        self.dt_string = self.now.strftime("%d-%m-%Y %Hh%M")
        self.nude_classifier = NudeClassifier()
        self.nude_detector = NudeDetector()
        self.s = sched.scheduler(time.time, time.sleep)

        self.number_of_files = 0
        self.time_taken = 0
        self.prev_dir = None
        self.curr_dir = None
        self.faces = None
        self.points = None

        self.walked_dir = "checked.dir"
        self.all_walked = []
        self.load_walked()

        self.available_dirs = []
        self.non_available_dirs = []
        self.attach = ":/"
        self.let_dir = [
            "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
            "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"
        ]

        self.runt = threading.Thread(target=self.find_all_dirs)
        self.runt.start()

        self.master_ext = [
            self.doc_ext, self.img_ext, self.vid_ext, self.sound_ext,
            self.zip_ext, self.code_ext, self.media_ext, self.data_ext,
            self.app_ext, self.font_ext, self.sys_ext, self.flags
        ]

        self.type_s = [
            "Documents", "Images", "Videos", "Sounds", "Compressed_Files",
            "Programming_Files", "Discs_Media", "Databases", "Applications",
            "Fonts", "System_Files", "Infected"
        ]

        self.face_detection = fc.Detection()
        self.face_recognition = fc.Recognition()
        self.face_verification = fc.Verification()

        if specific_formats is not None and specific_folder is not None:
            self.specifics = self.specifics + specific_formats
            self.master_ext.append(self.specifics)

            self.type_s.append(specific_folder)
Example No. 13

import pickle
import os
from PIL import Image, ImageDraw
import cv2
import numpy as np
import tensorflow as tf
from scipy import misc

#import facenet
import sys
sys.path.append("align/")
import face
detector = face.Detection()

import imageio
filename = "01.bin"
reader = imageio.get_reader(filename, 'ffmpeg')
fps = reader.get_meta_data()['fps']

import time

count = 0
range_1 = count
range_2 = count + 10000
json_name = str(range_1) + "_" + str(range_2) + ".json"
fw = open(json_name, 'w')

resall = []
Example No. 14
    return q2.get()


#client function
def client():
    tree = et.parse("config/conf.xml")
    root = tree.getroot()
    port = root.find('port').text
    host = root.find('myhost').text
    port = int(os.environ.get("PORT", port))
    app.run(host=host, port=port)


if __name__ == '__main__':
    #preload the face detection model into memory
    face_detection = face.Detection()
    #create the client connection thread
    parseImage_thread = threading.Thread(target=client)
    parseImage_thread.start()

    while True:
        while not q1.empty():
            value = q1.get()
            print "从队列q1中取出数据"
            try:
                faces = face_detection.find_faces(value)
            except Exception:
                print("Notice: an exception occurred during detection!")
                data = {"isResult": "false", "resultNum": "0"}
                data = demjson.encode(data)
                q2.put(data)