Example #1
def __init__(self, devid):
    self.devid = devid
    # modes: runtime, detect, face, shetai
    self.mode = "runtime"
    self.faceFlag = False
    # sliding window of recent face x-centers, seeded with sentinel values
    self.faceList = [-100, 1200, 100, 200, -100]
    self.face = FaceDetect()
    self.faceStableStandar = 0
    self.faceCount = 0
    super(CameraThread, self).__init__()
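
The five-element faceList is used as a ring buffer of recent face x-centers; Example #8 below shows the matching isStable() check. A minimal standalone sketch of that mechanism (the names and the (x, y, w, h) box layout are assumptions):

faceList = [-100, 1200, 100, 200, -100]  # seeded so the spread starts out large
faceCount = 0
standar = 100  # maximum allowed spread of the last 5 x-centers, in pixels

def update_and_check(r):
    """r is a face box assumed to be (x, y, w, h); returns True when stable."""
    global faceCount
    faceCount += 1
    faceList[faceCount % 5] = r[0] + r[2] / 2
    return max(faceList) - min(faceList) < standar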
Example #2
import importlib

import cv2 as cv
from PIL import Image, ImageDraw, ImageFont

# FaceDetect, FaceAlign, FaceFeatures and FaceRetrival are project-local modules.

def face_query(test_image):
    cfg = importlib.import_module("RetinaFace.data.config")
    fd = FaceDetect("./weights/Resnet50_Gender_Final.pth", cuda=True, cfg=cfg.cfg_re50)
    fa = FaceAlign(target_size=112)
    ff = FaceFeatures('ArcFace/weights/arcface_resnet50_epoch_30.pth', cuda=True)
    fq = FaceRetrival()
    img = cv.imread(test_image, cv.IMREAD_COLOR)
    # detect, align and embed every face, then query the retrieval index
    faces, landmarks, points = fd(img, target_size=None, top=-1)
    aligned_faces = fa(img, faces, landmarks)
    features = ff(aligned_faces)
    results = fq(features)

    # draw a red box around every detected face
    for point in points:
        cv.rectangle(img, (point[0], point[1]), (point[2], point[3]), color=(0, 0, 255), thickness=2)

    # label each box with the top match, using a TrueType font so CJK names render
    f_size = 50
    fontpath = "./simsun.ttc"
    font = ImageFont.truetype(fontpath, f_size)
    img_pil = Image.fromarray(cv.cvtColor(img, cv.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img_pil)
    for point, result in zip(points, results):
        print(result[0]['star'])
        draw.text((point[0], point[1] - f_size), result[0]['star'], font=font, fill=(255, 0, 0))

    img_pil.save("result_" + test_image.split('/')[-1])
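
A minimal usage sketch; the image path is hypothetical, and the weight files referenced above must exist:

# writes an annotated copy named result_<file> into the working directory
face_query("./images/test.jpg")  # hypothetical path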
Example #3
import csv
import importlib
import os

import cv2 as cv
from tqdm import tqdm

# FaceDetect, FaceAlign and FaceFeatures are project-local modules.

def extract_features(src_dir, save_dir):
    cfg = importlib.import_module("RetinaFace.data.config")
    fd = FaceDetect("./weights/Resnet50_Gender_Final.pth", cuda=True, cfg=cfg.cfg_re50)
    fa = FaceAlign(target_size=112)
    ff = FaceFeatures('./weights/arcface_resnet50_epoch_30.pth', cuda=True)

    # collect every image path from the leaf directories of src_dir
    images = []
    for root, dirs, files in os.walk(src_dir):
        if not dirs:
            for file in files:
                images.append(os.path.join(root, file))

    print("total images:", len(images))
    # save_dir is the path of the CSV file the features are written to
    with open(save_dir, "w") as f:
        writer = csv.writer(f)
        for image in tqdm(images, desc="Features extracting"):
            try:
                # the directory name is the person (star) label, the file name the image id
                star, fname = image.split('/')[-2:]
                dst_path = os.path.join("/data/datasets/chs_stars_faces", star)
                if not os.path.exists(dst_path):
                    os.mkdir(dst_path)
                img = cv.imread(image, cv.IMREAD_COLOR)
                faces, landmarks, points = fd(img, target_size=640, top=-1, max_size=1280)
                # keep only images with exactly one detected face
                if len(faces) != 1:
                    continue
                aligned_faces = fa(img, faces, landmarks)
                cv.imwrite(os.path.join(dst_path, fname), aligned_faces[0])
                features = ff(aligned_faces)
                writer.writerow([star, fname, features[0]])
            except Exception as e:
                print(image, e)
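
A hedged invocation sketch; both paths are hypothetical, and src_dir is expected to hold one sub-directory of images per person:

# aligned crops go to /data/datasets/chs_stars_faces/<star>/, features to the CSV
extract_features("/data/datasets/chs_stars", "./features.csv")  # hypothetical paths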
Example #4
import importlib

import cv2 as cv

# FaceDetect, FaceAlign and FaceBeauty are project-local modules.

def face_beauty(test_image):
    cfg = importlib.import_module("RetinaFace.data.config")
    fd = FaceDetect("./weights/Resnet50_Gender_Final.pth", cuda=True, cfg=cfg.cfg_re50)
    fa = FaceAlign(target_size=224)
    fb = FaceBeauty('Beauty/weight_local/beauty_ft_epoch_50.pth', cuda=True)
    img = cv.imread(test_image, cv.IMREAD_COLOR)
    # keep at most the 5 highest-scoring detections
    faces, landmarks = fd(img, target_size=512, top=5)
    aligned_faces = fa(img, faces, landmarks)

    # return the beauty score(s) so the result is usable by the caller
    score = fb(aligned_faces)
    return score
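
A usage sketch; the image path is hypothetical, and fb is assumed to return one score per aligned face:

scores = face_beauty("./images/portrait.jpg")  # hypothetical path
print(scores)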
Example #5
import traceback

import cv2
import libardrone  # from the python-ardrone project

# FaceDetect, Transform and VideoRecord are project-local modules.

def run(camera_port, drone):
    """Process frames from the camera and steer the drone toward a detected face."""
    cam = cv2.VideoCapture(camera_port)
    face_detect = FaceDetect()
    transform = Transform()
    video_record = VideoRecord(cam)

    try:
        running = True
        while running:
            running, frame = cam.read()
            if running:
                center = face_detect(frame)
                if center is not None:
                    # map the face center to drone control values
                    result = transform(frame, center)
                    print(result)
                    zap, phi, theta, gaz, yaw = result
                    # gaz = theta = phi = 0
                    if drone is not None:
                        drone.at(libardrone.at_pcmd, True, phi, theta, gaz,
                                 yaw)

                if drone is not None:
                    navdata = drone.navdata.get(0, {})
                    # check battery status
                    battery = navdata.get('battery')
                    if battery is not None and battery < 15:
                        print('Low battery:', battery)
                        running = False
                    print(navdata)

            else:
                # error reading frame
                print('Error reading video feed')
                if drone is not None:
                    print('Is WiFi connected to drone?')

            video_record(frame)
            if cv2.waitKey(1) & 0xFF == 27:
                # escape key pressed, so exit
                running = False
    except Exception:
        traceback.print_exc()

    print('Shutting down')
    video_record.running = False
    cam.release()
    cv2.destroyAllWindows()
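
Since the function guards every drone access with `is not None`, it can be smoke-tested against a local webcam without hardware; camera port 0 is an assumption:

# camera-only test: no drone commands are sent when drone is None
run(0, None)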
Example #6
    def __init__(self,
                 e_face=False,
                 e_helmet=False,
                 e_smoking=False,
                 TaskID='4396'):
        threading.Thread.__init__(self)
        # the boolean switches are replaced by detector instances when enabled
        self.e_face = e_face
        self.e_helmet = e_helmet
        self.e_smoking = e_smoking
        if e_face:
            self.e_face = FaceDetect()
        if e_helmet:
            self.e_helmet = MultiObjects_yolov3('yolo')
        if e_smoking:
            self.e_smoking = Smoking_yolo3('tiny')

        self.period = 10
        self.TaskID = TaskID  # was hard-coded to '4396', ignoring the argument
        self.rq = redis.Redis(host='127.0.0.1', port=6379)
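
A construction sketch; the enclosing class name is not shown in this fragment, so InspectionThread is purely hypothetical:

t = InspectionThread(e_face=True, TaskID='1001')  # hypothetical class name
t.start()  # inherited from threading.Thread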
Example #7
import json
from socket import setdefaulttimeout

import redis
from pymongo import *

import gParam
from event import Event
from facedetect import FaceDetect
from inspection import Inspection
from mongo.mongodb import features_write, person_write
from newthread import NewThread
from periodins import MultiObjects_yolov3
from stoppableThread import StoppableThread
from utils.utils import setting
from videocap import VCAP

# Since the Event response interface is not finalized yet, the initialization
# procedure cannot be fixed; for now it is only used during testing.
# Initialize the worker threads (initialization for the Event feature).
f_detect = FaceDetect()
v_cap = VCAP()
v_cap.video_path = gParam.Video_Path
v_cap.video = '1.mp4'
# v_cap.video_path = False
# v_cap.start()
f_detect.start()

setdefaulttimeout(10)

with open(gParam.Client_Server_Cfg, 'r') as f:
    configuration = json.loads(f.read())
    print('Client/server TCP connection configuration:', configuration)

# redis connection pool
pool = redis.ConnectionPool(host=configuration['REDIS_HOST'],
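
The excerpt breaks off inside the ConnectionPool call. As a general redis-py pattern (not this project's exact settings), a pool is typically completed and consumed like this:

# generic redis-py usage, not the project's actual configuration
pool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0)
rq = redis.Redis(connection_pool=pool)
rq.set('heartbeat', '1')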
Example #8
import threading
import time

import cv2
from PyQt5.QtCore import QThread, pyqtSignal  # assuming PyQt5

# FaceDetect and PUBLIC are project-local modules.

class CameraThread(QThread):
    # define the signals and the argument type each one carries
    _signal = pyqtSignal(str)
    _signalError = pyqtSignal(dict)
    _signalDetectFace = pyqtSignal(list)
    _signalMessage = pyqtSignal(str)

    lock = threading.Lock()

    def __init__(self, devid):
        self.devid = devid
        # modes: runtime, detect, face, shetai
        self.mode = "runtime"
        self.faceFlag = False
        # sliding window of recent face x-centers, seeded with sentinel values
        self.faceList = [-100, 1200, 100, 200, -100]
        self.face = FaceDetect()
        self.faceStableStandar = 0
        self.faceCount = 0
        super(CameraThread, self).__init__()

    def run(self):
        self.camera()

    def camera(self):
        cap = cv2.VideoCapture(self.devid)
        # VideoCapture never returns None, so check isOpened() instead
        if not cap.isOpened():
            self._signalError.emit(PUBLIC.CAMERA_OPEN_ERROR)
            return
        _, frame = cap.read()
        # stability threshold: one eighth of the frame height
        self.faceStableStandar = frame.shape[0] / 8
        beginTime = time.time()
        while True:
            _, frame = cap.read()
            # acquire the lock before touching shared state
            if self.lock.acquire():
                # check for a face every 0.5 s
                if time.time() - beginTime >= 0.5 and self.mode == "runtime":
                    beginTime = time.time()  # reset the 0.5 s interval
                    r = self.face.detect(frame, True)
                    if len(r) > 0 and self.faceFlag:
                        # if the head is close to motionless
                        if self.isStable(r[0], self.faceStableStandar):
                            self._signalDetectFace.emit(list(r))
                            self.faceFlag = False
                        else:
                            self._signalMessage.emit("don't move")
                cv2.imwrite("temp/temp_" + self.mode + ".jpg", frame)
                if self.mode != "runtime":
                    print("mode:" + self.mode)
                    self.mode = "runtime"
                self.lock.release()
            else:
                print("failed to acquire the lock")
            self._signal.emit("temp/temp_runtime" + ".jpg")
            cv2.waitKey(30)
    def isStable(self, r, standar=100):
        # r is a face box (x, y, w, h); track the x-center of the last 5 detections
        self.faceCount += 1
        self.faceList[self.faceCount % 5] = r[0] + r[2] / 2
        temp = self.faceList[:]
        temp.sort()
        # stable when the spread of the last 5 centers is below the threshold
        return temp[-1] - temp[0] < standar

    def setMode(self, mode):
        # modes: runtime, detect, face, shetai
        # acquire the lock before touching shared state
        if self.lock.acquire():
            self.mode = mode
            self.lock.release()

    def setFaceFlag(self, f):
        # acquire the lock before touching shared state
        if self.lock.acquire():
            self.faceFlag = f
            self.lock.release()
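
A hedged end-to-end sketch, assuming PyQt5 and a display; the connected slots are stand-ins for real UI handlers:

import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
cam_thread = CameraThread(0)  # device id 0 is an assumption
cam_thread._signal.connect(lambda path: print("new frame:", path))
cam_thread._signalDetectFace.connect(lambda faces: print("stable face:", faces))
cam_thread.start()
sys.exit(app.exec_())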