Example #1
def callback(data):

    try:
        img = bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
        return

    time_start = clock()
    img_detected = detect_face(img)
    time_span = clock() - time_start

    if time_span == 0:
        fps = 0
    else:
        fps = 1 / time_span

    draw_str(img_detected, (5, 30), 'fps: %d' % fps)
    if show_video == True:
        cv2.imshow('face detection', img_detected)
        cv2.waitKey(1)

    try:
        pub.publish(bridge.cv2_to_imgmsg(img_detected, "bgr8"))
    except CvBridgeError as e:
        print(e)
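Most of these snippets lean on the small helpers from OpenCV's samples/python/common.py; a minimal sketch of the two used everywhere, clock() and draw_str() (some snippets pass an extra color keyword to a locally modified draw_str):

import cv2

def clock():
    # wall-clock time in seconds, based on OpenCV's tick counter
    return cv2.getTickCount() / cv2.getTickFrequency()

def draw_str(dst, target, s):
    # draw a string with a dark shadow so it stays readable on any background
    x, y = target
    cv2.putText(dst, s, (x + 1, y + 1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0),
                thickness=2, lineType=cv2.LINE_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255),
                lineType=cv2.LINE_AA)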
	def recog_openface(self, detection):
		t = clock()
		dinfo = []
		for frame in detection.imgs:
			if self.classif.equadra(frame.copy()):
				

				# Classify the image using the Openface neural network
				# Calls the function in Recog.py
				# Returns a ranking of possible individuals; the first has the highest probability
				resp, dinfo  = self.classif.classifica()

				(nome,pb,reffile,tipoc) = resp[0]
				print "Encontrado {} em {} na camera {}".format(nome,detection.time, detection.camera.id)

				# Create an object with the information about the detection
				# Also used to save the image and send it to Kurento
				
				# Save the original detection image
				#d = Detection(detection.camera, nome, pb, detection.org_img, detection.time,send_alert)

				# Save the cropped detection image
				#d = Detection(detection.camera, nome, pb, frame.copy(), detection.time,True, dinfo)

				# Save the original detection image
				d = Detection(detection.camera, nome, pb, detection.org_img, detection.time,True, dinfo)

				self.sync_queue_out.put(d)

				dt = clock() - t
				print 'T Time',dt,' s'
				print "_____________________________________________________________________"
def callback(data):
    bridge = CvBridge()
    try:
        cv_img = bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
        return

    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    time_start = clock()
        
    detect_people(cv_img, hog)

    time_span = clock() - time_start
    if time_span == 0:
        fps = 0
    else:
        fps = 1 / time_span
    draw_str(cv_img, (5,30), 'fps: %d' % fps)

    if show_video == True:    
        cv2.imshow('people detection', cv_img)
        cv2.waitKey(1)

    pub = rospy.Publisher("/opencv/detect/people",Image, queue_size = 1)
    try:
      pub.publish(bridge.cv2_to_imgmsg(cv_img, "bgr8"))
    except CvBridgeError as e:
      print(e)    
Example #4
def compare(face_to_check,learn=False):
	import sys, getopt
	detected_time = 0
	detected_time_max = 10
	
	video_src = 0
	cascade_fn = os.path.join('data','haarcascades','haarcascade_frontalface_alt2.xml')

	cascade = cv2.CascadeClassifier(cascade_fn)

	cam = create_capture(video_src, fallback='synth:bg=../cpp/lena.jpg:noise=0.05')
	
	while True:
		ret, img1 = cam.read()
		gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
		gray = cv2.equalizeHist(gray)
		
		t = clock()
		rects = detect(gray, cascade)
		
		if len(rects):
			if detected_time<detected_time_max:
				detected_time+=1
			else:
				_found_size = (rects[0][0],rects[0][1],rects[0][2]-rects[0][0],
					rects[0][3]-rects[0][1])
				
				_found_face = cv.GetImage(cv.fromarray(img1))
				
				cv.SetImageROI(_found_face,_found_size)
				
				current_face = cv.CreateImage(cv.GetSize(_found_face),
					_found_face.depth,
					_found_face.nChannels)
				
				if learn:
					cv.Copy(_found_face, current_face, None)
					cv.SaveImage(os.path.join('data','images',face_to_check),current_face)
				
				cv.ResetImageROI(cv.GetImage(cv.fromarray(img1)))
				
				img2 = cv.LoadImage(os.path.join('data','images',face_to_check))
				
				dest_face = cv.CreateImage(cv.GetSize(img2),
					img2.depth,
					img2.nChannels)
				
				cv.Resize(_found_face, dest_face)
				
				if cv.Norm(dest_face,img2)<=30000:
					return True
				else:
					return False
				
				sys.exit()
		else:
			detected_time = 0
		
		dt = clock() - t
Example #5
def look_at_hog(img):
    dims=(16,16)
    t = clock()
    hog = cv2.HOGDescriptor(dims,(16,16),(16,16),(8,8),9,1,-1)
    feature_vector = hog.compute(img, winStride=dims, padding=(0,0))
    dt = clock() - t
    print 'Extraction took: %.1f ms' % (dt*1000)
    t = clock()
    hog_viz=draw_hog2(img,hog,feature_vector)
    dt = clock() - t
    print 'drawing took: %.1f ms' % (dt*1000)
    return hog_viz
Example #6
 def callback(self, data):
    cv_img=self.convert_image(data)
    #cascade = cv2.CascadeClassifier("irobot_hog_detect.xml")
    gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    t = clock()
    rects = detect(gray, cascade)
    vis = cv_img.copy()
    draw_rects(vis, rects, (0, 255, 0))
    dt = clock() - t
    draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
    cv2.imshow('Irobot_Detector', vis)
def detect_and_draw(img, cascade):
    # allocate temporary images
    #gray = cv.CreateImage((img.width,img.height), 8, 1)
    global haar_scale
    global min_neighbors
    for image_scale in range(1, 5, 1):
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

        global noface
        global current_time
        global dailyfolder

        current_time = time.strftime("%H:%M:%S")
        dailyfolder = time.strftime("%F")
        FileName = "/Detected-Faces/"
        FileName = dailyfolder + FileName + current_time + "_Image_Scale_" + str(image_scale) + "_Min_Neighbors_" + str(min_neighbors) + ".jpeg"

        # convert color input image to grayscale
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        # scale input image for faster processing
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        if cascade:
            t = clock()
            faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0), haar_scale, min_neighbors, haar_flags, min_size)
            t = clock() - t

            if faces:
                for ((x, y, w, h), n) in faces:
                    # the input to cv.HaarDetectObjects was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * image_scale), int(y * image_scale))
                    pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                    cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                    #### DEBUG ###
                    #IO.LEDON()
                    #wait(10)
                    #IO.LEDOFF()
                    #save image, update log
                cv.SaveImage(FileName, img)
                f.updatelog(t, image_scale, min_neighbors)

                del gray
                del small_img

            else:
                del gray
                del small_img
Example #8
def faceDetect(loopCount):
    import sys, getopt

    args, video_src = getopt.getopt(sys.argv[1:], "", ["cascade=", "nested-cascade="])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get("--cascade", OpenCVInstallDir + "data/haarcascades/haarcascade_frontalface_alt.xml")
    cascade = cv2.CascadeClassifier(cascade_fn)
    cam = create_capture(video_src, fallback="synth:bg=../cpp/lena.jpg:noise=0.05")
    idx = 0

    if loopCount == 0:
        loopCount = 1
        infinteLoop = True
    else:
        infinteLoop = False

    while idx < loopCount:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        dt = clock() - t

        draw_str(vis, (20, 20), "time: %.1f ms" % (dt * 1000))
        if len(rects) == 0:
            draw_str(vis, (20, 40), "We are having trouble seeing you, move around just a bit")
        #            draw_str(vis,(20,450), 'Look Here')
        else:
            if infinteLoop:
                idx = 0
                print rects
            else:
                idx = idx + 1
                try:
                    rectsum = rectsum + rects
                except:
                    rectsum = rects
                    # first time assignment

        #       cv2.imshow('facetracker', vis)

        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
    return rectsum / idx
def get_face():
    import sys, getopt
    #print help_message

    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get('--cascade', "haarcascade_frontalface_alt.xml")
    nested_fn  = args.get('--nested-cascade', "haarcascade_eye.xml")

    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)

    cam = create_capture(video_src, fallback='synth:bg=lena.jpg:noise=0.05')

    just_face = ''

    while True:
        #pdb.set_trace()
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()

        # Change to crop image to only show face
        if (just_face == ''):
            just_face = img.copy()

        param = (cv2.IMWRITE_PXM_BINARY, 1)
        if (len(rects) > 0):
            just_face = gray.copy()
            (x1,y1,x2,y2) = rects[0]
            just_face = just_face[y1:y2, x1:x2]
            cv2.imwrite('./test_face.pgm', just_face, param)
            return './test_face.pgm'

        vis_roi = vis
        draw_rects(vis, rects, (0, 255, 0))
        dt = clock() - t

        draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
        cv2.imshow('facedetect', just_face)

        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
Example #10
def get_lbp_rects(img):
    # print(img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    t = clock()
    rects = detect(gray)
    dt = clock() - t

    return rects
Example #11
    def on_frame(self, frame):
        h, w = frame.shape[:2]
        qi = 0
        #print "on_frame %d x %d" % (h, w)
        frame_diff = cv2.absdiff(frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        ret, motion_mask = cv2.threshold(gray_diff, self._threshold, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)

        centers = []
        rects = []
        draws = []
        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi   = motion_mask        [y:y+rh,x:x+rw]
            orient_roi = mg_orient          [y:y+rh,x:x+rw]
            mask_roi   = mg_mask            [y:y+rh,x:x+rw]
            mhi_roi    = self.motion_history[y:y+rh,x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            if self._use_cv_gui:
                draws.append(lambda vis, rect=rect, angle=angle, color=color:
                                draw_motion_comp(vis, rect, angle, color))
            centers.append( (x+rw/2, y+rh/2) )
            rects.append(rect)

        self.tracker_group.update_trackers(centers, rects)

        #print 'Active trackers: %d' % len(trackers)
        #print 'Tracker score: %s' % ','.join(['%2d'%len(tracker.hits) for tracker in trackers])
        trackers = self.tracker_group.trackers
        cx, cy = None, None
        #print "#trackers = %d" % len(trackers)
        if len(trackers):
            first_tracker = trackers[0]
            cx, cy = center_after_median_threshold(frame, first_tracker.rect)
            cv2.circle(frame, (cx, cy), 5, (255, 255, 255), 3)
        print str(qi)*5; qi += 1
        print self._on_cx_cy
        self._on_cx_cy(cx, cy) # gives None's for no identified balloon
        print str(qi)*5; qi += 1

        if self._use_cv_gui:
            self.on_frame_cv_gui(frame, draws, (cx, cy))
        else:
            self.frame_vis(frame, draws, (cx, cy))

        #time.sleep(0.5)
        self.prev_frame = frame.copy()
        # TODO - print visualization onto image
        return frame
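The motion-history snippets here and in Example #30 assume module-level timing constants from OpenCV's motempl.py sample; the sample's defaults, shown as an assumption (this code may use different values):

MHI_DURATION = 0.5     # seconds of motion history retained
MAX_TIME_DELTA = 0.25  # max time gap, in seconds, for segmentMotion / calcMotionGradient
MIN_TIME_DELTA = 0.05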
Example #12
def get_image(camera, headTracking):
    global cascade
    ret, im = camera.read()
    t = clock()
    if headTracking:
        smallIm = cv2.resize(im, (160, 120))
        grey = cv2.cvtColor(smallIm, cv2.COLOR_BGR2GRAY)
        grey = cv2.equalizeHist(grey)
        rects = detect(grey, cascade)
        draw_rects(im, 4*rects, (0, 255, 0))
        #TODO: Also fire servos we need.
    draw_str(im, (20, 40), str(readOutputPinsArduino()))
    dt = clock() - t
    draw_str(im, (20, 20), 'Latency: %.4f ms' % (dt*1000))
    im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    cv_img = cv.fromarray(im_rgb)
    return cv_img
def faceDetect(img,classifier_xml_dir):
    cascade = cv2.CascadeClassifier(classifier_xml_dir)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    t = clock()
    rects = detect(gray, cascade)
    if len(rects)==0:
        facesFound = 0
    else:
        facesFound = 1    
    vis = img.copy()
    draw_rects(vis, rects, (0, 255, 0))
    for x1, y1, x2, y2 in rects:
        roi = gray[y1:y2, x1:x2]
        vis_roi = vis[y1:y2, x1:x2]
    dt = clock() - t
    draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
    return (rects,facesFound)
Example #14
    def update(self):
        # increment the total number of frames examined during the
        # start and end intervals
        self._numFrames += 1
        if self._numFrames == self._window_size * 2:
            self._numFrames -= 120
            self._start = self._window_start

        if self._numFrames == self._window_size:
            self._window_start = clock()
    def process_image(self):
        """
        This function finds faces, draws them and their approximation by ellipses.
        """
        t = clock()
        rects = self.detect(self.gray, self.cascade)
        vis = self.source_color
        self.draw_rects(np.asarray(vis[:,:]), rects, (0, 255, 0))
        for x1, y1, x2, y2 in rects:
            roi = self.gray[y1:y2, x1:x2]
            vis_roi = vis[y1:y2, x1:x2]
            subrects = self.detect(roi.copy(), self.nested)
            self.draw_rects(np.asarray(vis_roi[:,:]), subrects, (255, 0, 0))
        dt = clock() - t

        draw_str(np.asarray(vis[:,:]), (20, 20), 'time: %.1f ms' % (dt*1000))
        cv2.imshow('facedetect',  np.asarray(vis[:,:]))
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #16
def play_video(img, time_start, frame_count):
    time_span = clock() - time_start
    if time_span == 0:
        fps = 0
    else:
        fps = frame_count / time_span
    draw_str(img, (5, 30), 'fps: %d' % fps)

    cv2.imshow('play video', img)
    if 0xFF & cv2.waitKey(1) == KEY_ECS:
        raise ExitLoop
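play_video() relies on a key constant and a control-flow exception defined elsewhere in that script. A minimal sketch under assumptions: KEY_ECS = 27 matches the value used in the subscriber classes below (the constant keeps this spelling throughout), and ExitLoop is taken to be a plain Exception.

KEY_ECS = 27  # Esc key code

class ExitLoop(Exception):
    """Raised to break out of the playback loop."""
    pass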
Example #17
def init_scale(cam):
    while True:
        ret, img = cam.read()
        draw_str(img, (20, 20), 'To initialize: Keep camera static and perpendicular to ground plane ')
        draw_str(img, (20, 30), 'To initialize: Sit facing the camera approx. 2 feet away ')
        draw_str(img, (20, 40), 'To initialize: Press <Space> to initialize ')
        cv2.imshow('Initialize', img)

        if 0xFF & cv2.waitKey(5) == 32:
            break
    cv2.destroyAllWindows()

    print('Initializing')

    nFaces = 0
    avgHeight = 0
    t = clock()
    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        rects = detect(gray, cascade)
        draw_rects(img, rects, (0, 255, 0))
        for x1, y1, x2, y2 in rects:
            if( y2-y1 < 150  or  y2-y1 > 220 ):   # too small or too large; ignore as noise
                continue
            nFaces += 1
            avgHeight = ((nFaces-1) * avgHeight + (y2-y1)) / nFaces  # frontalface is square, so width is same
        dt = clock() - t
        draw_str(img, (20, 20), 'Initializing: Keep camera static and perpendicular to ground plane ')
        draw_str(img, (20, 30), 'Initializing: Sit facing the camera approx. 2 feet away ')

        if(dt <= 5.0):
            draw_str(img, (20, imHeight - 20), 'time: %.1f ms' % (dt * 1000))
        else:
            draw_str(img, (20, imHeight - 20), 'time: DONE' )
        cv2.imshow('Initializing', img)

        if (0xFF & cv2.waitKey(5) == 32) or dt > 5.0:
            break
    cv2.destroyAllWindows()
    return avgHeight
 def __init__(self, topic, should_mirror, verbose):
     self.topic = topic
     self.KEY_ECS = 27
     self.should_mirror = should_mirror
     self.verbose = verbose
     self.bridge = CvBridge()
     rospy.on_shutdown(self.cleanup)
     self.shutdowm_msg = "Shutting down."
     self.node_name = "image_subscriber"
     self.time_start = clock()
     self.fps = FPS()
    def showImg(self, frame, keypoints, lines, contours):
        if self.args.nodisplay and self.args.stashinterval == 0:
            return

        if keypoints:
            frame = cv2.drawKeypoints(
                frame, [keypoints[0]], np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
            )
            if len(keypoints) > 1:
                frame = cv2.drawKeypoints(
                    frame, keypoints[1:], np.array([]), (255, 205, 25), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
                )
        if lines is not None:
            for l in lines[0]:
                cv2.line(frame, (l[0], l[1]), (l[2], l[3]), (20, 255, 255))

        if contours is not None:
            contours0, hier = contours
            cindex = self.values[3]  # if -1, all are drawn
            maxlevel = self.values[4]
            if len(contours0) <= cindex:
                self.putNotice("reset contour id")
                self.values[3] = -1
                cindex = -1
            cv2.drawContours(
                frame,
                contours0,
                cindex,
                (128, 255, 255),
                thickness=1,
                lineType=cv2.CV_AA,
                hierarchy=hier,
                maxLevel=maxlevel,
            )

        if not self.args.nodisplay:
            cv2.imshow("img", frame)

        if self.args.stashinterval != 0 and (common.clock() - self.lastStashTime) > self.args.stashinterval:
            cv2.imwrite(self.stashFilename, frame, self.stashParams)
            self.lastStashTime = common.clock()
Example #20
def main():
    import sys, getopt

    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get('--cascade', "data/haarcascades/haarcascade_frontalface_alt.xml")
    nested_fn  = args.get('--nested-cascade', "data/haarcascades/haarcascade_eye.xml")

    cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
    nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))

    cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))

    while True:
        ret, img = cam.read()
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        gray = cv.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        if not nested.empty():
            for x1, y1, x2, y2 in rects:
                roi = gray[y1:y2, x1:x2]
                vis_roi = vis[y1:y2, x1:x2]
                subrects = detect(roi.copy(), nested)
                draw_rects(vis_roi, subrects, (255, 0, 0))
        dt = clock() - t

        draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
        cv.imshow('facedetect', vis)

        if cv.waitKey(5) == 27:
            break

    print('Done')
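This main() is essentially OpenCV's samples/python/facedetect.py, and like most examples above it assumes that sample's detect() and draw_rects() helpers; roughly:

import cv2 as cv

def detect(img, cascade):
    # run the cascade and convert (x, y, w, h) boxes to (x1, y1, x2, y2)
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4,
                                     minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:, 2:] += rects[:, :2]
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv.rectangle(img, (x1, y1), (x2, y2), color, 2)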
    def show_video(self, img):
        self.fps.update()
        draw_str(img, (5, 30), "fps: %s" % self.fps)

        cv2.imshow("show %s" % (self.topic), img)
        key = cv2.waitKey(1)
        if 0xFF & key == self.KEY_ECS:
            rospy.signal_shutdown("User hit q key to quit.")
        elif 0xFF & key == ord("a"):
            file_name = "image_%s.jpg" % (str(int(clock())))
            cv2.imwrite(file_name, img)
            print "%s has saved." % file_name
Example #22
 def __init__(self, topic, should_mirror, verbose):
     self.topic = topic
     self.KEY_ECS = 27
     self.should_mirror = should_mirror
     self.verbose = verbose
     self.bridge = CvBridge()
     rospy.on_shutdown(self.cleanup)
     self.shutdowm_msg = "Shutting down."
     self.node_name = 'image_subscriber'
     self.time_start = clock()
     self.frames = []
     self.frame_max = 90
     self.imgPlayer = ImagePlayer("show %s" % (self.topic))
    def __init__(self, stream):
        self.stream = stream
        self.numThread = cv2.getNumberOfCPUs()
        #self.numThread = 1
        self.workerPool = ThreadPool(processes = self.numThread)
        self.pendingWorker = deque()

        self.latency = StatValue()
        self.frameInterval = StatValue()
        self.lastFrameTime = clock()

        self.outFrames = deque(maxlen = self.numThread)
        self.faces = []
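StatValue, used here to track latency and frame interval, is the exponentially smoothed scalar from OpenCV's samples/python/common.py; a sketch:

class StatValue:
    def __init__(self, smooth_coef=0.5):
        self.value = None
        self.smooth_coef = smooth_coef

    def update(self, v):
        # exponential smoothing of successive measurements
        if self.value is None:
            self.value = v
        else:
            c = self.smooth_coef
            self.value = c * self.value + (1.0 - c) * v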
Example #24
    def show_video(self):
        while not rospy.is_shutdown():
            if len(self.frames) > 0:
                img = self.frames.pop(0)
                self.imgPlayer.show(img)

                key = self.imgPlayer.get_key()
                if key == self.KEY_ECS:
                    rospy.signal_shutdown("User hit q key to quit.")
                elif key == ord('a'):
                    file_name = 'image_%s.jpg' % (str(int(clock())))
                    cv2.imwrite(file_name, img)
                    print '%s has been saved.' % file_name
 def threadedProcess(self):
           
     rects = [] 
     if len(self.pendingWorker) > 0 and self.pendingWorker[0].ready():
         task = self.pendingWorker.popleft()
         frame, curTime = task.get()
         self.latency.update(clock() - curTime)
         
         draw_str(frame, (20, config.VIDEO_WIDTH -20), "Latency: %.1f ms" % (self.latency.value*1000))
         draw_str(frame, (20, config.VIDEO_WIDTH - 35), "FPS: %d" % (1/self.frameInterval.value))
         #print("Latency %lf" % (self.latency.value*1000))
         #print("FPS: %d" % (1/self.frameInterval.value))
         self.outFrames.append(frame)
         #cv2.imshow('Processed Video', frame) 
         #cv2.waitKey(1)
     if len(self.pendingWorker) < self.numThread:
         frame = self.stream.read()
         t = clock()
         self.frameInterval.update(t - self.lastFrameTime)
         self.lastFrameTime = t
         task = self.workerPool.apply_async(process, (copy.copy(frame), t))
         self.pendingWorker.append(task)
    def threadedProcess(self):
              
        rects = [] 
        if len(self.pendingWorker) > 0 and self.pendingWorker[0].ready():
            task = self.pendingWorker.popleft()
            frame, curTime = task.get()
            self.latency.update(clock() - curTime)
            
            draw_str(frame, (20, 360-20), "Latency: %.1f ms" % (self.latency.value*1000))
            draw_str(frame, (20, 360- 35), "FPS: %d" % (1/self.frameInterval.value))
            self.outFrames.append(frame)
        '''
        if len(self.pendingWorker) > 0:
            for i in range(0, len(self.pendingWorker)):
                if self.pendingWorker[i].ready():
                    for j in range(0, i):
                        waste = self.pendingWorker.popleft()
                        try:
                            waste.get()
                        except:
                            pass

                    task = self.pendingWorker.popleft()
                    frame, time = task.get()
                    self.latency.update(clock() - time)
                    draw_str(frame, (20, 20), "Latency: %.1f ms" % (self.latency.value*1000))
                    draw_str(frame, (300, 20), "FPS: %d" % (1/self.frameInterval.value))
                    cv2.imshow('Processed Video', frame)
                    cv2.waitKey(1)
                    break
        '''
        if len(self.pendingWorker) < self.numThread:
            grab, frame = self.stream.read()
            t = clock()
            self.frameInterval.update(t - self.lastFrameTime)
            self.lastFrameTime = t
            task = self.workerPool.apply_async(process, (copy.copy(frame), t))
            self.pendingWorker.append(task)
Example #27
def handle_pub(video_path):
    topic = '/file/video'
    rospy.init_node('video_publisher')
    pub = rospy.Publisher(topic, Image, queue_size=2)

    print "\npublish video to topic:%s from file:%s ..." % (topic, video_path)
    videoCapture = cv2.VideoCapture(video_path)
    bridge = CvBridge()

    rate = rospy.Rate(FPS)
    time_start = clock()
    frame_count = 0
    success, img = videoCapture.read()
    while success:
        img_copy = img.copy()
        try:
            msg = bridge.cv2_to_imgmsg(img, "bgr8")
        except CvBridgeError as e:
            print(e)

        pub.publish(msg)

        if show_video == True:
            time_span = clock() - time_start
            if time_span == 0:
                fps = 0
            else:
                fps = frame_count / time_span
            draw_str(img_copy, (5, 30), 'fps: %d' % fps)

            cv2.imshow('play video', img_copy)
            if 0xFF & cv2.waitKey(1) == KEY_ECS:
                break

        rate.sleep()
        success, img = videoCapture.read()
        frame_count += 1

    cv2.destroyAllWindows()
    def capture(self):
        string = ""
        ret, img = self.cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = self.detect(gray, self.cascade)
        if len(rects) == 0:
            string = str(int(round(time.time() * 1000))) + ";n.f.;"
        vis = img.copy()
        self.draw_rects(vis, rects, (0, 255, 0))
        for x1, y1, x2, y2 in rects:
            roi = gray[y1:y2, x1:x2]
            vis_roi = vis[y1:y2, x1:x2]
            subrects_fn = self.detect(roi.copy(), self.nested)
            subrects_glass = self.detect(roi.copy(), self.glass)
            subrects_le = self.detect(roi.copy(), self.le)
            subrects_re = self.detect(roi.copy(), self.re)
            string = string + str(int(round(time.time() * 1000))) + ";"
            if not len(subrects_fn) == 0:
                self.draw_rects(vis_roi, subrects_fn, (255, 0, 0))
                string = string + "1;"
            elif not len(subrects_glass) == 0:
                self.draw_rects(vis_roi, subrects_glass, (255, 0, 0))
                string = string + "1;"
            elif (not len(subrects_le) == 0) or (not len(subrects_re) == 0):
                self.draw_rects(vis_roi, subrects_le, (255, 0, 0))
                self.draw_rects(vis_roi, subrects_re, (255, 0, 0))
                string = string + "0;"
            else:
                string = string + "n.e.;"
        dt = clock() - t
	    
        draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
        cv2.imshow('facedetect', vis)
        cv2.imwrite(self.foldername + "/eyeCaptureImages/" + str(int(round(time.time()*1000))) + ".jpg", vis)
        return string
Example #29
 def __init__(self, topic, should_mirror, verbose, limit=1000):
     self.topic = topic
     self.KEY_ECS = 27
     self.should_mirror = should_mirror
     self.verbose = verbose
     self.bridge = CvBridge()
     rospy.on_shutdown(self.cleanup)
     self.shutdowm_msg = "Shutting down."
     self.node_name = 'image_subscriber'
     self.time_start = clock()
     self.limit = limit
     self.frame_count = 0
     self.total_latency = 0
     self.fps = FPS()
Example #30
    def update(self, frame):
        frame_diff = cv2.absdiff(frame, self.prev_frame)
        real_diff = frame - self.prev_frame
        self.real_diff = cv2.cvtColor(real_diff,  cv2.COLOR_BGR2GRAY)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = 40 #cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)

        self.vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
        self.vis = cv2.cvtColor(self.vis, cv2.COLOR_GRAY2BGR)
        #self.process_motions()
        self.prev_frame = frame.copy()
Example #31
        else:
            if not platform.system() == 'Windows':
                print("Unsupported format in capture check ", fourcc)
                break
            else:
                imglR = cv2.resize(img, (640, 360))
                gray = cv2.cvtColor(imglR, cv2.COLOR_BGR2GRAY)

        if show_params:
            cv2.putText(img, "Mode: {}".format(fourcc), (15, 40), font, 1.0,
                        color)
            cv2.putText(img, "FPS: {}".format(fps), (15, 80), font, 1.0, color)

        vis = img.copy()
        if (fotograma % detectar) == 0:
            t = clock()
            rects = detect(gray, cascade)
            #draw_rects(vis, rects, (0, 255, 0))
            rects2 = detect(gray, cascade2)
            #draw_rects(vis, rects2, (255, 255, 0))
            ret = ru.promediarDetecciones(rects, rects2)
            if show_params:
                draw_rects(vis, np.array(ret, dtype=np.int32), (0, 255, 0),
                           scale)
            dt = clock() - t
            if show_params:
                draw_str(vis, (20, 20),
                         'time Detection: %.1f ms' % (dt * 1000))
                draw_str(vis, (200, 20),
                         'time Tracking: %.1f ms' % (dt2 * 1000))
            boxes = multiTracker.update(imglR)
    def Run(self):
        if self.args.nodisplay:
            print("\nimgExplore: running in nodisplay mode (keys inactive)")
        else:
            print("\n\nkeys:\n" \
      "  ESC: exit\n\n" \
      "  c: rgb\n" \
      "  r: red\n" \
      "  g: green\n" \
      "  b: blue,\n" \
      "  h: hue\n" \
      "  s: sat\n"\
      "  v: val\n" \
      "  1: adaptive threshold (thresh,inv)\n"\
      "  2: threshold          (thresh,inv)\n"\
      "  3: huerange*valrange  (hmin,hmax,vmin,vmax)\n"\
      "  4: canny edges        (thresh/10, range)\n"\
      "  5: simple blobs       (thresh0,thresh1,treshStep,minA,colorthresh)\n"\
      "  6: houghlines         (rho,theta,thresh,minlen,maxgap)\n"\
      "  7: contours           (mode:0-3,method:0-3,offset,id(-1:all),depth)\n"\
      "  8: ORB features       (nfeatures,scaleFactor(0->255)],patchSize)\n"\
      "  9: dance1         	   (minX,maxX)\n"\
      "  0: gamma         	   (gamma)\n"\
      "\n"\
      "  F1,F2:            -/+ v1\n"\
      "  F3,F4:            -/+ v2\n"\
      "  F5,F6:            -/+ v3\n"\
      "  F7,F8:            -/+ v4\n"\
      "  F9,F10:           -/+ v5\n"\
      "  F11,F12:          -/+ v6\n"\
      "\n"\
      "  <right arrow>:    increase img seq frame\n"\
      "  <left arrow>:     decrease img seq frame\n"\
          )

        if not self.fnpat:
            for i in range(0, 4):
                vsrc = cv2.VideoCapture(i)
                if not vsrc or not vsrc.isOpened():
                    print("Problem opening video source %d" % i)
                    vsrc = None
                else:
                    break

            if not vsrc:
                exit(1)
            else:
                ret1 = vsrc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 1280)
                ret2 = vsrc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 720)
                print(ret1, ret2)
                if 1:
                    w = vsrc.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
                    h = vsrc.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
                    print("video res: %d %d" % (w, h))
        else:
            vsrc = None

        self.update = False
        while True:
            while len(self.pending) > 0 and self.pending[0].ready():
                frame, t0, keypoints, lines, contours = self.pending.popleft(
                ).get()
                self.latency.update(common.clock() - t0)
                self.robotCnx.SetFPS(int(1 / self.frameT.value))

                self.drawStr(
                    frame, (20, 20),
                    "latency       : %.1f ms" % (self.latency.value * 1000))
                self.drawStr(
                    frame, (20, 40),
                    "frame interval: %.1f ms" % (self.frameT.value * 1000))
                self.showImg(frame, keypoints, lines, contours)

            if vsrc:
                # Here we have a video source... Capture the image in the
                # main thread, process it in other threads.
                if len(self.pending) < self.threadn:
                    # we have threads available to perform work for us
                    ret, self.mainImg = vsrc.read()  # <---can take some time
                    t = common.clock()
                    self.frameT.update(t - self.lastFrameTime)
                    self.lastFrameTime = t
                    # pass a copy of the new frame to another thread for
                    # processing this alows us to get back to the
                    # time-consuming task of capturing the next video frame.
                    task = self.pool.apply_async(
                        processFrameCB, (self, self.mainImg.copy(), t))
                    self.pending.append(task)

                done,self.update,self.cmode,self.index,self.values,msg = \
                    self.checkKey(1, self.cmode, self.index, self.values)

                if msg:
                    self.putStatus(msg)

                if done:
                    break
            else:
                # Here we don't have a video source, rather rely on 'canned'
                #  images. Thus, we don't use multiple threads... just do it
                #  all right here.
                if self.index < 0:
                    # index < 0 signals user's intent to go bkd in framelist
                    goback = True
                    self.index = abs(self.index)
                else:
                    goback = False
                fn = self.fnpat % self.index
                base = os.path.basename(fn)
                if not os.path.isfile(fn) and self.index >= 0:
                    # image sequence may be missing some frames..
                    self.putNotice(base + " not found")
                    if goback:
                        self.index = -(self.index - 1)
                    else:
                        self.index = self.index + 1
                    continue

                if fn != self.lastFn or self.update:
                    if not self.update:
                        # update the current filename status field
                        self.putNotice(base)

                    self.mainImg = cv2.imread(fn, cv2.IMREAD_ANYCOLOR)
                    (img, t0, keypts, lines,
                     contours) = self.processFrame(self.mainImg.copy(),
                                                   common.clock())
                    self.frameT.update(common.clock() - t0)
                    self.robotCnx.SetFPS(int(1 / self.frameT.value))

                    if not self.args.nodisplay:
                        if self.cmode == 'rgb':
                            str = "%s     (%.2f ms)" % \
                                    (base,self.frameT.value*1000)
                        else:
                            str = "%s[%s] (%.2f ms)" % \
                                    (base,self.cmode,self.frameT.value*1000)
                        self.drawStr(img, (20, 20), str)
                        self.showImg(img, keypts, lines, contours)

                    self.lastFn = fn

                done,self.update,self.cmode,self.index,self.values,msg = \
                    self.checkKey(10, self.cmode, self.index, self.values)

                if msg:
                    self.putStatus(msg)

                if done:
                    break
        # end of while
        self.robotCnx.Shutdown()
    def process_frame(frame, t0):
        # some intensive computation...
        frame = cv2.medianBlur(frame, 19)
        # frame = cv2.medianBlur(frame, 19)
        return frame, t0

    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes = threadn)
    pending = deque()

    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:
        while len(pending) > 0 and pending[0].ready():
            '''   '''
            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "Latency: %.1f ms" % (latency.value*1000))
            draw_str(res, (100, 20), "Frame interval: %.1f ms" % (frame_interval.value*1000))
            print('Interval: %.1f ms' % (frame_interval.value * 1000))
            #cv2.imshow('threaded video', res)
            frame = cv2.medianBlur(frame, 19)
        if len(pending) < threadn:
            #camera.capture(rawCap, format = "bgr")
            #frame = rawCap.array
            frame = cap.read()
            t = clock()
Example #34
def main(cam_id):
    """
    Because eye detection is unstable, an assumed eye region inside the already-detected
    face rectangle is used instead of a detected one.
    Combines keypoint tracking with optical flow over this pseudo eye region:
    if the keypoints stay still while the eye-region flow is significant, the subject
    is judged to be live. Blinking is the main cue used for liveness detection.
    :return:
    """
    print(__doc__)
    cap = cv2.VideoCapture(cam_id)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('D:/data.avi', fourcc, 30, (640, 480))
    f_cascade = cv2.CascadeClassifier(
        "C:/opencv/opencv/build/etc/haarcascades/haarcascade_frontalface_alt2.xml"
    )
    e_cascade = cv2.CascadeClassifier(
        "C:\\opencv\\opencv\\build\etc\\haarcascades\\haarcascade_eye.xml")
    ret, prev = cap.read()
    prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    feature_params = dict(maxCorners=500,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    tracks = []
    frame_index = 0
    detect_interval = 3
    track_len = 10
    msg_show_opt = 0  # frames left to show the optical-flow pass message
    msg_show_key = 0  # frames left to show the keypoint pass message
    has_face = False

    # store the optical flow of every frame
    eye_flow_lines_t = []

    while True:
        if cv2.waitKey(1) == 27:  # Esc for exit
            break
        t = clock()
        ret, img = cap.read()
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        rectangles = detect(gray, f_cascade)

        mask = np.zeros_like(gray)  # mask for keypoint detection

        if len(rectangles) == 1:  # restrict processing to a single face
            if not (has_face and True):
                tracks = []
            has_face = True
            for rectangle in rectangles:
                rx0, ry0, rx1, ry1 = rectangle
                # if not (140 < rx1 - rx0 < 160 and 140 < ry1 - ry0 < 160):  # constrain the size of the face detection box
                #     continue
                draw_rectangle(img, rectangle, color=(0, 225, 0))  # face region
                rectangles_eye = detect(gray[ry0:ry1, rx0:rx1],
                                        e_cascade)  # get the eye regions
                # draw_rectangles(img[ry0:ry1, rx0:rx1], rectangles_eye, color=(255, 0, 225))

                # eye-region optical flow
                eye_flow_lines = []
                # for erx0, ery0, erx1, ery1 in rectangles_eye:
                #     eye_flow = opt_flow(prev_gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1],
                #                         gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1])  # get opt flow
                #     eye_flow_lines.append(draw_flow(img[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1],
                #                                     eye_flow, step=4))  # draw the flow points

                # Pseudo eye region: assume the eyes lie between 1/4 and 1/2 of the face height
                # and between 1/6 and 5/6 of the face width, to compensate for the eye detector
                # not firing on every frame.

                face_h = ry1 - ry0
                face_w = rx1 - rx0
                face_hs = face_h // 4
                face_he = face_h // 2
                face_ws = face_w // 6
                face_we = face_w // 6 * 5
                eye_flow = opt_flow(
                    prev_gray[ry0:ry1, rx0:rx1][face_hs:face_he,
                                                face_ws:face_we],
                    gray[ry0:ry1, rx0:rx1][face_hs:face_he, face_ws:face_we])
                eye_flow_lines.append(
                    draw_flow(img[ry0:ry1, rx0:rx1][face_hs:face_he,
                                                    face_ws:face_we],
                              eye_flow,
                              step=4))

                eye_sorted = []  # sorted flow displacement magnitudes (eye region)
                eye_sorted2 = []
                for lines in eye_flow_lines:
                    mds = []
                    for (x1, y1), (x2, y2) in lines:
                        md = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
                        mds.append(md)
                        eye_sorted2.append(md)
                    eye_sorted.append(sorted(mds, reverse=True))
                    eye_flow_lines_t.append(eye_sorted2)  # store per-frame flow displacement info
                # Draw the keypoint tracks.
                # Keypoints with large displacement are dropped;
                # this does not affect the other checks.
                if len(tracks) > 0:
                    img0, img1 = prev_gray, gray
                    p0 = np.float32([tr[-1]
                                     for tr in tracks]).reshape(-1, 1, 2)
                    p1, st, err = cv2.calcOpticalFlowPyrLK(
                        img0, img1, p0, None, **lk_params)
                    p0r, st, err = cv2.calcOpticalFlowPyrLK(
                        img1, img0, p1, None, **lk_params)
                    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                    good = d < 0.5
                    new_tracks = []
                    for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2),
                                                     good):
                        if not good_flag:
                            continue
                        if not (rx0 < x < rx1 and ry0 < y < ry1):
                            continue
                        tr.append((x, y))
                        if len(tr) > track_len:
                            del tr[0]
                        new_tracks.append(tr)
                        cv2.circle(img, (x, y), 2, (0, 255, 0), -1)
                    tracks = new_tracks
                    cv2.polylines(img, [np.int32(tr) for tr in tracks], False,
                                  (0, 255, 0))
                    draw_str(img, (20, 20), 'track count: %d' % len(tracks))

                # restrict the region of interest to the face
                cv2.fillPoly(
                    mask,
                    np.array([[[rx0, ry0], [rx1, ry0], [rx1, ry1],
                               [rx0, ry1]]]), (255, 255, 255))
                for x, y in [np.int32(tr[-1]) for tr in tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)  # exclude the previous keypoints

                if frame_index % detect_interval == 0:
                    print('**************** start ***************')
                    l_sorted = []
                    l_sorted_eye = []  # keypoints inside the eye region
                    l_sorted_out = []  # keypoints outside the eye region

                    l_tmp = []
                    l_tmp_eye = []
                    l_tmp_out = []
                    for tr in tracks:
                        (x0, y0) = tr[0]
                        (x1, y1) = tr[-1]

                        if rx0 + face_ws < x1 < rx0 + face_we and ry0 + face_hs < y1 < ry1 + face_he:
                            l_tmp_eye.append(
                                round(math.sqrt((x1 - x0)**2 + (y1 - y0)**2),
                                      2))
                        else:
                            l_tmp_out.append(
                                round(math.sqrt((x1 - x0)**2 + (y1 - y0)**2),
                                      2))

                        l = round(math.sqrt((x1 - x0)**2 + (y1 - y0)**2), 2)
                        l_tmp.append(l)
                        # if l > 0:
                        # print(round(math.atan(abs((y1 - y0) / (x1 - x0))) / math.pi * 180, 2), end=':')
                        print(l, end='\t')
                    print('\n+++++++++++++++')

                    l_sorted = sorted(l_tmp, reverse=True)
                    l_sorted_eye = sorted(l_tmp_eye, reverse=True)
                    l_sorted_out = sorted(l_tmp_out, reverse=True)

                    if len(l_sorted_out) > 3 and len(l_sorted_eye) > 3 \
                            and l_sorted_out[0] < 1 and l_sorted_eye[0] > 1 \
                            and l_sorted_eye[0] - l_sorted_out[0] > 1:
                        print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
                        msg_show_key = 30

                    # ======== print the first ten ========
                    if True:
                        for i, md2 in enumerate(eye_sorted):
                            count = 0
                            print('eye', str(i + 1), end=':\t')
                            for md in md2:
                                count += 1
                                if count > 150:
                                    break
                                print(round(md, 2), end=',')
                            print()
                        print('###################')

                    # liveness check
                    np_eye = np.array(sorted(eye_sorted2, reverse=True)[:30])
                    np_eye = np_eye[np_eye > 0]
                    np_l = np.array(l_sorted[:10])

                    print(np_eye.size, '+++++', np_l.size)
                    if np_eye.size != 0 and np_l.size != 0:
                        flow_pre = np_eye[np_eye > 2].size * 1.0 / np_eye.size
                        ln_pre = np_l[np_l > 2].size * 1.0 / np_l.size
                        print(flow_pre, '---', ln_pre)
                        if 0.8 > flow_pre > 0.05 and ln_pre < 0.2:
                            msg_show_opt = 30
                            print(
                                'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'
                            )

                    print('**************** end ***************')
                    # detect keypoints
                    p = cv2.goodFeaturesToTrack(gray,
                                                mask=mask,
                                                **feature_params)
                    if p is not None:
                        for x, y in np.float32(p).reshape(-1, 2):
                            tracks.append([(x, y)])

            frame_index += 1
        else:
            has_face = False

        prev_gray = gray
        dt = clock() - t
        # draw_str(img, (20, 20), 'time: %.1f ms' % (dt * 1000))
        if msg_show_key > 0:
            draw_str(img, (450, 20), 'YES by KEY', front=(0, 0, 255))
            msg_show_key -= 1
        if msg_show_opt > 0:
            draw_str(img, (300, 20), 'YES by OPT', front=(0, 0, 255))
            msg_show_opt -= 1
        cv2.imshow("Face detect" + str(cam_id), img)
        out.write(img)
        # cv2.imshow('mask', mask)
    cap.release()
    out.release()
    cv2.destroyAllWindows()
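main(cam_id) calls opt_flow() and draw_flow() helpers that are not shown. Judging from how the result is iterated, draw_flow here returns the sampled line endpoints; a sketch under that assumption, adapted from OpenCV's opt_flow.py sample (the Farneback parameters are the sample's defaults, not necessarily the author's):

import numpy as np
import cv2

def opt_flow(prev_gray, gray):
    # dense Farneback optical flow between two grayscale frames (assumed implementation)
    return cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)

def draw_flow(img, flow, step=16):
    # draw sampled flow vectors on img and return their endpoints as an (N, 2, 2) array
    h, w = img.shape[:2]
    y, x = np.mgrid[step // 2:h:step, step // 2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    cv2.polylines(img, lines, 0, (0, 255, 0))
    return lines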
    cam = video.create_capture(
        video_src, fallback='synth:class=chess:bg=../cpp/lena.jpg:noise=0.01')
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:, :, 1] = 255
    while True:
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        print type(motion_mask[0][0])
        timestamp = clock()
        cv2.motempl.updateMotionHistory(motion_mask, motion_history, timestamp,
                                        MHI_DURATION)
        mg_mask, mg_orient = cv2.motempl.calcMotionGradient(motion_history,
                                                            MAX_TIME_DELTA,
                                                            MIN_TIME_DELTA,
                                                            apertureSize=5)
        seg_mask, seg_bounds = cv2.motempl.segmentMotion(
            motion_history, timestamp, MAX_TIME_DELTA)

        visual_name = visuals[cv2.getTrackbarPos('visual', 'motempl')]
        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
Example #36
def image_cap():
    d=[]


    print(__doc__)

    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cap = video.create_capture(fn)
    cap.set(cv.CAP_PROP_FPS, 12)

    def process_frame(frame, t0):
        # some intensive computation...
        #frame = cv.medianBlur(frame, 19)
        #frame = cv.medianBlur(frame, 19)
        return frame, t0

    threadn = cv.getNumberOfCPUs()
    pool = ThreadPool(processes = threadn)
    pending = deque()

    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:

        while len(pending) > 0 and pending[0].ready():

            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "threaded      :  " + str(threaded_mode))
            draw_str(res, (20, 40), "latency        :  %.1f ms" % (latency.value*1000))
            draw_str(res, (20, 60), "frame interval :  %.1f ms" % (frame_interval.value*1000))
            res=cv.resize(res,(176,100))
            cv2_im = cv.cvtColor(res, cv.COLOR_BGR2RGB)
            pil_im = Image.fromarray(cv2_im)

            d.append(np.array(pil_im))
            if len(d)==32:
                t1 = data_input(d[0:16])
                t2 = data_input(d[16:32])
                in_x = np.array([t1, t2])
                in_x = np.reshape(in_x, (2, 16, 128, 128, 3))
                start = time.clock()
                #p = Pool(1)
                #p.map(evaluate, in_x)

                evaluate(in_x)
                elapsed = time.clock()
                elapsed = elapsed - start
                print("Time spent in (function name) is: ", elapsed)
                d=[]


            cv.imshow('threaded video', res)
        if len(pending) < threadn:
            ret, frame = cap.read()
            t = clock()
            frame_interval.update(t - last_frame_time)
            last_frame_time = t
            if threaded_mode:
                task = pool.apply_async(process_frame, (frame.copy(), t))
            else:
                task = DummyTask(process_frame(frame, t))
            pending.append(task)
        ch = cv.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break




    cv.destroyAllWindows()
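DummyTask, used in the non-threaded branch of image_cap(), is the trivial stand-in for a ThreadPool AsyncResult from OpenCV's samples/python/video_threaded.py; a sketch:

class DummyTask:
    # synchronous stand-in for an AsyncResult returned by ThreadPool.apply_async
    def __init__(self, data):
        self.data = data

    def ready(self):
        return True

    def get(self):
        return self.data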
    known_face_names.append(re.sub("[0-9]", '', filename[:-4]))
    known_face_encodings.append(face_recognition.face_encodings(face)[0])

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

# get the plugged-in camera; otherwise, fall back to a default image.
cam = create_capture(0,
                     fallback='synth:bg={}:noise=0.05'.format(
                         cv.samples.findFile('src/image/image.jpg')))

while True:
    _, frame = cam.read()
    tempsAvantCalculs = clock()

    if process_this_frame:
        face_locations = face_recognition.face_locations(frame)
        face_encodings = face_recognition.face_encodings(frame, face_locations)

        face_names = []

    for face_encoding in face_encodings:
        matches = face_recognition.compare_faces(known_face_encodings,
                                                 face_encoding)
        name = "Inconnu"

        face_distances = face_recognition.face_distance(
            known_face_encodings, face_encoding)
        best_match_index = np.argmin(face_distances)
Example #38
def main():
    """
    Select eight test regions and look for regularities in their optical flow.
    :return:
    """
    print __doc__
    cap = cv2.VideoCapture(0)
    f_cascade = cv2.CascadeClassifier("C:/opencv/opencv/build/etc/haarcascades/haarcascade_frontalface_alt.xml")
    e_cascade = cv2.CascadeClassifier("C:\\opencv\\opencv\\build\etc\\haarcascades\\haarcascade_eye.xml")
    ret, prev = cap.read()
    prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    track_len = 10
    detect_interval = 1
    tracks = []
    frame_idx = 0

    lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    feature_params = dict(maxCorners=500, qualityLevel=0.3, minDistance=7, blockSize=7)

    # test_areas = []  # the four fixed corners
    # test_areas.append((0, 0, 128, 128))  # lu
    # test_areas.append((640 - 128, 0, 640, 128))  # ru
    # test_areas.append((640 - 128, 480 - 128, 640, 480))  # rd
    # test_areas.append((0, 480 - 128, 128, 480))  # ld

    while True:
        if cv2.waitKey(1) == 27:  # Esc for exit
            break
        t = clock()
        ret, img = cap.read()
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        h, w = gray.shape
        rectangles = detect(gray, f_cascade)
        # draw_rectangles(img, rectangles, color=(255, 225, 0))  # face regions
        # draw_rectangles(img, rectangles, scaling=1.3)  # enlarged regions

        if len(rectangles) == 1:  # restrict processing to a single face
            for rectangle in rectangles:
                rx0, ry0, rx1, ry1 = rectangle
                # if rx1 - rx0 != 151 and ry1 - ry0 != 151:  # constrain the size of the face detection box
                #     continue
                draw_rectangle(img, rectangle, color=(255, 225, 0))  # face region
                # draw_rectangle(img, rectangle, scaling=1.4)  # enlarged region
                rectangles_eye = detect(gray[ry0:ry1, rx0:rx1], e_cascade)



                # if len(rectangles_eye) < 1:  # require both eyes to be detected
                #     continue

                test_areas_face = []
                size = 32
                face_h = ry1 - ry0
                face_w = rx1 - rx0
                test_areas_face.append((0, 0, size, size))  # lu
                test_areas_face.append((face_w - size, 0, face_w, size))  # ru
                test_areas_face.append((face_w - size, face_h - size, face_w, face_h))  # rd
                test_areas_face.append((0, face_h - size, size, face_h))  # ld
                test_areas_face.append((face_w / 4, 0, face_w / 4 * 3, size))  # mu
                test_areas_face.append((face_w / 4, face_h - size, face_w / 4 * 3, face_h))  # md
                test_areas_face.append((0, face_h / 4, size, face_h / 4 * 3))  # ml
                test_areas_face.append((face_w - size, face_h / 4, face_w, face_h / 4 * 3))  # mr

                draw_rectangles(img[ry0:ry1, rx0:rx1], rectangles_eye, color=(255, 0, 225))

                draw_rectangles(img[ry0:ry1, rx0:rx1], test_areas_face, color=(25, 23, 225))

                eye_flow_lines = []  # eye optical-flow displacements
                eye_flow_angs = []  # eye optical-flow angles
                for erx0, ery0, erx1, ery1 in rectangles_eye:
                    if len(tracks) > 0:
                        img0, img1 = prev_gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1], gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1]
                        p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
                        p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                        p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                        d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                        good = d < 1
                        new_tracks = []
                        for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
                            if not good_flag:
                                continue
                            tr.append((x, y))
                            if len(tr) > track_len:
                                del tr[0]
                            new_tracks.append(tr)
                            cv2.circle(img[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1], (x, y), 2, (0, 255, 0), -1)
                        tracks = new_tracks
                        cv2.polylines(img[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1], [np.int32(tr) for tr in tracks], False, (0, 255, 0))
                        draw_str(img, (200, 20), 'track count: %d' % len(tracks))

                    if frame_idx % detect_interval == 0:
                        mask = np.zeros_like(gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1])
                        mask[:] = 255
                        for x, y in [np.int32(tr[-1]) for tr in tracks]:
                            cv2.circle(mask, (x, y), 2, 0, -1)
                        p = cv2.goodFeaturesToTrack(gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1], mask=mask, **feature_params)
                        if p is not None:
                            for x, y in np.float32(p).reshape(-1, 2):
                                tracks.append([(x, y)])

                test_flow_lines = []  # optical-flow displacements in the test areas
                test_flow_angs = []  # optical-flow angles in the test areas
                for erx0, ery0, erx1, ery1 in test_areas_face:
                    test_flow = opt_flow(prev_gray[ery0:ery1, erx0:erx1], gray[ery0:ery1, erx0:erx1])  # get opt flow
                    lines = draw_flow(img[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1], test_flow, step=4)  # draw the flow vectors
                    test_flow_lines.append(lines)

                    # mag, ang = cv2.cartToPolar(eye_flow[..., 0], eye_flow[..., 1])
                    # test_flow_angs.append(ang)

                eye_sorted = []  # sorted displacement lengths (eye regions)
                for lines in eye_flow_lines:
                    mds = []
                    for (x1, y1), (x2, y2) in lines:
                        md = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
                        mds.append(md)
                    eye_sorted.append(sorted(mds, reverse=True))

                test_sorted = []  # sorted displacement lengths (test areas)
                for lines in test_flow_lines:
                    mds = []
                    for (x1, y1), (x2, y2) in lines:
                        md = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
                        mds.append(md)
                    test_sorted.append(sorted(mds, reverse=True))
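                # The two loops above repeat the same per-vector magnitude
                # computation.  A small helper (a sketch; it assumes `lines`
                # is an iterable of ((x1, y1), (x2, y2)) point pairs, exactly
                # as iterated above) would keep the logic in one place:
                #
                #   def flow_magnitudes(lines):
                #       return sorted((math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
                #                      for (x1, y1), (x2, y2) in lines), reverse=True)
                #
                #   eye_sorted = [flow_magnitudes(l) for l in eye_flow_lines]
                #   test_sorted = [flow_magnitudes(l) for l in test_flow_lines]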

                # ======== print the ten largest =========================
                if True:
                    for i, md2 in enumerate(eye_sorted):
                        count = 0
                        print 'Eye ' + str(i + 1) + ':\t',
                        for md in md2:
                            count += 1
                            if count > 10:
                                break
                            print str(round(md, 2)) + ',',
                        print ''
                    print ''

                    for i, md2 in enumerate(test_sorted):
                        count = 0
                        print 'Test ' + str(i + 1) + ':\t',
                        for md in md2:
                            count += 1
                            if count > 10:
                                break
                            print str(round(md, 2)) + ',',
                        print ''
                    print ''

                if False:
                    # ============= print the average of the ten largest ============
                    for i, md2 in enumerate(eye_sorted):
                        count = 0
                        print 'Eye ' + str(i + 1) + ':\t',
                        sum_avg = []
                        for md in md2:
                            count += 1
                            if count > 10:
                                break
                            sum_avg.append(md)
                        print round(1.0 * sum(sum_avg) / len(sum_avg), 2)

                    for i, md2 in enumerate(test_sorted):
                        count = 0
                        print 'Test ' + str(i + 1) + ':\t',
                        sum_avg = []
                        for md in md2:
                            count += 1
                            if count > 10:
                                break
                            sum_avg.append(md)
                        print round(1.0 * sum(sum_avg) / len(sum_avg), 2)
                    print ''

        prev_gray = gray
        dt = clock() - t
        draw_str(img, (20, 20), 'time: %.1f ms' % (dt * 1000))
        cv2.imshow("Face detect", img)
    cap.release()
    cv2.destroyAllWindows()
Beispiel #39
0
        video_src = 0
    args = dict(args)
    face_fn = args.get('--face-cascade',
                       "data/haarcascades/haarcascade_frontalface_alt.xml")
    hand_fn = args.get('--hand-cascade', "data/haarcascades/palm.xml")

    face_cascade = cv2.CascadeClassifier(face_fn)
    hand_cascade = cv2.CascadeClassifier(hand_fn)

    ar_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100)

    cam = create_capture(video_src,
                         fallback='synth:bg=../data/lena.jpg:noise=0.05')

    while True:
        t = clock()

        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        cam_dt = clock() - t
        t = clock()

        rects = detect(gray, face_cascade, 80, 500)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))

        face_det_dt = clock() - t
        t = clock()
Beispiel #40
0
    x20 = 0
    x21 = 0
    timeout = 1

    #=======================================================================
    #starting while loop
    #-------------------
    while True:
        global E1
        ret, img = cam.read()  # read in an image
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  #converting to gray scale
        gray = cv2.equalizeHist(gray)
        x30 = 0
        E1 = 0
        E2 = 0
        t = clock()
        rects = detect(gray, cascade)  #activate function to detect face
        vis = img.copy()  #copying image
        vistest = img.copy()
        if not nested.empty():
            for x1, y1, x2, y2 in rects:
                roi = gray[y1:y2, x1:x2]  # cropping the image
                vis_roi = vis[y1:y2, x1:x2]
                vistest_roi = vistest[y1:y2, x1:x2]

                #cv2.imshow('face', vis_roi)
                subrects = detect1(roi.copy(), nested)  #detecting closed eye
                subrects1 = detect2(roi.copy(),
                                    nested1)  #detecting closed and open eye

                draw_rects(vistest_roi, subrects1, (255, 0, 0))
Beispiel #41
0
    def Update(self):

        print("camera start")

        cv2.destroyAllWindows()

        # initialize the camera and grab a reference to the raw camera capture
        self._camera = PiCamera()
        self._camera.resolution = (self.__cameraResolutionX,
                                   self.__cameraResolutionY)
        self._camera.framerate = 32
        self._rawCapture = PiRGBArray(
            self._camera
        )  #, size=(self._camera.resolution.width, self._camera.resolution.height))

        # allow the camera to warmup
        time.sleep(0.1)

        cascade_fn = "/home/pi/opencv-3.1.0/data/haarcascades/haarcascade_frontalface_alt.xml"
        nested_fn = "/home/pi/opencv-3.1.0/data/haarcascades/haarcascade_eye.xml"
        #cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_default.xml")
        #nested_fn  = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")

        self._cascade = cv2.CascadeClassifier(cascade_fn)
        self._nested = cv2.CascadeClassifier(nested_fn)

        for frame in self._camera.capture_continuous(self._rawCapture,
                                                     format="bgr",
                                                     use_video_port=True):

            if (super().updating_ended == True):
                return

            # grab the raw NumPy array representing the image, then initialize the timestamp
            # and occupied/unoccupied text
            image = frame.array

            # local modules
            #from video import create_capture
            from common import clock, draw_str

            #self._camera.capture(self._rawCapture, format="bgr")
            #image = self._rawCapture.array

            cv2.imshow('image', image)

            # clear the stream in preparation for the next frame
            self._rawCapture.truncate(0)

            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)

            t = clock()
            rects = self.detect(gray, self._cascade)

            if (self._showImage == True):
                vis = image.copy()
                self.draw_rects(vis, rects, (0, 255, 0))

            dt = 0

            found_something = False

            if not self._nested.empty():
                posX = -1
                posY = -1
                bestWidth = -1
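                # Keep the widest detection (presumably the nearest face) and
                # record its centre as fractions of the frame width/height.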
                for x1, y1, x2, y2 in rects:
                    width = x2 - x1
                    if (width > bestWidth):
                        bestWidth = width
                        posX = (x1 +
                                (x2 - x1) / 2) / self._camera.resolution.width
                        posY = (y1 +
                                (y2 - y1) / 2) / self._camera.resolution.height
                    if (self._showImage == True):
                        roi = gray[y1:y2, x1:x2]
                        vis_roi = vis[y1:y2, x1:x2]
                        subrects = self.detect(roi.copy(), self._nested)
                        self.draw_rects(vis_roi, subrects, (255, 0, 0))
                self.posXFace = posX
                self.posYFace = posY

                dt = clock() - t

                if (posX != -1):
                    #print('camera time: %.1f ms' % (dt*1000))
                    found_something = True

            if (self._showImage == True):
                draw_str(vis, (20, 20), 'time: %.1f ms' % (dt * 1000))
                cv2.imshow('facedetect', vis)

            if (found_something == True):
                time.sleep(self._delay_seconds)
            else:
                time.sleep(self._delay_seconds_when_idle)
    import sys
    try:
        fn1 = sys.argv[1]
    except:
        fn1 = "test_images/mona_lisa_face.png"

    print help_message

    # Good features parameters
    gf_params = dict(maxCorners=200,
                     qualityLevel=0.1,
                     minDistance=7,
                     blockSize=20,
                     useHarrisDetector=False,
                     k=0.04)

    img = cv2.imread(fn1, cv2.CV_LOAD_IMAGE_COLOR)
    grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)

    start = clock()

    keypoints = cv2.goodFeaturesToTrack(grey, mask=None, **gf_params)

    if keypoints is not None:
        for x, y in np.float32(keypoints).reshape(-1, 2):
            cv2.circle(img, (x, y), 3, (0, 255, 0, 0), cv.CV_FILLED, 8, 0)

    print "Elapsed time:", 1000 * (clock() - start), "milliseconds"
    cv2.imshow("Keypoints", img)
    cv2.waitKey()
Beispiel #43
0
    def evaluate_motempl_model(self, video_path1, video_path2, model_params):
        camera = cv2.VideoCapture(video_path1)
        camera2 = cv2.VideoCapture(video_path2)
        assert camera.isOpened() and camera2.isOpened(
        ), 'Can not capture source!'
        flow_data = []
        img_data = []
        input_data = []
        # model_params['crop_size']=224
        motion_history = np.zeros(
            (model_params['crop_size'][0], model_params['crop_size'][1]),
            np.float32)
        action = 'Normal'
        prob = 1.0

        flow_data2 = []
        img_data2 = []
        input_data2 = []
        action2 = 'Normal'
        motion_history2 = np.zeros(
            (model_params['crop_size'][0], model_params['crop_size'][1]),
            np.float32)
        prob2 = 1.0
        while camera.isOpened() and camera2.isOpened():
            try:
                _, frame = camera.read()
                _, frame2 = camera2.read()
                temp_frame = cv2.resize(frame, (model_params['crop_size'][0],
                                                model_params['crop_size'][1]),
                                        interpolation=cv2.INTER_CUBIC)
                temp_frame2 = cv2.resize(frame2,
                                         (model_params['crop_size'][0],
                                          model_params['crop_size'][1]),
                                         interpolation=cv2.INTER_CUBIC)
                img_data.append(temp_frame)
                img_data2.append(temp_frame2)

                # Calculate the motempl flow between two frames of camera1
                if len(img_data) == 3:
                    timestamp = clock()
                    flow_img = self.calc_motempl(img_data[0], img_data[2],
                                                 motion_history, timestamp)
                    flow_img = flow_img * 1.0 / 127.5
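                    # Rescale the 8-bit motion-history image into roughly
                    # [0, 2]; presumably this matches the input range the
                    # action-recognition model was trained with.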
                    cv2.imshow('mote1', flow_img)
                    flow_img = np.array(flow_img)
                    flow_data.append(flow_img)
                    img_data = []

                # Calculate the motempl flow between two frames of camera2
                if len(img_data2) == 3:
                    timestamp2 = clock()
                    flow_img2 = self.calc_motempl(img_data2[0], img_data2[2],
                                                  motion_history2, timestamp2)
                    flow_img2 = flow_img2 * 1.0 / 127.5
                    cv2.imshow('mote2', flow_img2)
                    flow_img2 = np.array(flow_img2)
                    flow_data2.append(flow_img2)
                    img_data2 = []

                # camera1
                if len(flow_data) == model_params['sequence_length']:
                    action, prob, _ = self.calc_output(flow_data, input_data)
                    flow_data = []
                    input_data = []

                # camera2
                if len(flow_data2) == model_params['sequence_length']:
                    action2, prob2, _ = self.calc_output(
                        flow_data2, input_data2)
                    flow_data2 = []
                    input_data2 = []

                cv2.putText(frame, action, (20, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            1, text_color[action], 3)
                cv2.putText(frame, str(prob), (20, 90),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, text_color[action], 3)
                cv2.putText(frame2, action2, (20, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, text_color[action2],
                            3)
                cv2.putText(frame2, str(prob2), (20, 90),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, text_color[action2],
                            3)
                cv2.imshow('camera1', frame)
                cv2.imshow('camera2', frame2)
                choice = cv2.waitKey(1)
                choice = cv2.waitKey(1)

            except Exception as e:
                print(e)
                camera = cv2.VideoCapture(video_path1)
                camera2 = cv2.VideoCapture(video_path2)
Beispiel #44
0
def run(folder, file_name):
    video_src = os.path.join(os.curdir, folder, file_name)
    images_folder = os.path.join(folder, 'frames')
    if not os.path.isdir(images_folder):
        os.mkdir(images_folder)
    if os.path.isfile(video_src):
        print('it is a file: ', video_src)
    else:
        print('no file you n00b', video_src)
    # args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    # try:
    #     video_src = video_src[0]
    # except:
    #     video_src = 0
    # args = dict(args)
    # cascade_fn = args.get('--cascade', "haarcascade_frontalface_alt.xml")
    cascade_fn = "haarcascade_frontalface_alt.xml"
    # nested_fn  = args.get('--nested-cascade', "haarcascade_eye.xml")
    nested_fn = "haarcascade_eye.xml"

    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)

    # cam = create_capture(video_src, fallback='synth:bg=../data/lena.jpg:noise=0.05')

    cam = cv2.VideoCapture(video_src)
    image_count = 0
    frame_count = 0
    while True:
        if frame_count % 500 == 0:
            print('faces found: ', image_count)
            print(int(cam.get(cv2.CAP_PROP_POS_FRAMES)), ": frames read")
        ret, img = cam.read()
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        if not nested.empty():
            for x1, y1, x2, y2 in rects:
                roi = gray[y1:y2, x1:x2]
                vis_roi = vis[y1:y2, x1:x2]
                subrects = detect(roi.copy(), nested)
                #If there is eyes on the picture
                if len(subrects) > 0:
                    #for every 20 frame it should save a picture.
                    if int(cam.get(cv2.CAP_PROP_POS_FRAMES)) % 20 == 0:
                        save_file_to = os.path.join(
                            images_folder,
                            "{0}{1}.jpg".format(file_name[:-4], image_count))
                        print("Captured: ",
                              (images_folder, "{0}{1}.jpg".format(
                                  file_name[:-4], image_count)))
                        print("At: ", cam.get(cv2.CAP_PROP_POS_MEC))
                        # print('saving file to: ', save_file_to)
                        cv2.imwrite(save_file_to, img)
                        image_count += 1

                # draw_rects(vis_roi, subrects, (255, 0, 0))
        dt = clock() - t
        frame_count += 1

        # draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
        #cv2.imshow('facedetect', vis)

        if cv2.waitKey(5) == 27:
            break
    print("{0} images made out of {1} frames".format(image_count, frame_count))
    cv2.destroyAllWindows()
Beispiel #45
0
def demo_video(video_file):
    detector = ObjectDetectorYolo(model='tiny-yolo-voc')
    mtracker = KalmanTracker(['person'], tracker='deep_sort')

    cap = common.VideoStream(video_file, queueSize=4).start()
    cv2.waitKey(500)
    Outcount, Incount = 0, 0
    total_t, counter = 0, 0

    while not cap.stopped:
        t = common.clock()
        imgcv = cap.read()

        if imgcv is not None:
            counter += 1
            detections = detector.run(imgcv)
            mtracker.update(imgcv, detections)
            cvboxes, ids = [], []

            for tid, tracker in mtracker.trackers.iteritems():
                if tracker.consecutive_invisible_count < 5:
                    state_current = get_pos(tracker.bbox)
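                    # Line-crossing counter: each time a track's side of the
                    # counting line changes, statechange is incremented.  An
                    # odd-numbered crossing counts a new entry (or exit) in
                    # that direction; an even-numbered crossing means the
                    # person crossed back, so the opposite counter is
                    # decremented instead.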

                    try:
                        if state_current != tracker.regionside:
                            tracker.statechange += 1
                            print state_current, tracker.regionside, tracker.statechange
                            if state_current == 'Positive':
                                if tracker.statechange % 2:
                                    Incount += 1
                                else:
                                    Outcount -= 1
                            else:
                                if tracker.statechange % 2:
                                    Outcount += 1
                                else:
                                    Incount -= 1
                            tracker.regionside = state_current

                    except AttributeError:
                        tracker.regionside = state_current
                        tracker.statechange = 0

                    cvboxes.append(tracker.bbox)
                    ids.append(tid)
            print Incount, Outcount

            cv2.line(imgcv, (LINE['x1'], LINE['y1']), (LINE['x2'], LINE['y2']),
                     (0, 0, 255), 4)
            common.drawLabel(imgcv,
                             "IN:%d  OUT:%d" % (Incount, Outcount), (10, 10),
                             size=1,
                             color=(0, 0, 255))
            common.showImage(draw_boxes(imgcv, cvboxes, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print counter / total_t
    def processFrame(self, frame, t0):
        keypoints = None
        lines = None
        contours = None
        cmode = self.cmode
        values = self.values
        if cmode != 'rgb':
            if cmode in ['h', 's', 'v']:
                #mode = (cv2.COLOR_BGR2HLS, cv2.COLOR_HLS2BGR)
                mode = (cv2.COLOR_BGR2HSV, cv2.COLOR_HSV2BGR)
                hsv = cv2.cvtColor(frame, mode[0])
                if cmode == 'h':
                    if 0:
                        if mode[0] == cv2.COLOR_BGR2HSV:
                            hsv[:, :, 1] = 255  # s = 1
                            hsv[:, :, 2] = 128  # v = .5
                        else:
                            hsv[:, :, 1] = 128  # l = .5
                            hsv[:, :, 2] = 255  # s = 1
                        frame = cv2.cvtColor(hsv, mode[1])
                    else:
                        h, s, v = cv2.split(hsv)
                        # now find the interesting range of hues..
                        frame = cv2.inRange(h, values[0], values[1])
                        #frame = frame * v
                elif cmode == 's':
                    # extract the s as grayscale
                    if mode[0] == cv2.COLOR_BGR2HSV:
                        h, frame, v = cv2.split(hsv)
                    else:
                        h, l, frame = cv2.split(hsv)
                elif cmode == 'v':
                    if mode[0] == cv2.COLOR_BGR2HSV:
                        h, s, frame = cv2.split(hsv)
                    else:
                        h, frame, s = cv2.split(hsv)
            elif cmode in ['r', 'g', 'b']:
                if cmode == 'r':
                    b, g, frame = cv2.split(frame)
                elif cmode == 'g':
                    b, frame, r = cv2.split(frame)
                elif cmode == 'b':
                    frame, g, r = cv2.split(frame)
            elif cmode == 'adaptiveThreshold':
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                if values[1] == 0:
                    thresh = cv2.THRESH_BINARY
                else:
                    thresh = cv2.THRESH_BINARY_INV
                frame = cv2.adaptiveThreshold(
                    gray,
                    200,  # value to draw
                    cv2.ADAPTIVE_THRESH_MEAN_C,
                    thresh,
                    5,
                    values[0])
            elif cmode == 'threshold':
                frame = self.simpleThreshold(frame, values)
            elif cmode == 'huerange*valrange':
                frame = self.hvRange(frame)
            elif cmode == 'canny':
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                v1 = values[0] * 200
                v2 = v1 + values[1]
                frame = cv2.Canny(frame, v1, v2, apertureSize=5)
            elif cmode == 'simpleblob':
                if not "blobdetector" in self.algostate or self.update:
                    bp = cv2.SimpleBlobDetector_Params()

                    if values[4] >= 0:
                        bp.filterByColor = True
                        bp.blobColor = values[4]  # 0 or 255  (?)
                    else:
                        bp.filterByColor = False
                        bp.blobColor = 0

                    # Change thresholds
                    bp.minThreshold = values[0]  # 50
                    bp.maxThreshold = values[1]  # 150
                    bp.thresholdStep = values[2]  # 5

                    # Filter by Area.
                    bp.filterByArea = True
                    bp.minArea = values[3]  # 500
                    bp.maxArea = (640 * 480) / 5

                    # Filter by Circularity
                    bp.filterByCircularity = False
                    bp.minCircularity = 0.1

                    # Filter by Convexity
                    bp.filterByConvexity = False
                    bp.minConvexity = 0.87

                    # Filter by Inertia
                    bp.filterByInertia = False
                    bp.minInertiaRatio = 0.01

                    detector = cv2.SimpleBlobDetector(bp)
                    self.algostate["blobdetector"] = detector
                else:
                    detector = self.algostate["blobdetector"]
                if 0:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                elif 0:
                    frame = self.hvRange(frame)
                elif 0:
                    hvals = self.getCmodeValues('h')
                    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                    frame, s, v = cv2.split(hsv)
                    frame = cv2.inRange(frame, hvals[0], hvals[1])
                else:
                    gvals = self.getCmodeValues('gamma')
                    gamma = 1 + 10 * gvals[0] / 100.0
                    self.putNotice('gamma: %f' % gamma)
                    for i in range(0, 256):
                        self.LUT[i] = 255 * ((i / 255.0)**gamma)
                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    frame = cv2.LUT(gray, self.LUT)

                keypoints = detector.detect(frame)  # we'll draw them
                keypoints = self.robotCnx.NewKeypoints(keypoints)
            elif cmode == "houghlines":
                cv = self.getCmodeValues('canny')
                bwf = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                v1 = cv[0] * 200
                v2 = v1 + cv[1]
                bwf = cv2.Canny(bwf, v1, v2, apertureSize=5)
                rho = max(values[0], 1)
                theta = max(math.radians(values[1]), 0)
                threshold = max(values[2], 1)  # votes needed to accept a line
                minlen = values[3]
                maxgap = values[4]
                lines = cv2.HoughLinesP(
                    bwf,
                    rho,  # distance res of accum in pixels
                    theta,
                    threshold,
                    minLineLength=minlen,
                    maxLineGap=maxgap)
                lines = self.robotCnx.NewLines(lines)
            elif cmode == 'contours':
                gray = self.simpleThreshold(frame)
                mode = cv2.RETR_TREE  #values[0]
                method = cv2.CHAIN_APPROX_SIMPLE  #values[1]
                off = (values[2], values[3])
                contours = cv2.findContours(gray, mode, method, offset=off)
                # NB: contours may be tuple (contours, hierarchy)
            elif cmode == 'ORB':
                if not "ORB" in self.algostate or self.update:
                    sf = max(1, min(2, 1. + values[1] / 255.))  # [1->2]
                    self.algostate["ORB"] = cv2.ORB(nfeatures=values[0],
                                                    scaleFactor=sf,
                                                    nlevels=values[2],
                                                    patchSize=values[3],
                                                    edgeThreshold=values[3])
                    # could add:
                    #   WTA_K: 2
                    #   scoreType ORB_HARRIS_SCORE
                    #  patchSize ~= edgeThreshold

                orb = self.algostate["ORB"]
                #keypoints,descrs = orb.detectAndCompute(frame, None)
                #gray = self.simpleThreshold(frame)
                gray = self.hvRange(frame)
                keypoints = orb.detect(gray, None)
                self.putNotice('ORB features: %d' % len(keypoints))
                frame = gray
            elif cmode == 'dance0':
                t = common.clock() * 2 * math.pi / 5
                x = math.sin(t) * values[0] / 27 * 320 + 320  # [0, 640]
                kp = cv2.KeyPoint(x, 240, 10)
                keypoints = self.robotCnx.NewKeypoints([kp])
            elif cmode == 'dance1':
                t = common.clock() * 2 * math.pi / 15
                x = 22 * math.sin(t) + 320
                # [0, 640]
                y = 60 * (1 + math.sin(t)) + 240
                # math.sin(t) * values[0] / 27 * 320 + 240  # [0, 480]
                kp = cv2.KeyPoint(x, y, 10)
                keypoints = self.robotCnx.NewKeypoints([kp])
            elif cmode == 'gamma':
                # Since our inputs are normalized to [0, 1]
                # we want a power > 1 to reduce darker shades.
                # map values [0, 100] -> gamma in [1, 11]
                gamma = 1 + 10 * values[0] / 100.0
                self.putNotice('gamma: %f' % gamma)
                for i in range(0, 256):
                    self.LUT[i] = 255 * ((i / 255.0)**gamma)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                frame = cv2.LUT(gray, self.LUT)
            else:
                print("unknown cmode: " + cmode)
        return frame, t0, keypoints, lines, contours
Beispiel #47
0
def myapp(environ, start_response):
    global ws
    global cam
    global tracker
    global face_fn
    global con_fn
    global tri_fn
    global tracker
    global conns
    global trigs
    global cam
    global shape3D
    global ms

    ws = environ['wsgi.websocket']
    print('enter!')

    ws.send("hoge!")


    plot_flag = False
    try:
        while True:
            t = clock()

            ret, img = cam.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)

            if tracker.update(gray):
                draw_str(img, (20, 40), 'pos: %.1f, %.1f' % tracker.getPosition())
                draw_str(img, (20, 60), 'scale: %.1f ' % tracker.getScale())
                draw_str(img, (20, 80), 'orientation: %.1f, %.1f, %.1f' % tracker.getOrientation())
                tracker.getScale()
                tracker.getOrientation()
                #img = tracker.draw(img, conns, trigs)
                img = tracker.draw(img, conns, None)

                shape3D = tracker.get3DShape().reshape((3, 66))
                #print_cood(shape3D)

                print(t)

                # #left eye (check max - min on Y axis)
                # if isClosed(shape3D[1][36:41].tolist(), 2) == True:
                #     print("left eye closed")
                #
                # #right eye (check max - min on Y axis)
                # if isClosed(shape3D[1][42:47].tolist(), 2) == True:
                #     print("right eye closed")

                # lip (check max - min on Y axis)
                if isClosed(shape3D[1][60:65].tolist(), 0.7) == True:
                    print("lip closed")
                    if ws != None:
                        ws.send("close")
                        sleep(0.3)
                else:
                    print("lip open")
                    if ws != None:
                        ws.send("open")

                # comment out
                #print shape3D.min(), shape3D.max()

                 # commented out because it is not needed
                 #ms.set(x=shape3D[0, :] , y=shape3D[1, :], z=shape3D[2, :])
            else:
                tracker.setWindowSizes((11, 9, 7))

            dt = clock() - t

            draw_str(img, (20, 20), 'time: %.1f ms' % (dt*1000))
            cv2.imshow('facedetect', img)

            if 0xFF & cv2.waitKey(5) == 27:
                break
    except:
        pass

    cv2.destroyAllWindows()
Beispiel #48
0
def main(cam_id, c_type):
    """
    由于眼部检测的不稳定性,这里采用在已经识别出的人脸范围内假定的眼睛区域作为眼睛区域的判断
    采用 关键点检测 加 伪眼部光流检测
    关键点静止但眼部光流明显时判定为活体
    这个主要判断 眨眼 来做为活体检测的依据
    更换为双摄像头
    并判断
    1、电子设备(普通摄像头中有人脸区域而红外摄像头中没有)
    2、照片(眼睛区域关键点位移的方差与眼睛外部的方差 差值小于阈值时判定为照片,但有很大程度会把真人识别成照片
        修改为判断连续 n 帧的方差差值,如果全部符合再判定为照片)
    3、通过,活体(。。。)
    :param cam_id: 摄像头ID
    :param c_type: 类型(color:Visible Light 可见光, gray:infrared 红外)
    :return:
    """

    print(__doc__)
    cap = cv2.VideoCapture(cam_id)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('D:/data.avi', fourcc, 30, (640, 480))
    f_cascade = cv2.CascadeClassifier(
        "C:/opencv/opencv/build/etc/haarcascades/haarcascade_frontalface_alt2.xml"
    )
    e_cascade = cv2.CascadeClassifier(
        "C:\\opencv\\opencv\\build\etc\\haarcascades\\haarcascade_eye.xml")
    ret, prev = cap.read()
    prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    feature_params = dict(maxCorners=500,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    tracks = []
    frame_index = 0  # overall frame count, used for the electronic-device check
    face_frame_index = 0  # frames containing a face, used for the keypoint tracks
    detect_interval = 3
    track_len = 10
    msg_show_success = 0  # frames left to show a pass verdict (flow or keypoints)
    msg_show_opt = 0  # frames left to show the optical-flow pass message
    msg_show_key = 0  # frames left to show the keypoint pass message
    msg_show_success_f = 0  # frames left to show a failed verdict (flow or keypoints)
    msg_show_opt_f = 0  # frames left to show the optical-flow fail message
    msg_show_key_f = 0  # frames left to show the keypoint fail message
    has_face = False
    sustain = 10  # how many frames a message stays on screen

    ph_check_count = 30  # number of consecutive frames used for the photo verdict
    photo_rec = [0] * ph_check_count  # per-frame photo classification

    hu_check_count = 30  # number of consecutive frames used for the live verdict
    human_rec = [0] * hu_check_count  # per-frame live classification

    # store the optical flow of every frame
    eye_flow_lines_t = []

    while True:
        if cv2.waitKey(1) == 27:  # Esc for exit
            break
        t = clock()
        ret, img = cap.read()
        if c_type == 'gray':
            img = img[24:456, 32:608]
            img = cv2.resize(img, (640, 480), interpolation=cv2.INTER_LINEAR)
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        frame_index += 1
        rectangles = detect(gray, f_cascade)

        mask = np.zeros_like(gray)  # mask for keypoint detection

        photo_rec[frame_index % ph_check_count] = 0
        human_rec[frame_index % hu_check_count] = 0

        if len(rectangles) == 1:  # restrict processing to a single face
            share[c_type][frame_index % el_check_count] = 1  # mark this frame as "face seen"
            if not has_face:  # reset the tracks when a face (re)appears
                tracks = []
            has_face = True
            for rectangle in rectangles:
                rx0, ry0, rx1, ry1 = rectangle
                if rx1 - rx0 > 211:
                    draw_str(img, (20, 20), 'Close', front=(0, 0, 255))
                elif rx1 - rx0 < 211:
                    draw_str(img, (20, 20), 'Away', front=(0, 0, 255))
                else:
                    draw_str(img, (20, 20), 'OK. Hold.', front=(0, 255, 0))

                draw_rectangle(img, rectangle, color=(0, 225, 0))  # face region
                rectangles_eye = detect(gray[ry0:ry1, rx0:rx1],
                                        e_cascade)  # detected eye regions
                # draw_rectangles(img[ry0:ry1, rx0:rx1], rectangles_eye, color=(255, 0, 225))

                # optical flow over the detected eye regions
                eye_flow_lines = []
                # for erx0, ery0, erx1, ery1 in rectangles_eye:
                #     eye_flow = opt_flow(prev_gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1],
                #                         gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1])  # get opt flow
                #     eye_flow_lines.append(draw_flow(img[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1],
                #                                     eye_flow, step=4))  # 显示光流点

                # Pseudo eye region: assume the eyes occupy rows 1/4 to 1/2 of the face
                # and columns 1/6 to 5/6, to compensate for the eye detector not firing on every frame

                face_h = ry1 - ry0
                face_w = rx1 - rx0
                face_hs = face_h // 4  # integer bounds so they can be used as slice indices
                face_he = face_h // 2
                face_ws = face_w // 6
                face_we = face_w // 6 * 5
                eye_flow = opt_flow(
                    prev_gray[ry0:ry1, rx0:rx1][face_hs:face_he,
                                                face_ws:face_we],
                    gray[ry0:ry1, rx0:rx1][face_hs:face_he, face_ws:face_we])
                eye_flow_lines.append(
                    draw_flow(img[ry0:ry1, rx0:rx1][face_hs:face_he,
                                                    face_ws:face_we],
                              eye_flow,
                              step=4))

                eye_sorted = []  # sorted displacement lengths (eye region)
                eye_sorted2 = []
                for lines in eye_flow_lines:
                    mds = []
                    for (x1, y1), (x2, y2) in lines:
                        md = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
                        mds.append(md)
                        eye_sorted2.append(md)
                    eye_sorted.append(sorted(mds, reverse=True))
                    eye_flow_lines_t.append(eye_sorted2)  # store this frame's flow displacements
                # Draw the keypoint tracks.
                # Keypoints with large displacement are dropped here,
                # which does not affect the other checks.
                if len(tracks) > 0:
                    img0, img1 = prev_gray, gray
                    p0 = np.float32([tr[-1]
                                     for tr in tracks]).reshape(-1, 1, 2)
                    p1, st, err = cv2.calcOpticalFlowPyrLK(
                        img0, img1, p0, None, **lk_params)
                    p0r, st, err = cv2.calcOpticalFlowPyrLK(
                        img1, img0, p1, None, **lk_params)
                    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                    good = d < 0.5
                    new_tracks = []
                    for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2),
                                                     good):
                        if not good_flag:
                            continue
                        if not (rx0 < x < rx1 and ry0 < y < ry1):
                            continue
                        tr.append((x, y))
                        if len(tr) > track_len:
                            del tr[0]
                        new_tracks.append(tr)
                        cv2.circle(img, (x, y), 2, (0, 255, 0), -1)
                    tracks = new_tracks
                    cv2.polylines(img, [np.int32(tr) for tr in tracks], False,
                                  (0, 255, 0))
                    # draw_str(img, (20, 20), 'track count: %d' % len(tracks))

                # restrict the region of interest to the face
                cv2.fillPoly(
                    mask,
                    np.array([[[rx0, ry0], [rx1, ry0], [rx1, ry1],
                               [rx0, ry1]]]), (255, 255, 255))
                for x, y in [np.int32(tr[-1]) for tr in tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)  # exclude the previous keypoints

                if face_frame_index % detect_interval == 0:
                    # print('**************** start ***************')
                    l_sorted = []
                    l_sorted_eye = []  # keypoint displacements inside the eye region
                    l_sorted_out = []  # keypoint displacements outside the eye region

                    l_tmp = []
                    l_tmp_eye = []
                    l_tmp_out = []
                    for tr in tracks:
                        (x0, y0) = tr[0]
                        (x1, y1) = tr[-1]

                        # keypoint endpoint inside the pseudo eye region
                        if rx0 + face_ws < x1 < rx0 + face_we and ry0 + face_hs < y1 < ry0 + face_he:
                            l_tmp_eye.append(
                                round(math.sqrt((x1 - x0)**2 + (y1 - y0)**2),
                                      2))
                        else:
                            l_tmp_out.append(
                                round(math.sqrt((x1 - x0)**2 + (y1 - y0)**2),
                                      2))

                        l = round(math.sqrt((x1 - x0)**2 + (y1 - y0)**2), 2)
                        l_tmp.append(l)
                        # if l > 0:
                        # print(round(math.atan(abs((y1 - y0) / (x1 - x0))) / math.pi * 180, 2), end=':')
                        # print(l, end='\t')
                    # print('\n+++++++++++++++')

                    l_sorted = sorted(l_tmp, reverse=True)
                    l_sorted_eye = sorted(l_tmp_eye, reverse=True)
                    l_sorted_out = sorted(l_tmp_out, reverse=True)
                    if len(l_sorted_eye) > 0:
                        print(l_sorted_eye[0])
                    if len(l_sorted_out) > 0:
                        print(l_sorted_out[0])
                    print("--------------")
                    if len(l_sorted_out) > 3 and len(l_sorted_eye) > 3 \
                            and l_sorted_out[0] < 1 and l_sorted_eye[0] > 1 \
                            and l_sorted_eye[0] - l_sorted_out[0] > 1:
                        # print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
                        msg_show_key = sustain
                        msg_show_success = sustain
                        # human_rec[frame_index % hu_check_count] = 1
                    elif (len(l_sorted_out) > 3 and len(l_sorted_eye) > 3 and abs(np.var(l_sorted_eye[:3]) - np.var(l_sorted_out[:3])) < 0.0005)\
                            or (len(l_sorted_eye) > 1 and len(l_sorted_out) > 1 and l_sorted_out[0] < 0.1 and l_sorted_eye[0] < 0.1):
                        print(
                            np.var(l_sorted_eye[:3]) -
                            np.var(l_sorted_out[:3]))
                        print("yesyesyesyesyesyes")
                        # classified as a photo
                        # msg_show_key_f = sustain
                        # msg_show_success_f = sustain
                        photo_rec[frame_index % ph_check_count] = 1

                    # ======== print the ten largest =========================
                    # if True:
                    #     for i, md2 in enumerate(eye_sorted):
                    #         count = 0
                    #         print('Eye', str(i + 1), end=':\t')
                    #         for md in md2:
                    #             count += 1
                    #             if count > 150:
                    #                 break
                    #             print(round(md, 2), end=',')
                    #         print()
                    #     print('###################')

                    # liveness check
                    np_eye = np.array(sorted(eye_sorted2, reverse=True)[:30])
                    np_eye = np_eye[np_eye > 0]
                    np_l = np.array(l_sorted[:10])

                    # print(np_eye.size, '+++++', np_l.size)
                    if np_eye.size != 0 and np_l.size != 0:
                        flow_pre = np_eye[np_eye > 2].size * 1.0 / np_eye.size
                        ln_pre = np_l[np_l > 2].size * 1.0 / np_l.size
                        # print(flow_pre, '---', ln_pre)
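                        # flow_pre: fraction of the 30 largest eye-region flow
                        # vectors that are longer than 2 px; ln_pre: fraction of
                        # the 10 longest keypoint displacements longer than 2 px.
                        # Moderate eye motion with an otherwise still face is
                        # taken as a blink, i.e. a live subject.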
                        if 0.8 > flow_pre > 0.05 and ln_pre < 0.2:
                            msg_show_opt = sustain
                            msg_show_success = sustain
                            # print('yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy')
                            # elif flow_pre < 0.05 and ln_pre < 0.2:
                            #     msg_show_opt_f = sustain
                            #     msg_show_success_f = sustain
                    # print('**************** end ***************')
                    # detect new keypoints to track
                    p = cv2.goodFeaturesToTrack(gray,
                                                mask=mask,
                                                **feature_params)
                    if p is not None:
                        for x, y in np.float32(p).reshape(-1, 2):
                            tracks.append([(x, y)])

            face_frame_index += 1
        else:
            has_face = False
            share[c_type][frame_index %
                          el_check_count] = 0  # no face (or more than one) detected in this frame

        prev_gray = gray
        dt = clock() - t
        # draw_str(img, (20, 20), 'time: %.1f ms' % (dt * 1000))
        # if msg_show_key > 0:
        #     draw_str(img, (450, 20), 'YES by KEY', front=(0, 0, 255))
        #     msg_show_key -= 1
        # if msg_show_opt > 0:
        #     draw_str(img, (300, 20), 'YES by OPT', front=(0, 0, 255))
        # if c_type == 'color'> 0:
        #     print(sum(photo_rec))
        if sum(share['color']) > el_check_count * 0.99 and sum(
                share['gray']) == 0:  # a face in nearly every recent color frame but never in the infrared frames -> screen replay
            draw_str(img, (400, 30), 'Electronic', front=(0, 0, 255), size=2)
            msg_show_success = 0
            msg_show_success_f = 0
        elif sum(photo_rec) > ph_check_count * 0.1:
            draw_str(img, (400, 30), 'Photo', front=(0, 0, 255), size=2)
            msg_show_success = 0
        elif sum(share['color']) > el_check_count * 0.99 \
                and msg_show_success > 0: # and sum(human_rec) > 0:
            draw_str(img, (400, 30), 'Pass', front=(0, 255, 0), size=2)
        if msg_show_success > 0:
            msg_show_success -= 1
        # msg_show_success_f -= 1

        # if c_type == 'color':
        cv2.imshow(c_type + str(cam_id), img)
        out.write(img)

    cap.release()
    out.release()
    cv2.destroyAllWindows()
    low_image = shm.zeros(img.shape[0:2], dtype=img.dtype)

    # controller_thread = threading.Thread(target=controller, args=(controller_state, stopping, controller_cv, the_lock))
    # controller_thread = mp.Process(target=controller, args=(controller_state, stopping, controller_cv, the_lock))
    # controller_thread.start()

    targeter = Targeter(height, width, add_controller_command)

    primed = True
    firing = False
    recognizing = False
    locked_counter = 0

    with recog.RecognizerManager() as recognizer:
        while True:
            t = clock()
            ret, img = cam.read()

            with the_lock:
                low_image[:] = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                low_image[:] = cv2.equalizeHist(low_image)

            if high_proc is None:
                high_proc = mp.Process(target=high_level_tracker,
                                       args=(
                                           low_image,
                                           stopping,
                                           the_lock,
                                           possible_targets,
                                           cascade,
                                       ))
Beispiel #50
0
yServo = xServo

servoPoint = Point(int(servo_min+((servo_max - servo_min)/2)), int(servo_min+((servo_max - servo_min)/2)))
lastFacePoint=0
lastPIDUpdate=0
servoPrevPoint = Point(int(servo_min+((servo_max - servo_min)/2)), int(servo_min+((servo_max - servo_min)/2)))

servoSpeed = Point(10,10)

xServoID = int(1)
yServoID = int(0)



curRect=-1
rectStart=clock()
dt = 0
t=0

def exit_signal(signum, frame):
	global isRunning
	isRunning=False
	
signal.signal(signal.SIGINT, exit_signal)
signal.signal(signal.SIGTERM, exit_signal)

def rectsToPoint(rects, point):
	if len(rects) >= 1:
		curRect = 0
		point.rectToPoint(rects[0])
		"""if len(rects) == 1:
Beispiel #51
0
        subfolder = os.path.join(path, 'out_img/', name.split('.')[0])
        if not os.path.exists(subfolder):
            print subfolder
            print '--------path does not exist---------'
            os.mkdir(subfolder)
            i = -1
            while True:
                i = i + 1
                # print i
                ret, img = cam.read()
                #print ret
                if not ret: break
                if i % 10 == 0:
                    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    gray = cv2.equalizeHist(gray)
                    t = clock()

                    rects = detect(gray, cascade)

                    vis = img.copy()

                    if not nested.empty():

                        for x1, y1, x2, y2 in rects:
                            roi = gray[y1:y2, x1:x2]
                            vis_roi = vis[y1:y2, x1:x2]

                            res = cv2.resize(vis_roi, (48, 48),
                                             interpolation=cv2.INTER_CUBIC)

                            im = Image.fromarray(res).convert('L')
    def go(self):
        # args is argparser.Namespace: an object whose members are
        # accessed as:  self.args.cannedimages

        self.robotCnx = robotCnx.RobotCnx(self.args.fakerobot)

        self.vsrc = None
        if self.args.cannedimages:
            self.fnpat = '../../pictures/RealFullField/%d.jpg'
        else:
            self.fnpat = None
        self.index = 0
        self.lastFn = None
        self.mainImg = None
        self.algostate = {}  # stores persistant objects depending on cmode
        self.threadn = cv2.getNumberOfCPUs()
        self.pool = ThreadPool(processes=self.threadn)
        self.pending = deque()
        self.threadedMode = True
        self.update = False
        self.latency = common.StatValue()
        self.frameT = common.StatValue()
        self.lastFrameTime = common.clock()
        self.lastStashTime = 0
        self.stashFilename = "/var/tmp/imgServer.home/currentImage.jpg"
        self.stashParams = [int(cv2.IMWRITE_JPEG_QUALITY), 50]
        self.zeros = (0, 0, 0, 0, 0, 0)
        self.LUT = np.array(range(0, 256)).astype('uint8')

        self.indent = ' ' * 50
        self.lastStatus = ""

        # cmodelist maps a number [0, len] to an cmode/algorithm
        self.cmodelist = [
            'rgb',
            'adaptiveThreshold',
            'threshold',
            'huerange*valrange',
            'canny',
            'simpleblob',
            'houghlines',
            'contours',
            'ORB',
            'dance1',
            'gamma',
            'b',
            'g',
            'h',
            'r',
            's',
            'v',
        ]

        # cmodeValueCache stores the edited values associated with each cmode.
        # It starts empty, then builds up state as cmode changes.
        self.cmodeValueCache = {
            'adaptiveThreshold': [-4, 0, 0, 0, 0, 0],
            'threshold': [75, 0, 0, 0, 0, 0],
            'huerange*valrange': [55, 100, 255, 255, 0, 0],  # works for g LED
            'canny': [10, 200, 0, 0, 0, 0],
            'simpleblob': [75, 150, 20, 40**2, 0, 0],
            # minThresh
            # maxThresh
            # thresStep
            # minSize (maxsize is 'large')
            'houghlines': [2, 5, 10, 30, 2, 0],
            # rho is distance resolution in pixels
            # theta is angle in degress (larger -> more lines)
            # threshold is measured in 'votes', higher -> fewer
            # minLineLength
            # maxLineGap
            'contours': [1, 0, 0, -1, 1, 0],
            # mode: [0-3]
            # method: [CHAIN_APPROX_NONE,_SIMPLE,_TC89_L1, KOS]
            # offset shift for contour
            # unused
            # which contour to draw, -1 is all
            # which level of the contours to draw
            'ORB': [40, 10, 8, 31, 0, 0],
            # nfeatures
            # scaleFactor [1 -> 2  (0->255)]
            # nlevels
            # patchSizea == edgeThreshold
            'dance1': [20, 20, 0, 0, 0, 0],
            #X(degrees)
            #Y(degrees)
            #Not rotating correctly (not interpreting correctly)
            'gamma': [34, 0, 0, 0, 0, 0],
            #alpha
            #beta
            'r': self.zeros,
            'g': self.zeros,
            'b': self.zeros,
            'h': [0, 61, 0, 0, 0, 0],
            's': self.zeros,
            'v': self.zeros,
            'rgb': self.zeros
        }

        #keyToCmode maps a key to a cmod
        self.keyToCmode = {
            49: self.cmodelist[1],  # adaptiveThreshold on '1' key
            50: self.cmodelist[2],
            51: self.cmodelist[3],
            52: self.cmodelist[4],
            53: self.cmodelist[5],
            54: self.cmodelist[6],  # houghlines
            55: self.cmodelist[7],  # contours
            56: self.cmodelist[8],  # ORB
            57: self.cmodelist[9],  # dance1
            48: self.cmodelist[10],  # 0 key: gamma
            98: 'b',
            99: 'rgb',
            103: 'g',
            104: 'h',
            114: 'r',
            115: 's',
            118: 'v',
        }

        self.keyToFrameChange = {
            81: 'b',
            83: 'n',
        }

        self.algoValueChange = {
            127: ("reset", self.zeros),  # keypad clear
            190: ("v1Down", (-1, 0, 0, 0, 0, 0)),  # f1
            191: ("v1Up  ", (1, 0, 0, 0, 0, 0)),  # f2
            84: ("v1Down", (-1, 0, 0, 0, 0, 0)),  # downarrow
            82: ("v1Up  ", (1, 0, 0, 0, 0, 0)),  # uparrow
            191: ("v1Up  ", (1, 0, 0, 0, 0, 0)),  # downarrow
            192: ("v2Down", (0, -1, 0, 0, 0, 0)),  # f3
            193: ("v2Up  ", (0, 1, 0, 0, 0, 0)),  # f4
            194: ("v3Down", (0, 0, -1, 0, 0, 0)),  # f5
            195: ("v3Up  ", (0, 0, 1, 0, 0, 0)),  # f6
            196: ("v4Down", (0, 0, 0, -1, 0, 0)),  # f7
            197: ("v4Up  ", (0, 0, 0, 1, 0, 0)),  # f8
            198: ("v5Down", (0, 0, 0, 0, -1, 0)),  # f9
            199: ("v5Up  ", (0, 0, 0, 0, 1, 0)),  # f10
            200: ("v6Down", (0, 0, 0, 0, 0, -1)),  # f11
            201: ("v6Up  ", (0, 0, 0, 0, 0, 1)),  # f12
        }

        self.cmode = self.getCmode(self.args.algorithm)
        self.values = self.getCmodeValues(self.cmode)
Beispiel #53
0
    def evaluate_motempl_model(self, video_path1, video_path2, model_params):
        camera = cv2.VideoCapture(video_path1)
        camera2 = cv2.VideoCapture(video_path2)
        assert camera.isOpened() and camera2.isOpened(), 'Can not capture source!'
        flow_data = []
        img_data = []
        input_data = []
        motion_history = np.zeros((224, 224), np.float32)
        action = 'Normal'
        prob = 0.0

        flow_data2 = []
        img_data2 = []
        input_data2 = []
        action2 = 'Normal'
        motion_history2 = np.zeros((224, 224), np.float32)
        prob2 = 0.0
        while camera.isOpened() and camera2.isOpened():
            try:
                _, frame = camera.read()
                _, frame2 = camera2.read()
                temp_frame = cv2.resize(frame, (224, 224),
                                        interpolation=cv2.INTER_CUBIC)
                temp_frame2 = cv2.resize(frame2, (224, 224),
                                         interpolation=cv2.INTER_CUBIC)
                img_data.append(temp_frame)
                img_data2.append(temp_frame2)

                # Build a motion-history image (MHI) from two frames of camera1
                if len(img_data) == 2:
                    # flow_img = self.calc_motempl(img_data[0], img_data[1], motion_history)
                    frame_diff = cv2.absdiff(img_data[0], img_data[1])
                    gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
                    _, motion_mask = cv2.threshold(gray_diff, 32, 1, cv2.THRESH_BINARY)
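                    # updateMotionHistory stamps the current timestamp into
                    # every motion_history pixel where motion_mask is non-zero;
                    # the clip expression below then maps "time since last
                    # motion" to an 8-bit intensity (just moved = 255, older
                    # than MHI_DURATION = 0).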
                    timestamp = clock()
                    cv2.motempl.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
                    flow_img = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
                    flow_img = cv2.cvtColor(flow_img, cv2.COLOR_GRAY2BGR)
                    cv2.imshow('mote1', flow_img)
                    flow_img = flow_img * 1.0 / 127.5
                    flow_img = np.array(flow_img)
                    flow_data.append(flow_img)
                    img_data = []

                # Build a motion-history image (MHI) from two frames of camera2
                if len(img_data2) == 2:
                    # flow_img2 = self.calc_motempl(img_data2[0], img_data2[1], motion_history2)
                    # flow_img2 = flow_img2 * 1.0 / 127.5
                    frame_diff2 = cv2.absdiff(img_data2[0], img_data2[1])
                    gray_diff2 = cv2.cvtColor(frame_diff2, cv2.COLOR_BGR2GRAY)
                    _, motion_mask2 = cv2.threshold(gray_diff2, 32, 1, cv2.THRESH_BINARY)
                    timestamp2 = clock()
                    cv2.motempl.updateMotionHistory(motion_mask2, motion_history2, timestamp2, MHI_DURATION)
                    flow_img2 = np.uint8(np.clip((motion_history2 - (timestamp2 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
                    flow_img2 = cv2.cvtColor(flow_img2, cv2.COLOR_GRAY2BGR)
                    flow_img2 = flow_img2 * 1.0 / 127.5
                    cv2.imshow('mote2', flow_img2)
                    flow_img2 = np.array(flow_img2)
                    flow_data2.append(flow_img2)
                    img_data2 = []

                # camera1
                if len(flow_data) == model_params['sequence_length']:
                    action, prob, _ = self.calc_output(flow_data, input_data)
                    flow_data = []
                    input_data = []

                # camera2
                if len(flow_data2) == model_params['sequence_length']:
                    action2, prob2, _ = self.calc_output(flow_data2, input_data2)
                    flow_data2 = []
                    input_data2 = []

                cv2.putText(frame, action, (20, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            1, text_color[action], 3)
                cv2.putText(frame, str(prob), (20, 90), cv2.FONT_HERSHEY_SIMPLEX,
                            1, text_color[action], 3)
                cv2.putText(frame2, action2, (20, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            1, text_color[action2], 3)
                cv2.putText(frame2, str(prob2), (20, 90), cv2.FONT_HERSHEY_SIMPLEX,
                            1, text_color[action2], 3)
                cv2.imshow('camera1', frame)
                cv2.imshow('camera2', frame2)
                choice = cv2.waitKey(10)

            except Exception as e:
                # On a failed read or processing error, print it and reopen both videos
                print(e)
                camera = cv2.VideoCapture(video_path1)
                camera2 = cv2.VideoCapture(video_path2)
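
The method above builds a motion history image (MHI) from pairs of frames for two cameras and feeds batches of them to an action classifier. Below is a minimal, self-contained sketch of just the MHI step for a single source; it assumes opencv-contrib-python (which provides cv2.motempl) and an MHI_DURATION of 0.5 seconds, and omits the model-specific parts of the snippet (calc_output, text_color, sequence_length).

import time

import cv2
import numpy as np

MHI_DURATION = 0.5  # assumed value: seconds of motion kept in the history
cap = cv2.VideoCapture(0)
mhi = np.zeros((224, 224), np.float32)
prev = None

while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_CUBIC)
    if prev is not None:
        # Threshold the frame difference into a binary motion mask
        diff = cv2.absdiff(prev, frame)
        gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(gray, 32, 1, cv2.THRESH_BINARY)
        # Fold the mask into the motion history image
        ts = time.perf_counter()
        cv2.motempl.updateMotionHistory(mask, mhi, ts, MHI_DURATION)
        # Scale the MHI into a displayable 8-bit image, newest motion brightest
        vis = np.uint8(np.clip((mhi - (ts - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
        cv2.imshow('mhi', vis)
        if cv2.waitKey(10) == 27:
            break
    prev = frame

cap.release()
cv2.destroyAllWindows()
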
Beispiel #54
0
libunsharp.pool_init()

liblaplacian_naive.pool_init()
liblaplacian.pool_init()

libbilateral_naive.pool_init()
libbilateral.pool_init()

# The old cv module's CV_FILLED thickness constant is not available here,
# so just use its raw value (-1) directly.
CV_THICKNESS_FILLED = -1

while (cap.isOpened()):
    ret, frame = cap.read()

    frameStart = clock()
    rows = frame.shape[0]
    cols = frame.shape[1]
    if harris_mode:
        if cv_mode:
            # Reference path: OpenCV's built-in Harris corner response
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = np.float32(gray) / 4.0
            res = cv2.cornerHarris(gray, 3, 3, 0.04)
        else:
            # Native path: call the compiled kernels through ctypes
            res = np.empty((rows, cols), np.float32)
            if naive_mode:
                harris_naive(ctypes.c_int(cols - 2), ctypes.c_int(rows - 2),
                             ctypes.c_void_p(frame.ctypes.data),
                             ctypes.c_void_p(res.ctypes.data))
            else:
                harris(ctypes.c_int(cols - 2), ctypes.c_int(rows - 2),
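
The loop above switches between OpenCV's built-in Harris detector (cv_mode) and compiled native kernels called through ctypes; the excerpt is cut off mid-call. Below is a minimal, self-contained sketch of the OpenCV path alone, using the same blockSize/ksize/k parameters; the native libraries and their pool_init setup are project-specific and not reproduced here.

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
if ok:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray) / 4.0
    # blockSize=3, ksize=3, k=0.04, matching the parameters used above
    res = cv2.cornerHarris(gray, 3, 3, 0.04)
    # Highlight strong corner responses on the original frame
    frame[res > 0.01 * res.max()] = [0, 0, 255]
    cv2.imshow('harris', frame)
    cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
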
Beispiel #55
0
xServo = servo_min + ((servo_max - servo_min) / 2)
yServo = xServo

servoPoint = Point(servo_min + ((servo_max - servo_min) / 2),
                   servo_min + ((servo_max - servo_min) / 2))
servoPrevPoint = Point(servo_min + ((servo_max - servo_min) / 2),
                       servo_min + ((servo_max - servo_min) / 2))

servoSpeed = Point(3, 3)

xServoID = 0
yServoID = 1

curRect = -1
rectStart = clock()
dt = 0
t = 0


def rectsToPoint(rects, point):
    if len(rects) >= 1:
        curRect = 0
        point.rectToPoint(rects[0])
        """if len(rects) == 1:
		curRect = 0
		point.rectToPoint(rects[0])
	elif len(rects) > curRect:
		rectDt = (rectStart - clock()) * 1000
		if rectDt > 10
			curRect=int(round(random.random*(len(rects)-1)))
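
The fragment above steers a pan/tilt servo pair toward the first detected face rect (the commented-out block sketches rotating among several rects). Below is a minimal sketch of the underlying mapping only, assuming a 640x480 frame and hypothetical servo_min/servo_max pulse values; the original Point class is replaced by a plain tuple.

def rect_center_to_servo(rect, frame_w=640, frame_h=480, servo_min=150, servo_max=600):
    # rect is (x1, y1, x2, y2); interpolate its centre into the servo pulse range
    x1, y1, x2, y2 = rect
    cx = (x1 + x2) / 2.0
    cy = (y1 + y2) / 2.0
    pan = servo_min + (cx / frame_w) * (servo_max - servo_min)
    tilt = servo_min + (cy / frame_h) * (servo_max - servo_min)
    return int(pan), int(tilt)

# A face centred in the image maps to the servo midpoint
print(rect_center_to_servo((280, 200, 360, 280)))
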
Beispiel #56
0
    cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
    nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))

    cam = create_capture(video_src,
                         fallback='synth:bg={}:noise=0.05'.format(
                             cv.samples.findFile('samples/data/lena.jpg')))

    while True:

        ret, img = cam.read()  ## read a frame from the camera
        gray = cv.cvtColor(
            img, cv.COLOR_BGR2GRAY)  ## convert the frame from BGR to grayscale
        gray = cv.equalizeHist(gray)  ## equalize the histogram to improve contrast
        vis = img.copy()  ## make a copy of the frame to draw on

        t = clock()
        find_time = 0.0  ## time spent finding a face in this frame; 0 means no face was found

        # If we have no coordinates for the Redrect, or no face has been found for 2000 ms,
        # scan the whole frame first
        if Redrect.all() == 0 or total_time > 2000:
            rects = detect(gray,
                           cascade)  ## detect the faces from the whole frame
            if len(rects):
                Redrect = rects[0]
                # If a face is found, update the Redrect and make its area
                # 100 pixels larger than the face rect on each side
                Redrect = np.array([
                    Redrect[0] - 100, Redrect[1] - 100, Redrect[2] + 100,
                    Redrect[3] + 100
                ])
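
The excerpt above scans the full frame only when there is no valid Redrect or the face has not been seen for 2000 ms; otherwise detection would run inside the enlarged rectangle. Below is a minimal, self-contained sketch of that sub-rectangle strategy using detectMultiScale directly; the cascade path and detection parameters are assumptions, and the 2000 ms staleness timer is simplified to an immediate reset when the face is lost.

import cv2

cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml')
cap = cv2.VideoCapture(0)
roi = None  # (x1, y1, x2, y2) of the enlarged search region, or None

while True:
    ok, img = cap.read()
    if not ok:
        break
    gray = cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    h, w = gray.shape

    if roi is None:
        search, ox, oy = gray, 0, 0                   # scan the whole frame
    else:
        x1, y1, x2, y2 = roi
        search, ox, oy = gray[y1:y2, x1:x2], x1, y1   # scan only inside the enlarged region

    faces = cascade.detectMultiScale(search, 1.3, 4, minSize=(30, 30))
    if len(faces):
        x, y, fw, fh = faces[0]
        x, y = x + ox, y + oy
        # Enlarge the face rect by 100 pixels on each side, clamped to the frame
        roi = (max(x - 100, 0), max(y - 100, 0),
               min(x + fw + 100, w), min(y + fh + 100, h))
        cv2.rectangle(img, (x, y), (x + fw, y + fh), (0, 255, 0), 2)
    else:
        roi = None  # lost the face: fall back to a full-frame scan

    cv2.imshow('roi tracking', img)
    if cv2.waitKey(5) == 27:
        break

cap.release()
cv2.destroyAllWindows()
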
Beispiel #57
0
        cv.samples.findFile(cascade_fn))  # load the trained face classifier
    #nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn)) # load the eye classifier

    cam = create_capture(video_src,
                         fallback='synth:bg={}:noise=0.05'.format(
                             cv.samples.findFile('samples/data/lena.jpg')))
    # open the video source, ready for detection

    # Loop until a face is detected, so the detection stays up to date
    while rects == []:
        ret, img = cam.read()  # read the image

        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)  # convert the image to grayscale
        gray = cv.equalizeHist(gray)  # equalize the histogram

        t = clock()  # mark down the starting time
        rects = detect(gray, cascade)  # capture the face area
        dt = clock() - t  # the time for the detecting
        # make a copy of the image so we can draw on it
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))  # draw rectangles around the detected faces

        draw_str(vis, (20, 20),
                 'time: %.1f ms' % (dt * 1000))  # show the timing
        cv.imshow('facedetect-camshift', vis)  # show the plot

        if cv.waitKey(5) == 27:  # brief delay for the display; press ESC to break
            break
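
The window name 'facedetect-camshift' suggests the detection loop above only seeds a CamShift tracker, which is not part of the excerpt. Below is a minimal sketch of that next stage, assuming `cam`, `img` and `rects` are left over from the loop above; the histogram ranges and termination criteria follow the standard OpenCV camshift sample rather than this project's code.

import numpy as np
import cv2 as cv

x1, y1, x2, y2 = rects[0]
track_window = (x1, y1, x2 - x1, y2 - y1)

# Build a hue histogram of the detected face region for back-projection
hsv_roi = cv.cvtColor(img[y1:y2, x1:x2], cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX)
term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, img = cam.read()
    if not ret:
        break
    hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    prob = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    track_box, track_window = cv.CamShift(prob, track_window, term_crit)
    cv.ellipse(img, track_box, (0, 0, 255), 2)  # draw the tracked region
    cv.imshow('facedetect-camshift', img)
    if cv.waitKey(5) == 27:
        break
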
Beispiel #58
0
    #stream = cv2.VideoWriter("/home/chekkaa/git/rob421-applied-robotics/stream.avi", cv.CV_FOURCC(*'MJPG'), 60.0, (640, 480), True)

    shapes = [] # List of tracked shapes

    cv2.namedWindow('binary')
    cv2.namedWindow('contours')
    cv2.namedWindow('raw')
    cv2.createTrackbar('minarea', 'raw', 100, 10000, nothing)
    cv2.createTrackbar('maxarea', 'raw', 800, 10000, nothing)
    cv2.createTrackbar('lowthresh', 'raw', 200, 255, nothing)
    cv2.createTrackbar('highthresh', 'raw', 255, 255, nothing)

    while True:
        ret, img = cam.read()

        t = clock() # Start timing how long it took to process this frame

        grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        grayimg = cv2.equalizeHist(grayimg)
        lowthresh = cv2.getTrackbarPos('lowthresh', 'raw')
        highthresh = cv2.getTrackbarPos('highthresh', 'raw')
        ret, binimg = cv2.threshold(grayimg, lowthresh, highthresh, cv2.THRESH_BINARY)
        #binimg = cv2.adaptiveThreshold(grayimg, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 13, 5)
        #binimg = cv2.Canny(grayimg, 250, 255)
        # do erosion and dilation?
        visbinimg = binimg.copy()
        contours, hierarchy = cv2.findContours(binimg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cup_contours = []
        for contour in contours:
            contour_area = cv2.contourArea(contour)
            minarea = cv2.getTrackbarPos('minarea', 'raw')
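
The truncated loop above thresholds the frame, finds contours and begins filtering them by the 'minarea' trackbar. Below is a minimal, self-contained sketch of how that filtering step typically completes, keeping only contours whose area lies between the two trackbar values; the fixed threshold value and drawing colour are assumptions, and findContours is used with the two-value OpenCV 4.x return signature (it returns three values on 3.x).

import cv2

def nothing(_):
    pass

cv2.namedWindow('raw')
cv2.createTrackbar('minarea', 'raw', 100, 10000, nothing)
cv2.createTrackbar('maxarea', 'raw', 800, 10000, nothing)

cam = cv2.VideoCapture(0)
while True:
    ret, img = cam.read()
    if not ret:
        break
    grayimg = cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    ret, binimg = cv2.threshold(grayimg, 200, 255, cv2.THRESH_BINARY)
    # OpenCV 4.x signature: (contours, hierarchy)
    contours, hierarchy = cv2.findContours(binimg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    minarea = cv2.getTrackbarPos('minarea', 'raw')
    maxarea = cv2.getTrackbarPos('maxarea', 'raw')
    kept = [c for c in contours if minarea <= cv2.contourArea(c) <= maxarea]
    cv2.drawContours(img, kept, -1, (0, 255, 0), 2)

    cv2.imshow('raw', img)
    if cv2.waitKey(5) == 27:
        break
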
Beispiel #59
0
    try:
        video_src = video_src[0]
    except:
        video_src = 1
    args = dict(args)
    cascade_fn = args.get('--cascade', ".haarcascade_frontalface_alt2.xml")
    cascade = cv2.CascadeClassifier(cascade_fn)

    cam = create_capture(video_src)

    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        for x1, y1, x2, y2 in rects:
            roi = gray[y1:y2, x1:x2]
            vis_roi = vis[y1:y2, x1:x2]
        dt = clock() - t

        draw_str(vis, (20, 20), 'Processing time: %.1f ms' % (dt * 1000))
        cv2.imshow('Face detection', vis)
        cv2.imshow('Gray detection', gray)

        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
Beispiel #60
0
                          )  # path to the trained face-detection data
    nested_fn = args.get('--nested-cascade',
                         "data/haarcascades/haarcascade_eye.xml")

    cascade = cv.CascadeClassifier(
        cv.samples.findFile(cascade_fn))  # Loads the classifier from a file.
    nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))

    cam = create_capture(
        video_src,  # create_capture is a convenience function for capture creation,
        # falling back to procedural video in case of error.
        fallback='synth:bg={}:noise=0.05'.format(
            cv.samples.findFile('samples/data/lena.jpg')))

    while True:
        t = clock()  # start to count the time
        time = 0.0  # initialize the time counting
        ret, img = cam.read()  # capture a frame and store it
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY
                           )  # convert the color of the image from bgr to gray
        gray = cv.equalizeHist(
            gray)  # Equalizes the histogram of a grayscale image.
        vis = img.copy()  # make a copy of the image
        if detected.all() == 0 or total_time > 3000:
            # If no face has been detected yet, or none has been found in the current
            # sub-rectangle for more than 3000 ms, start detecting on the full image
            rects = detect(gray, cascade)
            if len(rects):
                # If a face is found, create the sub-rectangle by adding 50 pixels in each
                # direction (x and y) and reset the total time