Example #1
def callback(data):

    try:
        img = bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    time_start = clock()
    img_detected = detect_face(img)
    time_span = clock() - time_start

    if time_span == 0:
        fps = 0
    else:
        fps = 1 / time_span

    draw_str(img_detected, (5, 30), 'fps: %d' % fps)
    if show_video:
        cv2.imshow('face detection', img_detected)
        cv2.waitKey(1)

    try:
        pub.publish(bridge.cv2_to_imgmsg(img_detected, "bgr8"))
    except CvBridgeError as e:
        print(e)
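Note: nearly every example on this page assumes the draw_str and clock helpers from OpenCV's samples/python/common.py. For reference, a sketch of those helpers:

import cv2

def clock():
    # Wall-clock seconds from OpenCV's tick counter.
    return cv2.getTickCount() / cv2.getTickFrequency()

def draw_str(dst, target, s):
    # Draw a dark shadow first, then white text on top, for readability.
    x, y = target
    cv2.putText(dst, s, (x + 1, y + 1), cv2.FONT_HERSHEY_PLAIN, 1.0,
                (0, 0, 0), thickness=2, lineType=cv2.LINE_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0,
                (255, 255, 255), lineType=cv2.LINE_AA)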
Example #2
    def run(self):
        started = time.time()
        print "started"
        print started
        while True:

            ret, frame = self.cap.read()
            currentframe = frame.copy()
            # cv2.imshow("Image", currentframe)
            instant = time.time()
            # print instant
            self.processImage(currentframe)
            if not self.isRecording:
                if self.somethingHasMoved():
                    self.speedEstimation()
                cv2.drawContours(currentframe, self.currentcontours,-1,(0, 0, 255),2)
            if self.show:
                for dist in self.tracks_dist:
                    if dist[2] > 0:
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        # cv2.putText(currentframe,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
                        # cv2.putText(currentframe, str(dist[2]/(9*5/30)), (60, 60), font, 4,(255,255,255),2,cv2.CV_AA)
                        draw_str(currentframe,(dist[0],dist[1]), str(dist[2]/(9*5/30)))
                cv2.imshow("Image", currentframe)
            self.prev_gray = self.gray_frame
            self.frame_idx += 1
            c = cv2.waitKey(1) % 0x100
            if c == 27 or c == 10: # Break if user presses Esc or Enter.
                break
Example #3
	def run(self):
		while True:
			if (self.frame%10 == 0):
				self.frame = 1
			else:
				self.frame = self.frame +1
			ret, im = self.camera.read()
			im = cv2.flip(im, 1)
			self.imOrig = im.copy()
			self.imNoFilters = im.copy()
			im = cv2.blur(im, (self.Vars["smooth"], self.Vars["smooth"]))
			filter_ = self.filterSkin(im)
			filter_ = cv2.erode(filter_,
								cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.Vars["erode"], self.Vars["erode"])))           
			
			filter_ = cv2.dilate(filter_,
								 cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.Vars["dilate"], self.Vars["dilate"])))
			dilated = filter_.copy()
			cv2.imshow('dilated',dilated)
			try:
				vis,self.fingers, self.direction=findConvexHull().drawConvex(filter_,self.imOrig,self.frame)			
			except:
				# dilated = self.imOrig
				vis = self.imOrig
			common.draw_str(vis, (20, 50), 'No of fingers: ' + str(self.fingers))
			common.draw_str(vis, (20, 80), 'Movement of contour:' + str(self.direction))		
			cv2.imshow('image',vis)
			if cv2.waitKey(1) == 27:
				break
Example #4
	def run(self):
		cv2.namedWindow("Output")#, cv2.WINDOW_NORMAL)# | cv2.GUI_NORMAL) # GUI_NORMAL not supported for some reason?
		cv2.namedWindow("Input")#, cv2.WINDOW_NORMAL)
		cv2.resizeWindow("Output", *outputRes )
		cv2.resizeWindow("Input", *inputRes )
		
		while True:
			# Read a camera frame and process it.
			ret, frame = self.cam.read()
			self.input = frame.copy()
			frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

			# Basic thresholding
			ret, frameGray = cv2.threshold( frameGray, thresh, 255, cv2.THRESH_BINARY_INV )
			#frameGray = cv2.adaptiveThreshold( frameGray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize, C ) # This is essentially an edge detector.
			
			# Display the frame copy (with annotations)
			self.output = cv2.resize(frameGray,outputRes)
			h, w = self.input.shape[:2]
			H, W = self.output.shape[:2]
			cv2.rectangle( self.output, (0,0), (W, H), (0,0,0), -1 ) # Fill window with black.
			self.output[ (H-h)//2 : H - ((H-h)//2), (W-w)//2 : W - ((W-w)//2) ] = frameGray # Draw frame in centre of self.output; integer division keeps the slice indices ints.
			draw_str(self.output, (20, 40), 'Output')
			draw_str(self.input, (20, 40), 'Input')
			cv2.imshow('Output', self.output) # Show self.output
			cv2.imshow('Input', self.input) # Show self.input

			ch = 0xFF & cv2.waitKey(1)
			if ch == 27: # If Esc, exit
				break
Example #5
 def UpdateTracks(self, tracks):
     """
     Updates all the point lists using new and old image data.
     :param tracks: List of lists of points, in increasing order of recentness
     """
     new_tracks = []
     if len(tracks) > 0:
         #convert old pointlist to matrix, taking the last element in each point tracked
         p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
         #new pointlist from old points
         p1, st, err = cv2.calcOpticalFlowPyrLK(self.img0, self.img1, p0, None, **prm.lk_params)
         #old pointlist from new points
         p0r, st, err = cv2.calcOpticalFlowPyrLK(self.img1, self.img0, p1, None, **prm.lk_params)
         # forward-backward consistency check
         d = abs(p0-p0r).reshape(-1, 2).max(-1)
         good = d < 1
         for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
             if not good_flag:
                 continue
             tr.append((x, y))
             # cap the track length; long history is not needed here, so keep tracks short
             if len(tr) > prm.TRACK_LEN:
                 del tr[0]
             new_tracks.append(tr)
             if prm.DEBUG:
                 cv2.circle(self.vis, (x, y), 2, (0, 255, 0), -1)
         if prm.DEBUG:
             cv2.polylines(self.vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
             draw_str(self.vis, (20, 20), 'track count: %d' % len(tracks))
     return new_tracks
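Note: Example #5 and the lk_track-style examples below assume LK parameter dicts that are not shown (Example #5 reads them from a prm module, together with prm.TRACK_LEN and prm.DEBUG). A sketch of the assumed values, matching the defaults in OpenCV's samples/python/lk_track.py (TRACK_LEN and DEBUG are illustrative):

import cv2

lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict(maxCorners=500, qualityLevel=0.3,
                      minDistance=7, blockSize=7)
TRACK_LEN = 10  # illustrative; the source's value is not shown
DEBUG = True    # illustrative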
Example #6
def callback(data):
    bridge = CvBridge()
    try:
        cv_img = bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    time_start = clock()
        
    detect_people(cv_img, hog)

    time_span = clock() - time_start
    if time_span == 0:
        fps = 0
    else:
        fps = 1 / time_span
    draw_str(cv_img, (5,30), 'fps: %d' % fps)

    if show_video:
        cv2.imshow('people detection', cv_img)
        cv2.waitKey(1)

    pub = rospy.Publisher("/opencv/detect/people",Image, queue_size = 1)
    try:
        pub.publish(bridge.cv2_to_imgmsg(cv_img, "bgr8"))
    except CvBridgeError as e:
        print(e)
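Note: Example #6 calls a detect_people helper that is not shown. A minimal sketch of what such a helper presumably does with the HOG descriptor (the helper body and drawing details are assumptions):

def detect_people(img, hog):
    # Multi-scale HOG people detection; draw one box per hit.
    found, _weights = hog.detectMultiScale(img, winStride=(8, 8),
                                           padding=(32, 32), scale=1.05)
    for (x, y, w, h) in found:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)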
Example #7
def show_image(img, fps):
    fps.update()
    draw_str(img, (5, 30), 'fps: %s' % fps)

    cv2.imshow('play video', img)
    if 0xFF & cv2.waitKey(1) == KEY_ECS:
        raise ExitLoop
Example #8
def show_video(filename, start_frame, end_frame, label):

	cap = get_capture(filename)
	fps = get_fps(cap)
	assert(fps == 24)
	keyDown = False
	first_frame = True
	state = 'YES!'
	num_frames = end_frame - start_frame
	if cap.set(cv.CV_CAP_PROP_POS_FRAMES, start_frame):
		length = end_frame - start_frame
		for i in range(length):
			ret, frame = cap.read()
			if ret:
				if first_frame:
					draw_str(frame, (20, 20), '%s? %s' % (label, state))
					cv2.imshow('', frame)
					ch = cv2.waitKey(2000)
				else:
					ch = cv2.waitKey(int(1000/fps))
				if ch == 113:
					break
				first_frame = False
				state = 'NO!' if keyDown else 'YES!'
				draw_str(frame, (20, 20), '%s? %s' % (label, state))
				cv2.imshow('', frame)
				if ch != -1: # key pressed
					keyDown = not keyDown
			else:
				break
	else:
		raise Exception('unable to set position to %d' % start_frame)
	if state == 'NO!':
		return 0, num_frames
	return 1, num_frames
Example #9
def main():
	try:
		video_src = sys.argv[1]
	except:
		print help_message
		return

	cap = video.create_capture(video_src)
	capture = cv.CaptureFromFile(video_src)
	fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
	num_frames = float(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))

	keyDown = False
	states = []

	index = 0
	while True:

		ret, frame = cap.read()
		if ret:
			if len(states) == 0:
				# show first frame and let user decide if it is good or bad
				draw_str(frame, (20, 40), 'BAD')
				cv2.imshow('', frame)
				ch = cv2.waitKey(2500)
			else:
				ch = cv2.waitKey(int(1000/fps))

			if ch != -1: # key pressed
				keyDown = not keyDown

			if keyDown:
				state = 'GOOD'
				states.append(1)
			else:
				state = 'BAD'
				states.append(0)

			# draw_str(frame, (20, 40), state)
			draw_str(frame, (20, 20), '%s, %2d:%02d\t %2.2f%%' % (state, int((index / fps) / 60), int((index / fps) % 60), 100.0 * index / num_frames))
			cv2.imshow('', frame)

			index += 1
		else:
			# no more frames...
			break
	d = dict(states=states)
	content = json.dumps(d)

	# do not write a file if json parser fails
	if content:
		# write to disc
		f = open('%s.txt' % video_src,'w')	
		f.write(content)
		f.close()
	else:
		print 'error in json parser'
Example #10
    def run(self):
        while True:
            ret, frame = cap.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            cars = car_cascade.detectMultiScale(frame_gray, scaleFactor=1.1, minSize=(90, 90), maxSize=(800, 800))
            print len(cars)

            for (x,y,w,h) in cars:
                cv2.rectangle(vis,(x,y),(x+w,y+h),(0,0,255),2)

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                #print p0
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                # cars = car_cascade.detectMultiScale(frame_gray, scaleFactor=1.1, minSize=(90, 90), maxSize=(1000, 1000))
                # for (x,y,w,h) in cars:
                #     cv2.rectangle(vis,(x,y),(x+w,y+h),(0,0,255),2)
                #     car_centroid = [(x+(x/2)), (y+(y/2))]
                #     # self.tracks = np.append(self.tracks, car_centroid, axis=0)
                #     # p0 = np.float32(self.tracks).reshape(-1, 1, 2)
                #print "p: \n", car_centroid
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])

            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #11
    def run(self):
        self.points_array=[]
        frame_id = -1
        while True:
            frame_id += 1
            present_tracks = []
            ret, frame = self.cam.read()
            if frame is None:
                print("Video Ended")
                break
            if frame_id%self.drop_rate:
                continue

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    present_tracks.append(tr[-2:])
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                # present_tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])

            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)
            self.points_array.append(present_tracks)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #12
def draw(rects, mode, method, frame):
    color = (0, 0, 255)  # Rect color selection
    for (x, y, w, h) in rects:
        pt1 = (int(x), int(y))
        pt2 = (int(x + w), int(y + h))
        cv2.rectangle(frame, pt1, pt2, color, 1)  # Draws the rect

    draw_str(frame, (20, 360 - 65), "Frame Num: %d" % (mode))
    draw_str(frame, (20, 360 - 50), "Mode: %s" % (method))
Example #13
 def draw_state(self, vis):
     (x, y), (w, h) = self.pos, self.size
     x1, y1, x2, y2 = int(x-0.5*w), int(y-0.5*h), int(x+0.5*w), int(y+0.5*h)
     cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 0, 255))
     if self.good:
         cv2.circle(vis, (int(x), int(y)), 2, (0, 0, 255), -1)
     else:
         cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255))
         cv2.line(vis, (x2, y1), (x1, y2), (0, 0, 255))
     draw_str(vis, (x1, y2+16), 'PSR: %.2f' % self.psr)
Example #14
def play_video(img, time_start, frame_count):
    time_span = clock() - time_start
    if time_span == 0:
        fps = 0
    else:
        fps = frame_count / time_span
    draw_str(img, (5, 30), 'fps: %d' % fps)

    cv2.imshow('play video', img)
    if 0xFF & cv2.waitKey(1) == KEY_ECS:
        raise ExitLoop
Example #15
def drawContour(label,color,contour, r=0):
    '''Draws the contour at its center point with color 'color' and label 'label'.'''
    M = cv2.moments(contour)
    if M["m00"] == 0.0:
        M["m00"] += 1
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    x,y = center
    cv2.circle(frame, center, 5, color, 3)
    draw_str(frame, center, str(label))
    draw_str(frame, (x-30,y), str(r))
Example #16
def main():
	video_src = None
	try:
		arg1 = sys.argv[1]
		arg2 = sys.argv[2]
		if arg1 == '-?':
			print help_message
			return
		else:
			video_src = arg1	
	except:		
		if video_src is None:
			print help_message
			return

	d,frames = getVideoMetadata(video_src, True)

	shift_vectors = d['shift_vectors']
	rmsdiffs = d['rmsdiffs']
	shift_vectors_sliding = d['shift_vectors_sliding']
	stand_dev = d['stand_dev']

	capture = cv.CaptureFromFile(video_src)
	fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
	num_frames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))

	# degree of smoothness
	degree = 12
	# time in seconds
	t = np.linspace(0, num_frames/fps, len(frames))
	# values are normalized to [0...1]
	# the magnitude of the vector - apply weights according to the direction of the vector? ie. vertical panning is worse than horizontal
	magnitudes = smoothTriangle((np.array([math.sqrt(x**2 + y**2) for x,y in shift_vectors])**2)/(63**2), degree)	
	# a measure of variation/contrast in the frame - a high value indicated less contrast and vice versa
	contrast = smoothTriangle((127.5 - np.array(stand_dev)) / 127.5, degree)
	# compute if a frame is accepted, and the according value
	if arg2 == 'anders':
		frame_states, frame_values = computeFrameStateAnders(magnitudes, contrast)
	elif arg2 == 'lauge':
		frame_states, frame_values = computeFrameStateLauge(magnitudes, contrast)
	else:
		return

	print 'video playback...'

	index = 0
	for frame in frames:
		frame_state = 'BAD' if not frame_states[index] else 'GOOD'
		frame_value = frame_values[index]
		draw_str(frame, (20, 20), 'time: %2.1f:%2.1f, magnitude: %2.2f%%, contrast^-1: %2.2f' % (t[index], t[-1], 100 * magnitudes[index], contrast[index]))
		draw_str(frame, (20, 40), '%s (%2.3f)' % (frame_state, frame_value))

		index += 1
		cv2.imshow('final cut', frame)
		cv2.waitKey(int(1000/fps))
Example #17
    def run(self):
        while True:
            ret, frame = self.cam.read()  # get frame

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray  # old and new image
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)  # last point of each track, shaped for calcOpticalFlowPyrLK
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)

                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0 - p0r).reshape(-1, 2).max(-1)  # forward-backward error for each point

                good = d < 1  # keep points whose round-trip error is under one pixel

                new_tracks = []

                for tr, (x, y), good_flag in zip(
                    self.tracks, p1.reshape(-1, 2), good
                ):  # taking old points, new points and flags
                    if not good_flag:  # skip points that failed the consistency check
                        continue
                    tr.append((x, y))  # extend this track's path with the new point
                    if len(tr) > self.track_len:  # delete old points
                        del tr[0]
                    new_tracks.append(tr)  # create new list of tracking points
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)  # draw it
                self.tracks = new_tracks

                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))  # draw tracks
                draw_str(vis, (20, 20), "track count: %d" % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 0

                mask[100:200, 100:200] = 255

                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
                # print p, '!!!!!!!!!!!'
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])

            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow("lk_track", vis)

            ch = cv2.waitKey(1)
            if ch == 27:
                break
Example #18
    def run(self):
        while True:
            ret, frame = self.cam.read()
            if not ret:
                return
            global frame_gray
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            global height, width, depth
            height, width = frame_gray.shape
            #print(frame_gray.shape)
            global vis
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                print("#"*30+"\n")    
                print(tr) 
                print("#"*30+"\n")
                self.tracks = new_tracks
                print(kmean(new_tracks))
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])


            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #19
 def callback(self, data):
    cv_img=self.convert_image(data)
    #cascade = cv2.CascadeClassifier("irobot_hog_detect.xml")
    gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    t = clock()
    rects = detect(gray, cascade)
    vis = cv_img.copy()
    draw_rects(vis, rects, (0, 255, 0))
    dt = clock() - t
    draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
    cv2.imshow('Irobot_Detector', vis)
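Note: this and the other cascade-based face-detection examples (Examples #22, #23, #34, #35, #37, #41, #44) assume the detect and draw_rects helpers from OpenCV's samples/python/facedetect.py:

def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4,
                                     minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:, 2:] += rects[:, :2]  # convert (x, y, w, h) to (x1, y1, x2, y2)
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)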
Example #20
    def show_video(self, img):
        self.fps.update()
        draw_str(img, (5, 30), "fps: %s" % self.fps)

        cv2.imshow("show %s" % (self.topic), img)
        key = cv2.waitKey(1)
        if 0xFF & key == self.KEY_ECS:
            rospy.signal_shutdown("User hit q key to quit.")
        elif 0xFF & key == ord("a"):
            file_name = "image_%s.jpg" % (str(int(clock())))
            cv2.imwrite(file_name, img)
            print "%s has saved." % file_name
Example #21
    def run(self):

        while True:
            ret, frame = self.cam.read()

            # Create a region of interest to detect features in
            ROI = np.ones(frame.shape[:2], np.uint8)
            height, width = ROI.shape
            ROI[:height//3] = 0
            ROI[-height//3:] = 0
            mask = ROI

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                #mask = np.zeros_like(frame_gray)
                #mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])


            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #22
def get_face():
    import sys, getopt
    #print help_message

    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get('--cascade', "haarcascade_frontalface_alt.xml")
    nested_fn  = args.get('--nested-cascade', "haarcascade_eye.xml")

    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)

    cam = create_capture(video_src, fallback='synth:bg=lena.jpg:noise=0.05')

    just_face = ''

    while True:
        #pdb.set_trace()
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()

        # Change to crop image to only show face
        if (just_face == ''):
            just_face = img.copy()

        param = (cv2.IMWRITE_PXM_BINARY, 1)
        if (len(rects) > 0):
            just_face = gray.copy()
            (x1,y1,x2,y2) = rects[0]
            just_face = just_face[y1:y2, x1:x2]
            cv2.imwrite('./test_face.pgm', just_face, param)
            return './test_face.pgm'

        vis_roi = vis
        draw_rects(vis, rects, (0, 255, 0))
        dt = clock() - t

        draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
        cv2.imshow('facedetect', just_face)

        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
Example #23
def faceDetect(loopCount):
    import sys, getopt

    args, video_src = getopt.getopt(sys.argv[1:], "", ["cascade=", "nested-cascade="])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get("--cascade", OpenCVInstallDir + "data/haarcascades/haarcascade_frontalface_alt.xml")
    cascade = cv2.CascadeClassifier(cascade_fn)
    cam = create_capture(video_src, fallback="synth:bg=../cpp/lena.jpg:noise=0.05")
    idx = 0

    if loopCount == 0:
        loopCount = 1
        infinteLoop = True
    else:
        infinteLoop = False

    while idx < loopCount:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        dt = clock() - t

        draw_str(vis, (20, 20), "time: %.1f ms" % (dt * 1000))
        if len(rects) == 0:
            draw_str(vis, (20, 40), "We are having trouble seeing you, move around just a bit")
        #            draw_str(vis,(20,450), 'Look Here')
        else:
            if infinteLoop:
                idx = 0
                print rects
            else:
                idx = idx + 1
                try:
                    rectsum = rectsum + rects
                except:
                    rectsum = rects
                    # first time assignment

        #       cv2.imshow('facetracker', vis)

        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
    return rectsum / idx
Example #24
def draw_flow(img, flow, step=8):
    h, w = img.shape[:2]
    y, x = np.mgrid[step//2:h:step, step//2:w:step].reshape(2,-1)  # integer grid so it can index flow under Python 3
    fx, fy = flow[y,x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
        mag = math.sqrt((x2-x1)**2 + (y2-y1)**2)
        draw_str(vis, (x1, y1), "{}".format(int(mag)))
    return vis
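Note: draw_flow expects a grayscale image and a dense flow field. A usage sketch, assuming a webcam source and the Farneback parameter values used in OpenCV's opt_flow.py sample:

import cv2

cap = cv2.VideoCapture(0)
ret, prev = cap.read()
prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
while True:
    ret, img = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    prevgray = gray
    cv2.imshow('flow', draw_flow(gray, flow))
    if cv2.waitKey(5) == 27:
        break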
Example #25
    def run(self):
        cv2.namedWindow("lk_track")
        while True:
            ret, frame = self.cam.read()
            # something about this makes the video recorder actually turn on?
            if cv2.waitKey(1) & 0xFF == ord('q'):
              break
            # keep continuing until the camera delivers a frame
            if frame is None:
              continue
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])


            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #26
    def showLeftRightMost(self): # regardless of the hands' actual y, the center y is used
        x,y = self.center()

        y += 20

        #Performance Mode, set center y as left and rightmost y
        if self.leftmost[1] > y:
            cv2.circle(frame, (self.leftmost[0],y), 5, (0,0,255), 3)
            draw_str(frame, (self.leftmost[0],y), str("left"))
        else:
            cv2.circle(frame, self.leftmost, 5, (0,0,255), 3)
            draw_str(frame, self.leftmost, str("left"))

        if self.rightmost[1] > y:
            cv2.circle(frame, (self.rightmost[0],y), 5, (0,0,255), 3)
            draw_str(frame, (self.rightmost[0],y), str("right"))
        else:
            cv2.circle(frame, self.rightmost, 5, (0,0,255), 3)
            draw_str(frame, self.rightmost, str("right"))

        cv2.putText(thresh, "head", self.extTop, cv2.FONT_HERSHEY_SIMPLEX,
		0.5, (127, 255, 0), 2)
        cv2.putText(thresh, "leg", self.otherLeg, cv2.FONT_HERSHEY_SIMPLEX,
		0.5, (127, 255, 0), 2)
        cv2.putText(thresh, "leg", self.extBot, cv2.FONT_HERSHEY_SIMPLEX,
		0.5, (127, 255, 0), 2)
Example #27
    def run(self):
        capture = cv2.VideoCapture("./data/video2.mp4")
        while self.flag:
            if self.UseCam:
                self.fps, frame = self.cam.read()
            else:
                self.fps, frame = capture.read()
            if not self.fps:
                self.tracks = []
                continue
            self.h, self.w = frame.shape[:2]
            # frame = self.undistort(frame)
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            self.vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(self.vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(self.vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(self.vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])

            self.frame_idx += 1
            self.prev_gray = frame_gray

            self._signal.emit()
Example #28
    def run(self):
        count_bad = 0
        while True:
            ret, frame = self.cam.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                count_bad = 0
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        count_bad += 1
                        continue
                    tr.append((x, y))
                    # if len(tr) > self.track_len:
                    # del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), "track count: %d" % len(self.tracks))
                draw_str(vis, (20, 50), "bad flags: %d" % count_bad)

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])

            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow("custom_lk_track", vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
            if ch == 112:  # 'p' pauses playback briefly
                time.sleep(3.5)
Example #29
    def run(self):
        while True:
            ret, frame = self.cam.read()
            frame = frame[:,::-1,:]
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = self.apply_filter(frame.copy())

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))
                self.gesture = self.detect_direction(self.tracks)

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append(deque([(x, y)],self.track_len))


            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = cv2.waitKey(1)
            if ch == 2424832:    # left arrow
                self.filter = (self.filter - 1) % 5
            elif ch == 2555904:  # right arrow
                self.filter = (self.filter + 1) % 5
            elif ch == 27:
                break
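Note: unlike the other LK examples, Example #29 stores each track in a collections.deque with maxlen set to track_len, so the oldest point is discarded automatically and no explicit del tr[0] trim is needed. A minimal illustration of that behaviour:

from collections import deque

tr = deque([(0, 0)], maxlen=3)
for p in [(1, 1), (2, 2), (3, 3)]:
    tr.append(p)      # once full, the oldest point is dropped automatically
print(list(tr))       # [(1, 1), (2, 2), (3, 3)]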
Example #30
def main():
    import sys
    try: video_src = sys.argv[1]
    except: video_src = video.presets['chess']

    print help_message

    track_len = 4
    tracks = []
    cam = video.create_capture(video_src)
    old_mode = True
    while True:
        ret, frame = cam.read()
        vis = frame.copy()
        if len(tracks) > 0:
            p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
            img0 = cv2.cvtColor(prev_frame, cv.CV_BGR2GRAY)
            img1 = cv2.cvtColor(frame, cv.CV_BGR2GRAY)
            if old_mode:
                p1,  st, err, dt = calc_flow_old(img0, img1, p0)
            else:
                t = clock()
                p1,  st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, **lk_params)
                dt = clock()-t
            for tr, (x, y) in zip(tracks, p1.reshape(-1, 2)):
                tr.append((x, y))
                if len(tr) > 10:
                    del tr[0]
                cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
            cv2.polylines(vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
            draw_str(vis, (20, 20), ['new', 'old'][old_mode]+' mode')
            draw_str(vis, (20, 40), 'time: %.02f ms' % (dt*1000))
        prev_frame = frame.copy()

        cv2.imshow('lk_track', vis)
        ch = cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord(' ') or len(tracks) == 0:
            gray = cv2.cvtColor(frame, cv.CV_BGR2GRAY)
            p = cv2.goodFeaturesToTrack(gray, **feature_params)
            p = [] if p is None else p.reshape(-1, 2)
            tracks = []
            for x, y in np.float32(p):
                tracks.append([(x, y)])
        if ch == ord('1'):
            old_mode = not old_mode
Example #31
def main():
    global boxes, objid

    cam = video.create_capture('cut1.mp4')
    content = readjson('video.json')

    framecount = 0
    for frames in content:
        oldboxes = boxes
        boxes = []
        ret, frame = cam.read()
        for obj in frames:
            if obj['confidence'] >= 0.3 and obj['label'] in objlist:
                newbox = Box(obj['topleft']['y'], obj['topleft']['x'],
                             obj['bottomright']['y'], obj['bottomright']['x'],
                             obj['label'])
                if framecount == 0:
                    newbox.id = objid
                    objid += 1
                boxes.append(newbox)
        frame = optflow(frame)
        for box in boxes:
            box.findFeatures()
            box.matchPoints(oldboxes)
            cv2.rectangle(frame, (box.left, box.top), (box.right, box.bot),
                          box.color, 3)
            # if len(box.history) > 0:
            #     center1 = ((box.history[0].left+box.history[0].right)/2, (box.history[0].top+box.history[0].bot)/2)
            #     last = len(box.history) - 1
            #     center2 = ((box.history[last].left+box.history[last].right)/2, (box.history[last].top+box.history[last].bot)/2)
            #     cv2.arrowedLine(frame, center1, center2, (0, 0, 225), 2)
            # for i in range(prediction_range_begin, last):
            #     if i%prediction_rate == 0:
            #         prediction_lefttop = (2*box.history[i].left - box.history[0].left, 2*box.history[i].top - box.history[0].top)
            #         prediction_botright = (2*box.history[i].right - box.history[0].right, 2*box.history[i].bot - box.history[0].bot)
            #         cv2.rectangle(frame, prediction_lefttop, prediction_botright, box.color, 1)
            draw_str(frame, (box.left, box.top - 10), str(box.id))
        cv2.imshow('track', frame)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        framecount += 1
    cam.release()
    cv2.destroyAllWindows()
Example #32
def main():
    print(help_message)

    w, h = 512, 512

    args, args_list = getopt.getopt(sys.argv[1:], 'o:', [])
    args = dict(args)
    out = None
    if '-o' in args:
        fn = args['-o']
        out = cv.VideoWriter(args['-o'], cv.VideoWriter_fourcc(*'DIB '), 30.0, (w, h), False)
        print('writing %s ...' % fn)

    turing = np.zeros((h, w), np.float32)
    cv.randu(turing, np.array([0]), np.array([1]))

    def process_scale(a_lods, lod):
        d = a_lods[lod] - cv.pyrUp(a_lods[lod+1])
        for _i in range(lod):
            d = cv.pyrUp(d)
        v = cv.GaussianBlur(d*d, (3, 3), 0)
        return np.sign(d), v

    scale_num = 6
    for frame_i in count():
        a_lods = [turing]
        for i in range(scale_num):
            a_lods.append(cv.pyrDown(a_lods[-1]))
        ms, vs = [], []
        for i in range(1, scale_num):
            m, v = process_scale(a_lods, i)
            ms.append(m)
            vs.append(v)
        mi = np.argmin(vs, 0)
        turing += np.choose(mi, ms) * 0.025
        turing = (turing-turing.min()) / turing.ptp()

        if out:
            out.write(turing)
        vis = turing.copy()
        draw_str(vis, (20, 20), 'frame %d' % frame_i)
        cv.imshow(titleWindow, vis)
        if cv.waitKey(5) == 27:
            break
Example #33
    def run(self):
        while True:
            ret, frame = self.cam.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                    cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
                self.tracks = new_tracks
                cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
                draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

            if self.frame_idx % self.detect_interval == 0:
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([(x, y)])


            self.frame_idx += 1
            self.prev_gray = frame_gray
            cv2.imshow('lk_track', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
Example #34
def faceDetect(img, classifier_xml_dir):
    cascade = cv2.CascadeClassifier(classifier_xml_dir)
    #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  ## Feed in grayscale
    gray = img
    gray = cv2.equalizeHist(gray)
    t = clock()
    rects = detect(gray, cascade)
    if len(rects) == 0:
        facesFound = 0
    else:
        facesFound = 1
    vis = img.copy()
    draw_rects(vis, rects, (0, 255, 0))
    for x1, y1, x2, y2 in rects:
        roi = gray[y1:y2, x1:x2]
        vis_roi = vis[y1:y2, x1:x2]
    dt = clock() - t
    draw_str(vis, (20, 20), 'time: %.1f ms' % (dt * 1000))
    return (rects, facesFound)
Example #35
def OpenCVCode(imgRGB, depth_colormap):
    global cascade, nested
    gray = cv.cvtColor(imgRGB, cv.COLOR_BGR2GRAY)
    gray = cv.equalizeHist(gray)

    t = clock()
    rects = detect(gray, cascade)
    vis = imgRGB.copy()
    draw_rects(vis, rects, (0, 255, 0))
    if not nested.empty():
        for x1, y1, x2, y2 in rects:
            roi = gray[y1:y2, x1:x2]
            vis_roi = vis[y1:y2, x1:x2]
            subrects = detect(roi.copy(), nested)
            draw_rects(vis_roi, subrects, (255, 0, 0))
    dt = clock() - t

    draw_str(vis, (20, 20), 'time: %.1f ms' % (dt * 1000))
    cv.imshow('facedetect', vis)
Example #36
    def process_image(self):
        """
        This function finds faces, draws them and their approximation by ellipses.
        """
        t = clock()
        rects = self.detect(self.gray, self.cascade)
        vis = self.source_color
        self.draw_rects(np.asarray(vis[:, :]), rects, (0, 255, 0))
        for x1, y1, x2, y2 in rects:
            roi = self.gray[y1:y2, x1:x2]
            vis_roi = vis[y1:y2, x1:x2]
            subrects = self.detect(roi.copy(), self.nested)
            self.draw_rects(np.asarray(vis_roi[:, :]), subrects, (255, 0, 0))
        dt = clock() - t

        draw_str(np.asarray(vis[:, :]), (20, 20),
                 'time: %.1f ms' % (dt * 1000))
        cv2.imshow('facedetect', np.asarray(vis[:, :]))
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #37
def main():
    import sys, getopt

    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get('--cascade', "haarcascade_frontalface_alt.xml")
    nested_fn  = args.get('--nested-cascade', "haarcascade_eye.xml")

    cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
    nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))

    cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('data/lena.jpg')))

    while True:
        _ret, img = cam.read()
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        gray = cv.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        if not nested.empty():
            for x1, y1, x2, y2 in rects:
                roi = gray[y1:y2, x1:x2]
                vis_roi = vis[y1:y2, x1:x2]
                subrects = detect(roi.copy(), nested)
                draw_rects(vis_roi, subrects, (255, 0, 0))
        dt = clock() - t

        draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
        cv.imshow('facedetect', vis)

        if cv.waitKey(5) == 27:
            break

    print('Done')
Example #38
def lk_track(frame):
    global track_len, detect_interval, tracks, frame_idx, prev_gray, lk_params, feature_params
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    vis = frame.copy()

    if len(tracks) > 0:
        img0, img1 = prev_gray, frame_gray
        p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
        p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None,
                                               **lk_params)
        p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None,
                                                **lk_params)
        d = abs(p0 - p0r).reshape(-1, 2).max(-1)
        good = d < 1
        new_tracks = []
        for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
            if not good_flag:
                continue
            tr.append((x, y))
            if len(tr) > track_len:
                del tr[0]
            new_tracks.append(tr)
            cv2.circle(vis, (x, y), 3, (0, 0, 255), -1)
        tracks = new_tracks
        cv2.polylines(vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
        draw_str(vis, (250, 300), 'track count: %d' % len(tracks))

    if frame_idx % detect_interval == 0:
        mask = np.zeros_like(frame_gray)
        mask[:] = 255
        for x, y in [np.int32(tr[-1]) for tr in tracks]:
            cv2.circle(mask, (x, y), 5, 0, -1)
        p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
        if p is not None:
            for x, y in np.float32(p).reshape(-1, 2):
                tracks.append([(x, y)])

    frame_idx += 1
    prev_gray = frame_gray
    #cv2.imshow('lk_track', vis)
    return vis
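Note: Example #38 keeps its tracker state in module-level globals that are not shown. A sketch of the assumed initialization (values mirror the lk_track.py defaults quoted earlier; the source's actual values are not shown):

# Assumed module-level state for lk_track() above.
track_len = 10
detect_interval = 5
tracks = []
frame_idx = 0
prev_gray = None  # set once the first frame has been processed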
Example #39
    def update(self):

        self.get_new_data()

        # Render the plot
        self.plot_buffer = self.genPlot(self.ring_buffer.get())

        # Assemble the output: top half text, bottom half graph
        output_buffer = np.zeros((self.h, self.w, 4)).astype(np.uint8)
        output_buffer[self.h - self.plot_h:, ...] = self.plot_buffer.copy()

        # Add text for current value
        draw_str(
            output_buffer,
            (self.text_offset_x, self.text_offset_y - 2),
            f"{self.current_value_label} {self.current_value:.1f}",
            # "fps: %.1f" % self.current_value,
            self.text_align,
        )

        self.output = output_buffer.copy()
Example #40
    def draw_state(self, vis, i):
        (x, y), (w, h) = self.pos, self.size
        x1, y1, x2, y2 = int(x - 0.5 * w), int(y - 0.5 * h), int(x + 0.5 * w), int(y + 0.5 * h)
        cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 0, 255))
        if self.good:
            cv2.circle(vis, (int(x), int(y)), 2, (0, 0, 255), -1)
            win32api.SetCursorPos((int(screenWidth - 4 * x), int(2 * y)))
            i = 0

        else:
            cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255))
            cv2.line(vis, (x2, y1), (x1, y2), (0, 0, 255))
            while (i == 0):
                #win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,int(2*x),int(2*y),0,0)
                #win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,int(2*x),int(2*y),0,0)
                i = 1
        draw_str(vis, (x1, y2 + 16), 'PSR: %.2f' % self.psr)
Example #41
    def capture(self):
        string = ""
        ret, img = self.cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = self.detect(gray, self.cascade)
        if len(rects) == 0:
            string = str(int(round(time.time() * 1000))) + ";n.f.;"
        vis = img.copy()
        self.draw_rects(vis, rects, (0, 255, 0))
        for x1, y1, x2, y2 in rects:
            roi = gray[y1:y2, x1:x2]
            vis_roi = vis[y1:y2, x1:x2]
            subrects_fn = self.detect(roi.copy(), self.nested)
            subrects_glass = self.detect(roi.copy(), self.glass)
            subrects_le = self.detect(roi.copy(), self.le)
            subrects_re = self.detect(roi.copy(), self.re)
            string = string + str(int(round(time.time() * 1000))) + ";"
            if not len(subrects_fn) == 0:
                self.draw_rects(vis_roi, subrects_fn, (255, 0, 0))
                string = string + "1;"
            elif not len(subrects_glass) == 0:
                self.draw_rects(vis_roi, subrects_glass, (255, 0, 0))
                string = string + "1;"
            elif (not len(subrects_le) == 0) or (not len(subrects_re) == 0):
                self.draw_rects(vis_roi, subrects_le, (255, 0, 0))
                self.draw_rects(vis_roi, subrects_re, (255, 0, 0))
                string = string + "0;"
            else:
                string = string + "n.e.;"
        dt = clock() - t

        draw_str(vis, (20, 20), 'time: %.1f ms' % (dt * 1000))
        cv2.imshow('facedetect', vis)
        cv2.imwrite(
            self.foldername + "/eyeCaptureImages/" +
            str(int(round(time.time() * 1000))) + ".jpg", vis)
        return string
Example #42
    def _gen_output_frame(self, frame):
        app = self.apps_map[self.current_app]

        label_start = (0, 0)
        label_end = (700, 100)
        colour = (255, 255, 255)
        #rectangle(frame, label_start, label_end, colour, thickness=cv.CV_FILLED)

        # frame process time
        text1_start = (40, 40)
        text1 = "%-15s: %.1f ms" % ("frame interval", app.process_time)
        draw_str(frame, text1_start, text1)

        # execution mode
        text2_start = (40, 80)
        if app.id_ == 4:
            text2 = "%-15s : %s" % ("Pipeline",
                                    ModeType.darknet_name[app.current_mode])
        else:
            text2 = "%-15s : %s" % ("Pipeline",
                                    ModeType.name[app.current_mode])
        draw_str(frame, text2_start, text2)

        # app
        text3_start = (40, 120)
        text3 = "%-12s : %s" % ("Benchmark", app.name)
        draw_str(frame, text3_start, text3)

        return frame
Example #43
    def OpenCVCode(self, frame, depth_colormap, frameCount):
        frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        vis = frame.copy()

        if len(self.tracks) > 0:
            img0, img1 = self.prev_gray, frame_gray
            p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
            p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
            p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
            d = abs(p0-p0r).reshape(-1, 2).max(-1)
            good = d < 1
            new_tracks = []
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                tr.append((x, y))
                if len(tr) > self.track_len:
                    del tr[0]
                new_tracks.append(tr)
                cv.circle(vis, (x, y), 2, (0, 255, 0), -1)
            self.tracks = new_tracks
            cv.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
            draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

        if self.frame_idx % self.detect_interval == 0:
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
                cv.circle(mask, (x, y), 5, 0, -1)
            p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
            if p is not None:
                for x, y in np.float32(p).reshape(-1, 2):
                    self.tracks.append([(x, y)])


        self.frame_idx += 1
        self.prev_gray = frame_gray
        cv.imshow('KLT_track_PS', vis)
        return vis, None
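The lk_params and feature_params dictionaries referenced by this tracker are module-level globals; the customary values from OpenCV's lk_track.py sample (individual projects may tune them) are:

import cv2 as cv

lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))

feature_params = dict(maxCorners=500,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)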
Example #44
0
def main(video, fc, ec):
    print(__doc__)
    cap = cv2.VideoCapture(video)
    fCascade = cv2.CascadeClassifier(fc)
    eCascade = cv2.CascadeClassifier(ec)
    # for opt_flow
    ret, prev = cap.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    while True:
        if cv2.waitKey(1) == 27:
            break
        t = clock()
        ret, img = cap.read()
        if not ret:  # stop cleanly when the video ends
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        rects = detect(gray, fCascade)
        for sx1, sy1, sx2, sy2 in rects:
            # for opt_flow
            flow = cv2.calcOpticalFlowFarneback(prevgray[sy1:sy2, sx1:sx2],
                                                gray[sy1:sy2, sx1:sx2], None,
                                                0.5, 3, 15, 3, 5, 1.2, 0)
            draw_flow(img[sy1:sy2, sx1:sx2], flow)
        draw_rects(img, rects, (0, 255, 0))
        if not eCascade.empty():
            for x1, y1, x2, y2 in rects:
                roi = gray[y1:y2, x1:x2]
                vis_roi = img[y1:y2, x1:x2]
                subrects = detect(roi.copy(), eCascade)
                draw_rects(vis_roi, subrects, (255, 0, 0))

        prevgray = gray
        dt = clock() - t
        draw_str(img, (20, 20), 'time: %.1f ms' % (dt * 1000))
        cv2.imshow('HTJC', img)

    cap.release()
    cv2.destroyAllWindows()
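The module-level detect and draw_rects helpers used here (and in several later examples) come from OpenCV's facedetect.py sample; their standard definitions are:

import cv2

def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4,
                                     minSize=(30, 30),
                                     flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:, 2:] += rects[:, :2]  # convert (x, y, w, h) to (x1, y1, x2, y2)
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)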
Example #45
0
    def threadedProcess(self):

        rects = [] 
        if len(self.pendingWorker) > 0 and self.pendingWorker[0].ready():
            task = self.pendingWorker.popleft()
            frame, curTime = task.get()
            self.latency.update(clock() - curTime)
            
            draw_str(frame, (20, 360 - 20), "Latency: %.1f ms" % (self.latency.value * 1000))
            draw_str(frame, (20, 360 - 35), "FPS: %d" % (1 / self.frameInterval.value))
            self.outFrames.append(frame)
        '''
        if len(self.pendingWorker) > 0:
            for i in range(0, len(self.pendingWorker)):
                if self.pendingWorker[i].ready():
                    for j in range(0, i):
                        waste = self.pendingWorker.popleft()
                        try:
                            waste.get()
                        except:
                            pass

                    task = self.pendingWorker.popleft()
                    frame, time = task.get()
                    self.latency.update(clock() - time)
                    draw_str(frame, (20, 20), "Latency: %.1f ms" % (self.latency.value*1000))
                    draw_str(frame, (300, 20), "FPS: %d" % (1/self.frameInterval.value))
                    cv2.imshow('Processed Video', frame)
                    cv2.waitKey(1)
                    break
        '''
        if len(self.pendingWorker) < self.numThread:
            grab, frame = self.stream.read()
            t = clock()
            self.frameInterval.update(t - self.lastFrameTime)
            self.lastFrameTime = t
            task = self.workerPool.apply_async(process, (copy.copy(frame), t))
            self.pendingWorker.append(task)
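clock() and the StatValue objects behind self.latency and self.frameInterval come from OpenCV's common.py helpers; minimal equivalents, for context:

import cv2

def clock():
    # wall-clock seconds, based on OpenCV's tick counter
    return cv2.getTickCount() / cv2.getTickFrequency()

class StatValue:
    """Exponentially smoothed scalar, as used for latency/FPS readouts."""
    def __init__(self, smooth_coef=0.5):
        self.value = None
        self.smooth_coef = smooth_coef

    def update(self, v):
        if self.value is None:
            self.value = v
        else:
            c = self.smooth_coef
            self.value = c * self.value + (1.0 - c) * v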
Example #46
0
    def process_frame(frame, t0):
        # some intensive computation...
        # frame = cv2.medianBlur(frame, 19)
        # frame = cv2.medianBlur(frame, 19)
        # frame = cv2.bilateralFilter(frame,9,75,75)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = frame.copy()
        draw_rects(vis, rects, (0, 255, 0))
        if not nested.empty():
            for x1, y1, x2, y2 in rects:
                roi = gray[y1:y2, x1:x2]
                vis_roi = vis[y1:y2, x1:x2]
                subrects = detect(roi.copy(), nested)
                draw_rects(vis_roi, subrects, (255, 0, 0))
        dt = clock() - t

        draw_str(vis, (20, 80), 'time: %.1f ms' % (dt*1000))
        # cv2.imshow('facedetect', vis)
        return vis, t0  # return the full annotated frame, not just the last ROI
Example #47
0
def main():
    global boxes, objid
    maxrange = 100
    maxwidth = 500
    cam = video.create_capture('cut1.mp4')
    content = readjson('video.json')

    for frames in content:
        boxes = []
        ret, frame = cam.read()
        frameheight = np.size(frame, 0)
        birdframe = np.zeros((frameheight, maxwidth, 3), np.uint8)
        birdframe[:] = (255, 255, 255)
        for obj in frames:
            if obj['confidence'] >= 0.3:
                newbox = Box(obj['topleft']['y'], obj['topleft']['x'],
                             obj['bottomright']['y'], obj['bottomright']['x'],
                             obj['label'])
                boxes.append(newbox)
                newbox.calcDistance()
        for box in boxes:
            cv2.rectangle(frame, (box.left, box.top), (box.right, box.bot),
                          (255, 0, 0), 2)
            draw_str(frame, (box.left, box.top - 10), str(box.distance)[0:5])
            cv2.rectangle(
                birdframe, (int(box.left * maxwidth / 1280),
                            int(frameheight - box.distance * 720 / maxrange)),
                (int(box.right * maxwidth / 1280),
                 int(frameheight - box.distance * 720 / maxrange + 10)),
                (255, 0, 0), 2)
        cv2.imshow('track', frame)
        cv2.imshow('birdseye view', birdframe)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
    cam.release()
    cv2.destroyAllWindows()
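Box and its calcDistance method are project-specific and not shown in this example. A purely hypothetical sketch, assuming distance is estimated from apparent box height with a pinhole-camera approximation (the constants are invented for illustration):

class Box:
    # Hypothetical reconstruction -- the original Box class is not shown.
    FOCAL_LENGTH_PX = 700.0                      # assumed focal length (pixels)
    REAL_HEIGHT_M = {'person': 1.7, 'car': 1.5}  # assumed real-world heights (m)

    def __init__(self, top, left, bot, right, label):
        self.top, self.left, self.bot, self.right = top, left, bot, right
        self.label = label
        self.distance = 0.0

    def calcDistance(self):
        # pinhole approximation: distance ~ f * real_height / pixel_height
        pixel_height = max(self.bot - self.top, 1)
        real_height = self.REAL_HEIGHT_M.get(self.label, 1.5)
        self.distance = self.FOCAL_LENGTH_PX * real_height / pixel_height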
Example #48
0
    def get_frame(self):
        start = time.time()
        success, image = self.video.read()
        end = time.time()
        seconds = end - start
        if seconds < 1.0 / self.fps:
            time.sleep(1.0 / self.fps - seconds)

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray1 = gray.copy()
        gray2 = gray.copy()
        gray1 = cv2.equalizeHist(gray1)
        gray2 = cv2.resize(gray2, (500, 500))
        gray2 = cv2.GaussianBlur(gray2, (21, 21), 0)
        vis = image.copy()
        rects = self.detect(gray1, self.cascade)
        self.draw_rects(vis, rects, (0, 255, 0))
        if self.pre_frame is None:
            self.pre_frame = gray2
        else:
            img_delta = cv2.absdiff(self.pre_frame, gray2)
            thresh = cv2.threshold(img_delta, 25, 255, cv2.THRESH_BINARY)[1]
            img1, contours, hierarchy = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for c in contours:
                if cv2.contourArea(c) < 1000:
                    continue
                else:
                    print("diff")
                    draw_str(vis, (20, 20), 'Have motion object!')
                    break

            self.pre_frame = gray2

        ret, jpeg = cv2.imencode('.jpg', vis)
        return jpeg.tobytes()  # tostring() is deprecated in favour of tobytes() in newer NumPy
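Note that the three-value unpacking of cv2.findContours above is the OpenCV 3.x signature; OpenCV 2.x and 4.x return two values. A common version-agnostic pattern, if portability matters:

res = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = res[-2]  # contours are second-from-last in every OpenCV version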
Example #49
0
def update(_=None):
    noise = cv2.getTrackbarPos('noise', 'fit line')
    n = cv2.getTrackbarPos('point n', 'fit line')
    r = cv2.getTrackbarPos('outlier %', 'fit line') / 100.0
    outn = int(n*r)

    p0, p1 = (90, 80), (w-90, h-80)
    img = np.zeros((h, w, 3), np.uint8)
    cv2.line(img, toint(p0), toint(p1), (0, 255, 0))

    if n > 0:
        line_points = sample_line(p0, p1, n-outn, noise)
        outliers = np.random.rand(outn, 2) * (w, h)
        points = np.vstack([line_points, outliers])
        for p in line_points:
            cv2.circle(img, toint(p), 2, (255, 255, 255), -1)
        for p in outliers:
            cv2.circle(img, toint(p), 2, (64, 64, 255), -1)
        func = getattr(cv2.cv, cur_func_name)  # OpenCV 2.x-era API; cv2.cv was removed in 3.x, where newer samples use getattr(cv2, 'DIST_...') names instead
        vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
        cv2.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255))

    draw_str(img, (20, 20), cur_func_name)
    cv2.imshow('fit line', img)
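toint and sample_line are small helpers from OpenCV's fitline.py sample; roughly:

import numpy as np

def toint(p):
    return tuple(map(int, p))

def sample_line(p1, p2, n, noise=0.0):
    # n points uniformly spread along the segment p1-p2, with Gaussian jitter
    p1 = np.float32(p1)
    t = np.random.rand(n, 1)
    return p1 + (np.float32(p2) - p1) * t + np.random.normal(size=(n, 2)) * noise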
Example #50
0
    def threadedProcess(self):

        rects = []
        if len(self.pendingWorker) > 0 and self.pendingWorker[0].ready():
            task = self.pendingWorker.popleft()
            frame, curTime = task.get()
            self.latency.update(clock() - curTime)

            draw_str(frame, (20, config.VIDEO_WIDTH - 20),
                     "Latency: %.1f ms" % (self.latency.value * 1000))
            draw_str(frame, (20, config.VIDEO_WIDTH - 35),
                     "FPS: %d" % (1 / self.frameInterval.value))
            #print("Latency %lf" % (self.latency.value*1000))
            #print("FPS: %d" % (1/self.frameInterval.value))
            self.outFrames.append(frame)
            #cv2.imshow('Processed Video', frame)
            #cv2.waitKey(1)
        if len(self.pendingWorker) < self.numThread:
            frame = self.stream.read()
            t = clock()
            self.frameInterval.update(t - self.lastFrameTime)
            self.lastFrameTime = t
            task = self.workerPool.apply_async(process, (copy.copy(frame), t))
            self.pendingWorker.append(task)
Example #51
0
    def run(self):
        while True:
            ret, frame = self.cam.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()
            if self.p0 is not None:
                p2, trace_status = checkedTrace(self.gray1, frame_gray,
                                                self.p1)

                self.p1 = p2[trace_status].copy()
                self.p0 = self.p0[trace_status].copy()
                self.gray1 = frame_gray

                if len(self.p0) < 4:
                    self.p0 = None
                    continue
                H, status = cv2.findHomography(self.p0, self.p1,
                                               (0, cv2.RANSAC)[self.use_ransac],
                                               10.0)
                h, w = frame.shape[:2]
                overlay = cv2.warpPerspective(self.frame0, H, (w, h))
                vis = cv2.addWeighted(vis, 0.5, overlay, 0.5, 0.0)

                for (x0, y0), (x1, y1), good in zip(self.p0[:, 0],
                                                    self.p1[:, 0],
                                                    status[:, 0]):
                    if good:
                        cv2.line(vis, (x0, y0), (x1, y1), (0, 128, 0))
                    cv2.circle(vis, (x1, y1), 2, (red, green)[good], -1)
                draw_str(vis, (20, 20), 'track count: %d' % len(self.p1))
                if self.use_ransac:
                    draw_str(vis, (20, 40), 'RANSAC')
            else:
                p = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
                if p is not None:
                    for x, y in p[:, 0]:
                        cv2.circle(vis, (x, y), 2, green, -1)
                    draw_str(vis, (20, 20), 'feature count: %d' % len(p))

            cv2.imshow('lk_homography', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
            if ch == ord(' '):
                self.frame0 = frame.copy()
                self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
                if self.p0 is not None:
                    self.p1 = self.p0
                    self.gray0 = frame_gray
                    self.gray1 = frame_gray
            if ch == ord('r'):
                self.use_ransac = not self.use_ransac
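checkedTrace, together with the red/green colour constants, is defined at module level in OpenCV's lk_homography.py sample. It validates points by forward-backward optical-flow consistency, reusing the lk_params dictionary shown earlier:

import cv2

red = (0, 0, 255)
green = (0, 255, 0)

def checkedTrace(img0, img1, p0, back_threshold=1.0):
    # track forward, then backward, and keep only points that land
    # back close to where they started
    p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
    status = d < back_threshold
    return p1, status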
Example #52
0
    def showLeftRightMost(self):
        x, y = self.center()

        y += 20

        # Performance mode: clamp the left/right-most markers to the center y when they fall below it
        if self.leftmost[1] > y:
            cv2.circle(frame, (self.leftmost[0], y), 5, (0, 0, 255), 3)
            draw_str(frame, (self.leftmost[0], y), str("left"))
        else:
            cv2.circle(frame, self.leftmost, 5, (0, 0, 255), 3)
            draw_str(frame, self.leftmost, str("left"))

        if self.rightmost[1] > y:
            cv2.circle(frame, (self.rightmost[0], y), 5, (0, 0, 255), 3)
            draw_str(frame, (self.rightmost[0], y), str("right"))
        else:
            cv2.circle(frame, self.rightmost, 5, (0, 0, 255), 3)
            draw_str(frame, self.rightmost, str("right"))
Example #53
0
def draw(rects, labelAndConf, frame):
    color = (0, 255, 0)  # rectangle colour (green)
    for (x, y, w, h), (label, confidence) in zip(rects, labelAndConf):
        pt1 = (int(x), int(y))
        pt2 = (int(x + w), int(y + h))
        cv2.rectangle(frame, pt1, pt2, color, 4)  # Draws the Rect
        if confidence <= 70 and confidence != 0:
            if label == 1:
                draw_str(frame, (int(x), int(y - 5)),
                         "-VietDang-%d" % (confidence))
            elif label == 2:
                draw_str(frame, (int(x), int(y - 5)),
                         "-NgoDat-%d" % (confidence))
        else:
            draw_str(frame, (int(x), int(y - 5)), "--")
    draw_str(frame, (20, config.VIDEO_WIDTH - 50), time.ctime())
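The labelAndConf pairs passed to draw() would typically come from a trained face recognizer. A hedged usage sketch, assuming cv2.face's LBPH recognizer and a model file named trainer.yml (both assumptions, not shown in the original):

import cv2

recognizer = cv2.face.LBPHFaceRecognizer_create()  # requires opencv-contrib
recognizer.read('trainer.yml')                     # hypothetical model path

# rects here are (x, y, w, h) detections on the grayscale frame;
# predict() returns a (label, confidence) pair per face
labelAndConf = [recognizer.predict(gray[y:y + h, x:x + w])
                for (x, y, w, h) in rects]
draw(rects, labelAndConf, frame)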
Example #54
0
def main():
    import sys

    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cap = video.create_capture(fn)

    def process_frame(frame, t0):
        # some intensive computation...
        frame = cv.medianBlur(frame, 19)
        frame = cv.medianBlur(frame, 19)
        return frame, t0

    threadn = cv.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    pending = deque()

    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:
        while len(pending) > 0 and pending[0].ready():
            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "threaded      :  " + str(threaded_mode))
            draw_str(res, (20, 40),
                     "latency        :  %.1f ms" % (latency.value * 1000))
            draw_str(
                res, (20, 60),
                "frame interval :  %.1f ms" % (frame_interval.value * 1000))
            cv.imshow('threaded video', res)
        if len(pending) < threadn:
            _ret, frame = cap.read()
            t = clock()
            frame_interval.update(t - last_frame_time)
            last_frame_time = t
            if threaded_mode:
                task = pool.apply_async(process_frame, (frame.copy(), t))
            else:
                task = DummyTask(process_frame(frame, t))
            pending.append(task)
        ch = cv.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break

    print('Done')
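DummyTask is the trivial stand-in used in non-threaded mode so that both paths share the same pending-queue interface; from the same OpenCV threading sample:

class DummyTask:
    def __init__(self, data):
        self.data = data

    def ready(self):
        return True  # the result was computed synchronously

    def get(self):
        return self.data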
Example #55
0
    def update(dummy=None):
        sz = cv.getTrackbarPos('op/size', 'morphology')
        iters = cv.getTrackbarPos('iters', 'morphology')
        opers = cur_mode.split('/')
        if len(opers) > 1:
            sz = sz - 10
            op = opers[sz > 0]
            sz = abs(sz)
        else:
            op = opers[0]
        sz = sz*2+1

        str_name = 'MORPH_' + cur_str_mode.upper()
        oper_name = 'MORPH_' + op.upper()
        st = cv.getStructuringElement(getattr(cv, str_name), (sz, sz))
        res = cv.morphologyEx(img, getattr(cv, oper_name), st, iterations=iters)

        draw_str(res, (10, 20), 'mode: ' + cur_mode)
        draw_str(res, (10, 40), 'operation: ' + oper_name)
        draw_str(res, (10, 60), 'structure: ' + str_name)
        draw_str(res, (10, 80), 'ksize: %d  iters: %d' % (sz, iters))
        cv.imshow('morphology', res)
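For context, the surrounding morphology.py sample wires update() to two trackbars roughly like this (window name and ranges as in the upstream sample):

import cv2 as cv

cv.namedWindow('morphology')
cv.createTrackbar('op/size', 'morphology', 12, 20, update)
cv.createTrackbar('iters', 'morphology', 1, 10, update)
update()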
Example #56
0
  def callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
      print(e)

    frame_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    vis = cv_image.copy()

    if len(self.tracks) > 0:
        img0, img1 = self.prev_gray, frame_gray
        p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
        p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
        p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
        d = abs(p0-p0r).reshape(-1, 2).max(-1)
        good = d < 1
        new_tracks = []
        for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
            if not good_flag:
                continue
            tr.append((x, y))
            if len(tr) > self.track_len:
                del tr[0]
            new_tracks.append(tr)
            cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)
        self.tracks = new_tracks
        cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
        draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))
        vis, size_l, size_r = self.calc_mean(vis, p0, p1, good)
        draw_str(vis, (20, 40), 'Length left: %f' % size_l)
        draw_str(vis, (20, 60), 'Length right: %f' % size_r)
        self.ctrl_pub.publish(self.reactive_controller(size_l, size_r))

    if self.frame_idx % self.detect_interval == 0:
        mask = np.zeros_like(frame_gray)
        mask[:] = 255
        for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
            cv2.circle(mask, (x, y), 5, 0, -1)
        p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
        if p is not None:
            for x, y in np.float32(p).reshape(-1, 2):
                self.tracks.append([(x, y)])


    self.frame_idx += 1
    self.prev_gray = frame_gray

    cv2.imshow("Image window", vis)
Example #57
0
    def OpenCVCode(self, imgRGB, depth32f, frameCount):
        frame_gray = cv.cvtColor(imgRGB, cv.COLOR_BGR2GRAY)
        vis = imgRGB.copy()
        if self.p0 is not None:
            p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)

            self.p1 = p2[trace_status].copy()
            self.p0 = self.p0[trace_status].copy()
            self.gray1 = frame_gray

            if len(self.p0) < 4:
                self.p0 = None
                return vis, None  # too few points left to estimate a homography
            H, status = cv.findHomography(self.p0, self.p1,
                                          (0, cv.RANSAC)[self.use_ransac],
                                          10.0)
            h, w = imgRGB.shape[:2]
            overlay = cv.warpPerspective(self.frame0, H, (w, h))
            vis = cv.addWeighted(vis, 0.5, overlay, 0.5, 0.0)

            for (x0, y0), (x1, y1), good in zip(self.p0[:, 0], self.p1[:, 0],
                                                status[:, 0]):
                if good:
                    cv.line(vis, (x0, y0), (x1, y1), (0, 128, 0))
                cv.circle(vis, (x1, y1), 2, (red, green)[good], -1)
            draw_str(vis, (20, 20), 'track count: %d' % len(self.p1))
            if self.use_ransac:
                draw_str(vis, (20, 40), 'RANSAC')
        else:
            p = cv.goodFeaturesToTrack(frame_gray, **feature_params)
            if p is not None:
                for x, y in p[:, 0]:
                    cv.circle(vis, (x, y), 2, green, -1)
                draw_str(vis, (20, 20), 'feature count: %d' % len(p))

        cv.imshow('lk_homography', vis)
        ch = cv.waitKey(1)
        if ch == ord(' '):
            self.frame0 = imgRGB.copy()
            self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params)
            if self.p0 is not None:
                self.p1 = self.p0
                self.gray0 = frame_gray
                self.gray1 = frame_gray
        if ch == ord('r'):
            self.use_ransac = not self.use_ransac
        return vis, None
Example #58
0
def trackGoals():
    print('tracking')

    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    pending = deque()

    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:
        while len(pending) > 0 and pending[0].ready():
            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "threaded      :  " + str(threaded_mode))
            draw_str(res, (20, 40),
                     "latency        :  %.1f ms" % (latency.value * 1000))
            draw_str(
                res, (20, 60),
                "frame interval :  %.1f ms" % (frame_interval.value * 1000))
            cv2.imshow('threaded video', res)
        if len(pending) < threadn:
            ret, frame = cap.read()
            if frame is not None:
                t = clock()
                frame_interval.update(t - last_frame_time)
                last_frame_time = t
                if threaded_mode:
                    task = pool.apply_async(process_frame, (frame.copy(), t))
                else:
                    task = DummyTask(process_frame(frame, t))
                pending.append(task)
        ch = cv2.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break
    cv2.destroyAllWindows()
Example #59
0
    def visualise(self, frame):
        """
        Visualise tracks and optical flow
        """
        # print statistics in top left corner
        draw_str(frame, (20, 40), 'passed people: %d' % self.count_passed)
        draw_str(frame, (20, 60),
                 'fps: %d' % (self.frame_idx / self.total_time))
        if self.detector.other_objects:
            draw_str(
                frame, (20, 80),
                'other objects: %s' % ', '.join(self.detector.other_objects))
        # draw people's tracks
        for track in self.people:
            # choose color from color palette
            color = randColor(hash(track[0]))
            # draw rectangle for each detection in track
            for roi, _ in track:
                cv.rectangle(frame, roi.tl(), roi.br(), color)

        # show visualisation in window
        cv.imshow("counter", frame)
Example #60
0
    try:
        video_src = video_src[0]
    except:
        video_src = 1
    args = dict(args)
    cascade_fn = args.get('--cascade', ".haarcascade_frontalface_alt2.xml")
    cascade = cv2.CascadeClassifier(cascade_fn)

    cam = create_capture(video_src)

    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        for x1, y1, x2, y2 in rects:
            roi = gray[y1:y2, x1:x2]
            vis_roi = vis[y1:y2, x1:x2]
        dt = clock() - t

        draw_str(vis, (20, 20), 'Processing time: %.1f ms' % (dt * 1000))
        cv2.imshow('Face detection', vis)
        cv2.imshow('Gray detection', gray)

        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()