Example No. 1
    def init_variable(self):
        # Per-sensor buffers holding the most recent where_msgs from each Kinect.
        self.kinect1_tmp = where_msgs()
        self.kinect2_tmp = where_msgs()
        self.kinect3_tmp = where_msgs()
        self.kinect4_tmp = where_msgs()
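These per-sensor buffers are presumably filled by subscriber callbacks that also stamp an arrival time. A minimal sketch of that wiring, assuming a hypothetical topic name '/sn_kinect1/where' (the topic and the class name are illustrative, not taken from the original node):

import rospy
from time import time
# where_msgs is imported from its ROS message package, as in the examples above.

class KinectBuffer(object):
    '''Sketch: cache the most recent where_msgs from one sensor and its arrival time.'''

    def __init__(self, topic='/sn_kinect1/where'):
        # The topic name is an assumption, not taken from the original node.
        self.data = where_msgs()
        self.stamp = int(time())
        rospy.Subscriber(topic, where_msgs, self.callback)

    def callback(self, msg):
        self.data = msg            # latest detections from this sensor
        self.stamp = int(time())   # arrival time in whole seconds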
Example No. 2
	def timer_callback(self, event):
		'''Clear detections that have not been refreshed recently (remove after-images).'''
		# kinectN_timer holds whole seconds, so the 0.1 threshold in effect
		# expires a buffer about one second after its last update.
		if int(time()) - self.kinect1_timer > 0.1:
			self.kinect1_dat = where_msgs()
		if int(time()) - self.kinect2_timer > 0.1:
			self.kinect2_dat = where_msgs()
		if int(time()) - self.kinect3_timer > 0.1:
			self.kinect3_dat = where_msgs()
		if int(time()) - self.kinect4_timer > 0.1:
			self.kinect4_dat = where_msgs()

		# Fuse the current depth frame with the detections of each ready sensor.
		if self.depth_kinect1_ready == 1:
			self.gather_hd_roi(self.depth1, self.kinect1_dat, self.psn1[3], 1)
		if self.depth_kinect2_ready == 1:
			self.gather_hd_roi(self.depth2, self.kinect2_dat, self.psn2[3], 2)
		if self.depth_kinect3_ready == 1:
			self.gather_hd_roi(self.depth3, self.kinect3_dat, self.psn3[3], 3)
		if self.depth_kinect4_ready == 1:
			self.gather_hd_roi(self.depth4, self.kinect4_dat, self.psn4[3], 4)
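The stale-buffer pattern used above can be isolated into a small helper; a minimal sketch (the StaleGuard name and the 1.0 s window are assumptions for illustration, not part of the original node):

from time import time

class StaleGuard(object):
    '''Sketch: drop a cached detection once it has not been refreshed for timeout_s.'''

    def __init__(self, timeout_s=1.0):
        self.timeout_s = timeout_s
        self.last_update = time()
        self.data = None

    def update(self, data):
        self.data = data
        self.last_update = time()

    def get(self):
        # Return None once the cached data is older than the timeout.
        if time() - self.last_update > self.timeout_s:
            self.data = None
        return self.data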
Example No. 3
	def init_variable(self):
		self.verbose = rospy.get_param('verbose', True)
		# Detection-size thresholds and display options (numeric defaults).
		self.scale = rospy.get_param('scale', 0.6)
		self.szW = rospy.get_param('szW', 50)
		self.szH = rospy.get_param('szH', 50)
		self.szWH = rospy.get_param('szWH', 1200)
		self.szTOP = rospy.get_param('szTOP', 2.3)
		self.display = rospy.get_param('display', 2)
		print '\nparam: verbose {}, scale {}, szW {}, szH {}, szWH {}, szTOP {}'.format(self.verbose, self.scale, self.szW, self.szH, self.szWH, self.szTOP)
		
		self.psn1 = rospy.get_param("/psn_unit1")
		self.psn2 = rospy.get_param("/psn_unit2")
		self.psn3 = rospy.get_param("/psn_unit3")
		self.psn4 = rospy.get_param("/psn_unit4")

		self.depth1 = None
		self.depth2 = None
		self.depth3 = None
		self.depth4 = None
		self.depth_roi_mask_image1 = None
		self.depth_roi_mask_image2 = None
		self.depth_roi_mask_image3 = None
		self.depth_roi_mask_image4 = None
		self.depth_crop = None
		self.counter = 0
		
		self.kinect1_timer = int(time())
		self.kinect2_timer = int(time())
		self.kinect3_timer = int(time())
		self.kinect4_timer = int(time())
		self.kinect1_dat = where_msgs()
		self.kinect2_dat = where_msgs()
		self.kinect3_dat = where_msgs()
		self.kinect4_dat = where_msgs()
		self.depth_kinect1_ready = 0
		self.depth_kinect2_ready = 0
		self.depth_kinect3_ready = 0
		self.depth_kinect4_ready = 0
		self.depth_roi_mask_image1_ready = 0
		self.depth_roi_mask_image2_ready = 0
		self.depth_roi_mask_image3_ready = 0
		self.depth_roi_mask_image4_ready = 0

		'''
		### kalman tracking parameter setting for tracking ROI
#		sigma_q_roi = 67
#		sigma_r_roi = 60
#		sigma_p0_roi = 55
#		sigma_v_roi = 2
#		p_d_roi = 0.25
#		p_s_roi = 0.59
#		merge_thresh_roi = 30
#		F_roi = [[1, 0, 0, 0, 1, 0, 0, 0], 
#                [0, 1, 0, 0, 0, 1, 0 ,0], 
#                [0, 0, 1, 0, 0, 0 ,1, 0], 
#                [0, 0, 0, 1, 0, 0, 0, 1],
#                [0, 0, 0, 0, 1, 0, 0 ,0],                 
#                [0, 0, 0, 0, 0, 1, 0 ,0], 
#                [0, 0, 0, 0, 0, 0 ,1, 0], 
#                [0, 0, 0, 0, 0, 0, 0, 1]]
#		v_roi = math.pow(sigma_v_roi, 2)
#		q_roi = math.pow(sigma_q_roi, 2)
#		Q_roi = [[q_roi, 0, 0, 0, 0, 0, 0, 0], 
#                [0, q_roi, 0, 0, 0, 0, 0 ,0], 
#                [0, 0, q_roi, 0, 0, 0, 0, 0], 
#                [0, 0, 0, q_roi, 0, 0, 0, 0],
#                [0, 0, 0, 0, v_roi, 0, 0, 0],                 
#                [0, 0, 0, 0, 0, v_roi, 0 ,0], 
#                [0, 0, 0, 0, 0, 0 ,v_roi, 0], 
#                [0, 0, 0, 0, 0, 0, 0, v_roi]]
#		H_roi = [[1, 0, 0, 0, 0, 0, 0, 0], 
#                [0, 1, 0, 0, 0, 0, 0 ,0], 
#                [0, 0, 1, 0, 0, 0 ,0, 0], 
#                [0, 0, 0, 1, 0, 0, 0, 0]]
#		R_roi = [[math.pow(sigma_r_roi, 2), 0, 0, 0], [0, math.pow(sigma_r_roi, 2), 0, 0], [0, 0, math.pow(sigma_r_roi, 2), 0], [0, 0, 0, math.pow(sigma_r_roi, 2)]]
#		p_roi = math.pow(sigma_p0_roi, 2)
#		P0_roi = [[p_roi, 0, 0, 0, 0, 0, 0, 0], 
#                [0, p_roi, 0, 0, 0, 0, 0 ,0], 
#                [0, 0, p_roi, 0, 0, 0, 0, 0], 
#                [0, 0, 0, p_roi, 0, 0, 0, 0],
#                [0, 0, 0, 0, v_roi, 0, 0, 0],                 
#                [0, 0, 0, 0, 0, v_roi, 0 ,0], 
#                [0, 0, 0, 0, 0, 0 ,v_roi, 0], 
#                [0, 0, 0, 0, 0, 0, 0, v_roi]]
#		clutter_intensity_roi = 0.0
#		self.f_gmphd_roi = gmphdroi.GMPHD([], p_s_roi, p_d_roi, F_roi, Q_roi, H_roi, R_roi, P0_roi, clutter_intensity_roi, merge_thresh_roi)

		'''
		### kalman tracking parameter setting
		sigma_q = 0.5 #0.2
		sigma_r = 0.7 #0.1
		sigma_p0 = 0.7
		p_d = 0.2
		p_s = 0.99
		merge_thresh = 0.1
		F = [[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]]
		Q = [[math.pow(sigma_q, 2), 0, 0, 0], [0, math.pow(sigma_q, 2), 0, 0], [0, 0, 0.001, 0], [0, 0, 0, 0.001]]
		H = [[1, 0, 0, 0], [0, 1, 0, 0]]
		R = [[math.pow(2*sigma_r, 2), 0], [0, math.pow(2*sigma_r, 2)]]
		P0 = [[math.pow(sigma_p0, 2), 0, 0, 0], [0, math.pow(sigma_p0, 2), 0, 0], [0, 0, 0.01, 0], [0, 0, 0, 0.01]]
		clutter_intensity = 0.0
		self.born_components = []
		#self.f_gmphd = gmphd.GMPHD([], p_s, p_d, F, Q, H, R, P0, clutter_intensity, merge_thresh)
		########################

		self.isBusy = True
		rospy.sleep(1)
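The constant-velocity model built above (state [x, z, vx, vz], measurement [x, z]) can also be expressed with numpy arrays; a minimal sketch using the same sigma values as defaults (the build_cv_model name is illustrative):

import numpy as np

def build_cv_model(sigma_q=0.5, sigma_r=0.7, sigma_p0=0.7):
    '''Sketch: constant-velocity state-space matrices for 2-D position tracking.'''
    F = np.array([[1., 0., 1., 0.],    # x  <- x + vx
                  [0., 1., 0., 1.],    # z  <- z + vz
                  [0., 0., 1., 0.],    # vx <- vx
                  [0., 0., 0., 1.]])   # vz <- vz
    Q = np.diag([sigma_q**2, sigma_q**2, 0.001, 0.001])      # process noise
    H = np.array([[1., 0., 0., 0.],
                  [0., 1., 0., 0.]])                         # observe position only
    R = np.diag([(2*sigma_r)**2, (2*sigma_r)**2])            # measurement noise
    P0 = np.diag([sigma_p0**2, sigma_p0**2, 0.01, 0.01])     # initial covariance
    return F, Q, H, R, P0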
Example No. 4
	def gather_hd_roi(self, data1, data2, data3, psn):
		'''Estimate a 3-D head location for each ROI reported by one Kinect.

		data1: depth image, data2: where_msgs detections,
		data3: sensor tilt angle (psn_unit[3]), psn: sensor index.
		'''
		fusion_mutex.acquire()
		data_depth = data1
		data_depth[np.isnan(data_depth)] = np.median(data_depth)	# replace NaNs with the median depth
		data_roi = data2

		rows = 480
		cols = 640

		szWH = self.szWH
		szW = self.szW
		szH = self.szH
		szTOP = self.szTOP

		### update where_msgs info in new variables
		msg_detect = where_msgs()
		msg_detect_z0 = where_msgs()
		msg_detect_z0.header = data_roi.header
		msg_detect_z0.total = 0

		if data_roi.total>0:
			msg_detect.header=data_roi.header
			msg_detect.total=data_roi.total
			msg_detect.cam_id=data_roi.cam_id
			msg_detect.roi=data_roi.roi
			msg_detect.location=[]
			msg_detect.user_id=[]

		cnt=0
#		print msg_detect.roi, '\n\n', data_roi.location

		if len(msg_detect.roi)>0:
			### extract elements of roi(x,y,w,h) in msg_detect.roi
			for roi in msg_detect.roi:
				crop= data_depth[roi.y_offset+10:roi.y_offset+roi.height-10, roi.x_offset+10:roi.x_offset+roi.width-10]
				x = crop.reshape(-1) # 1-dimension

				### filter out data from boundary condition
				ind = np.nonzero(x<400)
				x2 = np.delete(x, ind)
				ind = np.nonzero(x2>4200)
				x3 = np.delete(x2, ind)
				if len(x3) >0:
					self.depth_crop = x3
				else:
					x3=self.depth_crop

				### remove noises by adopting median value
				ret = cv2.medianBlur(x3, 3)
				dist = np.median(ret)

				if math.isnan(dist):
					x3 =self.depth_crop
					ret = cv2.medianBlur(x3, 3)
					dist = np.median(ret)
				else:
					self.depth_crop = x3
					
				'''
				ahist, bin_edge = np.histogram(x3,100)
				ahist =scipy.signal.medfilt(ahist,kernel_size=3)
				ii=np.argmax(ahist)
				dist=bin_edge[ii]
				'''
				
				### estimate the head location (pinhole back-projection of the ROI centre)
				HFOV = 62.0	# horizontal field of view in degrees (alternative: 57.0)
				VFOV = 48.6	# vertical field of view in degrees (alternative: 43.2)
				fx = cols / (2.*np.tan(HFOV/2*np.pi/180))
				xw = (cols/2 - (roi.x_offset+roi.width/2)) * dist / fx

				fy = rows / (2.*np.tan(VFOV/2*np.pi/180))
				ycom = -(rows/2 - (roi.y_offset+roi.height/2)) * dist / fy
				yw = -(rows/2 - roi.y_offset) * dist / fy

				zw = dist - (yw - ycom)*sin(data3)	# compensate for the sensor tilt

				OFFSET = 0.1	# metres
				head_x = round(xw/1000 + OFFSET, 2)
				head_y = round(-yw/1000, 2)
				head_z = round(zw/1000 - 2*OFFSET, 2)

				#print data_roi.cam_id[cnt].data, ': Mea x({0:.2f}) y({1:.2f}) z({2:.2f})'.format(data_roi.location[cnt].x, data_roi.location[cnt].y, data_roi.location[cnt].z)
				#print data_roi.cam_id[cnt].data, ': Est x({0:.2f}) y({1:.2f}) z({2:.2f})\n'.format(head_x, head_y, head_z)

				if False:#data_roi.location[cnt].z==0 and not math.isnan(float(head_z)):
					loc = Point32(head_x, head_y, head_z)
					msg_detect.user_id.append(-1)
					msg_detect.location.append(loc)
					#msg_detect_z0.location.append(loc)
					#msg_detect_z0.total = msg_detect_z0.total+1
				else:
					loc = Point32(data_roi.location[cnt].x, data_roi.location[cnt].y, data_roi.location[cnt].z)
					msg_detect.user_id.append(0)
					msg_detect.location.append(loc)                
						
				cnt= cnt+1


		'''
		# data association ?
#		msg_track = where_msgs()
#		msg_track.header=msg_detect.header
#		msg_track.total=msg_detect.total
#		msg_track.cam_id=msg_detect.cam_id
#		msg_track.roi=msg_detect.roi
#		msg_track.location=msg_detect.location
#		msg_track.user_id=msg_detect.user_id
#		pscale = 160

		aa = np.zeros((msg_detect.total, 2))
		if psn == 1:
			for ii, loc in enumerate(msg_detect.location):
				if not np.isnan(loc.x):
					aa[ii] = [loc.x, loc.z]
					#print ii,': ', aa

			aa = np.transpose(aa)
#			print '\nNo.LOC({}):\n{}\n---------'.format(aa.shape[1], aa)
		
			plt.clf()	# clear current axis
			#verts =[(0,0), (27,0)]
			#codes =[Path.MOVETO, Path.LINETO]
			#path = Path(verts, codes)
			#patch = patches.PathPatch(path, facecolor='green', lw=2)
			#ax.add_patch(patch)

			### create list (4 x number of aa columns = aa.shape[1])
			bb=np.zeros((4, aa.shape[1]))
			bb[0]=aa[0]	# loc.x
			bb[1]=aa[1] # loc.z

			self.f_gmphd.run_iteration(bb)

			gms = self.f_gmphd.gm
			index=0
			for gm in gms:
				# http://stackoverflow.com/questions/9215658/plot-a-circle-with-pyplot
				#print 'ID({0}): Loc({1:.2f}, {2:.2f}), W({3:.2f}), Detect: {4}'.format(gm.id, gm.mean[0,0], gm.mean[1,0], gm.weight, gm.detect)
				#print 'cov', gm.cov,'\n'
				#print 'ID({0})- cov: {1:.2f}, {2:.2f}, {3:.2f}, {4:.2f}'.format(gm.id, gm.cov[0][0], gm.cov[0][1], gm.cov[0][2], gm.cov[0][3])

				#if gm.detect and len(msg_detect.location)>0  :# > 0.1:
				if gm.detect:
					plt.plot(gm.mean[0,0],4-gm.mean[1,0], 'ro')
					if isinstance(gm.mean[0,0], float) == True and isinstance(gm.mean[1,0], float)== True:
						mintrack=[]
						for n in range(len(msg_detect.location)):
							mintrack.append(sqrt((msg_detect.location[n].x - gm.mean[0,0])**2 + (msg_detect.location[n].z - gm.mean[1,0])**2))
							#print '{0} MD({1:.2f},{2:.2f})\tDET({3:.2f},{4:.2f})\t= {5}'.format(n, gm.mean[0,0], gm.mean[1,0], msg_detect.location[n].x, msg_detect.location[n].z, mintrack)
						if len(mintrack)>0:
							#print min(mintrack), mintrack.index(min(mintrack)), msg_detect.location[mintrack.index(min(mintrack))]
							if min(mintrack)<0.5 and msg_detect.location[mintrack.index(min(mintrack))].y==0.5:
								msg_detect.location[mintrack.index(min(mintrack))].x=(gm.mean[0,0])
								msg_detect.location[mintrack.index(min(mintrack))].z=(gm.mean[1,0])

			N=50
			area =np.pi*(15*np.random.rand(N))**2 # 0 to 5 point radiuses
			#plt.scatter(aa[0], 4-aa[1], s=area, alpha=0.5)
			plt.plot(aa[0], 4-aa[1], 'b+')
			plt.axis([-3,3,0,4])
			plt.draw()
			#plt.pause(0.03)
		'''

		### publish human location

		msg_hd = where_msgs()
		msg_hd.header = msg_detect.header
		msg_hd.total = 0

		if msg_detect.total>0:
			for n in range(msg_detect.total):
				### filter out human data detected with boundary condition
				if msg_detect.roi[n].width*msg_detect.roi[n].height>szWH and msg_detect.roi[n].width>szW and msg_detect.roi[n].height>szH :
					if msg_detect.location[n].y < szTOP :
						msg_hd.total = msg_hd.total+1
						msg_hd.cam_id.append(msg_detect.cam_id[n])
						msg_hd.roi.append(msg_detect.roi[n])
						msg_hd.location.append(msg_detect.location[n])
						msg_hd.user_id.append(msg_detect.user_id[n])
						#print n, ':({0:.2f},{1:.2f},{2:.2f})-({3:d},{4:d})'.format(msg_detect.location[n].x, msg_detect.location[n].y, msg_detect.location[n].z,msg_detect.roi[n].x_offset, msg_detect.roi[n].y_offset)
					else:
						print 'detect the object over {0:.1f}m'.format(szTOP)
				#else:
					#print 'detect the object: W{0:.1f}, H{1:.1f} over (X{0:.1f}, Y{1:.1f}, Z{2:.1f})'.format(msg_detect.roi[n].width, msg_detect.roi[n].height, msg_detect.location[n].x, msg_detect.location[n].y, msg_detect.location[n].z)

		if msg_hd.total>0:
			self.pub_detection.publish(msg_hd)

		fusion_mutex.release()
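The per-ROI geometry in gather_hd_roi is a pinhole back-projection of the ROI centre using the median depth; a minimal standalone sketch of that conversion (the roi_to_xyz name is illustrative; FOV and resolution follow the values used above):

import numpy as np

def roi_to_xyz(x_center_px, y_center_px, dist_mm,
               cols=640, rows=480, hfov_deg=62.0, vfov_deg=48.6):
    '''Sketch: convert an ROI centre pixel plus depth (mm) to metric camera coordinates.'''
    fx = cols / (2.0 * np.tan(np.radians(hfov_deg) / 2.0))   # focal length in pixels
    fy = rows / (2.0 * np.tan(np.radians(vfov_deg) / 2.0))
    xw = (cols / 2.0 - x_center_px) * dist_mm / fx           # lateral offset from the optical axis
    yw = -(rows / 2.0 - y_center_px) * dist_mm / fy          # vertical offset from the optical axis
    return xw / 1000.0, yw / 1000.0, dist_mm / 1000.0        # metres

For example, roi_to_xyz(320, 240, 2500) yields a point roughly 2.5 m straight in front of the sensor.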
Example No. 5
    def timer_callback(self, event):
        fusion_mutex.acquire()
        data = self.kinect2_tmp
        if not self.counter:  # first callback: seed prev_msg with the raw detections
            self.prev_msg = data
        self.counter += 1
        if not self.isBusy:
            cv_image_rgb = self.rgb_image            
            font = cv2.FONT_HERSHEY_SIMPLEX  
#            (rows,cols,channels) = self.depth_image.shape
            rows =480
            cols=640              
            #publish data in where_msgs
            msg_detect = where_msgs()
            if data.total > 0:
                if data.cam_id[0].data == 'sn_kinect_2':
                    msg_detect.header = data.header
                    msg_detect.total = data.total
                    msg_detect.cam_id = data.cam_id
                    msg_detect.roi = data.roi
                    msg_detect.location = []
                    msg_detect.user_id = []
            index=0
            
            if len(msg_detect.roi)>0:
		        for roi in msg_detect.roi:
		            #dc = self.depth_image[roi.x_offset+roi.width/2, roi.y_offset+roi.height/2]
		            
	#                if roi.height > roi.width*1.5:
	#                    roi.height = int(roi.width*1.5)
								
		            crop = self.depth_image[roi.y_offset+10:roi.y_offset+roi.height-10, roi.x_offset+10:roi.x_offset+roi.width-10]
		            x = crop.reshape(-1)
		            ind = np.nonzero(x<400)
		            ind = np.nonzero(x<100/roi.width*1.5)
		            x2 = np.delete(x, ind)
		            ind = np.nonzero(x2>5000)
		            x3 = np.delete(x2, ind)
		            dist = np.median(x3)
		            '''
		            ahist, bin_edge = np.histogram(x3,100)
		            ahist = scipy.signal.medfilt(ahist,kernel_size=3)
		            ii = np.argmax(ahist)
		            dist = bin_edge[ii]
		            '''



		            #convert from pixel to x-y-z
		            fy = rows / (2 * np.tan(43.2/2*np.pi/180))
		            yw = -(rows/2 - (roi.y_offset+roi.height/2)) *dist / fy
		            
		            fx = cols / (2 * np.tan(57.0/2*np.pi/180))
		            xw = (cols/2 - (roi.x_offset+roi.width/2)) *dist / fx
		           
		            #print 'Person', int(xw), int(yw), int(dist)
		            
		            #loc = Point32(xw/1000, yw/1000, dist/1000)
		            if data.location[index].z==0:
		            	loc = Point32(xw/1000, 0.5, dist/1000)
		            	msg_detect.user_id.append(-1)
		            else:
		            	loc = Point32(data.location[index].x, data.location[index].y, data.location[index].z)
		            	msg_detect.user_id.append(0)                
		            msg_detect.location.append(loc)
		            index=index+1
#            print "republish message detection",msg_detect.location
            if len(msg_detect.roi):
				for roi in msg_detect.roi:
					if roi.width*roi.height>1000:
						cv2.rectangle(cv_image_rgb, (roi.x_offset, roi.y_offset), (roi.x_offset+roi.width, roi.y_offset+roi.height), (0, 255, 0), 2)
            self.prev_msg = msg_detect

            self.isBusy = True            
            self.where_pub.publish(msg_detect)         
            #self.depth_image = self.new_depth
            
            
            if 1:#not self.isBusy:
				#cv_image = self.rgb_image
								
				cv_image = cv2.imread("/home/psn2/repository//psn_unit/sp_hl_hd_op/convertwhere/floor.jpg")
				font = cv2.FONT_HERSHEY_SIMPLEX
#				msg_tracking = data
				msg_tracking = where_msgs()
				msg_tracking.header=msg_detect.header
				msg_tracking.total=msg_detect.total
				msg_tracking.cam_id=msg_detect.cam_id
				msg_tracking.roi=msg_detect.roi
				msg_tracking.location=msg_detect.location
				msg_tracking.user_id=msg_detect.user_id
	#			msg.location = []
	#			msg.user_id = []
				pscale = 160
				aa = np.zeros((msg_tracking.total, 2))
				for ii, loc in enumerate(msg_tracking.location):
					if not np.isnan(loc.x):
						aa[ii] = [loc.x, loc.z]
						
						cv2.circle(cv_image, ((int)(loc.x*pscale)+150, (int)(loc.z*pscale-pscale)), 3, (255, 0, 0), 2) 
#					print 'Person', loc.x,loc.z
				aa = np.transpose(aa)
				bb=np.zeros((4,aa.shape[1]))
				bb[0]=aa[0]
				bb[1]=aa[1]
				self.f_gmphd.run_iteration(bb)
				if self.verbose:
					gms = self.f_gmphd.gm
					index=0

					for gm in gms:
						if gm.detect and len(msg_tracking.location)>0  :# > 0.1:
							cv2.circle(cv_image, (int(gm.mean[0,0]*pscale+150), int(gm.mean[1,0]*pscale-pscale)), 3, (0, 255, 0), 2)
							#cv2.rectangle(cv_image, (x, y), (x2, y2), (0, 255, 0), 2)
							if isinstance(gm.mean[0,0], float) == True and isinstance(gm.mean[1,0], float)== True:
								msg_tracking.location[msg_tracking.total-1-index].x=(gm.mean[0,0])
								msg_tracking.location[msg_tracking.total-1-index].z=(gm.mean[1,0])
							
							index=index+1
#							cv2.putText(cv_image, str(gm.id) +'-{:.2f}'.format(gm.weight), (int(gm.mean[0,0]*pscale)+150, int(gm.mean[1,0]*pscale-pscale)), font ,0.5, (0, 0, 255), 2)
#					
#					cv2.imshow('Tracking: '+self.sensor_name, cv_image)
#					cv2.waitKey(3)
				
				
#				if self.verbose: 
#					cv2.rectangle(cv_image, (roi.x_offset, roi.y_offset), (roi.x_offset+roi.width, roi.y_offset+roi.height), (0, 255, 0), 2)
#					cv2.putText(cv_image, 'd:'+'{:.2f}'.format(float(dist)/1000), (roi.x_offset, roi.y_offset), font ,0.5, (255, 0, 0), 2)
				
##tracking ROI by kalman
#				aa_roi = np.zeros((msg_tracking.total, 8))
#				for ii, roi in enumerate(msg_tracking.roi):
#					aa_roi[ii] = [roi.x_offset, roi.y_offset, roi.x_offset+roi.width, roi.y_offset+roi.height, 0, 0, 0, 0]
#				if not len(aa_roi):
#					aa_roi = np.random.rand(1, 8)*10000

#				if len(aa_roi):
#					bb_roi = np.transpose(aa_roi)
#					tic = time()
#					self.f_gmphd_roi.run_iteration(bb_roi)
#				if self.verbose:
#					gms_roi = self.f_gmphd_roi.gm
#					index=0
#					for gm in gms_roi:
#						if gm.detect:# > 0.1:
#							if isinstance(msg_tracking.location[msg_tracking.total-1-index].x, float)== True :
#								pos = "x:"+"{0:.2f}".format(msg_tracking.location[msg_tracking.total-1-index].x) + ",z:" + "{0:.2f}".format(msg_tracking.location[msg_tracking.total-1-index].z)

#							
##							if isinstance((gm.mean[0]), int)== True:
#							print"((gm.mean[2])-(gm.mean[0]))*((gm.mean[3])-(gm.mean[1]))",((gm.mean[2])-(gm.mean[0]))*((gm.mean[3])-(gm.mean[1]))
#							if ((gm.mean[2])-(gm.mean[0]))*((gm.mean[3])-(gm.mean[1]))>1000:
#								cv2.rectangle(cv_image_rgb, (int(gm.mean[0]), int(gm.mean[1])), (int(gm.mean[2]), int(gm.mean[3])), (0, 0, 255), 2)                        
#								center_name = (int(gm.mean[0]), (int(gm.mean[1])+15))
#								cv2.putText(cv_image_rgb, pos,center_name, font ,0.5, (255, 0, 0), 2)
#							if isinstance((gm.mean[0]), int)== True and isinstance((gm.mean[1]), int) :
#								msg_tracking.roi[msg_tracking.total-1-index].x_offset=(gm.mean[0])
#								msg_tracking.roi[msg_tracking.total-1-index].y_offset=(gm.mean[1])
#								msg_tracking.roi[msg_tracking.total-1-index].width=(gm.mean[2])-(gm.mean[0])
#								msg_tracking.roi[msg_tracking.total-1-index].height=(gm.mean[3])-(gm.mean[1])
#							index=index+1
				small = cv2.resize(cv_image_rgb, (0,0), fx=0.6, fy=0.6)                     
				cv2.imshow('Tracking ROI: '+self.sensor_name, small)
				cv2.waitKey(3)
				self.where_pub_tracking.publish(msg_tracking)

            
        fusion_mutex.release()
    def where_callback(self, data):

        if not self.counter:  # first callback: seed prev_msg with the raw detections
            self.prev_msg = data
            
        self.counter += 1
        if not self.isBusy:
            cv_image = self.rgb_image            
            font = cv2.FONT_HERSHEY_SIMPLEX  
            (rows,cols,channels) = self.depth_image.shape              
            #publish data in where_msgs
            msg_detect = where_msgs()
            msg_detect.header=data.header
            msg_detect.total=data.total
            msg_detect.cam_id=data.cam_id
            msg_detect.roi=data.roi
            msg_detect.location = []
            msg_detect.user_id = []
            index=0
            for roi in msg_detect.roi:
                #dc = self.depth_image[roi.x_offset+roi.width/2, roi.y_offset+roi.height/2]
                
#                if roi.height > roi.width*1.5:
#                    roi.height = int(roi.width*1.5)
								
                crop = self.depth_image[roi.y_offset+10:roi.y_offset+roi.height-10, roi.x_offset+10:roi.x_offset+roi.width-10]
                x = crop.reshape(-1)
                ind = np.nonzero(x<400)
                ind = np.nonzero(x<100/roi.width*1.5)
                x2 = np.delete(x, ind)
                ind = np.nonzero(x2>5000)
                x3 = np.delete(x2, ind)
                dist = np.median(x3)
                '''
                ahist, bin_edge = np.histogram(x3,100)
                ahist = scipy.signal.medfilt(ahist,kernel_size=3)
                ii = np.argmax(ahist)
                dist = bin_edge[ii]
                '''



                #convert from pixel to x-y-z
                fy = rows / (2 * np.tan(43.2/2*np.pi/180))
                yw = -(rows/2 - (roi.y_offset+roi.height/2)) *dist / fy
                
                fx = cols / (2 * np.tan(57.0/2*np.pi/180))
                xw = (cols/2 - (roi.x_offset+roi.width/2)) *dist / fx
               
                #print 'Person', int(xw), int(yw), int(dist)
                
                #loc = Point32(xw/1000, yw/1000, dist/1000)
                if data.location[index].z==0:
                	loc = Point32(xw/1000, 0.5, dist/1000)
                	msg_detect.user_id.append(-1)
                else:
                	loc = Point32(data.location[index].x, data.location[index].y, data.location[index].z)
                	msg_detect.user_id.append(0)                
                msg_detect.location.append(loc)
                index=index+1
            print "republish message detection",msg_detect.location
            if len(self.prev_msg.roi):
				for roi in self.prev_msg.roi:
					cv2.rectangle(cv_image, (roi.x_offset, roi.y_offset), (roi.x_offset+roi.width, roi.y_offset+roi.height), (100, 255, 255), 1)
				if self.verbose:
					small = cv2.resize(cv_image, (0,0), fx=0.6, fy=0.6)     
                	#self.image_pub.publish(small)        
					cv2.imshow('Distance: '+self.sensor_name, small)
					cv2.waitKey(3)
            self.prev_msg = msg_detect

            self.isBusy = True            
            self.where_pub.publish(msg_detect)         
            #self.depth_image = self.new_depth
            
            
            if 1:#not self.isBusy:
				#cv_image = self.rgb_image
				cv_image = cv2.imread("/home/simonpic/repository/database/GUI/new_gui.png")
				font = cv2.FONT_HERSHEY_SIMPLEX
#				msg_tracking = data
				msg_tracking = where_msgs()
				msg_tracking.header=msg_detect.header
				msg_tracking.total=msg_detect.total
				msg_tracking.cam_id=msg_detect.cam_id
				msg_tracking.roi=msg_detect.roi
				msg_tracking.location=msg_detect.location
				msg_tracking.user_id=msg_detect.user_id
	#			msg.location = []
	#			msg.user_id = []
				pscale = 160
				aa = np.zeros((msg_tracking.total, 2))
				for ii, loc in enumerate(msg_tracking.location):
					if not np.isnan(loc.x):
						aa[ii] = [loc.x, loc.z]
						
						cv2.circle(cv_image, ((int)(loc.x*pscale), (int)(loc.z*pscale-pscale)), 3, (255, 0, 0), 2) 
					print 'Person', loc.x,loc.z
				aa = np.transpose(aa)
				bb=np.zeros((4,aa.shape[1]))
				bb[0]=aa[0]
				bb[1]=aa[1]
				self.f_gmphd.run_iteration(bb)
				if self.verbose:
					gms = self.f_gmphd.gm
					index=0
					for gm in gms:
						if gm.detect:# > 0.1:
							cv2.circle(cv_image, (int(gm.mean[0,0]*pscale), int(gm.mean[1,0]*pscale-pscale)), 3, (0, 255, 0), 2)
							#cv2.rectangle(cv_image, (x, y), (x2, y2), (0, 255, 0), 2)
	#						print"distance after tracking",(gm.mean[0,0]), (gm.mean[1,0])
							msg_tracking.location[msg_tracking.total-1-index].x=(gm.mean[0,0])
							msg_tracking.location[msg_tracking.total-1-index].z=(gm.mean[1,0])
							
							index=index+1
							cv2.putText(cv_image, str(gm.id) +'-{:.2f}'.format(gm.weight), (int(gm.mean[0,0]*pscale), int(gm.mean[1,0]*pscale-pscale)), font ,0.5, (0, 0, 255), 2)
					cv2.imshow('Tracking: '+self.sensor_name, cv_image)
					cv2.waitKey(3)
				
				
				if self.verbose:
					# Debug overlay: draw the last ROI and its estimated distance.
					cv2.rectangle(cv_image, (roi.x_offset, roi.y_offset), (roi.x_offset+roi.width, roi.y_offset+roi.height), (0, 255, 0), 2)
					cv2.putText(cv_image, 'd:'+'{:.2f}'.format(float(dist)/1000), (roi.x_offset, roi.y_offset), font ,0.5, (255, 0, 0), 2)
				print "distance after tracking", msg_tracking.location
				self.where_pub_tracking.publish(msg_tracking)
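Both callbacks above repeat the same ROI depth filtering (crop with a 10 px margin, discard out-of-range samples, take the median); a minimal standalone sketch of that step (the roi_median_depth name is illustrative; the 400-5000 mm limits follow the code above):

import numpy as np

def roi_median_depth(depth_image, x_offset, y_offset, width, height,
                     near_mm=400, far_mm=5000):
    '''Sketch: median depth (mm) inside an ROI, ignoring NaNs and implausible ranges.'''
    crop = depth_image[y_offset + 10:y_offset + height - 10,
                       x_offset + 10:x_offset + width - 10]
    x = crop.reshape(-1).astype(float)
    x = x[np.isfinite(x)]                    # drop NaNs
    x = x[(x > near_mm) & (x < far_mm)]      # keep plausible Kinect depth range
    return float(np.median(x)) if x.size else float('nan')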