Example #1
0
    def run(self):
        """Main vision loop.

        Starts the communication server, performs homography and creates the
        GUI, then runs object detection once per frame until ``self.alive_``
        is cleared. Previous-frame positions/directions are threaded through
        ``detect_objects`` so detection can fall back on them when an object
        is not found in the current frame.
        """
        self.server_.start()  # start up communication server
        self.perform_homography()
        VisionGUI.create_gui(self.red_color_, self.blu_color_, self.ylw_color_)

        # Initialise some values in the beginning
        prev_bal_center = (0, 0)
        prev_blu_center = (0, 0)
        prev_ylw_center = (0, 0)

        # should be safe, as during the first frame the method should
        # accurately determine direction; if no direction can be determined,
        # then no object is present (or thresholding is broken), so it
        # doesn't matter
        prev_blu_dir = (0, -20)
        prev_ylw_dir = (0, -20)

        frame_count = 0
        one_sec = 0

        while self.alive_:
            try:
                start = time.time()
                (prev_bal_center, prev_blu_center, prev_ylw_center,
                 prev_blu_dir, prev_ylw_dir) = self.detect_objects(
                     prev_bal_center, prev_blu_center, prev_ylw_center,
                     prev_blu_dir, prev_ylw_dir)

                # update the published frame rate once per wall-clock second
                frame_count += 1
                one_sec += time.time() - start
                if one_sec >= 1.0:
                    self.fps_ = frame_count
                    frame_count = 0
                    one_sec = one_sec % 1.0

            # was a bare ``except:``; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed by the frame loop
            except Exception:
                Debug.print_stacktrace()
                self.kill_self()
                time.sleep(1)  # wait for server to finish
                print('')  # newline after the in-place FPS status line
        Logger.log('Vision Processor: Exiting ...')
Example #2
0
	def run(self):
		"""Main vision loop.

		Starts the communication server, performs homography and creates the
		GUI, then runs object detection once per frame until ``self.alive_``
		is cleared. Previous-frame positions/directions are threaded through
		``detect_objects`` so detection can fall back on them when an object
		is not found in the current frame.
		"""
		self.server_.start()	# start up communication server
		self.perform_homography()
		VisionGUI.create_gui(self.red_color_, self.blu_color_, self.ylw_color_)

		# Initialise some values in the beginning
		prev_bal_center = (0, 0)
		prev_blu_center = (0, 0)
		prev_ylw_center = (0, 0)

		# should be safe, as during the first frame the method should
		# accurately determine direction; if no direction can be determined,
		# then no object is present (or thresholding is broken), so it
		# doesn't matter
		prev_blu_dir = (0, -20)
		prev_ylw_dir = (0, -20)

		frame_count = 0
		one_sec = 0

		while self.alive_:
			try:
				start = time.time()
				(prev_bal_center, prev_blu_center, prev_ylw_center,
					prev_blu_dir, prev_ylw_dir) = self.detect_objects(
						prev_bal_center, prev_blu_center, prev_ylw_center,
						prev_blu_dir, prev_ylw_dir)

				# update the published frame rate once per wall-clock second
				frame_count += 1
				one_sec += time.time() - start
				if one_sec >= 1.0:
					self.fps_ = frame_count
					frame_count = 0
					one_sec = one_sec % 1.0

			# was a bare ``except:``; narrowed so KeyboardInterrupt and
			# SystemExit are no longer swallowed by the frame loop
			except Exception:
				Debug.print_stacktrace()
				self.kill_self()
				time.sleep(1)	# wait for server to finish
				print('')	# newline after the in-place FPS status line
		Logger.log('Vision Processor: Exiting ...')
Example #3
0
    def detect_objects(self, prev_bal_center, prev_blu_center, prev_ylw_center,
                       prev_blu_dir, prev_ylw_dir):
        """Detect the ball and the blue/yellow robots in the current frame.

        Whenever an object cannot be located this frame, the corresponding
        ``prev_*`` argument is used instead; callers should feed the returned
        tuple straight back in on the next call. Updates the Locator caches
        and velocities, publishes state on ``self`` and broadcasts it to the
        data-stream subscribers.

        Returns:
            (prev_bal_center, prev_blu_center, prev_ylw_center,
             prev_blu_dir, prev_ylw_dir) -- updated previous-frame state.
        """
        # get camera or dummy frame and undistort
        img = self.cam_.get_frame()
        # Size of frame is 640x480, so cut (50, 80) from the left to remove
        # the distortion. Usage: cv.SubRect(x, y, width, height)
        # TODO: Use homography info, current crop is hardcoded
        img = cv.GetSubRect(img, (10, 70, 620, 340))  # crop
        size = cv.GetSize(img)

        blu_dir = ylw_dir = None
        bal_center = blu_center = ylw_center = None
        # NOTE(review): trust_blu and trust_ylw are never cleared below, so
        # the status line always shows robot readings -- confirm intended
        trust_bal = trust_ylw = trust_blu = True

        # create an HSV image for later use (the RGB copy the original code
        # also built here was never read, so that dead conversion is removed)
        hsv_img = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

        # detect ball using hsv_img
        red_bin_img, bal_center = self.detect_ball(hsv_img, (3, 3), 3, (3, 3),
                                                   3)
        if not bal_center:
            bal_center = prev_bal_center
            trust_bal = False

        # detect blue robot using hsv_img
        blu_trsh = self.blu_color_.in_range_s(hsv_img)  # threshold
        erd_mat = (1, 1)
        erd = 1
        dil_mat = (2, 2)
        dil = 5  # erode/dilate
        cv.Erode(
            blu_trsh, blu_trsh,
            cv.CreateStructuringElementEx(erd_mat[0], erd_mat[1], 0, 0, 0),
            erd)
        cv.Dilate(
            blu_trsh, blu_trsh,
            cv.CreateStructuringElementEx(dil_mat[0], dil_mat[1], 0, 0, 0),
            dil)
        cv.Erode(
            blu_trsh, blu_trsh,
            cv.CreateStructuringElementEx(erd_mat[0] * 6, erd_mat[1] * 6, 0, 0,
                                          0), erd)

        blu_bin_img, blu_center, blu_dir, b_ang = self.detect_robot(blu_trsh)

        # detect yellow robot using hsv_img and invert the threshold
        ylw_trsh = self.ylw_color_.in_range_s(hsv_img)  # threshold
        cv.Not(ylw_trsh, ylw_trsh)  # invert
        erd_mat = (1, 1)
        erd = 3  # erode
        cv.Erode(
            ylw_trsh, ylw_trsh,
            cv.CreateStructuringElementEx(erd_mat[0] * 2, erd_mat[1] * 2, 0, 0,
                                          0), 2)

        ylw_bin_img, ylw_center, ylw_dir, y_ang = self.detect_robot(ylw_trsh)

        # fix values if current data cannot be trusted
        # ball
        if not prev_bal_center:
            prev_bal_center = bal_center
            trust_bal = False
        if trust_bal and bal_center: prev_bal_center = bal_center

        # blue robot
        if not blu_center:
            blu_center = prev_blu_center
            blu_dir = prev_blu_dir
        else:
            prev_blu_center = blu_center
            prev_blu_dir = blu_dir
        if not b_ang: b_ang = -1

        # yellow robot
        if not ylw_center:
            ylw_center = prev_ylw_center
            ylw_dir = prev_ylw_dir
        else:
            prev_ylw_center = ylw_center
            # BUG FIX: was 'preb_ylw_dir' -- the typo created a dead local,
            # so the yellow direction was never carried to the next frame
            prev_ylw_dir = ylw_dir
        if not y_ang: y_ang = -1

        # fix coordinates because of difference between thresholded image
        # and real image
        bal_center = Math.add_vectors(bal_center, (8, 8))
        blu_center = Math.add_vectors(blu_center, (6, 7))
        # It seems yellow does not need correction anymore
        # ylw_center = Math.add_vectors(ylw_center, (6, 7))

        sys.stdout.write('\b' * 106 +
                         'BAL (%-4.4s,%-4.4s) BLU (%-4.4s,%-4.4s,%-4.4s)' %
                         (str(bal_center[0]) if trust_bal else 'X',
                          str(bal_center[1]) if trust_bal else 'X',
                          str(blu_center[0]) if trust_blu else 'X',
                          str(blu_center[1]) if trust_blu else 'X',
                          str(int(round(b_ang))) if trust_blu else 'X') +
                         ' YLW (%-4.4s,%-4.4s,%-4.4s) ' %
                         (str(ylw_center[0]) if trust_ylw else 'X',
                          str(ylw_center[1]) if trust_ylw else 'X',
                          str(int(round(y_ang))) if trust_ylw else 'X') +
                         'FPS: %d     ' % self.fps_)

        # adding to cache
        Locator.add_data_tuple_to_ball_cache((bal_center, time.time()))
        Locator.add_data_tuple_to_yellow_cache((ylw_center, time.time()))
        Locator.add_data_tuple_to_blue_cache((blu_center, time.time()))

        # adding to smoothed caches
        Locator.add_data_tuple_to_smoothed_ball_cache(
            (bal_center, time.time()))
        Locator.add_data_tuple_to_smoothed_yellow_cache(
            (ylw_center, time.time()))
        Locator.add_data_tuple_to_smoothed_blue_cache(
            (blu_center, time.time()))

        # checking for huge changes in velocity: a big jump between the two
        # most recent frames selects the short 1-sample velocity window,
        # otherwise the smoother 10-sample window is used
        previous_ball_frame, last_ball_frame = Locator.simple_get_members(
            Locator.ball_cache, 1)
        previous_blu_frame, last_blu_frame = Locator.simple_get_members(
            Locator.blue_cache, 1)
        previous_ylw_frame, last_ylw_frame = Locator.simple_get_members(
            Locator.yellow_cache, 1)

        if Math.ecl_dist(previous_ball_frame[0], last_ball_frame[0]) > 3:
            bal_vel = Locator.calc_velocity_vector_ball(1)
        else:
            bal_vel = Locator.calc_velocity_vector_ball(10)
        if Math.ecl_dist(previous_blu_frame[0], last_blu_frame[0]) > 3:
            blu_vel = Locator.calc_velocity_vector_blue(1)
        else:
            blu_vel = Locator.calc_velocity_vector_blue(10)
        if Math.ecl_dist(previous_ylw_frame[0], last_ylw_frame[0]) > 3:
            ylw_vel = Locator.calc_velocity_vector_yellow(1)
        else:
            ylw_vel = Locator.calc_velocity_vector_yellow(10)

        # update centroids
        bal_center = self.bal_center_ = Locator.smoothed_ball_cache[
            len(Locator.smoothed_ball_cache) - 1][0]
        # NOTE(review): the robot centroids index the *raw* caches with the
        # *smoothed* caches' lengths -- looks like a cache mix-up; left
        # as-is since the caches may grow in lockstep, but worth confirming
        blu_center = self.blu_robot_center_ = Locator.blue_cache[
            len(Locator.smoothed_blue_cache) - 1][0]
        ylw_center = self.ylw_robot_center_ = Locator.yellow_cache[
            len(Locator.smoothed_yellow_cache) - 1][0]

        # update direction vectors
        self.blu_dir_ = blu_dir
        self.ylw_dir_ = ylw_dir

        # update 'trust' variables
        # TODO: Review these, they may no longer be needed
        self.trust_bal_ = trust_bal
        self.trust_blu_ = trust_blu
        self.trust_ylw_ = trust_ylw

        # update velocities from cache
        self.bal_vel_ = bal_vel
        self.blu_vel_ = blu_vel
        self.ylw_vel_ = ylw_vel

        # send information to data stream subscribers
        self.server_.broadcast_to_subscribers(self.get_state())

        # display visual feedback if there is something to display
        if len(V_SETT.DISPLAY_FEED):
            VisionGUI.display_visual_feedback(
                img, red_bin_img, blu_bin_img, ylw_bin_img,
                (bal_center, blu_center, ylw_center, blu_dir, ylw_dir),
                (self.l_goal_t_, self.l_goal_b_, self.r_goal_t_,
                 self.r_goal_b_), (self.pitch_tl_, self.pitch_bl_,
                                   self.pitch_tr_, self.pitch_br_),
                (trust_bal, trust_blu, trust_ylw), (bal_vel, blu_vel, ylw_vel))

        return (prev_bal_center, prev_blu_center, prev_ylw_center,
                prev_blu_dir, prev_ylw_dir)
Example #4
0
	def detect_objects(self, prev_bal_center, prev_blu_center, prev_ylw_center,
			prev_blu_dir, prev_ylw_dir):
		"""Detect the ball and the blue/yellow robots in the current frame.

		Whenever an object cannot be located this frame, the corresponding
		``prev_*`` argument is used instead; callers should feed the returned
		tuple straight back in on the next call. Updates the Locator caches
		and velocities, publishes state on ``self`` and broadcasts it to the
		data-stream subscribers.

		Returns:
			(prev_bal_center, prev_blu_center, prev_ylw_center,
			prev_blu_dir, prev_ylw_dir) -- updated previous-frame state.
		"""
		# get camera or dummy frame and undistort
		img = self.cam_.get_frame()
		# Size of frame is 640x480, so cut (50, 80) from the left to remove
		# the distortion. Usage: cv.SubRect(x, y, width, height)
		# TODO: Use homography info, current crop is hardcoded
		img = cv.GetSubRect(img, (10, 70, 620, 340))	# crop
		size = cv.GetSize(img)

		blu_dir = ylw_dir = None
		bal_center = blu_center = ylw_center = None
		# NOTE(review): trust_blu and trust_ylw are never cleared below, so
		# the status line always shows robot readings -- confirm intended
		trust_bal = trust_ylw = trust_blu = True

		# create an HSV image for later use (the RGB copy the original code
		# also built here was never read, so that dead conversion is removed)
		hsv_img = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
		cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

		# detect ball using hsv_img
		red_bin_img, bal_center = self.detect_ball(hsv_img,
			(3, 3), 3, (3, 3), 3)
		if not bal_center:
			bal_center = prev_bal_center
			trust_bal = False

		# detect blue robot using hsv_img
		blu_trsh = self.blu_color_.in_range_s(hsv_img)			# threshold
		erd_mat = (1, 1); erd = 1; dil_mat = (2, 2); dil = 5	# erode/dilate
		cv.Erode(blu_trsh, blu_trsh, cv.CreateStructuringElementEx(
				erd_mat[0], erd_mat[1], 0, 0, 0), erd)
		cv.Dilate(blu_trsh, blu_trsh, cv.CreateStructuringElementEx(
				dil_mat[0], dil_mat[1], 0, 0, 0), dil)
		cv.Erode(blu_trsh, blu_trsh, cv.CreateStructuringElementEx(
			erd_mat[0] * 6, erd_mat[1] * 6, 0, 0, 0), erd)

		blu_bin_img, blu_center, blu_dir, b_ang = self.detect_robot(blu_trsh)

		# detect yellow robot using hsv_img and invert the threshold
		ylw_trsh = self.ylw_color_.in_range_s(hsv_img)	# threshold
		cv.Not(ylw_trsh, ylw_trsh)						# invert
		erd_mat = (1, 1); erd = 3						# erode
		cv.Erode(ylw_trsh, ylw_trsh, cv.CreateStructuringElementEx(
			erd_mat[0] * 2, erd_mat[1] * 2, 0, 0, 0), 2)

		ylw_bin_img, ylw_center, ylw_dir, y_ang = self.detect_robot(ylw_trsh)

		# fix values if current data cannot be trusted
		# ball
		if not prev_bal_center:
			prev_bal_center = bal_center
			trust_bal = False
		if trust_bal and bal_center: prev_bal_center = bal_center

		# blue robot
		if not blu_center:
			blu_center = prev_blu_center
			blu_dir = prev_blu_dir
		else:
			prev_blu_center = blu_center
			prev_blu_dir = blu_dir
		if not b_ang: b_ang = -1

		# yellow robot
		if not ylw_center:
			ylw_center = prev_ylw_center
			ylw_dir = prev_ylw_dir
		else:
			prev_ylw_center = ylw_center
			# BUG FIX: was 'preb_ylw_dir' -- the typo created a dead local,
			# so the yellow direction was never carried to the next frame
			prev_ylw_dir = ylw_dir
		if not y_ang: y_ang = -1

		# fix coordinates because of difference between thresholded image
		# and real image
		bal_center = Math.add_vectors(bal_center, (8, 8))
		blu_center = Math.add_vectors(blu_center, (6, 7))
		# It seems yellow does not need correction anymore
		# ylw_center = Math.add_vectors(ylw_center, (6, 7))

		sys.stdout.write('\b' * 106 +
		'BAL (%-4.4s,%-4.4s) BLU (%-4.4s,%-4.4s,%-4.4s)' % (
			str(bal_center[0]) if trust_bal else 'X',
			str(bal_center[1]) if trust_bal else 'X',
			str(blu_center[0]) if trust_blu else 'X',
			str(blu_center[1]) if trust_blu else 'X',
			str(int(round(b_ang))) if trust_blu else 'X') +
		' YLW (%-4.4s,%-4.4s,%-4.4s) ' % (
			str(ylw_center[0]) if trust_ylw else 'X',
			str(ylw_center[1]) if trust_ylw else 'X',
			str(int(round(y_ang))) if trust_ylw else 'X'
		) +
		'FPS: %d     ' % self.fps_)

		# adding to cache
		Locator.add_data_tuple_to_ball_cache((bal_center, time.time()))
		Locator.add_data_tuple_to_yellow_cache((ylw_center, time.time()))
		Locator.add_data_tuple_to_blue_cache((blu_center, time.time()))

		# adding to smoothed caches
		Locator.add_data_tuple_to_smoothed_ball_cache((bal_center, time.time()))
		Locator.add_data_tuple_to_smoothed_yellow_cache((ylw_center, time.time()))
		Locator.add_data_tuple_to_smoothed_blue_cache((blu_center, time.time()))

		# checking for huge changes in velocity: a big jump between the two
		# most recent frames selects the short 1-sample velocity window,
		# otherwise the smoother 10-sample window is used
		previous_ball_frame, last_ball_frame = Locator.simple_get_members(Locator.ball_cache,1)
		previous_blu_frame, last_blu_frame = Locator.simple_get_members(Locator.blue_cache,1)
		previous_ylw_frame, last_ylw_frame = Locator.simple_get_members(Locator.yellow_cache,1)

		if Math.ecl_dist(previous_ball_frame[0], last_ball_frame[0]) > 3:
			bal_vel = Locator.calc_velocity_vector_ball(1)
		else:
			bal_vel = Locator.calc_velocity_vector_ball(10)
		if Math.ecl_dist(previous_blu_frame[0], last_blu_frame[0]) > 3:
			blu_vel = Locator.calc_velocity_vector_blue(1)
		else:
			blu_vel = Locator.calc_velocity_vector_blue(10)
		if Math.ecl_dist(previous_ylw_frame[0], last_ylw_frame[0]) > 3:
			ylw_vel = Locator.calc_velocity_vector_yellow(1)
		else:
			ylw_vel = Locator.calc_velocity_vector_yellow(10)

		# update centroids
		bal_center = self.bal_center_ = Locator.smoothed_ball_cache[
			len(Locator.smoothed_ball_cache)-1][0]
		# NOTE(review): the robot centroids index the *raw* caches with the
		# *smoothed* caches' lengths -- looks like a cache mix-up; left
		# as-is since the caches may grow in lockstep, but worth confirming
		blu_center = self.blu_robot_center_ = Locator.blue_cache[
			len(Locator.smoothed_blue_cache)-1][0]
		ylw_center = self.ylw_robot_center_ = Locator.yellow_cache[
			len(Locator.smoothed_yellow_cache)-1][0]

		# update direction vectors
		self.blu_dir_ = blu_dir
		self.ylw_dir_ = ylw_dir

		# update 'trust' variables
		# TODO: Review these, they may no longer be needed
		self.trust_bal_ = trust_bal
		self.trust_blu_ = trust_blu
		self.trust_ylw_ = trust_ylw

		# update velocities from cache
		self.bal_vel_ = bal_vel
		self.blu_vel_ = blu_vel
		self.ylw_vel_ = ylw_vel

		# send information to data stream subscribers
		self.server_.broadcast_to_subscribers(self.get_state())

		# display visual feedback if there is something to display
		if len(V_SETT.DISPLAY_FEED):
			VisionGUI.display_visual_feedback(img,
				red_bin_img, blu_bin_img, ylw_bin_img,
				(bal_center, blu_center, ylw_center, blu_dir, ylw_dir),
				(self.l_goal_t_, self.l_goal_b_,
					self.r_goal_t_, self.r_goal_b_),
				(self.pitch_tl_, self.pitch_bl_,
					self.pitch_tr_, self.pitch_br_),
					(trust_bal, trust_blu, trust_ylw),
					(bal_vel, blu_vel, ylw_vel))

		return (prev_bal_center, prev_blu_center, prev_ylw_center,
					prev_blu_dir, prev_ylw_dir)