Пример #1
0
 def add_data_tuple_to_smoothed_blue_cache(data_tuple):
     """Append a weighted average of the newest blue-cache vectors.

     NOTE(review): `data_tuple` is unused — the smoothing reads straight
     from Locator.blue_cache. Kept for interface symmetry with callers.
     """
     recent_vectors = Locator.get_members(Locator.blue_cache, 3)
     # newest sample carries the largest weight (0.5)
     averaged = Math.calc_weighted_average_for_vectors(recent_vectors,
                                                       [0.2, 0.3, 0.5])
     Locator.add_data_tuple_to_cache(averaged, Locator.smoothed_blue_cache,
                                     Locator.smoothed_blue_cache_max_size)
Пример #2
0
 def calc_velocity_vector(cache, number_of_members):
     """Estimate a velocity vector from cached (position, timestamp) samples.

     Compares the oldest and newest of the requested members and returns the
     velocity in integer units-per-minute. Returns (0, 0) whenever a
     velocity cannot be computed (too few samples, missing members, or two
     samples with identical timestamps).
     """
     # fewer than two samples: no velocity can be estimated
     if len(cache) <= 1:
         return (0, 0)
     pair = Locator.simple_get_members(cache, number_of_members)
     if pair is None:
         return (0, 0)
     # pair[i] is (position_vector, timestamp); [0] is older, [1] is newer
     displacement = Math.sub_vectors(pair[1][0], pair[0][0])
     elapsed = pair[1][1] - pair[0][1]
     try:
         # per-second rate first, then scale to per-minute and round to ints
         per_second = tuple([e * (1.0 / elapsed) for e in displacement])
         return tuple([int(round(e * 60.0)) for e in per_second])
     except ZeroDivisionError:
         # identical timestamps: report the object as stationary
         # (was a bare `except:`, which hid unrelated errors)
         return (0, 0)
Пример #3
0
    def get_state(self):
        """Build the STATE message broadcast to data-stream subscribers.

        Layout is <msg_code><nr_args><arg1>...<arg19>; all positions are
        expressed relative to the pitch top-left corner.
        """
        origin = self.pitch_tl_
        bal = Math.sub_vectors(self.bal_center_, origin)
        blu = Math.sub_vectors(self.blu_robot_center_, origin)
        ylw = Math.sub_vectors(self.ylw_robot_center_, origin)
        trust_flags = [int(round(flag)) for flag in
                       (self.trust_bal_, self.trust_blu_, self.trust_ylw_)]

        # message to be encoded is <msg_code><nr_args><arg1><arg2>...
        message = [CommProto.STATE, 19]
        message += list(bal) + list(blu) + list(ylw)
        message += list(self.blu_dir_) + list(self.ylw_dir_)
        message += trust_flags
        message += list(self.bal_vel_) + list(self.blu_vel_)
        message += list(self.ylw_vel_)
        return message
Пример #4
0
	def get_state(self):
		"""Assemble the STATE broadcast message for subscribers.

		Format: <msg_code><nr_args><arg1>...<arg19>, with all positions
		given relative to the pitch top-left corner.
		"""
		tl = self.pitch_tl_
		ball_pos = Math.sub_vectors(self.bal_center_, tl)
		blue_pos = Math.sub_vectors(self.blu_robot_center_, tl)
		ylw_pos = Math.sub_vectors(self.ylw_robot_center_, tl)
		trusts = [int(round(t)) for t in
			(self.trust_bal_, self.trust_blu_, self.trust_ylw_)]

		# message to be encoded is <msg_code><nr_args><arg1><arg2>...
		msg = [CommProto.STATE, 19]
		msg += list(ball_pos) + list(blue_pos) + list(ylw_pos)
		msg += list(self.blu_dir_) + list(self.ylw_dir_)
		msg += trusts
		msg += list(self.bal_vel_) + list(self.blu_vel_)
		msg += list(self.ylw_vel_)
		return msg
Пример #5
0
    def detect_robot(self, trsh_im):
        """Locate a robot plate in a thresholded binary image.

        Returns (trsh_im, center, dir_vec, angle); the last three are None
        when no contour was found.
        """
        # angle might not be accurate, but always determines a line
        # that is collinear to the longest part of the T on the robot plate.
        t_contour, t_center, ang = self.detect_robot_via_moments(trsh_im)
        if not (t_center and t_contour):
            return trsh_im, None, None, None
        # create dir vec (may be wrong when error_expected() returns True)
        # pointing upwards (systems 0 degree) and rotate it by the angle.
        t_dir = Math.int_vec(Math.rotate_vec((0, -20), ang))
        # (removed unused local `trust_dir` — it was never read)

        # if mistake possible (robot orientation in danger zone), perform check
        if self.dir_error_expected(ang, 10):
            # Try to get the direction of the back using
            # get_back_dir(contour, centroid, radius, supposed_T_direction,
            #   line_thickness, image_size)
            t_bck_dir = self.get_back_dir(t_contour, t_center, 20, t_dir, 12,
                                          cv.GetSize(trsh_im))
            # if the two vectors point roughly the same way the moment-based
            # direction is inverted, so flip it
            if t_bck_dir and Math.ang_btw_vecs(t_dir, t_bck_dir) < 90:
                t_dir = Math.invert_vec(t_dir)
        return trsh_im, t_center, t_dir, ang
Пример #6
0
	def detect_robot_via_moments(self, trsh_im):
		"""Find the largest contour; return (contour, centroid, angle) or Nones."""
		# FindContours corrupts its input image, so work on a scratch copy
		# and leave the caller's thresholded image untouched.
		scratch = cv.CreateImage(cv.GetSize(trsh_im), cv.IPL_DEPTH_8U, 1)
		cv.Copy(trsh_im, scratch)
		seq = cv.FindContours(scratch, cv.CreateMemStorage(0),
			cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE)
		largest = find_largest_contour(seq)
		if not largest:
			return (None, None, None)
		moments = cv.Moments(largest)
		centroid = Math.int_vec(get_contour_center(moments))
		orientation = direction_from_moments(moments)
		return largest, centroid, orientation
Пример #7
0
	def detect_robot(self, trsh_im):
		"""Locate a robot plate in a thresholded binary image.

		Returns (trsh_im, center, dir_vec, angle); the last three are None
		when no contour was found.
		"""
		# angle might not be accurate, but always determines a line
		# that is collinear to the longest part of the T on the robot plate.
		t_contour, t_center, ang = self.detect_robot_via_moments(trsh_im)
		if not (t_center and t_contour):
			return trsh_im, None, None, None
		# create dir vec (may be wrong when error_expected() returns True)
		# pointing upwards (systems 0 degree) and rotate it by the angle.
		t_dir = Math.int_vec(Math.rotate_vec((0, -20), ang))
		# (removed unused local `trust_dir` — it was never read)

		# if mistake possible (robot orientation in danger zone), perform check
		if self.dir_error_expected(ang, 10):
			# Try to get the direction of the back using
			# get_back_dir(contour, centroid, radius, supposed_T_direction,
			#	line_thickness, image_size)
			t_bck_dir = self.get_back_dir(t_contour, t_center, 20, t_dir, 12,
				cv.GetSize(trsh_im))
			# if the two vectors point roughly the same way the moment-based
			# direction is inverted, so flip it
			if t_bck_dir and Math.ang_btw_vecs(t_dir, t_bck_dir) < 90:
				t_dir = Math.invert_vec(t_dir)
		return trsh_im, t_center, t_dir, ang
Пример #8
0
    def detect_robot_via_moments(self, trsh_im):
        """Return (largest_contour, int_centroid, orientation) or Nones."""
        # FindContours corrupts its input image; hand it a throwaway copy so
        # the caller's thresholded image stays intact.
        work_im = cv.CreateImage(cv.GetSize(trsh_im), cv.IPL_DEPTH_8U, 1)
        cv.Copy(trsh_im, work_im)
        contour_seq = cv.FindContours(work_im, cv.CreateMemStorage(0),
                                      cv.CV_RETR_LIST,
                                      cv.CV_CHAIN_APPROX_NONE)
        largest = find_largest_contour(contour_seq)
        if not largest:
            return (None, None, None)
        m = cv.Moments(largest)
        centroid = Math.int_vec(get_contour_center(m))
        return largest, centroid, direction_from_moments(m)
Пример #9
0
	def calc_velocity_vector(cache, number_of_members):
		"""Estimate a velocity vector from cached (position, timestamp) samples.

		Compares the oldest and newest of the requested members and returns
		the velocity in integer units-per-minute. Returns (0, 0) whenever a
		velocity cannot be computed (too few samples, missing members, or
		two samples with identical timestamps).
		"""
		# fewer than two samples: no velocity can be estimated
		if len(cache) <= 1:
			return (0, 0)
		pair = Locator.simple_get_members(cache, number_of_members)
		if pair is None:
			return (0, 0)
		# pair[i] is (position_vector, timestamp); [0] is older, [1] is newer
		displacement = Math.sub_vectors(pair[1][0], pair[0][0])
		elapsed = pair[1][1] - pair[0][1]
		try:
			# per-second rate first, then scale to per-minute and round
			per_second = tuple([e * (1.0 / elapsed) for e in displacement])
			return tuple([int(round(e * 60.0)) for e in per_second])
		except ZeroDivisionError:
			# identical timestamps: report the object as stationary
			# (was a bare `except:`, which hid unrelated errors)
			return (0, 0)
Пример #10
0
	def detect_ball(self, hsv_img, erd_mat, erd, dil_mat, dil):
		"""Threshold for the red ball and return (binary_image, center).

		`center` is None when no contour survives the morphology pass.
		"""
		frame_size = cv.GetSize(hsv_img)
		# colours on pitch2 (note conversion is from BGR2HSV not RGB2HSV!)
		binary = self.red_color_.in_range_s(hsv_img)

		# close gaps first, then shave off remaining noise
		dilate_kernel = cv.CreateStructuringElementEx(
			dil_mat[0], dil_mat[1], 0, 0, 0)
		cv.Dilate(binary, binary, dilate_kernel, dil)
		erode_kernel = cv.CreateStructuringElementEx(
			erd_mat[0], erd_mat[1], 0, 0, 0)
		cv.Erode(binary, binary, erode_kernel, erd)

		# FindContours corrupts its input, so hand it a scratch copy
		scratch = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
		cv.Copy(binary, scratch)
		largest = find_largest_contour(cv.FindContours(
			scratch, cv.CreateMemStorage(0),
			cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE))
		if not largest:
			return binary, None
		center = get_contour_center(cv.Moments(largest))
		return binary, Math.int_vec(center)
Пример #11
0
    def detect_ball(self, hsv_img, erd_mat, erd, dil_mat, dil):
        """Threshold for the red ball and return (binary_image, center).

        `center` is None when no contour survives the morphology pass.
        """
        frame_size = cv.GetSize(hsv_img)
        # colours on pitch2 (note conversion is from BGR2HSV not RGB2HSV!)
        binary = self.red_color_.in_range_s(hsv_img)

        # close gaps first, then shave off remaining noise
        dilate_kernel = cv.CreateStructuringElementEx(dil_mat[0], dil_mat[1],
                                                      0, 0, 0)
        cv.Dilate(binary, binary, dilate_kernel, dil)
        erode_kernel = cv.CreateStructuringElementEx(erd_mat[0], erd_mat[1],
                                                     0, 0, 0)
        cv.Erode(binary, binary, erode_kernel, erd)

        # FindContours corrupts its input, so hand it a scratch copy
        scratch = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
        cv.Copy(binary, scratch)
        largest = find_largest_contour(cv.FindContours(
            scratch, cv.CreateMemStorage(0),
            cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE))
        if not largest:
            return binary, None
        center = get_contour_center(cv.Moments(largest))
        return binary, Math.int_vec(center)
Пример #12
0
    def get_back_dir(self, t_contour, centroid, rad, t_dir, thickness, size):
        """Estimate the vector from the T's centroid towards its back (hat).

        Draws the contour on a blank image, erases the T's stand with a
        thick black line, and takes the midpoint of the remaining blob(s)
        as the 'back' point. Returns a direction vector, or None when no
        contour remains after the split.

        NOTE(review): `rad` is currently unused (the circular-ROI variant is
        commented out); cv.ShowImage('w', ...) looks like a debug leftover.
        """
        roi = self.draw_contour(t_contour, size)
        #roi = self.get_circular_roi(centroid, rad, bin_im)
        # draw a thick line from tip of 'dir_vec' to tip of the inverted
        # 'dir_vec'. This is done, to ensure that the 'stand' of the T is
        # removed and its hat is split in two.
        cv.Line(roi, Math.add_vectors(t_dir, centroid),
                Math.add_vectors(Math.invert_vec(t_dir), centroid),
                ColorSpace.RGB_BLACK, thickness)

        # FindContours corrupts its input image, so give it a copy
        tmp_im = cv.CreateImage(cv.GetSize(roi), cv.IPL_DEPTH_8U, 1)
        cv.Copy(roi, tmp_im)
        seq = cv.FindContours(tmp_im, cv.CreateMemStorage(0),
                              cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE)

        # sort contours from ROI and take up to two with the biggest area
        contours = contours_area_sort(seq)[-2:]
        if not contours: return None
        # the empty case returned above, so there is exactly one or two left
        # (the old trailing `else: return None` branch was unreachable)
        if len(contours) == 2:  # two pieces: back is the midpoint between them
            pt1 = get_contour_center(cv.Moments(contours[0]))
            pt2 = get_contour_center(cv.Moments(contours[1]))
            mid = Math.add_vectors(pt1, pt2, 1 / 2.0)
        else:  # a single piece left: use its center as the mid point
            mid = get_contour_center(cv.Moments(contours[0]))

        mid = Math.int_vec(mid)
        dir_vec = Math.sub_vectors(mid, centroid)

        # visualize the resulting vector (debug aid)
        cv.Line(roi, centroid, Math.add_vectors(centroid, dir_vec),
                ColorSpace.RGB_WHITE, 1)
        cv.ShowImage('w', roi)
        return dir_vec  # return the back direction vec
Пример #13
0
	def get_back_dir(self, t_contour, centroid, rad, t_dir, thickness, size):
		"""Estimate the vector from the T's centroid towards its back (hat).

		Draws the contour on a blank image, erases the T's stand with a
		thick black line, and takes the midpoint of the remaining blob(s)
		as the 'back' point. Returns a direction vector, or None when no
		contour remains after the split.

		NOTE(review): `rad` is currently unused (the circular-ROI variant is
		commented out); cv.ShowImage('w', ...) looks like a debug leftover.
		"""
		roi = self.draw_contour(t_contour, size)
		#roi = self.get_circular_roi(centroid, rad, bin_im)
		# draw a thick line from tip of 'dir_vec' to tip of the inverted
		# 'dir_vec'. This is done, to ensure that the 'stand' of the T is
		# removed and its hat is split in two.
		cv.Line(roi, Math.add_vectors(t_dir, centroid),
			Math.add_vectors(Math.invert_vec(t_dir), centroid),
			ColorSpace.RGB_BLACK, thickness)

		# FindContours corrupts its input image, so give it a copy
		tmp_im = cv.CreateImage(cv.GetSize(roi), cv.IPL_DEPTH_8U, 1)
		cv.Copy(roi, tmp_im)
		seq = cv.FindContours(tmp_im, cv.CreateMemStorage(0),
			cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE)

		# sort contours from ROI and take up to two with the biggest area
		contours = contours_area_sort(seq)[-2:]
		if not contours: return None
		# the empty case returned above, so there is exactly one or two left
		# (the old trailing `else: return None` branch was unreachable)
		if len(contours) == 2:	# two pieces: back is the midpoint between them
			pt1 = get_contour_center(cv.Moments(contours[0]))
			pt2 = get_contour_center(cv.Moments(contours[1]))
			mid = Math.add_vectors(pt1, pt2, 1 / 2.0)
		else:	# a single piece left: use its center as the mid point
			mid = get_contour_center(cv.Moments(contours[0]))

		mid = Math.int_vec(mid)
		dir_vec = Math.sub_vectors(mid, centroid)

		# visualize the resulting vector (debug aid)
		cv.Line(roi, centroid, Math.add_vectors(centroid, dir_vec),
			ColorSpace.RGB_WHITE, 1)
		cv.ShowImage('w', roi)
		return dir_vec	# return the back direction vec
Пример #14
0
    def detect_objects(self, prev_bal_center, prev_blu_center, prev_ylw_center,
                       prev_blu_dir, prev_ylw_dir):
        """Process one camera frame: locate the ball and both robots.

        Updates position/velocity caches and instance state, broadcasts the
        new state to subscribers, and returns the (possibly updated) tuple
        (prev_bal_center, prev_blu_center, prev_ylw_center, prev_blu_dir,
        prev_ylw_dir) to be fed back in on the next call.
        """
        # get camera or dummy frame and undistort
        img = self.cam_.get_frame()
        # Size of frame is 640x480, so cut (50, 80) from the left to remove
        # the distortion. Usage: cv.SubRect(x, y, width, height)
        # TODO: Use homography info, current crop is hardcoded
        img = cv.GetSubRect(img, (10, 70, 620, 340))  # crop
        size = cv.GetSize(img)

        blu_dir = ylw_dir = None
        bal_center = blu_center = ylw_center = None
        trust_bal = trust_ylw = trust_blu = True

        # create an HSV and an RGB image for later use
        hsv_img = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
        rgb_img = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img, rgb_img, cv.CV_BGR2RGB)

        # detect ball using hsv_img
        red_bin_img, bal_center = self.detect_ball(hsv_img, (3, 3), 3, (3, 3),
                                                   3)
        if not bal_center:
            bal_center = prev_bal_center
            trust_bal = False

        # detect blue robot using hsv_img
        blu_trsh = self.blu_color_.in_range_s(hsv_img)  # threshold
        erd_mat = (1, 1)
        erd = 1
        dil_mat = (2, 2)
        dil = 5  # erode/dilate
        cv.Erode(
            blu_trsh, blu_trsh,
            cv.CreateStructuringElementEx(erd_mat[0], erd_mat[1], 0, 0, 0),
            erd)
        cv.Dilate(
            blu_trsh, blu_trsh,
            cv.CreateStructuringElementEx(dil_mat[0], dil_mat[1], 0, 0, 0),
            dil)
        cv.Erode(
            blu_trsh, blu_trsh,
            cv.CreateStructuringElementEx(erd_mat[0] * 6, erd_mat[1] * 6, 0, 0,
                                          0), erd)

        blu_bin_img, blu_center, blu_dir, b_ang = self.detect_robot(blu_trsh)

        # detect yellow robot and invert the threshold
        ylw_trsh = self.ylw_color_.in_range_s(hsv_img)  # threshold
        cv.Not(ylw_trsh, ylw_trsh)  # invert
        erd_mat = (1, 1)
        erd = 3  # erode
        cv.Erode(
            ylw_trsh, ylw_trsh,
            cv.CreateStructuringElementEx(erd_mat[0] * 2, erd_mat[1] * 2, 0, 0,
                                          0), 2)

        ylw_bin_img, ylw_center, ylw_dir, y_ang = self.detect_robot(ylw_trsh)

        # fix values if current data cannot be trusted
        # ball
        if not prev_bal_center:
            prev_bal_center = bal_center
            trust_bal = False
        if trust_bal and bal_center: prev_bal_center = bal_center

        # blue robot
        if not blu_center:
            blu_center = prev_blu_center
            blu_dir = prev_blu_dir
        else:
            prev_blu_center = blu_center
            prev_blu_dir = blu_dir
        # NOTE(review): a legitimate 0-degree angle is falsy and is also
        # replaced by -1 here — confirm this is intended
        if not b_ang: b_ang = -1

        # yellow robot
        if not ylw_center:
            ylw_center = prev_ylw_center
            ylw_dir = prev_ylw_dir
        else:
            prev_ylw_center = ylw_center
            # FIX: was `preb_ylw_dir = ylw_dir` (typo), so the previous
            # yellow direction was never actually updated
            prev_ylw_dir = ylw_dir
        if not y_ang: y_ang = -1

        # fix coordinates because of difference between thresholded image
        # and real image
        bal_center = Math.add_vectors(bal_center, (8, 8))
        blu_center = Math.add_vectors(blu_center, (6, 7))
        # It seems yellow does not need correction anymore
        # ylw_center = Math.add_vectors(ylw_center, (6, 7))

        sys.stdout.write('\b' * 106 +
                         'BAL (%-4.4s,%-4.4s) BLU (%-4.4s,%-4.4s,%-4.4s)' %
                         (str(bal_center[0]) if trust_bal else 'X',
                          str(bal_center[1]) if trust_bal else 'X',
                          str(blu_center[0]) if trust_blu else 'X',
                          str(blu_center[1]) if trust_blu else 'X',
                          str(int(round(b_ang))) if trust_blu else 'X') +
                         ' YLW (%-4.4s,%-4.4s,%-4.4s) ' %
                         (str(ylw_center[0]) if trust_ylw else 'X',
                          str(ylw_center[1]) if trust_ylw else 'X',
                          str(int(round(y_ang))) if trust_ylw else 'X') +
                         'FPS: %d     ' % self.fps_)

        # adding to cache
        Locator.add_data_tuple_to_ball_cache((bal_center, time.time()))
        Locator.add_data_tuple_to_yellow_cache((ylw_center, time.time()))
        Locator.add_data_tuple_to_blue_cache((blu_center, time.time()))

        # adding to smoothed caches
        Locator.add_data_tuple_to_smoothed_ball_cache(
            (bal_center, time.time()))
        Locator.add_data_tuple_to_smoothed_yellow_cache(
            (ylw_center, time.time()))
        Locator.add_data_tuple_to_smoothed_blue_cache(
            (blu_center, time.time()))

        # checking for huge changes in velocity: use a short baseline when
        # the object moved noticeably between the last two frames
        previous_ball_frame, last_ball_frame = Locator.simple_get_members(
            Locator.ball_cache, 1)
        previous_blu_frame, last_blu_frame = Locator.simple_get_members(
            Locator.blue_cache, 1)
        previous_ylw_frame, last_ylw_frame = Locator.simple_get_members(
            Locator.yellow_cache, 1)

        if Math.ecl_dist(previous_ball_frame[0], last_ball_frame[0]) > 3:
            bal_vel = Locator.calc_velocity_vector_ball(1)
        else:
            bal_vel = Locator.calc_velocity_vector_ball(10)
        if Math.ecl_dist(previous_blu_frame[0], last_blu_frame[0]) > 3:
            blu_vel = Locator.calc_velocity_vector_blue(1)
        else:
            blu_vel = Locator.calc_velocity_vector_blue(10)
        if Math.ecl_dist(previous_ylw_frame[0], last_ylw_frame[0]) > 3:
            ylw_vel = Locator.calc_velocity_vector_yellow(1)
        else:
            ylw_vel = Locator.calc_velocity_vector_yellow(10)

        # update centroids
        # NOTE(review): blue/yellow read the *raw* caches but index them with
        # the *smoothed* caches' length — looks unintended (ball uses the
        # smoothed cache for both); confirm before changing.
        bal_center = self.bal_center_ = Locator.smoothed_ball_cache[
            len(Locator.smoothed_ball_cache) - 1][0]
        blu_center = self.blu_robot_center_ = Locator.blue_cache[
            len(Locator.smoothed_blue_cache) - 1][0]
        ylw_center = self.ylw_robot_center_ = Locator.yellow_cache[
            len(Locator.smoothed_yellow_cache) - 1][0]

        # update direction vectors
        self.blu_dir_ = blu_dir
        self.ylw_dir_ = ylw_dir

        # update 'trust' variables
        # TODO: Review these, they may no longer be needed
        self.trust_bal_ = trust_bal
        self.trust_blu_ = trust_blu
        self.trust_ylw_ = trust_ylw

        # update velocities from cache
        self.bal_vel_ = bal_vel
        self.blu_vel_ = blu_vel
        self.ylw_vel_ = ylw_vel

        # send information to data stream subscribers
        self.server_.broadcast_to_subscribers(self.get_state())

        # display visual feedback if there is something to display
        if V_SETT.DISPLAY_FEED:
            VisionGUI.display_visual_feedback(
                img, red_bin_img, blu_bin_img, ylw_bin_img,
                (bal_center, blu_center, ylw_center, blu_dir, ylw_dir),
                (self.l_goal_t_, self.l_goal_b_, self.r_goal_t_,
                 self.r_goal_b_), (self.pitch_tl_, self.pitch_bl_,
                                   self.pitch_tr_, self.pitch_br_),
                (trust_bal, trust_blu, trust_ylw), (bal_vel, blu_vel, ylw_vel))

        return (prev_bal_center, prev_blu_center, prev_ylw_center,
                prev_blu_dir, prev_ylw_dir)
Пример #15
0
	def weighted_smoothing(cache, number_of_members, list_of_weights):
		"""Return the weighted average of the most recent cache members.

		FIX: `Locators` was an undefined name (NameError at call time);
		the helper lives on `Locator`, as used everywhere else.
		"""
		members_chosen = Locator.simple_get_members(cache, number_of_members)
		return Math.calc_weighted_average_for_vectors(members_chosen,
			list_of_weights)
Пример #16
0
	def add_data_tuple_to_smoothed_blue_cache(data_tuple):
		"""Append a weighted average of the newest blue-cache vectors.

		NOTE(review): `data_tuple` is unused — the smoothing reads straight
		from Locator.blue_cache. Kept for interface symmetry with callers.
		"""
		recent_vectors = Locator.get_members(Locator.blue_cache, 3)
		# newest sample carries the largest weight (0.5)
		averaged = Math.calc_weighted_average_for_vectors(recent_vectors,
			[0.2, 0.3, 0.5])
		Locator.add_data_tuple_to_cache(averaged,
			Locator.smoothed_blue_cache,
			Locator.smoothed_blue_cache_max_size)
Пример #17
0
class VisionGUI:
    """Static-only namespace for the vision system's debug GUI.

    Opens the HighGUI windows selected by V_SETT.DISPLAY_FEED and draws
    per-frame overlays (object centers, direction vectors, velocities,
    goal and pitch markers) onto the colour and binary feeds.

    NOTE(review): display_visual_feedback uses tuple parameter unpacking,
    Python 2-only syntax (removed by PEP 3113), so this class cannot run
    under Python 3 as written.
    """

    # window titles
    WIN_COLOR = 'colored feed'
    WIN_RED_BIN = 'red binary image'
    WIN_BLU_BIN = 'blu binary image'
    WIN_YLW_BIN = 'ylw binary image'

    # feed codes as used in V_SETT.DISPLAY_FEED
    # TODO: Change integer values with these constants all over the application
    COLOR_FEED = 0
    RED_BIN_FEED = 1
    BLU_BIN_FEED = 2
    YLW_BIN_FEED = 3
    GUI_SLIDERS = 4

    def __init__():
        # 'abstract' is an undefined name: instantiation raises NameError,
        # enforcing static-only use of this class
        abstract

    def create_gui(red_color, blu_color, ylw_color):
        """Open the feed windows listed in V_SETT.DISPLAY_FEED.

        Feed codes: 0 colour, 1 red binary, 2 blue binary, 3 yellow binary.
        Code 4 additionally attaches threshold trackbars (driven by the
        given colour objects) to each open binary window.
        """
        if 0 in V_SETT.DISPLAY_FEED:
            cv.NamedWindow(VisionGUI.WIN_COLOR, 1)

        if 1 in V_SETT.DISPLAY_FEED:
            cv.NamedWindow(VisionGUI.WIN_RED_BIN, 1)
            if 4 in V_SETT.DISPLAY_FEED:
                VisionGUI.atch_trackbars_to_win(VisionGUI.WIN_RED_BIN,
                                                red_color)
            else:
                VisionGUI.deatch_trackbars(VisionGUI.WIN_RED_BIN)

        if 2 in V_SETT.DISPLAY_FEED:
            cv.NamedWindow(VisionGUI.WIN_BLU_BIN, 1)
            if 4 in V_SETT.DISPLAY_FEED:
                VisionGUI.atch_trackbars_to_win(VisionGUI.WIN_BLU_BIN,
                                                blu_color)
            else:
                VisionGUI.deatch_trackbars(VisionGUI.WIN_BLU_BIN)

        if 3 in V_SETT.DISPLAY_FEED:
            cv.NamedWindow(VisionGUI.WIN_YLW_BIN, 1)
            if 4 in V_SETT.DISPLAY_FEED:
                VisionGUI.atch_trackbars_to_win(VisionGUI.WIN_YLW_BIN,
                                                ylw_color)
            else:
                VisionGUI.deatch_trackbars(VisionGUI.WIN_YLW_BIN)

    create_gui = staticmethod(create_gui)

    # NOTE(review): unlike create_gui this is never wrapped in staticmethod(),
    # yet callers invoke it as VisionGUI.display_visual_feedback(...) — under
    # Python 2 that raises TypeError (unbound method); verify.
    def display_visual_feedback(img, red_bin_img, blu_bin_img, ylw_bin_img,
                                (bal_center, blu_center, ylw_center, blu_dir,
                                 ylw_dir), (l_goal_t_, l_goal_b_, r_goal_t_,
                                            r_goal_b_), (pitch_tl_, pitch_bl_,
                                                         pitch_tr_, pitch_br_),
                                (trust_bal, trust_blu,
                                 trust_ylw), (bal_vel, blu_vel, ylw_vel)):
        """Draw per-frame overlays and refresh the enabled feed windows."""

        if 0 in V_SETT.DISPLAY_FEED:
            # display robot and ball centers
            # Ball
            cv.Circle(img, bal_center, 4, ColorSpace.RGB_BLACK, 2)
            # ball velocity (scaled back from units/min to units/sec)
            cv.Line(
                img, bal_center,
                Math.add_vectors(
                    bal_center, Math.int_vec(Math.scale_vec(bal_vel,
                                                            1 / 60.0))),
                ColorSpace.RGB_WHITE, 1, cv.CV_AA)

            # Blue
            left = Math.rotate_vec(blu_dir, -90)
            left_point = Math.add_vectors(left, blu_center)
            cv.Line(img, Math.int_vec(Math.add_vectors(left_point, blu_dir)),
                    Math.int_vec(Math.sub_vectors(left_point, blu_dir)), 1)
            right_point = Math.add_vectors(Math.invert_vec(left), blu_center)
            cv.Line(img, Math.int_vec(Math.add_vectors(right_point, blu_dir)),
                    Math.int_vec(Math.sub_vectors(right_point, blu_dir)), 1)
            cv.Circle(img, blu_center, 4, ColorSpace.RGB_BLU, -2)
            cv.Circle(img, blu_center, 20, ColorSpace.RGB_BLACK, 1)
            # blue_dir
            cv.Line(img, blu_center, Math.add_vectors(blu_center, blu_dir),
                    ColorSpace.RGB_BLACK, 1, cv.CV_AA)
            # blu velocity
            cv.Line(
                img, blu_center,
                Math.add_vectors(
                    blu_center, Math.int_vec(Math.scale_vec(blu_vel,
                                                            1 / 60.0))),
                ColorSpace.RGB_WHITE, 1, cv.CV_AA)

            # Yellow
            left = Math.rotate_vec(ylw_dir, -90)
            left_point = Math.add_vectors(left, ylw_center)
            cv.Line(img, Math.int_vec(Math.add_vectors(left_point, ylw_dir)),
                    Math.int_vec(Math.sub_vectors(left_point, ylw_dir)), 1)
            right_point = Math.add_vectors(Math.invert_vec(left), ylw_center)
            cv.Line(img, Math.int_vec(Math.add_vectors(right_point, ylw_dir)),
                    Math.int_vec(Math.sub_vectors(right_point, ylw_dir)), 1)
            cv.Circle(img, ylw_center, 4, ColorSpace.RGB_YLW, -2)
            cv.Circle(img, ylw_center, 20, ColorSpace.RGB_BLACK, 1)
            # ylw_dir
            cv.Line(img, ylw_center, Math.add_vectors(ylw_center, ylw_dir),
                    ColorSpace.RGB_BLACK, 1, cv.CV_AA)
            # ylw velocity
            cv.Line(
                img, ylw_center,
                Math.add_vectors(
                    ylw_center, Math.int_vec(Math.scale_vec(ylw_vel,
                                                            1 / 60.0))),
                ColorSpace.RGB_WHITE, 1, cv.CV_AA)

            # display goal lines
            cv.Circle(img, l_goal_t_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, l_goal_b_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, r_goal_t_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, r_goal_b_, 1, ColorSpace.RGB_WHITE, 2)

            # display pitch
            cv.Circle(img, pitch_tl_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, pitch_bl_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, pitch_tr_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, pitch_br_, 1, ColorSpace.RGB_WHITE, 2)

            # show image
            cv.ShowImage(VisionGUI.WIN_COLOR, img)

        # display binary images if parameters given
        if 1 in V_SETT.DISPLAY_FEED:
            cv.Circle(red_bin_img, bal_center, 1, ColorSpace.RGB_BLACK, 3)
            cv.Circle(red_bin_img, bal_center, 40, ColorSpace.RGB_WHITE, 1)
            cv.ShowImage(VisionGUI.WIN_RED_BIN, red_bin_img)

        if 2 in V_SETT.DISPLAY_FEED:
            cv.Circle(blu_bin_img, blu_center, 1, ColorSpace.RGB_BLACK, 3)
            cv.Circle(blu_bin_img, blu_center, 40, ColorSpace.RGB_WHITE, 1)
            cv.ShowImage(VisionGUI.WIN_BLU_BIN, blu_bin_img)

        if 3 in V_SETT.DISPLAY_FEED:
            cv.Circle(ylw_bin_img, ylw_center, 1, ColorSpace.RGB_BLACK, 3)
            cv.Circle(ylw_bin_img, ylw_center, 40, ColorSpace.RGB_WHITE, 1)
            cv.ShowImage(VisionGUI.WIN_YLW_BIN, ylw_bin_img)

        # ~35 FPS refresh; WaitKey also pumps the HighGUI event loop
        cv.WaitKey(1000 / 35)
Пример #18
0
 def weighted_smoothing(cache, number_of_members, list_of_weights):
     """Return the weighted average of the most recent cache members.

     FIX: `Locators` was an undefined name (NameError at call time); the
     helper lives on `Locator`, as used everywhere else.
     """
     members_chosen = Locator.simple_get_members(cache, number_of_members)
     return Math.calc_weighted_average_for_vectors(members_chosen,
                                                   list_of_weights)
Пример #19
0
 def smoothe_vectors(cache, number_of_members):
     """Average the requested number of cached vectors.

     Returns (0, 0) when the cache cannot supply the members.
     """
     members = Locator.get_members(cache, number_of_members)
     # `is None` instead of `!= None`; plain literals instead of int(0)
     if members is None:
         return (0, 0)
     return Math.calc_average_for_vectors(members)
Пример #20
0
	def detect_objects(self, prev_bal_center, prev_blu_center, prev_ylw_center,
			prev_blu_dir, prev_ylw_dir):
		"""Process one camera frame: locate the ball and both robots.

		Updates position/velocity caches and instance state, broadcasts the
		new state to subscribers, and returns the (possibly updated) tuple
		(prev_bal_center, prev_blu_center, prev_ylw_center, prev_blu_dir,
		prev_ylw_dir) to be fed back in on the next call.
		"""
		# get camera or dummy frame and undistort
		img = self.cam_.get_frame()
		# Size of frame is 640x480, so cut (50, 80) from the left to remove
		# the distortion. Usage: cv.SubRect(x, y, width, height)
		# TODO: Use homography info, current crop is hardcoded
		img = cv.GetSubRect(img, (10, 70, 620, 340))	# crop
		size = cv.GetSize(img)

		blu_dir = ylw_dir = None
		bal_center = blu_center = ylw_center = None
		trust_bal = trust_ylw = trust_blu = True

		# create an HSV and an RGB image for later use
		hsv_img = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
		cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
		rgb_img = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
		cv.CvtColor(img, rgb_img, cv.CV_BGR2RGB)

		# detect ball using hsv_img
		red_bin_img, bal_center = self.detect_ball(hsv_img,
			(3, 3), 3, (3, 3), 3)
		if not bal_center:
			bal_center = prev_bal_center
			trust_bal = False

		# detect blue robot using hsv_img
		blu_trsh = self.blu_color_.in_range_s(hsv_img)			# threshold
		erd_mat = (1, 1); erd = 1; dil_mat = (2, 2); dil = 5	# erode/dilate
		cv.Erode(blu_trsh, blu_trsh, cv.CreateStructuringElementEx(
				erd_mat[0], erd_mat[1], 0, 0, 0), erd)
		cv.Dilate(blu_trsh, blu_trsh, cv.CreateStructuringElementEx(
				dil_mat[0], dil_mat[1], 0, 0, 0), dil)
		cv.Erode(blu_trsh, blu_trsh, cv.CreateStructuringElementEx(
			erd_mat[0] * 6, erd_mat[1] * 6, 0, 0, 0), erd)

		blu_bin_img, blu_center, blu_dir, b_ang = self.detect_robot(blu_trsh)

		# detect yellow robot and invert the threshold
		ylw_trsh = self.ylw_color_.in_range_s(hsv_img)	# threshold
		cv.Not(ylw_trsh, ylw_trsh)						# invert
		erd_mat = (1, 1); erd = 3						# erode
		cv.Erode(ylw_trsh, ylw_trsh, cv.CreateStructuringElementEx(
			erd_mat[0] * 2, erd_mat[1] * 2, 0, 0, 0), 2)

		ylw_bin_img, ylw_center, ylw_dir, y_ang = self.detect_robot(ylw_trsh)

		# fix values if current data cannot be trusted
		# ball
		if not prev_bal_center:
			prev_bal_center = bal_center
			trust_bal = False
		if trust_bal and bal_center: prev_bal_center = bal_center

		# blue robot
		if not blu_center:
			blu_center = prev_blu_center
			blu_dir = prev_blu_dir
		else:
			prev_blu_center = blu_center
			prev_blu_dir = blu_dir
		# NOTE(review): a legitimate 0-degree angle is falsy and is also
		# replaced by -1 here — confirm this is intended
		if not b_ang: b_ang = -1

		# yellow robot
		if not ylw_center:
			ylw_center = prev_ylw_center
			ylw_dir = prev_ylw_dir
		else:
			prev_ylw_center = ylw_center
			# FIX: was `preb_ylw_dir = ylw_dir` (typo), so the previous
			# yellow direction was never actually updated
			prev_ylw_dir = ylw_dir
		if not y_ang: y_ang = -1

		# fix coordinates because of difference between thresholded image
		# and real image
		bal_center = Math.add_vectors(bal_center, (8, 8))
		blu_center = Math.add_vectors(blu_center, (6, 7))
		# It seems yellow does not need correction anymore
		# ylw_center = Math.add_vectors(ylw_center, (6, 7))

		sys.stdout.write('\b' * 106 +
		'BAL (%-4.4s,%-4.4s) BLU (%-4.4s,%-4.4s,%-4.4s)' % (
			str(bal_center[0]) if trust_bal else 'X',
			str(bal_center[1]) if trust_bal else 'X',
			str(blu_center[0]) if trust_blu else 'X',
			str(blu_center[1]) if trust_blu else 'X',
			str(int(round(b_ang))) if trust_blu else 'X') +
		' YLW (%-4.4s,%-4.4s,%-4.4s) ' % (
			str(ylw_center[0]) if trust_ylw else 'X',
			str(ylw_center[1]) if trust_ylw else 'X',
			str(int(round(y_ang))) if trust_ylw else 'X'
		) +
		'FPS: %d     ' % self.fps_)

		# adding to cache
		Locator.add_data_tuple_to_ball_cache((bal_center, time.time()))
		Locator.add_data_tuple_to_yellow_cache((ylw_center, time.time()))
		Locator.add_data_tuple_to_blue_cache((blu_center, time.time()))

		# adding to smoothed caches
		Locator.add_data_tuple_to_smoothed_ball_cache((bal_center, time.time()))
		Locator.add_data_tuple_to_smoothed_yellow_cache((ylw_center, time.time()))
		Locator.add_data_tuple_to_smoothed_blue_cache((blu_center, time.time()))

		# checking for huge changes in velocity: use a short baseline when
		# the object moved noticeably between the last two frames
		previous_ball_frame, last_ball_frame = Locator.simple_get_members(Locator.ball_cache,1)
		previous_blu_frame, last_blu_frame = Locator.simple_get_members(Locator.blue_cache,1)
		previous_ylw_frame, last_ylw_frame = Locator.simple_get_members(Locator.yellow_cache,1)

		if Math.ecl_dist(previous_ball_frame[0], last_ball_frame[0]) > 3:
			bal_vel = Locator.calc_velocity_vector_ball(1)
		else:
			bal_vel = Locator.calc_velocity_vector_ball(10)
		if Math.ecl_dist(previous_blu_frame[0], last_blu_frame[0]) > 3:
			blu_vel = Locator.calc_velocity_vector_blue(1)
		else:
			blu_vel = Locator.calc_velocity_vector_blue(10)
		if Math.ecl_dist(previous_ylw_frame[0], last_ylw_frame[0]) > 3:
			ylw_vel = Locator.calc_velocity_vector_yellow(1)
		else:
			ylw_vel = Locator.calc_velocity_vector_yellow(10)

		# update centroids
		# NOTE(review): blue/yellow read the *raw* caches but index them with
		# the *smoothed* caches' length — looks unintended (ball uses the
		# smoothed cache for both); confirm before changing.
		bal_center = self.bal_center_ = Locator.smoothed_ball_cache[
			len(Locator.smoothed_ball_cache)-1][0]
		blu_center = self.blu_robot_center_ = Locator.blue_cache[
			len(Locator.smoothed_blue_cache)-1][0]
		ylw_center = self.ylw_robot_center_ = Locator.yellow_cache[
			len(Locator.smoothed_yellow_cache)-1][0]

		# update direction vectors
		self.blu_dir_ = blu_dir
		self.ylw_dir_ = ylw_dir

		# update 'trust' variables
		# TODO: Review these, they may no longer be needed
		self.trust_bal_ = trust_bal
		self.trust_blu_ = trust_blu
		self.trust_ylw_ = trust_ylw

		# update velocities from cache
		self.bal_vel_ = bal_vel
		self.blu_vel_ = blu_vel
		self.ylw_vel_ = ylw_vel

		# send information to data stream subscribers
		self.server_.broadcast_to_subscribers(self.get_state())

		# display visual feedback if there is something to display
		if V_SETT.DISPLAY_FEED:
			VisionGUI.display_visual_feedback(img,
				red_bin_img, blu_bin_img, ylw_bin_img,
				(bal_center, blu_center, ylw_center, blu_dir, ylw_dir),
				(self.l_goal_t_, self.l_goal_b_,
					self.r_goal_t_, self.r_goal_b_),
				(self.pitch_tl_, self.pitch_bl_,
					self.pitch_tr_, self.pitch_br_),
					(trust_bal, trust_blu, trust_ylw),
					(bal_vel, blu_vel, ylw_vel))

		return (prev_bal_center, prev_blu_center, prev_ylw_center,
					prev_blu_dir, prev_ylw_dir)
Пример #21
0
	def smoothe_vectors(cache, number_of_members):
		"""Average the requested number of cached vectors.

		Returns (0, 0) when the cache cannot supply the members.
		"""
		members = Locator.get_members(cache, number_of_members)
		# `is None` instead of `!= None`; plain literals instead of int(0)
		if members is None:
			return (0, 0)
		return Math.calc_average_for_vectors(members)