Example #1
    def get_back_dir(self, t_contour, centroid, rad, t_dir, thickness, size):
        roi = self.draw_contour(t_contour, size)
        #roi = self.get_circular_roi(centroid, rad, bin_im)
        # draw a thick line from the tip of 't_dir' to the tip of the inverted
        # 't_dir'. This ensures that the 'stand' of the T is removed and its
        # hat is split in two.
        cv.Line(roi, Math.add_vectors(t_dir, centroid),
                Math.add_vectors(Math.invert_vec(t_dir), centroid),
                ColorSpace.RGB_BLACK, thickness)

        tmp_im = cv.CreateImage(cv.GetSize(roi), cv.IPL_DEPTH_8U, 1)
        cv.Copy(roi, tmp_im)  # create image for FindContours
        seq = cv.FindContours(tmp_im, cv.CreateMemStorage(0),
                              cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE)

        # sort contours from ROI and try to take two with the biggest area
        contours = contours_area_sort(seq)[-2:]
        if not contours: return None
        nr_contours = len(contours)
        if nr_contours == 2:  # if two available, get vec to midpoint
            pt1 = get_contour_center(cv.Moments(contours[0]))
            pt2 = get_contour_center(cv.Moments(contours[1]))
            mid = Math.add_vectors(pt1, pt2, 1 / 2.0)
        elif nr_contours:  # if only one, return its center as the midpoint
            mid = get_contour_center(cv.Moments(contours[0]))
        # no contours found, check failed; caller falls back to the previous value
        else:
            return None

        mid = Math.int_vec(mid)
        dir_vec = Math.sub_vectors(mid, centroid)

        # show vector
        cv.Line(roi, centroid, Math.add_vectors(centroid, dir_vec),
                ColorSpace.RGB_WHITE, 1)
        cv.ShowImage('w', roi)
        return dir_vec  # return the back-direction vector
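
The core trick in get_back_dir is geometric: paint a thick black line along the T's main axis so the plate splits into two blobs, then point from the centroid to the midpoint of those blobs. A minimal standalone sketch of the same idea with the modern cv2/numpy API (assuming OpenCV 4.x; the function name and the binary 'mask' input are illustrative assumptions, not part of the original project):

import cv2
import numpy as np

def split_hat_back_dir(mask, centroid, t_dir, thickness):
    # 'mask' is a single-channel binary image of the T-plate; 'centroid' and
    # 't_dir' are (x, y) tuples. All names here are illustrative assumptions.
    roi = mask.copy()
    p1 = (int(centroid[0] + t_dir[0]), int(centroid[1] + t_dir[1]))
    p2 = (int(centroid[0] - t_dir[0]), int(centroid[1] - t_dir[1]))
    # erase the 'stand' of the T so its hat splits into two blobs
    cv2.line(roi, p1, p2, 0, thickness)

    # OpenCV 4.x returns (contours, hierarchy)
    contours, _ = cv2.findContours(roi, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours = sorted(contours, key=cv2.contourArea)[-2:]

    centers = []
    for c in contours:
        m = cv2.moments(c)
        if m['m00']:
            centers.append((m['m10'] / m['m00'], m['m01'] / m['m00']))
    if not centers:
        return None
    mid = np.mean(centers, axis=0)
    return (int(mid[0] - centroid[0]), int(mid[1] - centroid[1]))

The returned vector points from the centroid toward the 'back' of the plate, matching what dir_vec represents above.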
Example #2
    def detect_objects(self, prev_bal_center, prev_blu_center, prev_ylw_center,
                       prev_blu_dir, prev_ylw_dir):
        # get camera or dummy frame and undistort
        img = self.cam_.get_frame()
        # The frame is 640x480; crop the borders to remove lens distortion.
        # Usage: cv.GetSubRect(image, (x, y, width, height))
        # TODO: Use homography info, current crop is hardcoded
        img = cv.GetSubRect(img, (10, 70, 620, 340))  # crop
        size = cv.GetSize(img)

        blu_dir = ylw_dir = None
        bal_center = blu_center = ylw_center = None
        trust_bal = trust_ylw = trust_blu = True

        # create HSV and RGB images for later use
        hsv_img = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
        rgb_img = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img, rgb_img, cv.CV_BGR2RGB)

        # detect ball using hsv_img
        red_bin_img, bal_center = self.detect_ball(hsv_img, (3, 3), 3, (3, 3),
                                                   3)
        if not bal_center:
            bal_center = prev_bal_center
            trust_bal = False

        # detect blue robot using hsv_img
        blu_trsh = self.blu_color_.in_range_s(hsv_img)  # threshold
        erd_mat = (1, 1)
        erd = 1
        dil_mat = (2, 2)
        dil = 5  # erode/dilate
        cv.Erode(
            blu_trsh, blu_trsh,
            cv.CreateStructuringElementEx(erd_mat[0], erd_mat[1], 0, 0, 0),
            erd)
        cv.Dilate(
            blu_trsh, blu_trsh,
            cv.CreateStructuringElementEx(dil_mat[0], dil_mat[1], 0, 0, 0),
            dil)
        cv.Erode(
            blu_trsh, blu_trsh,
            cv.CreateStructuringElementEx(erd_mat[0] * 6, erd_mat[1] * 6, 0, 0,
                                          0), erd)

        blu_bin_img, blu_center, blu_dir, b_ang = self.detect_robot(blu_trsh)

        # detect yellow robot using hsv_img and invert the threshold
        ylw_trsh = self.ylw_color_.in_range_s(hsv_img)  # threshold
        cv.Not(ylw_trsh, ylw_trsh)  # invert
        erd_mat = (1, 1)
        erd = 3  # erode
        cv.Erode(
            ylw_trsh, ylw_trsh,
            cv.CreateStructuringElementEx(erd_mat[0] * 2, erd_mat[1] * 2, 0, 0,
                                          0), 2)

        ylw_bin_img, ylw_center, ylw_dir, y_ang = self.detect_robot(ylw_trsh)

        # fix values if current data cannot be trusted
        # ball
        if not prev_bal_center:
            prev_bal_center = bal_center
            trust_bal = False
        if trust_bal and bal_center: prev_bal_center = bal_center

        # blue robot
        if not blu_center:
            blu_center = prev_blu_center
            blu_dir = prev_blu_dir
        else:
            prev_blu_center = blu_center
            prev_blu_dir = blu_dir
        if not b_ang: b_ang = -1

        # yellow robot
        if not ylw_center:
            ylw_center = prev_ylw_center
            ylw_dir = prev_ylw_dir
        else:
            prev_ylw_center = ylw_center
            prev_ylw_dir = ylw_dir
        if not y_ang: y_ang = -1

        # fix coordinates because of difference between thresholded image
        # and real image
        bal_center = Math.add_vectors(bal_center, (8, 8))
        blu_center = Math.add_vectors(blu_center, (6, 7))
        # It seems yellow does not need correction anymore
        # ylw_center = Math.add_vectors(ylw_center, (6, 7))

        sys.stdout.write('\b' * 106 +
                         'BAL (%-4.4s,%-4.4s) BLU (%-4.4s,%-4.4s,%-4.4s)' %
                         (str(bal_center[0]) if trust_bal else 'X',
                          str(bal_center[1]) if trust_bal else 'X',
                          str(blu_center[0]) if trust_blu else 'X',
                          str(blu_center[1]) if trust_blu else 'X',
                          str(int(round(b_ang))) if trust_blu else 'X') +
                         ' YLW (%-4.4s,%-4.4s,%-4.4s) ' %
                         (str(ylw_center[0]) if trust_ylw else 'X',
                          str(ylw_center[1]) if trust_ylw else 'X',
                          str(int(round(y_ang))) if trust_ylw else 'X') +
                         'FPS: %d     ' % self.fps_)

        # adding to cache
        Locator.add_data_tuple_to_ball_cache((bal_center, time.time()))
        Locator.add_data_tuple_to_yellow_cache((ylw_center, time.time()))
        Locator.add_data_tuple_to_blue_cache((blu_center, time.time()))

        # adding to smoothed caches
        Locator.add_data_tuple_to_smoothed_ball_cache(
            (bal_center, time.time()))
        Locator.add_data_tuple_to_smoothed_yellow_cache(
            (ylw_center, time.time()))
        Locator.add_data_tuple_to_smoothed_blue_cache(
            (blu_center, time.time()))

        # check for a large jump between consecutive frames and pick the
        # velocity averaging window accordingly
        previous_ball_frame, last_ball_frame = Locator.simple_get_members(
            Locator.ball_cache, 1)
        previous_blu_frame, last_blu_frame = Locator.simple_get_members(
            Locator.blue_cache, 1)
        previous_ylw_frame, last_ylw_frame = Locator.simple_get_members(
            Locator.yellow_cache, 1)

        if Math.ecl_dist(previous_ball_frame[0], last_ball_frame[0]) > 3:
            bal_vel = Locator.calc_velocity_vector_ball(1)
        else:
            bal_vel = Locator.calc_velocity_vector_ball(10)
        if Math.ecl_dist(previous_blu_frame[0], last_blu_frame[0]) > 3:
            blu_vel = Locator.calc_velocity_vector_blue(1)
        else:
            blu_vel = Locator.calc_velocity_vector_blue(10)
        if Math.ecl_dist(previous_ylw_frame[0], last_ylw_frame[0]) > 3:
            ylw_vel = Locator.calc_velocity_vector_yellow(1)
        else:
            ylw_vel = Locator.calc_velocity_vector_yellow(10)

        # update centroids
        bal_center = self.bal_center_ = Locator.smoothed_ball_cache[
            len(Locator.smoothed_ball_cache) - 1][0]
        blu_center = self.blu_robot_center_ = Locator.blue_cache[
            len(Locator.smoothed_blue_cache) - 1][0]
        ylw_center = self.ylw_robot_center_ = Locator.yellow_cache[
            len(Locator.smoothed_yellow_cache) - 1][0]

        # update direction vectors
        self.blu_dir_ = blu_dir
        self.ylw_dir_ = ylw_dir

        # update 'trust' variables
        # TODO: Review these, they may no longer be needed
        self.trust_bal_ = trust_bal
        self.trust_blu_ = trust_blu
        self.trust_ylw_ = trust_ylw

        # update velocities from cache
        self.bal_vel_ = bal_vel
        self.blu_vel_ = blu_vel
        self.ylw_vel_ = ylw_vel

        # send information to data stream subscribers
        self.server_.broadcast_to_subscribers(self.get_state())

        # display visual feedback if there is something to display
        if len(V_SETT.DISPLAY_FEED):
            VisionGUI.display_visual_feedback(
                img, red_bin_img, blu_bin_img, ylw_bin_img,
                (bal_center, blu_center, ylw_center, blu_dir, ylw_dir),
                (self.l_goal_t_, self.l_goal_b_, self.r_goal_t_,
                 self.r_goal_b_), (self.pitch_tl_, self.pitch_bl_,
                                   self.pitch_tr_, self.pitch_br_),
                (trust_bal, trust_blu, trust_ylw), (bal_vel, blu_vel, ylw_vel))

        return (prev_bal_center, prev_blu_center, prev_ylw_center,
                prev_blu_dir, prev_ylw_dir)
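
The erode/dilate sequence in the middle of detect_objects is plain morphological cleanup of the thresholded mask. A rough cv2 equivalent, offered only as a sketch (the kernel sizes mirror the (1, 1) and (2, 2) tuples above, and the function name is made up for illustration):

import cv2
import numpy as np

def clean_blue_mask(mask):
    # mirror the erode -> dilate -> erode sequence applied to blu_trsh above;
    # kernel sizes follow the original tuples and are not tuned values
    erd_kernel = np.ones((1, 1), np.uint8)
    dil_kernel = np.ones((2, 2), np.uint8)
    big_kernel = np.ones((6, 6), np.uint8)
    mask = cv2.erode(mask, erd_kernel, iterations=1)   # drop isolated noise
    mask = cv2.dilate(mask, dil_kernel, iterations=5)  # grow the surviving blob
    mask = cv2.erode(mask, big_kernel, iterations=1)   # shrink it back down
    return mask

The same pattern, reduced to a single stronger erode, is applied to the inverted yellow threshold before detect_robot is called.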
Example #3
class VisionGUI:

    WIN_COLOR = 'colored feed'
    WIN_RED_BIN = 'red binary image'
    WIN_BLU_BIN = 'blu binary image'
    WIN_YLW_BIN = 'ylw binary image'

    # TODO: Replace integer values with these constants all over the application
    COLOR_FEED = 0
    RED_BIN_FEED = 1
    BLU_BIN_FEED = 2
    YLW_BIN_FEED = 3
    GUI_SLIDERS = 4

    def __init__():
        abstract  # this class is used statically and is not meant to be instantiated

    def create_gui(red_color, blu_color, ylw_color):
        if 0 in V_SETT.DISPLAY_FEED:
            cv.NamedWindow(VisionGUI.WIN_COLOR, 1)

        if 1 in V_SETT.DISPLAY_FEED:
            cv.NamedWindow(VisionGUI.WIN_RED_BIN, 1)
            if 4 in V_SETT.DISPLAY_FEED:
                VisionGUI.atch_trackbars_to_win(VisionGUI.WIN_RED_BIN,
                                                red_color)
            else:
                VisionGUI.deatch_trackbars(VisionGUI.WIN_RED_BIN)

        if 2 in V_SETT.DISPLAY_FEED:
            cv.NamedWindow(VisionGUI.WIN_BLU_BIN, 1)
            if 4 in V_SETT.DISPLAY_FEED:
                VisionGUI.atch_trackbars_to_win(VisionGUI.WIN_BLU_BIN,
                                                blu_color)
            else:
                VisionGUI.deatch_trackbars(VisionGUI.WIN_BLU_BIN)

        if 3 in V_SETT.DISPLAY_FEED:
            cv.NamedWindow(VisionGUI.WIN_YLW_BIN, 1)
            if 4 in V_SETT.DISPLAY_FEED:
                VisionGUI.atch_trackbars_to_win(VisionGUI.WIN_YLW_BIN,
                                                ylw_color)
            else:
                VisionGUI.deatch_trackbars(VisionGUI.WIN_YLW_BIN)

    create_gui = staticmethod(create_gui)

    def display_visual_feedback(img, red_bin_img, blu_bin_img, ylw_bin_img,
                                (bal_center, blu_center, ylw_center, blu_dir,
                                 ylw_dir), (l_goal_t_, l_goal_b_, r_goal_t_,
                                            r_goal_b_), (pitch_tl_, pitch_bl_,
                                                         pitch_tr_, pitch_br_),
                                (trust_bal, trust_blu,
                                 trust_ylw), (bal_vel, blu_vel, ylw_vel)):

        if 0 in V_SETT.DISPLAY_FEED:
            # display robot and ball centers
            # Ball
            cv.Circle(img, bal_center, 4, ColorSpace.RGB_BLACK, 2)
            # ball velocity
            cv.Line(
                img, bal_center,
                Math.add_vectors(
                    bal_center, Math.int_vec(Math.scale_vec(bal_vel,
                                                            1 / 60.0))),
                ColorSpace.RGB_WHITE, 1, cv.CV_AA)

            # Blue
            left = Math.rotate_vec(blu_dir, -90)
            left_point = Math.add_vectors(left, blu_center)
            cv.Line(img, Math.int_vec(Math.add_vectors(left_point, blu_dir)),
                    Math.int_vec(Math.sub_vectors(left_point, blu_dir)), 1)
            right_point = Math.add_vectors(Math.invert_vec(left), blu_center)
            cv.Line(img, Math.int_vec(Math.add_vectors(right_point, blu_dir)),
                    Math.int_vec(Math.sub_vectors(right_point, blu_dir)), 1)
            cv.Circle(img, blu_center, 4, ColorSpace.RGB_BLU, -2)
            cv.Circle(img, blu_center, 20, ColorSpace.RGB_BLACK, 1)
            # blue_dir
            cv.Line(img, blu_center, Math.add_vectors(blu_center, blu_dir),
                    ColorSpace.RGB_BLACK, 1, cv.CV_AA)
            # blu velocity
            cv.Line(
                img, blu_center,
                Math.add_vectors(
                    blu_center, Math.int_vec(Math.scale_vec(blu_vel,
                                                            1 / 60.0))),
                ColorSpace.RGB_WHITE, 1, cv.CV_AA)

            # Yellow
            left = Math.rotate_vec(ylw_dir, -90)
            left_point = Math.add_vectors(left, ylw_center)
            cv.Line(img, Math.int_vec(Math.add_vectors(left_point, ylw_dir)),
                    Math.int_vec(Math.sub_vectors(left_point, ylw_dir)), 1)
            right_point = Math.add_vectors(Math.invert_vec(left), ylw_center)
            cv.Line(img, Math.int_vec(Math.add_vectors(right_point, ylw_dir)),
                    Math.int_vec(Math.sub_vectors(right_point, ylw_dir)), 1)
            cv.Circle(img, ylw_center, 4, ColorSpace.RGB_YLW, -2)
            cv.Circle(img, ylw_center, 20, ColorSpace.RGB_BLACK, 1)
            # ylw_dir
            cv.Line(img, ylw_center, Math.add_vectors(ylw_center, ylw_dir),
                    ColorSpace.RGB_BLACK, 1, cv.CV_AA)
            # ylw velocity
            cv.Line(
                img, ylw_center,
                Math.add_vectors(
                    ylw_center, Math.int_vec(Math.scale_vec(ylw_vel,
                                                            1 / 60.0))),
                ColorSpace.RGB_WHITE, 1, cv.CV_AA)

            # display goal lines
            cv.Circle(img, l_goal_t_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, l_goal_b_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, r_goal_t_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, r_goal_b_, 1, ColorSpace.RGB_WHITE, 2)

            # display pitch
            cv.Circle(img, pitch_tl_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, pitch_bl_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, pitch_tr_, 1, ColorSpace.RGB_WHITE, 2)
            cv.Circle(img, pitch_br_, 1, ColorSpace.RGB_WHITE, 2)

            # show image
            cv.ShowImage(VisionGUI.WIN_COLOR, img)

        # display binary images if parameters given
        if 1 in V_SETT.DISPLAY_FEED:
            cv.Circle(red_bin_img, bal_center, 1, ColorSpace.RGB_BLACK, 3)
            cv.Circle(red_bin_img, bal_center, 40, ColorSpace.RGB_WHITE, 1)
            cv.ShowImage(VisionGUI.WIN_RED_BIN, red_bin_img)

        if 2 in V_SETT.DISPLAY_FEED:
            cv.Circle(blu_bin_img, blu_center, 1, ColorSpace.RGB_BLACK, 3)
            cv.Circle(blu_bin_img, blu_center, 40, ColorSpace.RGB_WHITE, 1)
            cv.ShowImage(VisionGUI.WIN_BLU_BIN, blu_bin_img)

        if 3 in V_SETT.DISPLAY_FEED:
            cv.Circle(ylw_bin_img, ylw_center, 1, ColorSpace.RGB_BLACK, 3)
            cv.Circle(ylw_bin_img, ylw_center, 40, ColorSpace.RGB_WHITE, 1)
            cv.ShowImage(VisionGUI.WIN_YLW_BIN, ylw_bin_img)

        cv.WaitKey(1000 / 35)
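
create_gui above gates each window and its tuning trackbars on the V_SETT.DISPLAY_FEED flags. For context, the trackbar-attachment idea behind atch_trackbars_to_win could be sketched with the modern cv2 API roughly as follows; the window name, dict keys, and function name are assumptions, not the project's real interface:

import cv2

def attach_hsv_trackbars(win_name, hsv_range):
    # hsv_range: dict with 'h_min', 'h_max', 's_min', 's_max', 'v_min', 'v_max';
    # each trackbar writes its position straight back into the dict
    cv2.namedWindow(win_name)
    for key, maximum in (('h_min', 179), ('h_max', 179),
                         ('s_min', 255), ('s_max', 255),
                         ('v_min', 255), ('v_max', 255)):
        cv2.createTrackbar(key, win_name, hsv_range[key], maximum,
                           lambda pos, k=key: hsv_range.__setitem__(k, pos))

A detection loop would then rebuild its cv2.inRange() bounds from hsv_range on every frame, which is presumably what the GUI_SLIDERS flag (the value 4 checked above) enables.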