def __init__(self):
    """Set up perception, target selection, path planning and ROS I/O."""
    # Helper objects for sensing, goal choice and path generation
    self.robot_perception = RobotPerception()
    self.target_selection = TargetSelection()
    self.path_planning = PathPlanning()

    # True -> navigate toward computed targets, False -> just wander
    self.move_with_target = rospy.get_param("calculate_target")

    # Target bookkeeping flags
    self.target_exists = False
    self.inner_target_exists = False

    # Current path, its subgoals, and the index of the next subgoal
    self.path = []
    self.subtargets = []
    self.next_subtarget = 0

    # Periodic (10 Hz) check of whether the current subgoal is reached
    rospy.Timer(rospy.Duration(0.10), self.checkTarget)

    # Publishers: full path, subgoal markers, and the current target marker
    self.path_publisher = rospy.Publisher(
        rospy.get_param('path_pub_topic'), Path, queue_size=10)
    self.subtargets_publisher = rospy.Publisher(
        rospy.get_param('subgoals_pub_topic'), MarkerArray, queue_size=10)
    self.current_target_publisher = rospy.Publisher(
        rospy.get_param('curr_target_pub_topic'), Marker, queue_size=10)
def __init__(self):
    """Initialize camera capture, detection state and helper objects."""
    super(Parameter, self).__init__()
    super(Utils, self).__init__()
    super(FieldView, self).__init__()
    super(Draw, self).__init__()

    # Camera capture; force the frame size (width 1280, height 720)
    self.capture = cv2.VideoCapture(camera)
    self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
    self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)

    self.table_set = Tables()
    self.planner = PathPlanning(send=self.send)

    # Detection state flags and result containers
    self.table_detection = True
    self.detection_success = False
    self.bottle_result = [None, None, None]
    self.standing_result_image = None
    self.quit = False

    self.yukari = Yukari()

    # Separator-removal options
    self.remove_separator_middle = False
    self.use_remove_separator = True

    # Planned points (normal and mirrored variants)
    self.points = None
    self.flip_points = None

    # GUI setup: restore trackbars and prepare point clicking
    self.set_track_bar_pos(self.settings)
    self.ptlist = PointList(NPOINTS)
    self.click_mode = True
    logging.info('START DETECTION')
def __init__(self, selection_method):
    """Prepare empty goal containers and planning helpers.

    Args:
        selection_method: Name of the goal-selection strategy to use.
    """
    # Candidate goals and their scores
    self.goals_position = []
    self.goals_value = []

    # Exploration weighting parameters
    self.omega = 0.0
    self.radius = 0

    self.method = selection_method

    # Helper objects: brushfire field, topological graph, path planner
    self.brush = Brushfires()
    self.topo = Topology()
    self.path_planning = PathPlanning()
def __init__(self, selection_method):
    """Prepare goal containers, planning helpers and the previous target.

    Args:
        selection_method: Name of the goal-selection strategy to use.
    """
    # Candidate goals and their scores
    self.goals_position = []
    self.goals_value = []

    # Exploration weighting parameters
    self.omega = 0.0
    self.radius = 0

    self.method = selection_method

    # Helper objects: brushfire field, topological graph, path planner
    self.brush = Brushfires()
    self.topo = Topology()
    self.path_planning = PathPlanning()

    # Sentinel meaning "no previous target yet"
    self.previous_target = [-1, -1]
def __init__(self, selection_method):
    """Prepare goal containers, path state and planning helpers.

    Args:
        selection_method: Name of the goal-selection strategy to use.
    """
    # Candidate goals and their scores
    self.goals_position = []
    self.goals_value = []

    # Current path and the previously chosen target
    self.path = []
    self.prev_target = [0, 0]

    # Exploration weighting parameters
    self.omega = 0.0
    self.radius = 0

    self.method = selection_method

    # Helper objects: brushfire field, topological graph, path planner
    self.brush = Brushfires()
    self.topo = Topology()
    self.path_planning = PathPlanning()
    # NOTE(review): original author was unsure whether RobotPerception
    # belongs here ("can i use that?") — verify against the node design.
    self.robot_perception = RobotPerception()
def __init__(self, selection_method):
    """Initialize target-selection state for the given method.

    Args:
        selection_method: Name of the goal-selection strategy; cost-based
            methods additionally pull in robot perception and the
            ``cost_based_properties`` YAML parameters.
    """
    # Candidate goals and their scores
    self.goals_position = []
    self.goals_value = []

    # Exploration weighting parameters
    self.omega = 0.0
    self.radius = 0

    self.method = selection_method

    if self.method_is_cost_based():
        # Imported lazily so non-cost-based runs avoid the dependency
        from robot_perception import RobotPerception
        self.robot_perception = RobotPerception()
        self.cost_based_properties = rospy.get_param("cost_based_properties")
        # BUG FIX: 'threshold=numpy.nan' is rejected by NumPy >= 1.14
        # (ValueError: threshold must be non-NAN). sys.maxsize gives the
        # intended "never summarize arrays" behaviour on all versions.
        import sys
        numpy.set_printoptions(precision=3, threshold=sys.maxsize,
                               suppress=True)

    # Helper objects: brushfire field and path planner
    self.brush = Brushfires()
    self.path_planning = PathPlanning()
def __init__(self):
    """Wire up perception, path planning, target selection and ROS I/O."""
    # Helper objects for sensing and path generation
    self.robot_perception = RobotPerception()
    self.path_planning = PathPlanning()

    # True -> navigate toward computed targets, False -> just wander
    self.move_with_target = rospy.get_param("calculate_target")

    # Target bookkeeping flags
    self.target_exists = False
    self.select_another_target = 0
    self.inner_target_exists = False

    # Current path, its subgoals, and the index of the next subgoal
    self.path = []
    self.subtargets = []
    self.next_subtarget = 0

    # Watchdog: 200 ticks of the 0.10 s timer = 20 s before giving up
    # on the current subtarget
    self.count_limit = 200  # 20 sec
    self.counter_to_next_sub = self.count_limit

    self.laser_aggregation = LaserDataAggregator()

    # Periodic (10 Hz) check of whether the current subgoal is reached
    rospy.Timer(rospy.Duration(0.10), self.checkTarget)

    # Read the target-selection function from the parameter server.
    # BUG FIX: the original Python-2-only print statement also lacked the
    # space before the value ("...isfoo"); the parenthesized single-arg
    # form prints identically on both Python 2 and 3.
    self.target_selector = rospy.get_param("target_selector")
    print("The selected target function is " + self.target_selector)
    self.target_selection = TargetSelection(self.target_selector)

    # Publishers: full path, subgoal markers, and the current target marker
    self.path_publisher = rospy.Publisher(
        rospy.get_param('path_pub_topic'), Path, queue_size=10)
    self.subtargets_publisher = rospy.Publisher(
        rospy.get_param('subgoals_pub_topic'), MarkerArray, queue_size=10)
    self.current_target_publisher = rospy.Publisher(
        rospy.get_param('curr_target_pub_topic'), Marker, queue_size=10)
def __init__(self, selection_method):
    """Prepare goal containers, sonar aggregation and planning helpers.

    Args:
        selection_method: Name of the goal-selection strategy to use.
    """
    # Candidate goals and their scores
    self.goals_position = []
    self.goals_value = []

    # Exploration weighting parameters
    self.omega = 0.0
    self.radius = 0

    self.method = selection_method

    # Helper objects: brushfire field, topological graph, path planner
    self.brush = Brushfires()
    self.topo = Topology()
    self.path_planning = PathPlanning()

    # Previous target starts at (50, 50); same contents as the original
    # pair of append(50) calls
    self.previous_target = [50, 50]

    # Cached node-2 grid indices
    self.node2_index_x = 0
    self.node2_index_y = 0

    self.sonar = SonarDataAggregator()
    self.timeout_happened = 0
def __init__(self, selection_method):
    """Set up drone target selection: helpers, YAML params and gains.

    Args:
        selection_method: Name of the goal-selection strategy to use.
    """
    self.initial_time = time()
    self.method = selection_method
    self.initialize_gains = False

    # Helper objects: brushfire field, topology, planner, drone link
    self.brush = Brushfires()
    self.topology = Topology()
    self.path_planning = PathPlanning()
    self.droneConnector = DroneCommunication()

    # Parameters from the YAML file
    self.debug = True  # rospy.get_param('debug')
    self.map_discovery_purpose = rospy.get_param('map_discovery_purpose')
    self.color_evaluation_flag = rospy.get_param('color_rating')
    self.drone_color_evaluation_topic = rospy.get_param(
        'drone_pub_color_rating')
    self.evaluate_potential_targets_srv_name = rospy.get_param(
        'rate_potential_targets_srv')

    # Exploration gains (filled in by set_gain)
    self.g_color = 0.0
    self.g_brush = 0.0
    self.g_corner = 0.0
    self.g_distance = 0.0
    self.set_gain()

    if self.color_evaluation_flag:
        # Service used to rate the potential targets by color
        self.color_evaluation_service = rospy.ServiceProxy(
            self.evaluate_potential_targets_srv_name, EvaluateTargets)
        # Results of the color evaluation arrive on this topic
        self.drone_color_evaluation_sub = rospy.Subscriber(
            self.drone_color_evaluation_topic, ColorEvaluationArray,
            self.color_evaluation_cb)
        # State: set True once the evaluation of targets has completed
        self.targets_color_evaluated = False
        # Per-target color scores and corner counts near each target
        self.color_evaluation = []
        self.corner_evaluation = []
import cv2
import numpy as np
from realsense.view import FieldView
from path_planning import PathPlanning


def f(x):
    """Map an absolute table coordinate (mm) onto the trackbar scale."""
    return x - 1250


planner = PathPlanning(send=False)
size = 720, 1280, 3
v = FieldView()

window_name = 'main'
cv2.namedWindow(window_name)

# Table sliders span [1250, 3750] mm, offset so the raw slider starts at 0
for table in ('table_1', 'table_2', 'table_3'):
    cv2.createTrackbar(table, window_name, f(1250), f(3750), int)
cv2.createTrackbar('zone', window_name, 0, 1, int)
for table in ('table_1', 'table_2', 'table_3'):
    cv2.setTrackbarPos(table, window_name, f(2500))

while True:
    # Read slider positions back in absolute millimetres
    pos1 = cv2.getTrackbarPos('table_1', window_name) + 1250
    pos2 = cv2.getTrackbarPos('table_2', window_name) + 1250
    pos3 = cv2.getTrackbarPos('table_3', window_name) + 1250
    zone = cv2.getTrackbarPos('zone', window_name)

    move_table_pos = (pos1, pos2, pos3)
    v.zone = zone
    points, flip_points = planner.main((pos1, pos2, pos3, zone))
def __init__(self):
    """Set up the turtlebot navigation node: helpers, YAML parameters,
    publishers, timers and the optional OGM-enhancement machinery."""
    # Helper objects for sensing and path generation
    self.robot_perception = RobotPerception()
    self.path_planning = PathPlanning()

    # Parameters from the YAML file
    self.move_with_target = rospy.get_param("calculate_target")
    self.target_selector = rospy.get_param("target_selector")
    self.turtlebot_path_topic = rospy.get_param('turtlebot_path_topic')
    self.turtlebot_subgoals_topic = rospy.get_param(
        'turtlebot_subgoals_topic')
    self.turtlebot_curr_target_topic = rospy.get_param(
        'turtlebot_curr_target_topic')
    self.map_frame = rospy.get_param('map_frame')
    self.debug = rospy.get_param('debug')
    self.turtlebot_save_progress_images = rospy.get_param(
        'turtlebot_save_progress_images')
    # Flag to enable/disable OGM enhancement via the drone
    self.limit_exploration_flag = rospy.get_param('limit_exploration')

    # Target bookkeeping flags
    self.target_exists = False
    self.inner_target_exists = False

    # Current path, its subgoals, and the index of the next subgoal
    self.path = []
    self.subtargets = []
    self.next_subtarget = 0

    # Subtarget watchdog: 100 ticks of the 0.10 s timer (~10 s);
    # NOTE(review): the original comment said "20 sec" — verify intent.
    self.count_limit = 100
    self.counter_to_next_sub = self.count_limit

    # Timer thread used for resetting navigation (limit in seconds)
    self.time_limit = 150
    self.timerThread = TimerThread(self.time_limit)

    # Periodic (10 Hz) check of whether the current subgoal is reached
    rospy.Timer(rospy.Duration(0.10), self.checkTarget)

    # Target-selection strategy chosen via the parameter server
    self.target_selection = TargetSelection(self.target_selector)

    # Publishers: full path, subgoal markers, and the current target marker
    self.path_publisher = rospy.Publisher(self.turtlebot_path_topic,
                                          Path, queue_size=10)
    self.subtargets_publisher = rospy.Publisher(
        self.turtlebot_subgoals_topic, MarkerArray, queue_size=10)
    self.current_target_publisher = rospy.Publisher(
        self.turtlebot_curr_target_topic, Marker, queue_size=10)

    # Testing switch: True enables the timer that resets navigation
    self.timer_flag = True

    # Speed limits
    self.max_linear_velocity = rospy.get_param('max_linear_speed')
    self.max_angular_velocity = rospy.get_param('max_angular_speed')

    # OGM enhancement: first run is handled specially
    self.first_run_flag = True

    # Corners of the map container, used for the visualize message
    self.map_size = np.array(
        [[rospy.get_param('x_min_size'), rospy.get_param('y_min_size')],
         [rospy.get_param('x_min_size'), rospy.get_param('y_max_size')],
         [rospy.get_param('x_max_size'), rospy.get_param('y_min_size')],
         [rospy.get_param('x_max_size'), rospy.get_param('y_max_size')]],
        dtype=np.float64)

    if self.limit_exploration_flag:
        # Bridge for converting ROS images to OpenCV
        self.bridge = CvBridge()

        # Drone image/OGM state
        self.drone_image = []      # drone OGM image converted to CV
        self.ogm_in_cv = []        # OGM in a CV-compatible form
        self.drone_yaw = 0.0       # current drone yaw
        self.drone_map_pose = []   # current drone position in the map

        # Service providing the exploration limits from the drone
        self.drone_explore_limit_srv_name = rospy.get_param(
            'drone_explore_limit_srv')
        self.limits_exploration_service = rospy.ServiceProxy(
            self.drone_explore_limit_srv_name, OgmLimits)

        # Subscriber delivering images from the drone camera
        self.drone_pub_image_topic = rospy.get_param(
            'drone_pub_image_topic')
        self.drone_image_sub = rospy.Subscriber(self.drone_pub_image_topic,
                                                Image, self.drone_image_cb)

        # Flags controlling the OGM enhancement loop and image capture
        self.done_enhancement = False
        self.drone_take_image = False

        # FIXME: Testing — match against known location vs. drone pose
        self.match_with_limit_pose = True
        self.match_with_drone_pose = False
import cv2
import numpy as np
from realsense.view import FieldView
from path_planning import PathPlanning


def f(x):
    """Map an absolute table coordinate (mm) onto the trackbar scale."""
    return x - 1250


planner = PathPlanning(send=True)
size = 720, 1280, 3
v = FieldView()

window_name = 'main'
cv2.namedWindow(window_name)

# Fixed table positions and zone for this run (no trackbars)
pos1, pos2, pos3, zone = 1250, 1250, 2500, 1
move_table_pos = (pos1, pos2, pos3)

# Plan once and push the result out immediately
points, flip_points = planner.main((pos1, pos2, pos3, zone))
planner.send(points, flip_points, retry=False)

# Render the planned field, scaled to 65 % of 1280x720
v.zone = zone
img = v.draw_field(move_table_pos, points)
img = cv2.resize(img, (int(1280 * 0.65), int(720 * 0.65)))
cv2.imshow(window_name, img)
k = cv2.waitKey(0)

while True:
    k = cv2.waitKey(1)
    if k == ord('i'):
        results = [True, True, True]