def point_to_vector(p):
    """Converts a single camera contour point into a Vec2d in millimeters."""
    [[x, y]] = p
    x = x * X_RES_MM
    y = (y * Y_RES_MM) + Y_OFFSET_MM
    # y *= -1
    # x *= -1
    v = Vec2d.from_point(x, y)
    # rotate into the robot frame
    return v.with_angle(v.angle + 270)
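
# Sketch: why the nested [[x, y]] unpacking works. OpenCV stores each
# contour point as a 1x2 nested array, so a raw contour point can be
# passed straight into point_to_vector. Hypothetical point for illustration:
def _point_unpacking_example():
    import numpy as np
    p = np.array([[12, 34]])  # one OpenCV-style contour point
    [[x, y]] = p              # same destructuring as point_to_vector
    assert (x, y) == (12, 34)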
    x_inc = math.cos(math.radians(angle))
    y_inc = math.sin(math.radians(angle))
    start_x = x
    start_y = y
    # march along the ray until we leave the map or hit an occupied cell
    while 0 <= int(x) < MAP_SIZE_PIXELS and 0 <= int(y) < MAP_SIZE_PIXELS:
        val = grid[int(y)][int(x)]
        if val < prob_threshold and val != UNKNOWN:
            # hit: return the distance traveled, in pixels
            return math.sqrt((x - start_x) ** 2 + (y - start_y) ** 2)
        x += x_inc
        y -= y_inc  # grid y grows downward, world y grows upward
    return None


def extract_repulsors((x, y), grid):
    """Casts a ray every degree and returns obstacle vectors in millimeters."""
    pose = Vec2d.from_point(x, y)

    # convert the pose from meters to grid pixels (map origin is centered)
    scale = (MAP_SIZE_PIXELS / 2.0) / (MAP_SIZE_METERS / 2.0)
    x = int((MAP_SIZE_PIXELS / 2.0) + pose.x * scale)
    y = int((MAP_SIZE_PIXELS / 2.0) - pose.y * scale)

    repulsors = []
    for i in xrange(360):
        ray = trace_ray(grid, x, y, 80, i)
        if ray is not None:
            # convert the hit distance back from pixels to millimeters
            v = Vec2d(i, ray * 1000.0 / MAP_SIZE_PIXELS * MAP_SIZE_METERS)
            repulsors.append(v)
    return repulsors
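
# Sketch: a toy sanity check for trace_ray, assuming the occupancy
# convention above (values below the threshold are obstacles, UNKNOWN
# cells are skipped). The 5x5 grid is illustrative only; the real
# MAP_SIZE_PIXELS is far larger.
#
#     toy_grid = [[255] * 5 for _ in range(5)]  # all free
#     toy_grid[2][4] = 0  # obstacle two cells right of (2, 2)
#     trace_ray(toy_grid, 2, 2, 80, 0)
#
# At angle 0 the march steps x += cos(0) = 1 and y -= sin(0) = 0 each
# iteration, hits toy_grid[2][4] (0 < 80 and != UNKNOWN), and returns
# sqrt((4 - 2)**2 + 0) == 2.0 -- the hit distance in pixels.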
def update_control((gps, costmap, pose, line_angle, state)):
    """Figures out what we need to do based on the current state and map."""
    map_pose = costmap.transform
    transform = pose.transform.translation
    map_transform = map_pose.transform.translation

    # offset of the robot within the map, rotated into the map frame
    diff = transform.x - map_transform.x, transform.y - map_transform.y
    diff = Vec2d.from_point(diff[0], diff[1])
    map_rotation = get_rotation(map_pose.transform)
    rotation = get_rotation(pose.transform)
    diff = diff.with_angle(diff.angle + map_rotation)
    diff_rotation = -rotation + map_rotation

    path = generate_path(costmap.costmap_bytes, diff_rotation, (diff.x, diff.y))
    if path is None:
        path = []
    path_debug.publish(
        Path(header=Header(frame_id='map'),
             poses=[PoseStamped(header=Header(frame_id='map'),
                                pose=Pose(position=Point(x=x_to_m(p[0]),
                                                         y=y_to_m(p[1]))))
                    for p in path]))
    print state

    # calculate theta_dot based on the current state
    if state['state'] == LINE_FOLLOWING or \
            state['state'] == TRACKING_THIRD_WAYPOINT:
        offset = 10
        if len(path) < offset + 1:
            goal = Vec2d(0, ATTRACTOR_THRESHOLD_MM)  # always drive forward
        else:
            point = path[offset]
            goal = Vec2d.from_point(x_to_m(point[0] + 0.5),
                                    y_to_m(point[1] + 0.5))
            goal = goal.with_magnitude(ATTRACTOR_THRESHOLD_MM)

        rotation = -line_angle.data  # rotate to follow lines
        if abs(rotation) < 10:  # dead-band to avoid twitching on small errors
            rotation = 0
        rotation /= 1.0  # damping divisor, currently a no-op
    else:  # FIXME
        # track the waypoint
        goal = calculate_gps_heading(gps, WAYPOINTS[state['tracking'] - 1])
        rotation = to180(goal.angle)
        # don't need to crab for GPS waypoint, steering will handle that
        goal = goal.with_angle(0)
        # state_debug.publish(str(goal))

    # calculate translation based on obstacles
    repulsors = extract_repulsors((diff.x, diff.y), costmap.map_bytes)
    potential = compute_potential(repulsors, goal)
    obstacle_debug.publish(
        PointCloud(header=Header(frame_id=topics.ODOMETRY_FRAME),
                   points=[Point32(x=v.x / 1000.0, y=v.y / 1000.0)
                           for v in repulsors]))
    translation = to180(potential.angle)
    # rospy.loginfo('translation = %s, rotation = %s, speed = %s',
    #               translation, rotation, INITIAL_SPEED)

    # don't rotate if bender needs to translate away from a line
    # if state['state'] == LINE_FOLLOWING:
    #     translation_threshhold = 60
    #     rotation_throttle = 0
    #     if np.absolute(translation) > translation_threshhold:
    #         rotation = rotation * rotation_throttle

    if state['state'] == CLIMBING_UP:
        speed = RAMP_SPEED
        rotation = gps['roll'] * -10  # counter-steer against roll on the ramp
        translation = 0
    elif state['state'] == CLIMBING_DOWN:
        speed = SLOW_SPEED
        rotation = gps['roll'] * 10
        translation = 0
    else:
        # slow down when a lidar obstacle is near our heading
        obstacles = extract_repulsors((diff.x, diff.y), costmap.lidar_bytes)
        obstacles = [o for o in obstacles if abs(to180(o.angle)) < 45]
        # compare magnitudes (mm), not the Vec2d objects themselves
        min_dist = min(o.mag for o in obstacles) \
            if len(obstacles) > 0 else DIST_CUTOFF
        if min_dist < DIST_CUTOFF:
            speed = SLOW_SPEED
        else:
            speed = FAST_SPEED

    update_drivetrain(translation, rotation, speed)

    # rviz debug
    q = quaternion_from_euler(0, 0, math.radians(translation))
    heading_debug.publish(
        PoseStamped(header=Header(frame_id='map'),
                    pose=Pose(position=Point(x=diff.x, y=diff.y),
                              orientation=Quaternion(q[0], q[1], q[2], q[3]))))
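
# Sketch of the angle normalization update_control leans on. The real
# to180 lives elsewhere in this repo; the behavior it needs is to wrap
# any angle in degrees into (-180, 180] so the sign encodes turn
# direction.
def _to180_sketch(angle):
    angle = angle % 360.0  # wrap into [0, 360)
    if angle > 180.0:
        angle -= 360.0     # fold the upper half onto (-180, 0)
    return angle
# e.g. _to180_sketch(270) == -90.0, _to180_sketch(540) == 180.0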
def camera_processor():
    # open a video capture feed
    cam = cv2.VideoCapture(cam_name)

    # init ros & camera stuff
    # pub = rospy.Publisher(topics.CAMERA, String, queue_size=10)
    no_barrel_pub = rospy.Publisher(topics.CAMERA, String, queue_size=10)
    line_angle_pub = rospy.Publisher(topics.LINE_ANGLE, Int16, queue_size=0)

    global lidar
    lidar_obs = rx_subscribe(topics.LIDAR)
    Observable.combine_latest(lidar_obs, lambda n: n) \
        .subscribe(update_lidar)

    rospy.init_node('camera')
    rate = rospy.Rate(10)
    exposure_init = False

    rawWidth = 640
    rawHeight = 480
    # camera_info = CameraInfo(53, 38, 76, 91, 134)  # ground level, half (134 inches out)
    camera_info = CameraInfo(53, 40, 76, 180, 217,
                             croppedWidth, croppedHeight)  # ground level, 3/4 out

    while not rospy.is_shutdown():
        # grab a frame
        ret_val, img = cam.read()

        # camera will set its own exposure after the first frame, regardless of mode
        if not exposure_init:
            update_exposure(cv2.getTrackbarPos('exposure', 'img_HSV'))
            update_auto_white(cv2.getTrackbarPos('auto_white', 'img_HSV'))
            exposure_init = True

        # record a video simultaneously while processing
        if ret_val:
            out.write(img)

        # for debugging
        # cv2.line(img, (640 / 2, 0), (640 / 2, 480), color=(255, 0, 0), thickness=2)
        # cv2.line(img, (0, int(480 * .25)), (640, int(480 * .25)), color=(255, 0, 0), thickness=2)

        # crop down to speed processing time
        # img = cv2.imread('test_im2.jpg')
        dim = (rawWidth, rawHeight)
        img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
        cropRatio = float(croppedHeight) / float(rawHeight)
        crop_img = img[int(rawHeight * (1 - cropRatio)):rawHeight,
                       0:rawWidth]  # crops off the top 25% of the image
        cv2.imshow("cropped", crop_img)

        # process the cropped image; returns a "birds eye" view of the contours & binary image
        img_displayBirdsEye, contours = process_image(crop_img, camera_info)  # raw
        contours = convert_to_cartesian(camera_info.map_width,
                                        camera_info.map_height, contours)

        # filter barrels out of the camera contours
        vec2d_contour = contours_to_vectors(contours)  # replaces NAV
        filtered_contours = filter_barrel_lines(camera=vec2d_contour,
                                                angle_range=8,
                                                lidar_vecs=lidar,
                                                mag_cusion=300,
                                                barrel_cusion=5)

        # EXTEND THE LINES
        filtered_cartesian_contours = vectors_to_contours(filtered_contours)
        try:
            closest_filtered_contour = closest_contour(filtered_cartesian_contours)
            # print "CLOSEST CONTOUR: ", closest_filtered_contour
            x_range = 5000
            contour_lines = []
            interval = 40

            # fit just one line, then rasterize it into points every `interval` units
            line_angle, slope, intercept = calculate_line_angle(closest_filtered_contour)
            for x in range(-x_range, x_range, interval):
                y = slope * x + intercept
                v = Vec2d.from_point(x, y)
                contour_lines.append(v)
        except TypeError:  # no camera data
            contour_lines = []
            line_angle = 0

        # build the camera message with the contours and binary image
        # local_map_msg = CameraMsg(contours=contours, camera_info=camera_info)
        # filtered_map_msg = CameraMsg(contours=contour_lines, camera_info=camera_info)  # 1 polyfit contour
        c = []
        for cs in filtered_cartesian_contours:
            for v in cs:
                c.append(v)
        filtered_map_msg = CameraMsg(contours=c,
                                     camera_info=camera_info)  # all raw contours

        # make a bytestream and pass it off to ros
        # local_map_msg_string = local_map_msg.pickleMe()
        filtered_map_msg_string = filtered_map_msg.pickleMe()
        # rospy.loginfo(local_map_msg_string)
        # pub.publish(local_map_msg_string)
        no_barrel_pub.publish(filtered_map_msg_string)
        line_angle_pub.publish(line_angle)

        if cv2.waitKey(1) == 27:
            break
        rate.sleep()
    cv2.destroyAllWindows()
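
# Sketch: the "EXTEND THE LINES" loop above rasterizes the fitted line
# y = slope * x + intercept back into a point contour, so downstream
# consumers that expect contours (not line equations) can ingest it
# unchanged. A hypothetical standalone version of the same idea:
def _rasterize_line(slope, intercept, x_range=5000, interval=40):
    return [Vec2d.from_point(x, slope * x + intercept)
            for x in range(-x_range, x_range, interval)]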
def create_vector((quality, angle, dist)):
    # quality is unused; angle/dist come straight from the lidar scan
    d = Vec2d(angle, dist)
    return Vec2d.from_point(-d.x, d.y)  # lidar is backwards, invert x
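
# Sketch: negating x mirrors each reading across the y-axis. Assuming
# Vec2d measures angles in degrees counterclockwise from +x, that maps
# an angle a to 180 - a, so a return the backwards-mounted lidar reports
# straight ahead of itself lands behind the robot. Hypothetical reading:
#
#     create_vector((15, 0, 1000.0))  # quality 15 is discarded; ~Vec2d(180, 1000)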
@pytest.mark.parametrize('x, y, expected', [
    (-100, -100, Vec2d(0, 100)),
    # ... (remaining cases elided)
])
def test_from_point(x, y, expected):
    actual = Vec2d.from_point(x, y)
    assert_equal((actual.angle, actual.mag), (expected.angle, expected.mag))