def __init__(self, cols, rows, widthMM, heightMM,
             # the x, y and th of the current location can be passed
             currentRow=-1, currentCol=-1, currentTh=0,
             goalRow=-1, goalCol=-1, gridUpdate="",
             image=None, grid=None, gridResize=1.0,
             showIter=0, title=""):
    self.markerToolkit = MarkerToolkit()
    self.x_localizeMM = 0
    self.y_localizeMM = 0
    self.thr_localize = 0
    self.x_last_marker_localizeMM = 0
    self.x_last_marker_robotMM = 0
    self.y_last_marker_localizeMM = 0
    self.y_last_marker_robotMM = 0
    self.thr_last_marker_localize = 0
    self.thr_last_marker_robot = 0
    self.resetMarkers()
    OccupancyGrid.__init__(self, cols=cols, rows=rows, widthMM=widthMM,
                           heightMM=heightMM, currentRow=currentRow,
                           currentCol=currentCol, currentTh=currentTh,
                           goalRow=goalRow, goalCol=goalCol, image=image,
                           grid=grid, gridResize=gridResize,
                           showIter=showIter, title=title)
def mergeOtherGridInfo(self, otherGrid):
    # find two common markers
    commonKeys = []
    for key in self.markers.keys():
        if otherGrid.markerExists(key):
            commonKeys.append(key)
    if len(commonKeys) < 2:
        print("Cannot find two common markers; merge failed!")
        return
    # for now we just use the first two common markers found
    thisGridAnchor1Row = self.getMarkerRow(commonKeys[0])
    thisGridAnchor1Col = self.getMarkerCol(commonKeys[0])
    thisGridAnchor2Row = self.getMarkerRow(commonKeys[1])
    thisGridAnchor2Col = self.getMarkerCol(commonKeys[1])
    otherGridAnchor1Row = otherGrid.getMarkerRow(commonKeys[0])
    otherGridAnchor1Col = otherGrid.getMarkerCol(commonKeys[0])
    otherGridAnchor2Row = otherGrid.getMarkerRow(commonKeys[1])
    otherGridAnchor2Col = otherGrid.getMarkerCol(commonKeys[1])
    return OccupancyGrid.mergeOtherGridInfo(
        self, otherGrid,
        thisGridAnchor1Row=thisGridAnchor1Row,
        thisGridAnchor1Col=thisGridAnchor1Col,
        thisGridAnchor2Row=thisGridAnchor2Row,
        thisGridAnchor2Col=thisGridAnchor2Col,
        otherGridAnchor1Row=otherGridAnchor1Row,
        otherGridAnchor1Col=otherGridAnchor1Col,
        otherGridAnchor2Row=otherGridAnchor2Row,
        otherGridAnchor2Col=otherGridAnchor2Col,
    )
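# Usage sketch (illustrative only; the variable names below are hypothetical):
# merging a map received from another robot into this grid. Both grids are
# assumed to have already recorded at least two markers with the same names
# while exploring, so mergeOtherGridInfo can locate its two anchor cells.
my_grid.mergeOtherGridInfo(other_robots_grid)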
def drawCell(self, row, col, matrix, maxval, path, stepByStep, removeBelow=0):
    # see the OccupancyGrid redraw comment for more info on the coordinate
    # system.
    OccupancyGrid.drawCell(self, row, col, matrix, maxval, path, stepByStep,
                           removeBelow)
    x = col
    y = row
    if self.hasMarker(row=row, col=col):
        self.canvas.create_text((x + .5) * self.colScale,
                                (y + .5) * self.rowScale,
                                tag='label',
                                text=self.printMarker(row=row, col=col),
                                anchor="center",
                                fill='orange')
# Since our "robot" is actually a shape we need to define our own 2d pose func # current_pose = get_2d_pose(robot) target_pose = get_2d_pose(goal) current_pose = robot.get_position(vision_sensor)[:2] + robot.get_orientation()[-1:] current_pose = (-current_pose[0], current_pose[1], current_pose[2]) print(current_pose) print(get_2d_pose(robot)) pr.step() origin_x, _, origin_y, _, _, _ = vision_sensor.get_bounding_box() # Setup occ_grid occ_grid = OccupancyGrid(origin_x, origin_y) depth = vision_sensor.capture_depth() occ_grid.fromDepth(depth) ''' dij_solver = Dijkstra(occ_grid, current_pose, target_pose) moves = dij_solver.solve() for i in moves: pr.step() set_2d_pose(robot, (i[0], i[1], current_pose[2])) time.sleep(0.05) ''' ''' # RRT Solver
# Get Scene
SCENE_FILE = join(dirname(abspath(__file__)), 'scenes/scene_cpp.ttt')

# Start Simulation
pr = PyRep()
pr.launch(SCENE_FILE, headless=False)
pr.start()

robot = Shape('start_pose')
vision_sensor = VisionSensor('vision_sensor')

# Generate a random map
randomMapGenerator.generate_random_map(no_of_objects, False)

# Setup occ_grid
occ_grid = OccupancyGrid()
setupOccGrid(occ_grid, vision_sensor)

# Compute block size from shape in simulation
bounding_box = robot.get_bounding_box()
block_size_x = int(
    round(bounding_box[1] - bounding_box[0], 3) / occ_grid.resolution)
block_size_y = int(
    round(bounding_box[3] - bounding_box[2], 3) / occ_grid.resolution)

submapper = GridSubmapper(occ_grid)
submapper.process(block_size_x, block_size_y)

planner = SubmapPlanner(occ_grid, block_size_x, block_size_y)
cur_pos = planner.getPath(submapper.submaps, only_start=True)
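# setupOccGrid is called above but not defined in this excerpt. A minimal
# sketch of what it is assumed to do, mirroring the explicit
# capture_depth()/fromDepth() sequence used in the earlier snippet: step the
# simulation so the vision sensor has data, then fill the grid from its
# depth buffer.
def setupOccGrid(occ_grid, vision_sensor):
    pr.step()  # assumes the global PyRep instance `pr` is already running
    depth = vision_sensor.capture_depth()
    occ_grid.fromDepth(depth)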
# Set numpy printing options
np.set_printoptions(threshold=100 * 100,
                    formatter={'all': lambda x: str(x) + ','})

# Get Scene
SCENE_FILE = join(dirname(abspath(__file__)), 'scenes/scene_nonrectilinear.ttt')

# Start Simulation
pr = PyRep()
pr.launch(SCENE_FILE, headless=True)
pr.start()

# Setup occ_grid
vision_sensor = VisionSensor('vision_sensor')
occ_grid = OccupancyGrid()
setupOccGrid(occ_grid, vision_sensor)

pr.step()

# End Simulation
pr.stop()
pr.shutdown()

# Process
img_grey = np.uint8(occ_grid.grid * 255)
contours, hierarchy = cv2.findContours(img_grey, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

img_color = cv2.cvtColor(img_grey, cv2.COLOR_GRAY2BGR)
intermediate_step = img_color.copy()
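# The excerpt stops after copying the colour image. A plausible next step
# (illustrative only; the output filename is hypothetical) is to overlay the
# detected contours on the copy so the intermediate result can be inspected.
cv2.drawContours(intermediate_step, contours, -1, (0, 255, 0), 1)
cv2.imwrite('contours_intermediate.png', intermediate_step)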
def __init__(
    self,
    cols,
    rows,
    widthMM,
    heightMM,
    # the x, y and th of the current location can be passed
    currentRow=-1,
    currentCol=-1,
    currentTh=0,
    goalRow=-1,
    goalCol=-1,
    gridUpdate="",
    image=None,
    grid=None,
    gridResize=1.0,
    showIter=0,
    title="",
):
    self.centros = []
    self.markerToolkit = MarkerToolkit()
    self.x_localizeMM = 0
    self.y_localizeMM = 0
    self.thr_localize = 0
    self.x_last_marker_localizeMM = 0
    self.x_last_marker_robotMM = 0
    self.y_last_marker_localizeMM = 0
    self.y_last_marker_robotMM = 0
    self.thr_last_marker_localize = 0
    self.thr_last_marker_robot = 0
    self.resetMarkers()
    OccupancyGrid.__init__(
        self,
        cols=cols,
        rows=rows,
        widthMM=widthMM,
        heightMM=heightMM,
        currentRow=currentRow,
        currentCol=currentCol,
        currentTh=currentTh,
        goalRow=goalRow,
        goalCol=goalCol,
        image=image,
        grid=grid,
        gridResize=gridResize,
        showIter=showIter,
        title=title,
    )
    self.distanceToWall = [[self.infinity for col in range(self.cols)]
                           for row in range(self.rows)]
    # Every occupied cell is part of a wall, so it is assigned a
    # distance of 0 to the walls.
    for row in range(self.rows):
        for col in range(self.cols):
            if self.grid[row][col] > self.threshhold:
                self.distanceToWall[row][col] = 0
    # Initialize the perceptron with the weights computed in part 1.
    # Comment out this section if you do not want to use it.
    self.pesos = [
        -0.25830320437434307,
        1.160526501403915,
        -0.15551005292679421,
        1.2578377038143795,
        -0.55707372041035363,
        -2.9552247878192888,
        -1.6809527155003046,
        -2.2014578204055359,
        -13.0,
        0.0,
    ]
    self.calculadorNeuronal = PerceptronMonocapa(self.pesos, w2)
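# Sketch (not part of the original class; the method name is hypothetical):
# one plausible way to fill in distanceToWall after the initialization above
# is a breadth-first "brushfire" sweep outward from every occupied cell, so
# each free cell ends up with its 4-connected distance (in cells) to the
# nearest wall.
from collections import deque

def propagateDistanceToWall(self):
    queue = deque((row, col)
                  for row in range(self.rows)
                  for col in range(self.cols)
                  if self.distanceToWall[row][col] == 0)
    while queue:
        row, col = queue.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            r, c = row + dr, col + dc
            if (0 <= r < self.rows and 0 <= c < self.cols and
                    self.distanceToWall[r][c] > self.distanceToWall[row][col] + 1):
                self.distanceToWall[r][c] = self.distanceToWall[row][col] + 1
                queue.append((r, c))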