class BiRRTSampler(Sampler):
    @overrides
    def init(self, **kwargs):
        super().init(**kwargs)
        self.randomSampler = RandomPolicySampler()
        self.randomSampler.init(**kwargs)

    @overrides
    def get_next_pos(self):
        # Random path
        while True:
            if random.random() < self.args.goalBias:
                # init/goal bias
                if self.args.planner.goal_tree_turn:
                    p = self.start_pos
                else:
                    p = self.goal_pos
            else:
                p = self.randomSampler.get_next_pos()[0]
            return p, self.report_success, self.report_fail
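# Sketch (not part of the original source): how a planner is assumed to consume the
# (pos, report_success, report_fail) triple that every sampler's get_next_pos()
# returns. `collision_free` and `extend_tree` are hypothetical callables standing in
# for the planner's own collision check and tree extension; the real planner classes
# may pass different keyword arguments back to the callbacks.
def planner_iteration(sampler, collision_free, extend_tree):
    pos, report_success, report_fail = sampler.get_next_pos()
    if collision_free(pos):
        extend_tree(pos)
        # reward whatever produced this sample (particle, grid cell, ...)
        report_success(pos=pos)
    else:
        # penalise it so future samples are steered elsewhere
        report_fail(pos=pos, free=False)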
def main():
    args = docopt(__doc__, version='RRT* Research v1.0')

    if args['--verbose'] > 2:
        LOGGER.setLevel(logging.DEBUG)
    elif args['--verbose'] > 1:
        LOGGER.setLevel(logging.INFO)
    elif args['--verbose'] > 0:
        LOGGER.setLevel(logging.WARNING)
    else:
        LOGGER.setLevel(logging.ERROR)
    # INFO includes only loading
    # DEBUG includes all outputs
    ch = logging.StreamHandler(sys.stdout)
    ch.setFormatter(logging.Formatter('%(message)s'))
    # ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    LOGGER.addHandler(ch)
    LOGGER.debug("commandline args: {}".format(args))

    from planners.rrtPlanner import RRTPlanner
    planner_type = RRTPlanner  # default planner type
    if args['rrt']:
        from planners.randomPolicySampler import RandomPolicySampler
        sampler = RandomPolicySampler(random_method=args['--random-method'])
    elif args['birrt']:
        from planners.birrtPlanner import BiRRTSampler, BiRRTPlanner
        sampler = BiRRTSampler()
        planner_type = BiRRTPlanner
    elif args['rrdt']:
        from planners.rrdtPlanner import RRdTSampler, RRdTPlanner
        sampler = RRdTSampler(
            restart_when_merge=not args['--no-restart-when-merge'])
        planner_type = RRdTPlanner
    elif args['informedrrt']:
        from planners.informedrrtSampler import InformedRRTSampler
        sampler = InformedRRTSampler()
    elif args['prm']:
        from planners.prmPlanner import PRMSampler, PRMPlanner
        sampler = PRMSampler()
        planner_type = PRMPlanner
    elif args['particle']:
        from planners.particleFilterSampler import ParticleFilterSampler
        sampler = ParticleFilterSampler()
    elif args['likelihood']:
        from planners.likelihoodPolicySampler import LikelihoodPolicySampler
        sampler = LikelihoodPolicySampler(
            prob_block_size=int(args['--prob-block-size']))
    elif args['nearby']:
        from planners.nearbyPolicySampler import NearbyPolicySampler
        sampler = NearbyPolicySampler(
            prob_block_size=int(args['--prob-block-size']))
    elif args['mouse']:
        from planners.mouseSampler import MouseSampler
        sampler = MouseSampler()

    rrt_options = MagicDict({
        'showSampledPoint': not args['--hide-sampled-points'],
        'scaling': float(args['--scaling']),
        'goalBias': float(args['--goal-bias']),
        'image': args['<MAP>'],
        'epsilon': float(args['--epsilon']),
        'max_number_nodes': int(args['--max-number-nodes']),
        'radius': float(args['--radius']),
        'goal_radius': 2 / 3 * float(args['--radius']),
        'ignore_step_size': args['--ignore-step-size'],
        'always_refresh': args['--always-refresh'],
        'enable_pygame': not args['--disable-pygame'],
        'sampler': sampler,
    })

    rrtplanner = planner_type(**rrt_options)
    rrt_options.update({
        'planner': rrtplanner,
    })

    if args['start'] and args['goal']:
        rrt_options.update({
            'startPt': (float(args['<sx>']), float(args['<sy>'])),
            'goalPt': (float(args['<gx>']), float(args['<gy>']))
        })

    rrt = env.Env(**rrt_options)
    return rrt
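# Sketch (assumption, not from the original source): a minimal entry point that runs
# the environment returned by main(). The method name `run()` on the Env object is an
# assumption about this codebase's API and may differ in the real repository.
if __name__ == '__main__':
    environment = main()
    environment.run()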
class ParticleFilterSampler(Sampler):
    @overrides
    def __init__(self, supressVisitedArea=True):
        self.supressVisitedArea = supressVisitedArea
        self._last_prob = None

        self.counter = 0
        self._c_random = 0
        self._c_resample = 0

    @overrides
    def init(self, **kwargs):
        super().init(**kwargs)
        # For benchmark stats tracking
        self.args.env.stats.lscampler_restart_counter = 0
        self.args.env.stats.lscampler_randomwalk_counter = 0

        self.randomSampler = RandomPolicySampler()
        self.randomSampler.init(**kwargs)
        self.randomnessManager = NormalRandomnessManager()
        # probability layer
        self.particles_layer = pygame.Surface(
            (self.args.XDIM * self.args.scaling,
             self.args.YDIM * self.args.scaling), pygame.SRCALPHA)

        self.p_manager = ParticleManager(num_particles=16,
                                         startPt=self.start_pos,
                                         goalPt=self.goal_pos,
                                         args=self.args)

    @overrides
    def report_fail(self, idx, **kwargs):
        if idx >= 0:
            self.p_manager.modify_energy(idx=idx, factor=0.7)

    @overrides
    def report_success(self, idx, **kwargs):
        self.p_manager.confirm(idx, kwargs['pos'])
        self.p_manager.modify_energy(idx=idx, factor=1)

    def randomWalk(self, idx):
        self.args.env.stats.lscampler_randomwalk_counter += 1
        # Randomly bias toward goal direction
        if random.random() < self.args.goalBias:
            dx = self.goal_pos[0] - self.p_manager.get_pos(idx)[0]
            dy = self.goal_pos[1] - self.p_manager.get_pos(idx)[1]
            goal_direction = math.atan2(dy, dx)
            new_direction = self.randomnessManager.draw_normal(
                origin=goal_direction, kappa=1.5)
        else:
            new_direction = self.randomnessManager.draw_normal(
                origin=self.p_manager.get_dir(idx), kappa=1.5)

        # scale the half norm by a factor of epsilon
        # Using this: https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.stats.halfnorm.html
        # factor = self.randomnessManager.draw_half_normal(self.args.epsilon, scale=self.args.epsilon * 0.5)
        factor = self.args.epsilon
        x, y = self.p_manager.get_pos(idx)
        x += math.cos(new_direction) * factor
        y += math.sin(new_direction) * factor

        self.p_manager.new_pos(idx=idx, pos=(x, y), dir=new_direction)
        return (x, y)

    def get_random_choice(self):
        prob = self.p_manager.get_prob()
        self._last_prob = prob  # this will be used to paint particles
        try:
            choice = np.random.choice(range(self.p_manager.size()), p=prob)
        except ValueError as e:
            # NOTE don't know why the probability got out of sync... we notify
            # the user, then try to re-sync the prob
            LOGGER.error(
                "!! probability got exception '{}'... trying to re-sync prob again."
                .format(e))
            self.p_manager.resync_prob()
            prob = self.p_manager.get_prob()
            self._last_prob = prob
            choice = np.random.choice(range(self.p_manager.size()), p=prob)
        return choice

    @overrides
    def get_next_pos(self):
        self.counter += 1
        self._c_random += 1
        self._c_resample += 1

        # if self._c_random > RANDOM_RESTART_EVERY and RANDOM_RESTART_EVERY > 0:
        #     _p = self.p_manager.random_restart_lowest()
        #     print("Rand restart at counter {}, with p {}".format(self.counter, _p))
        #     self._c_random = 0
        if self._c_random > RANDOM_RESTART_EVERY > 0:
            _p = self.p_manager.random_restart_specific_value()
            if _p:
                LOGGER.debug("Rand restart at counter {}, with p {}".format(
                    self.counter, _p))
            self._c_random = 0
            self.p_manager.weighted_resampling()
            LOGGER.debug("Resampling at counter {}".format(self.counter))
            self._c_resample = 0
            LOGGER.debug(self.p_manager.get_prob())

        if random.random() < 0:  # disabled branch: probability is zero
            LOGGER.debug('rand')
            p = self.randomSampler.get_next_pos()
            choice = -1
        else:
            # get a node to random walk
            choice = self.get_random_choice()
            p = self.randomWalk(choice)

        self.last_particle = p
        return (p, lambda c=choice, **kwargs: self.report_success(c, **kwargs),
                lambda c=choice, **kwargs: self.report_fail(c, **kwargs))

    ############################################################
    ##                    FOR PAINTING                        ##
    ############################################################
    @staticmethod
    def get_color_transists(value, max_prob, min_prob):
        denominator = max_prob - min_prob
        if denominator == 0:
            denominator = 1  # prevent division by zero
        return 220 - 180 * (1 - (value - min_prob) / denominator)

    @overrides
    def paint(self, window):
        if self._last_prob is None:
            return
        max_num = self._last_prob.max()
        min_num = self._last_prob.min()
        for i, p in enumerate(self.p_manager.particles):
            self.particles_layer.fill((255, 128, 255, 0))
            # get a transition from green to red
            c = self.get_color_transists(self._last_prob[i], max_num, min_num)
            c = max(min(255, c), 50)
            color = (c, c, 0)
            self.args.env.draw_circle(pos=p.pos,
                                      colour=color,
                                      radius=4,
                                      layer=self.particles_layer)
            window.blit(self.particles_layer, (0, 0))
class InformedRRTSampler(Sampler):
    @overrides
    def init(self, **kwargs):
        super().init(**kwargs)
        self.randomSampler = RandomPolicySampler()
        self.randomSampler.init(**kwargs)
        # max length we expect to find in our 'informed' sample space,
        # starts as infinite
        self.cBest = float('inf')
        pathLen = float('inf')
        solutionSet = set()
        path = None
        # Computing the sampling space
        self.cMin = dist(self.start_pos, self.goal_pos) - self.args.goal_radius
        self.xCenter = np.array(
            [[(self.start_pos[0] + self.goal_pos[0]) / 2.0],
             [(self.start_pos[1] + self.goal_pos[1]) / 2.0], [0]])
        a1 = np.array([[(self.goal_pos[0] - self.start_pos[0]) / self.cMin],
                       [(self.goal_pos[1] - self.start_pos[1]) / self.cMin],
                       [0]])
        self.etheta = math.atan2(a1[1], a1[0])
        # first column of identity matrix transposed
        id1_t = np.array([1.0, 0.0, 0.0]).reshape(1, 3)
        M = a1 @ id1_t
        U, S, Vh = np.linalg.svd(M, 1, 1)
        self.C = np.dot(
            np.dot(
                U,
                np.diag([
                    1.0, 1.0,
                    np.linalg.det(U) * np.linalg.det(np.transpose(Vh))
                ])), Vh)

    @overrides
    def get_next_pos(self):
        self.cBest = self.args.planner.c_max
        if self.cBest < float('inf'):
            r = [
                self.cBest / 2.0,
                math.sqrt(self.cBest**2 - self.cMin**2) / 2.0,
                math.sqrt(self.cBest**2 - self.cMin**2) / 2.0
            ]
            L = np.diag(r)
            xBall = self.sampleUnitBall()
            rnd = np.dot(np.dot(self.C, L), xBall) + self.xCenter
            p = [rnd[(0, 0)], rnd[(1, 0)]]
        else:
            p = self.randomSampler.get_next_pos()[0]
        return p, self.report_success, self.report_fail

    @staticmethod
    def sampleUnitBall():
        a = random.random()
        b = random.random()
        if b < a:
            a, b = b, a
        sample = (b * math.cos(2 * math.pi * a / b),
                  b * math.sin(2 * math.pi * a / b))
        return np.array([[sample[0]], [sample[1]], [0]])

    def paint(self, window):
        # draw the ellipse
        if self.args.sampler.cBest < float('inf'):
            cBest = self.cBest * self.args.scaling
            cMin = self.cMin * self.args.scaling
            a = math.sqrt(cBest**2 - cMin**2)  # height
            b = cBest  # width
            # rectangle that represents the ellipse
            r = pygame.Rect(0, 0, b, a)
            angle = self.etheta
            # rotate via surface
            ellipse_surface = pygame.Surface((b, a), pygame.SRCALPHA,
                                             32).convert_alpha()
            try:
                pygame.draw.ellipse(ellipse_surface, (255, 0, 0, 80), r)
                pygame.draw.ellipse(ellipse_surface, Colour.black, r,
                                    int(2 * self.args.scaling))
            except ValueError:
                # sometimes it will fail to draw because the ellipse is too narrow
                pass
            # rotate
            ellipse_surface = pygame.transform.rotate(
                ellipse_surface, -angle * 180 / math.pi)
            # we need to offset the blit based on the surface center
            rcx, rcy = ellipse_surface.get_rect().center
            ellipse_x = (self.xCenter[0] * self.args.scaling - rcx)
            ellipse_y = (self.xCenter[1] * self.args.scaling - rcy)
            window.blit(ellipse_surface, (ellipse_x, ellipse_y))
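# Sketch (not part of the original source): a standalone illustration of the informed
# sampling transform used above.  A point drawn uniformly from the unit ball is scaled
# by L = diag(cBest/2, sqrt(cBest^2 - cMin^2)/2, ...), rotated by C, and translated to
# the ellipse centre, so every sample lies inside the ellipse whose foci are the start
# and goal and whose major-axis length equals the current best path cost cBest.  The
# start/goal coordinates and cBest below are made-up demonstration values; here cMin is
# the exact start-goal distance (the sampler above subtracts goal_radius from it).
import math
import random
import numpy as np

start, goal = np.array([10.0, 10.0, 0.0]), np.array([90.0, 50.0, 0.0])
cMin = np.linalg.norm(goal - start)   # theoretical minimum cost
cBest = 1.2 * cMin                    # pretend current best solution cost
centre = (start + goal) / 2.0

# rotation-to-world frame, same construction as InformedRRTSampler.init()
a1 = ((goal - start) / cMin).reshape(3, 1)
M = a1 @ np.array([[1.0, 0.0, 0.0]])
U, _, Vh = np.linalg.svd(M)
C = U @ np.diag([1.0, 1.0, np.linalg.det(U) * np.linalg.det(Vh.T)]) @ Vh

L = np.diag([cBest / 2.0,
             math.sqrt(cBest**2 - cMin**2) / 2.0,
             math.sqrt(cBest**2 - cMin**2) / 2.0])

for _ in range(5):
    # uniform sample from the unit disc, as in sampleUnitBall()
    a, b = sorted((random.random(), random.random()))
    x_ball = np.array([b * math.cos(2 * math.pi * a / b),
                       b * math.sin(2 * math.pi * a / b), 0.0])
    x_rand = C @ L @ x_ball + centre
    # the sum of distances to the two foci never exceeds cBest
    assert (np.linalg.norm(x_rand - start) +
            np.linalg.norm(x_rand - goal)) <= cBest + 1e-9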
class LikelihoodPolicySampler(Sampler):
    @overrides
    def __init__(self, prob_block_size, supressVisitedArea=True):
        self.PROB_BLOCK_SIZE = prob_block_size
        self.supressVisitedArea = supressVisitedArea

    @overrides
    def init(self, **kwargs):
        super().init(**kwargs)
        self.randomSampler = RandomPolicySampler()
        self.randomSampler.init(**kwargs)
        # probability layer
        self.prob_layer = pygame.Surface(
            (self.PROB_BLOCK_SIZE * self.args.scaling,
             self.PROB_BLOCK_SIZE * self.args.scaling), pygame.SRCALPHA)

        self.shape = (int(self.args.XDIM / self.PROB_BLOCK_SIZE) + 1,
                      int(self.args.YDIM / self.PROB_BLOCK_SIZE) + 1)
        self.prob_vector = np.ones(self.shape)
        self.prob_vector *= 1  # IMPORTANT because we are using log2
        self.obst_vector = np.ones(self.shape)
        # self.prob_vector *= 20
        self.prob_vector_normalized = None
        self.tree_vector = np.ones(self.shape)

        self.sampleCount = 0

    @overrides
    def get_next_pos(self):
        if self.prob_vector_normalized is None or random.random() < 0.05:
            LOGGER.debug('rand')
            p = self.randomSampler.get_next_pos()[0]
        else:
            choice = np.random.choice(range(self.prob_vector_normalized.size),
                                      p=self.prob_vector_normalized.ravel())
            y = choice % self.prob_vector_normalized.shape[1]
            x = int(choice / self.prob_vector_normalized.shape[1])
            p = ((x + random.random()) * self.PROB_BLOCK_SIZE,
                 (y + random.random()) * self.PROB_BLOCK_SIZE)
        return p, self.report_success, self.report_fail

    @overrides
    def add_tree_node(self, pos):
        x, y = pos
        x = int(x / self.PROB_BLOCK_SIZE)
        y = int(y / self.PROB_BLOCK_SIZE)
        self.tree_vector[x][y] += 1

    @overrides
    def add_sample_line(self, x1, y1, x2, y2):
        x1 = int(x1 / self.PROB_BLOCK_SIZE)
        y1 = int(y1 / self.PROB_BLOCK_SIZE)
        x2 = int(x2 / self.PROB_BLOCK_SIZE)
        y2 = int(y2 / self.PROB_BLOCK_SIZE)
        points = get_line((x1, y1), (x2, y2))
        for p in points:
            self.report_fail(pos=p, free=True,
                             alreadyDividedByProbBlockSize=True)

    @overrides
    def report_success(self, **kwargs):
        x, y = kwargs['pos']
        # mark all in-between points from the nearest node towards the random
        # point as valid
        x1, y1 = self.args.env.cc.get_coor_before_collision(
            kwargs['nn'].pos, kwargs['rand_pos'])
        self.add_sample_line(x, y, x1, y1)

    @overrides
    def report_fail(self, **kwargs):
        p = kwargs['pos']
        if p is None:
            return
        try:
            p = p.pos
        except AttributeError as e:
            pass
        if 'alreadyDividedByProbBlockSize' not in kwargs:
            x = int(p[0] / self.PROB_BLOCK_SIZE)
            y = int(p[1] / self.PROB_BLOCK_SIZE)
        else:
            x = p[0]
            y = p[1]
        if x < 0 or x >= self.prob_vector.shape[0] or \
                y < 0 or y >= self.prob_vector.shape[1]:
            return

        # ^ setting the right coor ^
        ###############################
        factor = 1.5
        if 'obstacle' in kwargs:
            self.obst_vector[x][y] += 2
        elif not kwargs['free']:
            self.obst_vector[x][y] += 1
            # self.prob_vector[x][y] -= (100 - self.prob_vector[x][y]) * 0.1
            # if self.prob_vector[x][y] < 5:
            #     self.prob_vector[x][y] = 5
        elif kwargs['free']:
            if 'weight' in kwargs:
                self.prob_vector[x][y] += kwargs['weight']
            else:
                self.prob_vector[x][y] += 1
                # self.prob_vector[x][y] = 10
            self.obst_vector[x][y] = 1

        #########################################################
        sigma_y = 2.0
        sigma_x = 2.0
        sigma = [sigma_y, sigma_x]
        if self.sampleCount % 10 == 0:
            pass
        self.prob_vector_normalized = np.copy(self.prob_vector)
        tree_vector_normalized = np.copy(self.tree_vector**1.1)
        tree_vector_normalized = sp.ndimage.filters.gaussian_filter(
            tree_vector_normalized, (1.0, 1.0), mode='reflect')
        # self.prob_vector_normalized = tree_vector_normalized
        self.prob_vector_normalized *= (1 / self.obst_vector * 3)
        self.prob_vector_normalized = sp.ndimage.filters.gaussian_filter(
            self.prob_vector_normalized, sigma, mode='reflect')
        self.prob_vector_normalized *= (1 / tree_vector_normalized * 1.5)
        # self.prob_vector_normalized *= (1 / self.tree_vector * 1.5)
        self.prob_vector_normalized /= self.prob_vector_normalized.sum()

        self.sampleCount += 1

    #########################################################
    ####               FOR PAINTING                      ####
    #########################################################
    @staticmethod
    def get_vector_alpha_parameters(vector):
        max_prob = vector.max()
        min_prob = vector.min()
        denominator = max_prob - min_prob
        if denominator == 0:
            denominator = 1  # prevent division by zero
        return max_prob, min_prob, denominator

    @overrides
    def paint(self, window):
        if self.prob_vector_normalized is not None:
            for i in range(self.prob_vector_normalized.shape[0]):
                for j in range(self.prob_vector_normalized.shape[1]):
                    max_prob, min_prob, denominator = \
                        self.get_vector_alpha_parameters(
                            self.prob_vector_normalized)
                    alpha = 240 * (1 - (self.prob_vector_normalized[i][j] -
                                        min_prob) / denominator)
                    # if self.tree_vector[i][j] > 1:
                    #     max_prob, min_prob, denominator = self.get_vector_alpha_parameters(self.tree_vector)
                    #     alpha = 240 * (1 - (self.prob_vector_normalized[i][j] - min_prob) / denominator)
                    #     self.prob_layer.fill((0, 255, 0, alpha))
                    # else:
                    self.prob_layer.fill((255, 128, 255, alpha))
                    window.blit(
                        self.prob_layer,
                        (i * self.PROB_BLOCK_SIZE * self.args.scaling,
                         j * self.PROB_BLOCK_SIZE * self.args.scaling))
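# Sketch (not part of the original source): a tiny standalone check of the flat-index
# to grid-cell conversion used in LikelihoodPolicySampler.get_next_pos().  For a
# C-ordered probability grid of shape (nx, ny) flattened with ravel(), a drawn flat
# index maps back to the cell (x, y) = (idx // ny, idx % ny); the grid size and values
# below are arbitrary demonstration numbers.
import numpy as np

prob = np.random.rand(4, 3)   # pretend (nx, ny) probability grid
prob /= prob.sum()            # normalise so it can be fed to np.random.choice
idx = np.random.choice(prob.size, p=prob.ravel())
x, y = idx // prob.shape[1], idx % prob.shape[1]
assert (x, y) == np.unravel_index(idx, prob.shape)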