Example #1
    def plot_ODF(self):
        """ Plots ODF """
        self.handles = list()
        env = self.camera.sensor.GetEnv()

        max_dist = -np.inf
        for i in xrange(self.resolution):
            for j in xrange(self.resolution):
                for k in xrange(self.resolution):
                    if self.ODF[i, j, k] > max_dist and self.ODF[i, j,
                                                                 k] != np.inf:
                        max_dist = self.ODF[i, j, k]

        for i in xrange(self.resolution):
            for j in xrange(self.resolution):
                for k in xrange(self.resolution):
                    dist = self.ODF[i, j, k]
                    if dist >= 0 and dist != np.inf:
                        dist_pct = dist / max_dist
                        rgb = colorsys.hsv_to_rgb((2 / 3.) * dist_pct, 1, 1)
                        self.handles += [
                            utils.plot_point(env,
                                             self.get_voxel_center([i, j, k]),
                                             size=self.radius,
                                             color=rgb)
                        ]
                    else:
                        self.handles += [
                            utils.plot_point(env,
                                             self.get_voxel_center([i, j, k]),
                                             size=self.radius,
                                             color=[0, 0, 0])
                        ]
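A quick aside on the color mapping in plot_ODF above: dist_pct runs from 0 to 1, so the hue (2 / 3.) * dist_pct sweeps from red for the closest voxels to blue for the farthest finite ODF values, while voxels with negative or infinite ODF values are drawn in black. A minimal sketch of that mapping, using only the standard library:

import colorsys

# Hue 0 is red, 1/3 is green, 2/3 is blue, so near voxels come out "hot"
# and far voxels "cold".
for dist_pct in (0.0, 0.5, 1.0):
    print('{0} -> {1}'.format(dist_pct,
                              colorsys.hsv_to_rgb((2 / 3.) * dist_pct, 1, 1)))
# dist_pct 0.0 -> red, 0.5 -> green, 1.0 -> blue (up to float rounding)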
Example #2
def plot_point(fig_name):
    """
    Plot a location map containing all the points.
    :param fig_name: saved figure name
    :return: None; the figure is saved to disk
    """
    utils.plot_point()
    plt.title(fig_name)
    if not os.path.exists(dir_fig):
        os.makedirs(dir_fig)
    plt.savefig(os.path.join(dir_fig, fig_name + '.png'))
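This snippet relies on module-level names that are not shown here. A minimal, assumed preamble that would make it runnable on its own might look like the following (dir_fig is a guess based on the original save path, and utils is the project's own plotting module):

import os

import matplotlib.pyplot as plt
import utils  # project-local plotting helpers (assumed)

dir_fig = 'figures'  # assumed output directory for saved figures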
Example #3
def test_camera():
    env = rave.Environment()
    env.Load('../envs/pr2-empty.env.xml')
    env.SetViewer('qtcoin')
    #env.GetViewer().SendCommand('SetFiguresInCamera 1') # also shows the figures in the image
    time.sleep(1)
    robot = env.GetRobots()[0]

    sensor = robot.GetAttachedSensor('head_cam').GetSensor()

    cam = Camera(robot, sensor)

    is_hits, hits = cam.get_hits(subsampled=True)
    zbuffer = cam.get_zbuffer()

    height, width, _ = hits.shape
    global handles
    for i in xrange(height):
        for j in xrange(width):
            #print zpoints[i,j,:]
            if is_hits[i, j]:
                if cam.is_in_fov(hits[i, j, :], zbuffer):
                    handles += [utils.plot_point(env, hits[i, j, :], size=.01)]

    IPython.embed()
Example #4
def test_distance_to_plot():
    import openravepy as rave

    env = rave.Environment()
    env.SetViewer('qtcoin')

    t = Triangle(np.random.rand(3), np.random.rand(3), np.random.rand(3))
    t.plot(env)

    p = 2 * np.random.rand(3)
    closest_pt = t.closest_point_to(p)

    handles.append(utils.plot_point(env, p, color=(0, 0, 1)))
    handles.append(utils.plot_point(env, closest_pt))

    IPython.embed()
Example #5
    def plot_FOV(self):
        env = self.camera.sensor.GetEnv()
        color = [1, 0, 0]

        is_hits, hits = self.camera.get_hits()
        rows, cols = is_hits.shape
        for i in xrange(rows):
            for j in xrange(cols):
                if is_hits[i, j]:
                    voxel = self.voxel_from_point(hits[i, j, :])
                    voxel_center = self.get_voxel_center(voxel)
                    self.handles += [
                        utils.plot_point(env,
                                         voxel_center,
                                         size=self.radius,
                                         color=color)
                    ]

        cam_pos = self.camera.sensor.GetTransform()[:3, 3]
        self.handles += utils.plot_segment(env,
                                           cam_pos,
                                           hits[0, 0, :],
                                           color=color)
        self.handles += utils.plot_segment(env,
                                           cam_pos,
                                           hits[0, cols - 1, :],
                                           color=color)
        self.handles += utils.plot_segment(env,
                                           cam_pos,
                                           hits[rows - 1, 0, :],
                                           color=color)
        self.handles += utils.plot_segment(env,
                                           cam_pos,
                                           hits[rows - 1, cols - 1, :],
                                           color=color)
Example #6
    def signed_distance_complete(self):
        """
        Finds signed-distance from the current field-of-view to the object
        (where object is implicitly given by the ODF).
        Call this after update_TSDF and update_ODF.
        """
        zbuffer = self.camera.get_zbuffer()
        obj_in_fov = self.camera.is_in_fov(self.object, zbuffer)

        min_dist, min_voxel = np.inf, None
        for i in xrange(self.resolution):
            for j in xrange(self.resolution):
                for k in xrange(self.resolution):
                    print('({0},{1},{2})'.format(i, j, k))
                    if self.TSDF[i, j, k] != 0:
                        voxel_center = self.get_voxel_center([i, j, k])
                        if obj_in_fov:
                            if not self.camera.is_in_fov(
                                    voxel_center,
                                    zbuffer) and self.ODF[i, j, k] < min_dist:
                                min_dist = self.ODF[i, j, k]
                                min_voxel = [i, j, k]
                        else:
                            if self.camera.is_in_fov(
                                    voxel_center,
                                    zbuffer) and self.ODF[i, j, k] < min_dist:
                                min_dist = self.ODF[i, j, k]
                                min_voxel = [i, j, k]

                        if self.camera.is_in_fov(voxel_center, zbuffer):
                            self.handles += [
                                utils.plot_point(self.camera.sensor.GetEnv(),
                                                 voxel_center,
                                                 size=self.radius)
                            ]

        sd = min_dist if obj_in_fov else -min_dist

        # TEMP
        self.handles += [
            utils.plot_point(self.camera.sensor.GetEnv(),
                             self.get_voxel_center(min_voxel),
                             size=5 * self.radius)
        ]

        return sd
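The docstring spells out the required call order. A hypothetical usage sketch, pieced together from the test_voxel_grid example further down in this listing (center and target are placeholder points, and the VoxelGrid arguments simply mirror that example):

vgrid = VoxelGrid(cam, center, 1.5, 2, 1, resolution=50)
vgrid.object = target
vgrid.update_TSDF()                    # required before signed_distance_complete
vgrid.update_ODF(target)               # required before signed_distance_complete
sd = vgrid.signed_distance_complete()  # > 0 when the object is inside the FOV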
Example #7
    def plot_centers(self):
        self.handles = list()
        env = self.camera.sensor.GetEnv()
        for i in xrange(self.resolution):
            for j in xrange(self.resolution):
                for k in xrange(self.resolution):
                    self.handles += [
                        utils.plot_point(env,
                                         self.get_voxel_center([i, j, k]),
                                         size=self.radius)
                    ]
Example #8
    def plot_TSDF(self):
        """ Plot only the voxels where an object was hit (i.e. TSDF[i, j, k] == 0) """
        self.handles = list()
        env = self.camera.sensor.GetEnv()
        for i in xrange(self.resolution):
            for j in xrange(self.resolution):
                for k in xrange(self.resolution):
                    if self.TSDF[i, j, k] == 0:
                        self.handles += [
                            utils.plot_point(env,
                                             self.get_voxel_center([i, j, k]),
                                             size=self.radius)
                        ]
Example #9
def closest_collisions(env, origin, directions, plot=False):
    rays = np.hstack((np.tile(origin, (directions.shape[0], 1)), directions))

    with env:
        is_hits, hits = env.CheckCollisionRays(rays, None)

    if plot:
        global handles
        for i in xrange(len(rays)):
            handles += utils.plot_segment(env, origin,
                                          origin + directions[i, :])

        hits_list = [hits[i, :] for i in xrange(len(is_hits)) if is_hits[i]]
        for hit in hits_list:
            handles.append(utils.plot_point(env, hit))
Example #10
def closest_collision(env, origin, direction, plot=False):
    rays = np.hstack((origin, direction))
    with env:
        is_hits, hits = env.CheckCollisionRays(np.array([rays]), None)

    closest_hit, closest_hit_dist = None, np.inf
    for i in xrange(hits.shape[0]):
        if is_hits[i] and np.linalg.norm(hits[i, :3] -
                                         origin) < closest_hit_dist:
            closest_hit = hits[i, :3]
            closest_hit_dist = np.linalg.norm(hits[i, :3] - origin)

    if plot:
        global handles
        handles += utils.plot_segment(env, origin, origin + direction)
        if closest_hit is not None:
            handles.append(utils.plot_point(env, closest_hit))

    return closest_hit
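A hypothetical call of the helper above, assuming an OpenRAVE environment env and a module-level handles list as in the other examples here; the values below are made up, and the comment assumes that CheckCollisionRays treats the norm of the direction as the maximum ray length:

origin = np.array([0., 0., 1.])
direction = np.array([2., 0., 0.])  # ray along +x; its norm is assumed to bound the ray length
hit = closest_collision(env, origin, direction, plot=True)
if hit is not None:
    print('closest hit at {0}'.format(hit))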
Example #11
def test_fov():
    brett = PR2('../../envs/pr2-test.env.xml')
    env = brett.env
    arm = brett.rarm
    kinect = brett.r_kinect

    kinect.power_on()
    arm.set_pose(tfx.pose([2.8, -1.88, .98], tfx.tb_angles(0, 90, 0)))
    time.sleep(1)

    handles = list()
    pt = kinect.get_pose().position + [1, 0, 0]
    handles.append(utils.plot_point(env, pt.array))

    while True:
        arm.teleop()
        pixel = kinect.get_pixel_from_point(pt, False)
        print(pixel)

    IPython.embed()
Example #12
def test_beam_inside():
    import openravepy as rave

    env = rave.Environment()
    env.SetViewer('qtcoin')
    time.sleep(1)

    base = [0, 0, 0]
    a = [.1, .1, .5]
    b = [-.1, .1, .5]
    c = [-.1, -.1, .5]
    d = [.1, -.1, .5]

    beam = Beam(base, a, b, c, d)
    beam.plot(env)

    halfplanes = beam.get_halfplanes()
    for h in halfplanes:
        h.plot(env)
    """
    p0 = [0, 0, .3]
    p1 = [0, 0, .55]
    p2 = [.2, 0, .3]
    
    for p in [p0, p1, p2]:
        print('is_inside: {0}'.format(beam.is_inside(p)))
        handles.append(utils.plot_point(env, p))
    """

    for i in xrange(1000):
        p = np.random.rand(3)
        is_inside = beam.is_inside(p)
        print('is_inside: {0}'.format(is_inside))
        h = utils.plot_point(env, p)
        if is_inside:
            raw_input()

    IPython.embed()
Example #13
def test_voxel_grid():
    env = rave.Environment()
    env.Load('../envs/pr2-empty.env.xml')
    env.SetViewer('qtcoin')
    #env.GetViewer().SendCommand('SetFiguresInCamera 1') # also shows the figures in the image
    time.sleep(1)
    robot = env.GetRobots()[0]

    sensor = robot.GetAttachedSensor('head_cam').GetSensor()

    cam = Camera(robot, sensor)

    table = env.GetKinBody('table')
    table_pos = tfx.point(table.GetTransform()).array

    vgrid = VoxelGrid(cam,
                      table_pos + np.array([0, 0, 0]),
                      1.5,
                      2,
                      1,
                      resolution=50)
    #vgrid.plot_centers()

    object = table_pos + np.array([.15, 0, -.2])
    vgrid.object = object  # TEMP
    h = utils.plot_point(env, object, size=.05, color=[1, 0, 0])

    vgrid.update_TSDF()
    #vgrid.update_ODF(object)
    #vgrid.signed_distance_complete()

    #vgrid.plot_TSDF()
    #vgrid.plot_ODF()
    #vgrid.plot_FOV()

    IPython.embed()
Example #14
    def display(self, env):
        return utils.plot_point(env, self.point.array, color=self.color / 255.)
Example #15
def test_FOV(M=10):
    env = rave.Environment()
    env.Load('../envs/pr2-test.env.xml')
    env.SetViewer('qtcoin')
    env.GetViewer().SendCommand(
        'SetFiguresInCamera 1')  # also shows the figures in the image
    time.sleep(1)
    robot = env.GetRobots()[0]

    convexify_workspace(env, robot)

    cam = robot.GetAttachedSensor('head_cam').GetSensor()
    type = rave.Sensor.Type.Camera
    cam_geom = cam.GetSensorGeometry(type)

    height, width, _ = cam_geom.imagedata.shape
    F = .01  # real focal length in meters
    max_range = 5.

    fov = FOV(robot, cam_geom.KK, height, width, F, max_range)

    cam_pose = tfx.pose([0, 0, 0.05], frame='wide_stereo_optical_frame')

    start = time.time()
    beams = fov.get_beams(cam_pose)
    print('beams time: {0}'.format(time.time() - start))

    start = time.time()
    border = fov.get_border(beams)
    print('border time: {0}'.format(time.time() - start))

    IPython.embed()
    return

    table = env.GetKinBody('table')
    base = table.GetLink('base')
    extents = base.Geometry.GetBoxExtents(base.GetGeometries()[0])

    table_pos = tfx.pose(table.GetTransform()).position
    # assume table has orientation np.eye(3)
    x_min, x_max = table_pos.x - extents[0], table_pos.x + extents[0]
    y_min, y_max = table_pos.y - extents[1], table_pos.y + extents[1]
    z_min, z_max = table_pos.z + extents[2], table_pos.z + extents[2] + .2

    particles = list()
    for i in xrange(M):
        x = random_within(x_min, x_max)
        y = random_within(y_min, y_max)
        z = random_within(z_min, z_max)
        particles.append(np.array([x, y, z]))

    signed_distances = list()
    for i in xrange(M):
        print(i)
        signed_distances.append(
            fov.signed_distance(particles[i], beams, border))

    min_sd = min(signed_distances)
    sd_hue = np.array(signed_distances) + abs(min_sd)
    sd_hue = (1 / 3.0) * (sd_hue / max(sd_hue))
    for p, h in zip(particles, sd_hue):
        rgb = colorsys.hsv_to_rgb(h, 1, 1)
        handles.append(utils.plot_point(env, p, color=rgb))

    #fov.plot(beams)
    for tri in border:
        tri.plot(env)

    IPython.embed()
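For reference, a small worked example (invented values) of the signed-distance-to-hue normalization used near the end of test_FOV: after the shift and scaling, the smallest signed distance ends up at hue 0 (red) and the largest at hue 1/3 (green):

import numpy as np

signed_distances = [-0.4, 0.0, 0.8]
sd_hue = np.array(signed_distances) + abs(min(signed_distances))  # approx. [0.0, 0.4, 1.2]
sd_hue = (1 / 3.0) * (sd_hue / max(sd_hue))                       # approx. [0.0, 0.111, 0.333]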