Example #1
def on_key_press(e):
    # print("{} {}".format(e.text, e.key))
    global pose_idx, is_playing, playing_dt, text_pose_idx, show_trailj
    if e.text == ',':
        set_pose(pose_idx - 1)
    elif e.text == '.':
        set_pose(pose_idx + 1)
    elif e.text == '<':
        set_pose(pose_idx - 10)
    elif e.text == '>':
        set_pose(pose_idx + 10)
    elif e.key == 'Home':
        set_pose(0)
    elif e.key == 'End':
        set_pose(max_pose_idx)
    elif e.key == 'PageDown':
        set_pose(pose_idx - 100)
    elif e.key == 'PageUp':
        set_pose(pose_idx + 100)
    elif e.text == ' ':
        if is_playing:
            playing_timer.stop()
            is_playing = False
        else:
            playing_timer.start(playing_dt)
            is_playing = True
    elif e.text == 'f':
        text_pose_idx.visible = not text_pose_idx.visible
    elif e.text == 't':
        show_trailj = not show_trailj
        for j in tj1: j.visible = show_trailj
    elif e.text == 'P':
        img = canvas.render(bgcolor='white')
        img_fname = 'snapshot_{}_{:08d}.png'.format(
            os.path.basename(pose_data_fname).split('.')[0], pose_idx)
        io.write_png(img_fname, img)
Example #2
 def on_key_press(ev):
     print(ev.key.name)
     if ev.key.name == 'S':
         print("Saving...")
         res = _screenshot()
         vispy_file.write_png('config_space/{}_shot.png'.format(seq), res)
         print("Done")
Example #3
def update(event):
    global view, f, F, vol, volume1, canvas, rec, rec_prefix, project_name, \
            play

    start = time()

    if not play:
        if rec:
            play = True

    if play:
        maxF = camera_move(view.camera, f, F)

    f += 1

    if rec:

        image = canvas.render()
        io.write_png(f'{rec_prefix}/{project_name}/{project_name}_{f}.png',
                     image)

        ETA = (time() - start) * (maxF - f
                                  )  # (time / frame) * frames remaining
        ETA = (ETA / 60) / 60  # seconds to hours
        ETA = np.modf(ETA)
        ETA = int(ETA[1]), int(round(ETA[0] * 60))
        ETA = str(ETA[0]) + ":" + str(ETA[1]).zfill(2)

        print('saved frame', f, 'eta:', ETA)
Example #4
 def export_image(self):
     image = self.canvas.render()
     if self.canvas.domain:
         name = self.canvas.domain.name()
     else:
         name = "tiling-(" + ','.join(map(str, self.canvas.orders)) + ")"
     io.write_png("export/" + name + ".png", image)
Example #5
    def on_timer(self, event):
        global mReadImages, mReadVideo, cap
        global frameArr, mFrames

        if self.texture:  #Change on each frame    3-8-2019
            if mReadImages:
                print("self.texture?, frame", self.frame)
                path = mRootPath + str(self.frame % 8) + ".png"
                print(path)
                #im = io.read_png(path) #mRootPath + str(self.frame%8)+".png")
                im = io.imread(path)  #mRootPath + str(self.frame%8)+".png")
                #imread requires PIL or imageio
                #directly write video frames from opencv numpy arrays?
                self.program['u_texture1'] = gloo.Texture2D(im)
            if mReadVideo:  #read video
                try:
                    if (self.cap):
                        #cv2.waitKey(35)
                        print("mReadVideo?")
                        #cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                        #ret, frame = self.cap.read(0)
                        #ret, frame = self.cap.read(self.frame)
                        ret, frame = cap.read()  #self.frame) #global
                        if ret: cv2.imshow("cap", frame)
                        if ret:
                            self.program['u_texture1'] = gloo.Texture2D(frame)
                        else:
                            print("ret=?, frame=?", ret, frame)
                            self.program['u_texture1'] = gloo.Texture2D(
                                frameArr[self.frame % mFrames])
                except Exception as e:
                    print(e)

        #print("slef.save=", self.save)
        #sleep(5)
        if self.save:  #True: #self.save==1:
            print("IN slef.save=", self.save)
            i = self.render()  #without OK?
            io.write_png(str(self.frame) + ".png", i)
            #no such: io.write_jpg(str(self.frame)+".jpg", i);

        #self.mSlowDown = 1; #if not self.mSlowDown: self.mSlowDown = 1
        self.program['iTime'] = event.elapsed * self.mSlowDown
        self.program['iGlobalTime'] = event.elapsed
        self.program['iDate'] = get_idate()  # used in some shadertoy exs
        #self.update()
        self.frame += 1  #24-11-2018
        print(self.frame)
        self.frameTime.append((self.frame, event.elapsed))
        if (self.frame % frame_divider == 0):
            ln = len(self.frameTime)
            fr1, t1 = self.frameTime[ln - 1]
            fr2, t2 = self.frameTime[ln - 2]
            fps = (fr1 - fr2) / (t1 - t2)
            #print({:04.2f} fps, end=", ");
            print(" %0.2f" % fps, end=", ")
            sys.stdout.flush()

        self.update()
Example #6
    def on_key_press(self, event):

        print(event.text)

        if event.text == 'q':
            img = self.render()
            io.write_png(
                "../../render/render{:02d}.png".format(self.cur_frame), img)

        if event.text == 'b':
            self.point_size = self.point_size * 1.2
            self.update_color_or_size()

        if event.text == 's':
            self.point_size = self.point_size / 1.2
            self.update_color_or_size()

        if event.text == 'j':
            self.camera_length = self.camera_length * 2
            self.update_camera()

        if event.text == 'l':
            self.camera_length = self.camera_length * 0.5
            self.update_camera()

        if event.text == 't':
            self.show_camera = not self.show_camera
            self.update_camera()

        if event.key == 'Right':

            if (self.cur_frame < self.num_frames - 1):

                self.cur_frame += 1
                print "frame{:d}".format(self.cur_frame)

                self.transform_nodes()

                print(event.text + ' inside')

        if event.key == 'Left':

            if (self.cur_frame > 0):

                self.cur_frame -= 1
                print "frame{:d}".format(self.cur_frame)

                self.transform_nodes()

        # if event.text == 's':
        #     if(self.show_color):
        #         self.show_color = False
        #         self.update_color_or_size()

        if event.text == 'c':

            self.show_color = not self.show_color
            self.update_color_or_size()
Example #7
 def write_img(self, img, filename=None):
     if filename is None:
         suffix = 0
         filepat = "screen%d.png"
         while os.path.exists(filepat % suffix):
             suffix = suffix + 1
         filename = filepat % suffix
     io.write_png(filename, img)
     print("Wrote " + filename)
Example #9
 def save_image(self):
     path, category = QFileDialog.getSaveFileName(self,
                                                  "Save tree as image",
                                                  TREES_PATH,
                                                  "Png files (*.png)")
     if not path.endswith(".png"):
         path += ".png"
     if path:
         self.layout.canvas._smooth_enabled = False
         image = self.layout.canvas.render()
         self.layout.canvas._smooth_enabled = True
         io.write_png(path, image)
Example #10
    def write(self):
        image = _screenshot(alpha=True)
        write_png('covers2/%s.png' % self.time, image)

        data = {}
        data.update(self.input_manager.smoothed_inputs)
        data['time'] = self.time
        data['definition'] = self.fractal.definition['name']

        with open('covers2/%s.yml' % self.time, 'w') as f:
            yaml.dump(data, stream=f)

        print "Wrote image and data for %s" % self.time
Example #11
def create_thumbnail(path, model, up="z"):
    if path.endswith(".obj"):
        up = "y"
    canvas = vispy.scene.SceneCanvas(bgcolor='white')
    canvas.unfreeze()
    canvas.view = canvas.central_widget.add_view()
    mesh = vispy.scene.visuals.Mesh(vertices=model.vertices, shading='flat', faces=model.faces)
    canvas.view.add(mesh)
    canvas.view.camera = vispy.scene.TurntableCamera(up=up, fov=30)
    canvas.view.camera.depth_value = 0.5
    img = canvas.render()
    img_name = change_ext(path, '.png')
    img_path = os.path.join(settings.THUMBS_ROOT, get_file_name(img_name))
    if os.path.exists(img_path):
        os.remove(img_path)
    io.write_png(img_path, img)
    canvas.close()
Example #12
def test_make_png():
    """Test to ensure that make_png functions correctly."""
    # Save random RGBA and RGB arrays onto disk as PNGs using make_png.
    # Read them back with an image library and check whether the array
    # saved is equal to the array read.

    # Create random RGBA array as type ubyte
    rgba_save = np.random.randint(256, size=(100, 100, 4)).astype(np.ubyte)
    # Get rid of the alpha for RGB
    rgb_save = rgba_save[:, :, :3]
    # Output file should be in temp
    png_out = op.join(temp_dir, 'random.png')

    # write_png implicitly tests _make_png
    for rgb_a in (rgba_save, rgb_save):
        write_png(png_out, rgb_a)
        rgb_a_read = read_png(png_out)
        assert_array_equal(rgb_a, rgb_a_read)
Example #14
def on_key_press(event):
    # modifiers = [key.name for key in event.modifiers]
    # print('Key pressed - text: %r, key: %s, modifiers: %r' % (
    #     event.text, event.key.name, modifiers))
    if event.key.name == 'A':
        # print(dir(view.scene))
        print(view.scene.children)
        objects.append(
            Ellipse3D([0, 0, 0],
                      parent=view.scene,
                      radius=2.5,
                      border_color='r',
                      color=(0, 0, 0, 0.5)))
    if event.key.name == 'S':
        filename = '{}-pyversor-screenshot.png'.format(
            datetime.datetime.now().isoformat())
        screenshot = canvas.render()
        io.write_png(filename, screenshot)
        print('Saved screenshot with filename: {}'.format(filename))
Example #15
def view_contacts(pc, contacts):
    canvas = vispy.scene.SceneCanvas(keys='interactive', show=True)
    view = canvas.central_widget.add_view()

    scatter = visuals.Markers()
    scatter.set_data(pc, edge_color=None, face_color=(1, 1, 1, .5), size=5)
    view.add(scatter)

    scatter2 = visuals.Markers()
    scatter2.set_data(contacts, edge_color=None, face_color=(1, 0, 0, 1), size=20)
    view.add(scatter2)

    view.camera = 'turntable'

    axis = visuals.XYZAxis(parent=view.scene)
    img = canvas.render()
    io.write_png('images/pc_contacts.png', img)
    send_image('images/pc_contacts.png')
    if sys.flags.interactive != 1:
        vispy.app.run()
Example #16
    def __init__(self, prime_numbers):
        app.Canvas.__init__(self, keys='interactive')
        self.prime_numbers = prime_numbers

        # Create vertices
        v_position, v_color, v_size = self.get_vertices()

        self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
        # Set uniform and attribute
        self.program['a_color'] = gloo.VertexBuffer(v_color)
        self.program['a_position'] = gloo.VertexBuffer(v_position)
        self.program['a_size'] = gloo.VertexBuffer(v_size)
        gloo.set_state(clear_color='white',
                       blend=True,
                       blend_func=('src_alpha', 'one_minus_src_alpha'))

        img = self.render()
        io.write_png('render.png', img)

        self.show()
Example #17
def view_pc(pc):
    #
    # Make a canvas and add simple view
    #
    canvas = vispy.scene.SceneCanvas(keys='interactive', show=True)
    view = canvas.central_widget.add_view()

    scatter = visuals.Markers()
    scatter.set_data(pc, edge_color=None, face_color=(1, 1, 1, .5), size=5)
    view.add(scatter)

    view.camera = 'turntable'

    axis = visuals.XYZAxis(parent=view.scene)
    img = canvas.render()
    io.write_png('images/pc.png', img)
    send_image('images/pc.png')
    if sys.flags.interactive != 1:
        vispy.app.run()
    x = input('Use this PC? (y/n):')
    return x
Example #18
      elif (args.puzzles or args.pics) and not os.path.isdir(passport_dir):
        os.mkdir(passport_dir, mode=0o755)
      if args.puzzles:
        passport_context = Context(passport.to_dict())
        puzzle_page = puzzle_template.render(passport_context)
        if args.dry_run:
          print('  index.html')
        else:
          with open(os.path.join(passport_dir, 'index.html'), 'w') as file:
            file.write(puzzle_page)
      if args.pics:
        for orbit in orbits:
          for dessin in orbit.dessins():
            canvas.set_domain(dessin.domain)
            image = canvas.render()
            name = dessin.domain.name()
            if args.dry_run:
              print(2*' ' + name + '.png')
            else:
              io.write_png(os.path.join(passport_dir, name + '.png'), image)
      
      puzzles.append(passport)
      if len(puzzles) >= args.n_max:
        break
  
  list_template = engine.get_template('puzzles.html')
  list_context = Context({'passports': [passport.to_dict() for passport in puzzles]})
  list_page = list_template.render(list_context)
  with open(os.path.join('docs', 'puzzles.html'), 'w') as file:
    file.write(list_page)
Example #19
def pcd_vispy(scans=None,
              img=None,
              boxes=None,
              name=None,
              index=0,
              vis_size=(800, 600),
              save_img=False,
              visible=True,
              no_gt=False,
              multi_vis=False,
              point_size=0.02):
    if multi_vis:
        canvas = vispy.scene.SceneCanvas(title=name,
                                         keys='interactive',
                                         size=vis_size,
                                         show=True)
    else:
        canvas = vispy.scene.SceneCanvas(title=name,
                                         keys='interactive',
                                         size=vis_size,
                                         show=visible)
    grid = canvas.central_widget.add_grid()
    vb = grid.add_view(row=0, col=0, row_span=2)
    vb_img = grid.add_view(row=1, col=0)

    vb.camera = 'turntable'
    vb.camera.elevation = 90  # 21.0
    vb.camera.center = (6.5, -0.5, 9.0)
    vb.camera.azimuth = -90  # -75.5
    vb.camera.scale_factor = 63  # 32.7

    if scans is not None:
        if not isinstance(scans, list):
            pos = scans[:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos,
                             edge_width=0,
                             face_color=(1, 1, 1, 1),
                             size=point_size,
                             scaling=True)
            vb.add(scatter)
        else:
            pos = scans[0][:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos,
                             edge_width=0,
                             face_color=(1, 1, 1, 1),
                             size=point_size,
                             scaling=True)
            vb.add(scatter)

            pos = scans[1][:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos,
                             edge_width=0,
                             face_color=(0, 1, 1, 1),
                             size=point_size,
                             scaling=True)
            vb.add(scatter)

    axis = visuals.XYZAxis()
    vb.add(axis)

    if img is None:
        img = np.zeros(shape=[1, 1, 3], dtype=np.float32)
    image = visuals.Image(data=img, method='auto')
    vb_img.camera = 'turntable'
    vb_img.camera.elevation = -90.0
    vb_img.camera.center = (2100, -380, -500)
    vb_img.camera.azimuth = 0.0
    vb_img.camera.scale_factor = 1500
    vb_img.add(image)

    if boxes is not None:
        gt_indice = np.where(boxes["cls_rpn"] == 4)[0]
        gt_cnt = len(gt_indice)
        boxes_cnt = boxes["center"].shape[0]
        i = 0
        for k in range(boxes_cnt):
            radio = max(boxes["score"][k] - 0.5, 0.005) * 2.0
            color = (0, radio, 0, 1)  # Green
            if boxes["cls_rpn"][k] == 4:  #  gt boxes
                i = i + 1
                vsp_box = visuals.Box(depth=boxes["size"][k][0],
                                      width=boxes["size"][k][1],
                                      height=boxes["size"][k][2],
                                      color=(0.3, 0.4, 0.0, 0.06),
                                      edge_color='pink')
                mesh_box = vsp_box.mesh.mesh_data
                mesh_border_box = vsp_box.border.mesh_data
                vertices = mesh_box.get_vertices()
                center = np.array([
                    boxes["center"][k][0], boxes["center"][k][1],
                    boxes["center"][k][2]
                ],
                                  dtype=np.float32)
                vertices_roa_trans = box_rot_trans(vertices,
                                                   -boxes["yaw"][k][0],
                                                   center)  #
                mesh_border_box.set_vertices(vertices_roa_trans)
                mesh_box.set_vertices(vertices_roa_trans)
                vb.add(vsp_box)
                if True:
                    text = visuals.Text(text='det: ({}/{})'.format(i, gt_cnt),
                                        color='white',
                                        face='OpenSans',
                                        font_size=12,
                                        pos=[
                                            boxes["center"][k][0],
                                            boxes["center"][k][1],
                                            boxes["center"][k][2]
                                        ],
                                        anchor_x='left',
                                        anchor_y='top',
                                        font_manager=None)
                    vb.add(text)
            elif (boxes["cls_rpn"][k] + boxes["cls_cube"][k]
                  ) == 0:  # True negative cls rpn divided by cube
                vb.add(
                    line_box(boxes["center"][k],
                             boxes["size"][k],
                             -boxes["yaw"][k],
                             color=color))
            elif (boxes["cls_rpn"][k] + boxes["cls_cube"][k]
                  ) == 1:  # False negative cls rpn divided by cube
                vb.add(
                    line_box(boxes["center"][k],
                             boxes["size"][k],
                             -boxes["yaw"][k],
                             color="red"))
            elif (boxes["cls_rpn"][k] + boxes["cls_cube"][k]
                  ) == 2:  # False positive cls rpn divided by cube
                vb.add(
                    line_box(boxes["center"][k],
                             boxes["size"][k],
                             -boxes["yaw"][k],
                             color="blue"))
            elif (boxes["cls_rpn"][k] + boxes["cls_cube"][k]
                  ) == 3:  # True positive cls rpn divided by cube
                vb.add(
                    line_box(boxes["center"][k],
                             boxes["size"][k],
                             -boxes["yaw"][k],
                             color="yellow"))
            text = visuals.Text(
                text=str(k),
                color=color,
                face='OpenSans',
                font_size=12,
                pos=[
                    boxes["center"][k][0] - boxes["size"][k][0] / 2,
                    boxes["center"][k][1] - boxes["size"][k][1] / 2,
                    boxes["center"][k][2] - boxes["size"][k][2] / 2
                ],
                anchor_x='left',
                anchor_y='top',
                font_manager=None)

            vb.add(text)

    if save_img:
        folder = path_add(cfg.TEST_RESULT, cfg.RANDOM_STR)
        if not os.path.exists(folder):
            os.makedirs(folder)
        fileName = path_add(folder, str(index).zfill(6) + '.png')
        res = canvas.render(bgcolor='black')[:, :, 0:3]
        vispy_file.write_png(fileName, res)

    @canvas.connect
    def on_key_press(ev):
        if ev.key.name in '+=':
            a = vb.camera.get_state()
            print(a)

    if visible:
        vispy.app.run()

    return canvas
Example #20
    def input_data(self,
                   scans=None,
                   img=None,
                   boxes=None,
                   index=0,
                   save_img=False,
                   no_gt=False):
        self.canvas = vispy.scene.SceneCanvas(show=True)
        self.grid = self.canvas.central_widget.add_grid()
        self.vb = self.grid.add_view(row=0, col=0, row_span=2)
        self.vb_img = self.grid.add_view(row=1, col=0)

        self.vb.camera = 'turntable'
        self.vb.camera.elevation = 90  #21.0
        self.vb.camera.center = (6.5, -0.5, 9.0)
        self.vb.camera.azimuth = -90  #-75.5
        self.vb.camera.scale_factor = 63  #32.7

        self.vb_img.camera = 'turntable'
        self.vb_img.camera.elevation = -90.0
        self.vb_img.camera.center = (2100, -380, -500)
        self.vb_img.camera.azimuth = 0.0
        self.vb_img.camera.scale_factor = 1500

        pos = scans[:, 0:3]
        scatter = visuals.Markers()
        scatter.set_gl_state('translucent', depth_test=False)
        scatter.set_data(pos,
                         edge_width=0,
                         face_color=(1, 1, 1, 1),
                         size=0.01,
                         scaling=True)
        self.vb.add(scatter)

        if img is None:
            img = np.zeros(shape=[1, 1, 3], dtype=np.float32)
        image = visuals.Image(data=img, method='auto')
        self.vb_img.add(image)

        if boxes is not None:
            if len(boxes.shape) == 1:
                boxes = boxes.reshape(1, -1)
            gt_indice = np.where(boxes[:, -1] == 2)[0]
            gt_cnt = len(gt_indice)
            i = 0
            for box in boxes:
                radio = max(box[0] - 0.5, 0.005) * 2.0
                color = (0, radio, 0, 1)  # Green

                if box[-1] == 4:  #  gt boxes
                    i = i + 1
                    vsp_box = visuals.Box(width=box[4],
                                          depth=box[5],
                                          height=box[6],
                                          color=(0.6, 0.8, 0.0,
                                                 0.3))  #edge_color='yellow')
                    mesh_box = vsp_box.mesh.mesh_data
                    mesh_border_box = vsp_box.border.mesh_data
                    vertices = mesh_box.get_vertices()
                    center = np.array([box[1], box[2], box[3]],
                                      dtype=np.float32)
                    vtcs = np.add(vertices, center)
                    mesh_border_box.set_vertices(vtcs)
                    mesh_box.set_vertices(vtcs)
                    self.vb.add(vsp_box)
                    if False:
                        text = visuals.Text(text='gt: ({}/{})'.format(
                            i, gt_cnt),
                                            color='white',
                                            face='OpenSans',
                                            font_size=12,
                                            pos=[box[1], box[2], box[3]],
                                            anchor_x='left',
                                            anchor_y='top',
                                            font_manager=None)
                        self.vb.add(text)

                if (box[-1] +
                        box[-2]) == 0:  # True negative cls rpn divided by cube
                    self.vb.add(line_box(box, color=color))
                if (box[-1] + box[-2]
                    ) == 1:  # False negative cls rpn divided by cube
                    self.vb.add(line_box(box, color='red'))
                if (box[-1] + box[-2]
                    ) == 2:  # False positive cls rpn divided by cube
                    if no_gt:
                        self.vb.add(line_box(box, color='yellow'))
                    else:
                        self.vb.add(line_box(box, color='blue'))
                if (box[-1] +
                        box[-2]) == 3:  # True positive cls rpn divided by cube
                    self.vb.add(line_box(box, color='yellow'))

        if save_img:
            folder = path_add(cfg.TEST_RESULT, cfg.RANDOM_STR)
            if not os.path.exists(folder):
                os.makedirs(folder)
            fileName = path_add(folder, str(index).zfill(6) + '.png')
            res = self.canvas.render(bgcolor='black')[:, :, 0:3]
            vispy_file.write_png(fileName, res)

        @self.canvas.connect
        def on_key_press(ev):
            if ev.key.name in '+=':
                a = self.vb.camera.get_state()
                print(a)
Example #21
    def on_timer(self, event):

        start = time()

        if not hasattr(self, 'plotted'):
            zoom_plot_ret = zoom_plot(self, [0, 4], [0, 1], first=True)
            if type(zoom_plot_ret) is tuple:
                self.plotted, self.rulers = zoom_plot_ret
            else:
                self.plotted = zoom_plot_ret
                self.rulers = []
            self.camera = self._plot_widgets[0].view.camera

        else:

            C = len(keyframes)
            c = self.f // self.c_frames
            z = (self.f / self.c_frames) % 1
            #print(self.f, z, c, C)

            if c >= C - 1:
                if self.rec:
                    self.close()
                self.done()

            else:
                #         0      1     2        3
                # LRBA = left, right, bottom, aspect
                LRBA = smooth(z, keyframes[c], keyframes[c + 1])

                left = LRBA[0]
                bottom = LRBA[2]
                width = LRBA[1] - LRBA[0]
                height = width * 1 / LRBA[3]

                # left, bottom, width, height
                rect = (left, bottom, width, height)

                self.camera.rect = tuple(rect)

            rect = self.camera.rect
            rates = [rect.left, rect.right]
            ends = [rect.bottom, rect.top]

            zoom_plot(self.plotted, rates, ends)

            if self.rec:
                rec_prefix = self.rec['pre']
                project_name = self.rec['name']

                image = self.render()
                io.write_png(
                    f'{rec_prefix}/{project_name}/{project_name}_{self.f}.png',
                    image)

                ETA = (time() - start) * (
                    self.f_max - self.f)  # (time / frame) * frames remaining
                ETA = (ETA / 60) / 60  # seconds to hours
                ETA = np.modf(ETA)
                ETA = int(ETA[1]), int(round(ETA[0] * 60))
                ETA = str(ETA[0]) + ":" + str(ETA[1]).zfill(2)

                print(f'>>> FRAME: {project_name}_{self.f}.png, ETA', ETA, ',',
                      round(100 * self.f / self.f_max, 2), '% :', self.f, '/',
                      self.f_max)

            self.f += 1
Example #22
  renderer = PlotRenderer()
  consumer = Consumer(renderer)
  render_executor = threading.Thread(target=renderer.executor)
  consumer_executor = threading.Thread(target=consumer.consume) 

  render_executor.start()
  consumer_executor.start()

  fig = vp.Fig()
  ax_left = fig[0, 0]
  ax_right = fig[0, 1]

  data = np.random.randn(10, 2)
  ax_left.plot(data)
  ax_right.histogram(data[1])

  start = time.time_ns()
  count = 0
  while True:
    count += 1

    image = fig.render()
    # io.imsave("wonderful.png",image)
    io.write_png("wonderful.png", image)
    # renderer.push(io._make_png, image, 0)
    # io._make_png(image, level=0)

    if (time.time_ns() - start) > 1e9:
      print(f'Producer: {count} {renderer.queue_in.qsize()}', flush=True)
      count = 0
      start = time.time_ns()
Example #23
 def disconnected(self, URI):
     print('Disconnected')
     img = self.canvas.render()
     io.write_png("01.png", img)
Example #24
def cube(im_in, azimuth=30., elevation=45., name=None,
         ext=ext, do_axis=True, show_label=True,
         cube_label = {'x':'x', 'y':'y', 't':'t'},
         colormap='gray', roll=-180., vmin=0., vmax=1.,
         figsize=figsize, figpath=figpath, **kwargs):

    """

    Visualization of the stimulus as a cube

    """
    im = im_in.copy()

    N_X, N_Y, N_frame = im.shape
    fx, fy, ft = get_grids(N_X, N_Y, N_frame)
    import numpy as np
    from vispy import app, scene
    try:
        AffineTransform = scene.transforms.AffineTransform
    except AttributeError:
        AffineTransform = scene.transforms.MatrixTransform

    app.use_app('pyglet')
    from vispy.util.transforms import perspective, translate, rotate
    canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=450)
    view = canvas.central_widget.add_view()

#         frame = scene.visuals.Cube(size = (N_X/2, N_frame/2, N_Y/2), color=(0., 0., 0., 0.),
#                                         edge_color='k',
#                                         parent=view.scene)
    for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],
              [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],
              [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):
#             line = scene.visuals.Line(pos=np.array([[p[0]*N_Y/2, p[1]*N_X/2, p[2]*N_frame/2], [p[3]*N_Y/2, p[4]*N_X/2, p[5]*N_frame/2]]), color='black', parent=view.scene)
        line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_frame/2, p[2]*N_Y/2],
                                                [p[3]*N_X/2, p[4]*N_frame/2, p[5]*N_Y/2]]), color='black', parent=view.scene)

    opts = {'parent':view.scene, 'cmap':'grays', 'clim':(0., 1.)}
    image_xy = scene.visuals.Image(np.rot90(im[:, :, 0], 3), **opts)
    tr_xy = AffineTransform()
    tr_xy.rotate(90, (1, 0, 0))
    tr_xy.translate((-N_X/2, -N_frame/2, -N_Y/2))
    image_xy.transform = tr_xy

    image_xt = scene.visuals.Image(np.fliplr(im[:, -1, :]), **opts)
    tr_xt = AffineTransform()
    tr_xt.rotate(90, (0, 0, 1))
    tr_xt.translate((N_X/2, -N_frame/2, N_Y/2))
    image_xt.transform = tr_xt

    image_yt = scene.visuals.Image(np.rot90(im[-1, :, :], 1), **opts)
    tr_yt = AffineTransform()
    tr_yt.rotate(90, (0, 1, 0))
    tr_yt.translate((+N_X/2, -N_frame/2, N_Y/2))
    image_yt.transform = tr_yt

    if do_axis:
        t = {}
        for text in ['x', 'y', 't']:
            t[text] = scene.visuals.Text(cube_label[text], parent=canvas.scene, face='Helvetica', color='black')
            t[text].font_size = 8
        t['x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8
        t['t'].pos = canvas.size[0] - canvas.size[0] // 5, canvas.size[1] - canvas.size[1] // 6
        t['y'].pos = canvas.size[0] // 12, canvas.size[1] // 2

    cam = scene.TurntableCamera(elevation=35, azimuth=30)
    cam.fov = 45
    cam.scale_factor = N_X * 1.7
    if do_axis: margin = 1.3
    else: margin = 1
    cam.set_range((-N_X/2, N_X/2), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2, N_frame/2))
    view.camera = cam
    if name is not None:
        im = canvas.render(size=figsize)
        app.quit()
        import vispy.io as io
        io.write_png(name + ext, im)
    else:
        app.quit()
        return im
Example #25
def visualize(z_in, azimuth=25., elevation=30.,
    thresholds=[0.94, .89, .75, .5, .25, .1], opacities=[.9, .8, .7, .5, .2, .1],
#     thresholds=[0.94, .89, .75], opacities=[.99, .7, .2],
#     thresholds=[0.7, .5, .2], opacities=[.95, .5, .2],
    fourier_label = {'f_x':'f_x', 'f_y':'f_y', 'f_t':'f_t'},
    name=None, ext=ext, do_axis=True, do_grids=False, draw_projections=True,
    colorbar=False, f_N=2., f_tN=2., figsize=figsize, figpath=figpath, **kwargs):
    """

    Visualization of the Fourier spectrum by showing 3D contour plots at different thresholds

    parameters
    ----------
    z : envelope of the cloud

    """
    z = z_in.copy()
    N_X, N_Y, N_frame = z.shape
    fx, fy, ft = get_grids(N_X, N_Y, N_frame)

    # Normalize the amplitude.
    z /= z.max()

    from vispy import app, scene
    try:
        AffineTransform = scene.transforms.AffineTransform
    except AttributeError:
        AffineTransform = scene.transforms.MatrixTransform

    app.use_app('pyglet')
    #from vispy.util.transforms import perspective, translate, rotate
    from vispy.color import Color
    transparent = Color(color='black', alpha=0.)
    import colorsys
    canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=450)
    view = canvas.central_widget.add_view()

    vol_data = np.rollaxis(np.rollaxis(z, 1), 2)
#         volume = scene.visuals.Volume(vol_data, parent=view.scene)#frame)
    center = scene.transforms.STTransform(translate=( -N_X/2, -N_Y/2, -N_frame/2))
#         volume.transform = center
#         volume.cmap = 'blues'

    if draw_projections:
        from vispy.color import Colormap
        cm = Colormap([(1.0, 1.0, 1.0, 1.0), 'k'])
        opts = {'parent':view.scene, 'cmap':cm, 'clim':(0., 1.)}

        energy_xy = np.rot90(np.max(z, axis=2)[:, ::-1], 3)[:, ::-1]
        fourier_xy = scene.visuals.Image(np.rot90(energy_xy), **opts)
        tr_xy = AffineTransform()
        tr_xy.rotate(90, (0, 0, 1))
        tr_xy.translate((N_X/2, -N_Y/2, -N_frame/2))
        fourier_xy.transform = tr_xy

        energy_xt = np.rot90(np.max(z, axis=1)[:, ::-1], 3)[::-1, ::-1]
        fourier_xt = scene.visuals.Image(energy_xt, **opts)
        tr_xt = AffineTransform()
        tr_xt.rotate(90, (1, 0, 0))
        tr_xt.translate((-N_X/2, N_Y/2, -N_frame/2))
        fourier_xt.transform = tr_xt

        energy_yt = np.max(z, axis=0)#[:, ::-1]
        fourier_yt = scene.visuals.Image(energy_yt, **opts)
        tr_yt = AffineTransform()
        tr_yt.rotate(90, (0, 1, 0))
        tr_yt.translate((-N_X/2, -N_Y/2, N_frame/2))
        fourier_yt.transform = tr_yt

    # Generate iso-surfaces at different energy levels
    surfaces = []
    for i_, (threshold, opacity) in enumerate(list(zip(thresholds, opacities))):
        surfaces.append(scene.visuals.Isosurface(z, level=threshold,
#                                         color=Color(np.array(colorsys.hsv_to_rgb(1.*i_/len(thresholds), 1., 1.)), alpha=opacity),
                                    color=Color(np.array(colorsys.hsv_to_rgb(.66, 1., 1.)), alpha=opacity),
                                    shading='smooth', parent=view.scene)
                                                )
        surfaces[-1].transform = center

    # Draw the XYZ axes and the outline of the data cube
    axis = scene.visuals.XYZAxis(parent=view.scene)
    for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],
              [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],
              [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):
        line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_Y/2, p[2]*N_frame/2], [p[3]*N_X/2, p[4]*N_Y/2, p[5]*N_frame/2]]), color='black', parent=view.scene)

    axisX = scene.visuals.Line(pos=np.array([[0, -N_Y/2, 0], [0, N_Y/2, 0]]), color='red', parent=view.scene)
    axisY = scene.visuals.Line(pos=np.array([[-N_X/2, 0, 0], [N_X/2, 0, 0]]), color='green', parent=view.scene)
    axisZ = scene.visuals.Line(pos=np.array([[0, 0, -N_frame/2], [0, 0, N_frame/2]]), color='blue', parent=view.scene)

    if do_axis:
        t = {}
        for text in ['f_x', 'f_y', 'f_t']:
            t[text] = scene.visuals.Text(fourier_label[text], parent=canvas.scene, face='Helvetica', color='black')
            t[text].font_size = 8
        t['f_x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8
        t['f_y'].pos = canvas.size[0] - canvas.size[0] // 8, canvas.size[1] - canvas.size[1] // 6
        t['f_t'].pos = canvas.size[0] // 8, canvas.size[1] // 2

    cam = scene.TurntableCamera(elevation=elevation, azimuth=azimuth, up='z')
    cam.fov = 48
    cam.scale_factor = N_X * 1.8
    if do_axis: margin = 1.35
    else: margin = 1
    cam.set_range((-N_X/2*margin, N_X/2/margin), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2*margin, N_frame/2/margin))
    view.camera = cam

    im = canvas.render(size=figsize)
    app.quit()
    if name is not None:
        import vispy.io as io
        io.write_png(name + ext, im)
    else:
        return im
Example #26
# Create a folder to store image frames
os.system("rm -rf imgs; rm -Rf video.mp4; mkdir imgs")
counter = 0

# Run some episodes
for num_episode in range(10):  # runs for 10 episodes by default
    obs = env.reset()  # reset the environment; get the first observation
    # sanity check to make sure the script is still running from CLI
    print(num_episode)

    done = False
    while not done:  # only reset when the environment says to
        action, _states = model.predict(
            obs, deterministic=True)  # use model for movements
        obs, reward, done, info = env.step(
            action)  # step model, gather new input
        env.render(
        )  # render the image to the display, to allow for optional mouse input

        # also save the rendered frame to a PNG file
        io.write_png("imgs/{0:05}.png".format(counter),
                     env.envs[0].renderer.canvas.render())
        counter += 1

print("Creating video:")
os.system(
    "ffmpeg -r 60 -i imgs/%05d.png -vcodec libx264 -y -an video.mp4 -vf \"pad=ceil(iw/2)*2:ceil(ih/2)*2\" -pix_fmt yuv420p && rm -rf imgs"
)

print("Video file created. See 'video.mp4' in script folder.")
Example #27
def pcd_vispy_standard(scans=None,
                       img=None,
                       boxes=None,
                       name=None,
                       index=0,
                       vis_size=(800, 600),
                       save_img=False,
                       visible=True,
                       multi_vis=False,
                       point_size=0.02,
                       lidar_view_set=None):
    if multi_vis:
        canvas = vispy.scene.SceneCanvas(title=name,
                                         keys='interactive',
                                         size=vis_size,
                                         show=True)
    else:
        canvas = vispy.scene.SceneCanvas(title=name,
                                         keys='interactive',
                                         size=vis_size,
                                         show=visible)
    grid = canvas.central_widget.add_grid()
    vb = grid.add_view(row=0, col=0, row_span=2)
    vb_img = grid.add_view(row=1, col=0)
    if lidar_view_set is None:
        vb.camera = 'turntable'
        vb.camera.elevation = 90  # 21.0
        vb.camera.center = (6.5, -0.5, 9.0)
        vb.camera.azimuth = -90  # -75.5
        vb.camera.scale_factor = 63  # 32.7
    else:
        vb.camera = 'turntable'
        vb.camera.elevation = lidar_view_set['elevation']  # 21.0
        vb.camera.center = lidar_view_set['center']
        vb.camera.azimuth = lidar_view_set['azimuth']
        vb.camera.scale_factor = lidar_view_set['scale_factor']

    if scans is not None:
        if not isinstance(scans, list):
            pos = scans[:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos,
                             edge_width=0,
                             face_color=(1, 1, 1, 1),
                             size=point_size,
                             scaling=True)
            vb.add(scatter)
        else:
            pos = scans[0][:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos,
                             edge_width=0,
                             face_color=(1, 1, 1, 1),
                             size=point_size,
                             scaling=True)
            vb.add(scatter)

            pos = scans[1][:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos,
                             edge_width=0,
                             face_color=(0, 1, 1, 1),
                             size=0.1,
                             scaling=True)
            vb.add(scatter)

    axis = visuals.XYZAxis()
    vb.add(axis)

    if img is None:
        img = np.zeros(shape=[1, 1, 3], dtype=np.float32)
    image = visuals.Image(data=img, method='auto')
    vb_img.camera = 'turntable'
    vb_img.camera.elevation = -90.0
    vb_img.camera.center = (1900, 160, -1300)
    vb_img.camera.azimuth = 0.0
    vb_img.camera.scale_factor = 1500
    vb_img.add(image)

    if boxes is not None:
        if len(boxes.shape) == 1:
            boxes = boxes.reshape(-1, boxes.shape[0])
        # one box: type,xyz,lwh,yaw,[score,reserve1,reserve2]
        for box in boxes:
            if box[0] == 1:  # type:car
                vb.add(line_box_stand(box, color="yellow"))
            elif box[0] == 2:  # type:Perdestrain
                vb.add(line_box_stand(box, color="red"))
            elif box[0] == 3:  # type:Cyclist
                vb.add(line_box_stand(box, color="blue"))
            elif box[0] == 4:  # type:Van
                vb.add(line_box_stand(box, color="pink"))
            else:
                vb.add(line_box_stand(box, color="green"))

    if save_img:
        folder = path_add(cfg.TEST_RESULT, cfg.RANDOM_STR)
        if not os.path.exists(folder):
            os.makedirs(folder)
        fileName = path_add(folder, str(index).zfill(6) + '.png')
        res = canvas.render(bgcolor='black')[:, :, 0:3]
        vispy_file.write_png(fileName, res)

    @canvas.connect
    def on_key_press(ev):
        if ev.key.name in '+=':
            a = vb.camera.get_state()
            print(a)

    if visible:
        vispy.app.run()

    return canvas
Example #28
def visualize(z_in, azimuth=25., elevation=30.,
    thresholds=[0.94, .89, .75, .5, .25, .1], opacities=[.9, .8, .7, .5, .2, .1],
#     thresholds=[0.94, .89, .75], opacities=[.99, .7, .2],
#     thresholds=[0.7, .5, .2], opacities=[.95, .5, .2],
    fourier_label = {'f_x':'f_x', 'f_y':'f_y', 'f_t':'f_t'},
    name=None, ext=ext, do_axis=True, do_grids=False, draw_projections=True,
    colorbar=False, f_N=2., f_tN=2., figsize=figsize, **kwargs):
    """

    Visualization of the Fourier spectrum by showing 3D contour plots at different thresholds

    parameters
    ----------
    z : envelope of the cloud

    """
    if not(os.path.isdir(figpath)): os.mkdir(figpath)
    z = z_in.copy()
    N_X, N_Y, N_frame = z.shape
    fx, fy, ft = get_grids(N_X, N_Y, N_frame)

    # Normalize the amplitude.
    z /= z.max()

    from vispy import app, scene
    app.use_app('pyglet')
    #from vispy.util.transforms import perspective, translate, rotate
    from vispy.color import Color
    transparent = Color(color='black', alpha=0.)
    import colorsys
    canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=450)
    view = canvas.central_widget.add_view()

    vol_data = np.rollaxis(np.rollaxis(z, 1), 2)
#         volume = scene.visuals.Volume(vol_data, parent=view.scene)#frame)
    center = scene.transforms.STTransform(translate=( -N_X/2, -N_Y/2, -N_frame/2))
#         volume.transform = center
#         volume.cmap = 'blues'

    if draw_projections:
        from vispy.color import Colormap
        cm = Colormap([(1.0, 1.0, 1.0, 1.0), 'k'])
        opts = {'parent':view.scene, 'cmap':cm, 'clim':(0., 1.)}

        energy_xy = np.rot90(np.max(z, axis=2)[:, ::-1], 3)
        fourier_xy = scene.visuals.Image(np.rot90(energy_xy), **opts)
        tr_xy = scene.transforms.MatrixTransform()
        tr_xy.rotate(90, (0, 0, 1))
        tr_xy.translate((N_X/2, -N_Y/2, -N_frame/2))
        fourier_xy.transform = tr_xy

        energy_xt = np.rot90(np.max(z, axis=1)[:, ::-1], 3)
        fourier_xt = scene.visuals.Image(energy_xt, **opts)
        tr_xt = scene.transforms.MatrixTransform()
        tr_xt.rotate(90, (1, 0, 0))
        tr_xt.translate((-N_X/2, N_Y/2, -N_frame/2))
        fourier_xt.transform = tr_xt

        energy_yt = np.max(z, axis=0)[:, ::-1]
        fourier_yt = scene.visuals.Image(energy_yt, **opts)
        tr_yt = scene.transforms.MatrixTransform()
        tr_yt.rotate(90, (0, 1, 0))
        tr_yt.translate((-N_X/2, -N_Y/2, N_frame/2))
        fourier_yt.transform = tr_yt

    # Generate iso-surfaces at different energy levels
    surfaces = []
    for i_, (threshold, opacity) in enumerate(zip(thresholds, opacities)):
        surfaces.append(scene.visuals.Isosurface(z, level=threshold,
#                                         color=Color(np.array(colorsys.hsv_to_rgb(1.*i_/len(thresholds), 1., 1.)), alpha=opacity),
                                    color=Color(np.array(colorsys.hsv_to_rgb(.66, 1., 1.)), alpha=opacity),
                                    shading='smooth', parent=view.scene)
                                                )
        surfaces[-1].transform = center

    # Draw the XYZ axes and the outline of the data cube
    axis = scene.visuals.XYZAxis(parent=view.scene)
    for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],
              [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],
              [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):
        line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_Y/2, p[2]*N_frame/2], [p[3]*N_X/2, p[4]*N_Y/2, p[5]*N_frame/2]]), color='black', parent=view.scene)

    axisX = scene.visuals.Line(pos=np.array([[0, -N_Y/2, 0], [0, N_Y/2, 0]]), color='red', parent=view.scene)
    axisY = scene.visuals.Line(pos=np.array([[-N_X/2, 0, 0], [N_X/2, 0, 0]]), color='green', parent=view.scene)
    axisZ = scene.visuals.Line(pos=np.array([[0, 0, -N_frame/2], [0, 0, N_frame/2]]), color='blue', parent=view.scene)

    if do_axis:
        t = {}
        for text in ['f_x', 'f_y', 'f_t']:
            t[text] = scene.visuals.Text(fourier_label[text], parent=canvas.scene, face='Helvetica', color='black')
            t[text].font_size = 8
        t['f_x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8
        t['f_y'].pos = canvas.size[0] - canvas.size[0] // 8, canvas.size[1] - canvas.size[1] // 6
        t['f_t'].pos = canvas.size[0] // 8, canvas.size[1] // 2

    cam = scene.TurntableCamera(elevation=elevation, azimuth=azimuth, up='z')
    cam.fov = 48
    cam.scale_factor = N_X * 1.8
    if do_axis: margin = 1.35
    else: margin = 1
    cam.set_range((-N_X/2*margin, N_X/2/margin), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2*margin, N_frame/2/margin))
    view.camera = cam

    im = canvas.render(size=figsize)
    app.quit()
    if name is not None:
        import vispy.io as io
        io.write_png(name + ext, im)
    else:
        return im
Example #29
 def on_key_press(self, event):
     # Hold <Ctrl> to enter drag mode.
     if keys.CONTROL in event.modifiers:
         # TODO: I cannot get the mouse position within the key_press event ...
         # so it is not yet implemented. The purpose of this event handler
         # is simply trying to highlight the visual node when <Ctrl> is pressed
         # but mouse is not moved (just nicer interactivity), so not very
         # high priority now.
         pass
     # Press <Space> to reset camera.
     if event.text == ' ':
         self.camera.fov = self.fov
         self.camera.azimuth = self.azimuth
         self.camera.elevation = self.elevation
         self.camera.set_range()
         self.camera.scale_factor = self.scale_factor
         self.camera.scale_factor /= self.zoom_factor
         for child in self.view.children:
             if type(child) == XYZAxis:
                 child._update_axis()
     # Press <s> to save a screenshot.
     if event.text == 's':
         screenshot = _screenshot()
         io.write_png(self.title + '.png', screenshot)
     # Press <d> to toggle drag mode.
     if event.text == 'd':
         if not self.drag_mode:
             self.drag_mode = True
             self.camera.viewbox.events.mouse_move.disconnect(
                 self.camera.viewbox_mouse_event)
         else:
             self.drag_mode = False
             self._exit_drag_mode()
             self.camera.viewbox.events.mouse_move.connect(
                 self.camera.viewbox_mouse_event)
     # Press <a> to get the parameters of all visual nodes.
     if event.text == 'a':
         print("===== All useful parameters ====")
         # Canvas size.
         print("Canvas size = {}".format(self.size))
         # Collect camera parameters.
         print("Camera:")
         camera_state = self.camera.get_state()
         for key, value in camera_state.items():
             print(" - {} = {}".format(key, value))
         print(" - {} = {}".format('zoom factor', self.zoom_factor))
         # Collect slice parameters.
         print("Slices:")
         pos_dict = {'x': [], 'y': [], 'z': []}
         for node in self.view.scene.children:
             if type(node) == AxisAlignedImage:
                 pos = node.pos
                 if node.seismic_coord_system and node.axis in ['y', 'z']:
                     pos = node.limit[1] - pos  # revert y and z axis
                 pos_dict[node.axis].append(pos)
         for axis, pos in pos_dict.items():
             print(" - {}: {}".format(axis, pos))
         # Collect the axis legend parameters.
         for node in self.view.children:
             if type(node) == XYZAxis:
                 print("XYZAxis loc = {}".format(node.loc))
Example #30
    OSMESA_LIBRARY=/opt/osmesa_llvmpipe/lib/libOSMesa.so \
    python examples/offscreen/simple_osmesa.py
"""
import vispy
vispy.use(app='osmesa')  # noqa

import numpy as np
import vispy.plot as vp
import vispy.io as io

# Check the application correctly picked up osmesa
assert vispy.app.use_app().backend_name == 'osmesa', 'Not using OSMesa'

data = np.load(io.load_data_file('electrophys/iv_curve.npz'))['arr_0']
time = np.arange(0, data.shape[1], 1e-4)

fig = vp.Fig(size=(800, 800), show=False)

x = np.linspace(0, 10, 20)
y = np.cos(x)
line = fig[0, 0].plot((x, y), symbol='o', width=3, title='I/V Curve',
                      xlabel='Current (pA)', ylabel='Membrane Potential (mV)')
grid = vp.visuals.GridLines(color=(0, 0, 0, 0.5))
grid.set_gl_state('translucent')
fig[0, 0].view.add(grid)

fig.show()

img = fig.render()
io.write_png("osmesa.png", img)
Example #31
# save the visualizations of the ground truth labels
for frame_idx in range(200):
    data_path = os.path.join(data_root, seqs[seq_idx] + '-' + '%06d' % frame_idx + '.npz')
    scan.open_scan(data_path)
    scan.colorize()

    scatter = visuals.Markers()
    scatter.set_data(scan.points,
                    face_color=scan.sem_label_color[..., ::-1],
                    edge_color=scan.sem_label_color[..., ::-1],
                    size=2, edge_width=2.0)
    canvas = vispy.scene.SceneCanvas(keys='interactive', show=False, bgcolor='w', size=(1000, 1000))
    
    camera = vispy.scene.cameras.TurntableCamera(elevation=0, azimuth=-30, roll=0)
    view = canvas.central_widget.add_view()  

    view.add(scatter)
    view.camera = camera

    img_root = os.path.join(save_root, seqs[seq_idx], 'groundtruth')
    os.makedirs(img_root, exist_ok=True)
    img_path = os.path.join(img_root, '%06d' % frame_idx + '.png')
    img = canvas.render()
    print('Processing ' + img_path)
    io.write_png(img_path, img[..., :3])

# save the visualizations of predictions
for frame_idx in range(200):
    data_path = os.path.join(data_root, seqs[seq_idx] + '-' + '%06d'%frame_idx + '.npz')
    scan.open_scan(data_path)
    scan.colorize()

    scatter = visuals.Markers()
    scatter.set_data(scan.points,
                    face_color=scan.sem_pred_color[..., ::-1],
                    edge_color=scan.sem_pred_color[..., ::-1],
                    size=2, edge_width=2.0)
    canvas = vispy.scene.SceneCanvas(keys='interactive', show=False, bgcolor='w', size=(1000, 1000))
    
    camera = vispy.scene.cameras.TurntableCamera(elevation=0, azimuth=-30, roll=0)
Example #32
    def __init__(self, surface, bed, new_waves_class=None, size=(600, 600), 
            sky_img_path="D:\Documents\water-surface\water-surface/fluffy_clouds.png", 
            bed_img_path="D:\Documents\water-surface\water-surface/seabed.png",
            depth_img_path="D:\Documents\water-surface\water-surface/depth.png"):
        app.Canvas.__init__(self, size=size,
                            title="Water surface simulator")
        # depth_test: when disabled, all points are drawn regardless of depth;
        # blend: when disabled, the on-screen pixel color is exactly gl_fragColor (no color mixing).
        gloo.set_state(clear_color=(0, 0, 0, 1), depth_test=True, blend=True)
        self.program = gloo.Program(vertex, fragment_triangle)
        self.program_point = gloo.Program(vertex, fragment_point)

        self.surface = surface
        self.surface_class = new_waves_class
        self.surface_wave_list = []
        self.add_wave_center((self.size[0] / 2, self.size[1] / 2))

        self.bed = bed
        self.sky_img = io.read_png(sky_img_path)
        self.bed_img = io.read_png(bed_img_path)
        io.write_png(depth_img_path, self.bed.depth())
        self.depth_img = io.read_png(depth_img_path)
        
        # pass the xy coordinates of the vertices to the shader once; they do not change over time
        self.program["a_position"] = self.surface.position()
        self.program_point["a_position"] = self.surface.position()

        self.program['u_sky_texture'] = gloo.Texture2D(
            self.sky_img, wrapping='repeat', interpolation='linear')
        self.program['u_bed_texture'] = gloo.Texture2D(
            self.bed_img, wrapping='repeat', interpolation='linear')
        
        self.program['u_bed_depth_texture'] = gloo.Texture2D(
            self.depth_img, wrapping='repeat', interpolation='linear')
            
        self.program_point["u_eye_height"] = self.program["u_eye_height"] = 3
        self.program["u_alpha"] = 0.3
        self.program["u_bed_depth"] = -0.5

        self.program["u_sun_direction"] = self.normalize([0, 1, 0.1])
        self.program["u_sun_diffused_color"] = [1, 0.8, 1]
        self.program["u_sun_reflected_color"] = [1, 0.8, 0.6]

        self.triangles = gloo.IndexBuffer(self.surface.triangulation())

        # Set up GUI
        self.camera = np.array([0, 0, 1])
        self.up = np.array([0, 1, 0])
        self.set_camera()
        self.are_points_visible = False
        self.drag_start = None
        self.diffused_flag = True
        self.reflected_flag = True
        self.bed_flag = True
        self.depth_flag = True
        self.sky_flag = True
        self.apply_flags()

        # Run
        self._timer = app.Timer('auto', connect=self.on_timer, start=True)
        self.activate_zoom()
        self.show()
Example #33
    # set up canvas
    canvas = DomainCanvas(4, 4, 3, size=(400, 400))

    # handle command line options
    parser = ArgumentParser()
    parser.add_argument('-n',
                        dest='n_max',
                        type=int,
                        action='store',
                        default=20)
    parser.add_argument('--all', dest='partial', action='store_false')
    parser.add_argument('--dry-run', dest='dry_run', action='store_true')
    args = parser.parse_args()

    # render dessins
    n = 0
    for orbit in hyp_orbits:
        for dessin in orbit.dessins():
            canvas.set_domain(dessin.domain)
            image = canvas.render()
            name = dessin.domain.name()
            if args.dry_run:
                print(name + '.png')
            else:
                io.write_png(os.path.join('batch-export', name + '.png'),
                             image)

            n += 1
            if args.partial and n >= args.n_max:
                sys.exit(0)
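As a quick sanity check of the options defined above, argparse accepts an explicit argument list, so the parser can be exercised without touching sys.argv:

    # illustrative only: exercise the parser defined above with an explicit argument list
    args = parser.parse_args(['-n', '5', '--dry-run'])
    print(args.n_max, args.partial, args.dry_run)   # -> 5 True True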
Example #34
def on_key_press(event):
    if event.text == 's':
        global still_num

        # Stop preexisting animation
        fade_out.stop()
        usr_message.text = 'Saved still image'
        usr_message.color = (1, 1, 1, 0)

        # Write screen to .png
        still = canvas.render()
        still_name = str(seq) + "_" + str(still_num) + ".png"
        io.write_png(still_name, still)
        still_num = still_num + 1

        # Display and fade saved message
        fade_out.start()

    if event.text == 'e':
        global load_data

        # Stop preexisting animation
        fade_out.stop()
        usr_message.color = (1, 1, 1, 0)

        if load_data is None:
            global fractal_data

            # export data created in this program
            file_name = "3D_Fractal_" + seq + "_steps" + str(steps)
            np.save(file_name, fractal_data, allow_pickle=False)

            # set user message
            usr_message.text = 'Exported fractal'
        else:
            # set user message
            usr_message.text = 'Cannot export data loaded into program'

        # display user message
        fade_out.start()

    if event.text == 'l':
        global volume, loaded_data_later, load_data, fractal_data
        loaded_data_later = True

        # Stop preexisting animation
        fade_out.stop()
        usr_message.color = (1, 1, 1, 0)

        # open file dialog to select load data
        root = tk.Tk()
        root.withdraw()
        load_data = filedialog.askopenfilename()

        # make sure file extension is .npy
        file_ext = load_data[-3:]
        if file_ext != 'npy':
            usr_message.text = 'Can only load .npy files'
        else:
            usr_message.text = 'Fractal loaded'

            # load fractal data
            fractal_data = np.load(load_data)

            # normalize data and get color map
            fractal_3D, chaotic_boundary = normalize(fractal_data, 0.0)
            fractal_map = getfractalcolormap(chaotic_boundary)

            # erase old volume
            volume.parent = None

            # make new volume from normalized fractal data
            volume = scene.visuals.Volume(fractal_3D,
                                          clim=(0, 1),
                                          method='translucent',
                                          parent=view.scene,
                                          threshold=0.225,
                                          cmap=fractal_map,
                                          emulate_texture=False)
            volume.transform = scene.STTransform(translate=(-steps // 2,
                                                            -steps // 2,
                                                            -steps // 2))

        # display user message
        fade_out.start()
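The handler above relies on module-level objects (canvas, usr_message, fade_out, seq, still_num, steps, view, ...) created elsewhere in the script; wiring it up follows the usual vispy pattern, sketched here for context:

# minimal sketch, assuming `canvas` is the SceneCanvas the rest of the script creates
canvas.events.key_press.connect(on_key_press)

if __name__ == '__main__':
    canvas.show()
    canvas.app.run()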
Example #35
# create_movie.py
# Oliver Evans
# Clarkson University REU 2016
# Created: Thu 28 Jul 2016 12:18:30 AM EDT
# Last Edited: Thu 28 Jul 2016 12:25:26 AM EDT

# Create movie from working_visualization.py of constant rotation about z-axis

from working_visualization import *
from vispy import io
from vispy_volume import Canvas

c = Canvas(xlim, ylim, zlim, PP_3d, clim=None)
c.run()

c._view.camera.set_state({'elevation': 20})

for i in range(40):
    c._view.camera.set_state({'azimuth': i * 9})
    img = c.render()
    io.write_png('volume_img/img/{:03d}.png'.format(i), img)
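The loop only writes 40 separate PNGs; one hedged way to assemble them into a movie afterwards, assuming the imageio package (with ffmpeg support) is installed and using a made-up output path:

# illustrative post-processing step: stitch the rendered frames into an mp4
import imageio

with imageio.get_writer('volume_img/rotation.mp4', fps=10) as writer:
    for i in range(40):
        writer.append_data(imageio.imread('volume_img/img/{:03d}.png'.format(i)))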
Example #36
def run3DVisualizationIPM(ipmPoints,
                          centroids,
                          violations,
                          frame,
                          render=False):
    '''
    Takes the world coordinates produced by the IPM method, together with their
    color values, and plots them in 3D space using vispy. Also draws halo
    cylinders around the 3D points corresponding to people centroids (from
    efficientdet bounding boxes), and 3D lines (tubes) between the halos of each
    pair of people violating the 6' restriction.

            Parameters:
                    ipmPoints (list): World coordinates with color, generated by the ipm method (x,y,z,[r,g,b])

                    centroids (list): A list of lists, where each inner list object is a 3D world point, representing the centroid of a person (found using the efficientdet bounding boxes), in format [x,y,z]
                    
                    violations (list): A list of lists, where each inner list is a pair of integers, which are two indices in the bBoxes list representing a pair of people violating the 6' restriction. Formatted as [[pi1,pi2],[pi3,pi4]]

                    frame (int): frame number for the filename to save to

                    render (bool): whether or not to render canvas to a file
                    
            Returns:
                    None
    '''

    # Create canvas to draw everything
    canvas = vispy.scene.SceneCanvas(keys='interactive',
                                     show=True,
                                     size=(1920, 1080))
    view = canvas.central_widget.add_view()

    # Unpack ipm points
    ipmPos = []
    ipmColor = []
    for point in ipmPoints:
        ipmPos.append([point[0], point[1], point[2]])
        r = point[3][0]
        g = point[3][1]
        b = point[3][2]
        ipmColor.append([r, g, b])

    pos = np.array(ipmPos)
    colors = np.array(ipmColor)

    # 3D scatter plot to show depth map pointcloud
    scatter = visuals.Markers()
    scatter.antialias = 0
    scatter.set_data(pos, edge_color=None, face_color=colors, size=5)
    view.add(scatter)
    # Draw cylinders around centroids
    for point in centroids:
        x, y, z = point

        cyl_mesh = vispy.geometry.create_cylinder(10,
                                                  10,
                                                  radius=[500, 500],
                                                  length=50)

        # Move cylinder to correct location in 3D space
        # Make sure to negate the y value, otherwise everything will be mirrored
        vertices = cyl_mesh.get_vertices()
        center = np.array([x, -y, z + 150], dtype=np.float32)
        vtcs = np.add(vertices, center)
        cyl_mesh.set_vertices(vtcs)

        cyl = visuals.Mesh(meshdata=cyl_mesh, color='g')
        view.add(cyl)

    # Draw lines between violating people
    for pair in violations:
        x1, y1, z1 = centroids[pair[0]]
        x2, y2, z2 = centroids[pair[1]]
        #lin = visuals.Line(pos=np.array([[x1,-y1,z1+1],[x2,-y2,z2+1]]), color='r', method='gl')
        #view.add(lin)
        tube = visuals.Tube(points=np.array([[x1, -y1, z1 + 150],
                                             [x2, -y2, z2 + 150]]),
                            radius=50,
                            color='red')
        view.add(tube)

    view.camera = 'turntable'  # or try 'arcball'

    view.camera.elevation = 30.5
    view.camera.azimuth = -78.5
    view.camera.distance = 8250.000000000002
    view.camera.fov = 60
    # Add a colored 3D axis for orientation
    axis = visuals.XYZAxis(parent=view.scene)
    if (render):
        img = canvas.render()
        fname = "out_renders/IPM" + str(frame) + ".png"
        io.write_png(fname, img)
    else:
        vispy.app.run()
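For orientation, a hypothetical call with toy inputs shaped as the docstring describes (all values below are made up):

# toy data: two colored world points, two centroids, one violating pair
ipm_points = [(0.0, 0.0, 0.0, [0.8, 0.8, 0.8]),
              (500.0, 1000.0, 0.0, [0.9, 0.1, 0.1])]
centroids = [[0.0, 0.0, 0.0], [500.0, 1000.0, 0.0]]
violations = [[0, 1]]

run3DVisualizationIPM(ipm_points, centroids, violations, frame=0, render=False)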
Example #37
def cube(im_in, azimuth=30., elevation=45., name=None,
         ext=ext, do_axis=True, show_label=True,
         cube_label = {'x':'x', 'y':'y', 't':'t'},
         colormap='gray', roll=-180., vmin=0., vmax=1.,
         figsize=figsize, **kwargs):

    """

    Visualization of the stimulus as a cube

    """
    if not(os.path.isdir(figpath)): os.mkdir(figpath)
    im = im_in.copy()

    N_X, N_Y, N_frame = im.shape
    fx, fy, ft = get_grids(N_X, N_Y, N_frame)
    import numpy as np
    from vispy import app, scene
    app.use_app('pyglet')
    from vispy.util.transforms import perspective, translate, rotate
    canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=450)
    view = canvas.central_widget.add_view()

#         frame = scene.visuals.Cube(size = (N_X/2, N_frame/2, N_Y/2), color=(0., 0., 0., 0.),
#                                         edge_color='k',
#                                         parent=view.scene)
    for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],
              [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],
              [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):
#             line = scene.visuals.Line(pos=np.array([[p[0]*N_Y/2, p[1]*N_X/2, p[2]*N_frame/2], [p[3]*N_Y/2, p[4]*N_X/2, p[5]*N_frame/2]]), color='black', parent=view.scene)
        line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_frame/2, p[2]*N_Y/2],
                                                [p[3]*N_X/2, p[4]*N_frame/2, p[5]*N_Y/2]]), color='black', parent=view.scene)

    opts = {'parent':view.scene, 'cmap':'grays', 'clim':(0., 1.)}
    image_xy = scene.visuals.Image(np.rot90(im[:, :, 0], 3), **opts)
    tr_xy = scene.transforms.MatrixTransform()
    tr_xy.rotate(90, (1, 0, 0))
    tr_xy.translate((-N_X/2, -N_frame/2, -N_Y/2))
    image_xy.transform = tr_xy

    image_xt = scene.visuals.Image(np.fliplr(im[:, -1, :]), **opts)
    tr_xt = scene.transforms.MatrixTransform()
    tr_xt.rotate(90, (0, 0, 1))
    tr_xt.translate((N_X/2, -N_frame/2, N_Y/2))
    image_xt.transform = tr_xt

    image_yt = scene.visuals.Image(np.rot90(im[-1, :, :], 1), **opts)
    tr_yt = scene.transforms.MatrixTransform()
    tr_yt.rotate(90, (0, 1, 0))
    tr_yt.translate((+N_X/2, -N_frame/2, N_Y/2))
    image_yt.transform = tr_yt

    if do_axis:
        t = {}
        for text in ['x', 'y', 't']:
            t[text] = scene.visuals.Text(cube_label[text], parent=canvas.scene, face='Helvetica', color='black')
            t[text].font_size = 8
        t['x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8
        t['t'].pos = canvas.size[0] - canvas.size[0] // 5, canvas.size[1] - canvas.size[1] // 6
        t['y'].pos = canvas.size[0] // 12, canvas.size[1] // 2

    cam = scene.TurntableCamera(elevation=35, azimuth=30)
    cam.fov = 45
    cam.scale_factor = N_X * 1.7
    if do_axis: margin = 1.3
    else: margin = 1
    cam.set_range((-N_X/2, N_X/2), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2, N_frame/2))
    view.camera = cam
    if name is not None:
        im = canvas.render(size=figsize)
        app.quit()
        import vispy.io as io
        io.write_png(name + ext, im)
    else:
        app.quit()
        return im
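A hypothetical call, assuming the module-level figpath, ext and figsize defaults exist and that im is a 3-D stimulus array with values in [0, 1]:

# illustrative only: render a random 64x64x32 stimulus and save it as 'random_stimulus' + ext
import numpy as np
im = np.random.rand(64, 64, 32)
cube(im, azimuth=30., elevation=45., name='random_stimulus')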
Example #38
vispy.use(app='osmesa')  # noqa

import numpy as np
import vispy.plot as vp
import vispy.io as io

# Check the application correctly picked up osmesa
assert vispy.app.use_app().backend_name == 'osmesa', 'Not using OSMesa'

data = np.load(io.load_data_file('electrophys/iv_curve.npz'))['arr_0']
time = np.arange(0, data.shape[1], 1e-4)

fig = vp.Fig(size=(800, 800), show=False)

x = np.linspace(0, 10, 20)
y = np.cos(x)
line = fig[0, 0].plot((x, y),
                      symbol='o',
                      width=3,
                      title='I/V Curve',
                      xlabel='Current (pA)',
                      ylabel='Membrane Potential (mV)')
grid = vp.visuals.GridLines(color=(0, 0, 0, 0.5))
grid.set_gl_state('translucent')
fig[0, 0].view.add(grid)

fig.show()

img = fig.render()
io.write_png("osmesa.png", img)