Example No. 1
class QtPlotItem3D(QtPlotItem2D):
    """Use forward instance to not cause import issues if
    not installed."""

    widget = ForwardInstance(gl_view_widget)

    def create_widget(self):
        from pyqtgraph.opengl import GLViewWidget

        if isinstance(self.parent(), AbstractQtPlotItem):
            self.widget = self.parent_widget()
        else:
            self.widget = GLViewWidget(parent=self.parent_widget())
            self.widget.opts["distance"] = 40
            self.widget.raise_()

    def init_signals(self):
        pass

    def _create_grid(self):
        from pyqtgraph.opengl import GLGridItem

        # three grid planes: one on each of the YZ, XZ, and XY walls of the view
        gx = GLGridItem()
        gx.rotate(90, 0, 1, 0)
        gx.translate(-10, 0, 0)
        self.widget.addItem(gx)
        gy = GLGridItem()
        gy.rotate(90, 1, 0, 0)
        gy.translate(0, -10, 0)
        self.widget.addItem(gy)
        gz = GLGridItem()
        gz.translate(0, 0, -10)
        self.widget.addItem(gz)

    def set_z(self, z):
        self._refresh_plot()

    def _refresh_plot(self):
        import numpy as np

        # import pyqtgraph as pg
        from pyqtgraph import opengl as gl

        self._create_grid()
        # stack the declaration's x/y/z arrays into an (N, 3) array of points
        pts = np.vstack(
            [self.declaration.x, self.declaration.y,
             self.declaration.z]).transpose()
        plt = gl.GLLinePlotItem(
            pos=pts
        )  # , color=pg.glColor((i,n*1.3)), width=(i+1)/10., antialias=True)
        self.widget.addItem(plt)

    def set_grid(self, grid):
        pass
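The snippet above relies on a gl_view_widget resolver that is not shown. As a rough
sketch, assuming the usual atom/enaml ForwardInstance convention of passing a callable
that returns the widget type, it could look like this (the function body is an
assumption, not code from the source):

# Hypothetical resolver assumed by ForwardInstance(gl_view_widget): it defers the
# pyqtgraph.opengl import until the member type is first resolved.
def gl_view_widget():
    from pyqtgraph.opengl import GLViewWidget
    return GLViewWidget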
Example No. 3
import os
import time

from numpy import load  # the bare load() below is assumed to be numpy.load for the .npy file
from marching_cubes import march  # assumed source of march() (the ilastik marching_cubes package)
from pyqtgraph.opengl import GLViewWidget, MeshData
from pyqtgraph.opengl.items.GLMeshItem import GLMeshItem

from PyQt5.QtWidgets import QApplication  # QApplication lives in QtWidgets under PyQt5


volume = load(os.path.join(os.path.split(__file__)[0], 'data/input/sample.npy'))


t0 = time.time()
vertices, normals, faces = march(volume, 0)  # zero smoothing rounds
smooth_vertices, smooth_normals, faces = march(volume, 4)  # 4 smoothing rounds
t1 = time.time()
print("took", t1 - t0, "sec")

app = QApplication([])
view = GLViewWidget()

mesh = MeshData(vertices / 100.0, faces)  # scale down - otherwise camera is misplaced
# or mesh = MeshData(smooth_vertices / 100, faces)
mesh._vertexNormals = normals
# or mesh._vertexNormals = smooth_normals

item = GLMeshItem(meshdata=mesh, color=[1, 0, 0, 1], shader="normalColor")

view.addItem(item)
view.show()
app.exec_()
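A self-contained variant of the same GLMeshItem pattern, using pyqtgraph's built-in
sphere mesh instead of the external march()/sample volume (a minimal sketch, assuming
a recent pyqtgraph that provides pg.exec()):

# Minimal sketch: render a built-in sphere mesh with the normalColor shader.
import pyqtgraph as pg
from pyqtgraph.opengl import GLViewWidget, GLMeshItem, MeshData

app = pg.mkQApp("GLMeshItem sketch")
view = GLViewWidget()

md = MeshData.sphere(rows=20, cols=20, radius=1.0)  # public constructor, no private attributes needed
view.addItem(GLMeshItem(meshdata=md, shader="normalColor"))

view.show()
pg.exec()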
Example No. 4
class HyperSpec3DH5View(HyperSpectralBaseView):

    name = 'hyperspec_3d_h5'

    supported_measurements = [
        'oo_asi_hyperspec_3d_scan',
        'andor_asi_hyperspec_3d_scan',
    ]

    def scan_specific_setup(self):
        pass

    def setup(self):
        self.settings.New('sample', dtype=str, initial='')
        self.settings.New('z_slice', dtype=float, choices=[0.0], initial=0.0)
        self.settings.New('show_3d', dtype=bool, initial=False)
        self.settings.New('vol_alpha',
                          dtype=float,
                          vmin=0.0,
                          vmax=1.0,
                          initial=0.5)
        self.settings.New('vol_colormap',
                          dtype=str,
                          initial='viridis',
                          choices=[
                              'viridis', 'plasma', 'inferno', 'magma',
                              'cividis', 'Greys', 'Purples', 'Blues', 'Greens',
                              'Oranges', 'Reds', 'YlOrBr', 'YlOrRd', 'OrRd',
                              'PuRd', 'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu',
                              'PuBuGn', 'BuGn', 'YlGn'
                          ])
        # self.settings.New('vol_percentile', dtype=int, vmin=0, vmax=49,
        #                   initial=5)
        self.settings.New('vol_percentile_min',
                          dtype=int,
                          vmin=0,
                          vmax=100,
                          initial=5)
        self.settings.New('vol_percentile_max',
                          dtype=int,
                          vmin=0,
                          vmax=100,
                          initial=95)
        self.settings.New('vol_transparent_percentile',
                          dtype=int,
                          vmin=0,
                          vmax=100,
                          initial=5)
        self.settings.New('vol_transparent_min', dtype=bool, initial=False)
        self.settings.z_slice.updated_choice_index_value.connect(
            self.on_update_zslice_choice)
        # self.settings.vol_colormap.updated_value.connect(self.calculate_volume)
        # self.settings.vol_alpha.updated_value.connect(self.calculate_volume)
        HyperSpectralBaseView.setup(self)
        voldata = np.empty((1, 1, 1, 4), dtype=np.ubyte)
        voldata[0, 0, 0, :] = [255, 255, 255, 0]
        self.volitem = GLVolumeItem(data=voldata)
        self.glview = GLViewWidget()
        self.glaxis = GLAxisItem()
        self.glgrid = GLGridItem()
        self.glview.addItem(self.glgrid)
        self.glview.addItem(self.glaxis)
        self.glview.addItem(self.volitem)
        self.gldock = self.dockarea.addDock(name='3D',
                                            widget=self.glview,
                                            position='below',
                                            relativeTo=self.image_dock)

        self.calculate_3d_pushButton = QPushButton(text='calculate_3d')
        self.settings_ui.layout().addWidget(self.calculate_3d_pushButton)
        self.calculate_3d_pushButton.clicked.connect(self.calculate_volume)
        self.image_dock.raiseDock()

    def is_file_supported(self, fname):
        return np.any([(meas_name in fname)
                       for meas_name in self.supported_measurements])

    def reset(self):
        if hasattr(self, 'dat'):
            self.dat.close()
            del self.dat

        if hasattr(self, 'spec_map'):
            del self.spec_map

        if hasattr(self, 'scalebar'):
            self.imview.getView().removeItem(self.scalebar)
            del self.scalebar

        if hasattr(self, 'volume'):
            spoof_data = np.zeros((1, 1, 1, 4), dtype=np.ubyte)
            self.volitem.setData(spoof_data)
            del self.volume

        self.settings.show_3d.update_value(False)
        self.image_dock.raiseDock()

    def load_data(self, fname):
        self.dat = h5py.File(fname, 'r')  # open read-only rather than relying on the default mode
        for meas_name in self.supported_measurements:
            if meas_name in self.dat['measurement']:
                self.M = self.dat['measurement'][meas_name]

        for map_name in ['hyperspectral_map', 'spec_map']:
            if map_name in self.M:
                self.spec_map = np.array(self.M[map_name])
                self.h_span = self.M['settings'].attrs['h_span']
                self.x_array = np.array(self.M['h_array'])
                self.z_array = np.array(self.M['z_array'])
                units = self.M['settings/units'].attrs['h_span']
                if units == 'mm':
                    self.h_span = self.h_span * 1e-3
                    self.z_span = self.z_array * 1e-3
                    self.settings.z_slice.change_unit('mm')

                if 'dark_indices' in list(self.M.keys()):
                    print('dark indices found')
                    dark_indices = self.M['dark_indices']
                    if dark_indices.len() == 0:
                        self.spec_map = np.delete(self.spec_map,
                                                  list(dark_indices.shape), -1)
                    else:
                        self.spec_map = np.delete(self.spec_map,
                                                  np.array(dark_indices), -1)
                else:
                    print('no dark indices')

        self.hyperspec_data = self.spec_map[0, :, :, :]
        self.display_image = self.hyperspec_data.sum(axis=-1)
        self.settings.z_slice.change_choice_list(self.z_array.tolist())
        self.settings.z_slice.update_value(self.z_array[0])
        self.spec_x_array = np.arange(self.hyperspec_data.shape[-1])

        for x_axis_name in [
                'wavelength', 'wls', 'wave_numbers', 'raman_shifts'
        ]:
            if x_axis_name in self.M:
                x_array = np.array(self.M[x_axis_name])
                if 'dark_indices' in list(self.M.keys()):
                    dark_indices = self.M['dark_indices']
                    # The following is to read a dataset I initialized
                    # incorrectly for dark pixels. This can be replaced with
                    # the else statement entirely now that the measurement is
                    # fixed, but I still have a long measurement that will
                    # benefit from this.
                    if dark_indices.len() == 0:
                        x_array = np.delete(x_array, list(dark_indices.shape),
                                            0)
                    else:
                        x_array = np.delete(x_array, np.array(dark_indices), 0)
                self.add_spec_x_array(x_axis_name, x_array)
                self.x_axis.update_value(x_axis_name)

        sample = self.dat['app/settings'].attrs['sample']
        self.settings.sample.update_value(sample)
        self.calculate_volume()

    def on_update_zslice_choice(self, index):
        if hasattr(self, 'spec_map'):
            self.hyperspec_data = self.spec_map[index, :, :, :]
            self.display_images['default'] = self.hyperspec_data
            self.display_images['sum'] = self.hyperspec_data.sum(axis=-1)
            self.spec_x_arrays['default'] = self.spec_x_array
            self.spec_x_arrays['index'] = np.arange(
                self.hyperspec_data.shape[-1])
            self.recalc_bandpass_map()
            self.recalc_median_map()
            self.update_display()

    def calculate_volume(self):
        if not self.settings['show_3d']:
            print('calculate_volume called without show_3d')
            return

        print('calculating 3d volume')
        t0 = time.time()

        if hasattr(self, 'volume'):
            del self.volume

        if hasattr(self, 'mappable'):
            self.mappable.set_cmap(self.settings['vol_colormap'])
        else:
            self.mappable = ScalarMappable(cmap=self.settings['vol_colormap'])

        z_span = self.z_array[-1] - self.z_array[0]
        dx = self.x_array[1] - self.x_array[0]
        z_interp_array = np.linspace(np.amin(self.z_array),
                                     np.amax(self.z_array),
                                     num=int(z_span / dx))  # np.linspace requires an integer num
        z_interp = None
        self.volume = None
        nz = len(z_interp_array)

        if self.settings['display_image'] == 'bandpass_map':
            print('bandpass_map selected')
            x, slice = self.get_xhyperspec_data(apply_use_x_slice=True)
            ind_min = np.nonzero(self.spec_x_array == x[0])[0][0]
            ind_max = np.nonzero(self.spec_x_array == x[-1])[0][0]
            data = np.zeros((len(self.z_array), ) + slice.shape)
            data = self.spec_map[:, :, :, ind_min:ind_max]
            # for kk in range(len(self.z_array)):
            #     print(
            #         'grabbing bandpass layer %d of %d' % (kk, len(self.z_array)))
            #     self.settings.z_slice.update_value(self.z_array[kk])
            #     x, data[kk, :, :, :] = self.get_xhyperspec_data(
            #         apply_use_x_slice=True)
            z_interp = interp1d(self.z_array, data, axis=0)
        else:
            z_interp = interp1d(self.z_array, self.spec_map, axis=0)

        data = z_interp(z_interp_array)
        self.volume = np.zeros(data.shape[:-1] + (4, ), dtype=np.ubyte)

        pmin = self.settings['vol_percentile_min']
        pmax = self.settings['vol_percentile_max']
        self.mappable.set_array(data.sum(axis=-1))
        vmin = np.percentile(data.sum(axis=-1), pmin)
        vmax = np.percentile(data.sum(axis=-1), pmax)
        tmin = np.percentile(data.sum(axis=-1),
                             self.settings['vol_transparent_percentile'])
        self.mappable.set_clim(vmin=vmin, vmax=vmax)
        # self.mappable.autoscale()

        for kk in range(nz):
            print('calculating rgba vals for %d of %d layers' % (kk, nz))
            sum_data = data[kk, :, :, :].sum(axis=-1)
            # print(sum_data.shape, self.volume.shape)
            self.volume[kk, :, :, :] = self.mappable.to_rgba(
                sum_data, alpha=self.settings['vol_alpha'], bytes=True)
            if self.settings['vol_transparent_min']:
                self.volume[kk, :, :, 3][np.nonzero(sum_data <= tmin)] = 0

        print('3d volume calculation complete')
        t1 = time.time()
        print('time elapsed: %0.3f s' % (t1 - t0))

        kwargs = {'x': len(self.x_array), 'y': len(self.x_array), 'z': nz}
        self.glaxis.setSize(**kwargs)
        self.glgrid.setSize(**kwargs)
        self.glgrid.setSpacing(x=1 / dx * 5, y=1 / dx * 5, z=1 / dx * 5)
        # print(self.mappable.get_cmap().name)
        # print(data.shape, self.volume.shape)

    def update_display(self):
        if hasattr(self, 'scalebar'):
            self.imview.getView().removeItem(self.scalebar)

        if self.display_image is not None:
            # pyqtgraph axes are x,y, but data is stored in (frame, y,x, time),
            # so we need to transpose
            self.imview.getImageItem().setImage(self.display_image.T)

            nn = self.display_image.shape

            if hasattr(self, 'h_span'):
                span = self.h_span
            else:
                span = -1
            self.scalebar = ConfocalScaleBar(span=span, num_px=nn[0])
            self.scalebar.setParentItem(self.imview.getView())
            self.scalebar.anchor((1, 1), (1, 1), offset=(-20, -20))

            if hasattr(self, 'volume') and self.settings['show_3d']:
                self.volitem.setData(np.swapaxes(self.volume, 0, 2))

            self.on_change_rect_roi()
            self.on_update_circ_roi()
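The core of calculate_volume() above is mapping a scalar volume to ubyte RGBA with
matplotlib's ScalarMappable and handing the result to GLVolumeItem. A stripped-down
sketch of just that step, with random stand-in data and assumed percentile clipping
values (not the class's actual data path):

# Sketch: percentile-clipped colormap lookup, one slice at a time, into a GLVolumeItem.
import numpy as np
import pyqtgraph as pg
from matplotlib.cm import ScalarMappable
from pyqtgraph.opengl import GLViewWidget, GLVolumeItem

app = pg.mkQApp()
scalar = np.random.rand(32, 32, 32)                 # stand-in for the summed spectral map
mappable = ScalarMappable(cmap='viridis')
mappable.set_clim(np.percentile(scalar, 5), np.percentile(scalar, 95))

rgba = np.empty(scalar.shape + (4,), dtype=np.ubyte)
for k in range(scalar.shape[0]):                    # to_rgba maps one 2D slice at a time
    rgba[k] = mappable.to_rgba(scalar[k], alpha=0.5, bytes=True)

view = GLViewWidget()
view.setCameraPosition(distance=80)                 # pull back so the whole volume is visible
view.addItem(GLVolumeItem(data=rgba))
view.show()
pg.exec()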
Example No. 6
        # fragment (apparently from GLPainterItem's paint routine): draw a dot at each
        # integer grid position, projected to screen coordinates
        for xi in range(-hsize, hsize + 1):
            for yi in range(-hsize, hsize + 1):
                if xi == -hsize and yi == -hsize:
                    # skip one corner for visual orientation
                    continue
                vec3 = QtGui.QVector3D(xi, yi, 0)
                pos = project.map(vec3).toPointF()
                painter.drawEllipse(pos, 1, 1)


pg.mkQApp("GLPainterItem Example")
glv = GLViewWidget()
glv.show()
glv.setWindowTitle('pyqtgraph example: GLPainterItem')
glv.setCameraPosition(distance=50, elevation=90, azimuth=0)

griditem = GLGridItem()
griditem.setSize(SIZE, SIZE)
griditem.setSpacing(1, 1)
glv.addItem(griditem)

axisitem = GLAxisItem()
axisitem.setSize(SIZE / 2, SIZE / 2, 1)
glv.addItem(axisitem)

paintitem = GLPainterItem()
glv.addItem(paintitem)

if __name__ == '__main__':
    pg.exec()
Example No. 7
class PoseEstimation(object):
    @staticmethod
    def getframe(option):
        image = None
        ret_val = 0
        camera = 0
        if option == "camera":
            cam = cv.VideoCapture(camera)
            ret_val, image = cam.read()
        elif option == "kinect":
            image, ret_val = freenect.sync_get_video()
            image = cv.cvtColor(image, cv.COLOR_RGB2BGR)
        elif option.__contains__("/"):
            image = cv.imread(option)
        elif option == "camera_image":
            print("after waiting")
            time.sleep(5)
            print("before waiting")
            cam = cv.VideoCapture(camera)
            ret_val, image = cam.read()
        return image, ret_val

    # create the window and initialize the graph objects
    def __init__(self, args, option='camera'):
        self.args = args
        self.fpsTime = 0
        self.option = option
        self.app = QtGui.QApplication(sys.argv)
        self.window = GLViewWidget()
        self.window.setGeometry(0, 150, 1920, 1080)
        self.window.setCameraPosition(distance=50, elevation=8)
        self.window.setWindowTitle("3D Pose Estimation")
        self.window.show()
        gx = GLGridItem()
        gy = GLGridItem()
        gz = GLGridItem()
        gx.rotate(90, 0, 1, 0)
        gy.rotate(90, 1, 0, 0)
        gx.translate(-10, 0, 0)
        gy.translate(0, -10, 0)
        gz.translate(0, 0, -10)
        self.window.addItem(gx)
        self.window.addItem(gy)
        self.window.addItem(gz)
        self.lines = {}
        keypoints = []
        self.connection = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6],
                           [0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12],
                           [12, 13], [8, 14], [14, 15], [15, 16]]
        self.w, self.h = model_wh(self.args.resize)

        if self.w > 0 and self.h > 0:
            self.e = TfPoseEstimator(get_graph_path(self.args.model),
                                     target_size=(self.w, self.h),
                                     trt_bool=str2bool(self.args.tensorrt))
        else:
            self.e = TfPoseEstimator(get_graph_path(self.args.model),
                                     target_size=(432, 368),
                                     trt_bool=str2bool(self.args.tensorrt))

        print(self.args.option)
        image, ret_val = PoseEstimation.getframe(self.args.option)

        self.poseLifting = Prob3dPose(
            'lifting/prob_model/prob_model_params.mat')
        try:
            keypoints = self.mesh(image)
        except AssertionError:
            print("body not in image")
            keypoints = np.zeros((17, 3))
        except Exception:
            print("General exception")
            keypoints = np.zeros((17, 3))

        # self.lines = {}
        # self.connection = [
        #     [13, 16]
        # ]
        # p = []
        # p.append(keypoints[13])
        # p.append(keypoints[16])
        # p = np.array(p)
        finally:
            self.points = GLScatterPlotItem(
                pos=np.array(np.array(keypoints)),
                color=glColor((12, 255, 0)),
                size=15,
            )
            self.window.addItem(self.points)
            for n, pts in enumerate(self.connection):
                self.lines[n] = GLLinePlotItem(pos=np.array(
                    [keypoints[p] for p in pts]),
                                               color=glColor((0, 0, 255)),
                                               width=3,
                                               antialias=True)
                self.window.addItem(self.lines[n])

    def mesh(self, image):
        """Run 2D pose inference on the frame and lift it to 3D keypoints."""
        # image_h, image_w = image.shape[:2]

        width = 640
        height = 480
        pose_2d_mpiis = []
        visibilities = []

        humans = self.e.inference(image,
                                  resize_to_default=(self.w > 0
                                                     and self.h > 0),
                                  upsample_size=self.args.resize_out_ratio)
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        cv.putText(image, "FPS: %f" % (1.0 / (time.time() - self.fpsTime)),
                   (10, 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv.imshow('tf-pose-estimation result', image)
        self.fpsTime = time.time()

        # image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        # cv2.putText(image,
        #             "FPS: %f" % (1.0 / (time.time() - terrain.fps_time)),
        #             (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
        #             (0, 255, 0), 2)
        # cv2.imshow('tf-pose-estimation result', image)
        # terrain.fps_time = time.time()
        # cv2.waitKey(1)

        for human in humans:
            pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
            pose_2d_mpiis.append([(int(y * height + 0.5), int(x * width + 0.5))
                                  for x, y in pose_2d_mpii])
            visibilities.append(visibility)

        pose_2d_mpiis = np.array(pose_2d_mpiis)
        visibilities = np.array(visibilities)
        transformed_pose2d, weights = self.poseLifting.transform_joints(
            pose_2d_mpiis, visibilities)
        pose_3d = self.poseLifting.compute_3d(transformed_pose2d, weights)

        keypoints = pose_3d[0].transpose()
        keypoints = keypoints / 100
        print(" \n")

        print(keypoints)
        return keypoints

    """
    return 3d keypoints

    """

    def update(self):
        """Grab a new frame, recompute the 3D keypoints, and update the
        scatter and line items."""
        # ret_val, image = terrain.get_video()
        # ret_val, image = self.cam.read()
        if cv.waitKey(1) & 0xFF == 27:
            cv.destroyAllWindows()
            sys.exit()

        keypoints = []
        image, ret_val = PoseEstimation.getframe(self.args.option)
        try:
            keypoints = self.mesh(image)
        except AssertionError:
            print("body not in image")
            keypoints = np.zeros((17, 3))
        except Exception:
            print("General exception")
            keypoints = np.zeros((17, 3))
        finally:
            self.points.setData(pos=np.array(keypoints))
            for n, pts in enumerate(self.connection):
                self.lines[n].setData(pos=np.array([keypoints[p]
                                                    for p in pts]))

    """
        update graph all graph objects
    """

    def start(self):
        if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
            QtGui.QApplication.instance().exec_()

    def animation(self, frametime=10):
        """Call update() in a timer loop (skipped for single-image sources),
        then start the Qt event loop."""
        if not (self.option.__contains__("/")
                or self.option == "camera_image"):
            timer = QtCore.QTimer()
            timer.timeout.connect(self.update)
            timer.start(frametime)
        self.start()
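The visualization half of this class boils down to one GLScatterPlotItem for the joints
plus one GLLinePlotItem per bone. A self-contained sketch of that pattern with dummy
keypoints (the shapes and camera settings mirror the class above; the data is made up):

# Sketch: draw a 17-joint "skeleton" from random points, one line item per bone.
import numpy as np
import pyqtgraph as pg
from pyqtgraph import glColor
from pyqtgraph.opengl import GLViewWidget, GLScatterPlotItem, GLLinePlotItem

app = pg.mkQApp()
view = GLViewWidget()
view.setCameraPosition(distance=50, elevation=8)
view.show()

keypoints = np.random.rand(17, 3) * 10              # stand-in for pose_3d[0].T / 100
connection = [[0, 1], [1, 2], [2, 3], [0, 4]]        # subset of the bone list used above

view.addItem(GLScatterPlotItem(pos=keypoints, color=glColor((12, 255, 0)), size=15))
for pair in connection:
    view.addItem(GLLinePlotItem(pos=keypoints[pair], color=glColor((0, 0, 255)),
                                width=3, antialias=True))
pg.exec()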
Example No. 8
    def addItem(self, item, group=RenderGroup.Variable):
        # register the item under its render group, then add it to the scene as usual
        self.renderItems[group].append(item)
        GLViewWidget.addItem(self, item)
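The fragment assumes an enclosing GLViewWidget subclass that tracks items per render
group. A hypothetical minimal context (RenderGroup and the renderItems container are
assumptions, not taken from the source) might look like:

# Sketch of a possible enclosing class for the addItem override above.
from collections import defaultdict
from enum import Enum

from pyqtgraph.opengl import GLViewWidget


class RenderGroup(Enum):
    Static = 0
    Variable = 1


class GroupedGLViewWidget(GLViewWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.renderItems = defaultdict(list)  # RenderGroup -> list of GL items

    def addItem(self, item, group=RenderGroup.Variable):
        # register the item under its group, then add it to the scene as usual
        self.renderItems[group].append(item)
        GLViewWidget.addItem(self, item)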