Example #1
    def test_anterograde_projections(self):

        self.projections.show_anterograde_projections(200)
        view = self.view
        view.camera.setView([100, 1325, -387], ([0.5, 0.8650, 0.05], 174))

        image_compare.capture_and_compare(view, "anterograde_projections.png")
Example #2
    def test_display_simulation_true(self):
        view = rtneuron.engine.views[0]
        view.attributes.display_simulation = False
        rtneuron.engine.frame()
        rtneuron.engine.waitFrame()
        view.attributes.display_simulation = True
        image_compare.capture_and_compare(view, 'view_attributes_with_sim.png')
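
A pattern worth noting: Examples #2 and #16 render one frame and wait for it before capturing, so the first attribute value is guaranteed to have been displayed before it is changed. A sketch of that pattern as a reusable helper (the helper name is hypothetical; engine.frame() and engine.waitFrame() are the calls the examples themselves use):

def settle(engine, frames=1):
    """Render `frames` frames and block until each one has finished."""
    for _ in range(frames):
        engine.frame()      # request a new frame
        engine.waitFrame()  # block until it has been rendered
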
Example #3
    def test_synaptic_projections(self):

        self.projections.show_projections(range(200, 310, 10), range(400, 510, 10))
        view = self.view
        view.camera.setView([146, 1382, -566], ([0.694, 0.718, 0.0552], 175))

        image_compare.capture_and_compare(view, "synaptic_projections.png")
Example #4
    def test_retrograde_projections(self):

        self.projections.show_retrograde_projections(230)
        view = self.view
        view.camera.setView([-15, 1664, 381], ([-0.122, -0.114, 0.986], 84.5))

        image_compare.capture_and_compare(view, "retrograde_projections.png")
Example #5
    def test_soma_mode(self):
        self._handler.attributes.mode = RepresentationMode.SOMA
        self._handler.update()
        self.engine.frame()
        tmp = image_compare.capture_temporary(self.view)

        self._handler.apply(NeuronClipping().clipAll(False))
        try:
            image_compare.capture_and_compare(self.view,
                                              tmp,
                                              prepend_sample_path=False)
        finally:
            os.remove(tmp)

        self._handler.attributes.mode = RepresentationMode.NO_DISPLAY
        self._handler.update()
        self.engine.frame()
        tmp = image_compare.capture_temporary(self.view)

        self._handler.apply(NeuronClipping().clipAll(True))
        try:
            image_compare.capture_and_compare(self.view,
                                              tmp,
                                              prepend_sample_path=False)
        finally:
            os.remove(tmp)
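
Examples #5, #23, #24 and #28 share a reference-image pattern: capture the current rendering to a temporary file, modify the scene, assert that the new rendering matches the capture, and remove the temporary file in a finally block. A sketch of that pattern factored into a context manager (hypothetical helper; capture_temporary and capture_and_compare come from the image_compare test module used throughout):

import contextlib
import os

import image_compare

@contextlib.contextmanager
def reference_capture(view):
    """Capture `view` on entry; on exit, compare against it and clean up."""
    tmp = image_compare.capture_temporary(view)
    try:
        yield tmp
        image_compare.capture_and_compare(view, tmp,
                                          prepend_sample_path=False)
    finally:
        os.remove(tmp)

# Usage, mirroring the first half of test_soma_mode:
#     with reference_capture(self.view):
#         self._handler.apply(NeuronClipping().clipAll(False))
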
Example #6
    def test_by_distance_colormaps(self):
        view = self.view
        neuron = view.scene.objects[0]

        neuron.attributes.color_scheme = \
            rtneuron.ColorScheme.BY_DISTANCE_TO_SOMA
        neuron.update()
        image_compare.capture_and_compare(view,
                                          'default_by_distance_coloring.png')

        colormap = ColorMap()
        colormap.setPoints({0: [0, 0, 1, 1], 100: [1, 0, 0, 1]})
        neuron.attributes.colormaps = AttributeMap()
        neuron.attributes.colormaps.by_distance_to_soma = colormap
        neuron.update()
        image_compare.capture_and_compare(view, 'by_distance_colormap1.png')

        colormap.setPoints({0: [0, 0, 1, 1], 100: [1, 0, 0, 1],
                            200: [1, 0.5, 0, 1], 300: [1, 1, 0, 1]})
        neuron.update()
        image_compare.capture_and_compare(view, 'by_distance_colormap2.png')

        colormap.setRange(0, 1000)
        neuron.update()
        image_compare.capture_and_compare(view, 'by_distance_colormap3.png')

        neuron.attributes.colormaps = AttributeMap()
        neuron.update()
        image_compare.capture_and_compare(view,
                                          'default_by_distance_coloring.png')
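
The dictionary given to ColorMap.setPoints maps a scalar (here the distance to the soma, in scene units) to an RGBA color, and setRange rescales the control points to a new domain. A plain-Python sketch of the linear interpolation such a map presumably performs between control points (an assumption; rtneuron.ColorMap may interpolate differently):

def lerp_color(points, x):
    """Interpolate an RGBA color from a {scalar: [r, g, b, a]} dict."""
    keys = sorted(points)
    if x <= keys[0]:
        return points[keys[0]]
    for lo, hi in zip(keys, keys[1:]):
        if x <= hi:
            t = (x - lo) / (hi - lo)
            return [a + (b - a) * t for a, b in zip(points[lo], points[hi])]
    return points[keys[-1]]

# Half-way between the blue and red control points used above:
print(lerp_color({0: [0, 0, 1, 1], 100: [1, 0, 0, 1]}, 50))  # [0.5, 0.0, 0.5, 1.0]
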
Example #7
    def test_set_get_projectionPerspective(self):
        # Pre-computed value to adjust the scene content to the view limits
        fov = 23.13
        self.view.camera.setProjectionPerspective(fov)
        image_compare.capture_and_compare(self.view,
                                          'camera_projectionPerspective.png',
                                          threshold=2.0)
Example #8
    def test_set_get_view(self):
        position = [30, 600, 300]
        orientation = ([0, 0, 1], 0)
        assert (self.view.camera.getView() != (position, orientation))
        self.view.camera.setView(position, orientation)
        assert (self.view.camera.getView() == (position, orientation))
        image_compare.capture_and_compare(self.view, "camera_setView.png")
Example #9
    def test_by_width_colormaps(self):
        view = self.view
        neuron = view.scene.objects[0]

        neuron.attributes.color_scheme = rtneuron.ColorScheme.BY_WIDTH
        neuron.update()
        image_compare.capture_and_compare(view, 'default_by_width_coloring.png')

        colormap = ColorMap()
        colormap.setPoints({0: [1, 0, 0, 1], 30: [1, 1, 1, 1]})
        neuron.attributes.colormaps = AttributeMap()
        neuron.attributes.colormaps.by_width = colormap
        neuron.update()
        image_compare.capture_and_compare(view, 'by_width_colormap1.png')

        colormap.setPoints({0: [1, 0, 0, 1], 5: [1, 0.5, 0, 1],
                            10: [1, 1, 0, 1], 30: [1, 1, 1, 1]})
        neuron.update()
        image_compare.capture_and_compare(view, 'by_width_colormap2.png')

        colormap.setRange(0, 15)
        neuron.update()
        image_compare.capture_and_compare(view, 'by_width_colormap3.png')

        neuron.attributes.colormaps = AttributeMap()
        neuron.update()
        image_compare.capture_and_compare(view, 'default_by_width_coloring.png')
Example #10
    def test_add_with_color_flat(self):
        attributes = AttributeMap()
        attributes.color = [1, 0, 0, 1]
        attributes.flat = True
        self.scene.addModel(cube_path, attributes=attributes)

        image_compare.capture_and_compare(self.view, "red_cube.png")
Example #11
    def test_multiple_clip_1(self):
        clipping = NeuronClipping()
        for i in range(500):
            clipping.clip([i], [0.0], [0.5])
        self._handler.apply(clipping)
        image_compare.capture_and_compare(
            self.view, self._prefix + "simple_clip.png")
Example #12
    def test_unclip(self):
        self._handler.apply(NeuronClipping().clipAll(True))
        self._handler.apply(NeuronClipping().unclip(
            range(1, 501), [0.5] * 500, [1.0] * 500))
        # This result must match the result from test_clip
        image_compare.capture_and_compare(
            self.view, self._prefix + "simple_clip.png")
Example #13
    def test_compartment_report_rules2(self):
        apply_compartment_report(self.simulation, self.view, "allCompartments")
        apply_spike_data(self._spikefile.name, self.view)
        self.engine.player.window = [0, 6]
        self.switch_mode(RepresentationMode.SOMA)
        self.engine.player.timestamp = 5
        image_compare.capture_and_compare(
            self.view, "compartment_simulation_t5_somas.png")
Example #14
    def test_add_and_change_color(self):
        self.scene.addModel(cube_path)

        cube = self.scene.objects[0]
        cube.attributes.color = [1, 0, 0, 1]
        cube.update()

        image_compare.capture_and_compare(self.view, "red_cube.png")
Example #15
    def test_set_get_projectionOrtho(self):
        left = bottom = -0.5
        right = top = 0.5
        self.view.camera.setProjectionOrtho(left, right, bottom, top, 1)
        assert (self.view.camera.getProjectionOrtho() ==
                (left, right, bottom, top))
        image_compare.capture_and_compare(
            self.view, 'camera_projectionOrtho.png', threshold=2.0)
Example #16
    def test_lod_bias_0(self):
        view = rtneuron.engine.views[0]
        view.attributes.lod_bias = 1
        rtneuron.engine.frame()
        rtneuron.engine.waitFrame()
        view.attributes.lod_bias = 0
        image_compare.capture_and_compare(view,
                                          'view_attributes_lod_bias_0.png')
Example #17
    def test_add_modify_clip_plane(self):
        view = self.view
        view.scene.setClipPlane(0, [0, 0, -1, 0])
        view.scene.setClipPlane(1, [-1, 0, 0, 0])
        self.engine.frame()
        view.scene.setClipPlane(0, [0, 0, -1, 75 + 32.00001])
        view.scene.setClipPlane(1, [-1, 0, 0, 74 + 34])
        image_compare.capture_and_compare(view, "scene_clipping3.png")
Example #18
    def test_multiple_unclip_2(self):
        self._handler.apply(NeuronClipping().clipAll(True))
        # Unclip the upper half of each section with one apply() per section.
        for i in range(1, 501):
            self._handler.apply(NeuronClipping().unclip([i], [0.5], [1.0]))
        image_compare.capture_and_compare(
            self.view, self._prefix + "simple_clip.png")
Example #19
    def test_res_snapshot(self):
        # So results are not GPU/driver dependent
        self.view.attributes.idle_AA_steps = 32
        snapshot_with_res = self.view_snapshot((512, 512))
        neurons = self.scene.objects[0]
        neurons.attributes.mode = rtneuron.RepresentationMode.SOMA
        image_compare.capture_and_compare(
            self.view, 'snapshot_sized.png', 1, snapshot_with_res)
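
The calls across these examples suggest the rough shape of the comparison helper: a view, a reference image name, an optional difference threshold, and an optional pre-captured image to use instead of grabbing the view again. An inferred, hypothetical signature (reconstructed from usage only, not from the image_compare source):

def capture_and_compare(view, sample, threshold=1.0, captured_image=None,
                        prepend_sample_path=True):
    """Capture `view` (or take `captured_image`) and compare it against
    `sample`, failing the test if the difference exceeds `threshold`."""
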
Example #20
    def run_lod_test(self, scene_name):
        scene = self.scenes[scene_name]
        self.view.scene = scene
        self.engine.frame()
        scene.highlight([406], True)

        file_name = 'lod_' + scene_name + '.png'
        image_compare.capture_and_compare(self.view, file_name)
Example #21
    def run_lod_test(self, scene_name):
        scene = self.scenes[scene_name]
        self.view.scene = scene
        apply_compartment_report(self.simulation, self.view, "allCompartments")
        self.engine.frame()
        scene.highlight([406], True)

        file_name = 'lod_' + scene_name + '_with_sim_and_highlight.png'
        image_compare.capture_and_compare(self.view, file_name)
Example #22
    def test_subset_attributes1(self):
        image_compare.capture_and_compare(self.view,
                                          "spike_simulation_t1_somas.png")

        subset = self.scene.objects[0].query(self._gids[0::2])
        subset.attributes.color = [1, 0, 0, 1]
        subset.update()
        image_compare.capture_and_compare(self.view,
                                          "half_red_half_white_somas.png")
Example #23
    def test_add_remove(self):
        handler = self.scene.addModel(cube_path)

        tmp = image_compare.capture_temporary(self.view)
        try:
            self.scene.remove(handler)
            image_compare.capture_and_compare(self.view, 'empty.png', 1, tmp)
        finally:
            os.remove(tmp)
Example #24
    def test_axon_mode_protected(self):
        tmp = image_compare.capture_temporary(self.view)

        self._handler.apply(NeuronClipping().unclipAll())
        try:
            image_compare.capture_and_compare(
                self.view, tmp, prepend_sample_path=False)
        finally:
            os.remove(tmp)
Example #25
    def do_test_color_scheme_change(self, first, second):
        self.add_neurons(first)
        # Make sure the scene is displayed before the color scheme is changed
        for _ in range(2):
            self._engine.frame()
            self._engine.waitFrame()
        self.change_coloring(second)
        image_compare.capture_and_compare(
            self._view, "coloring_%s.png" % str(second).lower())
Example #26
    def test_set_get_projectionFrustum(self):
        left = bottom = -0.5
        right = top = 0.5
        near = 1
        self.view.camera.setProjectionFrustum(left, right, bottom, top, near)
        assert (self.view.camera.getProjectionFrustum() ==
                (left, right, bottom, top, near))
        image_compare.capture_and_compare(
            self.view, 'camera_projectionFrustum.png', threshold=2.0)
Example #27
    def test_soma_to_detailed(self):
        apply_compartment_report(self.simulation, self.view, "allCompartments")
        self.engine.player.window = [0, 10]

        self.engine.player.timestamp = 5
        self.engine.frame()

        self.switch_mode(RepresentationMode.WHOLE_NEURON)
        image_compare.capture_and_compare(self.view,
                                          "compartment_simulation_t5.png")
Example #28
    def test_clip_chain_1(self):
        tmp = image_compare.capture_temporary(self.view)
        self._handler.apply(
            NeuronClipping().clipAll()
                            .unclip(range(500), [0] * 500, [1.0] * 500))
        try:
            image_compare.capture_and_compare(
                self.view, tmp, prepend_sample_path=False)
        finally:
            os.remove(tmp)
Example #29
    def test_clip(self):
        self._handler.apply(NeuronClipping().clip(
                range(500), [0.0] * 500, [0.5] * 500))

        # Rendering at least one frame to make sure the scene is not empty for
        # the snapshot.
        self.engine.frame()
        self.engine.waitFrame()

        image_compare.capture_and_compare(
            self.view, self._prefix + "simple_clip.png")
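
test_clip hides the lower half [0.0, 0.5] of each of the first 500 sections, while test_unclip (Example #12) hides everything and then reveals the upper half [0.5, 1.0]; both leave the same geometry visible, which is why both compare against simple_clip.png. A plain-Python sketch of that equivalence (interval bookkeeping only, not the RTNeuron API):

def clip(visible, start, end):
    """Remove [start, end] from a list of visible (lo, hi) intervals."""
    result = []
    for lo, hi in visible:
        if start > lo:
            result.append((lo, min(start, hi)))
        if end < hi:
            result.append((max(end, lo), hi))
    return result

def unclip(visible, start, end):
    """Reveal [start, end] (no overlap handling needed for this sketch)."""
    return sorted(visible + [(start, end)])

# test_clip: a fully visible section, lower half clipped.
assert clip([(0.0, 1.0)], 0.0, 0.5) == [(0.5, 1.0)]
# test_unclip: clipAll(True) empties the list, then the upper half is revealed.
assert unclip([], 0.5, 1.0) == [(0.5, 1.0)]
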
Example #30
    def test_axon_spikes(self):
        apply_spike_data(self._spikefile.name, self.view)
        self.engine.player.window = [0, 10]

        self.view.camera.setView([-187, 481, 880], ([0.0, 0.0, 1.0], 0.0))
        self.engine.player.timestamp = 4.5
        self.view.attributes.spike_tail = 1
        image_compare.capture_and_compare(
            self.view, "spike_simulation_t4.5_d1_axons.png")
        self.view.attributes.spike_tail = 2
        image_compare.capture_and_compare(
            self.view, "spike_simulation_t4.5_d2_axons.png")