コード例 #1
0
 def setUp(self):
     """Create the engine, open the test simulation and attach a scene.

     Leaves self.engine, self.simulation and self.view ready for the tests.
     """
     self.engine = RTNeuron([])
     self.simulation = brain.Simulation(brain.test.blue_config)
     scene = self.engine.createScene()
     self.engine.init(setup.medium_rect_eq_config)
     self.view = self.engine.views[0]
     # Bug fix: the original did `self.view = scene`, which discarded the
     # view object just obtained above. The scene must be assigned to the
     # view's `scene` attribute (see the equivalent setUp in example #5).
     self.view.scene = scene
コード例 #2
0
ファイル: Ortho.py プロジェクト: abuchin/RTNeuron
def front_ortho(simulation, targets, **kwargs):
    """Return the frustum parameters for an orthogonal front view of a cell
    target.

    The simulation configuration is loaded to obtain the soma positions of
    the requested neurons; the frustum size is derived from those positions
    alone. Framing the scene correctly still requires setting up an
    additional camera path.

    The target parameter can be:

    - A cell GID (as integer)
    - A numpy array of u4, u8 or i4
    - A target label (as a string)
    - A list of any of the above

    """
    opts = _CommonOptions(**kwargs)

    sim = _brain.Simulation(simulation)
    circuit = sim.open_circuit()
    cell_gids = _rtneuron.util.targets_to_gids(targets, sim)
    # Only the x/y columns matter for an orthographic front view; drop the
    # depth component of the fit points.
    fit_points = opts.fit_point_generator(cell_gids, circuit)
    xy = fit_points[:, [0, 1]]

    return _ortho_frustum(xy, **kwargs)
コード例 #3
0
def flythrough(simulation, targets, duration=10, **kwargs):
    """Return a camera path of a flythrough of a cell target.

    This function will load the simulation config file given and all the
    neurons associated with the target specification. The camera position is
    computed based only on the soma positions and corresponds to a front
    view of the circuit. The path duration must be in seconds.

    The targets parameter can be:

    - A cell GID (as integer)
    - An iterable of GIDs
    - A target label (as a string)
    - A list of any of the above

    The optional keyword arguments are:

    - samples: Number of keyframes to generate (must be > 1, default 100)
    - speedup: From 1 to inf, this parameter specifies a speed up for
      the initial camera speed. Use 1 for a linear camera path; if higher
      than one the camera will start faster and will decrease its speed
      non-linearly and monotonically. Recommended values are between 1
      and 3. The default value is 1.

    Raises ValueError if samples or speedup are out of range.
    """
    options = _CommonOptions(**kwargs)

    # Validate the optional arguments with real exceptions: the original
    # used `assert`, which is silently stripped under `python -O`.
    samples = kwargs.get('samples', 100)
    if samples <= 1:
        raise ValueError("samples must be greater than 1")
    norm = float(kwargs.get('speedup', 1))
    if norm < 1:
        raise ValueError("speedup must be >= 1")

    simulation = _brain.Simulation(simulation)
    circuit = simulation.open_circuit()
    positions = circuit.positions(
        _rtneuron.util.targets_to_gids(targets, simulation))

    top = positions.max(0)
    bottom = positions.min(0)
    center = (top + bottom) * 0.5
    start = _compute_eye_position(positions, options)
    # Copy: the original aliased `end = center`, so the tweak of end[2]
    # below also mutated center in place.
    end = center.copy()
    # End slightly past the front face of the circuit's bounding box.
    end[2] = bottom[2] - (top[2] - bottom[2]) / 5.0

    path = _rtneuron.CameraPath()

    for i in range(samples):
        t = i / (samples - 1.0)  # normalized position along the path [0, 1]
        time = duration * t
        # Ease-out mapping: equals t for norm == 1, front-loaded otherwise.
        a = (1 - (1 - t) ** norm) ** (1 / norm)
        position = start * (1 - a) + end * a
        path.addKeyFrame(time, path.KeyFrame(position, ([1, 0, 0], 0), 1))

    return path
コード例 #4
0
    def setUp(self):
        """Prepare an empty scene with a fixed camera and the test circuit."""
        setup.setup_empty_scene(self, eq_config=setup.medium_eq_config)

        # Pin the camera instead of letting the engine recompute the home
        # position when the scene content changes.
        self.view.attributes.auto_compute_home_position = False
        self.view.camera.setView([26, 965, 600], ([0.0, 0.0, 1.0], 0.0))

        sim = brain.Simulation(brain.test.circuit_config)
        self.scene.circuit = sim.open_circuit()
        self.simulation = sim

        # Image files created by the tests, cleaned up in tearDown.
        self.tmp_images = []
コード例 #5
0
    def setUp(self, sceneAttr=None):
        """Create an engine, scene and view rendering a single test neuron.

        sceneAttr: optional AttributeMap with scene creation attributes;
        a fresh empty AttributeMap is used when omitted.
        """
        # Bug fix: the original default `sceneAttr = AttributeMap()` is a
        # mutable default argument — one shared instance created at function
        # definition time. Use None as sentinel and build a fresh map per call.
        if sceneAttr is None:
            sceneAttr = AttributeMap()

        self._engine = RTNeuron([])
        self._simulation = brain.Simulation(brain.test.blue_config)
        self._gids = [406]

        self._scene = self._engine.createScene(sceneAttr)
        self._scene.circuit = self._simulation.open_circuit()

        self._engine.init(setup.medium_rect_eq_config)
        view = self._engine.views[0]
        self._view = view
        # Deterministic rendering: fixed antialiasing, background and camera.
        view.attributes.idle_AA_steps = 32
        view.attributes.background = [0, 0, 0, 1]
        view.attributes.auto_compute_home_position = False
        view.camera.setView([29, 666, 563], ([0.0, 0.0, 1.0], 0.0))
        view.scene = self._scene
コード例 #6
0
def front_to_top_rotation(simulation, targets, duration=10, **kwargs):
    """Return a camera path of a rotation from front to top view of
    a set of circuit targets.

    Loads the given simulation config file and all the neurons associated
    with the target specification. The front and top camera positions are
    computed from the soma positions. The path duration is in seconds.

    The targets parameter can be:

    - A cell GID (as integer)
    - An iterable of GIDs
    - A target label (as a string)
    - A list of any of the above

    The optional keyword arguments are:

    - timing: A [0..1]->[0..1] function used to map sample timestamps
      from a uniform distribution to any user given distribution.
    - samples: Number of keyframes to generate
    """
    options = _CommonOptions(**kwargs)

    sim = _brain.Simulation(simulation)
    circuit = sim.open_circuit()
    gids = _rtneuron.util.targets_to_gids(targets, sim)
    points = options.fit_point_generator(gids, circuit)

    center = (points.max(0) + points.min(0)) * 0.5

    # Front eye position from the original point cloud.
    front_eye = _compute_eye_position(points, options)
    # For the top view, swap the y/z columns so the same fitting routine
    # frames the circuit as seen from above, then swap the result back.
    points = points[:, [0, 2, 1]]
    top_eye = _compute_eye_position(points, options)
    top_eye[1], top_eye[2] = top_eye[2], top_eye[1]

    eye = front_eye
    # Push the front eye back if necessary so the circuit remains correctly
    # framed once the camera reaches the top view.
    if abs(eye[2] - center[2]) < abs(top_eye[1] - center[1]):
        eye[2] += (top_eye[1] - center[1]) - (eye[2] - center[2])

    return rotation(center, [1, 0, 0], eye, -_math.pi/2, up="tangent",
                    duration=duration, **kwargs)
コード例 #7
0
def rotate_around(simulation, targets, duration=10, **kwargs):
    """Return a camera path of a front view rotation around a circuit target.

    This function will load the simulation config file given and all the
    neurons associated with the target specification. The start position is
    computed based on the soma positions. The path duration is in seconds.

    The target parameter can be:

    - A cell GID (as integer)
    - An iterable of GIDs
    - A target label (as a string)
    - A list of any of the above
    """
    options = _CommonOptions(**kwargs)

    simulation = _brain.Simulation(simulation)
    circuit = simulation.open_circuit()
    gids = _rtneuron.util.targets_to_gids(targets, simulation)
    positions = options.fit_point_generator(gids, circuit)

    center = (positions.max(0) + positions.min(0)) * 0.5
    eye = _compute_eye_position(positions, options)
    # Consistency fix: forward **kwargs (e.g. timing, samples) to rotation as
    # front_to_top_rotation does; the original silently dropped them.
    return rotation(center, [0, 1, 0], eye, 2 * _math.pi, duration=duration,
                    **kwargs)
コード例 #8
0
ファイル: simulation.py プロジェクト: shugraphics/RTNeuron
# BBPTestData simulation config, Layer1 as the target and voltage as the
# report
if len(sys.argv) > 1:
    simulation_config = sys.argv[1]
    target = sys.argv[2] if len(sys.argv) > 2 else 'MiniColumn_0'
    report_name = sys.argv[3] if len(sys.argv) > 3 else 'voltage'
else:
    # Fall back to the bundled test data (may be None if unavailable).
    simulation_config = brain.test.blue_config
    target = 'L5CSPC'
    report_name = 'allCompartments'

if not simulation_config:
    print("No simulation config found, please provide one")
    # sys.exit instead of the site-module exit() builtin, which is meant
    # for interactive use only.
    sys.exit(1)

simulation = brain.Simulation(simulation_config)

# Scene parameters.
scene_attributes = AttributeMap()
scene_attributes.use_meshes = False
scene_attributes.circuit = simulation_config
# Enabling alpha-blending using the multi-layer depth peeling algorithm
alpha_blending = AttributeMap()
alpha_blending.mode = 'multilayer_depth_peeling'
alpha_blending.slices = 1
scene_attributes.alpha_blending = alpha_blending

# Creating the scene.
# This must be done before initializing any Equalizer configuration.
scene = engine.createScene(scene_attributes)
scene.circuit = simulation.open_circuit()
コード例 #9
0
# Off-screen (pbuffer) drawables and quiet Equalizer logging for this run.
os.environ['EQ_WINDOW_IATTR_HINT_DRAWABLE'] = '-12'
os.environ['EQ_LOG_LEVEL'] = 'ERROR'
engine = RTNeuron([], attributes)

blue_config = sys.argv[1] if len(sys.argv) > 1 else brain.test.blue_config
if not blue_config:
    print("No blue config found, please provide one")
    # Bug fix: the original printed the error but carried on with an empty
    # config; bail out like the sibling scripts do.
    sys.exit(1)
target = sys.argv[2] if len(sys.argv) > 2 else None

simulation = brain.Simulation(blue_config)

# All cells when no target was given.
gids = simulation.gids(target) if target else simulation.gids()
circuit = simulation.open_circuit()

inhibitory_gids = circuit.gids("Inhibitory")
excitatory_gids = circuit.gids("Excitatory")

# Synapses projected from each population onto the selected cells.
inhibitory = circuit.projected_synapses(inhibitory_gids, gids)
excitatory = circuit.projected_synapses(excitatory_gids, gids)

# Transparent rendering via multi-layer depth peeling.
scene_attributes = AttributeMap()
alpha_blending = AttributeMap()
alpha_blending.mode = 'multilayer_depth_peeling'
alpha_blending.slices = 1
scene_attributes.alpha_blending = alpha_blending
コード例 #10
0
 def setUp(self):
     # Shared fixture: open the bundled test blue config for the tests below.
     self.simulation = brain.Simulation(brain.test.blue_config)
コード例 #11
0
 def test_open(self):
     # Opening a valid simulation config must succeed without raising.
     simulation = brain.Simulation(brain.test.blue_config)
コード例 #12
0
 def test_bad_open(self):
     """Opening a nonexistent config must raise RuntimeError."""
     # assertRaises' callable form replaces the lambda wrapper.
     self.assertRaises(RuntimeError, brain.Simulation, "foo")