Example no. 1
class _IntegratorTreeFragment(core.Type):
    """
    This trait-ed class is used to build the input tree for the integrator.
    """
    integrator = integrators.Integrator(label="integrator",
                                        required=True,
                                        order=2,
                                        default=integrators.HeunDeterministic,
                                        doc="""The integrator""")
Example no. 2
class _InputTreeFragment(core.Type):
    """
    This trait-ed class is used to build the input tree for the integrator.
    """
    dynamic_name = types_basic.String(
        label="Parameter configuration name",
        required=True,
        order=1,
        doc="""The name of this parameter configuration""")

    integrator = integrators.Integrator(label="integrator",
                                        required=True,
                                        order=2,
                                        default=integrators.HeunDeterministic,
                                        doc="""The integrator""")
Example no. 3
class PhasePlaneInteractive(core.Type):
    """
    The GUI for the interactive phase-plane viewer provides sliders for setting:
        - The value of all parameters of the Model.
        - The extent of the axes.
        - A fixed value for the state-variables which aren't currently selected.
        - The noise strength, if a stochastic integrator is specified.

    and radio buttons for selecting:
        - Which state-variables to show on each axis.
        - Which mode to show, if the Model has them.

    Clicking on the phase-plane will generate a sample trajectory, originating
    from where you clicked.

    """

    model = models_module.Model(
        label="Model",
        default=models_module.Generic2dOscillator,
        doc="""An instance of the local dynamic model to be investigated with
        PhasePlaneInteractive.""")

    integrator = integrators_module.Integrator(
        label="Integrator",
        default=integrators_module.RungeKutta4thOrderDeterministic,
        doc="""The integration scheme used to for generating sample
        trajectories on the phase-plane. NOTE: This is not used for generating
        the phase-plane itself, ie the vector field and nulclines.""")

    def __init__(self, **kwargs):
        """
        Initialise based on provided keywords or their traited defaults. Also,
        initialise the place-holder attributes that aren't filled until the
        show() method is called.
        """
        super(PhasePlaneInteractive, self).__init__(**kwargs)
        LOG.debug(str(kwargs))

        #figure
        self.ipp_fig = None

        #phase-plane
        self.pp_ax = None
        self.X = None
        self.Y = None
        self.U = None
        self.V = None
        self.UVmag = None
        self.nullcline_x = None
        self.nullcline_y = None
        self.pp_quivers = None

        #Current state
        self.svx = None
        self.svy = None
        self.default_sv = None
        self.no_coupling = None
        self.mode = None
        self.parameters = None

        #Selectors
        self.state_variable_x = None
        self.state_variable_y = None
        self.mode_selector = None

        #Sliders
        self.param_sliders = None
        self.axes_range_sliders = None
        self.sv_sliders = None
        self.noise_slider = None

        #Reset buttons
        self.reset_param_button = None
        self.reset_sv_button = None
        self.reset_axes_button = None
        self.reset_noise_button = None
        self.reset_seed_button = None

    def show(self):
        """ Generate the interactive phase-plane figure. """
        model_name = self.model.__class__.__name__
        msg = "Generating an interactive phase-plane plot for %s"
        LOG.info(msg % model_name)

        #Make sure the model is fully configured...
        self.model.configure()

        #Set up the initial (current) state
        self.svx = self.model.state_variables[0]  #x-axis: 1st state variable
        self.svy = self.model.state_variables[1]  #y-axis: 2nd state variable
        self.mode = 0
        self.set_parameters()
        self.set_state_vector()

        #Make the figure:
        self.create_figure()

        #Selectors
        self.add_state_variable_selector()
        self.add_mode_selector()

        #Sliders
        self.add_axes_range_sliders()
        self.add_state_variable_sliders()
        self.add_param_sliders()
        if isinstance(self.integrator,
                      integrators_module.IntegratorStochastic):
            if self.integrator.noise.ntau > 0.0:
                self.integrator.noise.configure_coloured(
                    self.integrator.dt,
                    (1, self.model.nvar, 1, self.model.number_of_modes))
            else:
                self.integrator.noise.configure_white(
                    self.integrator.dt,
                    (1, self.model.nvar, 1, self.model.number_of_modes))

            self.add_noise_slider()
            self.add_reset_noise_button()
            self.add_reset_seed_button()

        #Reset buttons
        self.add_reset_param_button()
        self.add_reset_sv_button()
        self.add_reset_axes_button()

        #Calculate the phase plane
        self.set_mesh_grid()
        self.calc_phase_plane()

        #Plot phase plane
        self.plot_phase_plane()

        # add mouse handler for trajectory clicking
        self.ipp_fig.canvas.mpl_connect('button_press_event',
                                        self.click_trajectory)
        #import pdb; pdb.set_trace()

        pylab.show()

    ##------------------------------------------------------------------------##
    ##----------------- Functions for building the figure --------------------##
    ##------------------------------------------------------------------------##
    def create_figure(self):
        """ Create the figure and phase-plane axes. """
        #Figure and main phase-plane axes
        model_name = self.model.__class__.__name__
        integrator_name = self.integrator.__class__.__name__
        figsize = 10, 5
        try:
            figure_window_title = "Interactive phase-plane: " + model_name
            figure_window_title += "   --   %s" % integrator_name
            self.ipp_fig = pylab.figure(num=figure_window_title,
                                        figsize=figsize,
                                        facecolor=BACKGROUNDCOLOUR,
                                        edgecolor=EDGECOLOUR)
        except ValueError:
            LOG.info("My life would be easier if you'd update your PyLab...")
            self.ipp_fig = pylab.figure(num=42,
                                        figsize=figsize,
                                        facecolor=BACKGROUNDCOLOUR,
                                        edgecolor=EDGECOLOUR)

        self.pp_ax = self.ipp_fig.add_axes([0.265, 0.2, 0.5, 0.75])

        self.pp_splt = self.ipp_fig.add_subplot(212)
        self.ipp_fig.subplots_adjust(left=0.265,
                                     bottom=0.02,
                                     right=0.765,
                                     top=0.3,
                                     wspace=0.1,
                                     hspace=None)
        self.pp_splt.set_color_cycle(get_color(self.model.nvar))
        self.pp_splt.plot(
            numpy.arange(TRAJ_STEPS + 1) * self.integrator.dt,
            numpy.zeros((TRAJ_STEPS + 1, self.model.nvar)))
        if hasattr(self.pp_splt, 'autoscale'):
            self.pp_splt.autoscale(enable=True, axis='y', tight=True)
        self.pp_splt.legend(self.model.state_variables)

    def add_state_variable_selector(self):
        """
        Generate radio selector buttons to set which state variable is displayed
        on the x and y axis of the phase-plane plot.
        """
        svx_ind = self.model.state_variables.index(self.svx)
        svy_ind = self.model.state_variables.index(self.svy)

        #State variable for the x axis
        pos_shp = [0.07, 0.05, 0.065, 0.12 + 0.006 * self.model.nvar]
        rax = self.ipp_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="x-axis")
        self.state_variable_x = widgets.RadioButtons(
            rax, tuple(self.model.state_variables), active=svx_ind)
        self.state_variable_x.on_clicked(self.update_svx)

        #State variable for the y axis
        pos_shp = [0.14, 0.05, 0.065, 0.12 + 0.006 * self.model.nvar]
        rax = self.ipp_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="y-axis")
        self.state_variable_y = widgets.RadioButtons(
            rax, tuple(self.model.state_variables), active=svy_ind)
        self.state_variable_y.on_clicked(self.update_svy)

    def add_mode_selector(self):
        """
        Add a radio button to the figure for selecting which mode of the model
        should be displayed.
        """
        pos_shp = [0.02, 0.07, 0.04, 0.1 + 0.002 * self.model.number_of_modes]
        rax = self.ipp_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="Mode")
        mode_tuple = tuple(range(self.model.number_of_modes))
        self.mode_selector = widgets.RadioButtons(rax, mode_tuple, active=0)
        self.mode_selector.on_clicked(self.update_mode)

    def add_axes_range_sliders(self):
        """
        Add sliders to the figure to allow the phase-plane's axes to be set.
        """
        self.axes_range_sliders = dict()

        default_range_x = (self.model.state_variable_range[self.svx][1] -
                           self.model.state_variable_range[self.svx][0])
        default_range_y = (self.model.state_variable_range[self.svy][1] -
                           self.model.state_variable_range[self.svy][0])
        min_val_x = self.model.state_variable_range[
            self.svx][0] - 4.0 * default_range_x
        max_val_x = self.model.state_variable_range[
            self.svx][1] + 4.0 * default_range_x
        min_val_y = self.model.state_variable_range[
            self.svy][0] - 4.0 * default_range_y
        max_val_y = self.model.state_variable_range[
            self.svy][1] + 4.0 * default_range_y

        sax = self.ipp_fig.add_axes([0.04, 0.835, 0.125, 0.025],
                                    axisbg=AXCOLOUR)
        sl_x_min = widgets.Slider(
            sax,
            "xlo",
            min_val_x,
            max_val_x,
            valinit=self.model.state_variable_range[self.svx][0])
        sl_x_min.on_changed(self.update_range)

        sax = self.ipp_fig.add_axes([0.04, 0.8, 0.125, 0.025], axisbg=AXCOLOUR)
        sl_x_max = widgets.Slider(
            sax,
            "xhi",
            min_val_x,
            max_val_x,
            valinit=self.model.state_variable_range[self.svx][1])
        sl_x_max.on_changed(self.update_range)

        sax = self.ipp_fig.add_axes([0.04, 0.765, 0.125, 0.025],
                                    axisbg=AXCOLOUR)
        sl_y_min = widgets.Slider(
            sax,
            "ylo",
            min_val_y,
            max_val_y,
            valinit=self.model.state_variable_range[self.svy][0])
        sl_y_min.on_changed(self.update_range)

        sax = self.ipp_fig.add_axes([0.04, 0.73, 0.125, 0.025],
                                    axisbg=AXCOLOUR)
        sl_y_max = widgets.Slider(
            sax,
            "yhi",
            min_val_y,
            max_val_y,
            valinit=self.model.state_variable_range[self.svy][1])
        sl_y_max.on_changed(self.update_range)

        self.axes_range_sliders["sl_x_min"] = sl_x_min
        self.axes_range_sliders["sl_x_max"] = sl_x_max
        self.axes_range_sliders["sl_y_min"] = sl_y_min
        self.axes_range_sliders["sl_y_max"] = sl_y_max

    def add_state_variable_sliders(self):
        """
        Add sliders to the figure to allow default values for the model's state
        variables to be set.
        """
        msv_range = self.model.state_variable_range
        offset = 0.0
        self.sv_sliders = dict()
        for sv in range(self.model.nvar):
            offset += 0.035
            pos_shp = [0.04, 0.6 - offset, 0.125, 0.025]
            sax = self.ipp_fig.add_axes(pos_shp, axisbg=AXCOLOUR)
            sv_str = self.model.state_variables[sv]
            self.sv_sliders[sv_str] = widgets.Slider(
                sax,
                sv_str,
                msv_range[sv_str][0],
                msv_range[sv_str][1],
                valinit=self.default_sv[sv, 0, 0])
            self.sv_sliders[sv_str].on_changed(self.update_state_variables)

    # Traited parameters as sliders
    def add_param_sliders(self):
        """
        Add sliders to the figure to allow the model's parameters to be set.
        """
        offset = 0.0
        self.param_sliders = dict()
        #import pdb; pdb.set_trace()
        for param, default_param in self.parameters.items():
            offset += 0.035
            sax = self.ipp_fig.add_axes([0.825, 0.865 - offset, 0.125, 0.025],
                                        axisbg=AXCOLOUR)
            default_param = default_param[0]
            param_range = self.model.trait[param].trait.inits.kwd.get(
                'range', None)
            if param_range:
                self.param_sliders[param] = widgets.Slider(
                    sax,
                    param,
                    param_range.lo,
                    param_range.hi,
                    valinit=default_param)
            else:
                self.param_sliders[param] = widgets.Slider(
                    sax, param, -1.0, 1.0, valinit=default_param)

            self.param_sliders[param].on_changed(self.update_parameters)

    def add_noise_slider(self):
        """
        Add a slider to the figure to allow the integrator's noise strength to
        be set.
        """
        pos_shp = [0.825, 0.1, 0.125, 0.025]
        sax = self.ipp_fig.add_axes(pos_shp, axisbg=AXCOLOUR)

        self.noise_slider = widgets.Slider(sax,
                                           "Log Noise",
                                           -9.0,
                                           1.0,
                                           valinit=self.integrator.noise.nsig)
        self.noise_slider.on_changed(self.update_noise)

    def add_reset_param_button(self):
        """
        Add a button to the figure for resetting the model parameter values to
        their original values.
        """
        bax = self.ipp_fig.add_axes([0.825, 0.865, 0.125, 0.04])
        self.reset_param_button = widgets.Button(bax,
                                                 'Reset parameters',
                                                 color=BUTTONCOLOUR,
                                                 hovercolor=HOVERCOLOUR)

        def reset_parameters(event):
            for param_slider in self.param_sliders:
                self.param_sliders[param_slider].reset()

        self.reset_param_button.on_clicked(reset_parameters)

    def add_reset_sv_button(self):
        """
        Add a button to the figure for resetting the model state variables to
        their default values.
        """
        bax = self.ipp_fig.add_axes([0.04, 0.60, 0.125, 0.04])
        self.reset_sv_button = widgets.Button(bax,
                                              'Reset state-variables',
                                              color=BUTTONCOLOUR,
                                              hovercolor=HOVERCOLOUR)

        def reset_state_variables(event):
            for svsl in self.sv_sliders.itervalues():
                svsl.reset()

        self.reset_sv_button.on_clicked(reset_state_variables)

    def add_reset_noise_button(self):
        """
        Add a button to the figure for resetting the noise to its default value.
        """
        bax = self.ipp_fig.add_axes([0.825, 0.135, 0.125, 0.04])
        self.reset_noise_button = widgets.Button(bax,
                                                 'Reset noise strength',
                                                 color=BUTTONCOLOUR,
                                                 hovercolor=HOVERCOLOUR)

        def reset_noise(event):
            self.noise_slider.reset()

        self.reset_noise_button.on_clicked(reset_noise)

    def add_reset_seed_button(self):
        """
        Add a button to the figure for resetting the random number generator to
        its initial state. For reproducible noise...
        """
        bax = self.ipp_fig.add_axes([0.825, 0.05, 0.125, 0.04])
        self.reset_seed_button = widgets.Button(bax,
                                                'Reset random stream',
                                                color=BUTTONCOLOUR,
                                                hovercolor=HOVERCOLOUR)

        def reset_seed(event):
            self.integrator.noise.trait["random_stream"].reset()

        self.reset_seed_button.on_clicked(reset_seed)

    def add_reset_axes_button(self):
        """
        Add a button to the figure for resetting the phase-plane axes to their
        default ranges.
        """
        bax = self.ipp_fig.add_axes([0.04, 0.87, 0.125, 0.04])
        self.reset_axes_button = widgets.Button(bax,
                                                'Reset axes',
                                                color=BUTTONCOLOUR,
                                                hovercolor=HOVERCOLOUR)

        def reset_ranges(event):
            self.axes_range_sliders["sl_x_min"].reset()
            self.axes_range_sliders["sl_x_max"].reset()
            self.axes_range_sliders["sl_y_min"].reset()
            self.axes_range_sliders["sl_y_max"].reset()

        self.reset_axes_button.on_clicked(reset_ranges)

    ##------------------------------------------------------------------------##
    ##------------------- Functions for updating the figure ------------------##
    ##------------------------------------------------------------------------##

    #NOTE: All the ax.set_xlim, poly.xy, etc, garbage below is fragile. It works
    #      at the moment, but there are currently bugs in Slider and the hackery
    #      below takes these into account... If the bugs are fixed/changed then
    #      this could break. As an example, the Slider doc says poly is a
    #      Rectangle, but it's actually a Polygon. The Slider set_val method
    #      assumes a Rectangle even though this is not the case, so the array
    #      Slider.poly.xy is corrupted by that method. The corruption isn't
    #      visible in the plot, which is probably why it hasn't been fixed...

    def update_xrange_sliders(self):
        """
        A hacky update of the x-axis range sliders that is called when the
        state-variable selected for the x-axis is changed.
        """
        default_range_x = (self.model.state_variable_range[self.svx][1] -
                           self.model.state_variable_range[self.svx][0])
        min_val_x = self.model.state_variable_range[
            self.svx][0] - 4.0 * default_range_x
        max_val_x = self.model.state_variable_range[
            self.svx][1] + 4.0 * default_range_x
        self.axes_range_sliders[
            "sl_x_min"].valinit = self.model.state_variable_range[self.svx][0]
        self.axes_range_sliders["sl_x_min"].valmin = min_val_x
        self.axes_range_sliders["sl_x_min"].valmax = max_val_x
        self.axes_range_sliders["sl_x_min"].ax.set_xlim(min_val_x, max_val_x)
        self.axes_range_sliders["sl_x_min"].poly.axes.set_xlim(
            min_val_x, max_val_x)
        self.axes_range_sliders["sl_x_min"].poly.xy[[0, 1], 0] = min_val_x
        self.axes_range_sliders["sl_x_min"].vline.set_data(([
            self.axes_range_sliders["sl_x_min"].valinit,
            self.axes_range_sliders["sl_x_min"].valinit
        ], [0, 1]))
        self.axes_range_sliders[
            "sl_x_max"].valinit = self.model.state_variable_range[self.svx][1]
        self.axes_range_sliders["sl_x_max"].valmin = min_val_x
        self.axes_range_sliders["sl_x_max"].valmax = max_val_x
        self.axes_range_sliders["sl_x_max"].ax.set_xlim(min_val_x, max_val_x)
        self.axes_range_sliders["sl_x_max"].poly.axes.set_xlim(
            min_val_x, max_val_x)
        self.axes_range_sliders["sl_x_max"].poly.xy[[0, 1], 0] = min_val_x
        self.axes_range_sliders["sl_x_max"].vline.set_data(([
            self.axes_range_sliders["sl_x_max"].valinit,
            self.axes_range_sliders["sl_x_max"].valinit
        ], [0, 1]))
        self.axes_range_sliders["sl_x_min"].reset()
        self.axes_range_sliders["sl_x_max"].reset()

    def update_yrange_sliders(self):
        """
        A hacky update of the y-axis range sliders that is called when the
        state-variable selected for the y-axis is changed.
        """
        #svy_ind = self.model.state_variables.index(self.svy)
        default_range_y = (self.model.state_variable_range[self.svy][1] -
                           self.model.state_variable_range[self.svy][0])
        min_val_y = self.model.state_variable_range[
            self.svy][0] - 4.0 * default_range_y
        max_val_y = self.model.state_variable_range[
            self.svy][1] + 4.0 * default_range_y
        self.axes_range_sliders[
            "sl_y_min"].valinit = self.model.state_variable_range[self.svy][0]
        self.axes_range_sliders["sl_y_min"].valmin = min_val_y
        self.axes_range_sliders["sl_y_min"].valmax = max_val_y
        self.axes_range_sliders["sl_y_min"].ax.set_xlim(min_val_y, max_val_y)
        self.axes_range_sliders["sl_y_min"].poly.axes.set_xlim(
            min_val_y, max_val_y)
        self.axes_range_sliders["sl_y_min"].poly.xy[[0, 1], 0] = min_val_y
        self.axes_range_sliders["sl_y_min"].vline.set_data(([
            self.axes_range_sliders["sl_y_min"].valinit,
            self.axes_range_sliders["sl_y_min"].valinit
        ], [0, 1]))
        self.axes_range_sliders[
            "sl_y_max"].valinit = self.model.state_variable_range[self.svy][1]
        self.axes_range_sliders["sl_y_max"].valmin = min_val_y
        self.axes_range_sliders["sl_y_max"].valmax = max_val_y
        self.axes_range_sliders["sl_y_max"].ax.set_xlim(min_val_y, max_val_y)
        self.axes_range_sliders["sl_y_max"].poly.axes.set_xlim(
            min_val_y, max_val_y)
        self.axes_range_sliders["sl_y_max"].poly.xy[[0, 1], 0] = min_val_y
        self.axes_range_sliders["sl_y_max"].vline.set_data(([
            self.axes_range_sliders["sl_y_max"].valinit,
            self.axes_range_sliders["sl_y_max"].valinit
        ], [0, 1]))
        self.axes_range_sliders["sl_y_min"].reset()
        self.axes_range_sliders["sl_y_max"].reset()

    def update_svx(self, label):
        """ 
        Update state variable used for x-axis based on radio buttton selection.
        """
        self.svx = label
        self.update_xrange_sliders()
        self.set_mesh_grid()
        self.calc_phase_plane()
        self.update_phase_plane()

    def update_svy(self, label):
        """ 
        Update state variable used for y-axis based on radio buttton selection.
        """
        self.svy = label
        self.update_yrange_sliders()
        self.set_mesh_grid()
        self.calc_phase_plane()
        self.update_phase_plane()

    def update_mode(self, label):
        """ Update the visualised mode based on radio button selection. """
        self.mode = label
        self.update_phase_plane()

    def update_parameters(self, val):
        """
        Update model parameters based on the current parameter slider values.

        NOTE: Haven't figured out how to update independently, so just update
            everything.
        """
        #TODO: Grab caller and use val directly, ie independent parameter update.
        #import pdb; pdb.set_trace()
        for param in self.param_sliders:
            setattr(self.model, param,
                    numpy.array([self.param_sliders[param].val]))

        self.model.update_derived_parameters()
        self.calc_phase_plane()
        self.update_phase_plane()

    def update_noise(self, nsig):
        """ Update integrator noise based on the noise slider value. """
        self.integrator.noise.nsig = numpy.array([
            10**nsig,
        ])

    def update_range(self, val):
        """
        Update the axes ranges based on the current axes slider values.

        NOTE: Haven't figured out how to update independently, so just update
            everything.

        """
        #TODO: Grab caller and use val directly, ie independent range update.
        self.axes_range_sliders["sl_x_min"].ax.set_axis_bgcolor(AXCOLOUR)
        self.axes_range_sliders["sl_x_max"].ax.set_axis_bgcolor(AXCOLOUR)
        self.axes_range_sliders["sl_y_min"].ax.set_axis_bgcolor(AXCOLOUR)
        self.axes_range_sliders["sl_y_max"].ax.set_axis_bgcolor(AXCOLOUR)

        if (self.axes_range_sliders["sl_x_min"].val >=
                self.axes_range_sliders["sl_x_max"].val):
            LOG.error("X-axis min must be less than max...")
            self.axes_range_sliders["sl_x_min"].ax.set_axis_bgcolor("Red")
            self.axes_range_sliders["sl_x_max"].ax.set_axis_bgcolor("Red")
            return
        if (self.axes_range_sliders["sl_y_min"].val >=
                self.axes_range_sliders["sl_y_max"].val):
            LOG.error("Y-axis min must be less than max...")
            self.axes_range_sliders["sl_y_min"].ax.set_axis_bgcolor("Red")
            self.axes_range_sliders["sl_y_max"].ax.set_axis_bgcolor("Red")
            return

        msv_range = self.model.state_variable_range
        msv_range[self.svx][0] = self.axes_range_sliders["sl_x_min"].val
        msv_range[self.svx][1] = self.axes_range_sliders["sl_x_max"].val
        msv_range[self.svy][0] = self.axes_range_sliders["sl_y_min"].val
        msv_range[self.svy][1] = self.axes_range_sliders["sl_y_max"].val
        self.set_mesh_grid()
        self.calc_phase_plane()
        self.update_phase_plane()

    def update_phase_plane(self):
        """ Clear the axes and redraw the phase-plane. """
        self.pp_ax.clear()
        self.pp_splt.clear()
        self.pp_splt.set_color_cycle(get_color(self.model.nvar))
        self.pp_splt.plot(
            numpy.arange(TRAJ_STEPS + 1) * self.integrator.dt,
            numpy.zeros((TRAJ_STEPS + 1, self.model.nvar)))
        if hasattr(self.pp_splt, 'autoscale'):
            self.pp_splt.autoscale(enable=True, axis='y', tight=True)
        self.pp_splt.legend(self.model.state_variables)
        self.plot_phase_plane()

    def update_state_variables(self, val):
        """
        Update the default state-variable values, used for non-visualised state
        variables, based on the current slider values.
        """
        for sv in self.sv_sliders:
            k = self.model.state_variables.index(sv)
            self.default_sv[k] = self.sv_sliders[sv].val

        self.calc_phase_plane()
        self.update_phase_plane()

    def set_parameters(self):
        """
        Hopefully a temporary hack, new traits should take care of this, i.e.
        provide simple access to this info...
        """
        self.parameters = {}
        #import pdb; pdb.set_trace()
        for key in self.model.trait.keys():
            attr = getattr(self.model, key)
            if (isinstance(attr, numpy.ndarray) and (attr.size == 1)
                    and attr.dtype.type in (numpy.float, numpy.float64)):
                self.parameters[key] = attr

    def set_mesh_grid(self):
        """
        Generate the phase-plane gridding based on currently selected 
        state-variables and their range values.
        """
        xlo = self.model.state_variable_range[self.svx][0]
        xhi = self.model.state_variable_range[self.svx][1]
        ylo = self.model.state_variable_range[self.svy][0]
        yhi = self.model.state_variable_range[self.svy][1]

        self.X = numpy.mgrid[xlo:xhi:(NUMBEROFGRIDPOINTS * 1j)]
        self.Y = numpy.mgrid[ylo:yhi:(NUMBEROFGRIDPOINTS * 1j)]
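        # The complex step (NUMBEROFGRIDPOINTS * 1j) above makes numpy.mgrid return
        # that many evenly spaced samples over [lo, hi], analogous to numpy.linspace.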

    def set_state_vector(self):
        """
        Set up a vector containing the default state-variable values and create
        a filler (all zeros) for the coupling arg of the Model's dfun method.
        This method is called once at initialisation (show()).
        """
        #import pdb; pdb.set_trace()
        sv_mean = numpy.array([
            self.model.state_variable_range[key].mean()
            for key in self.model.state_variables
        ])
        sv_mean = sv_mean.reshape((self.model.nvar, 1, 1))
        self.default_sv = sv_mean.repeat(self.model.number_of_modes, axis=2)
        self.no_coupling = numpy.zeros(
            (self.model.nvar, 1, self.model.number_of_modes))

    def calc_phase_plane(self):
        """ Calculate the vector field. """
        svx_ind = self.model.state_variables.index(self.svx)
        svy_ind = self.model.state_variables.index(self.svy)

        #Calculate the vector field discretely sampled at a grid of points
        grid_point = self.default_sv.copy()
        self.U = numpy.zeros((NUMBEROFGRIDPOINTS, NUMBEROFGRIDPOINTS,
                              self.model.number_of_modes))
        self.V = numpy.zeros((NUMBEROFGRIDPOINTS, NUMBEROFGRIDPOINTS,
                              self.model.number_of_modes))
        for ii in range(NUMBEROFGRIDPOINTS):
            grid_point[svy_ind] = self.Y[ii]
            for jj in range(NUMBEROFGRIDPOINTS):
                #import pdb; pdb.set_trace()
                grid_point[svx_ind] = self.X[jj]

                d = self.model.dfun(grid_point, self.no_coupling)

                for kk in range(self.model.number_of_modes):
                    self.U[ii, jj, kk] = d[svx_ind, 0, kk]
                    self.V[ii, jj, kk] = d[svy_ind, 0, kk]

        #Colours for the vector field quivers
        #self.UVmag = numpy.sqrt(self.U**2 + self.V**2)

        #import pdb; pdb.set_trace()
        if numpy.isnan(self.U).any() or numpy.isnan(self.V).any():
            LOG.error("NaN")

    def plot_phase_plane(self):
        """ Plot the vector field and its nullclines. """
        # Set title and axis labels
        model_name = self.model.__class__.__name__
        self.pp_ax.set(title=model_name + " mode " + str(self.mode))
        self.pp_ax.set(xlabel="State Variable " + self.svx)
        self.pp_ax.set(ylabel="State Variable " + self.svy)

        #import pdb; pdb.set_trace()
        #Plot a discrete representation of the vector field
        if numpy.all(self.U[:, :, self.mode] + self.V[:, :, self.mode] == 0):
            self.pp_ax.set(title=model_name + " mode " + str(self.mode) +
                           ": NO MOTION IN THIS PLANE")
            X, Y = numpy.meshgrid(self.X, self.Y)
            self.pp_quivers = self.pp_ax.scatter(X, Y, s=8, marker=".", c="k")
        else:
            self.pp_quivers = self.pp_ax.quiver(
                self.X,
                self.Y,
                self.U[:, :, self.mode],
                self.V[:, :, self.mode],
                #self.UVmag[:, :, self.mode],
                width=0.001,
                headwidth=8)

        #Plot the nullclines
        self.nullcline_x = self.pp_ax.contour(self.X,
                                              self.Y,
                                              self.U[:, :, self.mode], [0],
                                              colors="r")
        self.nullcline_y = self.pp_ax.contour(self.X,
                                              self.Y,
                                              self.V[:, :, self.mode], [0],
                                              colors="g")
        pylab.draw()

    def plot_trajectory(self, x, y):
        """
        Plot a sample trajectory, starting at the position x,y in the
        phase-plane. This method is called as a result of a mouse click on the 
        phase-plane.
        """
        svx_ind = self.model.state_variables.index(self.svx)
        svy_ind = self.model.state_variables.index(self.svy)

        #Calculate an example trajectory
        state = self.default_sv.copy()
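        # Clamp the non-visualised state variables to their default values for
        # the duration of the sample trajectory.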
        self.integrator.clamped_state_variable_indices = numpy.setdiff1d(
            numpy.r_[:len(self.model.state_variables)], numpy.r_[svx_ind,
                                                                 svy_ind])
        self.integrator.clamped_state_variable_values = self.default_sv[
            self.integrator.clamped_state_variable_indices]
        state[svx_ind] = x
        state[svy_ind] = y
        scheme = self.integrator.scheme
        traj = numpy.zeros(
            (TRAJ_STEPS + 1, self.model.nvar, 1, self.model.number_of_modes))
        traj[0, :] = state
        for step in range(TRAJ_STEPS):
            #import pdb; pdb.set_trace()
            state = scheme(state, self.model.dfun, self.no_coupling, 0.0, 0.0)
            traj[step + 1, :] = state

        self.pp_ax.scatter(x, y, s=42, c='g', marker='o', edgecolor=None)
        self.pp_ax.plot(traj[:, svx_ind, 0, self.mode], traj[:, svy_ind, 0,
                                                             self.mode])

        #Plot the selected state variable trajectories as a function of time
        self.pp_splt.plot(
            numpy.arange(TRAJ_STEPS + 1) * self.integrator.dt, traj[:, :, 0,
                                                                    self.mode])

        pylab.draw()

    def click_trajectory(self, event):
        """
        This method captures mouse clicks on the phase-plane and then uses the 
        plot_trajectory() method to generate a sample trajectory.
        """
        if event.inaxes is self.pp_ax:
            x, y = event.xdata, event.ydata
            LOG.info('trajectory starting at (%f, %f)', x, y)
            self.plot_trajectory(x, y)
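A minimal usage sketch for the viewer above (an illustrative assumption, not part of the original source: it presumes the ``models_module`` and ``integrators_module`` aliases used inside the class are importable in the calling scope, and that the traited defaults are acceptable):

ppi = PhasePlaneInteractive(model=models_module.Generic2dOscillator(),
                            integrator=integrators_module.HeunDeterministic())
ppi.show()  # opens the interactive figure; clicking the phase-plane draws a sample trajectory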
Example no. 4
    def test_integrator_base_class(self):
        with pytest.raises(TypeError):
            integrators.Integrator()
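The test above only checks that the abstract Integrator base class cannot be instantiated. A hedged companion sketch (the test name and dt value are illustrative), assuming concrete subclasses such as HeunDeterministic accept their traited attributes (e.g. dt) as keyword arguments, as other classes in these examples do:

    def test_heun_deterministic_instantiation(self):
        heun = integrators.HeunDeterministic(dt=2**-6)
        assert heun.dt == 2**-6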
Example no. 5
class Simulator(core.Type):
    """
    The Simulator class coordinates classes from all other modules in the
    simulator package in order to perform simulations. 

    In general, it is necessary to initialise a simulator with the desired
    components and then call the simulator in a loop to obtain simulation
    data:
    
    >>> sim = Simulator(...)
    >>> for output in sim(simulation_length=1000):
            ...
    
    Please refer to the user guide and the demos for more detail.


    .. #Currently there seems to be a clash between traits and autodoc, autodoc
    .. #can't find the methods of the class, the class specific names below get
    .. #us around this...
    .. automethod:: Simulator.__init__
    .. automethod:: Simulator.configure
    .. automethod:: Simulator.__call__
    .. automethod:: Simulator.configure_history
    .. automethod:: Simulator.configure_integrator_noise
    .. automethod:: Simulator.memory_requirement
    .. automethod:: Simulator.runtime
    .. automethod:: Simulator.storage_requirement


    """

    connectivity = connectivity_dtype.Connectivity(
        label="Long-range connectivity",
        default=None,
        order=1,
        required=True,
        filters_ui=[
            UIFilter(linked_elem_name="projection_matrix_data",
                     linked_elem_field=FilterChain.datatype + "._sources",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="region_mapping_data",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None)
        ],
        doc="""A tvb.datatypes.Connectivity object which contains the
        structural long-range connectivity data (i.e., white-matter tracts). In
        combination with the ``Long-range coupling function`` it defines the inter-regional
        connections. These couplings undergo a time delay via signal propagation 
        with a propagation speed of ``Conduction Speed``""")

    conduction_speed = basic.Float(
        label="Conduction Speed",
        default=3.0,
        order=2,
        required=False,
        range=basic.Range(lo=0.01, hi=100.0, step=1.0),
        doc="""Conduction speed for ``Long-range connectivity`` (mm/ms)""")

    coupling = coupling_module.Coupling(
        label="Long-range coupling function",
        default=coupling_module.Linear(),
        required=True,
        order=2,
        doc="""The coupling function is applied to the activity propagated
        between regions by the ``Long-range connectivity`` before it enters the local
        dynamic equations of the Model. Its primary purpose is to 'rescale' the
        incoming activity to a level appropriate to the Model.""")

    surface = Cortex(
        label="Cortical surface",
        default=None,
        order=3,
        required=False,
        filters_backend=FilterChain(
            fields=[FilterChain.datatype + '._valid_for_simulations'],
            operations=["=="],
            values=[True]),
        filters_ui=[
            UIFilter(linked_elem_name="projection_matrix_data",
                     linked_elem_field=FilterChain.datatype + "._sources",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="local_connectivity",
                     linked_elem_field=FilterChain.datatype + "._surface",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None)
        ],
        doc="""By default, a tvb.datatypes.Cortex object which represents the
        cortical surface defined by points in the 3D physical space and their 
        neighborhood relationship. In the current TVB version, when setting up a 
        surface-based simulation, the option to configure the spatial spread of 
        the ``Local Connectivity`` is available.""")

    stimulus = patterns_dtype.SpatioTemporalPattern(
        label="Spatiotemporal stimulus",
        default=None,
        order=4,
        required=False,
        doc=
        """A ``Spatiotemporal stimulus`` can be defined at the region or surface level.
        It's composed of spatial and temporal components. For region defined stimuli
        the spatial component is just the strength with which the temporal
        component is applied to each region. For surface defined stimuli,  a
        (spatial) function, with finite-support, is used to define the strength 
        of the stimuli on the surface centred around one or more focal points. 
        In the current version of TVB, stimuli are applied to the first state 
        variable of the ``Local dynamic model``.""")

    model = models_module.Model(
        label="Local dynamic model",
        default=models_module.Generic2dOscillator,
        required=True,
        order=5,
        doc="""A tvb.simulator.Model object which describe the local dynamic
        equations, their parameters, and, to some extent, where connectivity
        (local and long-range) enters and which state-variables the Monitors
        monitor. By default the 'Generic2dOscillator' model is used. Read the 
        Scientific documentation to learn more about this model.""")

    integrator = integrators_module.Integrator(
        label="Integration scheme",
        default=integrators_module.HeunDeterministic,
        required=True,
        order=6,
        doc="""A tvb.simulator.Integrator object which is
            an integration scheme with supporting attributes such as 
            integration step size and noise specification for stochastic 
            methods. It is used to compute the time courses of the model state 
            variables.""")

    initial_conditions = arrays_dtype.FloatArray(
        label="Initial Conditions",
        default=None,
        order=-1,
        required=False,
        doc="""Initial conditions from which the simulation will begin. By
        default, random initial conditions are provided. Needs to be the same shape
        as simulator 'history', i.e., the initial history function which defines the
        minimal initial state of the network with time delays before time t=0. 
        If the number of time points in the provided array is insufficient the 
        array will be padded with random values based on the 'state_variables_range'
        attribute.""")

    monitors = monitors_module.Monitor(
        label="Monitor(s)",
        default=monitors_module.TemporalAverage,
        required=True,
        order=8,
        select_multiple=True,
        doc="""A tvb.simulator.Monitor or a list of tvb.simulator.Monitor
        objects that 'know' how to record relevant data from the simulation. Two
        main types exist: 1) simple, spatial and temporal, reductions (subsets
        or averages); 2) physiological measurements, such as EEG, MEG and fMRI.
        By default the Model's specified variables_of_interest are returned,
        temporally downsampled from the raw integration rate to a sample rate of
        1024Hz.""")

    simulation_length = basic.Float(
        label="Simulation Length (ms)",
        default=1000.0,  # ie 1 second
        required=True,
        order=9,
        doc="""The length of a simulation in milliseconds (ms).""")

    def __init__(self, **kwargs):
        """
        Use the base class' mechanisms to initialise the traited attributes 
        declared above, overriding defaults with any provided keywords. Then
        declare any non-traited attributes.

        """
        super(Simulator, self).__init__(**kwargs)
        LOG.debug(str(kwargs))

        self.calls = 0
        self.current_step = 0

        self.number_of_nodes = None
        self.horizon = None
        self.good_history_shape = None
        self.history = None
        self._memory_requirement_guess = None
        self._memory_requirement_census = None
        self._storage_requirement = None
        self._runtime = None

    def __str__(self):
        return "Simulator(**kwargs)"

    def preconfigure(self):
        """
        Configure just the basic fields, so that memory can be estimated
        """
        self.connectivity.configure()

        if self.surface:
            self.surface.configure()

        if self.stimulus:
            self.stimulus.configure()

        self.coupling.configure()
        self.model.configure()
        self.integrator.configure()

        # monitors needs to be a list or tuple, even if there is only one...
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]

        # Configure monitors
        for monitor in self.monitors:
            monitor.configure()

        ##--------------- Now the interdependent configuration ---------------##

        #"Nodes" refers to either regions or vertices + non-cortical regions.
        if self.surface is None:
            self.number_of_nodes = self.connectivity.number_of_regions
        else:
            #try:
            self.number_of_nodes = self.surface.region_mapping.shape[0]
            #except AttributeError:
            #    msg = "%s: Surface needs region mapping defined... "
            #    LOG.error(msg % (repr(self)))

        # Estimate of memory usage
        self._guesstimate_memory_requirement()

    def configure(self, full_configure=True):
        """
        The first step of configuration is to run the configure methods of all
        the Simulator's components, i.e. its traited attributes.

        Configuration of a Simulator primarily consists of calculating the
        attributes, etc, which depend on the combinations of the Simulator's
        traited attributes (keyword args).

        Converts delays from physical time units into integration steps
        and updates attributes that depend on combinations of the 6 inputs.
        """
        if full_configure:
            # When run from GUI, preconfigure is run separately, and we want to avoid running that part twice
            self.preconfigure()

        #Make sure spatialised model parameters have the right shape (number_of_nodes, 1)
        excluded_params = ("state_variable_range", "variables_of_interest",
                           "noise", "psi_table", "nerf_table")

        for param in self.model.trait.keys():
            if param in excluded_params:
                continue
            #If it's a surface sim and model parameters were provided at the region level
            region_parameters = getattr(self.model, param)
            if self.surface is not None:
                if region_parameters.size == self.connectivity.number_of_regions:
                    new_parameters = region_parameters[
                        self.surface.region_mapping].reshape((-1, 1))
                    setattr(self.model, param, new_parameters)
            region_parameters = getattr(self.model, param)
            if region_parameters.size == self.number_of_nodes:
                new_parameters = region_parameters.reshape((-1, 1))
                setattr(self.model, param, new_parameters)

        #Configure spatial component of any stimuli
        self.configure_stimuli()

        #Set delays, provided in physical units, in integration steps.
        self.connectivity.set_idelays(self.integrator.dt)

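        # The history buffer must span the longest delay (in integration steps),
        # plus one slot for the current step.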
        self.horizon = numpy.max(self.connectivity.idelays) + 1
        LOG.info("horizon is %d steps" % self.horizon)

        # workspace -- minimal state of network with delays
        self.good_history_shape = (self.horizon, self.model.nvar,
                                   self.number_of_nodes,
                                   self.model.number_of_modes)
        msg = "%s: History shape will be: %s"
        LOG.debug(msg % (repr(self), str(self.good_history_shape)))

        #Reshape integrator.noise.nsig, if necessary.
        if isinstance(self.integrator,
                      integrators_module.IntegratorStochastic):
            self.configure_integrator_noise()

        self.configure_history(self.initial_conditions)

        #Configure Monitors to work with selected Model, etc...
        self.configure_monitors()

        #Estimate of memory usage.
        self._census_memory_requirement()

    def __call__(self, simulation_length=None, random_state=None):
        """
        When a Simulator is called it returns an iterator.

        kwargs:

        ``simulation_length``:
           total time of simulation

        ``random_state``: 
           a state for the NumPy random number generator, saved from a previous 
           call to permit consistent continuation of a simulation.

        """
        #The number of times this Simulator has been called.
        self.calls += 1

        #Update the simulator objects simulation_length attribute,
        if simulation_length is None:
            simulation_length = self.simulation_length
        else:
            self.simulation_length = simulation_length

        #Estimate run time and storage requirements, with logging.
        self._guesstimate_runtime()
        self._calculate_storage_requirement()

        if random_state is not None:
            if isinstance(self.integrator,
                          integrators_module.IntegratorStochastic):
                self.integrator.noise.random_stream.set_state(random_state)
                msg = "%s: random_state supplied. Seed is: %s"
                LOG.info(
                    msg %
                    (str(self),
                     str(self.integrator.noise.random_stream.get_state()[1][0])
                     ))
            else:
                msg = "%s: random_state supplied for non-stochastic integration"
                LOG.warn(msg % str(self))

        #Determine the number of integration steps required to produce
        #data of simulation_length
        int_steps = int(simulation_length / self.integrator.dt)
        LOG.info("%s: gonna do %d integration steps" % (str(self), int_steps))

        # locals for cleaner code.
        horizon = self.horizon
        history = self.history
        dfun = self.model.dfun
        coupling = self.coupling
        scheme = self.integrator.scheme
        npsum = numpy.sum
        npdot = numpy.dot
        ncvar = len(self.model.cvar)
        number_of_regions = self.connectivity.number_of_regions
        nsn = (number_of_regions, 1, number_of_regions)

        # Exact dtypes and alignment are required by c speedups. Once we have history objects these will be encapsulated
        # cvar index array broadcastable to nodes, cvars, nodes
        cvar = numpy.array(self.model.cvar[numpy.newaxis, :, numpy.newaxis],
                           dtype=numpy.intc)
        LOG.debug("%s: cvar is: %s" % (str(self), str(cvar)))
        # idelays array broadcastable to nodes, cvars, nodes
        idelays = numpy.array(self.connectivity.idelays[:, numpy.newaxis, :],
                              dtype=numpy.intc,
                              order='c')
        LOG.debug("%s: idelays shape is: %s" % (str(self), str(idelays.shape)))
        # weights array broadcastable to nodes, cva, nodes, modes
        weights = self.connectivity.weights[:, numpy.newaxis, :, numpy.newaxis]
        LOG.debug("%s: weights shape is: %s" % (str(self), str(weights.shape)))
        # node_ids broadcastable to nodes, cvars, nodes
        node_ids = numpy.array(
            numpy.arange(number_of_regions)[numpy.newaxis, numpy.newaxis, :],
            dtype=numpy.intc)
        LOG.debug("%s: node_ids shape is: %s" %
                  (str(self), str(node_ids.shape)))

        if self.surface is None:
            local_coupling = 0.0
        else:
            region_average = self.surface.region_average
            region_history = npdot(
                region_average, history
            )  # this may be very expensive ~60sec for epileptor (many states and modes ...)
            region_history = region_history.transpose((1, 2, 0, 3))
            region_history = numpy.ascontiguousarray(
                region_history)  # required by the c speedups
            if self.surface.coupling_strength.size == 1:
                local_coupling = (self.surface.coupling_strength[0] *
                                  self.surface.local_connectivity.matrix)
            elif self.surface.coupling_strength.size == self.surface.number_of_vertices:
                ind = numpy.arange(self.number_of_nodes, dtype=int)
                vec_cs = numpy.zeros((self.number_of_nodes, ))
                vec_cs[:self.surface.number_of_vertices] = \
                    self.surface.coupling_strength
                sp_cs = sparse.csc_matrix(
                    (vec_cs, (ind, ind)),
                    shape=(self.number_of_nodes, self.number_of_nodes))
                local_coupling = sp_cs * self.surface.local_connectivity.matrix

        if self.stimulus is None:
            stimulus = 0.0
        else:  # TODO: Consider changing to simulator absolute time... This is an open discussion, a matter of interpretation of the stimuli time axis.
            time = numpy.arange(0, simulation_length, self.integrator.dt)
            time = time[numpy.newaxis, :]
            self.stimulus.configure_time(time)
            stimulus = numpy.zeros((self.model.nvar, self.number_of_nodes, 1))
            LOG.debug("%s: stimulus shape is: %s" %
                      (str(self), str(stimulus.shape)))

        # initial state, history[timepoint[0], state_variables, nodes, modes]
        state = history[self.current_step % horizon, :]
        LOG.debug("%s: state shape is: %s" % (str(self), str(state.shape)))

        if self.surface is not None:
            # the vertex mapping array is huge but sparse.
            # csr because I expect the row to have one value and I expect the dot to proceed row wise.
            vertex_mapping = sparse.csr_matrix(self.surface.vertex_mapping)
            # this is big as well. same shape as the vertex mapping.
            region_average = sparse.csr_matrix(region_average)

            node_coupling_shape = (vertex_mapping.shape[0], ncvar,
                                   self.model.number_of_modes)

        delayed_state = numpy.zeros(
            (number_of_regions, ncvar, number_of_regions,
             self.model.number_of_modes))

        for step in xrange(self.current_step + 1,
                           self.current_step + int_steps + 1):
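            # Indices into the circular history buffer for the delayed
            # coupling-variable states of every connection.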
            time_indices = (step - 1 - idelays) % horizon
            if self.surface is None:
                get_state(history,
                          time_indices,
                          cvar,
                          node_ids,
                          out=delayed_state)
                node_coupling = coupling(weights, state[self.model.cvar],
                                         delayed_state)
            else:
                get_state(region_history,
                          time_indices,
                          cvar,
                          node_ids,
                          out=delayed_state)
                region_coupling = coupling(
                    weights, region_history[(step - 1) % horizon,
                                            self.model.cvar], delayed_state)
                node_coupling = numpy.empty(node_coupling_shape)

                # sparse matrices cannot multiply with 3d arrays so we use a loop over the modes
                for mi in xrange(self.model.number_of_modes):
                    node_coupling[..., mi] = \
                        vertex_mapping * region_coupling[..., mi].T

                node_coupling = node_coupling.transpose((1, 0, 2))

            if self.stimulus is not None:
                stimulus[self.model.cvar, :, :] = numpy.reshape(
                    self.stimulus(step - (self.current_step + 1)), (1, -1, 1))

            state = scheme(state, dfun, node_coupling, local_coupling,
                           stimulus)
            history[step % horizon, :] = state

            if self.surface is not None:
                # this optimisation is similar to the one done for vertex_mapping above
                step_avg = numpy.empty((number_of_regions, state.shape[0],
                                        self.model.number_of_modes))
                for mi in xrange(self.model.number_of_modes):
                    step_avg[..., mi] = region_average.dot(state[..., mi].T)

                region_history[step % horizon, :] = step_avg.transpose(
                    (1, 0, 2))

            # monitor.things e.g. raw, average, eeg, meg, fmri...
            output = [monitor.record(step, state) for monitor in self.monitors]
            if any(outputi is not None for outputi in output):
                yield output

        # The -1 avoids repeating the last point when the simulation is resumed
        self.current_step = self.current_step + int_steps - 1
        self.history = history

    def configure_history(self, initial_conditions=None):
        """
        Set initial conditions for the simulation using either the provided
        initial_conditions or, if none are provided, the model's initial()
        method. This method is called during the Simulator's configure().

        Any initial_conditions that are provided as an argument are expected
        to have dimensions 1, 2, and 3 with shapes corresponding to the number
        of state_variables, nodes and modes, respectively. If the provided
        initial_conditions are shorter in time (dim=0) than the required history
        the model's initial() method is called to make up the difference.

        """

        history = self.history
        if initial_conditions is None:
            msg = "%s: Setting default history using model's initial() method."
            LOG.info(msg % str(self))
            history = self.model.initial(self.integrator.dt,
                                         self.good_history_shape)
        else:
            # history should be [timepoints, state_variables, nodes, modes]
            LOG.info("%s: Received initial conditions as arg." % str(self))
            ic_shape = initial_conditions.shape
            if ic_shape[1:] != self.good_history_shape[1:]:
                msg = "%s: bad initial_conditions[1:] shape %s, should be %s"
                msg %= self, ic_shape[1:], self.good_history_shape[1:]
                raise ValueError(msg)
            else:
                if ic_shape[0] >= self.horizon:
                    msg = "%s: Using last %s time-steps for history."
                    LOG.info(msg % (str(self), self.horizon))
                    history = initial_conditions[
                        -self.horizon:, :, :, :].copy()
                else:
                    msg = "%s: initial_conditions shorter than required."
                    LOG.info(msg % str(self))
                    msg = "%s: Using model's initial() method for difference."
                    LOG.info(msg % str(self))
                    history = self.model.initial(self.integrator.dt,
                                                 self.good_history_shape)
                    csmh = self.current_step % self.horizon
                    history = numpy.roll(history, -csmh, axis=0)
                    history[:ic_shape[0], :, :, :] = initial_conditions
                    history = numpy.roll(history, csmh, axis=0)
                self.current_step += ic_shape[0] - 1
            msg = "%s: history shape is: %s"
            LOG.debug(msg % (str(self), str(history.shape)))
        self.history = history
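
    # A minimal sketch of supplying explicit initial conditions, assuming a
    # configured region-level simulator `sim`.  If the time dimension were
    # shorter than the history horizon, the model's initial() method would pad
    # the difference, as described in the docstring above.
    #
    #     ic = numpy.zeros(sim.good_history_shape)
    #     sim.configure_history(initial_conditions=ic)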

    def configure_integrator_noise(self):
        """
        This enables the noise to be state-variable specific and/or to enter
        only via specific brain structures, for example if we only want to
        consider noise as an external input entering the brain via the
        appropriate thalamic nuclei.

        Three shapes are supported for integrator.noise.nsig:
            1) number_of_nodes;

            2) number_of_state_variables; and 

            3) (number_of_state_variables, number_of_nodes).

        """

        noise = self.integrator.noise

        if self.integrator.noise.ntau > 0.0:
            self.integrator.noise.configure_coloured(
                self.integrator.dt, self.good_history_shape[1:])
        else:
            self.integrator.noise.configure_white(self.integrator.dt,
                                                  self.good_history_shape[1:])

        if self.surface is not None:
            if self.integrator.noise.nsig.size == self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[
                    self.surface.region_mapping]
            elif self.integrator.noise.nsig.size == self.model.nvar * self.connectivity.number_of_regions:
                self.integrator.noise.nsig = \
                    self.integrator.noise.nsig[:, self.surface.region_mapping]

        good_nsig_shape = (self.model.nvar, self.number_of_nodes,
                           self.model.number_of_modes)
        nsig = self.integrator.noise.nsig
        LOG.debug("Simulator.integrator.noise.nsig shape: %s" %
                  str(nsig.shape))
        if nsig.shape in (good_nsig_shape, (1, )):
            return
        elif nsig.shape == (self.model.nvar, ):
            nsig = nsig.reshape((self.model.nvar, 1, 1))
        elif nsig.shape == (self.number_of_nodes, ):
            nsig = nsig.reshape((1, self.number_of_nodes, 1))
        elif nsig.shape == (self.model.nvar, self.number_of_nodes):
            nsig = nsig.reshape((self.model.nvar, self.number_of_nodes, 1))
        else:
            msg = "Bad Simulator.integrator.noise.nsig shape: %s"
            LOG.error(msg % str(nsig.shape))

        LOG.debug("Simulator.integrator.noise.nsig shape: %s" %
                  str(nsig.shape))
        self.integrator.noise.nsig = nsig
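
    # A minimal sketch of the shape handling above, assuming a hypothetical
    # simulator `sim` whose model has nvar=2 and which simulates 4 nodes:
    #
    #     sim.integrator.noise.nsig = numpy.array([0.01, 0.02])  # shape (2,)
    #     sim.configure_integrator_noise()
    #     # nsig is reshaped to (2, 1, 1); a length-4 per-node vector would
    #     # become (1, 4, 1), and a (2, 4) array would become (2, 4, 1), ready
    #     # to broadcast against (nvar, number_of_nodes, number_of_modes).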

    def configure_monitors(self):
        """ Configure the requested Monitors for this Simulator """
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]

        # Configure monitors
        for monitor in self.monitors:
            monitor.config_for_sim(self)

    def configure_stimuli(self):
        """ Configure the defined Stimuli for this Simulator """
        if self.stimulus is not None:
            if self.surface:
                self.stimulus.configure_space(self.surface.region_mapping)
            else:
                self.stimulus.configure_space()

    def memory_requirement(self):
        """
        Return an estimate of the memory requirements (Bytes) for this
        simulator's current configuration.
        """
        self._guesstimate_memory_requirement()
        return self._memory_requirement_guess

    def runtime(self, simulation_length):
        """
        Return an estimated run time (seconds) for the simulator's current 
        configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._guesstimate_runtime()
        return self._runtime

    def storage_requirement(self, simulation_length):
        """
        Return an estimated storage requirement (Bytes) for the simulator's
        current configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._calculate_storage_requirement()
        return self._storage_requirement

    def _guesstimate_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator.

        The guesstimate is based on the shape of the dominant arrays, and as
        such can operate before configuration.

        NOTE: Assumes returned/yielded data is in some sense "taken care of"
            outside the simulator, and so doesn't consider it, making the
            simulator's history, and surface if present, the dominant memory
            consumers...

        """
        if self.surface:
            number_of_nodes = self.surface.number_of_vertices
        else:
            number_of_nodes = self.connectivity.number_of_regions

        number_of_regions = self.connectivity.number_of_regions

        magic_number = 2.42  # The current guesstimate is low by about a factor of 2; it seems safer to overestimate...
        bits_64 = 8.0  # Bytes
        bits_32 = 4.0  # Bytes
        #NOTE: The speed hack for getting the first element of hist_shape should
        #      partially resolve calling this method with a non-configured
        #      connectivity; a less common issue remains if there are no tract_lengths...
        hist_shape = (
            self.connectivity.tract_lengths.max() /
            (self.conduction_speed or self.connectivity.speed or 3.0) /
            self.integrator.dt, self.model.nvar, number_of_nodes,
            self.model.number_of_modes)
        memreq = numpy.prod(hist_shape) * bits_64
        if self.surface:
            memreq += self.surface.number_of_triangles * 3 * bits_32 * 2  # normals
            memreq += self.surface.number_of_vertices * 3 * bits_64 * 2  # normals
            memreq += number_of_nodes * number_of_regions * bits_64 * 4  # vertex_mapping, region_average, region_sum
            #???memreq += self.surface.local_connectivity.matrix.nnz * 8

        if not isinstance(self.monitors, (list, tuple)):
            monitors = [self.monitors]
        else:
            monitors = self.monitors
        for monitor in monitors:
            if not isinstance(monitor, monitors_module.Bold):
                stock_shape = (monitor.period / self.integrator.dt,
                               self.model.variables_of_interest.shape[0],
                               number_of_nodes, self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                if hasattr(monitor, "sensors"):
                    try:
                        memreq += number_of_nodes * monitor.sensors.number_of_sensors * bits_64  # projection_matrix
                    except AttributeError:
                        LOG.debug(
                            "No sensors specified, guessing memory based on default EEG."
                        )
                        memreq += number_of_nodes * 62.0 * bits_64

            else:
                stock_shape = (monitor.hrf_length * monitor._stock_sample_rate,
                               self.model.variables_of_interest.shape[0],
                               number_of_nodes, self.model.number_of_modes)
                interim_stock_shape = (
                    1.0 / (2.0**-2 * self.integrator.dt),
                    self.model.variables_of_interest.shape[0], number_of_nodes,
                    self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                memreq += numpy.prod(interim_stock_shape) * bits_64

        if psutil and memreq > psutil.virtual_memory().total:
            LOG.error("This is gonna get ugly...")

        self._memory_requirement_guess = magic_number * memreq
        msg = "Memory requirement guesstimate: simulation will need about %.1f MB"
        LOG.info(msg % (self._memory_requirement_guess / 1048576.0))

    def _census_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator. 

        Guesstimate is based on a census of the dominant arrays after the
        simulator has been configured.

        NOTE: Assumes returned/yielded data is in some sense "taken care of"
            outside the simulator, and so doesn't consider it, making the
            simulator's history, and surface if present, the dominant memory
            consumers...

        """
        magic_number = 2.42  # The current guesstimate is low by about a factor of 2; it seems safer to overestimate...
        memreq = self.history.nbytes
        try:
            memreq += self.surface.triangles.nbytes * 2
            memreq += self.surface.vertices.nbytes * 2
            memreq += self.surface.vertex_mapping.nbytes * 4  # vertex_mapping, region_average, region_sum
            memreq += self.surface.eeg_projection.nbytes
            memreq += self.surface.local_connectivity.matrix.nnz * 8
        except AttributeError:
            pass

        for monitor in self.monitors:
            memreq += monitor._stock.nbytes
            if isinstance(monitor, monitors_module.Bold):
                memreq += monitor._interim_stock.nbytes

        if psutil and memreq > psutil.virtual_memory().total:
            LOG.error("This is gonna get ugly...")

        self._memory_requirement_census = magic_number * memreq
        #import pdb; pdb.set_trace()
        msg = "Memory requirement census: simulation will need about %.1f MB"
        LOG.info(msg % (self._memory_requirement_census / 1048576.0))

    def _guesstimate_runtime(self):
        """
        Estimate the runtime for this simulator.

        Spreading work across parallel executions of larger arrays means this
        will be an over-estimate, or rather a single-threaded estimate...
        Different choices of integrator and monitors have an additional,
        though relatively minor, effect on the magic number.

        """
        magic_number = 6.57e-06  # seconds
        self._runtime = (magic_number * self.number_of_nodes *
                         self.model.nvar * self.model.number_of_modes *
                         self.simulation_length / self.integrator.dt)
        msg = "Simulation single-threaded runtime should be about %s seconds!"
        LOG.info(msg % str(int(self._runtime)))

    def _calculate_storage_requirement(self):
        """
        Calculate the storage requirement for the simulator, configured with
        models, monitors, etc being run for a particular simulation length. 
        While this is only approximate, it is far more reliable/accurate than
        the memory and runtime guesstimates.
        """
        LOG.info("Calculating storage requirement for ...")
        strgreq = 0
        for monitor in self.monitors:
            # Avoid division by zero for monitor not yet configured
            # (in framework this is executed, when only preconfigure has been called):
            current_period = monitor.period or self.integrator.dt
            strgreq += (TvbProfile.current.MAGIC_NUMBER *
                        self.simulation_length * self.number_of_nodes *
                        self.model.nvar * self.model.number_of_modes /
                        current_period)
        LOG.info("Calculated storage requirement for simulation: %d " %
                 int(strgreq))
        self._storage_requirement = int(strgreq)
Example n. 6
0
    def test_integrator_base_class(self):
        integrator = integrators.Integrator()
        assert integrator.dt == dt
        with pytest.raises(NotImplementedError):
            self._call_base_scheme(integrator)
Example n. 7
0
class Simulator(core.Type):
    "A Simulator assembles components required to perform simulations."

    connectivity = connectivity.Connectivity(
        label="Long-range connectivity",
        default=None,
        order=1,
        required=True,
        filters_ui=[
            UIFilter(linked_elem_name="region_mapping_data",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None),
            UIFilter(linked_elem_name="region_mapping",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="region_mapping",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="MEG"),
            UIFilter(linked_elem_name="region_mapping",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="iEEG")
        ],
        doc="""A tvb.datatypes.Connectivity object which contains the
        structural long-range connectivity data (i.e., white-matter tracts). In
        combination with the ``Long-range coupling function`` it defines the inter-regional
        connections. These couplings undergo a time delay via signal propagation 
        with a propagation speed of ``Conduction Speed``""")

    conduction_speed = basic.Float(
        label="Conduction Speed",
        default=3.0,
        order=2,
        required=False,
        range=basic.Range(lo=0.01, hi=100.0, step=1.0),
        doc="""Conduction speed for ``Long-range connectivity`` (mm/ms)""")

    coupling = coupling.Coupling(
        label="Long-range coupling function",
        default=coupling.Linear(),
        required=True,
        order=2,
        doc="""The coupling function is applied to the activity propagated
        between regions by the ``Long-range connectivity`` before it enters the local
        dynamic equations of the Model. Its primary purpose is to 'rescale' the
        incoming activity to a level appropriate to Model.""")

    surface = cortex.Cortex(
        label="Cortical surface",
        default=None,
        order=3,
        required=False,
        filters_backend=FilterChain(
            fields=[FilterChain.datatype + '._valid_for_simulations'],
            operations=["=="],
            values=[True]),
        filters_ui=[
            UIFilter(linked_elem_name="projection_matrix_data",
                     linked_elem_field=FilterChain.datatype + "._sources",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="local_connectivity",
                     linked_elem_field=FilterChain.datatype + "._surface",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None)
        ],
        doc="""By default, a Cortex object which represents the
        cortical surface defined by points in the 3D physical space and their 
        neighborhood relationship. In the current TVB version, when setting up a 
        surface-based simulation, the option to configure the spatial spread of 
        the ``Local Connectivity`` is available.""")

    stimulus = patterns.SpatioTemporalPattern(
        label="Spatiotemporal stimulus",
        default=None,
        order=4,
        required=False,
        doc=
        """A ``Spatiotemporal stimulus`` can be defined at the region or surface level.
        It's composed of spatial and temporal components. For region defined stimuli
        the spatial component is just the strength with which the temporal
        component is applied to each region. For surface defined stimuli,  a
        (spatial) function, with finite-support, is used to define the strength 
        of the stimuli on the surface centred around one or more focal points. 
        In the current version of TVB, stimuli are applied to the first state 
        variable of the ``Local dynamic model``.""")

    model = models.Model(
        label="Local dynamic model",
        default=models.Generic2dOscillator,
        required=True,
        order=5,
        doc="""A tvb.simulator.Model object which describe the local dynamic
        equations, their parameters, and, to some extent, where connectivity
        (local and long-range) enters and which state-variables the Monitors
        monitor. By default the 'Generic2dOscillator' model is used. Read the 
        Scientific documentation to learn more about this model.""")

    integrator = integrators.Integrator(
        label="Integration scheme",
        default=integrators.HeunDeterministic,
        required=True,
        order=6,
        doc="""A tvb.simulator.Integrator object which is
            an integration scheme with supporting attributes such as 
            integration step size and noise specification for stochastic 
            methods. It is used to compute the time courses of the model state 
            variables.""")

    initial_conditions = arrays.FloatArray(
        label="Initial Conditions",
        default=None,
        order=-1,
        required=False,
        doc="""Initial conditions from which the simulation will begin. By
        default, random initial conditions are provided. Needs to be the same shape
        as simulator 'history', ie, initial history function which defines the 
        minimal initial state of the network with time delays before time t=0. 
        If the number of time points in the provided array is insufficient the 
        array will be padded with random values based on the 'state_variables_range'
        attribute.""")

    monitors = monitors.Monitor(
        label="Monitor(s)",
        default=monitors.TemporalAverage,
        required=True,
        order=8,
        select_multiple=True,
        doc="""A tvb.simulator.Monitor or a list of tvb.simulator.Monitor
        objects that 'know' how to record relevant data from the simulation. Two
        main types exist: 1) simple, spatial and temporal, reductions (subsets
        or averages); 2) physiological measurements, such as EEG, MEG and fMRI.
        By default the Model's specified variables_of_interest are returned,
        temporally downsampled from the raw integration rate to a sample rate of
        1024Hz.""")

    simulation_length = basic.Float(
        label="Simulation Length (ms, s, m, h)",
        default=1000.0,  # ie 1 second
        required=True,
        order=9,
        doc="""The length of a simulation (default in milliseconds).""")

    history = None  # type: SparseHistory

    @property
    def good_history_shape(self):
        "Returns expected history shape."
        n_reg = self.connectivity.number_of_regions
        shape = self.horizon, len(
            self.model.state_variables), n_reg, self.model.number_of_modes
        return shape

    calls = 0
    current_step = 0
    number_of_nodes = None
    _memory_requirement_guess = None
    _memory_requirement_census = None
    _storage_requirement = None
    _runtime = None

    # methods consist of
    # 1) generic configure
    # 2) component specific configure
    # 3) loop preparation
    # 4) loop step
    # 5) estimations

    def preconfigure(self):
        "Configure just the basic fields, so that memory can be estimated."
        self.connectivity.configure()
        if self.surface:
            self.surface.configure()
        if self.stimulus:
            self.stimulus.configure()
        self.coupling.configure()
        self.model.configure()
        self.integrator.configure()
        # monitors needs to be a list or tuple, even if there is only one...
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]
        # Configure monitors
        for monitor in self.monitors:
            monitor.configure()
        # "Nodes" refers to either regions or vertices + non-cortical regions.
        if self.surface is None:
            self.number_of_nodes = self.connectivity.number_of_regions
            LOG.info('Region simulation with %d ROI nodes',
                     self.number_of_nodes)
        else:
            rm = self.surface.region_mapping
            unmapped = self.connectivity.unmapped_indices(rm)
            self._regmap = numpy.r_[rm, unmapped]
            self.number_of_nodes = self._regmap.shape[0]
            LOG.info(
                'Surface simulation with %d vertices + %d non-cortical, %d total nodes',
                rm.size, unmapped.size, self.number_of_nodes)
        self._guesstimate_memory_requirement()

    def configure(self, full_configure=True):
        """Configure simulator and its components.

        The first step of configuration is to run the configure methods of all
        the Simulator's components, ie its traited attributes.

        Configuration of a Simulator primarily consists of calculating the
        attributes, etc, which depend on the combinations of the Simulator's
        traited attributes (keyword args).

        Converts delays from physical time units into integration steps
        and updates attributes that depend on combinations of the 6 inputs.

        Returns
        -------
        sim: Simulator
            The configured Simulator instance.

        """
        if full_configure:
            # When run from GUI, preconfigure is run separately, and we want to avoid running that part twice
            self.preconfigure()
        # Make sure spatialised model parameters have the right shape (number_of_nodes, 1)
        excluded_params = ("state_variable_range", "variables_of_interest",
                           "noise", "psi_table", "nerf_table")
        spatial_reshape = self.model.spatial_param_reshape
        for param in self.model.trait.keys():
            if param in excluded_params:
                continue
            # If it's a surface sim and model parameters were provided at the region level
            region_parameters = getattr(self.model, param)
            if self.surface is not None:
                if region_parameters.size == self.connectivity.number_of_regions:
                    new_parameters = region_parameters[
                        self.surface.region_mapping].reshape(spatial_reshape)
                    setattr(self.model, param, new_parameters)
            region_parameters = getattr(self.model, param)
            if region_parameters.size == self.number_of_nodes:
                new_parameters = region_parameters.reshape(spatial_reshape)
                setattr(self.model, param, new_parameters)
        # Configure spatial component of any stimuli
        self._configure_stimuli()
        # Set delays, provided in physical units, in integration steps.
        self.connectivity.set_idelays(self.integrator.dt)
        self.horizon = self.connectivity.idelays.max() + 1
        # Reshape integrator.noise.nsig, if necessary.
        if isinstance(self.integrator, integrators.IntegratorStochastic):
            self._configure_integrator_noise()
        # Setup history
        self._configure_history(self.initial_conditions)
        # Configure Monitors to work with selected Model, etc...
        self._configure_monitors()
        # Estimate of memory usage.
        self._census_memory_requirement()
        # Allow user to chain configure to another call or assignment.
        return self
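
    # A minimal usage sketch, assuming the default datatypes can be loaded;
    # the `load_default` keyword, dt and monitor period below are illustrative
    # assumptions, not prescribed values.
    #
    #     sim = Simulator(
    #         connectivity=connectivity.Connectivity(load_default=True),
    #         coupling=coupling.Linear(),
    #         model=models.Generic2dOscillator(),
    #         integrator=integrators.HeunDeterministic(dt=0.1),
    #         monitors=monitors.TemporalAverage(period=1.0),
    #         simulation_length=1000.0)
    #     sim.configure()
    #     (tavg_time, tavg_data), = sim.run()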

    def _handle_random_state(self, random_state):
        if random_state is not None:
            if isinstance(self.integrator, integrators.IntegratorStochastic):
                self.integrator.noise.random_stream.set_state(random_state)
                msg = "random_state supplied with seed %s"
                LOG.info(msg,
                         self.integrator.noise.random_stream.get_state()[1][0])
            else:
                LOG.warn(
                    "random_state supplied for non-stochastic integration")

    def _prepare_local_coupling(self):
        if self.surface is None:
            local_coupling = 0.0
        else:
            if self.surface.coupling_strength.size == 1:
                local_coupling = (self.surface.coupling_strength[0] *
                                  self.surface.local_connectivity.matrix)
            elif self.surface.coupling_strength.size == self.surface.number_of_vertices:
                ind = numpy.arange(self.number_of_nodes, dtype=numpy.intc)
                vec_cs = numpy.zeros((self.number_of_nodes, ))
                vec_cs[:self.surface.number_of_vertices] = \
                    self.surface.coupling_strength
                sp_cs = scipy.sparse.csc_matrix(
                    (vec_cs, (ind, ind)),
                    shape=(self.number_of_nodes, self.number_of_nodes))
                local_coupling = sp_cs * self.surface.local_connectivity.matrix
            if local_coupling.shape[1] < self.number_of_nodes:
                # must match unmapped indices handling in preconfigure
                from scipy.sparse import csr_matrix, vstack, hstack
                nn = self.number_of_nodes
                npad = nn - local_coupling.shape[0]
                rpad = csr_matrix((local_coupling.shape[0], npad))
                bpad = csr_matrix((npad, nn))
                local_coupling = vstack([hstack([local_coupling, rpad]), bpad])
        return local_coupling

    def _prepare_stimulus(self):
        if self.stimulus is None:
            stimulus = 0.0
        else:
            time = numpy.r_[0.0:self.simulation_length:self.integrator.dt]
            self.stimulus.configure_time(time.reshape((1, -1)))
            stimulus = numpy.zeros((self.model.nvar, self.number_of_nodes, 1))
            LOG.debug("stimulus shape is: %s", stimulus.shape)
        return stimulus

    def _loop_compute_node_coupling(self, step):
        "Compute delayed node coupling values."
        coupling = self.coupling(step, self.history)
        if self.surface is not None:
            coupling = coupling[:, self._regmap]
        return coupling

    def _loop_update_stimulus(self, step, stimulus):
        "Update stimulus values for current time step."
        if self.stimulus is not None:
            # TODO stim_step != current step
            stim_step = step - (self.current_step + 1)
            stimulus[self.model.cvar, :, :] = self.stimulus(stim_step).reshape(
                (1, -1, 1))

    def _loop_update_history(self, step, n_reg, state):
        "Update history."
        if self.surface is not None and state.shape[
                1] > self.connectivity.number_of_regions:
            region_state = numpy.zeros(
                (n_reg, state.shape[0],
                 state.shape[2]))  # temp (node, cvar, mode)
            numpy_add_at(region_state, self._regmap, state.transpose(
                (1, 0, 2)))  # sum within region
            region_state /= numpy.bincount(self._regmap).reshape(
                (-1, 1, 1))  # div by n node in region
            state = region_state.transpose((1, 0, 2))  # (cvar, node, mode)
        self.history.update(step, state)

    def _loop_monitor_output(self, step, state):
        observed = self.model.observe(state)
        output = [monitor.record(step, observed) for monitor in self.monitors]
        if any(outputi is not None for outputi in output):
            return output

    def __call__(self, simulation_length=None, random_state=None):
        """
        Return an iterator which steps through simulation time, generating monitor outputs.

        See the run method for a convenient way to collect all output in one call.

        :param simulation_length: Length of the simulation to perform in ms.
        :param random_state:  State of NumPy RNG to use for stochastic integration.
        :return: Iterator over monitor outputs.
        """

        self.calls += 1
        if simulation_length is not None:
            self.simulation_length = simulation_length

        # initialization
        self._guesstimate_runtime()
        self._calculate_storage_requirement()
        self._handle_random_state(random_state)
        n_reg = self.connectivity.number_of_regions
        local_coupling = self._prepare_local_coupling()
        stimulus = self._prepare_stimulus()
        state = self.current_state

        # integration loop
        n_steps = int(math.ceil(self.simulation_length / self.integrator.dt))
        for step in range(self.current_step + 1,
                          self.current_step + n_steps + 1):
            # needs implementing by history + coupling?
            node_coupling = self._loop_compute_node_coupling(step)
            self._loop_update_stimulus(step, stimulus)
            state = self.integrator.scheme(state, self.model.dfun,
                                           node_coupling, local_coupling,
                                           stimulus)
            self._loop_update_history(step, n_reg, state)
            output = self._loop_monitor_output(step, state)
            if output is not None:
                yield output

        self.current_state = state
        self.current_step = self.current_step + n_steps - 1  # -1 : don't repeat last point
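
    # A minimal sketch of iterating the simulator directly, assuming a single
    # TemporalAverage monitor.  Each yielded item is a list with one entry per
    # monitor, either None or a (time, data) pair.
    #
    #     for (tavg,) in sim(simulation_length=100.0):
    #         if tavg is not None:
    #             t, y = tavg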

    def _configure_history(self, initial_conditions):
        """
        Set initial conditions for the simulation using either the provided
        initial_conditions or, if none are provided, the model's initial()
        method. This method is called during the Simulator's __init__().

        Any initial_conditions that are provided as an argument are expected
        to have dimensions 1, 2, and 3 with shapes corresponding to the number
        of state_variables, nodes and modes, respectively. If the provided
        initial_conditions are shorter in time (dim=0) than the required
        history, the model's initial() method is called to make up the
        difference.

        """
        rng = numpy.random
        if hasattr(self.integrator, 'noise'):
            rng = self.integrator.noise.random_stream
        # Default initial conditions
        if initial_conditions is None:
            n_time, n_svar, n_node, n_mode = self.good_history_shape
            LOG.info(
                'Preparing initial history of shape %r using model.initial()',
                self.good_history_shape)
            if self.surface is not None:
                n_node = self.number_of_nodes
            history = self.model.initial(self.integrator.dt,
                                         (n_time, n_svar, n_node, n_mode), rng)
        # ICs provided
        else:
            # history should be [timepoints, state_variables, nodes, modes]
            LOG.info('Using provided initial history of shape %r',
                     initial_conditions.shape)
            n_time, n_svar, n_node, n_mode = ic_shape = initial_conditions.shape
            nr = self.connectivity.number_of_regions
            if self.surface is not None and n_node == nr:
                initial_conditions = initial_conditions[:, :, self._regmap]
                return self._configure_history(initial_conditions)
            elif ic_shape[1:] != self.good_history_shape[1:]:
                raise ValueError(
                    "Incorrect history sample shape %s, expected %s" %
                    (ic_shape[1:], self.good_history_shape[1:]))
            else:
                if ic_shape[0] >= self.horizon:
                    LOG.debug("Using last %d time-steps for history.",
                              self.horizon)
                    history = initial_conditions[
                        -self.horizon:, :, :, :].copy()
                else:
                    LOG.debug('Padding initial conditions with model.initial')
                    history = self.model.initial(self.integrator.dt,
                                                 self.good_history_shape, rng)
                    shift = self.current_step % self.horizon
                    history = numpy.roll(history, -shift, axis=0)
                    history[:ic_shape[0], :, :, :] = initial_conditions
                    history = numpy.roll(history, shift, axis=0)
                self.current_step += ic_shape[0] - 1
        LOG.info('Final initial history shape is %r', history.shape)
        # create initial state from history
        self.current_state = history[self.current_step % self.horizon].copy()
        LOG.debug('initial state has shape %r' % (self.current_state.shape, ))
        if self.surface is not None and history.shape[
                2] > self.connectivity.number_of_regions:
            n_reg = self.connectivity.number_of_regions
            (nt, ns, _, nm), ax = history.shape, (2, 0, 1, 3)
            region_history = numpy.zeros((nt, ns, n_reg, nm))
            numpy_add_at(region_history.transpose(ax), self._regmap,
                         history.transpose(ax))
            region_history /= numpy.bincount(self._regmap).reshape((-1, 1))
            history = region_history
        # create history query implementation
        self.history = SparseHistory(self.connectivity.weights,
                                     self.connectivity.idelays,
                                     self.model.cvar,
                                     self.model.number_of_modes)
        # initialize its buffer
        self.history.initialize(history)

    def _configure_integrator_noise(self):
        """
        This enables the noise to be state-variable specific and/or to enter
        only via specific brain structures, for example if we only want to
        consider noise as an external input entering the brain via the
        appropriate thalamic nuclei.

        Three shapes are supported for integrator.noise.nsig:
            1) number_of_nodes;

            2) number_of_state_variables; and 

            3) (number_of_state_variables, number_of_nodes).

        """

        noise = self.integrator.noise

        if self.integrator.noise.ntau > 0.0:
            self.integrator.noise.configure_coloured(
                self.integrator.dt, self.good_history_shape[1:])
        else:
            self.integrator.noise.configure_white(self.integrator.dt,
                                                  self.good_history_shape[1:])

        if self.surface is not None:
            if self.integrator.noise.nsig.size == self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[
                    self.surface.region_mapping]
            elif self.integrator.noise.nsig.size == self.model.nvar * self.connectivity.number_of_regions:
                self.integrator.noise.nsig = \
                    self.integrator.noise.nsig[:, self.surface.region_mapping]

        good_nsig_shape = (self.model.nvar, self.number_of_nodes,
                           self.model.number_of_modes)
        nsig = self.integrator.noise.nsig
        LOG.debug("Given noise shape is %s", nsig.shape)
        if nsig.shape in (good_nsig_shape, (1, )):
            return
        elif nsig.shape == (self.model.nvar, ):
            nsig = nsig.reshape((self.model.nvar, 1, 1))
        elif nsig.shape == (self.number_of_nodes, ):
            nsig = nsig.reshape((1, self.number_of_nodes, 1))
        elif nsig.shape == (self.model.nvar, self.number_of_nodes):
            nsig = nsig.reshape((self.model.nvar, self.number_of_nodes, 1))
        else:
            msg = "Bad Simulator.integrator.noise.nsig shape: %s"
            LOG.error(msg % str(nsig.shape))

        LOG.debug("Corrected noise shape is %s", nsig.shape)
        self.integrator.noise.nsig = nsig

    def _configure_monitors(self):
        """ Configure the requested Monitors for this Simulator """
        # Coerce to list if required
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]
        # Configure monitors
        for monitor in self.monitors:
            monitor.config_for_sim(self)

    def _configure_stimuli(self):
        """ Configure the defined Stimuli for this Simulator """
        if self.stimulus is not None:
            if self.surface:
                self.stimulus.configure_space(self.surface.region_mapping)
            else:
                self.stimulus.configure_space()

    # used by simulator adaptor
    def memory_requirement(self):
        """
        Return an estimate of the memory requirements (Bytes) for this
        simulator's current configuration.
        """
        self._guesstimate_memory_requirement()
        return self._memory_requirement_guess

    # appears to be unused
    def runtime(self, simulation_length):
        """
        Return an estimated run time (seconds) for the simulator's current 
        configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._guesstimate_runtime()
        return self._runtime

    # used by simulator adaptor
    def storage_requirement(self, simulation_length):
        """
        Return an estimated storage requirement (Bytes) for the simulator's
        current configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._calculate_storage_requirement()
        return self._storage_requirement

    def _guesstimate_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator.

        The guesstimate is based on the shape of the dominant arrays, and as
        such can operate before configuration.

        NOTE: Assumes returned/yielded data is in some sense "taken care of"
            outside the simulator, and so doesn't consider it, making the
            simulator's history, and surface if present, the dominant memory
            consumers...

        """
        if self.surface:
            number_of_nodes = self.surface.number_of_vertices
        else:
            number_of_nodes = self.connectivity.number_of_regions

        number_of_regions = self.connectivity.number_of_regions

        magic_number = 2.42  # The current guesstimate is low by about a factor of 2; it seems safer to overestimate...
        bits_64 = 8.0  # Bytes
        bits_32 = 4.0  # Bytes
        #NOTE: The speed hack for getting the first element of hist_shape should
        #      partially resolve calling this method with a non-configured
        #      connectivity; a less common issue remains if there are no tract_lengths...
        hist_shape = (
            self.connectivity.tract_lengths.max() /
            (self.conduction_speed or self.connectivity.speed or 3.0) /
            self.integrator.dt, self.model.nvar, number_of_nodes,
            self.model.number_of_modes)
        LOG.debug("Estimated history shape is %r", hist_shape)

        memreq = numpy.prod(hist_shape) * bits_64
        if self.surface:
            memreq += self.surface.number_of_triangles * 3 * bits_32 * 2  # normals
            memreq += self.surface.number_of_vertices * 3 * bits_64 * 2  # normals
            memreq += number_of_nodes * number_of_regions * bits_64 * 4  # region_mapping, region_average, region_sum
            #???memreq += self.surface.local_connectivity.matrix.nnz * 8

        if not hasattr(self.monitors, '__len__'):
            self.monitors = [self.monitors]

        for monitor in self.monitors:
            if not isinstance(monitor, monitors.Bold):
                stock_shape = (monitor.period / self.integrator.dt,
                               self.model.variables_of_interest.shape[0],
                               number_of_nodes, self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                if hasattr(monitor, "sensors"):
                    try:
                        memreq += number_of_nodes * monitor.sensors.number_of_sensors * bits_64  # projection_matrix
                    except AttributeError:
                        LOG.debug(
                            "No sensors specified, guessing memory based on default EEG."
                        )
                        memreq += number_of_nodes * 62.0 * bits_64

            else:
                stock_shape = (monitor.hrf_length * monitor._stock_sample_rate,
                               self.model.variables_of_interest.shape[0],
                               number_of_nodes, self.model.number_of_modes)
                interim_stock_shape = (
                    1.0 / (2.0**-2 * self.integrator.dt),
                    self.model.variables_of_interest.shape[0], number_of_nodes,
                    self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                memreq += numpy.prod(interim_stock_shape) * bits_64

        if psutil and memreq > psutil.virtual_memory().total:
            LOG.warning(
                "There may be insufficient memory for this simulation.")

        self._memory_requirement_guess = magic_number * memreq
        msg = "Memory requirement estimate: simulation will need about %.1f MB"
        LOG.info(msg, self._memory_requirement_guess / 2**20)
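
    # A rough worked example under assumed values: for a 76-region simulation
    # with nvar=2, a single mode, dt=0.0625 ms, maximum tract length 153 mm and
    # conduction speed 3 mm/ms, the history block is roughly
    # (153 / 3 / 0.0625) * 2 * 76 * 1 * 8 bytes, i.e. just under 1 MB, before
    # the magic_number safety factor is applied.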

    def _census_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator. 

        Guesstimate is based on a census of the dominant arrays after the
        simulator has been configured.

        NOTE: Assumes returned/yielded data is in some sense "taken care of"
            outside the simulator, and so doesn't consider it, making the
            simulator's history, and surface if present, the dominant memory
            consumers...

        """
        magic_number = 2.42  # The current guesstimate is low by about a factor of 2; it seems safer to overestimate...
        memreq = self.history.nbytes
        try:
            memreq += self.surface.triangles.nbytes * 2
            memreq += self.surface.vertices.nbytes * 2
            memreq += self.surface.region_mapping.nbytes * self.number_of_nodes * 8. * 4  # region_average, region_sum
            memreq += self.surface.eeg_projection.nbytes
            memreq += self.surface.local_connectivity.matrix.nnz * 8
        except AttributeError:
            pass

        for monitor in self.monitors:
            memreq += monitor._stock.nbytes
            if isinstance(monitor, monitors.Bold):
                memreq += monitor._interim_stock.nbytes

        if psutil and memreq > psutil.virtual_memory().total:
            LOG.warning("Memory estimate exceeds total available RAM.")

        self._memory_requirement_census = magic_number * memreq
        #import pdb; pdb.set_trace()
        msg = "Memory requirement census: simulation will need about %.1f MB"
        LOG.info(msg % (self._memory_requirement_census / 1048576.0))

    def _guesstimate_runtime(self):
        """
        Estimate the runtime for this simulator.

        Spreading work across parallel executions of larger arrays means this
        will be an over-estimate, or rather a single-threaded estimate...
        Different choices of integrator and monitors have an additional,
        though relatively minor, effect on the magic number.

        """
        magic_number = 6.57e-06  # seconds
        self._runtime = (magic_number * self.number_of_nodes *
                         self.model.nvar * self.model.number_of_modes *
                         self.simulation_length / self.integrator.dt)
        msg = "Simulation runtime should be about %0.3f seconds"
        LOG.info(msg, self._runtime)
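
    # A rough worked example under assumed values: with 76 nodes, nvar=2, a
    # single mode, dt=0.0625 ms and a 1000 ms simulation, the estimate is
    # 6.57e-06 * 76 * 2 * 1 * (1000 / 0.0625), i.e. roughly 16 seconds of
    # single-threaded runtime.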

    def _calculate_storage_requirement(self):
        """
        Calculate the storage requirement for the simulator, configured with
        models, monitors, etc being run for a particular simulation length. 
        While this is only approximate, it is far more reliable/accurate than
        the memory and runtime guesstimates.
        """
        LOG.info("Calculating storage requirement for ...")
        strgreq = 0
        for monitor in self.monitors:
            # Avoid division by zero for monitor not yet configured
            # (in framework this is executed, when only preconfigure has been called):
            current_period = monitor.period or self.integrator.dt
            strgreq += (TvbProfile.current.MAGIC_NUMBER *
                        self.simulation_length * self.number_of_nodes *
                        self.model.nvar * self.model.number_of_modes /
                        current_period)
        LOG.info("Calculated storage requirement for simulation: %d " %
                 int(strgreq))
        self._storage_requirement = int(strgreq)
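
    # A rough worked example under assumed values: taking, for illustration
    # only, TvbProfile.current.MAGIC_NUMBER = 9 bytes per sample, a single
    # monitor with period 1.0 ms, 76 nodes, nvar=2 and a single mode, a
    # 1000 ms simulation needs about 9 * 1000 * 76 * 2 * 1 / 1.0 bytes,
    # i.e. roughly 1.3 MB of storage.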

    def run(self, **kwds):
        "Convenience method to call the simulator with **kwds and collect output data."
        ts, xs = [], []
        for _ in self.monitors:
            ts.append([])
            xs.append([])
        wall_time_start = time.time()
        for data in self(**kwds):
            for tl, xl, t_x in zip(ts, xs, data):
                if t_x is not None:
                    t, x = t_x
                    tl.append(t)
                    xl.append(x)
        elapsed_wall_time = time.time() - wall_time_start
        LOG.info("%.3f s elapsed, %.3fx real time", elapsed_wall_time,
                 elapsed_wall_time * 1e3 / self.simulation_length)
        for i in range(len(ts)):
            ts[i] = numpy.array(ts[i])
            xs[i] = numpy.array(xs[i])
        return list(zip(ts, xs))
Example n. 8
0
    def test_integrator_base_class(self):
        integrator = integrators.Integrator()
        self.assertEqual(integrator.dt, dt)
        self.assertRaises(NotImplementedError, self._call_base_scheme,
                          integrator)
Example n. 9
0
class BalloonModel(core.Type):
    """

    A class for calculating the simulated BOLD signal given a TimeSeries
    object of TVB and returning another TimeSeries object.

    The haemodynamic model parameters are based on constants for a 1.5 T scanner.
        
    """

    #NOTE: a potential problem when the input is a TimeSeriesSurface.
    #TODO: add a spatial averaging for TimeSeriesSurface.

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries that represents the input neural activity""",
        order=1)
    # it also sets the bold sampling period.
    dt = basic.Float(
        label=":math:`dt`",
        default=0.002,
        required=True,
        doc="""The integration time step size for the balloon model (s).
        If none is provided, by default, the TimeSeries sample period is used.""",
        order=2)

    integrator = integrators_module.Integrator(
        label="Integration scheme",
        default=integrators_module.HeunDeterministic,
        required=True,
        order=-1,
        doc=""" A tvb.simulator.Integrator object which is
        an integration scheme with supporting attributes such as 
        integration step size and noise specification for stochastic 
        methods. It is used to compute the time courses of the balloon model state 
        variables.""")

    bold_model = basic.Enumerate(
        label="Select BOLD model equations",
        options=["linear", "nonlinear"],
        default=["nonlinear"],
        select_multiple=False,
        doc="""Select the set of equations for the BOLD model.""",
        order=4)

    RBM = basic.Bool(
        label="Revised BOLD Model",
        default=True,
        required=True,
        doc="""Select classical vs revised BOLD model (CBM or RBM).
        Coefficients  k1, k2 and k3 will be derived accordingly.""",
        order=5)

    neural_input_transformation = basic.Enumerate(
        label="Neural input transformation",
        options=["none", "abs_diff", "sum"],
        default=["none"],
        select_multiple=False,
        doc=
        """ This represents the operation to perform on the state-variable(s) of
        the model used to generate the input TimeSeries. ``none`` takes the
        first state-variable as neural input; ``abs_diff`` is the absolute
        value of the derivative (first-order difference) of the first state
        variable; ``sum`` sums all the state-variables of the input TimeSeries.""",
        order=3)

    tau_s = basic.Float(
        label=r":math:`\tau_s`",
        default=0.65,
        required=True,
        doc="""Balloon model parameter. Time of signal decay (s)""",
        order=-1)

    tau_f = basic.Float(
        label=r":math:`\tau_f`",
        default=0.41,
        required=True,
        doc=""" Balloon model parameter. Time of flow-dependent elimination or
        feedback regulation (s). The average time blood takes to traverse the
        venous compartment. It is the ratio of resting blood volume (V0) to
        resting blood flow (F0).""",
        order=-1)

    tau_o = basic.Float(label=r":math:`\tau_o`",
                        default=0.98,
                        required=True,
                        doc="""
        Balloon model parameter. Haemodynamic transit time (s). The average
        time blood takes to traverse the venous compartment. It is the ratio
        of resting blood volume (V0) to resting blood flow (F0).""",
                        order=-1)

    alpha = basic.Float(
        label=r":math:`\tau_f`",
        default=0.32,
        required=True,
        doc=
        """Balloon model parameter. Stiffness parameter. Grubb's exponent.""",
        order=-1)

    TE = basic.Float(label=":math:`TE`",
                     default=0.04,
                     required=True,
                     doc="""BOLD parameter. Echo Time""",
                     order=-1)

    V0 = basic.Float(label=":math:`V_0`",
                     default=4.0,
                     required=True,
                     doc="""BOLD parameter. Resting blood volume fraction.""",
                     order=-1)

    E0 = basic.Float(
        label=":math:`E_0`",
        default=0.4,
        required=True,
        doc="""BOLD parameter. Resting oxygen extraction fraction.""",
        order=-1)

    epsilon = arrays.FloatArray(
        label=":math:`\epsilon`",
        default=numpy.array([0.5]),
        range=basic.Range(lo=0.5, hi=2.0, step=0.25),
        required=True,
        doc=
        """ BOLD parameter. Ratio of intra- and extravascular signals. In principle  this
        parameter could be derived from empirical data and spatialized.""",
        order=-1)

    nu_0 = basic.Float(
        label=r":math:`\nu_0`",
        default=40.3,
        required=True,
        doc=
        """BOLD parameter. Frequency offset at the outer surface of magnetized vessels (Hz).""",
        order=-1)

    r_0 = basic.Float(
        label=":math:`r_0`",
        default=25.,
        required=True,
        doc=
        """ BOLD parameter. Slope r0 of intravascular relaxation rate (Hz). Only used for
        ``revised`` coefficients. """,
        order=-1)

    def evaluate(self):
        """
        Calculate simulated BOLD signal
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        #NOTE: Just using the first state variable, although in the Bold monitor
        #      input is the sum over the state-variables. Only time-series
        #      from basic monitors should be used as inputs.

        neural_activity, t_int = self.input_transformation(
            self.time_series, self.neural_input_transformation)
        input_shape = neural_activity.shape
        result_shape = self.result_shape(input_shape)
        LOG.debug("Result shape will be: %s" % str(result_shape))

        if self.dt is None:
            self.dt = self.time_series.sample_period / 1000.  # (s) integration time step
            msg = "Integration time step size for the balloon model is %s seconds" % str(
                self.dt)
            LOG.debug(msg)

        #NOTE: Avoid upsampling ...
        if self.dt < (self.time_series.sample_period / 1000.):
            msg = "Integration time step shouldn't be smaller than the sampling period of the input signal."
            LOG.error(msg)

        balloon_nvar = 4

        #NOTE: hard coded initial conditions
        state = numpy.zeros((input_shape[0], balloon_nvar, input_shape[2],
                             input_shape[3]))  # s
        state[0, 1, :] = 1.  # f
        state[0, 2, :] = 1.  # v
        state[0, 3, :] = 1.  # q

        # BOLD model coefficients
        k = self.compute_derived_parameters()
        k1, k2, k3 = k[0], k[1], k[2]

        # prepare integrator
        self.integrator.dt = self.dt
        self.integrator.configure()
        LOG.debug("Integration time step size will be: %s seconds" %
                  str(self.integrator.dt))

        scheme = self.integrator.scheme

        # NOTE: the following variables are not used in this integration but
        # required due to the way integrators scheme has been defined.

        local_coupling = 0.0
        stimulus = 0.0

        # Do some checks:
        if numpy.isnan(neural_activity).any():
            LOG.warning("NaNs detected in the neural activity!!")

        # normalise the time-series.
        neural_activity = neural_activity - neural_activity.mean(
            axis=0)[numpy.newaxis, :]

        # solve equations
        for step in range(1, t_int.shape[0]):
            state[step, :] = scheme(state[step - 1, :], self.balloon_dfun,
                                    neural_activity[step, :], local_coupling,
                                    stimulus)
            if numpy.isnan(state[step, :]).any():
                LOG.warning("NaNs detected...")

        # NOTE: just for the sake of clarity, define the variables used in the BOLD model
        s = state[:, 0, :]
        f = state[:, 1, :]
        v = state[:, 2, :]
        q = state[:, 3, :]

        #import pdb; pdb.set_trace()

        # BOLD models
        if self.bold_model == "nonlinear":
            """
            Non-linear BOLD model equations.
            Page 391. Eq. (13) top in [Stephan2007]_
            """
            y_bold = numpy.array(self.V0 * (k1 * (1. - q) + k2 *
                                            (1. - q / v) + k3 * (1. - v)))
            y_b = y_bold[:, numpy.newaxis, :, :]
            LOG.debug("Max value: %s" % str(y_b.max()))

        else:
            """
            Linear BOLD model equations.
            Page 391. Eq. (13) bottom in [Stephan2007]_ 
            """
            y_bold = numpy.array(self.V0 * ((k1 + k2) * (1. - q) + (k3 - k2) *
                                            (1. - v)))
            y_b = y_bold[:, numpy.newaxis, :, :]

        sample_period = 1. / self.dt

        bold_signal = time_series.TimeSeriesRegion(data=y_b,
                                                   time=t_int,
                                                   sample_period=sample_period,
                                                   sample_period_unit='s',
                                                   use_storage=False)

        return bold_signal

    def compute_derived_parameters(self):
        """
        Compute derived parameters :math:`k_1`, :math:`k_2` and :math:`k_3`.
        """

        if not self.RBM:
            """
            Classical BOLD Model Coefficients [Obata2004]_
            Page 389 in [Stephan2007]_, Eq. (3)
            """
            k1 = 7. * self.E0
            k2 = 2. * self.E0
            k3 = 1. - self.epsilon
        else:
            """
            Revised BOLD Model Coefficients.
            Generalized BOLD signal model.
            Page 400 in [Stephan2007]_, Eq. (12)
            """
            k1 = 4.3 * self.nu_0 * self.E0 * self.TE
            k2 = self.epsilon * self.r_0 * self.E0 * self.TE
            k3 = 1 - self.epsilon

        return numpy.array([k1, k2, k3])
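
    # A rough worked example with the default parameters above (RBM=True,
    # nu_0=40.3, E0=0.4, TE=0.04, epsilon=0.5, r_0=25.0):
    #     k1 = 4.3 * 40.3 * 0.4 * 0.04 ~= 2.77
    #     k2 = 0.5 * 25.0 * 0.4 * 0.04  = 0.20
    #     k3 = 1 - 0.5                  = 0.50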

    def input_transformation(self, time_series, mode):
        """
        Perform an operation on the input time-series.
        """

        LOG.debug("Computing: %s on the input time series" % str(mode))

        if mode == "none":
            ts = time_series.data[:, 0, :, :]
            ts = ts[:, numpy.newaxis, :, :]
            t_int = time_series.time / 1000.  # (s)

        elif mode == "abs_diff":
            ts = abs(numpy.diff(time_series.data, axis=0))
            t_int = (time_series.time[1:] -
                     time_series.time[0:-1]) / 1000.  # (s)

        elif mode == "sum":
            ts = numpy.sum(time_series.data, axis=1)
            ts = ts[:, numpy.newaxis, :, :]
            t_int = time_series.time / 1000.  # (s)

        else:
            LOG.error("Bad transformation mode %r; must be one of "
                      "('none', 'abs_diff', 'sum')" % mode)
            raise ValueError("Bad transformation mode: %r" % mode)

        return ts, t_int

    def balloon_dfun(self, state_variables, neural_input, local_coupling=0.0):
        r"""
        The Balloon model equations. See Eqs. (4-10) in [Stephan2007]_

        .. math::

                \frac{ds}{dt} &= x - \kappa\,s - \gamma \,(f-1) \\
                \frac{df}{dt} &= s \\
                \frac{dv}{dt} &= \frac{1}{\tau_o} \, (f - v^{1/\alpha})\\
                \frac{dq}{dt} &= \frac{1}{\tau_o}(f \, \frac{1-(1-E_0)^{1/f}}{E_0} - v^{1/\alpha} \frac{q}{v})\\
                \kappa &= \frac{1}{\tau_s}\\
                \gamma &= \frac{1}{\tau_f}
        """

        s = state_variables[0, :]
        f = state_variables[1, :]
        v = state_variables[2, :]
        q = state_variables[3, :]

        x = neural_input[0, :]

        ds = x - (1. / self.tau_s) * s - (1. / self.tau_f) * (f - 1)
        df = s
        dv = (1. / self.tau_o) * (f - v**(1. / self.alpha))
        dq = (1. / self.tau_o) * ((f * (1. -
                                        (1. - self.E0)**(1. / f)) / self.E0) -
                                  (v**(1. / self.alpha)) * (q / v))

        return numpy.array([ds, df, dv, dq])
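        # Sanity check: at the resting state s = 0, f = v = q = 1 with zero
        # neural input, all four derivatives vanish (e.g. dq reduces to
        # (1/tau_o) * ((1 - (1 - E0))/E0 - 1) = 0), which is why the state
        # array is initialised to (0, 1, 1, 1) before integration.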

    def result_shape(self, input_shape):
        """Return the shape of the main result of the fMRI balloon model
        (the BOLD signal), which matches the shape of the input time-series."""
        result_shape = (input_shape[0], input_shape[1], input_shape[2],
                        input_shape[3])
        return result_shape

    def result_size(self, input_shape):
        """
        Return the storage size, in bytes, of the main result (the BOLD signal,
        stored as 8-byte floats).
        """
        result_size = numpy.prod(self.result_shape(input_shape)) * 8.0  # Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Return the storage size, in bytes, of the extended result. There are
        currently no derived attributes, so this equals the main result size.
        """
        extend_size = self.result_size(input_shape)  # Currently no derived attributes.
        return extend_size
"""
Test for tvb.simulator.integrators module

TODO: evaluate equations?

.. moduleauthor:: Paula Sanz Leon <*****@*****.**>
.. moduleauthor:: Marmaduke Woodman <*****@*****.**>

"""

import numpy
import pytest
from tvb.tests.library.base_testcase import BaseTestCase
from tvb.simulator import integrators
from tvb.simulator import noise

# For the moment all integrators inherit dt from the base class
dt = integrators.Integrator().dt


class TestIntegrators(BaseTestCase):
    """
    Define test cases for integrators:
        - initialise each class
        - check default parameters
        - change parameters

    """
    def _dummy_dfun(self, X, coupling, local_coupling):
        # equiv to linear system with identity matrix
        return X

    def _test_scheme(self, integrator):
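        # NOTE: the original body is truncated in this excerpt. The lines below
        # are a minimal, assumed sketch (not the upstream test): step a small
        # random state once and check that the scheme preserves its shape.
        integrator.configure()
        shape = (2, 3, 4, 1)
        if hasattr(integrator, 'noise'):
            # stochastic schemes need their noise source configured first
            integrator.noise.configure_white(integrator.dt, shape)
        X = numpy.random.random(shape)
        X_next = integrator.scheme(X, self._dummy_dfun, 0.0, 0.0, 0.0)
        self.assertEqual(X.shape, X_next.shape)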
Esempio n. 11
0
    def test_integrator_base_class(self):
        integrator = integrators.Integrator()
        self.assertEqual(integrator.dt, dt)
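
    # Companion sketch (assumed, not part of this excerpt): keyword overrides
    # on a traited integrator should take precedence over the inherited dt.
    def test_integrator_dt_override(self):
        heun = integrators.HeunDeterministic(dt=2.0 ** -4)
        self.assertEqual(heun.dt, 2.0 ** -4)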