Example 1
    def _configure_cosimulation(self):
        """This method will
           - set the synchronization time and number of steps,
           - check the time and the variable of interest are correct
           - create and initialize CosimHistory,
           - configure the cosimulation monitor
           - zero connectivity weights to/from nodes modelled exclusively by the other cosimulator
           """
        # the synchronization time should be at least equal to integrator.dt:
        self.synchronization_time = numpy.maximum(self.synchronization_time,
                                                  self.integrator.dt)
        # Compute the number of synchronization time steps:
        self.synchronization_n_step = iround(self.synchronization_time /
                                             self.integrator.dt)
        # Check that the synchronization time does not exceed the smallest nonzero
        # connectivity delay (this condition may be refined with usage):
        if self.synchronization_n_step > numpy.min(
                self.connectivity.idelays[numpy.nonzero(
                    self.connectivity.idelays)]):
            raise ValueError('The synchronization time is too long: it must not exceed '
                             'the smallest nonzero connectivity delay.')

        # Check that the coupling variables are among the cosimulation variables of interest:
        for cvar in self.model.cvar:
            if cvar not in self.voi:
                raise ValueError(
                    'The variables of interest need to contain the coupling variables'
                )

        self.good_cosim_update_values_shape = (self.synchronization_n_step,
                                               self.voi.shape[0],
                                               self.proxy_inds.shape[0],
                                               self.model.number_of_modes)
        # We create a CosimHistory,
        # for delayed state [synchronization_step+1, n_var, n_node, n_mode],
        # including, initialization of the delayed state from the simulator's history,
        # which must be already configured.
        self.cosim_history = CosimHistory.from_simulator(self)

        # Reconfigure the connectivity for regions modelled exclusively by the other cosimulator:
        if self.exclusive:
            # Use numpy.ix_ so the assignment modifies the weights array in place;
            # chained fancy indexing would only write into a temporary copy.
            self.connectivity.weights[numpy.ix_(self.proxy_inds,
                                                self.proxy_inds)] = 0.0
            self.connectivity.configure()

        # Configure the cosimulation monitors:
        self.number_of_cosim_monitors = len(self.cosim_monitors)
        self._cosim_monitors_noncoupling_indices = list(
            range(self.number_of_cosim_monitors))
        self._cosim_monitors_coupling_indices = []
        for iM, monitor in enumerate(self.cosim_monitors):
            monitor.configure()
            monitor.config_for_sim(self)
            if isinstance(monitor, CosimMonitorFromCoupling):
                self._cosim_monitors_noncoupling_indices.remove(iM)
                self._cosim_monitors_coupling_indices.append(iM)
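A minimal, self-contained sketch of the two numerical steps above: deriving the number of synchronization steps from a synchronization time and an integration step, and zeroing the proxy-node submatrix of a weights matrix in place with numpy.ix_. The helper iround is assumed here to round to the nearest integer, and all values are illustrative.

import numpy


def iround(x):
    # Assumed behaviour of the iround helper used above: nearest integer.
    return int(numpy.round(x))


dt = 0.1                            # integration step (ms), illustrative
synchronization_time = 1.0          # clipped to be at least dt
synchronization_n_step = iround(max(synchronization_time, dt) / dt)   # -> 10

weights = numpy.ones((4, 4))
proxy_inds = numpy.array([1, 3])
# numpy.ix_ builds an open mesh, so the assignment writes into the original
# array; chained fancy indexing would only modify a temporary copy.
weights[numpy.ix_(proxy_inds, proxy_inds)] = 0.0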
Example 2
    def config_for_sim(self, simulator):
        """Configure monitor for given simulator.

        Grab the Simulator's integration step size. Set the monitor's variables
        of interest based on the Monitor's 'variables_of_interest' attribute, if
        it was specified, otherwise use the 'variables_of_interest' specified
        for the Model. Calculate the number of integration steps (isteps)
        between returns by the record method. This method is called from within
        the Simulator's configure() method.

        """
        self.dt = simulator.integrator.dt
        self.istep = iround(self.period / self.dt)
        self.voi = self.variables_of_interest
        if self.voi is None or self.voi.size == 0:
            self.voi = numpy.r_[:len(simulator.model.variables_of_interest)]
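As a quick illustration of the computation above: with integration step dt and monitor period, the monitor records one sample every iround(period / dt) integration steps, and an empty variables_of_interest falls back to the indices of all model variables. A hedged, standalone sketch with illustrative values (iround again assumed to round to the nearest integer):

import numpy


def iround(x):
    # Assumed nearest-integer rounding, as in the call above.
    return int(numpy.round(x))


dt = 0.05            # integration step (ms), illustrative
period = 1.0         # monitor sampling period (ms), illustrative
istep = iround(period / dt)      # -> 20: record every 20 integration steps

voi = None
if voi is None or getattr(voi, "size", 0) == 0:
    voi = numpy.r_[:3]           # -> array([0, 1, 2]), one index per model variable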
Example 3
    def evaluate(self):
        """
        Calculate the continuous wavelet transform of time_series.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        ts_shape = self.time_series.data.shape

        if self.frequencies.step == 0:
            LOG.warning(
                "Frequency step can't be 0! Trying default step, 2e-3.")
            self.frequencies.step = 0.002

        freqs = numpy.arange(self.frequencies.lo, self.frequencies.hi,
                             self.frequencies.step)

        if (freqs.size == 0) or any(
                freqs <= 0.0
        ):  #TODO: Maybe should limit number of freqs... ~100 is probably a reasonable upper bound.
            LOG.warning("Invalid frequency range! Falling back to default.")
            util.log_debug_array(LOG, freqs, "freqs")
            self.frequencies = basic.Range(lo=0.008, hi=0.060, step=0.002)
            freqs = numpy.arange(self.frequencies.lo, self.frequencies.hi,
                                 self.frequencies.step)

        util.log_debug_array(LOG, freqs, "freqs")

        sample_rate = self.time_series.sample_rate

        # Duke: code below is as given by Andreas Spiegler, I've just wrapped
        # some of the original argument names
        nf = len(freqs)
        temporal_step = max(
            (1, iround(self.sample_period / self.time_series.sample_period)))
        nt = int(numpy.ceil(ts_shape[0] / temporal_step))

        # Broadcast a scalar Q ratio across all frequencies; keep an array as given.
        if not isinstance(self.q_ratio, numpy.ndarray):
            Q_ratio = self.q_ratio * numpy.ones((1, nf))
        else:
            Q_ratio = self.q_ratio

        if numpy.nanmin(Q_ratio) < 5:
            msg = "Q_ratio must not be lower than 5!"
            LOG.error(msg)
            raise ValueError(msg)

        if numpy.nanmax(freqs) > sample_rate / 2.0:
            msg = "Sampling rate is too low for the requested frequency range!"
            LOG.error(msg)
            raise ValueError(msg)

        #TODO: This isn't used, but min frequency seems like it should be important... Check with A.S.
        #  fmin = 3.0 * numpy.nanmin(Q_ratio) * sample_rate / numpy.pi / nt
        sigma_f = freqs / Q_ratio
        sigma_t = 1.0 / (2.0 * numpy.pi * sigma_f)

        if self.normalisation == 'energy':
            Amp = 1.0 / numpy.sqrt(
                sample_rate * numpy.sqrt(numpy.pi) * sigma_t)
        elif self.normalisation == 'gabor':
            Amp = numpy.sqrt(2.0 / numpy.pi) / sample_rate / sigma_t
        else:
            raise ValueError("Unknown normalisation: %r" % self.normalisation)

        coef_shape = (nf, nt, ts_shape[1], ts_shape[2], ts_shape[3])

        coef = numpy.zeros(coef_shape, dtype=numpy.complex128)
        util.log_debug_array(LOG, coef, "coef")
        scales = numpy.arange(0, nf, 1)
        for i in scales:
            f0 = freqs[i]
            SDt = sigma_t[(0, i)]
            A = Amp[(0, i)]
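            # Build a complex Morlet-style wavelet for this frequency: a Gaussian
            # envelope of temporal width SDt modulating a complex exponential at f0,
            # sampled over roughly four standard deviations and then mirrored with
            # its conjugate so the final wavelet is centred about t = 0.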
            x = numpy.arange(0, 4.0 * SDt * sample_rate, 1) / sample_rate
            wvlt = A * numpy.exp(-x**2 / (2.0 * SDt**2)) * numpy.exp(
                2j * numpy.pi * f0 * x)
            wvlt = numpy.hstack((numpy.conjugate(wvlt[-1:0:-1]), wvlt))
            #util.log_debug_array(LOG, wvlt, "wvlt")

            for var in range(ts_shape[1]):
                for node in range(ts_shape[2]):
                    for mode in range(ts_shape[3]):
                        data = self.time_series.data[:, var, node, mode]
                        wt = signal.convolve(data, wvlt, 'same')
                        #util.log_debug_array(LOG, wt, "wt")
                        res = wt[0::temporal_step]
                        #NOTE: this is a horrible horrible quick hack (alas, a solution) to avoid broadcasting errors
                        # when using dt and sample periods which are not powers of 2.
                        coef[i, :, var, node, mode] = res if len(
                            res) == nt else res[:coef.shape[1]]

        util.log_debug_array(LOG, coef, "coef")

        spectra = spectral.WaveletCoefficients(
            source=self.time_series,
            mother=self.mother,
            sample_period=self.sample_period,
            frequencies=self.frequencies,
            normalisation=self.normalisation,
            q_ratio=self.q_ratio,
            array_data=coef,
            use_storage=False)

        return spectra
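The heart of the nested loop above is a per-frequency convolution of each one-dimensional signal with a complex Morlet wavelet, followed by downsampling. A minimal sketch of that inner step for a single synthetic signal, with all parameter values illustrative and the amplitude normalisation omitted:

import numpy
from scipy import signal

sample_rate = 250.0                                  # Hz, illustrative
t = numpy.arange(0, 2.0, 1.0 / sample_rate)
data = numpy.sin(2.0 * numpy.pi * 10.0 * t)          # 10 Hz test signal

f0 = 10.0                                            # analysed frequency (Hz)
q_ratio = 5.0
sigma_t = q_ratio / (2.0 * numpy.pi * f0)            # temporal width of the envelope

x = numpy.arange(0, 4.0 * sigma_t * sample_rate, 1) / sample_rate
wvlt = numpy.exp(-x ** 2 / (2.0 * sigma_t ** 2)) * numpy.exp(2j * numpy.pi * f0 * x)
wvlt = numpy.hstack((numpy.conjugate(wvlt[-1:0:-1]), wvlt))   # mirror about t = 0

wt = signal.convolve(data, wvlt, 'same')             # complex wavelet coefficients
power = numpy.abs(wt[0::4]) ** 2                     # downsample by a temporal step of 4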