def get_spike_train(self, **run_params):
    """Simulate the model and return the extracted spike train.

    Keyword arguments are forwarded to ``get_membrane_potential``.
    Returns whatever ``sf.get_spike_train`` produces (presumably a
    neo.core.SpikeTrain, as in the sibling implementations -- confirm).
    """
    vm = self.get_membrane_potential(**run_params)
    if hasattr(self._backend, 'name'):
        # Adaptive detection threshold just below the trace peak
        # (peak * 249/250), stashed on the backend for reuse.
        peak = np.max(vm)
        self._backend.threshold = peak - peak / 250.0
        spike_train = sf.get_spike_train(vm, self._backend.threshold)
    else:
        spike_train = sf.get_spike_train(vm)
    return spike_train
Esempio n. 2
0
    def get_spike_train(self):
        """Gets computed spike times from the model.

        Arguments: None.
        Returns: a neo.core.SpikeTrain object.
        """
        vm = self.get_membrane_potential()
        return spike_functions.get_spike_train(vm)
Esempio n. 3
0
    def run(self,
            cell_model,
            param_values,
            sim=None,
            isolate=None,
            timeout=None):
        """Instantiate the protocol and collect responses.

        Runs ``self._run_func`` either in an isolated worker process
        (the default) or in the current process, then augments the
        responses dict with neo ``AnalogSignal`` copies of any recording
        that exposes a ``response`` attribute.

        Arguments:
            cell_model: model object forwarded to ``_run_func``.
            param_values: parameter mapping forwarded to ``_run_func``.
            sim: simulator handle forwarded to ``_run_func``.
            isolate: run in a separate process when True (default True).
            timeout: seconds allowed for the isolated run, or None.

        Returns: the responses dict, updated with ``'neo_<key>'`` entries.

        Raises: ValueError if ``timeout`` is negative.
        """
        if isolate is None:
            isolate = True

        if isolate:
            def _reduce_method(meth):
                """Make bound methods picklable for the worker process."""
                return (getattr, (meth.__self__, meth.__func__.__name__))

            import copyreg
            import types
            copyreg.pickle(types.MethodType, _reduce_method)

            import pebble

            if timeout is not None and timeout < 0:
                raise ValueError("timeout should be > 0")

            with pebble.ProcessPool(max_workers=1, max_tasks=1) as pool:
                task = pool.schedule(self._run_func,
                                     kwargs={
                                         'cell_model': cell_model,
                                         'param_values': param_values,
                                         'sim': sim
                                     },
                                     timeout=timeout)
                try:
                    responses = task.result()
                except Exception:
                    # The isolated run failed (timeout, pickling, crash):
                    # fall back to running in the current process.
                    responses = self._run_func(cell_model=cell_model,
                                               param_values=param_values,
                                               sim=sim)
        else:
            responses = self._run_func(cell_model=cell_model,
                                       param_values=param_values,
                                       sim=sim)

        new_responses = {}
        for key, recording in responses.items():
            if not hasattr(recording, 'response'):
                continue
            time = recording.response['time'].values
            vm = recording.response['voltage'].values
            if not hasattr(cell_model, 'l5pc'):
                # NOTE(review): hard-coded sampling period (1/0.01255 s);
                # presumably this backend's fixed time step -- confirm.
                new_responses['neo_' + str(key)] = AnalogSignal(
                    vm, units=pq.mV, sampling_period=(1 / 0.01255) * pq.s)
            else:
                # Infer the sampling period from the recorded time base.
                new_responses['neo_' + str(key)] = AnalogSignal(
                    vm,
                    units=pq.mV,
                    sampling_period=(time[1] - time[0]) * pq.s)
            # Spike count is only inspected, never used; the call is kept
            # for parity with the original behavior.
            train_len = len(
                sf.get_spike_train(new_responses['neo_' + str(key)]))
            if train_len > 0:
                pass

        responses.update(new_responses)
        return responses
Esempio n. 4
0
 def get_spike_train(self, **run_params):
     """Run the model and return the detected spike train.

     Keyword arguments are forwarded to ``get_membrane_potential``.
     """
     membrane = self.get_membrane_potential(**run_params)
     return sf.get_spike_train(membrane)
Esempio n. 5
0
 def get_spike_train(self, rerun=False, **run_params):
     """Return spike times extracted from the membrane potential.

     ``rerun`` and any extra keyword arguments are forwarded to
     ``get_membrane_potential``.
     """
     trace = self.get_membrane_potential(rerun=rerun, **run_params)
     return sf.get_spike_train(trace)
Esempio n. 6
0
 def get_spike_train(self, rerun=False, **run_params):
     """Compute the membrane potential and convert it to a spike train."""
     recorded_vm = self.get_membrane_potential(rerun=rerun, **run_params)
     train = sf.get_spike_train(recorded_vm)
     return train
def three_feature_sets_on_static_models(model, debug=False, challenging=False):
    '''
    Conventions:
        variables ending with 15 refer to 1.5 current injection protocols.
        variables ending with 30 refer to 3.0 current injection protocols.
    Inputs:
        NML-DB models, a method designed to be called inside an iteration
        loop, where a list of models is iterated over, and on each iteration
        a new model is supplied to this method.

    Outputs:
        A dictionary of dataframes, for features sought according to:
        Druckmann, EFEL, AllenSDK
    '''

    ##
    # Wrangle data in preparation for computing Allen features
    # (3.0x protocol).
    ##
    times = np.array([float(t) for t in model.vm30.times])
    volts = np.array([float(v) for v in model.vm30])

    # Best-effort ASCII plot of the trace; plotting must never be fatal.
    try:
        import asciiplotlib as apl
        fig = apl.figure()
        fig.plot(times,
                 volts,
                 label="V_{m} (mV), versus time (ms)",
                 width=100,
                 height=80)
        fig.show()
    except Exception:
        pass

    ##
    # Allen features, 3.0x protocol.
    ##
    all_allen_features30, allen_features = allen_format(volts,
                                                        times,
                                                        optional_vm=model.vm30)

    ##
    # Wrangle data and compute Allen features, 1.5x protocol.
    ##
    times = np.array([float(t) for t in model.vm15.times])
    volts = np.array([float(v) for v in model.vm15])
    all_allen_features15, allen_features = allen_format(volts,
                                                        times,
                                                        optional_vm=model.vm15)

    ##
    # Get Druckmann features; mainly handled in external files.
    ##
    if hasattr(model, 'druckmann2013_input_resistance_currents'):
        DMTNMLO = dm_test_interoperable.DMTNMLO()
        DMTNMLO.test_setup(None, None, model=model)
        dm_test_features = DMTNMLO.runTest()
    else:
        dm_test_features = None

    ##
    # Wrangle data to prepare for EFEL feature calculation.
    ##
    trace3 = {}
    trace3['T'] = [float(t) for t in model.vm30.times.rescale('ms')]
    trace3['V'] = [float(v) for v in model.vm30.magnitude]
    trace3['stimulus_current'] = [model.druckmann2013_strong_current]
    if not hasattr(model, 'allen'):
        trace3['stim_end'] = [trace3['T'][-1]]
        trace3['stim_start'] = [float(model.protocol['Time_Start'])]
    else:
        # Allen protocol times look like seconds; EFEL expects ms -- confirm.
        trace3['stim_end'] = [float(model.protocol['Time_End']) * 1000.0]
        trace3['stim_start'] = [float(model.protocol['Time_Start']) * 1000.0]
    # Pass 'traces' to efel and ask it to calculate the feature values.
    traces3 = [trace3]

    trace15 = {}
    trace15['T'] = [float(t) for t in model.vm15.times.rescale('ms')]
    trace15['V'] = [float(v) for v in model.vm15.magnitude]
    if not hasattr(model, 'allen'):
        trace15['stim_end'] = [trace15['T'][-1]]
        trace15['stim_start'] = [float(model.protocol['Time_Start'])]
    else:
        trace15['stim_end'] = [float(model.protocol['Time_End']) * 1000.0]
        trace15['stim_start'] = [float(model.protocol['Time_Start']) * 1000.0]
    trace15['stimulus_current'] = [model.druckmann2013_standard_current]
    # BUG FIX: the original unconditionally overwrote trace15['stim_end']
    # with the last sample time here, which made the allen branch above
    # dead code; the overwrite has been removed.
    traces15 = [trace15]

    ##
    # Compute EFEL features (HBP).
    ##
    efel_15 = efel.getFeatureValues(traces15, list(efel.getFeatureNames()))
    efel_30 = efel.getFeatureValues(traces3, list(efel.getFeatureNames()))

    if challenging:
        # NOTE(review): these results are computed but never returned;
        # kept for any side effects -- confirm they belong in the output.
        efel_results_inh = more_challenging(model)
        nu_preds = standard_nu_tests_two(DMTNMLO.model.nmldb_model)

    if debug:
        ##
        # Sort of a bit like unit testing, but causes a download which
        # slows everything down:
        ##
        assert DMTNMLO.model.druckmann2013_standard_current != DMTNMLO.model.druckmann2013_strong_current
        from neuronunit.capabilities import spike_functions as sf
        _ = not_necessary_for_program_completion(DMTNMLO)
        print(
            'note: False in evidence of spiking is not completely damning \n')
        print(
            'a threshold of 0mV is used to detect spikes, many models dont have a peak amp'
        )
        print(
            'above 0mV, so 0 spikes using the threshold technique is not final'
        )
        print('druckman tests use derivative approach')
        print(len(sf.get_spike_train(model.vm30)) > 1)
        print(len(sf.get_spike_train(model.vm15)) > 1)

    print('\n\n\n\n\n\n successful run \n\n\n\n\n\n')

    # Single return path; 'model_information' falls back to the literal
    # 'allen_data' when the model carries no 'information' attribute.
    return {
        'model_id': model.name,
        'model_information': getattr(model, 'information', 'allen_data'),
        'efel_15': efel_15,
        'efel_30': efel_30,
        'dm': dm_test_features,
        'allen_15': all_allen_features15,
        'allen_30': all_allen_features30
    }
Esempio n. 8
0
 def get_spike_count(self):
     """Return the number of spikes detected in ``self.vm``."""
     return len(sf.get_spike_train(self.vm))