# Example 1
def load_spikes_file(config_file=None, spikes_file=None):
    """Load a SpikeTrains object from an explicit spikes file or a simulation config.

    :param config_file: path to a simulation config json; its `spikes_file`
        entry is used when `spikes_file` is not given
    :param spikes_file: path to a spikes file; takes precedence over config_file
    :return: SpikeTrains instance (implicitly None if both arguments are None)
    """
    if spikes_file is None and config_file is not None:
        # Resolve the spikes path from the config's "spikes_file" entry.
        spikes_file = ConfigDict.from_json(config_file).spikes_file

    if spikes_file is not None:
        return SpikeTrains.load(spikes_file)
# Example 2
def spike_trains():
    """Build a demo SpikeTrains for population 'V1': 20 nodes, each with 10
    sorted random spike times drawn uniformly from [0, 1500) ms.

    :return: SpikeTrains
    """
    st = SpikeTrains(default_population='V1')
    for node_id in range(0, 20):
        spike_times = np.sort(np.random.uniform(0.0, 1500.0, 10))
        st.add_spikes(node_ids=node_id, timestamps=spike_times)
    return st
# Example 3
def get_population_spike_rates_by_epoch(spikes_file, simulation, groupby=None, epochs=None, **filterparams):
    """Compute mean/std firing rates aggregated by `groupby`, optionally per named epoch.

    :param spikes_file: path to .h5 file
    :param simulation: :class:'BioSimulator'
    :param groupby: str (node-property column to aggregate over)
    :param epochs: dict: {str: dict with 'start'/'stop' floats (ms) and optional 'node_ids'}
        NOTE(review): the code indexes epoch_dict['start']/['stop'], so each value
        must be a dict, not a tuple as previously documented.
    :param filterparams: forwarded to simulation.net.node_properties() to select nodes
    :return: pd.DataFrame if epochs is None, else dict of {epoch name: pd.DataFrame}
    """
    def get_epoch_firing_rate(r, start, stop):
        """Firing rate (Hz) of one node's spike train within (start, stop].

        :param r: pd.Series
        :param start: float (ms)
        :param stop: float (ms)
        :return: pd.Series
        """
        d = {}
        # Count spikes strictly after `start` and up to/including `stop`.
        count = len(np.where((r['timestamps'] > start) & (r['timestamps'] <= stop))[0])
        d['firing_rate'] = count / (stop - start) * 1000.  # Hz

        return pd.Series(d, index=['firing_rate'])

    def get_epoch_dataframe(epoch_df, nodes_df, groupby):
        """Join per-node rates onto node properties and aggregate by `groupby`.

        :param epoch_df: pd.DataFrame
        :param nodes_df: pd.DataFrame
        :param groupby: str
        :return: pd.DataFrame with ('firing_rate', mean/std) columns
        """
        epoch_df.index.names = ['population', 'node_id']
        # Left-merge so nodes with no spikes stay in the table; fill their rate with 0.
        epoch_df = pd.merge(nodes_df, epoch_df, left_index=True, right_index=True, how='left')
        epoch_df = epoch_df.fillna({'firing_rate': 0.0})
        epoch_df = epoch_df.groupby(groupby)[['firing_rate']].agg([np.mean, np.std])

        return epoch_df

    spike_trains = SpikeTrains.load(spikes_file)
    spike_train_df = spike_trains.to_dataframe()
    nodes_df = simulation.net.node_properties(**filterparams)
    sim_time_ms = simulation.simulation_time(units='ms')
    rate_df = dict()

    if epochs is None:
        # No epochs: rate over the full simulation window [0, sim_time_ms].
        full_df = spike_train_df.groupby(['population', 'node_ids']).apply(get_epoch_firing_rate, 0., sim_time_ms)
        full_df = get_epoch_dataframe(full_df, nodes_df, groupby)
        return full_df
    else:
        for epoch_name, epoch_dict in epochs.items():
            # Optional per-epoch node filter; empty/absent 'node_ids' means all nodes.
            if 'node_ids' in epoch_dict and len(epoch_dict['node_ids']) > 0:
                epoch_df = spike_train_df[spike_train_df.node_ids.isin(epoch_dict['node_ids'])]
            else:
                epoch_df = spike_train_df.copy()
            epoch_df = epoch_df.groupby(['population', 'node_ids']).apply(
                get_epoch_firing_rate, epoch_dict['start'], epoch_dict['stop'])
            rate_df[epoch_name] = get_epoch_dataframe(epoch_df, nodes_df, groupby)

    return rate_df
# Example 4
def spike_statistics(spikes_file, simulation=None, simulation_time=None, groupby=None, network=None, **filterparams):
    """Per-node spike count and mean inter-spike interval, optionally aggregated.

    :param spikes_file: path to spikes file loadable by SpikeTrains.load
    :param simulation: :class:'BioSimulator'; when given, results are merged with
        node properties and aggregated (mean/std) by `groupby`
    :param simulation_time: unused — kept for backward compatibility
    :param groupby: str, node-property column to aggregate over (used with `simulation`)
    :param network: unused — kept for backward compatibility
    :param filterparams: forwarded to simulation.net.node_properties()
    :return: pd.DataFrame, per-node if `simulation` is None, else aggregated
    """
    spike_trains = SpikeTrains.load(spikes_file)

    def calc_stats(r):
        """Spike count and mean ISI (ms) for one node's spike train (pd.Series)."""
        vals = np.sort(r['timestamps'])
        diffs = np.diff(vals)
        # Reuse `diffs` instead of recomputing np.diff; 0 or 1 spikes -> ISI of 0.
        isi = np.mean(diffs) if diffs.size > 0 else 0.0
        return pd.Series({'count': len(vals), 'isi': isi}, index=['count', 'isi'])

    spike_counts_df = spike_trains.to_dataframe().groupby(['population', 'node_ids']).apply(calc_stats)
    # (Removed a dead rename of 'timestamps' -> 'counts': after apply() the
    # columns are 'count' and 'isi', so the rename matched nothing.)
    spike_counts_df.index.names = ['population', 'node_id']

    if simulation is not None:
        nodes_df = simulation.net.node_properties(**filterparams)
        sim_time_s = simulation.simulation_time(units='s')
        spike_counts_df['firing_rate'] = spike_counts_df['count'] / sim_time_s

        # Left-merge so silent nodes remain, then zero-fill their statistics.
        vals_df = pd.merge(nodes_df, spike_counts_df, left_index=True, right_index=True, how='left')
        vals_df = vals_df.fillna({'count': 0.0, 'firing_rate': 0.0, 'isi': 0.0})

        vals_df = vals_df.groupby(groupby)[['firing_rate', 'count', 'isi']].agg([np.mean, np.std])
        return vals_df
    else:
        return spike_counts_df
# Example 5
def show_plot():
    """Build a demo 'V1' spike set and show a boxplot of rates for three node groups.

    Spike counts per node follow half a sine cycle across node ids, so group
    'low' fires least and 'high' most.
    """
    st = SpikeTrains(default_population='V1')
    for n in range(0, 100):
        # ~10 spikes at the edges of the id range, up to ~160 in the middle.
        n_vals = np.sin(n * np.pi / 100) * 150 + 10
        times = np.random.uniform(0.0, 1500.0, int(n_vals))
        times = np.sort(times)
        st.add_spikes(node_ids=n, timestamps=times)

    node_groups = [{
        'node_ids': [0, 1, 2, 3, 4, 5, 7, 8, 9, 10],
        'label': 'low'
    }, {
        'node_ids': np.array([11, 12, 13, 14, 15]),
        'label': 'mid'
    }, {
        'node_ids': range(16, 110),
        'label': 'high'
    }]

    # Pass the node_groups variable instead of duplicating the literal inline
    # (the original repeated the identical list in the call).
    plotting.plot_rates_boxplot(spike_trains=st, node_groups=node_groups)
    print(node_groups)
# Example 6
def get_firing_rates_by_cell_type_from_file(spikes_file, simulation, population='l4', cell_type_attr_name='model_name',
                                            epochs=None):
    """Per-cell firing rates (Hz) grouped by cell type, optionally per named epoch.

    :param spikes_file: path to .h5 file
    :param simulation: :class:'BioSimulator'
    :param population: str
    :param cell_type_attr_name: str, node column holding the cell-type label
    :param epochs: dict: {str: dict with 'start'/'stop' (ms) and optional 'node_ids'}
    :return: dict of {epoch name (or 'all'): {cell type: list of rates (Hz)}}
    """
    def _rates_by_type(trains, pop, node_ids, type_of, start, stop):
        """Collect each cell's rate within [start, stop], keyed by its cell type."""
        window_s = (stop - start) / 1000.  # sec
        by_type = dict()
        for node_id in node_ids:
            spike_times = trains.get_times(node_id, population=pop, time_window=[start, stop])
            by_type.setdefault(type_of[node_id], []).append(len(spike_times) / window_s)  # Hz
        return by_type

    spike_trains = SpikeTrains.load(spikes_file)
    all_nodes_df = simulation.net.get_node_groups(populations=population)
    all_node_ids = all_nodes_df['node_id'].values
    # Map node id -> cell-type label for quick lookup inside the helper.
    cell_type_dict = {nid: ct for nid, ct in all_nodes_df[['node_id', cell_type_attr_name]].values}
    sim_end = simulation.simulation_time(units='ms')

    rate_dict = dict()
    if epochs is None:
        # Single window spanning the whole simulation.
        rate_dict['all'] = _rates_by_type(spike_trains, population, all_node_ids, cell_type_dict,
                                          start=0., stop=sim_end)
    else:
        for epoch_name, epoch_dict in epochs.items():
            # Optional per-epoch node filter; empty/absent 'node_ids' means all nodes.
            has_ids = 'node_ids' in epoch_dict and len(epoch_dict['node_ids']) > 0
            selected_ids = epoch_dict['node_ids'] if has_ids else all_node_ids
            rate_dict[epoch_name] = _rates_by_type(spike_trains, population, selected_ids, cell_type_dict,
                                                   start=epoch_dict['start'], stop=epoch_dict['stop'])

    return rate_dict
# Example 7
def _find_spikes(spikes_file=None, config_file=None, population=None):
    """Locate and load the spikes file(s) for a simulation.

    :param spikes_file: explicit path to a spikes file; overrides config lookup
    :param config_file: simulation config json searched for spike-report outputs
    :param population: node population to select; when given, all candidate files
        containing that population are merged
    :return: tuple (population name, SpikeTrains)
    :raises ValueError: no candidate file, ambiguous candidates, missing file,
        or no file containing the requested population
    """
    candidate_spikes = []

    # Get spikes file(s)
    if spikes_file:
        # User has explicity set the location of the spike files
        candidate_spikes.append(spikes_file)

    elif config_file is not None:
        # Otherwise search the config.json for all possible output spikes_files. We can use the simulation_reports
        # module to find any spikes output file specified in config's "output" or "reports" section.
        config = SonataConfig.from_json(config_file)
        sim_reports = simulation_reports.from_config(config)
        for report in sim_reports:
            if report.module == 'spikes_report':
                # BMTK can end up output the same spikes file in SONATA, CSV, and NWB format. Try fetching the SONATA
                # version first, then CSV, and finally NWB if it exists.
                spikes_sonata = report.params.get('spikes_file', None)
                spikes_csv = report.params.get('spikes_file_csv', None)
                spikes_nwb = report.params.get('spikes_file_nwb', None)

                if spikes_sonata is not None:
                    candidate_spikes.append(spikes_sonata)
                elif spikes_csv is not None:
                    candidate_spikes.append(spikes_csv)
                elif spikes_nwb is not None:
                    # BUG FIX: this branch previously tested spikes_csv again, so the
                    # NWB fallback could never be reached.
                    candidate_spikes.append(spikes_nwb)

        # TODO: Should we also look in the "inputs" for displaying input spike statistics?

    if not candidate_spikes:
        raise ValueError(
            'Could not find an output spikes-file. Use "spikes_file" parameter option.'
        )

    # Find file that contains spikes for the specified "population" of nodes. If "population" parameter is not
    # specified try to guess that spikes that the user wants to visualize.
    if population is not None:
        spikes_obj = None
        for spikes_f in candidate_spikes:
            st = SpikeTrains.load(spikes_f)
            if population in st.populations:
                # Merge every file containing this population into one SpikeTrains.
                if spikes_obj is None:
                    spikes_obj = st
                else:
                    spikes_obj.merge(st)

        if spikes_obj is None:
            raise ValueError(
                'Could not find spikes file with node population "{}".'.format(
                    population))
        else:
            return population, spikes_obj

    else:
        if len(candidate_spikes) > 1:
            raise ValueError('Found more than one spike-trains file')

        spikes_f = candidate_spikes[0]
        if not os.path.exists(spikes_f):
            raise ValueError(
                'Did not find spike-trains file {}. Make sure the simulation has completed.'
                .format(spikes_f))

        spikes_obj = SpikeTrains.load(spikes_f)

        if len(spikes_obj.populations) > 1:
            raise ValueError(
                'Spikes file {} contains more than one node population.'.
                format(spikes_f))
        else:
            return spikes_obj.populations[0], spikes_obj