Example #1
def network_features(env, target_trj_rate_map_dict, t_start, t_stop,
                     target_populations):
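    """
    Computes per-population summary features from the spikes recorded in env:
    total and active cell counts, the sum of per-cell mean rates, and the
    summed residual against the target trajectory rate maps, over the window
    [t_start, t_stop).
    """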

    features_dict = dict()

    temporal_resolution = float(env.stimulus_config['Temporal Resolution'])
    time_bins = np.arange(t_start, t_stop, temporal_resolution)

    pop_spike_dict = spikedata.get_env_spike_dict(env,
                                                  include_artificial=False)

    for pop_name in target_populations:

        has_target_trj_rate_map = pop_name in target_trj_rate_map_dict

        n_active = 0
        sum_mean_rate = 0.
        spike_density_dict = spikedata.spike_density_estimate(
            pop_name, pop_spike_dict[pop_name], time_bins)
        for gid, dens_dict in utils.viewitems(spike_density_dict):
            mean_rate = np.mean(dens_dict['rate'])
            sum_mean_rate += mean_rate
            if mean_rate > 0.:
                n_active += 1

        n_total = len(env.cells[pop_name]) - len(
            env.artificial_cells[pop_name])

        n_target_rate_map = 0
        sum_target_rate_dist_residual = None
        if has_target_trj_rate_map:
            pop_target_trj_rate_map_dict = target_trj_rate_map_dict[pop_name]
            n_target_rate_map = len(pop_target_trj_rate_map_dict)
            target_rate_dist_residuals = []
            for gid in pop_target_trj_rate_map_dict:
                target_trj_rate_map = pop_target_trj_rate_map_dict[gid]
                rate_map_len = len(target_trj_rate_map)
                if gid in spike_density_dict:
                    residual = np.abs(
                        np.sum(target_trj_rate_map -
                               spike_density_dict[gid]['rate'][:rate_map_len]))
                else:
                    residual = np.abs(np.sum(target_trj_rate_map))
                target_rate_dist_residuals.append(residual)
            sum_target_rate_dist_residual = np.sum(target_rate_dist_residuals)

        pop_features_dict = {}
        pop_features_dict['n_total'] = n_total
        pop_features_dict['n_active'] = n_active
        pop_features_dict['n_target_rate_map'] = n_target_rate_map
        pop_features_dict['sum_mean_rate'] = sum_mean_rate
        pop_features_dict[
            'sum_target_rate_dist_residual'] = sum_target_rate_dist_residual

        features_dict[pop_name] = pop_features_dict

    return features_dict
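The target rate residual in this example is just the absolute difference between the summed target trajectory rate map and the summed estimated rate of the same cell, with the full target sum used as a fallback for silent cells. A self-contained NumPy sketch of that computation, with placeholder arrays standing in for the target map and the density estimate:

import numpy as np

target_trj_rate_map = np.array([1.0, 2.0, 3.0, 2.0])    # placeholder target rates
estimated_rate = np.array([0.5, 2.5, 2.0, 2.0, 1.0])    # placeholder density estimate
rate_map_len = len(target_trj_rate_map)

# Cell produced spikes: compare against the density estimate truncated to the map length.
residual_active = np.abs(np.sum(target_trj_rate_map - estimated_rate[:rate_map_len]))
# Cell produced no spikes: the whole target map counts as residual.
residual_silent = np.abs(np.sum(target_trj_rate_map))
print(residual_active, residual_silent)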
Example #2
def compute_features_firing_rate_fraction_active(x, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    update_source_contexts(x, context)
    context.env.results_file_id = '%s_%s' % (
        context.interface.worker_id,
        datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))

    network.run(context.env, output=context.output_results, shutdown=False)

    pop_spike_dict = spikedata.get_env_spike_dict(context.env, include_artificial=False)

    t_start = 250.
    t_stop = context.env.tstop
    
    time_bins = np.arange(t_start, t_stop, context.bin_size)

    for pop_name in context.target_populations:

        mean_rate_sum = 0.
        spike_density_dict = spikedata.spike_density_estimate(
            pop_name, pop_spike_dict[pop_name], time_bins)
        for gid, dens_dict in utils.viewitems(spike_density_dict):
            mean_rate_sum += np.mean(dens_dict['rate'])
        mean_rate_sum = context.env.comm.allreduce(mean_rate_sum, op=MPI.SUM)

        n_total = context.env.comm.allreduce(
            len(context.env.cells[pop_name]) -
            len(context.env.artificial_cells[pop_name]), op=MPI.SUM)
        n_active = context.env.comm.allreduce(len(spike_density_dict), op=MPI.SUM)

        if n_active > 0:
            mean_rate = mean_rate_sum / n_active
        else:
            mean_rate = 0.

        if n_total > 0:
            fraction_active = n_active / n_total
        else:
            fraction_active = 0.

        rank = int(context.env.pc.id())
        if rank == 0:
            context.logger.info('population %s: n_active = %d n_total = %d' % (pop_name, n_active, n_total))

        results['%s firing rate' % pop_name] = mean_rate
        results['%s fraction active' % pop_name] = fraction_active

    return results
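The per-rank activity counts and rate sums above are combined across MPI ranks with allreduce before the population averages are formed. A stripped-down sketch of that reduction pattern, independent of the network context (assumes mpi4py; the local values are placeholders):

from mpi4py import MPI

comm = MPI.COMM_WORLD
n_active_local = 3       # placeholder: active cells owned by this rank
rate_sum_local = 12.5    # placeholder: summed per-cell mean rates on this rank

# Every rank receives the global sums.
n_active = comm.allreduce(n_active_local, op=MPI.SUM)
rate_sum = comm.allreduce(rate_sum_local, op=MPI.SUM)
mean_rate = rate_sum / n_active if n_active > 0 else 0.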
Example #3
def compute_features_firing_rate(x, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    update_source_contexts(x, context)
    context.env.results_id = '%s_%s' % (
        context.interface.worker_id,
        datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))

    network.run(context.env, output=context.output_results, shutdown=False)

    pop_spike_dict = spikedata.get_env_spike_dict(context.env)

    t_start = 0.
    t_stop = context.env.tstop

    time_bins = np.arange(t_start, t_stop, context.bin_size)

    pop_name = 'GC'

    mean_rate_sum = 0.
    spike_density_dict = spikedata.spike_density_estimate(
        pop_name, pop_spike_dict[pop_name], time_bins)
    for gid, dens_dict in utils.viewitems(spike_density_dict):
        mean_rate_sum += np.mean(dens_dict['rate'])

    n = len(spike_density_dict)
    if n > 0:
        mean_rate = mean_rate_sum / n
    else:
        mean_rate = 0.

    results['firing_rate'] = mean_rate

    return results
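The mean-rate feature reduces each cell's estimated rate trace to a scalar and averages over the cells that have a density estimate. A NumPy-only sketch with placeholder data (in the example above the rate arrays come from spikedata.spike_density_estimate):

import numpy as np

time_bins = np.arange(0., 1000., 5.)    # placeholder window and bin size (ms)
spike_density_dict = {                  # placeholder per-gid density estimates
    101: {'rate': np.full(len(time_bins), 4.0)},
    102: {'rate': np.full(len(time_bins), 1.5)},
}

mean_rate_sum = sum(np.mean(d['rate']) for d in spike_density_dict.values())
n = len(spike_density_dict)
mean_rate = mean_rate_sum / n if n > 0 else 0.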
Example #4
def compute_network_features(x, model_id=None, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    update_source_contexts(x, context)
    # TODO: Do you want this to be identical on all ranks in a subworld? You can use context.comm.bcast
    context.env.results_file_id = '%s_%s' % (
        context.interface.worker_id,
        datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))

    temporal_resolution = float(
        context.env.stimulus_config['Temporal Resolution'])
    time_bins = np.arange(context.t_start, context.t_stop, temporal_resolution)

    context.env.tstop = context.t_stop
    network.run(context.env, output=context.output_results, shutdown=False)

    pop_spike_dict = spikedata.get_env_spike_dict(context.env,
                                                  include_artificial=False)

    for pop_name in context.target_populations:

        n_active_local = 0
        mean_rate_sum_local = 0.
        spike_density_dict = spikedata.spike_density_estimate(
            pop_name, pop_spike_dict[pop_name], time_bins)
        for gid, dens_dict in utils.viewitems(spike_density_dict):
            mean_rate = np.mean(dens_dict['rate'])
            mean_rate_sum_local += mean_rate
            if mean_rate > 0.:
                n_active_local += 1
        mean_rate_sum = context.env.comm.allreduce(mean_rate_sum_local,
                                                   op=MPI.SUM)
        context.env.comm.barrier()

        n_local = len(context.env.cells[pop_name]) - len(
            context.env.artificial_cells[pop_name])
        n_total = context.env.comm.allreduce(n_local, op=MPI.SUM)
        n_active = context.env.comm.allreduce(n_active_local, op=MPI.SUM)
        context.env.comm.barrier()

        if n_active > 0:
            mean_rate = mean_rate_sum / n_active
        else:
            mean_rate = 0.

        if n_total > 0:
            fraction_active = n_active / n_total
        else:
            fraction_active = 0.

        mean_target_rate_dist_residual = None
        if n_active > 0:
            pop_name_is_in_network = (pop_name in context.target_trj_rate_map_dict and
                                      len(context.target_trj_rate_map_dict[pop_name]) > 0)
            pop_name_is_in_network_list = context.env.comm.gather(
                pop_name_is_in_network, root=0)
            if context.env.comm.rank == 0:
                if any(pop_name_is_in_network_list):
                    pop_name_is_in_network = True
            pop_name_is_in_network = context.env.comm.bcast(
                pop_name_is_in_network, root=0)

            if pop_name_is_in_network:
                mean_target_rate_dist_residual = 0.
                target_rate_dist_residuals = []
                if pop_name in context.target_trj_rate_map_dict:
                    target_trj_rate_map_dict = context.target_trj_rate_map_dict[
                        pop_name]

                    for gid in target_trj_rate_map_dict:
                        target_trj_rate_map = target_trj_rate_map_dict[gid]
                        rate_map_len = len(target_trj_rate_map)
                        if gid in spike_density_dict:
                            residual = np.sum(
                                target_trj_rate_map -
                                spike_density_dict[gid]['rate'][:rate_map_len])
                        else:
                            residual = np.sum(target_trj_rate_map)
                        target_rate_dist_residuals.append(residual)
                residual_sum_local = np.sum(target_rate_dist_residuals)
                residual_sum = context.env.comm.allreduce(residual_sum_local,
                                                          op=MPI.SUM)
                mean_target_rate_dist_residual = residual_sum / len(
                    target_trj_rate_map_dict)
            context.env.comm.barrier()

        rank = int(context.env.pc.id())
        if context.env.comm.rank == 0:
            context.logger.info(
                'population %s: n_active = %d n_total = %d mean rate = %s' %
                (pop_name, n_active, n_total, str(mean_rate)))

        results['%s firing rate' % pop_name] = mean_rate
        results['%s fraction active' % pop_name] = fraction_active
        if mean_target_rate_dist_residual is not None:
            results['%s target rate dist residual' %
                    pop_name] = mean_target_rate_dist_residual

    return results
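Because the target rate maps for a population may live on only some ranks, this example first lets every rank report whether it holds any, gathers the flags on rank 0, and broadcasts the combined answer back. A minimal sketch of that gather/bcast agreement pattern (assumes mpi4py; the local flag is a placeholder condition):

from mpi4py import MPI

comm = MPI.COMM_WORLD
flag_local = (comm.rank % 2 == 0)              # placeholder per-rank condition
flag_list = comm.gather(flag_local, root=0)    # only rank 0 receives the list
if comm.rank == 0:
    flag_local = any(flag_list)
flag = comm.bcast(flag_local, root=0)          # all ranks now agree on the flag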