Example #1

# Imports assumed from the surrounding module (dentate / neuroh5 codebase);
# exact module paths may differ between versions. logger, context,
# is_interactive, debug_callback and plot_summed_spike_psth are module-level
# definitions not shown in this excerpt.
import copy
import math
import os
import time
from collections import defaultdict

import h5py
import numpy as np
from mpi4py import MPI

from dentate import stimulus
from dentate.env import Env
from dentate.stimulus import generate_linear_trajectory, get_equilibration
from dentate.utils import config_logging, viewitems
from neuroh5.io import (NeuroH5CellAttrGen, append_cell_attributes,
                        read_population_ranges,
                        scatter_read_cell_attribute_selection)

def rate_maps_from_features(env,
                            pop_name,
                            input_features_path,
                            input_features_namespace,
                            cell_index_set,
                            time_range=None,
                            n_trials=1):
    """Initializes presynaptic spike sources from a file with input selectivity features represented as firing rates."""

    if time_range is not None:
        if time_range[0] is None:
            time_range[0] = 0.0

    spatial_resolution = float(env.stimulus_config['Spatial Resolution'])
    temporal_resolution = float(env.stimulus_config['Temporal Resolution'])

    this_input_features_namespace = '%s %s' % (input_features_namespace,
                                               env.arena_id)

    input_features_attr_names = [
        'Selectivity Type', 'Num Fields', 'Field Width', 'Peak Rate',
        'Module ID', 'Grid Spacing', 'Grid Orientation',
        'Field Width Concentration Factor', 'X Offset', 'Y Offset'
    ]

    selectivity_type_names = {
        i: n
        for n, i in viewitems(env.selectivity_types)
    }

    arena = env.stimulus_config['Arena'][env.arena_id]
    arena_x, arena_y = stimulus.get_2D_arena_spatial_mesh(
        arena=arena, spatial_resolution=spatial_resolution)

    trajectory = arena.trajectories[env.trajectory_id]
    t, x, y, d = stimulus.generate_linear_trajectory(
        trajectory, temporal_resolution=temporal_resolution)
    if time_range is not None:
        t_range_inds = np.where((t < time_range[1]) & (t >= time_range[0]))[0]
        t = t[t_range_inds]
        x = x[t_range_inds]
        y = y[t_range_inds]
        d = d[t_range_inds]

    input_rate_map_dict = {}
    pop_index = int(env.Populations[pop_name])
    input_features_iter = scatter_read_cell_attribute_selection(
        input_features_path,
        pop_name,
        selection=list(cell_index_set),
        namespace=this_input_features_namespace,
        mask=set(input_features_attr_names),
        comm=env.comm,
        io_size=env.io_size)
    for gid, selectivity_attr_dict in input_features_iter:

        this_selectivity_type = selectivity_attr_dict['Selectivity Type'][0]
        this_selectivity_type_name = selectivity_type_names[
            this_selectivity_type]
        input_cell_config = stimulus.get_input_cell_config(
            selectivity_type=this_selectivity_type,
            selectivity_type_names=selectivity_type_names,
            selectivity_attr_dict=selectivity_attr_dict)
        if input_cell_config.num_fields > 0:
            rate_map = input_cell_config.get_rate_map(x=x, y=y)
            input_rate_map_dict[gid] = rate_map

    return input_rate_map_dict
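
The time-range filter in rate_maps_from_features applies a single boolean
index array to all four trajectory arrays at once. Below is a minimal,
self-contained sketch of that pattern; the synthetic trajectory values are
illustrative only and not part of the module.

import numpy as np

# Synthetic trajectory sampled at 1 ms resolution (illustrative values).
t = np.arange(0.0, 1000.0, 1.0)          # time (ms)
x = np.cos(t / 100.0)                    # x position (cm)
y = np.sin(t / 100.0)                    # y position (cm)
d = np.linspace(0.0, 50.0, t.shape[0])   # cumulative distance (cm)

time_range = [250.0, 750.0]
# Same predicate as above: keep samples in the half-open interval [t0, t1).
t_range_inds = np.where((t < time_range[1]) & (t >= time_range[0]))[0]
t, x, y, d = (a[t_range_inds] for a in (t, x, y, d))

assert t.min() >= time_range[0] and t.max() < time_range[1]
print('kept %i of 1000 samples' % t.shape[0])
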
def main(config, config_prefix, selectivity_path, selectivity_namespace,
         arena_id, populations, n_trials, io_size, chunk_size,
         value_chunk_size, cache_size, write_size, output_path,
         spikes_namespace, spike_train_attr_name, gather, debug, plot,
         show_fig, save_fig, save_fig_dir, font_size, fig_format, verbose,
         dry_run):
    """

    :param config: str (.yaml file name)
    :param config_prefix: str (path to dir)
    :param selectivity_path: str (path to file)
    :param selectivity_namespace: str
    :param arena_id: str
    :param populations: str
    :param n_trials: int
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param cache_size: int
    :param write_size: int
    :param output_path: str (path to file)
    :param spikes_namespace: str
    :param spike_train_attr_name: str
    :param gather: bool
    :param debug: bool
    :param plot: bool
    :param show_fig: bool
    :param save_fig: str (base file name)
    :param save_fig_dir: str (path to dir)
    :param font_size: float
    :param fig_format: str
    :param verbose: bool
    :param dry_run: bool
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank

    config_logging(verbose)

    env = Env(comm=comm,
              config_file=config,
              config_prefix=config_prefix,
              template_paths=None)
    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if save_fig is not None:
        plot = True

    if plot:
        from dentate.plot import default_fig_options

        fig_options = copy.copy(default_fig_options)
        fig_options.saveFigDir = save_fig_dir
        fig_options.fontSize = font_size
        fig_options.figFormat = fig_format
        fig_options.showFig = show_fig

    population_ranges = read_population_ranges(selectivity_path, comm)[0]

    if len(populations) == 0:
        populations = sorted(population_ranges.keys())

    if arena_id not in env.stimulus_config['Arena']:
        raise RuntimeError(
            'Arena with ID: %s not specified by configuration at file path: %s'
            % (arena_id, config_prefix + '/' + config))
    arena = env.stimulus_config['Arena'][arena_id]

    valid_selectivity_namespaces = dict()
    if rank == 0:
        for population in populations:
            if population not in population_ranges:
                raise RuntimeError(
                    'generate_input_spike_trains: specified population: %s not found in '
                    'provided selectivity_path: %s' %
                    (population, selectivity_path))
            if population not in env.stimulus_config[
                    'Selectivity Type Probabilities']:
                raise RuntimeError(
                    'generate_input_spike_trains: selectivity type not specified for '
                    'population: %s' % population)
            valid_selectivity_namespaces[population] = []
            with h5py.File(selectivity_path, 'r') as selectivity_f:
                for this_namespace in selectivity_f['Populations'][population]:
                    if 'Selectivity %s' % arena_id in this_namespace:
                        valid_selectivity_namespaces[population].append(
                            this_namespace)
                if len(valid_selectivity_namespaces[population]) == 0:
                    raise RuntimeError(
                        'generate_input_spike_trains: no selectivity data in arena: %s found '
                        'for specified population: %s in provided selectivity_path: %s'
                        % (arena_id, population, selectivity_path))
    comm.barrier()

    valid_selectivity_namespaces = comm.bcast(valid_selectivity_namespaces,
                                              root=0)
    selectivity_type_names = dict(
        (val, key) for (key, val) in viewitems(env.selectivity_types))

    equilibrate = get_equilibration(env)

    for trajectory_id in sorted(arena.trajectories.keys()):
        trajectory = arena.trajectories[trajectory_id]
        t, x, y, d = None, None, None, None
        if rank == 0:
            t, x, y, d = generate_linear_trajectory(
                trajectory,
                temporal_resolution=env.stimulus_config['Temporal Resolution'],
                equilibration_duration=env.stimulus_config['Equilibration Duration'])

        t = comm.bcast(t, root=0)
        x = comm.bcast(x, root=0)
        y = comm.bcast(y, root=0)
        d = comm.bcast(d, root=0)

        trajectory = t, x, y, d
        trajectory_namespace = 'Trajectory %s %s' % (arena_id, trajectory_id)
        output_namespace = '%s %s %s' % (spikes_namespace, arena_id,
                                         trajectory_id)

        if not dry_run and rank == 0:
            if output_path is None:
                raise RuntimeError(
                    'generate_input_spike_trains: missing output_path')
            if not os.path.isfile(output_path):
                with h5py.File(output_path, 'w') as output_file:
                    input_file = h5py.File(selectivity_path, 'r')
                    input_file.copy('/H5Types', output_file)
                    input_file.close()
            with h5py.File(output_path, 'a') as f:
                if trajectory_namespace not in f:
                    logger.info('Appending %s datasets to file at path: %s' %
                                (trajectory_namespace, output_path))
                    group = f.create_group(trajectory_namespace)
                    for key, value in zip(['t', 'x', 'y', 'd'], [t, x, y, d]):
                        group.create_dataset(key, data=value, dtype='float32')
                else:
                    loaded_t = f[trajectory_namespace]['t'][:]
                    if len(t) != len(loaded_t):
                        raise RuntimeError(
                            'generate_input_spike_trains: file at path: %s already contains the '
                            'namespace: %s, but the dataset sizes are inconsistent with the '
                            'provided input configuration' %
                            (output_path, trajectory_namespace))
        comm.barrier()

        if rank == 0:
            context.update(locals())

        spike_hist_sum_dict = {}
        spike_hist_resolution = 1000

        write_every = max(1, int(math.floor(write_size / comm.size)))
        for population in populations:

            this_spike_hist_sum = defaultdict(
                lambda: np.zeros(spike_hist_resolution))

            # Time spike train generation across all selectivity namespaces
            # for this population.
            start_time = time.time()
            for this_selectivity_namespace in sorted(
                    valid_selectivity_namespaces[population]):

                if rank == 0:
                    logger.info(
                        'Generating input source spike trains for population %s [%s]...'
                        % (population, this_selectivity_namespace))

                selectivity_attr_gen = NeuroH5CellAttrGen(
                    selectivity_path,
                    population,
                    namespace=this_selectivity_namespace,
                    comm=comm,
                    io_size=io_size,
                    cache_size=cache_size)
                spikes_attr_dict = dict()
                gid_count = 0
                for iter_count, (gid, selectivity_attr_dict) in \
                        enumerate(selectivity_attr_gen):
                    if gid is not None:
                        context.update(locals())
                        spikes_attr_dict[gid] = \
                            generate_input_spike_trains(env, selectivity_type_names, trajectory,
                                                        gid, selectivity_attr_dict, n_trials=n_trials,
                                                        spike_train_attr_name=spike_train_attr_name,
                                                        spike_hist_resolution=spike_hist_resolution,
                                                        equilibrate=equilibrate,
                                                        spike_hist_sum=this_spike_hist_sum,
                                                        debug=(debug_callback, context) if debug else False)
                        gid_count += 1

                    if (iter_count > 0 and iter_count % write_every == 0) or \
                            (debug and iter_count == 10):
                        total_gid_count = comm.reduce(gid_count,
                                                      root=0,
                                                      op=MPI.SUM)
                        if rank == 0:
                            logger.info(
                                'generated spike trains for %i %s cells' %
                                (total_gid_count, population))

                        if not dry_run:
                            append_cell_attributes(
                                output_path,
                                population,
                                spikes_attr_dict,
                                namespace=output_namespace,
                                comm=comm,
                                io_size=io_size,
                                chunk_size=chunk_size,
                                value_chunk_size=value_chunk_size)
                        del spikes_attr_dict
                        spikes_attr_dict = dict()

                        if debug and iter_count == 10:
                            break

            if not dry_run:
                append_cell_attributes(output_path,
                                       population,
                                       spikes_attr_dict,
                                       namespace=output_namespace,
                                       comm=comm,
                                       io_size=io_size,
                                       chunk_size=chunk_size,
                                       value_chunk_size=value_chunk_size)
                del spikes_attr_dict
                spikes_attr_dict = dict()
            process_time = time.time() - start_time

            total_gid_count = comm.reduce(gid_count, root=0, op=MPI.SUM)
            if rank == 0:
                logger.info(
                    'generated spike trains for %i %s cells in %.2f s' %
                    (total_gid_count, population, process_time))

            if gather:
                spike_hist_sum_dict[population] = this_spike_hist_sum

        if gather:
            this_spike_hist_sum = dict([
                (key, dict(val.items()))
                for key, val in viewitems(spike_hist_sum_dict)
            ])
            spike_hist_sum = comm.gather(this_spike_hist_sum, root=0)

            if rank == 0:
                merged_spike_hist_sum = defaultdict(lambda: defaultdict(
                    lambda: np.zeros(spike_hist_resolution)))
                for each_spike_hist_sum in spike_hist_sum:
                    for population in each_spike_hist_sum:
                        for selectivity_type_name in each_spike_hist_sum[
                                population]:
                            merged_spike_hist_sum[population][selectivity_type_name] = \
                                np.add(merged_spike_hist_sum[population][selectivity_type_name],
                                       each_spike_hist_sum[population][selectivity_type_name])

                if plot:
                    if save_fig is not None:
                        fig_options.saveFig = save_fig

                    plot_summed_spike_psth(t, trajectory_id,
                                           selectivity_type_name,
                                           merged_spike_hist_sum,
                                           spike_hist_resolution, fig_options)

        comm.barrier()

    if is_interactive and rank == 0:
        context.update(locals())
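
The gather step at the end of this example merges per-rank spike-count
histograms by elementwise addition into a nested defaultdict. A minimal
sketch of the same merge without MPI follows; the population names and the
two per-rank dictionaries are synthetic stand-ins for the list that
comm.gather(..., root=0) returns on rank 0.

import numpy as np
from collections import defaultdict

spike_hist_resolution = 1000

# Hypothetical per-rank results, as if returned by comm.gather on rank 0.
rank0 = {'MPP': {'grid': np.ones(spike_hist_resolution)}}
rank1 = {'MPP': {'grid': 2.0 * np.ones(spike_hist_resolution)},
         'LPP': {'place': np.ones(spike_hist_resolution)}}
spike_hist_sum = [rank0, rank1]

merged = defaultdict(lambda: defaultdict(
    lambda: np.zeros(spike_hist_resolution)))
for each in spike_hist_sum:
    for population in each:
        for selectivity_type_name in each[population]:
            merged[population][selectivity_type_name] = \
                np.add(merged[population][selectivity_type_name],
                       each[population][selectivity_type_name])

assert merged['MPP']['grid'][0] == 3.0
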
Example #3

# Imports assumed as in Example #1 (mpi4py, numpy, h5py, sys, os,
# dentate.stimulus, neuroh5.io); get_cell_attributes_index_map and
# select_cell_attributes are additionally assumed to come from the
# neuroh5/dentate API of this codebase version.

def main(file_path, namespace, attribute, population, io_size, cache_size,
         trajectory_id):
    """

    :param file_path: str (path)
    :param namespace: str
    :param attribute: str
    :param population: str
    :param io_size: int
    :param cache_size: int
    :param trajectory_id: int
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%s: %i ranks have been allocated' %
              (os.path.basename(__file__).split('.py')[0], comm.size))
    sys.stdout.flush()

    trajectory_namespace = 'Trajectory %s' % str(trajectory_id)

    arena_dimension = 100.  # minimum distance from origin to boundary (cm)
    default_run_vel = 30.  # cm/s
    spatial_resolution = 1.  # cm

    with h5py.File(file_path, 'a', driver='mpio', comm=comm) as f:
        if trajectory_namespace not in f:
            print('Rank: %i; Creating %s datasets' %
                  (rank, trajectory_namespace))
            group = f.create_group(trajectory_namespace)
            t, x, y, d = stimulus.generate_linear_trajectory(
                arena_dimension=arena_dimension,
                velocity=default_run_vel,
                spatial_resolution=spatial_resolution)
            for key, value in zip(['x', 'y', 'd', 't'], [x, y, d, t]):
                dataset = group.create_dataset(key, (value.shape[0], ),
                                               dtype='float32')
                with dataset.collective:
                    dataset[:] = value.astype('float32', copy=False)
        else:
            print('Rank: %i; Reading %s datasets' %
                  (rank, trajectory_namespace))
            group = f[trajectory_namespace]
            dataset = group['x']
            with dataset.collective:
                x = dataset[:]
            dataset = group['y']
            with dataset.collective:
                y = dataset[:]
            dataset = group['d']
            with dataset.collective:
                d = dataset[:]
            dataset = group['t']
            with dataset.collective:
                t = dataset[:]

    target = population

    pop_ranges, pop_size = read_population_ranges(file_path, comm=comm)
    target_gid_offset = pop_ranges[target][0]

    attr_gen = NeuroH5CellAttrGen(file_path,
                                  target,
                                  comm=comm,
                                  io_size=io_size,
                                  cache_size=cache_size,
                                  namespace=namespace)
    index_map = get_cell_attributes_index_map(comm, file_path, target,
                                              namespace)

    maxiter = 10
    matched = 0
    processed = 0
    for itercount, (target_gid, attr_dict) in enumerate(attr_gen):
        print(
            'Rank: %i received target_gid: %s from the attribute generator.' %
            (rank, str(target_gid)))
        attr_dict2 = select_cell_attributes(
            target_gid,
            comm,
            file_path,
            index_map,
            target,
            namespace,
            population_offset=target_gid_offset)
        if np.all(attr_dict[attribute][:] == attr_dict2[attribute][:]):
            print('Rank: %i; cell attributes match!' % rank)
            matched += 1
        else:
            print('Rank: %i; cell attributes do not match.' % rank)
        comm.barrier()
        processed += 1
        if itercount > maxiter:
            break
    matched = comm.gather(matched, root=0)
    processed = comm.gather(processed, root=0)
    if comm.rank == 0:
        print('%i / %i processed gids had matching cell attributes returned '
              'by both read methods' % (np.sum(matched), np.sum(processed)))
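
A note on the equality test in this example: np.all(a[:] == b[:]) assumes
both reads return arrays of the same shape. In recent NumPy versions an
elementwise comparison of incompatible shapes raises a ValueError (older
versions returned a scalar False with a DeprecationWarning), whereas
np.array_equal simply returns False. A small self-contained sketch with
synthetic arrays, no MPI required:

import numpy as np

a = np.array([1.0, 2.0, 3.0], dtype='float32')
b = a.copy()
c = np.array([1.0, 2.0], dtype='float32')

print(np.all(a[:] == b[:]))   # True: same shape, same values
print(np.array_equal(a, b))   # True: equivalent check
print(np.array_equal(a, c))   # False: shape mismatch handled gracefully
# a == c raises a ValueError (cannot broadcast) in current NumPy; older
# versions returned a scalar False with a DeprecationWarning.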