def main(inst_rates_path, inst_rates_namespace, include, bin_size, nstdev,
         baseline_fraction, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)

    if not include:
        population_names = read_population_names(inst_rates_path)
        for pop in population_names:
            include.append(pop)

    for population in include:

        rate_inst_iter = read_cell_attributes(inst_rates_path,
                                              population,
                                              namespace=inst_rates_namespace)

        rate_inst_dict = dict(rate_inst_iter)

        spikedata.place_fields(population,
                               bin_size,
                               rate_inst_dict,
                               nstdev,
                               baseline_fraction=baseline_fraction,
                               saveData=inst_rates_path)
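
A note on the pattern above: read_cell_attributes returns an iterator over (gid, attribute dictionary) pairs, which is why a single dict() call materializes the per-cell rate data. A minimal standalone sketch, assuming hypothetical file and namespace names:

from neuroh5.io import read_cell_attributes

# 'rates.h5' and 'Instantaneous Rate' are placeholders, not names from the script above.
for gid, attrs in read_cell_attributes('rates.h5', 'GC',
                                       namespace='Instantaneous Rate'):
    print(gid, sorted(attrs.keys()))  # each value is a numpy array of attribute samples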
Example #2
def main(coords_path, coords_namespace):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    print('Allocated %i ranks' % size)

    population_ranges = read_population_ranges(coords_path)[0]
    print(population_ranges)

    soma_coords = {}
    for population in population_ranges.keys():

        print('Population %s' % population)
        it = read_cell_attributes(coords_path,
                                  population,
                                  namespace=coords_namespace)

        print('it = %s' % str(it))
        for cell_gid, coords_dict in it:

            cell_u = coords_dict['U Coordinate'][0]
            cell_v = coords_dict['V Coordinate'][0]

            print('Rank %i: gid = %i u = %f v = %f' %
                  (rank, cell_gid, cell_u, cell_v))
Example #3
def main(coords_path, coords_namespace):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    print('Allocated %i ranks' % size)

    population_ranges = read_population_ranges(coords_path)[0]
    print(population_ranges)

    soma_coords = {}
    for population in sorted(population_ranges.keys()):

        print('Population %s' % population)
        it, tuple_info = read_cell_attributes(coords_path,
                                              population,
                                              namespace=coords_namespace,
                                              return_type='tuple')

        u_index = tuple_info['U Coordinate']
        v_index = tuple_info['V Coordinate']

        for cell_gid, coords_tuple in it:
            cell_u = coords_tuple[u_index]
            cell_v = coords_tuple[v_index]

            print('Rank %i: gid = %i u = %f v = %f' %
                  (rank, cell_gid, cell_u, cell_v))
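
Example #3 requests return_type='tuple', so instead of one dictionary per cell the iterator yields flat tuples plus a single name-to-position map, avoiding per-cell dictionary overhead on large populations. A minimal sketch of the access pattern, with placeholder file and namespace names:

# 'coords.h5' and the namespace below are placeholders for illustration.
it, tuple_info = read_cell_attributes('coords.h5', 'GC',
                                      namespace='Generated Coordinates',
                                      return_type='tuple')
u_index = tuple_info['U Coordinate']  # attribute name -> position in each tuple
for gid, values in it:
    print(gid, values[u_index])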
Example #4
def main(population, features_path, features_namespace, extra_columns):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    population_ranges = read_population_ranges(features_path)[0]


    extra_columns_list = extra_columns.split(",")
    columns = ['Field Width', 'X Offset', 'Y Offset'] + extra_columns_list
    df_dict = {}
    it = read_cell_attributes(features_path,
                              population,
                              namespace=features_namespace)

    for cell_gid, features_dict in it:
        cell_field_width = features_dict['Field Width'][0]
        cell_xoffset = features_dict['X Offset'][0]
        cell_yoffset = features_dict['Y Offset'][0]

        df_dict[cell_gid] = [cell_field_width, cell_xoffset, cell_yoffset]

    df = pd.DataFrame.from_dict(df_dict, orient='index', columns=columns)
    df = df.reindex(sorted(df_dict.keys()))
    df.to_csv('features.%s.csv' % population)
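
The CSV written above is indexed by gid, so it can be loaded back with pandas for downstream analysis. A hedged sketch, assuming the population was named 'GC':

import pandas as pd

df = pd.read_csv('features.GC.csv', index_col=0)  # 'GC' is a placeholder population name
print(df['Field Width'].describe())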
Example #5
def main(config, config_prefix, coords_path, distances_namespace, population, graph_type, bin_size, verbose):

    utils.config_logging(verbose)

    env = Env(config_file=config, config_prefix=config_prefix)

    soma_distances = read_cell_attributes(coords_path, population, namespace=distances_namespace)

    plot.plot_positions(env, population, soma_distances, bin_size=bin_size, graph_type=graph_type, saveFig=True)
Example #6
def main(features_path, cell_type, arena_id, show_fig, save_fig):

    font = {'family': 'normal', 'weight': 'bold', 'size': 6}
    matplotlib.rc('font', **font)

    comm = MPI.COMM_WORLD
    modules = np.arange(10) + 1

    if cell_type == 'grid':
        mpp_grid = read_cell_attributes(features_path, 'MPP',
                                        'Grid Selectivity %s' % arena_id)
        cells_modules_dictionary = gid2module_dictionary([mpp_grid], modules)
    elif cell_type == 'place':
        lpp_place = read_cell_attributes(features_path, 'LPP',
                                         'Place Selectivity %s' % arena_id)
        mpp_place = read_cell_attributes(features_path, 'MPP',
                                         'Place Selectivity %s' % arena_id)
        cells_modules_dictionary = gid2module_dictionary(
            [mpp_place, lpp_place], modules)
    elif cell_type == 'both':
        lpp_place = read_cell_attributes(features_path, 'LPP',
                                         'Place Selectivity %s' % arena_id)
        mpp_place = read_cell_attributes(features_path, 'MPP',
                                         'Place Selectivity %s' % arena_id)
        mpp_grid = read_cell_attributes(features_path, 'MPP',
                                        'Grid Selectivity %s' % arena_id)
        cells_modules_dictionary = gid2module_dictionary(
            [mpp_grid, mpp_place, lpp_place], modules)
    else:
        raise RuntimeError('unknown cell type: %s' % cell_type)

    kwargs = {'ctype': cell_type}
    plot_group(cells_modules_dictionary,
               modules,
               plot=show_fig,
               save=save_fig,
               **kwargs)
Example #7
def read_spike_events(input_file, population_names, namespace_id, spike_train_attr_name='t', time_range=None,
                      max_spikes=None, n_trials=-1, merge_trials=False):
    """
    Reads spike trains from a NeuroH5 file, and returns a dictionary with spike times and cell indices.
    :param input_file: str (path to file)
    :param population_names: list of str
    :param namespace_id: str
    :param spike_train_attr_name: str
    :param time_range: list of float
    :param max_spikes: int
    :param n_trials: int
    :param merge_trials: bool
    :return: dict
    """
    assert (n_trials >= 1) or (n_trials == -1)

    spkpoplst = []
    spkindlst = []
    spktlst = []
    spktrials = []
    num_cell_spks = {}
    pop_active_cells = {}

    tmin = float('inf')
    tmax = 0.

    for pop_name in population_names:

        if time_range is None or time_range[1] is None:
            logger.info('Reading spike data for population %s...' % pop_name)
        else:
            logger.info('Reading spike data for population %s in time range %s...' % (pop_name, str(time_range)))

        spkiter = read_cell_attributes(input_file, pop_name, namespace=namespace_id)
        this_num_cell_spks = 0
        active_set = set([])

        pop_spkindlst = []
        pop_spktlst = []
        pop_spktriallst = []

        # Time Range
        if time_range is None or time_range[1] is None:
            for spkind, spkts in spkiter:
                slen = len(spkts[spike_train_attr_name])
                trial_dur = spkts.get('Trial Duration', np.asarray([0.]))
                trial_ind = spkts.get('Trial Index', np.zeros((slen,), dtype=np.uint8))
                if n_trials == -1:
                    n_trials = trial_dur.shape[0]
                for spk_i, spkt in enumerate(spkts[spike_train_attr_name]):
                    trial_i = trial_ind[spk_i]
                    if trial_i >= n_trials:
                        continue
                    if merge_trials:
                        spkt += np.sum(trial_dur[:trial_i])
                    pop_spkindlst.append(spkind)
                    pop_spktlst.append(spkt)
                    pop_spktriallst.append(trial_i)
                    if spkt < tmin:
                        tmin = spkt
                    if spkt > tmax:
                        tmax = spkt
                    this_num_cell_spks += 1
                    active_set.add(spkind)
        else:
            if time_range[0] is None:
                time_range[0] = 0.0
            time_span = time_range[1] - time_range[0]
            for spkind, spkts in spkiter:
                trial_dur = spkts.get('Trial Duration', np.asarray([0.]))
                trial_dur[1:] = np.asarray([max(x, time_span) for x in trial_dur[1:] ],
                                           dtype=np.float32)
                trial_ind = spkts.get('Trial Index', np.zeros((len(spkts[spike_train_attr_name]),), dtype=np.uint8))
                if n_trials == -1:
                    n_trials = trial_dur.shape[0]
                for spk_i, spkt in enumerate(spkts[spike_train_attr_name]):
                    trial_i = trial_ind[spk_i]
                    if trial_i >= n_trials:
                        continue
                    if time_range[0] <= spkt <= time_range[1]:
                        if merge_trials:
                            spkt += np.sum(trial_dur[:trial_i])
                        pop_spkindlst.append(spkind)
                        pop_spktlst.append(spkt)
                        pop_spktriallst.append(trial_i)
                        if spkt < tmin:
                            tmin = spkt
                        if spkt > tmax:
                            tmax = spkt
                        this_num_cell_spks += 1
                        active_set.add(spkind)

        if not active_set:
            continue

        pop_active_cells[pop_name] = active_set
        num_cell_spks[pop_name] = this_num_cell_spks

        pop_spkts = np.asarray(pop_spktlst, dtype=np.float32)
        del pop_spktlst
        pop_spkinds = np.asarray(pop_spkindlst, dtype=np.uint32)
        del pop_spkindlst
        pop_spktrials = np.asarray(pop_spktriallst, dtype=np.uint32)
        del pop_spktriallst

        # Limit to max_spikes
        if (max_spikes is not None) and (len(pop_spkts) > max_spikes):
            logger.warning(' Reading only randomly sampled %i out of %i spikes for population %s' %
                           (max_spikes, len(pop_spkts), pop_name))
            sample_inds = np.random.randint(0, len(pop_spkinds), size=int(max_spikes))
            pop_spkts = pop_spkts[sample_inds]
            pop_spkinds = pop_spkinds[sample_inds]
            pop_spktrials = pop_spktrials[sample_inds]
            tmax = max(tmax, max(pop_spkts))

        spkpoplst.append(pop_name)
        pop_trial_spkindlst = []
        pop_trial_spktlst = []
        for trial_i in range(n_trials):
            trial_idxs = np.where(pop_spktrials == trial_i)[0]
            sorted_trial_idxs = np.argsort(pop_spkts[trial_idxs])
            pop_trial_spktlst.append(np.take(pop_spkts[trial_idxs], sorted_trial_idxs))
            pop_trial_spkindlst.append(np.take(pop_spkinds[trial_idxs], sorted_trial_idxs))
                
        del pop_spkts
        del pop_spkinds
        del pop_spktrials

        if merge_trials:
            pop_spkinds = np.concatenate(pop_trial_spkindlst)
            pop_spktlst = np.concatenate(pop_trial_spktlst)
            spkindlst.append(pop_spkinds)
            spktlst.append(pop_spktlst)
        else:
            spkindlst.append(pop_trial_spkindlst)
            spktlst.append(pop_trial_spktlst)
            

        logger.info(' Read %i spikes for population %s' % (this_num_cell_spks, pop_name))

    return {'spkpoplst': spkpoplst, 'spktlst': spktlst, 'spkindlst': spkindlst, 'tmin': tmin, 'tmax': tmax,
            'pop_active_cells': pop_active_cells, 'num_cell_spks': num_cell_spks,
            'n_trials': n_trials}
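
A usage sketch for read_spike_events, with hypothetical file and namespace names; with merge_trials=True each entry of spktlst is a single sorted spike-time array per population:

spkdata = read_spike_events('spikes.h5', ['GC'], 'Spike Events',
                            time_range=[500.0, 1500.0], merge_trials=True)
print(spkdata['n_trials'], spkdata['tmin'], spkdata['tmax'])
for pop_name, spkts in zip(spkdata['spkpoplst'], spkdata['spktlst']):
    print(pop_name, len(spkts))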
Example #8
def read_state(input_file,
               population_names,
               namespace_id,
               time_variable='t',
               state_variable='v',
               time_range=None,
               max_units=None,
               gid=None,
               comm=None,
               n_trials=-1):
    if comm is None:
        comm = MPI.COMM_WORLD
    pop_state_dict = {}

    logger.info(
        'Reading state data from populations %s, namespace %s gid = %s...' %
        (str(population_names), namespace_id, str(gid)))

    attr_info_dict = read_cell_attribute_info(input_file,
                                              populations=population_names,
                                              read_cell_index=True)

    for pop_name in population_names:
        cell_index = None
        pop_state_dict[pop_name] = {}
        for attr_name, attr_cell_index in attr_info_dict[pop_name][
                namespace_id]:
            if state_variable == attr_name:
                cell_index = attr_cell_index

        if cell_index is None:
            raise RuntimeError(
                'read_state: Unable to find recordings for state variable %s in population %s namespace %s'
                % (state_variable, pop_name, str(namespace_id)))
        cell_set = set(cell_index)

        # Limit to max_units
        if gid is None:
            if (max_units is not None) and (len(cell_set) > max_units):
                logger.info(
                    '  Reading only randomly sampled %i out of %i units for population %s'
                    % (max_units, len(cell_set), pop_name))
                sample_inds = np.random.randint(0,
                                                len(cell_set),
                                                size=int(max_units))
                cell_set_lst = list(cell_set)
                gid_set = set([cell_set_lst[i] for i in sample_inds])
            else:
                gid_set = cell_set
        else:
            gid_set = set(gid)

        state_dict = {}
        if gid is None:
            valiter = read_cell_attributes(input_file,
                                           pop_name,
                                           namespace=namespace_id,
                                           comm=comm)
        else:
            valiter = read_cell_attribute_selection(input_file,
                                                    pop_name,
                                                    namespace=namespace_id,
                                                    selection=list(gid_set),
                                                    comm=comm)

        if time_range is None:
            for cellind, vals in valiter:
                if cellind is not None:
                    trial_dur = vals.get('trial duration', None)
                    distance = vals.get('distance', [None])[0]
                    section = vals.get('section', [None])[0]
                    loc = vals.get('loc', [None])[0]
                    tvals = np.asarray(vals[time_variable], dtype=np.float32)
                    svals = np.asarray(vals[state_variable], dtype=np.float32)
                    trial_bounds = list(np.where(np.isclose(tvals, tvals[0], atol=1e-4))[0])
                    n_trial_bounds = len(trial_bounds)
                    trial_bounds.append(len(tvals))
                    if n_trials == -1:
                        this_n_trials = n_trial_bounds
                    else:
                        this_n_trials = min(n_trial_bounds, n_trials)

                    if this_n_trials > 1:
                        state_dict[cellind] = (np.split(tvals, trial_bounds[1:n_trials]),
                                               np.split(svals, trial_bounds[1:n_trials]),
                                               distance, section, loc)
                    else:
                        state_dict[cellind] = ([tvals[:trial_bounds[1]]],
                                               [svals[:trial_bounds[1]]],
                                               distance, section, loc)

        else:
            for cellind, vals in valiter:
                if cellind is not None:
                    distance = vals.get('distance', [None])[0]
                    section = vals.get('section', [None])[0]
                    loc = vals.get('loc', [None])[0]
                    tinds = np.argwhere((vals[time_variable] <= time_range[1]) &
                                        (vals[time_variable] >= time_range[0]))
                    tvals = np.asarray(vals[time_variable][tinds],
                                       dtype=np.float32).reshape((-1,))
                    svals = np.asarray(vals[state_variable][tinds],
                                       dtype=np.float32).reshape((-1,))
                    trial_bounds = list(np.where(np.isclose(tvals, tvals[0], atol=1e-4))[0])
                    n_trial_bounds = len(trial_bounds)
                    trial_bounds.append(len(tvals))
                    if n_trials == -1:
                        this_n_trials = n_trial_bounds
                    else:
                        this_n_trials = min(n_trial_bounds, n_trials)

                    if this_n_trials > 1:
                        state_dict[cellind] = (np.split(tvals, trial_bounds[1:n_trials]),
                                               np.split(svals, trial_bounds[1:n_trials]),
                                               distance, section, loc)
                    else:
                        state_dict[cellind] = ([tvals[:trial_bounds[1]]],
                                               [svals[:trial_bounds[1]]],
                                               distance, section, loc)

        pop_state_dict[pop_name] = state_dict

    return {
        'states': pop_state_dict,
        'time_variable': time_variable,
        'state_variable': state_variable
    }
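
A usage sketch for read_state, with placeholder file, namespace, and gid; each per-cell entry is (list of time arrays, list of state arrays, distance, section, loc), one array per trial:

state = read_state('results.h5', ['GC'], 'Intracellular soma',
                   time_variable='t', state_variable='v',
                   time_range=[0.0, 1000.0], gid=[500])
tvals, svals, distance, section, loc = state['states']['GC'][500]
print(len(tvals), tvals[0].shape, svals[0][:5])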
Example #9
def main(config, config_prefix, include, forest_path, connectivity_path,
         connectivity_namespace, coords_path, coords_namespace,
         synapses_namespace, distances_namespace, resolution,
         interp_chunk_size, io_size, chunk_size, value_chunk_size, cache_size,
         write_size, verbose, dry_run, debug):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config, config_prefix=config_prefix)
    configure_hoc_env(env)

    connection_config = env.connection_config
    extent = {}

    if (not dry_run) and (rank == 0):
        if not os.path.isfile(connectivity_path):
            input_file = h5py.File(coords_path, 'r')
            output_file = h5py.File(connectivity_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    population_ranges = read_population_ranges(coords_path)[0]
    populations = sorted(list(population_ranges.keys()))

    color = 0
    if rank == 0:
        color = 1
    comm0 = comm.Split(color, 0)

    soma_distances = {}
    soma_coords = {}
    for population in populations:
        if rank == 0:
            logger.info(f'Reading {population} coordinates...')
            coords_iter = read_cell_attributes(
                coords_path,
                population,
                comm=comm0,
                mask=set(['U Coordinate', 'V Coordinate', 'L Coordinate']),
                namespace=coords_namespace)
            distances_iter = read_cell_attributes(
                coords_path,
                population,
                comm=comm0,
                mask=set(['U Distance', 'V Distance']),
                namespace=distances_namespace)

            soma_coords[population] = {
                k: (float(v['U Coordinate'][0]), float(v['V Coordinate'][0]),
                    float(v['L Coordinate'][0]))
                for (k, v) in coords_iter
            }

            distances = {
                k: (float(v['U Distance'][0]), float(v['V Distance'][0]))
                for (k, v) in distances_iter
            }

            if len(distances) > 0:
                soma_distances[population] = distances

            gc.collect()

    comm.barrier()
    comm0.Free()

    soma_distances = comm.bcast(soma_distances, root=0)
    soma_coords = comm.bcast(soma_coords, root=0)

    forest_populations = sorted(read_population_names(forest_path))
    if (include is None) or (len(include) == 0):
        destination_populations = forest_populations
    else:
        destination_populations = []
        for p in include:
            if p in forest_populations:
                destination_populations.append(p)
    if rank == 0:
        logger.info(
            f'Generating connectivity for populations {destination_populations}...'
        )

    if len(soma_distances) == 0:
        # NB: nsample is assumed to be defined at module scope in the original script
        (origin_ranges, ip_dist_u,
         ip_dist_v) = make_distance_interpolant(env,
                                                resolution=resolution,
                                                nsample=nsample)
        ip_dist = (origin_ranges, ip_dist_u, ip_dist_v)
        soma_distances = measure_distances(env,
                                           soma_coords,
                                           ip_dist,
                                           resolution=resolution)

    for destination_population in destination_populations:

        if rank == 0:
            logger.info(
                f'Generating connection probabilities for population {destination_population}...'
            )

        connection_prob = ConnectionProb(destination_population, soma_coords, soma_distances, \
                                         env.connection_extents)

        synapse_seed = int(
            env.model_config['Random Seeds']['Synapse Projection Partitions'])

        connectivity_seed = int(env.model_config['Random Seeds']
                                ['Distance-Dependent Connectivity'])
        cluster_seed = int(
            env.model_config['Random Seeds']['Connectivity Clustering'])

        if rank == 0:
            logger.info(
                f'Generating connections for population {destination_population}...'
            )

        populations_dict = env.model_config['Definitions']['Populations']
        generate_uv_distance_connections(comm,
                                         populations_dict,
                                         connection_config,
                                         connection_prob,
                                         forest_path,
                                         synapse_seed,
                                         connectivity_seed,
                                         cluster_seed,
                                         synapses_namespace,
                                         connectivity_namespace,
                                         connectivity_path,
                                         io_size,
                                         chunk_size,
                                         value_chunk_size,
                                         cache_size,
                                         write_size,
                                         dry_run=dry_run,
                                         debug=debug)
    MPI.Finalize()
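
The rank-0 read-and-broadcast pattern above recurs throughout these examples: comm.Split puts rank 0 alone in its own communicator so only one rank opens the file, and the parsed result is then broadcast to everyone. Stripped to its essentials (file, population, and namespace names are placeholders):

comm = MPI.COMM_WORLD
comm0 = comm.Split(1 if comm.rank == 0 else 0, 0)

coords = {}
if comm.rank == 0:
    it = read_cell_attributes('coords.h5', 'GC', comm=comm0,
                              namespace='Generated Coordinates')
    coords = {gid: float(attrs['U Coordinate'][0]) for gid, attrs in it}
comm.barrier()
comm0.Free()
coords = comm.bcast(coords, root=0)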
Example #10
def main(include, coords_path, coords_namespace, distances_namespace, resolution, io_size,
         verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))
    
    comm = MPI.COMM_WORLD
    rank = comm.rank

    
    population_ranges = read_population_ranges(coords_path)[0]
    populations = sorted(list(population_ranges.keys()))

    color = 0
    if rank == 0:
        color = 1
    comm0 = comm.Split(color, 0)

    soma_distances = {}
    soma_coords = {}
    for population in populations:
        if rank == 0:
            logger.info(f'Reading {population} coordinates...')
            coords_iter = read_cell_attributes(coords_path, population, comm=comm0,
                                               mask=set(['U Coordinate', 'V Coordinate', 'L Coordinate']),
                                               namespace=coords_namespace)
            distances_iter = read_cell_attributes(coords_path, population, comm=comm0,
                                                  mask=set(['U Distance', 'V Distance']),
                                                  namespace=distances_namespace)

            soma_coords[population] = { k: (float(v['U Coordinate'][0]), 
                                            float(v['V Coordinate'][0]), 
                                            float(v['L Coordinate'][0])) for (k,v) in coords_iter }

            distances = { k: (float(v['U Distance'][0]), 
                              float(v['V Distance'][0])) for (k,v) in distances_iter }
            
            if len(distances) > 0:
                soma_distances[population] = distances
        
            gc.collect()

    comm.barrier()
    comm0.Free()

    soma_distances = comm.bcast(soma_distances, root=0)
    soma_coords = comm.bcast(soma_coords, root=0)

    extra_columns_list = extra_columns.split(",")
    columns = ['Field Width', 'X Offset', 'Y Offset']+extra_columns_list
    df_dict = {}
    it = read_cell_attributes(features_path, population, 
                              namespace=features_namespace)

    for cell_gid, features_dict in it:
        cell_field_width = features_dict['Field Width'][0]
        cell_xoffset = features_dict['X Offset'][0]
        cell_yoffset = features_dict['Y Offset'][0]
        
        df_dict[cell_gid] = [cell_field_width, cell_xoffset, cell_yoffset]

        
    df = pd.DataFrame.from_dict(df_dict, orient='index', columns=columns)
    df = df.reindex(sorted(df_dict.keys()))
    df.to_csv('features.%s.csv' % population)


    MPI.Finalize()
Example #11
def vertex_distribution(connectivity_path,
                        coords_path,
                        distances_namespace,
                        destination,
                        sources,
                        bin_size=20.0,
                        cache_size=100,
                        comm=None):
    """
    Obtain spatial histograms of source vertices connecting to a given destination population.

    :param connectivity_path:
    :param coords_path:
    :param distances_namespace: 
    :param destination: 
    :param source: 

    """

    if comm is None:
        comm = MPI.COMM_WORLD

    rank = comm.Get_rank()

    color = 0
    if rank == 0:
        color = 1
    comm0 = comm.Split(color, 0)

    (population_ranges, _) = read_population_ranges(coords_path)

    destination_start = population_ranges[destination][0]
    destination_count = population_ranges[destination][1]

    destination_soma_distances = {}
    if rank == 0:
        logger.info(f'Reading {destination} distances...')
        distances_iter = read_cell_attributes(
            coords_path,
            destination,
            comm=comm0,
            mask=set(['U Distance', 'V Distance']),
            namespace=distances_namespace)

        destination_soma_distances = {
            k: (float(v['U Distance'][0]), float(v['V Distance'][0]))
            for (k, v) in distances_iter
        }

        gc.collect()

    comm.barrier()

    destination_soma_distances = comm.bcast(destination_soma_distances, root=0)
    destination_soma_distance_U = {}
    destination_soma_distance_V = {}
    for k, v in viewitems(destination_soma_distances):
        destination_soma_distance_U[k] = v[0]
        destination_soma_distance_V[k] = v[1]

    del destination_soma_distances

    if sources == ():
        sources = []
        for (src, dst) in read_projection_names(connectivity_path):
            if dst == destination:
                sources.append(src)

    source_soma_distances = {}
    if rank == 0:
        for s in sources:
            logger.info(f'Reading {s} distances...')
            distances_iter = read_cell_attributes(
                coords_path,
                s,
                comm=comm0,
                mask=set(['U Distance', 'V Distance']),
                namespace=distances_namespace)

            source_soma_distances[s] = {
                k: (float(v['U Distance'][0]), float(v['V Distance'][0]))
                for (k, v) in distances_iter
            }

            gc.collect()

    comm.barrier()
    comm0.Free()

    source_soma_distances = comm.bcast(source_soma_distances, root=0)

    source_soma_distance_U = {}
    source_soma_distance_V = {}
    for s in sources:
        this_source_soma_distance_U = {}
        this_source_soma_distance_V = {}
        for k, v in viewitems(source_soma_distances[s]):
            this_source_soma_distance_U[k] = v[0]
            this_source_soma_distance_V[k] = v[1]
        source_soma_distance_U[s] = this_source_soma_distance_U
        source_soma_distance_V[s] = this_source_soma_distance_V
    del source_soma_distances

    if rank == 0:
        logger.info('Reading connections %s -> %s...' %
                    (str(sources), destination))

    dist_bins = defaultdict(dict)
    dist_u_bins = defaultdict(dict)
    dist_v_bins = defaultdict(dict)

    gg = [
        NeuroH5ProjectionGen(connectivity_path,
                             source,
                             destination,
                             cache_size=cache_size,
                             comm=comm) for source in sources
    ]

    for prj_gen_tuple in zip_longest(*gg):
        destination_gid = prj_gen_tuple[0][0]
        if rank == 0 and destination_gid is not None:
            logger.info('%d' % destination_gid)
        if not all([
                prj_gen_elt[0] == destination_gid
                for prj_gen_elt in prj_gen_tuple
        ]):
            raise RuntimeError(
                'destination %s: destination gid %i not matched across multiple projection generators: '
                '%s' % (destination, destination_gid,
                        [prj_gen_elt[0] for prj_gen_elt in prj_gen_tuple]))

        if destination_gid is not None:
            for (source, (this_destination_gid,
                          rest)) in zip_longest(sources, prj_gen_tuple):
                this_source_soma_distance_U = source_soma_distance_U[source]
                this_source_soma_distance_V = source_soma_distance_V[source]
                this_dist_bins = dist_bins[source]
                this_dist_u_bins = dist_u_bins[source]
                this_dist_v_bins = dist_v_bins[source]
                (source_indexes, attr_dict) = rest
                dst_U = destination_soma_distance_U[destination_gid]
                dst_V = destination_soma_distance_V[destination_gid]
                for source_gid in source_indexes:
                    dist_u = dst_U - this_source_soma_distance_U[source_gid]
                    dist_v = dst_V - this_source_soma_distance_V[source_gid]
                    dist = abs(dist_u) + abs(dist_v)

                    update_bins(this_dist_bins, bin_size, dist)
                    update_bins(this_dist_u_bins, bin_size, dist_u)
                    update_bins(this_dist_v_bins, bin_size, dist_v)

    add_bins_op = MPI.Op.Create(add_bins, commute=True)
    for source in sources:
        dist_bins[source] = comm.reduce(dist_bins[source], op=add_bins_op)
        dist_u_bins[source] = comm.reduce(dist_u_bins[source], op=add_bins_op)
        dist_v_bins[source] = comm.reduce(dist_v_bins[source], op=add_bins_op)

    dist_hist_dict = defaultdict(dict)
    dist_u_hist_dict = defaultdict(dict)
    dist_v_hist_dict = defaultdict(dict)

    if rank == 0:
        for source in sources:
            dist_hist_dict[destination][source] = finalize_bins(
                dist_bins[source], bin_size)
            dist_u_hist_dict[destination][source] = finalize_bins(
                dist_u_bins[source], bin_size)
            dist_v_hist_dict[destination][source] = finalize_bins(
                dist_v_bins[source], bin_size)

    return {
        'Total distance': dist_hist_dict,
        'U distance': dist_u_hist_dict,
        'V distance': dist_v_hist_dict
    }
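
vertex_distribution relies on three helpers (update_bins, add_bins, finalize_bins) defined elsewhere; the following is a plausible dict-of-counts sketch of them, an assumption rather than the library's actual code:

import numpy as np

def update_bins(bins, bin_size, value):
    # accumulate a count in the bin containing value
    bin_index = int(np.floor(value / bin_size))
    bins[bin_index] = bins.get(bin_index, 0) + 1

def add_bins(bins1, bins2, datatype=None):
    # commutative merge of two bin-count dictionaries (usable as an MPI reduce op)
    for k, v in bins2.items():
        bins1[k] = bins1.get(k, 0) + v
    return bins1

def finalize_bins(bins, bin_size):
    # convert bin counts into (counts, left-edge) arrays for plotting
    indices = sorted(bins)
    counts = np.asarray([bins[i] for i in indices])
    edges = np.asarray([i * bin_size for i in indices])
    return counts, edges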
Example #12
def vertex_metrics(connectivity_path,
                   coords_path,
                   vertex_metrics_namespace,
                   distances_namespace,
                   destination,
                   sources,
                   bin_size=50.,
                   metric='Indegree'):
    """
    Obtain vertex metrics with respect to septo-temporal position (longitudinal and transverse arc distances to reference points).

    :param connectivity_path:
    :param coords_path:
    :param distances_namespace: 
    :param destination: 
    :param source: 
    :param bin_size: 
    :param metric: 

    """

    (population_ranges, _) = read_population_ranges(coords_path)

    destination_start = population_ranges[destination][0]
    destination_count = population_ranges[destination][1]

    if sources == ():
        sources = []
        for (src, dst) in read_projection_names(connectivity_path):
            if dst == destination:
                sources.append(src)

    degrees_dict = {}
    with h5py.File(connectivity_path, 'r') as f:
        for source in sources:
            degrees_dict[source] = f['Nodes'][vertex_metrics_namespace][
                '%s %s -> %s' %
                (metric, source,
                 destination)]['Attribute Value'][0:destination_count]

    for source in sources:
        logger.info('projection: %s -> %s: max: %i min: %i mean: %.2f stdev: %.2f (%d units)' %
                    (source, destination,
                     np.max(degrees_dict[source]),
                     np.min(degrees_dict[source]),
                     np.mean(degrees_dict[source]),
                     np.std(degrees_dict[source]),
                     len(degrees_dict[source])))

    if metric == 'Indegree':
        distances = read_cell_attributes(coords_path,
                                         destination,
                                         namespace=distances_namespace)
        soma_distances = {
            k: (v['U Distance'][0], v['V Distance'][0])
            for (k, v) in distances
        }
        del distances
    elif metric == 'Outdegree':
        distances = read_cell_attributes(coords_path,
                                         sources[0],
                                         namespace=distances_namespace)
        soma_distances = {
            k: (v['U Distance'][0], v['V Distance'][0])
            for (k, v) in distances
        }
        del distances
    else:
        raise RuntimeError('vertex_metrics: unknown metric %s' % metric)

    gids = sorted(soma_distances.keys())
    distance_U = np.asarray([soma_distances[gid][0] for gid in gids])
    distance_V = np.asarray([soma_distances[gid][1] for gid in gids])

    return (distance_U, distance_V, degrees_dict)
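
A usage sketch for vertex_metrics, with placeholder paths and namespaces; an empty sources tuple means every projection terminating at the destination is included:

distance_U, distance_V, degrees_dict = vertex_metrics(
    'connectivity.h5', 'coords.h5', 'Vertex Metrics',
    'Arc Distances', 'GC', (), metric='Indegree')
print(distance_U.shape, sorted(degrees_dict.keys()))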
Example #13
def main(arena_id, config, config_prefix, dataset_prefix, distances_namespace,
         distance_limits, spike_input_path, spike_input_namespace,
         spike_input_attr, output_path, io_size, trajectory_id,
         write_selection, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank
    if io_size == -1:
        io_size = comm.size

    env = Env(comm=comm, config_file=config, 
              config_prefix=config_prefix, dataset_prefix=dataset_prefix, 
              results_path=output_path, spike_input_path=spike_input_path, 
              spike_input_namespace=spike_input_namespace, spike_input_attr=spike_input_attr,
              arena_id=arena_id, trajectory_id=trajectory_id, io_size=io_size)

    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    pop_ranges, pop_size = read_population_ranges(env.connectivity_file_path, comm=comm)

    distance_U_dict = {}
    distance_V_dict = {}
    range_U_dict = {}
    range_V_dict = {}

    selection_dict = defaultdict(set)

    comm0 = env.comm.Split(2 if rank == 0 else 0, 0)

    if rank == 0:
        for population in pop_ranges:
            distances = read_cell_attributes(env.data_file_path, population, namespace=distances_namespace, comm=comm0)
            soma_distances = { k: (v['U Distance'][0], v['V Distance'][0]) for (k,v) in distances }
            del distances
        
            numitems = len(list(soma_distances.keys()))
            logger.info('read %s distances (%i elements)' % (population, numitems))

            if numitems == 0:
                continue

            distance_U_array = np.asarray([soma_distances[gid][0] for gid in soma_distances])
            distance_V_array = np.asarray([soma_distances[gid][1] for gid in soma_distances])

            U_min = np.min(distance_U_array)
            U_max = np.max(distance_U_array)
            V_min = np.min(distance_V_array)
            V_max = np.max(distance_V_array)

            range_U_dict[population] = (U_min, U_max)
            range_V_dict[population] = (V_min, V_max)
            
            distance_U = { gid: soma_distances[gid][0] for gid in soma_distances }
            distance_V = { gid: soma_distances[gid][1] for gid in soma_distances }
            
            distance_U_dict[population] = distance_U
            distance_V_dict[population] = distance_V
            
            min_dist = U_min
            max_dist = U_max 
            if distance_limits:
                min_dist = distance_limits[0]
                max_dist = distance_limits[1]

            selection_dict[population] = set([k for k in distance_U
                                              if min_dist <= distance_U[k] <= max_dist])
    
        yaml_output_dict = {}
        for k, v in utils.viewitems(selection_dict):
            yaml_output_dict[k] = list(v)
         
        yaml_output_path = '%s/DG_slice.yaml' % output_path
        with open(yaml_output_path, 'w') as outfile:
            yaml.dump(yaml_output_dict, outfile)

        del yaml_output_dict

    env.comm.barrier()

    write_selection_file_path = None
    if write_selection:
        write_selection_file_path = "%s/%s_selection.h5" % (env.results_path, env.modelName)

    if write_selection_file_path is not None:
        if rank == 0:
            io_utils.mkout(env, write_selection_file_path)
        env.comm.barrier()
        selection_dict = env.comm.bcast(dict(selection_dict), root=0)
        env.cell_selection = selection_dict
        io_utils.write_cell_selection(env, write_selection_file_path)
        input_selection = io_utils.write_connection_selection(env, write_selection_file_path)
        io_utils.write_input_cell_selection(env, input_selection, write_selection_file_path)
Example #14
    mapping = {name: idx for name, start, count, idx in defs}
    dt = h5py.special_dtype(enum=(np.uint16, mapping))
    h5[path_population_labels] = dt

    dt = np.dtype([("Start", np.uint64), ("Count", np.uint32),
                   ("Population", h5[path_population_labels].dtype)])
    h5[path_population_range] = dt

    # create an HDF5 compound type for population ranges
    dt = h5[path_population_range].dtype

    g = h5_get_group(h5, grp_h5types)

    dset = h5_get_dataset(g, grp_populations, maxshape=(n_pop, ), dtype=dt)
    dset.resize((n_pop, ))
    a = np.zeros(n_pop, dtype=dt)
    idx = 0
    for name, start, count, pop_idx in defs:
        a[idx]["Start"] = start
        a[idx]["Count"] = count
        a[idx]["Population"] = pop_idx
        idx += 1

    dset[:] = a
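
The fragment above builds the enum and compound HDF5 types used for a population-range table. A self-contained h5py sketch of the same construction, with made-up population definitions:

import h5py
import numpy as np

# hypothetical (name, start, count, index) definitions
defs = [('GC', 0, 1000, 0), ('MC', 1000, 200, 1)]

with h5py.File('types_demo.h5', 'w') as h5:
    mapping = {name: idx for name, start, count, idx in defs}
    enum_dt = h5py.special_dtype(enum=(np.uint16, mapping))
    h5['population_labels'] = enum_dt  # commit the enum as a named type
    comp_dt = np.dtype([('Start', np.uint64), ('Count', np.uint32),
                        ('Population', h5['population_labels'].dtype)])
    a = np.zeros(len(defs), dtype=comp_dt)
    for i, (name, start, count, pop_idx) in enumerate(defs):
        a[i] = (start, count, pop_idx)
    dset = h5.create_dataset('Populations', data=a)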

write_cell_attributes(output_path,
                      pop_name,
                      attr_dict,
                      namespace='Test Attributes')
print(list(read_cell_attributes(output_path, pop_name, 'Test Attributes')))
Example #15
def main(arena_id, config, config_prefix, dataset_prefix, distances_namespace,
         spike_input_path, spike_input_namespace, spike_input_attr,
         input_features_namespaces, input_features_path, selection_path,
         output_path, io_size, trajectory_id, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank
    if io_size == -1:
        io_size = comm.size

    env = Env(comm=comm, config_file=config, 
              config_prefix=config_prefix, dataset_prefix=dataset_prefix, 
              results_path=output_path, spike_input_path=spike_input_path, 
              spike_input_namespace=spike_input_namespace, spike_input_attr=spike_input_attr,
              arena_id=arena_id, trajectory_id=trajectory_id, io_size=io_size)

    selection = set()
    with open(selection_path, 'r') as f:
        for line in f:
            selection.add(int(line))

    pop_ranges, pop_size = read_population_ranges(env.connectivity_file_path, comm=comm)

    distance_U_dict = {}
    distance_V_dict = {}
    range_U_dict = {}
    range_V_dict = {}

    selection_dict = defaultdict(set)

    comm0 = env.comm.Split(2 if rank == 0 else 0, 0)

    if rank == 0:
        for population in pop_ranges:
            distances = read_cell_attributes(env.data_file_path, population, namespace=distances_namespace, comm=comm0)
            soma_distances = { k: (v['U Distance'][0], v['V Distance'][0]) for (k,v) in distances }
            del distances
        
            numitems = len(list(soma_distances.keys()))

            if numitems == 0:
                continue

            distance_U_array = np.asarray([soma_distances[gid][0] for gid in soma_distances])
            distance_V_array = np.asarray([soma_distances[gid][1] for gid in soma_distances])

            U_min = np.min(distance_U_array)
            U_max = np.max(distance_U_array)
            V_min = np.min(distance_V_array)
            V_max = np.max(distance_V_array)

            range_U_dict[population] = (U_min, U_max)
            range_V_dict[population] = (V_min, V_max)
            
            distance_U = { gid: soma_distances[gid][0] for gid in soma_distances }
            distance_V = { gid: soma_distances[gid][1] for gid in soma_distances }
            
            distance_U_dict[population] = distance_U
            distance_V_dict[population] = distance_V
            
            min_dist = U_min
            max_dist = U_max 

            selection_dict[population] = set([ k for k in distance_U if k in selection ])
    

    env.comm.barrier()

    write_selection_file_path = "%s/%s_selection.h5" % (env.results_path, env.modelName)

    if rank == 0:
        io_utils.mkout(env, write_selection_file_path)
    env.comm.barrier()
    selection_dict = env.comm.bcast(dict(selection_dict), root=0)
    env.cell_selection = selection_dict
    io_utils.write_cell_selection(env, write_selection_file_path)
    input_selection = io_utils.write_connection_selection(env, write_selection_file_path)
    if spike_input_path:
        io_utils.write_input_cell_selection(env, input_selection, write_selection_file_path)
    if input_features_path:
        for this_input_features_namespace in sorted(input_features_namespaces):
            for population in sorted(input_selection):
                logger.info(f"Extracting input features {this_input_features_namespace} for population {population}...")
                it = read_cell_attribute_selection(input_features_path, population, 
                                                   namespace=f"{this_input_features_namespace} {arena_id}", 
                                                   selection=input_selection[population], comm=env.comm)
                output_features_dict = { cell_gid : cell_features_dict for cell_gid, cell_features_dict in it }
                append_cell_attributes(write_selection_file_path, population, output_features_dict,
                                       namespace=f"{this_input_features_namespace} {arena_id}", 
                                       io_size=io_size, comm=env.comm)
    env.comm.barrier()
Example #16
def main(arena_id, bin_sample_count, config, config_prefix, dataset_prefix,
         distances_namespace, distance_bin_extent, input_features_path,
         input_features_namespaces, populations, spike_input_path,
         spike_input_namespace, spike_input_attr, output_path, io_size,
         trajectory_id, write_selection, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm,
              config_file=config,
              config_prefix=config_prefix,
              dataset_prefix=dataset_prefix,
              results_path=output_path,
              spike_input_path=spike_input_path,
              spike_input_namespace=spike_input_namespace,
              spike_input_attr=spike_input_attr,
              arena_id=arena_id,
              trajectory_id=trajectory_id)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    pop_ranges, pop_size = read_population_ranges(env.connectivity_file_path,
                                                  comm=comm)

    distance_U_dict = {}
    distance_V_dict = {}
    range_U_dict = {}
    range_V_dict = {}

    selection_dict = defaultdict(set)

    comm0 = env.comm.Split(2 if rank == 0 else 0, 0)

    local_random = np.random.RandomState()
    local_random.seed(1000)

    if len(populations) == 0:
        populations = sorted(pop_ranges.keys())

    if rank == 0:
        for population in populations:
            distances = read_cell_attributes(env.data_file_path,
                                             population,
                                             namespace=distances_namespace,
                                             comm=comm0)

            soma_distances = {}
            if input_features_path is not None:
                num_fields_dict = {}
                for input_features_namespace in input_features_namespaces:
                    if arena_id is not None:
                        this_features_namespace = '%s %s' % (
                            input_features_namespace, arena_id)
                    else:
                        this_features_namespace = input_features_namespace
                    input_features_iter = read_cell_attributes(
                        input_features_path,
                        population,
                        namespace=this_features_namespace,
                        mask=set(['Num Fields']),
                        comm=comm0)
                    count = 0
                    for gid, attr_dict in input_features_iter:
                        num_fields_dict[gid] = attr_dict['Num Fields']
                        count += 1
                    logger.info(
                        'Read feature data from namespace %s for %i cells in population %s'
                        % (this_features_namespace, count, population))

                for (gid, v) in distances:
                    num_fields = num_fields_dict.get(gid, 0)
                    if num_fields > 0:
                        soma_distances[gid] = (v['U Distance'][0],
                                               v['V Distance'][0])
            else:
                for (gid, v) in distances:
                    soma_distances[gid] = (v['U Distance'][0],
                                           v['V Distance'][0])

            numitems = len(list(soma_distances.keys()))
            logger.info('read %s distances (%i elements)' %
                        (population, numitems))

            if numitems == 0:
                continue

            gid_array = np.asarray([gid for gid in soma_distances])
            distance_U_array = np.asarray(
                [soma_distances[gid][0] for gid in gid_array])
            distance_V_array = np.asarray(
                [soma_distances[gid][1] for gid in gid_array])

            U_min = np.min(distance_U_array)
            U_max = np.max(distance_U_array)
            V_min = np.min(distance_V_array)
            V_max = np.max(distance_V_array)

            range_U_dict[population] = (U_min, U_max)
            range_V_dict[population] = (V_min, V_max)

            distance_U = {
                gid: soma_distances[gid][0]
                for gid in soma_distances
            }
            distance_V = {
                gid: soma_distances[gid][1]
                for gid in soma_distances
            }

            distance_U_dict[population] = distance_U
            distance_V_dict[population] = distance_V

            min_dist = U_min
            max_dist = U_max

            distance_bins = np.arange(U_min, U_max, distance_bin_extent)
            distance_bin_array = np.digitize(distance_U_array, distance_bins)

            selection_set = set([])
            for bin_index in range(len(distance_bins) + 1):
                bin_gids = gid_array[np.where(
                    distance_bin_array == bin_index)[0]]
                if len(bin_gids) > 0:
                    selected_bin_gids = local_random.choice(
                        bin_gids, replace=False,
                        size=min(bin_sample_count, len(bin_gids)))
                    for gid in selected_bin_gids:
                        selection_set.add(int(gid))
            selection_dict[population] = selection_set

        yaml_output_dict = {}
        for k, v in utils.viewitems(selection_dict):
            yaml_output_dict[k] = list(sorted(v))

        yaml_output_path = '%s/DG_slice.yaml' % output_path
        with open(yaml_output_path, 'w') as outfile:
            yaml.dump(yaml_output_dict, outfile)

        del yaml_output_dict

    env.comm.barrier()

    write_selection_file_path = None
    if write_selection:
        write_selection_file_path = "%s/%s_selection.h5" % (env.results_path,
                                                            env.modelName)

    if write_selection_file_path is not None:
        if rank == 0:
            io_utils.mkout(env, write_selection_file_path)
        env.comm.barrier()
        selection_dict = env.comm.bcast(dict(selection_dict), root=0)
        env.cell_selection = selection_dict
        io_utils.write_cell_selection(env,
                                      write_selection_file_path,
                                      populations=populations)
        input_selection = io_utils.write_connection_selection(
            env, write_selection_file_path, populations=populations)

        if env.spike_input_ns is not None:
            io_utils.write_input_cell_selection(env,
                                                input_selection,
                                                write_selection_file_path,
                                                populations=populations)
    env.comm.barrier()
    MPI.Finalize()
from mpi4py import MPI
from neuroh5.io import read_cell_attributes
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

va = dict(read_cell_attributes(
    "/home/igr/src/model/dentate/datasets/Full_Scale_Control/dentate_Full_Scale_Control_coords_20170614.h5",
    "MEC",
    namespace="Sampled Coordinates"))

ks = list(va.keys())
print(ks)
if rank == 0:
    print("rank ", rank, ": len va.keys = ", len(ks))
    print("rank ", rank, ": va[", ks[0], "] = ", list(va[ks[0]].keys()))
    for k in va[ks[0]]:
        print("rank ", rank, ": ", k, " size = ", va[ks[0]][k].size)
        print("rank ", rank, ": ", k, " = ", va[ks[0]][k])
if rank == 1:
    print("rank ", rank, ": len va.keys = ", len(ks))
    print("rank ", rank, ": va[", ks[0], "] = ", list(va[ks[0]].keys()))
    for k in va[ks[0]]:
        print("rank ", rank, ": ", k, " size = ", va[ks[0]][k].size)
        print("rank ", rank, ": ", k, " = ", va[ks[0]][k])