Example #1
def report_topology(env, cell, node=None):
    """
    Traverse a cell and report topology and number of synapses.
    :param cell:
    :param env:
    :param node:
    """
    if node is None:
        node = cell.root
    syn_attrs = env.synapse_attributes
    num_exc_syns = len(
        syn_attrs.filter_synapses(cell.gid,
                                  syn_sections=[node.index],
                                  syn_types=[env.Synapse_Types['excitatory']]))
    num_inh_syns = len(
        syn_attrs.filter_synapses(cell.gid,
                                  syn_sections=[node.index],
                                  syn_types=[env.Synapse_Types['inhibitory']]))

    diams_str = ', '.join('%.2f' % node.sec.diam3d(i)
                          for i in range(node.sec.n3d()))
    report = f'node: {node.name}, L: {node.sec.L:.1f}, diams: [{diams_str}], nseg: {node.sec.nseg}, ' \
             f'children: {len(node.sec.children())}, exc_syns: {num_exc_syns}, inh_syns: {num_inh_syns}'
    parent, edge_data = get_node_parent(cell, node, return_edge_data=True)
    if parent is not None:
        report += f", parent: {parent.name}; connection_loc: {edge_data['parent_loc']:.1f}"
    logger.info(report)
    children = get_node_children(cell, node)
    for child in children:
        report_topology(env, cell, child)
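A minimal self-contained sketch of the same pre-order traversal pattern, using a toy node class as a hypothetical stand-in for the NEURON-backed cell:

class ToyNode:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

def report(node, depth=0):
    # visit the node first, then recurse into each child (pre-order)
    print('  ' * depth + node.name)
    for child in node.children:
        report(child, depth + 1)

root = ToyNode('soma', [ToyNode('apical', [ToyNode('tuft')]), ToyNode('basal')])
report(root)
# soma
#   apical
#     tuft
#   basal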
Example #2
def get_layer(self, x=None):
    """
    NEURON sections can be assigned a layer type for convenience in order to later specify synaptic mechanisms and
    properties for each layer. If 3D points are used to specify cell morphology, each element in the list
    corresponds to the layer of the 3D point with the same index.
    :param x: float in [0, 1] : optional relative location in section
    :return: list or float or None
    """
    layer = self.content.get('layer', None)
    if x is None or layer is None:
        result = layer
    else:
        result = None
        for i in range(self.sec.n3d()):
            result = layer[i]
            if (self.sec.arc3d(i) / self.sec.L) >= x:
                break
    return result
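A self-contained illustration of the lookup above: given per-point arc lengths and layer labels, return the label of the first 3D point at or beyond relative location x (toy data; layer_at is a hypothetical helper name):

def layer_at(arc_lengths, layers, total_length, x):
    # mirrors the loop above: scan points in order and stop at the first
    # one whose normalized arc length reaches x
    result = None
    for arc, layer in zip(arc_lengths, layers):
        result = layer
        if arc / total_length >= x:
            break
    return result

print(layer_at([0.0, 60.0, 100.0], [1, 2, 3], 100.0, 0.5))  # -> 2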
Example #3
File: io_utils.py  Project: soltesz-lab/ca1
def show_celltypes(input_path, output=print):

    with h5py.File(input_path, "r") as h5:

        dt_population_labels = h5[path_population_labels]
        enum_dtype_dict = h5py.h5t.check_enum_dtype(dt_population_labels.dtype)
        population_idx_dict = {enum_dtype_dict[k]: k for k in enum_dtype_dict}

        g = h5_get_group(h5, grp_h5types)

        populations = h5_get_dataset(g, grp_populations)
        output(f"{'Name':<10} {'Start':<8} {'Count':<8}")
        output(f"{'====':<10} {'=====':<8} {'=====':<8}")
        for i in range(len(populations)):
            start, count, idx = populations[i]
            name = population_idx_dict[idx]
            output(f"{name:<10} {start:<8} {count:<8}")

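A self-contained sketch of the enum-dtype round trip that the lookup above relies on, using the high-level h5py API with an in-memory file and toy population labels (the file name and labels are illustrative only):

import h5py
import numpy as np

# build an enum dtype mapping population names to integer codes
dt = h5py.enum_dtype({'GC': 0, 'MC': 1}, basetype='i')
with h5py.File('toy.h5', 'w', driver='core', backing_store=False) as h5:
    h5.create_dataset('labels', data=np.array([0, 1]), dtype=dt)
    enum_dict = h5py.check_enum_dtype(h5['labels'].dtype)  # {'GC': 0, 'MC': 1}
    idx_to_name = {v: k for k, v in enum_dict.items()}
    print(idx_to_name)  # {0: 'GC', 1: 'MC'}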
Example #4
File: io_utils.py  Project: soltesz-lab/ca1
def recsout(env,
            output_path,
            t_start=None,
            clear_data=False,
            write_cell_location_data=False,
            write_trial_data=False):
    """
    Writes intracellular state traces to specified NeuroH5 output file.

    :param env:
    :param output_path:
    :param clear_data:
    :param reduce_data:
    :return:
    """
    t_rec = env.t_rec
    equilibration_duration = float(
        env.stimulus_config['Equilibration Duration'])
    reduce_data = env.recording_profile.get('reduce', None)
    n_trials = env.n_trials

    trial_time_ranges = get_trial_time_ranges(env.t_rec.to_python(),
                                              env.n_trials)
    trial_time_bins = [
        t_trial_start for t_trial_start, t_trial_end in trial_time_ranges
    ]
    trial_dur = np.asarray([env.tstop + equilibration_duration] * n_trials,
                           dtype=np.float32)

    for pop_name in sorted(env.celltypes.keys()):
        local_rec_types = list(env.recs_dict[pop_name].keys())
        rec_types = sorted(
            set(env.comm.allreduce(local_rec_types, op=mpi_op_concat)))
        for rec_type in rec_types:
            recs = env.recs_dict[pop_name][rec_type]
            attr_dict = defaultdict(lambda: {})
            for rec in recs:
                gid = rec['gid']
                data_vec = np.array(rec['vec'],
                                    copy=clear_data,
                                    dtype=np.float32)
                time_vec = np.array(t_rec, copy=clear_data, dtype=np.float32)
                if t_start is not None:
                    time_inds = np.where(time_vec >= t_start)[0]
                    time_vec = time_vec[time_inds]
                    data_vec = data_vec[time_inds]
                trial_bins = np.digitize(time_vec, trial_time_bins) - 1
                for trial_i in range(n_trials):
                    trial_inds = np.where(trial_bins == trial_i)[0]
                    time_vec[trial_inds] -= np.sum(
                        trial_dur[:trial_i]) + equilibration_duration
                label = rec['label']
                if label in attr_dict[gid]:
                    if reduce_data is None:
                        raise RuntimeError(
                            'recsout: duplicate recorder labels and no reduce strategy specified'
                        )
                    elif reduce_data is True:
                        attr_dict[gid][label] += data_vec
                    else:
                        raise RuntimeError(
                            'recsout: unsupported reduce strategy specified')
                else:
                    attr_dict[gid][label] = data_vec
                    attr_dict[gid]['t'] = time_vec
                if write_trial_data:
                    attr_dict[gid]['trial duration'] = trial_dur
                if write_cell_location_data:
                    distance = rec.get('distance', None)
                    if distance is not None:
                        attr_dict[gid]['distance'] = np.asarray(
                            [distance], dtype=np.float32)
                    section = rec.get('section', None)
                    if section is not None:
                        attr_dict[gid]['section'] = np.asarray([section],
                                                               dtype=np.int16)
                    loc = rec.get('loc', None)
                    if loc is not None:
                        attr_dict[gid]['loc'] = np.asarray([loc],
                                                           dtype=np.float32)
                if clear_data:
                    rec['vec'].resize(0)
            if env.results_namespace_id is None:
                namespace_id = "Intracellular %s" % (rec_type)
            else:
                namespace_id = "Intracellular %s %s" % (
                    rec_type, str(env.results_namespace_id))
            append_cell_attributes(output_path,
                                   pop_name,
                                   attr_dict,
                                   namespace=namespace_id,
                                   comm=env.comm,
                                   io_size=env.io_size)
    if clear_data:
        env.t_rec.resize(0)

    env.comm.barrier()
    if env.comm.Get_rank() == 0:
        logger.info("*** Output intracellular state results to file %s" %
                    output_path)
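The per-trial time realignment above can be isolated as a self-contained numpy sketch: np.digitize assigns each sample to a trial, and each trial's accumulated offset (previous trial durations plus the equilibration period) is subtracted (toy durations):

import numpy as np

equilibration_duration = 250.0
tstop = 1000.0
n_trials = 2
trial_dur = np.asarray([tstop + equilibration_duration] * n_trials, dtype=np.float32)
trial_time_bins = [i * (tstop + equilibration_duration) for i in range(n_trials)]

time_vec = np.asarray([300.0, 800.0, 1550.0, 2050.0], dtype=np.float32)
trial_bins = np.digitize(time_vec, trial_time_bins) - 1   # [0, 0, 1, 1]
for trial_i in range(n_trials):
    trial_inds = np.where(trial_bins == trial_i)[0]
    time_vec[trial_inds] -= np.sum(trial_dur[:trial_i]) + equilibration_duration

print(time_vec)  # [ 50. 550.  50. 550.]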
Example #5
File: io_utils.py  Project: soltesz-lab/ca1
def spikeout(env, output_path, t_start=None, clear_data=False):
    """
    Writes spike times to specified NeuroH5 output file.

    :param env:
    :param output_path:
    :param clear_data: 
    :return:
    """
    equilibration_duration = float(
        env.stimulus_config['Equilibration Duration'])
    n_trials = env.n_trials

    t_vec = np.array(env.t_vec, dtype=np.float32)
    id_vec = np.array(env.id_vec, dtype=np.uint32)

    trial_time_ranges = get_trial_time_ranges(env.t_rec.to_python(),
                                              env.n_trials)
    trial_time_bins = [
        t_trial_start for t_trial_start, t_trial_end in trial_time_ranges
    ]
    trial_dur = np.asarray([env.tstop + equilibration_duration] * n_trials,
                           dtype=np.float32)

    binlst = []
    typelst = sorted(env.celltypes.keys())
    binvect = np.asarray([env.celltypes[k]['start'] for k in typelst])
    sort_idx = np.argsort(binvect, axis=0)
    pop_names = [typelst[i] for i in sort_idx]
    bins = binvect[sort_idx][1:]
    inds = np.digitize(id_vec, bins)

    if env.results_namespace_id is None:
        namespace_id = "Spike Events"
    else:
        namespace_id = "Spike Events %s" % str(env.results_namespace_id)

    for i, pop_name in enumerate(pop_names):
        spkdict = {}
        sinds = np.where(inds == i)[0]
        if len(sinds) > 0:
            ids = id_vec[sinds]
            ts = t_vec[sinds]
            for j in range(0, len(ids)):
                gid = ids[j]
                t = ts[j]
                if (t_start is None) or (t >= t_start):
                    if gid in spkdict:
                        spkdict[gid]['t'].append(t)
                    else:
                        spkdict[gid] = {'t': [t]}
            for gid in spkdict:
                is_artificial = gid in env.artificial_cells[pop_name]
                spiketrain = np.array(spkdict[gid]['t'], dtype=np.float32)
                if gid in env.spike_onset_delay:
                    spiketrain -= env.spike_onset_delay[gid]
                trial_bins = np.digitize(spiketrain, trial_time_bins) - 1
                trial_spikes = [
                    np.copy(spiketrain[np.where(trial_bins == trial_i)[0]])
                    for trial_i in range(n_trials)
                ]
                for trial_i, trial_spiketrain in enumerate(trial_spikes):
                    trial_spiketrain -= np.sum(
                        trial_dur[:trial_i]) + equilibration_duration
                spkdict[gid]['t'] = np.concatenate(trial_spikes)
                spkdict[gid]['Trial Duration'] = trial_dur
                spkdict[gid]['Trial Index'] = np.asarray(trial_bins,
                                                         dtype=np.uint8)
                spkdict[gid]['artificial'] = np.asarray(
                    [1 if is_artificial else 0], dtype=np.uint8)
        append_cell_attributes(output_path,
                               pop_name,
                               spkdict,
                               namespace=namespace_id,
                               comm=env.comm,
                               io_size=env.io_size)
        del spkdict

    if clear_data:
        env.t_vec.resize(0)
        env.id_vec.resize(0)

    env.comm.barrier()
    if env.comm.Get_rank() == 0:
        logger.info("*** Output spike results to file %s" % output_path)
Example #6
def generate_synaptic_connections(rank,
                                  gid,
                                  ranstream_syn,
                                  ranstream_con,
                                  cluster_seed,
                                  destination_gid,
                                  synapse_dict,
                                  population_dict,
                                  projection_synapse_dict,
                                  projection_prob_dict,
                                  connection_dict,
                                  random_choice=random_choice_w_replacement,
                                  debug_flag=False):
    """
    Given a set of synapses for a particular gid, projection
    configuration, projection and connection probability dictionaries,
    generates a set of possible connections for each synapse. The
    procedure first assigns each synapse to a projection, using the
    given proportions of each synapse type, and then chooses source
    gids for each synapse using the given projection probability
    dictionary.

    :param ranstream_syn: random stream for the synapse partitioning step
    :param ranstream_con: random stream for the choosing source gids step
    :param destination_gid: destination gid
    :param synapse_dict: synapse configurations, a dictionary with fields: 1) syn_ids (synapse ids), 2) syn_types (excitatory, inhibitory, etc.),
                        3) swc_types (SWC type(s) of synapse location in the neuronal morphological structure), 4) syn_layers (synapse layer placement)
    :param population_dict: mapping of population names to population indices
    :param projection_synapse_dict: mapping of projection names to a tuple of the form: <syn_layer, swc_type, syn_type, syn_proportion>
    :param projection_prob_dict: mapping of presynaptic population names to sets of source probabilities and source gids
    :param connection_dict: output connection dictionary
    :param random_choice: random choice procedure (default uses np.ranstream.multinomial)

    """
    num_projections = len(projection_synapse_dict)
    source_populations = sorted(projection_synapse_dict)
    prj_pop_index = {population: i for (i, population) in enumerate(source_populations)}
    synapse_prj_counts = np.zeros((num_projections,))
    synapse_prj_partition = defaultdict(lambda: defaultdict(list))
    maxit = 10
    it = 0
    syn_cdist_dict = {}
    ## assign each synapse to a projection
    while (np.count_nonzero(synapse_prj_counts) < num_projections) and (it < maxit):
        log_flag = it > 1
        if log_flag or debug_flag:
            logger.info(f"generate_synaptic_connections: gid {gid}: iteration {it}: "
                        f"source_populations = {source_populations} "
                        f"synapse_prj_counts = {synapse_prj_counts}")
        if debug_flag:
            logger.info(f'synapse_dict = {synapse_dict}')
        synapse_prj_counts.fill(0)
        synapse_prj_partition.clear()
        for (syn_id, syn_cdist, syn_type, swc_type, syn_layer) in zip(synapse_dict['syn_ids'],
                                                                      synapse_dict['syn_cdists'],
                                                                      synapse_dict['syn_types'],
                                                                      synapse_dict['swc_types'],
                                                                      synapse_dict['syn_layers']):
            syn_cdist_dict[syn_id] = syn_cdist
            projection = choose_synapse_projection(ranstream_syn, syn_layer, swc_type, syn_type, \
                                                   population_dict, projection_synapse_dict, log=log_flag)
            if log_flag or debug_flag:
                logger.info(f'generate_synaptic_connections: gid {gid}: '
                            f'syn_id = {syn_id} syn_type = {syn_type} swc_type = {swc_type} '
                            f'syn_layer = {syn_layer} source = {projection}')
            log_flag = False
            assert (projection is not None)
            synapse_prj_counts[prj_pop_index[projection]] += 1
            synapse_prj_partition[projection][syn_layer].append(syn_id)
        it += 1

    empty_projections = []

    for projection in projection_synapse_dict:
        logger.debug(f'Rank {rank}: gid {destination_gid}: source {projection} has {len(synapse_prj_partition[projection])} synapses')
        if not (len(synapse_prj_partition[projection]) > 0):
            empty_projections.append(projection)

    if len(empty_projections) > 0:
        logger.warning(f"Rank {rank}: gid {destination_gid}: projections {empty_projections} have an empty synapse list; "
                       f"swc types are {set(synapse_dict['swc_types'].flat)} layers are {set(synapse_dict['syn_layers'].flat)}")
    assert (len(empty_projections) == 0)

    ## Choose source connections based on distance-weighted probability
    count = 0
    for projection, prj_layer_dict in viewitems(synapse_prj_partition):
        (syn_config_type, syn_config_layers, syn_config_sections, syn_config_proportions, syn_config_contacts) = \
            projection_synapse_dict[projection]
        gid_dict = connection_dict[projection]
        prj_source_vertices = []
        prj_syn_ids = []
        prj_distances = []
        for prj_layer, syn_ids in viewitems(prj_layer_dict):
            source_probs, source_gids, distances_u, distances_v = \
                projection_prob_dict[projection][prj_layer]
            distance_dict = {source_gid: distance_u + distance_v \
                             for (source_gid, distance_u, distance_v) in \
                             zip(source_gids, distances_u, distances_v)}
            if len(source_gids) > 0:
                ordered_syn_ids = sorted(syn_ids, key=lambda x: syn_cdist_dict[x])
                n_syn_groups = int(math.ceil(float(len(syn_ids)) / float(syn_config_contacts)))
                source_gid_counts = random_choice(ranstream_con, n_syn_groups, source_probs)
                total_count = 0
                if syn_config_contacts > 1:
                    ncontacts = int(math.ceil(syn_config_contacts))
                    for i in range(0, len(source_gid_counts)):
                        if source_gid_counts[i] > 0:
                            source_gid_counts[i] *= ncontacts
                if len(source_gid_counts) == 0:
                    logger.warning(f'Rank {rank}: source vertices list is empty for gid: {destination_gid} ' 
                                   f'source: {projection} layer: {prj_layer} '
                                   f'source probs: {source_probs} distances_u: {distances_u} distances_v: {distances_v}')

                source_vertices = np.asarray(random_clustered_shuffle(len(source_gids), \
                                                                      source_gid_counts, \
                                                                      center_ids=source_gids, \
                                                                      cluster_std=2.0, \
                                                                      random_seed=cluster_seed), \
                                             dtype=np.uint32)[0:len(syn_ids)]
                assert (len(source_vertices) == len(syn_ids))
                distances = np.asarray([distance_dict[gid] for gid in source_vertices], \
                                       dtype=np.float32).reshape(-1, )
                prj_source_vertices.append(source_vertices)
                prj_syn_ids.append(ordered_syn_ids)
                prj_distances.append(distances)
                gid_dict[destination_gid] = (np.asarray([], dtype=np.uint32),
                                             {'Synapses': {'syn_id': np.asarray([], dtype=np.uint32)},
                                              'Connections': {'distance': np.asarray([], dtype=np.float32)}
                                              })
                cluster_seed += 1
        if len(prj_source_vertices) > 0:
            prj_source_vertices_array = np.concatenate(prj_source_vertices)
        else:
            prj_source_vertices_array = np.asarray([], dtype=np.uint32)
        del prj_source_vertices
        if len(prj_syn_ids) > 0:
            prj_syn_ids_array = np.concatenate(prj_syn_ids)
        else:
            prj_syn_ids_array = np.asarray([], dtype=np.uint32)
        del prj_syn_ids
        if len(prj_distances) > 0:
            prj_distances_array = np.concatenate(prj_distances)
        else:
            prj_distances_array = np.asarray([], dtype=np.float32)
        del prj_distances
        if len(prj_source_vertices_array) == 0:
            logger.warning(f'Rank {rank}: source gid list is empty for gid: {destination_gid} source: {projection}')
        count += len(prj_source_vertices_array)
        gid_dict[destination_gid] = (prj_source_vertices_array,
                                     {'Synapses': {'syn_id': np.asarray(prj_syn_ids_array, \
                                                                        dtype=np.uint32)},
                                      'Connections': {'distance': prj_distances_array}
                                      })

    return count
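The core of the source-selection step can be sketched with plain numpy: a multinomial draw over source probabilities yields per-source counts, which are then expanded and shuffled into a list of source gids. Here rng.multinomial and rng.shuffle stand in for the project's random_choice_w_replacement and random_clustered_shuffle; values are toy data:

import numpy as np

rng = np.random.RandomState(42)
source_gids = np.asarray([10, 11, 12], dtype=np.uint32)
source_probs = np.asarray([0.2, 0.5, 0.3])
n_syns = 6

counts = rng.multinomial(n_syns, source_probs)  # per-source connection counts
sources = np.repeat(source_gids, counts)        # expand counts into gids
rng.shuffle(sources)                            # randomize assignment order
print(sources)                                  # six gids drawn in proportion to source_probs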
Example #7
def make_morph_graph(biophys_cell, node_filters=None):
    """
    Creates a graph of 3d points that follows the morphological organization of the given neuron.
    :param biophys_cell: cell with a morphology tree of nodes
    :param node_filters: optional keyword filters passed to filter_nodes
    :return: NetworkX.Graph
    """
    import networkx as nx

    if node_filters is None:
        node_filters = {}
    nodes = filter_nodes(biophys_cell, **node_filters)
    tree = biophys_cell.tree

    sec_layers = {}
    src_sec = []
    dst_sec = []
    connection_locs = []
    pt_xs = []
    pt_ys = []
    pt_zs = []
    pt_locs = []
    pt_idxs = []
    pt_layers = []
    pt_idx = 0
    sec_pts = collections.defaultdict(list)

    for node in nodes:
        sec = node.sec
        nn = sec.n3d()
        L = sec.L
        for i in range(nn):
            pt_xs.append(sec.x3d(i))
            pt_ys.append(sec.y3d(i))
            pt_zs.append(sec.z3d(i))
            loc = sec.arc3d(i) / L
            pt_locs.append(loc)
            pt_layers.append(node.get_layer(loc))
            pt_idxs.append(pt_idx)
            sec_pts[node.index].append(pt_idx)
            pt_idx += 1

        for child in tree.successors(node):
            src_sec.append(node.index)
            dst_sec.append(child.index)
            connection_locs.append(h.parent_connection(sec=child.sec))

    sec_pt_idxs = {}
    edges = []
    for sec, pts in viewitems(sec_pts):
        sec_pt_idxs[pts[0]] = sec
        for i in range(1, len(pts)):
            sec_pt_idxs[pts[i]] = sec
            src_pt = pts[i - 1]
            dst_pt = pts[i]
            edges.append((src_pt, dst_pt))

    for (s, d, parent_loc) in zip(src_sec, dst_sec, connection_locs):
        for src_pt in sec_pts[s]:
            if pt_locs[src_pt] >= parent_loc:
                break
        dst_pt = sec_pts[d][0]
        edges.append((src_pt, dst_pt))

    morph_graph = nx.Graph()
    morph_graph.add_nodes_from([(i, {
        'x': x,
        'y': y,
        'z': z,
        'sec': sec_pt_idxs[i],
        'loc': loc,
        'layer': layer
    }) for (i, x, y, z, loc, layer) in zip(range(len(pt_idxs)), pt_xs, pt_ys,
                                           pt_zs, pt_locs, pt_layers)])
    for i, j in edges:
        morph_graph.add_edge(i, j)

    return morph_graph
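A self-contained sketch of the final graph assembly: 3D points become networkx nodes carrying coordinate/section/layer attributes, and consecutive points within a section become edges (toy coordinates):

import networkx as nx

pts = [(0, dict(x=0.0,  y=0.0, z=0.0, sec=0, loc=0.0, layer=1)),
       (1, dict(x=5.0,  y=0.0, z=0.0, sec=0, loc=0.5, layer=1)),
       (2, dict(x=10.0, y=0.0, z=0.0, sec=0, loc=1.0, layer=2))]

g = nx.Graph()
g.add_nodes_from(pts)
g.add_edges_from([(0, 1), (1, 2)])  # consecutive 3D points within one section
print(g.nodes[1]['layer'], list(g.edges))  # 1 [(0, 1), (1, 2)]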