Example 1
def lfpout(env, output_path):
    """
    Writes local field potential voltage traces to specified HDF5 output file.

    :param env:
    :param output_path:
    :param clear_data:
    :return:
    """

    import h5py

    for lfp in list(env.lfp.values()):

        if env.results_namespace_id is None:
            namespace_id = "Local Field Potential %s" % str(lfp.label)
        else:
            namespace_id = "Local Field Potential %s %s" % (str(
                lfp.label), str(env.results_namespace_id))
        output = h5py.File(output_path, 'a')

        grp = output.create_group(namespace_id)

        grp['t'] = np.asarray(lfp.t, dtype=np.float32)
        grp['v'] = np.asarray(lfp.meanlfp, dtype=np.float32)

        output.close()

    if env.comm.Get_rank() == 0:
        logger.info("*** Output LFP results to file %s" % output_path)
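
A minimal read-back sketch for the traces written above, assuming only h5py and numpy; read_lfp is a hypothetical helper, not part of the codebase:

import h5py
import numpy as np

def read_lfp(output_path, label, results_namespace_id=None):
    """Hypothetical helper: reads back one LFP trace written by lfpout."""
    # reconstruct the group name exactly as lfpout builds it above
    if results_namespace_id is None:
        namespace_id = "Local Field Potential %s" % str(label)
    else:
        namespace_id = "Local Field Potential %s %s" % (str(label),
                                                        str(results_namespace_id))
    with h5py.File(output_path, 'r') as f:
        grp = f[namespace_id]
        # 't' and 'v' are the datasets created by lfpout
        return np.asarray(grp['t']), np.asarray(grp['v'])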
Example 2
    def load_celltypes(self):
        """

        :return:
        """
        rank = self.comm.Get_rank()
        size = self.comm.Get_size()
        celltypes = self.celltypes
        typenames = sorted(celltypes.keys())

        if rank == 0:
            self.logger.info('env.data_file_path = %s' %
                             str(self.data_file_path))

        (population_ranges,
         _) = read_population_ranges(self.data_file_path, self.comm)
        if rank == 0:
            self.logger.info('population_ranges = %s' % str(population_ranges))

        for k in typenames:
            population_range = population_ranges.get(k, None)
            if population_range is not None:
                celltypes[k]['start'] = population_ranges[k][0]
                celltypes[k]['num'] = population_ranges[k][1]
                if 'mechanism file' in celltypes[k]:
                    celltypes[k]['mech_file_path'] = '%s/%s' % (
                        self.config_prefix, celltypes[k]['mechanism file'])
                    mech_dict = read_from_yaml(celltypes[k]['mech_file_path'])
                    celltypes[k]['mech_dict'] = mech_dict
                if 'synapses' in celltypes[k]:
                    synapses_dict = celltypes[k]['synapses']
                    if 'weights' in synapses_dict:
                        weights_config = synapses_dict['weights']
                        if isinstance(weights_config, list):
                            weights_dicts = weights_config
                        else:
                            weights_dicts = [weights_config]
                        for weights_dict in weights_dicts:
                            if 'expr' in weights_dict:
                                expr = weights_dict['expr']
                                parameter = weights_dict['parameter']
                                const = weights_dict.get('const', {})
                                clos = ExprClosure(parameter, expr, const)
                                weights_dict['closure'] = clos
                        synapses_dict['weights'] = weights_dicts

        population_names = read_population_names(self.data_file_path,
                                                 self.comm)
        if rank == 0:
            self.logger.info('population_names = %s' % str(population_names))
        self.cell_attribute_info = read_cell_attribute_info(
            self.data_file_path, population_names, comm=self.comm)

        if rank == 0:
            self.logger.info('attribute info: %s' %
                             str(self.cell_attribute_info))
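
For illustration, a hypothetical 'weights' configuration entry and the normalization load_celltypes applies to it (the key names follow the code above; the values are made up):

# a bare dict entry, as it might appear in a celltypes configuration
weights_config = {'expr': 'w * x', 'parameter': 'w', 'const': {'w': 1.0}}

# load_celltypes normalizes a bare dict into a one-element list; each entry
# containing an 'expr' key then gains a 'closure' key holding an ExprClosure
weights_dicts = weights_config if isinstance(weights_config, list) else [weights_config]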
Example 3
    def is_same_cell(self, cell1, cell2):
        """

        :param cell1: :class:'BiophysCell' or :class:'h.hocObject'
        :param cell2: :class:'BiophysCell' or :class:'h.hocObject'
        :return: bool
        """
        if cell1 == cell2:
            return True
        elif hasattr(cell1, 'tree'):
            return cell1.tree.root.sec.cell() == cell2
        elif hasattr(cell2, 'tree'):
            return cell2.tree.root.sec.cell() == cell1
        else:
            raise RuntimeError(
                'QuickSim: problem comparing cell objects: %s and %s' %
                (str(cell1), str(cell2)))
Example 4
    def get_prob(self, destination_gid, source, source_layers):
        """
        Given the soma coordinates of a destination neuron and a
        population source, return an array of connection probabilities
        and an array of corresponding source gids.

        :param destination_gid: int
        :param source: string
        :return: array of float, array of int

        """
        prob_dict = {}
        for layer in source_layers:
            destination_u, destination_v, source_u, source_v, distance_u, distance_v, source_gid = \
                self.filter_by_distance(destination_gid, source, layer)
            if layer in self.p_dist[source]:
                layer_key = layer
            elif 'default' in self.p_dist[source]:
                layer_key = 'default'
            else:
                raise RuntimeError('connection_generator.get_prob: missing configuration for layer %s' % \
                                   str(layer))
            p = self.p_dist[source][layer_key](distance_u, distance_v)
            psum = np.sum(p)
            assert ((p >= 0.).all() and (p <= 1.).all())
            if psum > 0.:
                pn = p / psum
            else:
                pn = p
            prob_dict[layer] = (pn.ravel(), source_gid.ravel(),
                                distance_u.ravel(), distance_v.ravel())
        return prob_dict
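
A self-contained sketch of consuming one layer's entry of the returned dictionary; the probabilities and gids below are synthetic stand-ins:

import numpy as np

# stand-in for prob_dict[layer]: (probabilities, source gids, U distances, V distances)
pn = np.asarray([0.2, 0.5, 0.3])
source_gid = np.asarray([101, 102, 103], dtype=np.uint32)

rng = np.random.RandomState(42)
# draw source gids in proportion to the normalized connection probabilities
chosen = rng.choice(source_gid, size=5, p=pn)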
Example 5
    def plot(self, axes=None, show=True):
        """

        """
        import matplotlib.pyplot as plt
        from dentate.plot import clean_axes
        if len(self.recs) == 0:
            return
        if axes is None:
            fig, axes = plt.subplots()
        else:
            fig = axes.get_figure()
        for name, rec_dict in viewitems(self.recs):
            description = str(rec_dict['description'])
            axes.plot(self.tvec, rec_dict['vec'],
                      label='%s: %s(%.2f) %s' % (name, rec_dict['node'].name, rec_dict['loc'], description))
            axes.set_xlabel('Time (ms)')
            axes.set_ylabel('%s (%s)' % (rec_dict['ylabel'], rec_dict['units']))
        axes.legend(loc='best', frameon=False)
        title = None
        if 'title' in self.parameters:
            title = self.parameters['title']
        if 'description' in self.parameters:
            if title is not None:
                title = title + '; ' + self.parameters['description']
            else:
                title = self.parameters['description']
        if title is not None:
            axes.set_title(title)
        clean_axes(axes)
        if show:
            fig.tight_layout()
            fig.show()
        else:
            return axes
Example 6
def choose_synapse_projection(ranstream_syn,
                              syn_layer,
                              swc_type,
                              syn_type,
                              population_dict,
                              projection_synapse_dict,
                              log=False):
    """
    Given a synapse projection, SWC synapse location, and synapse type,
    chooses a projection from the given projection dictionary based on
    1) whether the projection properties match the given synapse
    properties and 2) random choice between all the projections that
    satisfy the given criteria.

    :param ranstream_syn: random state object
    :param syn_layer: synapse layer
    :param swc_type: SWC location for synapse (soma, axon, apical, basal)
    :param syn_type: synapse type (excitatory, inhibitory, neuromodulatory)
    :param population_dict: mapping of population names to population indices
    :param projection_synapse_dict: mapping of projection names to a tuple of the form: <type, layers, swc sections, proportions, contacts>
    :param log: if True, logs the candidate projections and probabilities
    :return: the chosen projection name, or None if no projection satisfies the given criteria
    """
    ivd = {v: k for k, v in viewitems(population_dict)}
    projection_lst = []
    projection_prob_lst = []
    for k, (syn_config_type, syn_config_layers, syn_config_sections,
            syn_config_proportions,
            syn_config_contacts) in viewitems(projection_synapse_dict):
        if (syn_type == syn_config_type) and (swc_type in syn_config_sections):
            ord_indices = list_find_all(lambda x: x == swc_type,
                                        syn_config_sections)
            for ord_index in ord_indices:
                if syn_layer == syn_config_layers[ord_index]:
                    projection_lst.append(population_dict[k])
                    projection_prob_lst.append(
                        syn_config_proportions[ord_index])
    if len(projection_lst) > 1:
        candidate_projections = np.asarray(projection_lst)
        candidate_probs = np.asarray(projection_prob_lst)
        if log:
            logger.info("candidate_projections: %s candidate_probs: %s" % \
                        (str(candidate_projections), str(candidate_probs)))
        projection = ranstream_syn.choice(candidate_projections,
                                          1,
                                          p=candidate_probs)[0]
    elif len(projection_lst) > 0:
        projection = projection_lst[0]
    else:
        projection = None

    if projection is None:
        logger.error('Projection is none for syn_type = %s syn_layer = %s swc_type = %s' % \
                     (str(syn_type), str(syn_layer), str(swc_type)))
        logger.error(str(projection_synapse_dict))
        return None

    return ivd[projection]
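
To make the expected dictionary shapes concrete, a synthetic configuration under the same conventions (the population names and type/layer/section indices are made up):

import numpy as np

population_dict = {'MPP': 0, 'LPP': 1}  # population name -> index
# projection name -> (syn_type, layers, sections, proportions, contacts)
projection_synapse_dict = {
    'MPP': (0, [2], [4], [0.7], 1),
    'LPP': (0, [2], [4], [0.3], 1),
}
# a synapse with syn_type=0, swc_type=4, syn_layer=2 matches both projections,
# so one is drawn with probability proportional to its configured proportion:
ranstream_syn = np.random.RandomState(0)
projection_index = ranstream_syn.choice(np.asarray([0, 1]), 1,
                                        p=np.asarray([0.7, 0.3]))[0]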
Example 7
    def parse_syn_mechparams(self, mechparams_dict):
        """
        Parses a dictionary of synaptic mechanism parameters, replacing
        entries of the form {'expr': ..., 'parameter': ...} with
        ExprClosure instances and passing plain values through unchanged.
        """
        res = {}
        for mech_name, mech_params in viewitems(mechparams_dict):
            mech_params1 = {}
            for k, v in viewitems(mech_params):
                if isinstance(v, dict):
                    if 'expr' in v:
                        mech_params1[k] = ExprClosure([v['parameter']],
                                                      v['expr'],
                                                      v.get('const', None),
                                                      ['x'])
                    else:
                        raise RuntimeError(
                            'parse_syn_mechparams: unknown parameter type %s' %
                            str(v))
                else:
                    mech_params1[k] = v
            res[mech_name] = mech_params1
        return res
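
An illustrative input under the conventions above; the mechanism and parameter names are hypothetical:

# plain values pass through unchanged; dict values carrying an 'expr' key are
# replaced by ExprClosure instances parameterized on 'x'
mechparams_dict = {
    'AMPA': {'g_unit': 0.002,
             'weight': {'expr': 'scale * x', 'parameter': 'scale',
                        'const': {'scale': 1.5}}},
}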
Example 8
def get_h5py_group(file, hierarchy, create=False):
    """

    :param file: :class: in ['h5py.File', 'h5py.Group']
    :param hierarchy: list of str
    :param create: bool
    :return: :class:'h5py.Group'
    """
    target = file
    for key in hierarchy:
        if key is not None:
            key = str(key)
            if key not in target:
                if create:
                    target = target.create_group(key)
                else:
                    raise KeyError(
                        'get_h5py_group: target: %s does not contain key: %s; valid keys: %s'
                        % (target, key, list(target.keys())))
            else:
                target = target[key]
    return target
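
A usage sketch with an in-memory HDF5 file, showing that None entries in the hierarchy are skipped:

import h5py

f = h5py.File('scratch.h5', 'w', driver='core', backing_store=False)  # in-memory only
grp = get_h5py_group(f, ['Populations', None, 'GC'], create=True)
print(grp.name)  # prints '/Populations/GC'; the None entry was skipped
f.close()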
Example 9
def write_input_cell_selection(env,
                               input_sources,
                               write_selection_file_path,
                               populations=None,
                               write_kwds={}):
    """
    Writes out predefined spike trains when only a subset of the network is instantiated.

    :param env: an instance of the `dentate.Env` class
    :param input_sources: a dictionary of the form { pop_name, gid_sources }
    """

    write_kwds = dict(write_kwds)  # copy to avoid mutating the caller's (or the shared default) dict
    if 'comm' not in write_kwds:
        write_kwds['comm'] = env.comm
    if 'io_size' not in write_kwds:
        write_kwds['io_size'] = env.io_size

    rank = int(env.comm.Get_rank())
    nhosts = int(env.comm.Get_size())

    dataset_path = env.dataset_path
    input_file_path = env.data_file_path

    if populations is None:
        pop_names = sorted(env.celltypes.keys())
    else:
        pop_names = populations

    for pop_name, gid_range in sorted(viewitems(input_sources)):

        if pop_name not in pop_names:
            continue

        spikes_output_dict = {}

        if (env.cell_selection is not None) and (pop_name
                                                 in env.cell_selection):
            local_gid_range = gid_range.difference(
                set(env.cell_selection[pop_name]))
        else:
            local_gid_range = gid_range

        gid_ranges = env.comm.allgather(local_gid_range)
        this_gid_range = set([])
        for rank_gid_range in gid_ranges:
            for gid in rank_gid_range:
                if gid % nhosts == rank:
                    this_gid_range.add(gid)

        has_spike_train = False
        spike_input_source_loc = []
        if (env.spike_input_attribute_info is not None) and (env.spike_input_ns
                                                             is not None):
            if (pop_name in env.spike_input_attribute_info) and \
                    (env.spike_input_ns in env.spike_input_attribute_info[pop_name]):
                has_spike_train = True
                spike_input_source_loc.append(
                    (env.spike_input_path, env.spike_input_ns))
        if (env.cell_attribute_info is not None) and (env.spike_input_ns
                                                      is not None):
            if (pop_name in env.cell_attribute_info) and \
                    (env.spike_input_ns in env.cell_attribute_info[pop_name]):
                has_spike_train = True
                spike_input_source_loc.append(
                    (input_file_path, env.spike_input_ns))

        if rank == 0:
            logger.info(
                '*** Reading spike trains for population %s: %d cells: has_spike_train = %s'
                % (pop_name, len(this_gid_range), str(has_spike_train)))

        if has_spike_train:

            vecstim_attr_set = set(['t'])
            if env.spike_input_attr is not None:
                vecstim_attr_set.add(env.spike_input_attr)
            if 'spike train' in env.celltypes[pop_name]:
                vecstim_attr_set.add(
                    env.celltypes[pop_name]['spike train']['attribute'])

            cell_spikes_iters = [ read_cell_attribute_selection(input_path, pop_name, \
                                                                list(this_gid_range), \
                                                                namespace=input_ns, \
                                                                mask=vecstim_attr_set, \
                                                                comm=env.comm) for (input_path, input_ns) in spike_input_source_loc ]

            for cell_spikes_iter in cell_spikes_iters:
                spikes_output_dict.update(dict(list(cell_spikes_iter)))

        if rank == 0:
            logger.info('*** Writing spike trains for population %s: %d cells' %
                        (pop_name, len(spikes_output_dict)))

        write_cell_attributes(write_selection_file_path, pop_name, spikes_output_dict,  \
                              namespace=env.spike_input_ns, **write_kwds)
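
The gid-to-rank assignment above is a simple round-robin; with synthetic values:

nhosts, rank = 4, 1
# each rank takes the gids congruent to its rank modulo the number of hosts
local_gids = [gid for gid in range(10) if gid % nhosts == rank]  # [1, 5, 9]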
Example 10
def write_connection_selection(env,
                               write_selection_file_path,
                               populations=None,
                               write_kwds={}):
    """
    Loads NeuroH5 connectivity file, and writes the corresponding
    synapse and network connection mechanisms for the selected postsynaptic cells.

    :param env: an instance of the `dentate.Env` class
    :param write_selection_file_path: path to the NeuroH5 file to write the selection to
    :param populations: optional list of population names; defaults to the populations in env.cell_selection
    :param write_kwds: additional keyword arguments passed to write_cell_attributes
    :return: dictionary mapping presynaptic population names to sets of source gids
    """

    write_kwds = dict(write_kwds)  # copy to avoid mutating the caller's (or the shared default) dict
    if 'comm' not in write_kwds:
        write_kwds['comm'] = env.comm
    if 'io_size' not in write_kwds:
        write_kwds['io_size'] = env.io_size

    connectivity_file_path = env.connectivity_file_path
    forest_file_path = env.forest_file_path
    rank = int(env.comm.Get_rank())
    nhosts = int(env.comm.Get_size())
    syn_attrs = env.synapse_attributes

    if populations is None:
        pop_names = sorted(env.cell_selection.keys())
    else:
        pop_names = populations

    input_sources = {pop_name: set([]) for pop_name in env.celltypes}

    for (postsyn_name, presyn_names) in sorted(viewitems(env.projection_dict)):

        if rank == 0:
            logger.info('*** Writing connection selection of population %s' %
                        (postsyn_name))

        if postsyn_name not in pop_names:
            continue

        gid_range = [
            gid for gid in env.cell_selection[postsyn_name]
            if gid % nhosts == rank
        ]

        synapse_config = env.celltypes[postsyn_name]['synapses']

        has_weights = synapse_config.get('weights', False)
        weights_namespaces = []
        if has_weights:
            if 'weights namespace' in synapse_config:
                weights_namespaces.append(
                    synapse_config['weights namespace'])
            elif 'weights namespaces' in synapse_config:
                weights_namespaces.extend(
                    synapse_config['weights namespaces'])
            else:
                weights_namespaces.append('Weights')

        if rank == 0:
            logger.info('*** Reading synaptic attributes of population %s' %
                        (postsyn_name))

        syn_attributes_iter = read_cell_attribute_selection(
            forest_file_path,
            postsyn_name,
            selection=gid_range,
            namespace='Synapse Attributes',
            comm=env.comm)

        syn_attributes_output_dict = dict(list(syn_attributes_iter))
        write_cell_attributes(write_selection_file_path,
                              postsyn_name,
                              syn_attributes_output_dict,
                              namespace='Synapse Attributes',
                              **write_kwds)
        del syn_attributes_output_dict
        del syn_attributes_iter

        if has_weights:
            for weights_namespace in sorted(weights_namespaces):
                weight_attributes_iter = read_cell_attribute_selection(
                    forest_file_path,
                    postsyn_name,
                    selection=gid_range,
                    namespace=weights_namespace,
                    comm=env.comm)
                weight_attributes_output_dict = dict(
                    list(weight_attributes_iter))
                write_cell_attributes(write_selection_file_path,
                                      postsyn_name,
                                      weight_attributes_output_dict,
                                      namespace=weights_namespace,
                                      **write_kwds)
                del weight_attributes_output_dict
                del weight_attributes_iter

        logger.info(
            '*** Rank %i: reading connectivity selection from file %s for postsynaptic population: %s: selection: %s'
            % (rank, connectivity_file_path, postsyn_name, str(gid_range)))

        (graph, attr_info) = read_graph_selection(connectivity_file_path, selection=gid_range, \
                                                  projections=[ (presyn_name, postsyn_name) for presyn_name in sorted(presyn_names) ], \
                                                  comm=env.comm, namespaces=['Synapses', 'Connections'])

        for presyn_name in sorted(presyn_names):
            gid_dict = {}
            edge_count = 0
            node_count = 0
            if postsyn_name in graph:

                if postsyn_name in attr_info and presyn_name in attr_info[
                        postsyn_name]:
                    edge_attr_info = attr_info[postsyn_name][presyn_name]
                else:
                    raise RuntimeError('write_connection_selection: missing edge attributes for projection %s -> %s' % \
                                       (presyn_name, postsyn_name))

                if 'Synapses' in edge_attr_info and \
                        'syn_id' in edge_attr_info['Synapses'] and \
                        'Connections' in edge_attr_info and \
                        'distance' in edge_attr_info['Connections']:
                    syn_id_attr_index = edge_attr_info['Synapses']['syn_id']
                    distance_attr_index = edge_attr_info['Connections'][
                        'distance']
                else:
                    raise RuntimeError('write_connection_selection: missing edge attributes for projection %s -> %s' % \
                                           (presyn_name, postsyn_name))

                edge_iter = compose_iter(lambda edgeset: input_sources[presyn_name].update(edgeset[1][0]), \
                                         graph[postsyn_name][presyn_name])
                for (postsyn_gid, edges) in edge_iter:

                    presyn_gids, edge_attrs = edges
                    edge_syn_ids = edge_attrs['Synapses'][syn_id_attr_index]
                    edge_dists = edge_attrs['Connections'][distance_attr_index]

                    gid_dict[postsyn_gid] = (presyn_gids, {
                        'Synapses': {
                            'syn_id': edge_syn_ids
                        },
                        'Connections': {
                            'distance': edge_dists
                        }
                    })
                    edge_count += len(presyn_gids)
                    node_count += 1

            logger.info(
                '*** Rank %d: Writing projection %s -> %s selection: %d nodes, %d edges'
                % (rank, presyn_name, postsyn_name, node_count, edge_count))
            write_graph(write_selection_file_path, \
                        src_pop_name=presyn_name, dst_pop_name=postsyn_name, \
                        edges=gid_dict, comm=env.comm, io_size=env.io_size)
        env.comm.barrier()

    return input_sources
Example 11
def statistics(parts):
    """Computes and logs load-balance statistics for a list of (complexity, items) partitions, as produced by lpt."""
    npart = len(parts)
    total_cx = 0
    max_part_cx = 0
    ncx = 0
    max_cx = 0
    for part in parts:
        ncx += len(part[1])
        total_cx += part[0]
        if part[0] > max_part_cx:
            max_part_cx = part[0]
        for cx in part[1]:
            if cx[0] > max_cx:
                max_cx = cx[0]
    avg_part_cx = total_cx / npart
    loadbal = 1.0
    if max_part_cx > 0.:
        loadbal = avg_part_cx / max_part_cx
    logger.info(
        "*** loadbal=%g total_cx=%g npart=%d ncx=%d max_part_cx=%g max_cx=%g" %
        (loadbal, total_cx, npart, ncx, max_part_cx, max_cx))


if __name__ == '__main__':
    for cx in ([(i, i) for i in range(10)], []):
        logger.info('%i complexity items %s' % (len(cx), cx))
        pinfo = lpt(cx, 3)
        logger.info('%i lpt partitions %s' % (len(pinfo), str(pinfo)))
        statistics(pinfo)
Example 12
def recsout(env,
            output_path,
            t_start=None,
            clear_data=False,
            write_cell_location_data=False,
            write_trial_data=False):
    """
    Writes intracellular state traces to specified NeuroH5 output file.

    :param env:
    :param output_path:
    :param clear_data:
    :param reduce_data:
    :return:
    """
    t_rec = env.t_rec
    equilibration_duration = float(
        env.stimulus_config['Equilibration Duration'])
    reduce_data = env.recording_profile.get('reduce', None)
    n_trials = env.n_trials

    trial_time_ranges = get_trial_time_ranges(env.t_rec.to_python(),
                                              env.n_trials)
    trial_time_bins = [
        t_trial_start for t_trial_start, t_trial_end in trial_time_ranges
    ]
    trial_dur = np.asarray([env.tstop + equilibration_duration] * n_trials,
                           dtype=np.float32)

    for pop_name in sorted(env.celltypes.keys()):
        local_rec_types = list(env.recs_dict[pop_name].keys())
        rec_types = sorted(
            set(env.comm.allreduce(local_rec_types, op=mpi_op_concat)))
        for rec_type in rec_types:
            recs = env.recs_dict[pop_name][rec_type]
            attr_dict = defaultdict(dict)
            for rec in recs:
                gid = rec['gid']
                data_vec = np.array(rec['vec'],
                                    copy=clear_data,
                                    dtype=np.float32)
                time_vec = np.array(t_rec, copy=clear_data, dtype=np.float32)
                if t_start is not None:
                    time_inds = np.where(time_vec >= t_start)[0]
                    time_vec = time_vec[time_inds]
                    data_vec = data_vec[time_inds]
                trial_bins = np.digitize(time_vec, trial_time_bins) - 1
                for trial_i in range(n_trials):
                    trial_inds = np.where(trial_bins == trial_i)[0]
                    time_vec[trial_inds] -= np.sum(
                        trial_dur[:(trial_i)]) + equilibration_duration
                label = rec['label']
                if label in attr_dict[gid]:
                    if reduce_data is None:
                        raise RuntimeError(
                            'recsout: duplicate recorder labels and no reduce strategy specified'
                        )
                    elif reduce_data is True:
                        attr_dict[gid][label] += data_vec
                    else:
                        raise RuntimeError(
                            'recsout: unsupported reduce strategy specified')
                else:
                    attr_dict[gid][label] = data_vec
                    attr_dict[gid]['t'] = time_vec
                if write_trial_data:
                    attr_dict[gid]['trial duration'] = trial_dur
                if write_cell_location_data:
                    distance = rec.get('distance', None)
                    if distance is not None:
                        attr_dict[gid]['distance'] = np.asarray(
                            [distance], dtype=np.float32)
                    section = rec.get('section', None)
                    if section is not None:
                        attr_dict[gid]['section'] = np.asarray([section],
                                                               dtype=np.int16)
                    loc = rec.get('loc', None)
                    if loc is not None:
                        attr_dict[gid]['loc'] = np.asarray([loc],
                                                           dtype=np.float32)
                if clear_data:
                    rec['vec'].resize(0)
            if env.results_namespace_id is None:
                namespace_id = "Intracellular %s" % (rec_type)
            else:
                namespace_id = "Intracellular %s %s" % (
                    rec_type, str(env.results_namespace_id))
            append_cell_attributes(output_path,
                                   pop_name,
                                   attr_dict,
                                   namespace=namespace_id,
                                   comm=env.comm,
                                   io_size=env.io_size)
    if clear_data:
        env.t_rec.resize(0)

    env.comm.barrier()
    if env.comm.Get_rank() == 0:
        logger.info("*** Output intracellular state results to file %s" %
                    output_path)
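
The per-trial rebasing above hinges on np.digitize; a synthetic example of the binning step:

import numpy as np

trial_time_bins = [0.0, 1000.0]  # start times of two trials
time_vec = np.asarray([10., 500., 1010., 1500.], dtype=np.float32)
# digitize returns 1-based bin indices, so subtracting 1 yields the
# trial index of each sample: [0, 0, 1, 1]
trial_bins = np.digitize(time_vec, trial_time_bins) - 1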
Example 13
def generate_synaptic_connections(rank,
                                  gid,
                                  ranstream_syn,
                                  ranstream_con,
                                  cluster_seed,
                                  destination_gid,
                                  synapse_dict,
                                  population_dict,
                                  projection_synapse_dict,
                                  projection_prob_dict,
                                  connection_dict,
                                  random_choice=random_choice_w_replacement):
    """
    Given a set of synapses for a particular gid, projection
    configuration, projection and connection probability dictionaries,
    generates a set of possible connections for each synapse. The
    procedure first assigns each synapse to a projection, using the
    given proportions of each synapse type, and then chooses source
    gids for each synapse using the given projection probability
    dictionary.

    :param ranstream_syn: random stream for the synapse partitioning step
    :param ranstream_con: random stream for the choosing source gids step
    :param destination_gid: destination gid
    :param synapse_dict: synapse configurations, a dictionary with fields: 1) syn_ids (synapse ids), 2) syn_types (excitatory, inhibitory, etc.),
                        3) swc_types (SWC type(s) of synapse location in the neuronal morphology), 4) syn_layers (synapse layer placement)
    :param population_dict: mapping of population names to population indices
    :param projection_synapse_dict: mapping of projection names to a tuple of the form: <syn_type, layers, sections, proportions, contacts>
    :param projection_prob_dict: mapping of presynaptic population names to sets of source probabilities and source gids
    :param connection_dict: output connection dictionary
    :param random_choice: random choice procedure (default uses np.ranstream.multinomial)

    """
    num_projections = len(projection_synapse_dict)
    prj_pop_index = {
        population: i
        for (i, population) in enumerate(projection_synapse_dict)
    }
    synapse_prj_counts = np.zeros((num_projections, ))
    synapse_prj_partition = defaultdict(lambda: defaultdict(list))
    maxit = 10
    it = 0
    ## assign each synapse to a projection
    while (np.count_nonzero(synapse_prj_counts) < num_projections) and (it <
                                                                        maxit):
        log_flag = it > 1
        if log_flag:
            logger.info("generate_synaptic_connections: gid %i: iteration %i" %
                        (gid, it))
        synapse_prj_counts.fill(0)
        synapse_prj_partition.clear()
        for (syn_id, syn_type, swc_type, syn_layer) in zip(
                synapse_dict['syn_ids'], synapse_dict['syn_types'],
                synapse_dict['swc_types'], synapse_dict['syn_layers']):
            projection = choose_synapse_projection(ranstream_syn, syn_layer, swc_type, syn_type, \
                                                   population_dict, projection_synapse_dict, log=log_flag)
            if log_flag:
                logger.info('generate_synaptic_connections: gid %i: ' \
                            'syn_id = %i syn_type = %i swc_type = %i syn_layer = %i projection = %s' % \
                            (gid, syn_id, syn_type, swc_type, syn_layer, projection))
            assert (projection is not None)
            synapse_prj_counts[prj_pop_index[projection]] += 1
            synapse_prj_partition[projection][syn_layer].append(syn_id)
        it += 1

    empty_projections = []

    for projection in projection_synapse_dict:
        logger.debug('Rank %i: gid %i: projection %s has %i synapses' %
                     (rank, destination_gid, projection,
                      len(synapse_prj_partition[projection])))
        if not (len(synapse_prj_partition[projection]) > 0):
            empty_projections.append(projection)

    if len(empty_projections) > 0:
        logger.warning('Rank %i: gid %i: projections %s have an empty synapse list; ' \
                       'swc types are %s layers are %s' % \
                       (rank, destination_gid, str(empty_projections), str(set(synapse_dict['swc_types'].flat)), \
                        str(set(synapse_dict['syn_layers'].flat))))
    assert (len(empty_projections) == 0)

    ## Choose source connections based on distance-weighted probability
    count = 0
    for projection, prj_layer_dict in viewitems(synapse_prj_partition):
        (syn_config_type, syn_config_layers, syn_config_sections, syn_config_proportions, syn_config_contacts) = \
            projection_synapse_dict[projection]
        gid_dict = connection_dict[projection]
        prj_source_vertices = []
        prj_syn_ids = []
        prj_distances = []
        for prj_layer, syn_ids in viewitems(prj_layer_dict):
            source_probs, source_gids, distances_u, distances_v = \
                projection_prob_dict[projection][prj_layer]
            distance_dict = {source_gid: distance_u + distance_v \
                             for (source_gid, distance_u, distance_v) in \
                             zip(source_gids, distances_u, distances_v)}
            if len(source_gids) > 0:
                n_syn_groups = int(
                    math.ceil(
                        float(len(syn_ids)) / float(syn_config_contacts)))
                source_gid_counts = random_choice(ranstream_con, n_syn_groups,
                                                  source_probs)
                total_count = 0
                if syn_config_contacts > 1:
                    ncontacts = int(math.ceil(syn_config_contacts))
                    for i in range(0, len(source_gid_counts)):
                        if source_gid_counts[i] > 0:
                            source_gid_counts[i] *= ncontacts
                if len(source_gid_counts) == 0:
                    logger.warning('Rank %i: source vertices list is empty for gid: %i projection: %s layer: %s ' \
                                   'source probs: %s distances_u: %s distances_v: %s' % \
                                   (rank, destination_gid, projection, str(prj_layer), \
                                    str(source_probs), str(distances_u), str(distances_v)))

                uv_distance_sums = np.add(distances_u,
                                          distances_v,
                                          dtype=np.float32)
                source_vertices = np.asarray(random_clustered_shuffle(len(source_gids), \
                                                                      source_gid_counts, \
                                                                      center_ids=source_gids, \
                                                                      cluster_std=2.0, \
                                                                      random_seed=cluster_seed), \
                                             dtype=np.uint32)[0:len(syn_ids)]
                assert (len(source_vertices) == len(syn_ids))
                distances = np.asarray([distance_dict[gid] for gid in source_vertices], \
                                       dtype=np.float32).reshape(-1, )
                prj_source_vertices.append(source_vertices)
                prj_syn_ids.append(syn_ids)
                prj_distances.append(distances)
                gid_dict[destination_gid] = (np.asarray([], dtype=np.uint32), {
                    'Synapses': {
                        'syn_id': np.asarray([], dtype=np.uint32)
                    },
                    'Connections': {
                        'distance': np.asarray([], dtype=np.float32)
                    }
                })
                cluster_seed += 1
        if len(prj_source_vertices) > 0:
            prj_source_vertices_array = np.concatenate(prj_source_vertices)
        else:
            prj_source_vertices_array = np.asarray([], dtype=np.uint32)
        del (prj_source_vertices)
        if len(prj_syn_ids) > 0:
            prj_syn_ids_array = np.concatenate(prj_syn_ids)
        else:
            prj_syn_ids_array = np.asarray([], dtype=np.uint32)
        del (prj_syn_ids)
        if len(prj_distances) > 0:
            prj_distances_array = np.concatenate(prj_distances)
        else:
            prj_distances_array = np.asarray([], dtype=np.float32)
        del (prj_distances)
        if len(prj_source_vertices_array) == 0:
            logger.warning(
                'Rank %i: source gid list is empty for gid: %i projection: %s'
                % (rank, destination_gid, projection))
        count += len(prj_source_vertices_array)
        gid_dict[destination_gid] = (prj_source_vertices_array,
                                     {'Synapses': {'syn_id': np.asarray(prj_syn_ids_array, \
                                                                        dtype=np.uint32)},
                                      'Connections': {'distance': prj_distances_array}
                                      })

    return count
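
The per-gid entries accumulated in connection_dict have the structure below (synthetic values, mirroring the assignment at the end of the loop above):

import numpy as np

gid_entry = (np.asarray([501, 502], dtype=np.uint32),  # presynaptic gids
             {'Synapses': {'syn_id': np.asarray([0, 1], dtype=np.uint32)},
              'Connections': {'distance': np.asarray([120.5, 88.0],
                                                     dtype=np.float32)}})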
Example 14
    def parse_connection_config(self):
        """

        :return:
        """
        connection_config = self.model_config['Connection Generator']

        self.connection_velocity = connection_config['Connection Velocity']

        syn_mech_names = connection_config['Synapse Mechanisms']
        syn_param_rules = connection_config['Synapse Parameter Rules']

        self.synapse_attributes = SynapseAttributes(self, syn_mech_names,
                                                    syn_param_rules)

        extent_config = connection_config['Axon Extent']
        self.connection_extents = {}

        for population in extent_config:

            pop_connection_extents = {}
            for layer_name in extent_config[population]:

                if layer_name == 'default':
                    pop_connection_extents[layer_name] = \
                        {'width': extent_config[population][layer_name]['width'], \
                         'offset': extent_config[population][layer_name]['offset']}
                else:
                    layer_index = self.layers[layer_name]
                    pop_connection_extents[layer_index] = \
                        {'width': extent_config[population][layer_name]['width'], \
                         'offset': extent_config[population][layer_name]['offset']}

            self.connection_extents[population] = pop_connection_extents

        synapse_config = connection_config['Synapses']
        connection_dict = {}

        for (key_postsyn, val_syntypes) in viewitems(synapse_config):
            connection_dict[key_postsyn] = {}

            for (key_presyn, syn_dict) in viewitems(val_syntypes):
                val_type = syn_dict['type']
                val_synsections = syn_dict['sections']
                val_synlayers = syn_dict['layers']
                val_proportions = syn_dict['proportions']
                if 'contacts' in syn_dict:
                    val_contacts = syn_dict['contacts']
                else:
                    val_contacts = 1
                mechparams_dict = None
                swctype_mechparams_dict = None
                if 'mechanisms' in syn_dict:
                    mechparams_dict = syn_dict['mechanisms']
                else:
                    swctype_mechparams_dict = syn_dict['swctype mechanisms']

                res_type = self.Synapse_Types[val_type]
                res_synsections = []
                res_synlayers = []
                res_mechparams = {}

                for name in val_synsections:
                    res_synsections.append(self.SWC_Types[name])
                for name in val_synlayers:
                    res_synlayers.append(self.layers[name])
                if swctype_mechparams_dict is not None:
                    for swc_type in swctype_mechparams_dict:
                        swc_type_index = self.SWC_Types[swc_type]
                        res_mechparams[
                            swc_type_index] = self.parse_syn_mechparams(
                                swctype_mechparams_dict[swc_type])
                else:
                    res_mechparams['default'] = self.parse_syn_mechparams(
                        mechparams_dict)

                connection_dict[key_postsyn][key_presyn] = \
                    SynapseConfig(res_type, res_synsections, res_synlayers, val_proportions, val_contacts, \
                                  res_mechparams)

            config_dict = defaultdict(lambda: 0.0)
            for (key_presyn,
                 conn_config) in viewitems(connection_dict[key_postsyn]):
                for (s, l, p) in zip(conn_config.sections, conn_config.layers,
                                     conn_config.proportions):
                    config_dict[(conn_config.type, s, l)] += p

            for (k, v) in viewitems(config_dict):
                try:
                    assert (np.isclose(v, 1.0))
                except Exception as e:
                    self.logger.error(
                        'Connection configuration: probabilities for %s do not sum to 1: %s = %f'
                        % (key_postsyn, str(k), v))
                    raise e

        self.connection_config = connection_dict
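
The final validation above enforces that, for each (synapse type, section, layer) combination, proportions across presynaptic sources sum to 1; a synthetic illustration:

import numpy as np
from collections import defaultdict

config_dict = defaultdict(lambda: 0.0)
# two presynaptic sources contributing to the same (type, section, layer) key
config_dict[('excitatory', 'apical', 2)] += 0.7
config_dict[('excitatory', 'apical', 2)] += 0.3
assert np.isclose(config_dict[('excitatory', 'apical', 2)], 1.0)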
Example 15
def icp_transform(comm,
                  env,
                  soma_coords,
                  projection_ls,
                  population_extents,
                  rotate=None,
                  populations=None,
                  icp_iter=1000,
                  opt_iter=100):
    """
    Uses the iterative closest point (ICP) algorithm of the PCL library to transform soma coordinates onto a surface for a particular L value.
    http://pointclouds.org/documentation/tutorials/iterative_closest_point.php#iterative-closest-point

    """

    import dlib, pcl

    rank = comm.rank
    size = comm.size

    if populations is None:
        populations = list(soma_coords.keys())

    srf_resample = 25

    layer_extents = env.geometry['Parametric Surface']['Layer Extents']

    (extent_u, extent_v, extent_l) = get_total_extents(layer_extents)

    min_u, max_u = extent_u
    min_v, max_v = extent_v
    min_l, max_l = extent_l

    ## This parameter is used to expand the range of L and avoid
    ## situations where the endpoints of L end up outside of the range
    ## of the distance interpolant
    safety = 0.01

    extent_u = (min_u - safety, max_u + safety)
    extent_v = (min_v - safety, max_v + safety)

    projection_ptclouds = []
    for obs_l in projection_ls:
        srf = make_surface(extent_u, extent_v, obs_l, rotate=rotate)
        U, V = srf._resample_uv(srf_resample, srf_resample)
        meshpts = srf.ev(U, V)
        projection_ptcloud = pcl.PointCloud()
        projection_ptcloud.from_array(meshpts)
        projection_ptclouds.append(projection_ptcloud)

    soma_coords_dict = {}
    for pop in populations:
        coords_dict = soma_coords[pop]
        # assumed source of the (u, v, l) optimization bounds used below;
        # 'limits' was otherwise undefined and population_extents unused
        limits = population_extents[pop]
        if rank == 0:
            logger.info('Computing point transformation for population %s...' %
                        pop)
        count = 0
        xyz_coords = []
        gids = []
        for gid, coords in viewitems(coords_dict):
            if gid % size == rank:
                soma_u, soma_v, soma_l = coords
                xyz_coords.append(
                    DG_volume(soma_u, soma_v, soma_l, rotate=rotate))
                gids.append(gid)
        xyz_pts = np.vstack(xyz_coords)

        cloud_in = pcl.PointCloud()
        cloud_in.from_array(xyz_pts)

        icp = cloud_in.make_IterativeClosestPoint()

        all_est_xyz_coords = []
        all_est_uvl_coords = []
        all_interp_err = []

        for (k, cloud_prj) in enumerate(projection_ptclouds):
            k_est_xyz_coords = np.zeros((len(gids), 3))
            k_est_uvl_coords = np.zeros((len(gids), 3))
            interp_err = np.zeros((len(gids), ))
            converged, transf, estimate, fitness = icp.icp(cloud_in,
                                                           cloud_prj,
                                                           max_iter=icp_iter)
            logger.info('Transformation of population %s has converged: ' %
                        (pop) + str(converged) + ' score: %f' % (fitness))
            for i, gid in zip(list(range(0, estimate.size)), gids):
                est_xyz_coords = estimate[i]
                k_est_xyz_coords[i, :] = est_xyz_coords
                f_uvl_distance = make_uvl_distance(est_xyz_coords,
                                                   rotate=rotate)
                uvl_coords, err = dlib.find_min_global(f_uvl_distance,
                                                       limits[0], limits[1],
                                                       opt_iter)
                k_est_uvl_coords[i, :] = uvl_coords
                interp_err[i, ] = err
                if rank == 0:
                    logger.info(
                        'gid %i: u: %f v: %f l: %f' %
                        (gid, uvl_coords[0], uvl_coords[1], uvl_coords[2]))
            all_est_xyz_coords.append(k_est_xyz_coords)
            all_est_uvl_coords.append(k_est_uvl_coords)
            all_interp_err.append(interp_err)

        coords_dict = {}
        for (i, gid) in enumerate(gids):
            coords_dict[gid] = {
                'X Coordinate':
                np.asarray([col[i, 0] for col in all_est_xyz_coords],
                           dtype='float32'),
                'Y Coordinate':
                np.asarray([col[i, 1] for col in all_est_xyz_coords],
                           dtype='float32'),
                'Z Coordinate':
                np.asarray([col[i, 2] for col in all_est_xyz_coords],
                           dtype='float32'),
                'U Coordinate':
                np.asarray([col[i, 0] for col in all_est_uvl_coords],
                           dtype='float32'),
                'V Coordinate':
                np.asarray([col[i, 1] for col in all_est_uvl_coords],
                           dtype='float32'),
                'L Coordinate':
                np.asarray([col[i, 2] for col in all_est_uvl_coords],
                           dtype='float32'),
                'Interpolation Error':
                np.asarray([err[i] for err in all_interp_err], dtype='float32')
            }

        soma_coords_dict[pop] = coords_dict

    return soma_coords_dict
Example 16
    def load_celltypes(self):
        """

        :return:
        """
        rank = self.comm.Get_rank()
        size = self.comm.Get_size()
        celltypes = self.celltypes

        if rank == 0:
            color = 1
        else:
            color = 0
        ## on rank 0, comm0 is a communicator containing only rank 0
        comm0 = self.comm.Split(color, 0)

        if rank == 0:
            self.logger.info('env.data_file_path = %s' %
                             str(self.data_file_path))

        self.cell_attribute_info = None
        population_ranges = None
        population_names = None
        type_names = None
        if rank == 0:
            population_names = read_population_names(self.data_file_path,
                                                     comm0)
            (population_ranges,
             _) = read_population_ranges(self.data_file_path, comm0)
            type_names = sorted(population_ranges.keys())
            self.cell_attribute_info = read_cell_attribute_info(
                self.data_file_path, population_names, comm=comm0)
            self.logger.info('population_names = %s' % str(population_names))
            self.logger.info('population_ranges = %s' % str(population_ranges))
            self.logger.info('attribute info: %s' %
                             str(self.cell_attribute_info))
        population_ranges = self.comm.bcast(population_ranges, root=0)
        population_names = self.comm.bcast(population_names, root=0)
        type_names = self.comm.bcast(type_names, root=0)
        self.cell_attribute_info = self.comm.bcast(self.cell_attribute_info,
                                                   root=0)
        comm0.Free()

        for k in type_names:
            population_range = population_ranges.get(k, None)
            if population_range is not None:
                if k not in celltypes:
                    celltypes[k] = {}
                celltypes[k]['start'] = population_ranges[k][0]
                celltypes[k]['num'] = population_ranges[k][1]
                if 'mechanism file' in celltypes[k]:
                    celltypes[k]['mech_file_path'] = '%s/%s' % (
                        self.config_prefix, celltypes[k]['mechanism file'])
                    mech_dict = None
                    if rank == 0:
                        mech_dict = read_from_yaml(
                            celltypes[k]['mech_file_path'])
                    mech_dict = self.comm.bcast(mech_dict, root=0)
                    celltypes[k]['mech_dict'] = mech_dict
                if 'synapses' in celltypes[k]:
                    synapses_dict = celltypes[k]['synapses']
                    if 'weights' in synapses_dict:
                        weights_config = synapses_dict['weights']
                        if isinstance(weights_config, list):
                            weights_dicts = weights_config
                        else:
                            weights_dicts = [weights_config]
                        for weights_dict in weights_dicts:
                            if 'expr' in weights_dict:
                                expr = weights_dict['expr']
                                parameter = weights_dict['parameter']
                                const = weights_dict.get('const', None)
                                clos = ExprClosure(parameters=parameter,
                                                   expr=expr,
                                                   consts=const)
                                weights_dict['closure'] = clos
                        synapses_dict['weights'] = weights_dicts
Example 17
def vertex_distribution(connectivity_path,
                        coords_path,
                        distances_namespace,
                        destination,
                        sources,
                        bin_size=20.0,
                        cache_size=100,
                        comm=None):
    """
    Obtain spatial histograms of source vertices connecting to a given destination population.

    :param connectivity_path:
    :param coords_path:
    :param distances_namespace: 
    :param destination: 
    :param source: 

    """

    if comm is None:
        comm = MPI.COMM_WORLD

    rank = comm.Get_rank()

    color = 0
    if rank == 0:
        color = 1
    comm0 = comm.Split(color, 0)

    (population_ranges, _) = read_population_ranges(coords_path)

    destination_start = population_ranges[destination][0]
    destination_count = population_ranges[destination][1]

    destination_soma_distances = {}
    if rank == 0:
        logger.info(f'Reading {destination} distances...')
        distances_iter = read_cell_attributes(
            coords_path,
            destination,
            comm=comm0,
            mask=set(['U Distance', 'V Distance']),
            namespace=distances_namespace)

        destination_soma_distances = {
            k: (float(v['U Distance'][0]), float(v['V Distance'][0]))
            for (k, v) in distances_iter
        }

        gc.collect()

    comm.barrier()

    destination_soma_distances = comm.bcast(destination_soma_distances, root=0)
    destination_soma_distance_U = {}
    destination_soma_distance_V = {}
    for k, v in viewitems(destination_soma_distances):
        destination_soma_distance_U[k] = v[0]
        destination_soma_distance_V[k] = v[1]

    del (destination_soma_distances)

    if sources == ():
        sources = []
        for (src, dst) in read_projection_names(connectivity_path):
            if dst == destination:
                sources.append(src)

    source_soma_distances = {}
    if rank == 0:
        for s in sources:
            logger.info(f'Reading {s} distances...')
            distances_iter = read_cell_attributes(
                coords_path,
                s,
                comm=comm0,
                mask=set(['U Distance', 'V Distance']),
                namespace=distances_namespace)

            source_soma_distances[s] = {
                k: (float(v['U Distance'][0]), float(v['V Distance'][0]))
                for (k, v) in distances_iter
            }

            gc.collect()

    comm.barrier()
    comm0.Free()

    source_soma_distances = comm.bcast(source_soma_distances, root=0)

    source_soma_distance_U = {}
    source_soma_distance_V = {}
    for s in sources:
        this_source_soma_distance_U = {}
        this_source_soma_distance_V = {}
        for k, v in viewitems(source_soma_distances[s]):
            this_source_soma_distance_U[k] = v[0]
            this_source_soma_distance_V[k] = v[1]
        source_soma_distance_U[s] = this_source_soma_distance_U
        source_soma_distance_V[s] = this_source_soma_distance_V
    del (source_soma_distances)

    if rank == 0:
        logger.info('Reading connections %s -> %s...' %
                    (str(sources), destination))

    dist_bins = defaultdict(dict)
    dist_u_bins = defaultdict(dict)
    dist_v_bins = defaultdict(dict)

    gg = [
        NeuroH5ProjectionGen(connectivity_path,
                             source,
                             destination,
                             cache_size=cache_size,
                             comm=comm) for source in sources
    ]

    for prj_gen_tuple in zip_longest(*gg):
        destination_gid = prj_gen_tuple[0][0]
        if rank == 0 and destination_gid is not None:
            logger.info('%d' % destination_gid)
        if not all([
                prj_gen_elt[0] == destination_gid
                for prj_gen_elt in prj_gen_tuple
        ]):
            raise RuntimeError(
                'destination %s: destination gid %i not matched across multiple projection generators: '
                '%s' % (destination, destination_gid,
                        [prj_gen_elt[0] for prj_gen_elt in prj_gen_tuple]))

        if destination_gid is not None:
            for (source, (this_destination_gid,
                          rest)) in zip_longest(sources, prj_gen_tuple):
                this_source_soma_distance_U = source_soma_distance_U[source]
                this_source_soma_distance_V = source_soma_distance_V[source]
                this_dist_bins = dist_bins[source]
                this_dist_u_bins = dist_u_bins[source]
                this_dist_v_bins = dist_v_bins[source]
                (source_indexes, attr_dict) = rest
                dst_U = destination_soma_distance_U[destination_gid]
                dst_V = destination_soma_distance_V[destination_gid]
                for source_gid in source_indexes:
                    dist_u = dst_U - this_source_soma_distance_U[source_gid]
                    dist_v = dst_V - this_source_soma_distance_V[source_gid]
                    dist = abs(dist_u) + abs(dist_v)

                    update_bins(this_dist_bins, bin_size, dist)
                    update_bins(this_dist_u_bins, bin_size, dist_u)
                    update_bins(this_dist_v_bins, bin_size, dist_v)

    add_bins_op = MPI.Op.Create(add_bins, commute=True)
    for source in sources:
        dist_bins[source] = comm.reduce(dist_bins[source], op=add_bins_op)
        dist_u_bins[source] = comm.reduce(dist_u_bins[source], op=add_bins_op)
        dist_v_bins[source] = comm.reduce(dist_v_bins[source], op=add_bins_op)

    dist_hist_dict = defaultdict(dict)
    dist_u_hist_dict = defaultdict(dict)
    dist_v_hist_dict = defaultdict(dict)

    if rank == 0:
        for source in sources:
            dist_hist_dict[destination][source] = finalize_bins(
                dist_bins[source], bin_size)
            dist_u_hist_dict[destination][source] = finalize_bins(
                dist_u_bins[source], bin_size)
            dist_v_hist_dict[destination][source] = finalize_bins(
                dist_v_bins[source], bin_size)

    return {
        'Total distance': dist_hist_dict,
        'U distance': dist_u_hist_dict,
        'V distance': dist_v_hist_dict
    }
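
The distance binned above is the city-block (L1) separation of soma coordinates; for a single synthetic source/destination pair:

dist_u = 150.0 - 130.0   # destination minus source U distance
dist_v = 80.0 - 95.0     # destination minus source V distance
dist = abs(dist_u) + abs(dist_v)  # 35.0, the value passed to update_bins above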
Example 18
def spatial_bin_graph(connectivity_path,
                      coords_path,
                      distances_namespace,
                      destination,
                      sources,
                      extents,
                      bin_size=20.0,
                      cache_size=100,
                      comm=None):
    """
    Obtain reduced graphs of the specified projections by binning nodes according to their spatial position.

    :param connectivity_path:
    :param coords_path:
    :param distances_namespace: 
    :param destination: 
    :param source: 

    """

    import networkx as nx

    if comm is None:
        comm = MPI.COMM_WORLD

    rank = comm.Get_rank()

    (population_ranges, _) = read_population_ranges(coords_path)

    destination_start = population_ranges[destination][0]
    destination_count = population_ranges[destination][1]

    if rank == 0:
        logger.info('reading %s distances...' % destination)

    destination_soma_distances = bcast_cell_attributes(
        coords_path,
        destination,
        namespace=distances_namespace,
        comm=comm,
        root=0)

    ((x_min, x_max), (y_min, y_max)) = extents
    u_bins = np.arange(x_min, x_max, bin_size)
    v_bins = np.arange(y_min, y_max, bin_size)

    dest_u_bins = {}
    dest_v_bins = {}
    destination_soma_distance_U = {}
    destination_soma_distance_V = {}
    for k, v in destination_soma_distances:
        dist_u = v['U Distance'][0]
        dist_v = v['V Distance'][0]
        dest_u_bins[k] = np.searchsorted(u_bins, dist_u, side='left')
        dest_v_bins[k] = np.searchsorted(v_bins, dist_v, side='left')
        destination_soma_distance_U[k] = dist_u
        destination_soma_distance_V[k] = dist_v

    del destination_soma_distances

    if not sources:
        sources = []
        for (src, dst) in read_projection_names(connectivity_path):
            if dst == destination:
                sources.append(src)

    source_soma_distances = {}
    for s in sources:
        if rank == 0:
            logger.info('reading %s distances...' % s)
        source_soma_distances[s] = bcast_cell_attributes(
            coords_path, s, namespace=distances_namespace, comm=comm, root=0)

    source_u_bins = {}
    source_v_bins = {}
    source_soma_distance_U = {}
    source_soma_distance_V = {}
    for s in sources:
        this_source_soma_distance_U = {}
        this_source_soma_distance_V = {}
        this_source_u_bins = {}
        this_source_v_bins = {}
        for k, v in source_soma_distances[s]:
            dist_u = v['U Distance'][0]
            dist_v = v['V Distance'][0]
            this_source_u_bins[k] = np.searchsorted(u_bins,
                                                    dist_u,
                                                    side='left')
            this_source_v_bins[k] = np.searchsorted(v_bins,
                                                    dist_v,
                                                    side='left')
            this_source_soma_distance_U[k] = dist_u
            this_source_soma_distance_V[k] = dist_v
        source_soma_distance_U[s] = this_source_soma_distance_U
        source_soma_distance_V[s] = this_source_soma_distance_V
        source_u_bins[s] = this_source_u_bins
        source_v_bins[s] = this_source_v_bins
    del source_soma_distances

    if rank == 0:
        logger.info('reading connections %s -> %s...' %
                    (str(sources), destination))
    gg = [
        NeuroH5ProjectionGen(connectivity_path,
                             source,
                             destination,
                             cache_size=cache_size,
                             comm=comm) for source in sources
    ]

    local_u_bin_graph = defaultdict(dict)
    local_v_bin_graph = defaultdict(dict)

    for prj_gen_tuple in zip_longest(*gg):
        destination_gid = prj_gen_tuple[0][0]
        if not all([
                prj_gen_elt[0] == destination_gid
                for prj_gen_elt in prj_gen_tuple
        ]):
            raise RuntimeError(
                'destination %s: destination_gid %i not matched across multiple projection generators: '
                '%s' % (destination, destination_gid,
                        [prj_gen_elt[0] for prj_gen_elt in prj_gen_tuple]))

        if destination_gid is not None:
            dest_u_bin = dest_u_bins[destination_gid]
            dest_v_bin = dest_v_bins[destination_gid]
            for (source, (this_destination_gid,
                          rest)) in zip_longest(sources, prj_gen_tuple):
                this_source_u_bins = source_u_bins[source]
                this_source_v_bins = source_v_bins[source]
                (source_indexes, attr_dict) = rest
                source_u_bin_dict = defaultdict(int)
                source_v_bin_dict = defaultdict(int)
                for source_gid in source_indexes:
                    source_u_bin = this_source_u_bins[source_gid]
                    source_v_bin = this_source_v_bins[source_gid]
                    source_u_bin_dict[source_u_bin] += 1
                    source_v_bin_dict[source_v_bin] += 1
                local_u_bin_graph[dest_u_bin][source] = source_u_bin_dict
                local_v_bin_graph[dest_v_bin][source] = source_v_bin_dict

    local_u_bin_graphs = comm.gather(dict(local_u_bin_graph), root=0)
    local_v_bin_graphs = comm.gather(dict(local_v_bin_graph), root=0)

    u_bin_graph = None
    v_bin_graph = None
    nu = None
    nv = None

    if rank == 0:

        u_bin_edges = {destination: dict(ChainMap(*local_u_bin_graphs))}
        v_bin_edges = {destination: dict(ChainMap(*local_v_bin_graphs))}

        nu = len(u_bins)
        u_bin_graph = nx.Graph()
        for pop in [destination] + list(sources):
            for i in range(nu):
                u_bin_graph.add_node((pop, i))

        for i, ss in viewitems(u_bin_edges[destination]):
            for source, ids in viewitems(ss):
                u_bin_graph.add_weighted_edges_from([
                    ((source, j), (destination, i), count)
                    for j, count in viewitems(ids)
                ])

        nv = len(v_bins)
        v_bin_graph = nx.Graph()
        for pop in [destination] + list(sources):
            for i in range(nv):
                v_bin_graph.add_node((pop, i))

        for i, ss in viewitems(v_bin_edges[destination]):
            for source, ids in viewitems(ss):
                v_bin_graph.add_weighted_edges_from([
                    ((source, j), (destination, i), count)
                    for j, count in viewitems(ids)
                ])

    label = '%s to %s' % (str(sources), destination)

    return {
        'label': label,
        'bin size': bin_size,
        'destination': destination,
        'sources': sources,
        'U graph': u_bin_graph,
        'V graph': v_bin_graph
    }
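
A minimal driver sketch for the function above; the file paths, namespace, and population names below are placeholders, not values from the original project:

from mpi4py import MPI

# Hypothetical inputs; replace with real NeuroH5 files and population names.
graph_dict = spatial_bin_graph('connectivity.h5',
                               'coords.h5',
                               'Arc Distances',
                               destination='GC',
                               sources=['MPP', 'LPP'],
                               extents=((0.0, 4000.0), (0.0, 1250.0)),
                               bin_size=20.0)

if MPI.COMM_WORLD.Get_rank() == 0:
    u_graph = graph_dict['U graph']
    # Nodes are (population, bin_index) pairs; edge weights count connections.
    print(u_graph.number_of_nodes(), u_graph.number_of_edges())
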
Example 19
0
    def filter_by_distance(self, destination_gid, source_population,
                           source_layer):
        """
        Given the id of a target neuron, returns the distances along u and v
        and the gids of source neurons whose axons potentially contact the target neuron.

        :param destination_gid: int
        :param source_population: string
        :return: tuple of array of int
        """
        destination_coords = self.soma_coords[
            self.destination_population][destination_gid]
        source_coords = self.soma_coords[source_population]

        destination_distances = self.soma_distances[
            self.destination_population][destination_gid]

        source_distances = self.soma_distances[source_population]

        destination_u, destination_v, destination_l = destination_coords
        destination_distance_u, destination_distance_v = destination_distances

        distance_u_lst = []
        distance_v_lst = []
        source_u_lst = []
        source_v_lst = []
        source_gid_lst = []

        if source_layer in self.width[source_population]:
            layer_key = source_layer
        elif 'default' in self.width[source_population]:
            layer_key = 'default'
        else:
            raise RuntimeError('connection_generator.filter_by_distance: missing configuration for layer %s' % \
                               str(source_layer))

        source_width = self.width[source_population][layer_key]
        source_offset = self.offset[source_population][layer_key]

        max_distance_u = source_width['u'] + source_offset['u']
        max_distance_v = source_width['v'] + source_offset['v']

        for (source_gid, coords) in viewitems(source_coords):

            source_u, source_v, source_l = coords

            source_distance_u, source_distance_v = source_distances[source_gid]

            distance_u = abs(destination_distance_u - source_distance_u)
            distance_v = abs(destination_distance_v - source_distance_v)

            if (distance_u < max_distance_u) and (distance_v < max_distance_v):
                source_u_lst.append(source_u)
                source_v_lst.append(source_v)
                distance_u_lst.append(distance_u)
                distance_v_lst.append(distance_v)
                source_gid_lst.append(source_gid)

        return destination_u, destination_v, np.asarray(
            source_u_lst), np.asarray(source_v_lst), np.asarray(
                distance_u_lst), np.asarray(distance_v_lst), np.asarray(
                    source_gid_lst, dtype=np.uint32)
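
A self-contained toy version of the distance filter above, using placeholder soma distances and a 1000 x 500 micron extent window, to show which source gids pass:

import numpy as np

max_distance_u, max_distance_v = 1000.0, 500.0
destination_distance_u, destination_distance_v = 2000.0, 600.0
# Placeholder {gid: (u distance, v distance)} entries for a source population.
source_distances = {7: (1500.0, 700.0), 8: (3100.0, 650.0), 9: (2100.0, 580.0)}

source_gid_lst = []
for source_gid, (su, sv) in source_distances.items():
    distance_u = abs(destination_distance_u - su)
    distance_v = abs(destination_distance_v - sv)
    if distance_u < max_distance_u and distance_v < max_distance_v:
        source_gid_lst.append(source_gid)

print(np.asarray(source_gid_lst, dtype=np.uint32))  # gids 7 and 9 pass
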
Example 20
0
def generate_uv_distance_connections(comm,
                                     population_dict,
                                     connection_config,
                                     connection_prob,
                                     forest_path,
                                     synapse_seed,
                                     connectivity_seed,
                                     cluster_seed,
                                     synapse_namespace,
                                     connectivity_namespace,
                                     connectivity_path,
                                     io_size,
                                     chunk_size,
                                     value_chunk_size,
                                     cache_size,
                                     write_size=1,
                                     dry_run=False):
    """
    Generates connectivity based on U, V distance-weighted probabilities.

    :param comm: mpi4py MPI communicator
    :param connection_config: connection configuration object (instance of env.ConnectionConfig)
    :param connection_prob: ConnectionProb instance
    :param forest_path: location of file with neuronal trees and synapse information
    :param synapse_seed: random seed for synapse partitioning
    :param connectivity_seed: random seed for connectivity generation
    :param cluster_seed: random seed for determining connectivity clustering for repeated connections from the same source
    :param synapse_namespace: namespace of synapse properties
    :param connectivity_namespace: namespace of connectivity attributes
    :param io_size: number of I/O ranks to use for parallel connectivity append
    :param chunk_size: HDF5 chunk size for connectivity file (pointer and index datasets)
    :param value_chunk_size: HDF5 chunk size for connectivity file (value datasets)
    :param cache_size: how many cells to read ahead
    :param write_size: how many cells to write out at the same time
    """

    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    start_time = time.time()

    ranstream_syn = np.random.RandomState()
    ranstream_con = np.random.RandomState()

    destination_population = connection_prob.destination_population

    source_populations = sorted(
        connection_config[destination_population].keys())

    for source_population in source_populations:
        if rank == 0:
            logger.info('%s -> %s:' %
                        (source_population, destination_population))
            logger.info(
                str(connection_config[destination_population]
                    [source_population]))

    projection_config = connection_config[destination_population]
    projection_synapse_dict = {
        source_population: (projection_config[source_population].type,
                            projection_config[source_population].layers,
                            projection_config[source_population].sections,
                            projection_config[source_population].proportions,
                            projection_config[source_population].contacts)
        for source_population in source_populations
    }

    comm.barrier()

    total_count = 0
    gid_count = 0
    connection_dict = defaultdict(lambda: {})
    projection_dict = {}
    for destination_gid, synapse_dict in NeuroH5CellAttrGen(forest_path, \
                                                            destination_population, \
                                                            namespace=synapse_namespace, \
                                                            comm=comm, io_size=io_size, \
                                                            cache_size=cache_size):
        last_time = time.time()
        if destination_gid is None:
            logger.info('Rank %i destination gid is None' % rank)
        else:
            logger.info(
                'Rank %i received attributes for destination: %s, gid: %i' %
                (rank, destination_population, destination_gid))
            ranstream_con.seed(destination_gid + connectivity_seed)
            ranstream_syn.seed(destination_gid + synapse_seed)

            projection_prob_dict = {}
            for source_population in source_populations:
                source_layers = projection_config[source_population].layers
                projection_prob_dict[source_population] = \
                    connection_prob.get_prob(destination_gid, source_population, source_layers)


                for layer, (probs, source_gids, distances_u, distances_v) in \
                        viewitems(projection_prob_dict[source_population]):
                    if len(distances_u) > 0:
                        max_u_distance = np.max(distances_u)
                        min_u_distance = np.min(distances_u)
                        if rank == 0:
                            logger.info(
                                'Rank %i has %d possible sources from population %s for destination: %s, layer %s, gid: %i; max U distance: %f min U distance: %f'
                                % (rank, len(source_gids),
                                   source_population, destination_population,
                                   str(layer), destination_gid, max_u_distance,
                                   min_u_distance))
                    else:
                        logger.warning(
                            'Rank %i has %d possible sources from population %s for destination: %s, layer %s, gid: %i'
                            % (rank, len(source_gids),
                               source_population, destination_population,
                               str(layer), destination_gid))
            count = generate_synaptic_connections(
                rank, destination_gid, ranstream_syn, ranstream_con,
                cluster_seed + destination_gid, destination_gid, synapse_dict,
                population_dict, projection_synapse_dict, projection_prob_dict,
                connection_dict)
            total_count += count

            logger.info(
                'Rank %i took %i s to compute %d edges for destination: %s, gid: %i'
                % (rank, time.time() - last_time, count,
                   destination_population, destination_gid))

        if gid_count % write_size == 0:
            last_time = time.time()
            if len(connection_dict) > 0:
                projection_dict = {destination_population: connection_dict}
            else:
                projection_dict = {}
            if not dry_run:
                append_graph(connectivity_path,
                             projection_dict,
                             io_size=io_size,
                             comm=comm)
            if rank == 0:
                if connection_dict:
                    for (prj, prj_dict) in viewitems(connection_dict):
                        logger.info("%s: %s" %
                                    (prj, str(list(prj_dict.keys()))))
                    logger.info(
                        'Appending connectivity for %i projections took %i s' %
                        (len(connection_dict), time.time() - last_time))
            projection_dict.clear()
            connection_dict.clear()
            gc.collect()

        gid_count += 1

    last_time = time.time()
    if len(connection_dict) > 0:
        projection_dict = {destination_population: connection_dict}
    else:
        projection_dict = {}
    if not dry_run:
        append_graph(connectivity_path,
                     projection_dict,
                     io_size=io_size,
                     comm=comm)
    if rank == 0:
        if connection_dict:
            for (prj, prj_dict) in viewitems(connection_dict):
                logger.info("%s: %s" % (prj, str(list(prj_dict.keys()))))
            logger.info(
                'Appending connectivity for %i projections took %i s' %
                (len(connection_dict), time.time() - last_time))

    global_count = comm.gather(total_count, root=0)
    if rank == 0:
        logger.info(
            '%i ranks took %i s to generate %i edges' %
            (comm.size, time.time() - start_time, np.sum(global_count)))
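
A self-contained illustration of the per-gid seeding pattern used above: reseeding a RandomState with (gid + base seed) makes each cell's random stream reproducible regardless of which rank processes it.

import numpy as np

connectivity_seed = 2001   # placeholder base seed
ranstream_con = np.random.RandomState()

for destination_gid in (10, 11, 10):
    # Each gid always gets the same stream, independent of visit order.
    ranstream_con.seed(destination_gid + connectivity_seed)
    print(destination_gid, ranstream_con.uniform(size=3))
# The two draws for gid 10 are identical.
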
Example 21
0
    def __init__(self,
                 comm=None,
                 config_file=None,
                 template_paths="templates",
                 hoc_lib_path=None,
                 configure_nrn=True,
                 dataset_prefix=None,
                 config_prefix=None,
                 results_path=None,
                 results_file_id=None,
                 results_namespace_id=None,
                 node_rank_file=None,
                 io_size=0,
                 recording_profile=None,
                 recording_fraction=1.0,
                 coredat=False,
                 tstop=0.,
                 v_init=-65,
                 stimulus_onset=0.0,
                 n_trials=1,
                 max_walltime_hours=0.5,
                 checkpoint_interval=500.0,
                 checkpoint_clear_data=True,
                 results_write_time=0,
                 dt=0.025,
                 ldbal=False,
                 lptbal=False,
                 transfer_debug=False,
                 cell_selection_path=None,
                 spike_input_path=None,
                 spike_input_namespace=None,
                 spike_input_attr=None,
                 cleanup=True,
                 cache_queries=False,
                 profile_memory=False,
                 verbose=False,
                 **kwargs):
        """
        :param comm: :class:'MPI.COMM_WORLD'
        :param config_file: str; model configuration file name
        :param template_paths: str; colon-separated list of paths to directories containing hoc cell templates
        :param hoc_lib_path: str; path to directory containing required hoc libraries
        :param dataset_prefix: str; path to directory containing required neuroh5 data files
        :param config_prefix: str; path to directory containing network and cell mechanism config files
        :param results_path: str; path to directory to export output files
        :param results_file_id: str; label for neuroh5 files to write spike and voltage trace data
        :param results_namespace_id: str; label for neuroh5 namespaces to write spike and voltage trace data
        :param node_rank_file: str; name of file specifying assignment of node gids to MPI ranks
        :param io_size: int; the number of MPI ranks to be used for I/O operations
        :param recording_profile: str; intracellular recording configuration to use
        :param coredat: bool; Save CoreNEURON data
        :param tstop: int; physical time to simulate (ms)
        :param v_init: float; initialization membrane potential (mV)
        :param stimulus_onset: float; starting time of stimulus (ms)
        :param max_walltime_hours: float; maximum wall time (hours)
        :param results_write_time: float; time to write out results at end of simulation
        :param dt: float; simulation time step
        :param ldbal: bool; estimate load balance based on cell complexity
        :param lptbal: bool; calculate load balance with LPT algorithm
        :param cleanup: bool; clean up auxiliary cell and synapse structures after network init
        :param profile_memory: bool; profile memory usage
        :param cache_queries: bool; whether to use a cache to speed up queries to filter_synapses
        :param verbose: bool; print verbose diagnostic messages while constructing the network
        """
        self.kwargs = kwargs

        self.SWC_Types = {}
        self.SWC_Type_index = {}
        self.Synapse_Types = {}
        self.layers = {}
        self.globals = {}

        self.gidset = set([])
        self.gjlist = []
        self.cells = defaultdict(list)
        self.artificial_cells = defaultdict(dict)
        self.biophys_cells = defaultdict(dict)
        self.spike_onset_delay = {}
        self.recording_sets = {}

        self.pc = None
        if comm is None:
            self.comm = MPI.COMM_WORLD
        else:
            self.comm = comm
        rank = self.comm.Get_rank()

        if configure_nrn:
            from dentate.neuron_utils import h, find_template

        # If true, the biophysical cells and synapses dictionary will be freed
        # as synapses and connections are instantiated.
        self.cleanup = cleanup

        # If true, compute and print memory usage at various points
        # during simulation initialization
        self.profile_memory = profile_memory

        # print verbose diagnostic messages
        self.verbose = verbose
        config_logging(verbose)
        self.logger = get_root_logger()

        # Directories for cell templates
        if template_paths is not None:
            self.template_paths = template_paths.split(':')
        else:
            self.template_paths = []
        self.template_dict = {}

        # The location of required hoc libraries
        self.hoc_lib_path = hoc_lib_path

        # Checkpoint interval in ms of simulation time
        self.checkpoint_interval = max(float(checkpoint_interval), 1.0)
        self.checkpoint_clear_data = checkpoint_clear_data
        self.last_checkpoint = 0.

        # The location of all datasets
        self.dataset_prefix = dataset_prefix

        # The path where results files should be written
        self.results_path = results_path

        # Identifier used to construct results data namespaces
        self.results_namespace_id = results_namespace_id
        # Identifier used to construct results data files
        self.results_file_id = results_file_id

        # Number of MPI ranks to be used for I/O operations
        self.io_size = int(io_size)

        # Initialization voltage
        self.v_init = float(v_init)

        # simulation time [ms]
        self.tstop = float(tstop)

        # stimulus onset time [ms]
        self.stimulus_onset = float(stimulus_onset)

        # number of trials
        self.n_trials = int(n_trials)

        # maximum wall time in hours
        self.max_walltime_hours = float(max_walltime_hours)

        # time to write out results at end of simulation
        self.results_write_time = float(results_write_time)

        # time step
        self.dt = float(dt)

        # used to estimate cell complexity
        self.cxvec = None

        # measure/perform load balancing
        self.optldbal = ldbal
        self.optlptbal = lptbal

        self.transfer_debug = transfer_debug

        # Save CoreNEURON data
        self.coredat = coredat

        # cache queries to filter_synapses
        self.cache_queries = cache_queries

        self.config_prefix = config_prefix
        if config_file is not None:
            if config_prefix is not None:
                config_file_path = os.path.join(self.config_prefix, config_file)
            else:
                config_file_path = config_file
            if not os.path.isfile(config_file_path):
                raise RuntimeError("configuration file %s was not found" %
                                   config_file_path)
            with open(config_file_path) as fp:
                self.model_config = yaml.load(fp, IncludeLoader)
        else:
            raise RuntimeError("missing configuration file")

        if 'Definitions' in self.model_config:
            self.parse_definitions()
            self.SWC_Type_index = dict([(item[1], item[0])
                                        for item in viewitems(self.SWC_Types)])

        if 'Global Parameters' in self.model_config:
            self.parse_globals()

        self.geometry = None
        if 'Geometry' in self.model_config:
            self.geometry = self.model_config['Geometry']

        if self.geometry is not None:
            if 'Origin' in self.geometry['Parametric Surface']:
                self.parse_origin_coords()

        self.celltypes = self.model_config['Cell Types']
        self.cell_attribute_info = {}

        # The name of this model
        if 'Model Name' in self.model_config:
            self.modelName = self.model_config['Model Name']
        # The dataset to use for constructing the network
        if 'Dataset Name' in self.model_config:
            self.datasetName = self.model_config['Dataset Name']

        if rank == 0:
            self.logger.info('env.dataset_prefix = %s' %
                             str(self.dataset_prefix))

        # Cell selection for simulations of subsets of the network
        self.cell_selection = None
        self.cell_selection_path = cell_selection_path
        if rank == 0:
            self.logger.info('env.cell_selection_path = %s' %
                             str(self.cell_selection_path))
        if cell_selection_path is not None:
            with open(cell_selection_path) as fp:
                self.cell_selection = yaml.load(fp, IncludeLoader)

        # Spike input path
        self.spike_input_path = spike_input_path
        self.spike_input_ns = spike_input_namespace
        self.spike_input_attr = spike_input_attr
        self.spike_input_attribute_info = None
        if self.spike_input_path is not None:
            if rank == 0:
                self.logger.info('env.spike_input_path = %s' %
                                 str(self.spike_input_path))
            self.spike_input_attribute_info = \
              read_cell_attribute_info(self.spike_input_path, sorted(self.Populations.keys()), comm=self.comm)
            if rank == 0:
                self.logger.info('env.spike_input_attribute_info = %s' %
                                 str(self.spike_input_attribute_info))
        if results_path:
            if self.results_file_id is None:
                self.results_file_path = "%s/%s_results.h5" % (
                    self.results_path, self.modelName)
            else:
                self.results_file_path = "%s/%s_results_%s.h5" % (
                    self.results_path, self.modelName, self.results_file_id)
        else:
            if self.results_file_id is None:
                self.results_file_path = "%s_results.h5" % (self.modelName)
            else:
                self.results_file_path = "%s_results_%s.h5" % (
                    self.modelName, self.results_file_id)

        if 'Connection Generator' in self.model_config:
            self.parse_connection_config()
            self.parse_gapjunction_config()

        if self.dataset_prefix is not None:
            self.dataset_path = os.path.join(self.dataset_prefix,
                                             self.datasetName)
            if 'Cell Data' in self.model_config:
                self.data_file_path = os.path.join(
                    self.dataset_path, self.model_config['Cell Data'])
                self.forest_file_path = os.path.join(
                    self.dataset_path, self.model_config['Cell Data'])
                self.load_celltypes()
            else:
                self.data_file_path = None
                self.forest_file_path = None
            if rank == 0:
                self.logger.info('env.data_file_path = %s' %
                                 self.data_file_path)
            if 'Connection Data' in self.model_config:
                self.connectivity_file_path = os.path.join(
                    self.dataset_path, self.model_config['Connection Data'])
            else:
                self.connectivity_file_path = None
            if 'Gap Junction Data' in self.model_config:
                self.gapjunctions_file_path = os.path.join(
                    self.dataset_path, self.model_config['Gap Junction Data'])
            else:
                self.gapjunctions_file_path = None
        else:
            self.dataset_path = None
            self.data_file_path = None
            self.connectivity_file_path = None
            self.forest_file_path = None
            self.gapjunctions_file_path = None

        self.node_ranks = None
        if node_rank_file:
            self.load_node_ranks(node_rank_file)

        self.netclamp_config = None
        if 'Network Clamp' in self.model_config:
            self.parse_netclamp_config()

        self.stimulus_config = None
        self.arena_id = None
        self.trajectory_id = None
        if 'Stimulus' in self.model_config:
            self.parse_stimulus_config()
            self.init_stimulus_config(**kwargs)

        self.analysis_config = None
        if 'Analysis' in self.model_config:
            self.analysis_config = self.model_config['Analysis']

        self.projection_dict = defaultdict(list)
        if self.dataset_prefix is not None:
            if rank == 0:
                self.logger.info('env.connectivity_file_path = %s' %
                                 str(self.connectivity_file_path))
            if self.connectivity_file_path is not None:
                for (src,
                     dst) in read_projection_names(self.connectivity_file_path,
                                                   comm=self.comm):
                    self.projection_dict[dst].append(src)
                if rank == 0:
                    self.logger.info('projection_dict = %s' %
                                     str(self.projection_dict))

        # Configuration profile for recording intracellular quantities
        assert ((recording_fraction >= 0.0) and (recording_fraction <= 1.0))
        self.recording_fraction = recording_fraction
        self.recording_profile = None
        if ('Recording' in self.model_config) and (recording_profile
                                                   is not None):
            self.recording_profile = self.model_config['Recording'][
                'Intracellular'][recording_profile]
            self.recording_profile['label'] = recording_profile
            for recvar, recdict in viewitems(
                    self.recording_profile.get('synaptic quantity', {})):
                filters = {}
                if 'syn types' in recdict:
                    filters['syn_types'] = recdict['syn types']
                if 'swc types' in recdict:
                    filters['swc_types'] = recdict['swc types']
                if 'layers' in recdict:
                    filters['layers'] = recdict['layers']
                if 'sources' in recdict:
                    filters['sources'] = recdict['sources']
                syn_filters = get_syn_filter_dict(self, filters, convert=True)
                recdict['syn_filters'] = syn_filters

        # Configuration profile for recording local field potentials
        self.LFP_config = {}
        if 'Recording' in self.model_config:
            for label, config in viewitems(
                    self.model_config['Recording']['LFP']):
                self.LFP_config[label] = {
                    'position': tuple(config['position']),
                    'maxEDist': config['maxEDist'],
                    'fraction': config['fraction'],
                    'rho': config['rho'],
                    'dt': config['dt']
                }

        self.t_vec = None
        self.id_vec = None
        self.t_rec = None
        self.recs_dict = {}  # Intracellular samples on this host
        for pop_name, _ in viewitems(self.Populations):
            self.recs_dict[pop_name] = defaultdict(list)

        # used to calculate model construction times and run time
        self.mkcellstime = 0
        self.mkstimtime = 0
        self.connectcellstime = 0
        self.connectgjstime = 0

        self.simtime = None
        self.lfp = {}

        self.edge_count = defaultdict(dict)
        self.syns_set = defaultdict(set)
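
A construction sketch for this initializer; it assumes the enclosing class is named Env (the class name is not shown in this snippet) and that the placeholder configuration files and directories exist:

from mpi4py import MPI

# Hypothetical usage; config_file, prefixes, and paths are placeholders.
env = Env(comm=MPI.COMM_WORLD,
          config_file='Network.yaml',
          config_prefix='config',
          dataset_prefix='datasets',
          template_paths='templates',
          results_path='results',
          tstop=1000.0,
          dt=0.025,
          verbose=True)
print(env.results_file_path)
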
Example 22
0
    def export_to_file(self,
                       file_path,
                       model_label=None,
                       category=None,
                       append=True):
        """
        Exports simulated data and metadata to an HDF5 file. Arrays are saved as datasets and metadata is saved as
        attributes. Repeated simulations are stored in enumerated groups.

        :param file_path: str (path)
        :param model_label: int or str
        :param category: str
        :param append: bool
        """
        if append:
            io_type = 'a'
        else:
            io_type = 'w'
        with h5py.File(file_path, io_type) as f:
            target = get_h5py_group(f, [model_label, 'sim_output', category],
                                    create=True)
            target.attrs['enumerated'] = True
            simiter = len(target)
            if str(simiter) not in target:
                target.create_group(str(simiter))
            target[str(simiter)].create_dataset('time',
                                                compression='gzip',
                                                data=self.tvec)
            target[str(simiter)]['time'].attrs['dt'] = self.dt
            for parameter in self.parameters:
                set_h5py_attr(target[str(simiter)].attrs, parameter,
                              self.parameters[parameter])
            if len(self.stims) > 0:
                target[str(simiter)].create_group('stims')
                for name, stim_dict in viewitems(self.stims):
                    stim = target[str(simiter)]['stims'].create_dataset(
                        name, compression='gzip', data=stim_dict['vec'])
                    cell = stim_dict['cell']
                    stim.attrs['cell'] = cell.gid
                    node = stim_dict['node']
                    stim.attrs['index'] = node.index
                    set_h5py_attr(stim.attrs, 'type', node.type)
                    loc = stim_dict['stim'].get_segment().x
                    stim.attrs['loc'] = loc
                    distance = get_distance_to_node(cell, cell.tree.root,
                                                    node, loc)
                    stim.attrs['soma_distance'] = distance
                    distance = get_distance_to_node(
                        cell, get_dendrite_origin(cell, node), node, loc)
                    stim.attrs['branch_distance'] = distance
                    stim.attrs['amp'] = stim_dict['stim'].amp
                    stim.attrs['delay'] = stim_dict['stim'].delay
                    stim.attrs['dur'] = stim_dict['stim'].dur
                    set_h5py_attr(stim.attrs, 'description',
                                  stim_dict['description'])
            target[str(simiter)].create_group('recs')
            for name, rec_dict in viewitems(self.recs):
                rec = target[str(simiter)]['recs'].create_dataset(
                    name, compression='gzip', data=rec_dict['vec'])
                cell = rec_dict['cell']
                rec.attrs['cell'] = cell.gid
                node = rec_dict['node']
                rec.attrs['index'] = node.index
                set_h5py_attr(rec.attrs, 'type', node.type)
                loc = rec_dict['loc']
                rec.attrs['loc'] = loc
                distance = get_distance_to_node(cell, cell.tree.root, node,
                                                loc)
                rec.attrs['soma_distance'] = distance
                distance = get_distance_to_node(
                    cell, get_dendrite_origin(cell, node), node, loc)
                node_is_terminal = is_terminal(node)
                branch_order = get_branch_order(cell, node)
                rec.attrs['branch_distance'] = distance
                rec.attrs['is_terminal'] = node_is_terminal
                rec.attrs['branch_order'] = branch_order
                set_h5py_attr(rec.attrs, 'ylabel', rec_dict['ylabel'])
                set_h5py_attr(rec.attrs, 'units', rec_dict['units'])
                set_h5py_attr(rec.attrs, 'description',
                              rec_dict['description'])
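
Data written by this method can be read back with plain h5py. A read-back sketch, assuming a file previously written with model_label='model0' and category='default' (both placeholder values):

import h5py

with h5py.File('sim_output.h5', 'r') as f:
    group = f['model0']['sim_output']['default']['0']  # first enumerated simulation
    t = group['time'][:]
    dt = group['time'].attrs['dt']
    for name, rec in group['recs'].items():
        # Each recording dataset carries its metadata as HDF5 attributes.
        print(name, rec.attrs['units'], rec.attrs['soma_distance'], len(rec))
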
Example 23
0
def spikeout(env, output_path, t_start=None, clear_data=False):
    """
    Writes spike times to specified NeuroH5 output file.

    :param env:
    :param output_path:
    :param clear_data: 
    :return:
    """
    equilibration_duration = float(
        env.stimulus_config['Equilibration Duration'])
    n_trials = env.n_trials

    t_vec = np.array(env.t_vec, dtype=np.float32)
    id_vec = np.array(env.id_vec, dtype=np.uint32)

    trial_time_ranges = get_trial_time_ranges(env.t_rec.to_python(),
                                              env.n_trials)
    trial_time_bins = [
        t_trial_start for t_trial_start, t_trial_end in trial_time_ranges
    ]
    trial_dur = np.asarray([env.tstop + equilibration_duration] * n_trials,
                           dtype=np.float32)

    typelst = sorted(env.celltypes.keys())
    binvect = np.asarray([env.celltypes[k]['start'] for k in typelst])
    sort_idx = np.argsort(binvect, axis=0)
    pop_names = [typelst[i] for i in sort_idx]
    bins = binvect[sort_idx][1:]
    inds = np.digitize(id_vec, bins)

    if env.results_namespace_id is None:
        namespace_id = "Spike Events"
    else:
        namespace_id = "Spike Events %s" % str(env.results_namespace_id)

    for i, pop_name in enumerate(pop_names):
        spkdict = {}
        sinds = np.where(inds == i)[0]
        if len(sinds) > 0:
            ids = id_vec[sinds]
            ts = t_vec[sinds]
            for j in range(0, len(ids)):
                gid = ids[j]
                t = ts[j]
                if (t_start is None) or (t >= t_start):
                    if gid in spkdict:
                        spkdict[gid]['t'].append(t)
                    else:
                        spkdict[gid] = {'t': [t]}
            for gid in spkdict:
                spiketrain = np.array(spkdict[gid]['t'], dtype=np.float32)
                if gid in env.spike_onset_delay:
                    spiketrain -= env.spike_onset_delay[gid]
                trial_bins = np.digitize(spiketrain, trial_time_bins) - 1
                trial_spikes = [
                    np.copy(spiketrain[np.where(trial_bins == trial_i)[0]])
                    for trial_i in range(n_trials)
                ]
                for trial_i, trial_spiketrain in enumerate(trial_spikes):
                    trial_spiketrain -= np.sum(
                        trial_dur[:trial_i]) + equilibration_duration
                spkdict[gid]['t'] = np.concatenate(trial_spikes)
                spkdict[gid]['Trial Duration'] = trial_dur
                spkdict[gid]['Trial Index'] = np.asarray(trial_bins,
                                                         dtype=np.uint8)
        append_cell_attributes(output_path,
                               pop_name,
                               spkdict,
                               namespace=namespace_id,
                               comm=env.comm,
                               io_size=env.io_size)
        del spkdict

    if clear_data:
        env.t_vec.resize(0)
        env.id_vec.resize(0)

    env.comm.barrier()
    if env.comm.Get_rank() == 0:
        logger.info("*** Output spike results to file %s" % output_path)
Example 24
0
def interp_soma_distances(comm,
                          ip_dist_u,
                          ip_dist_v,
                          soma_coords,
                          layer_extents,
                          population_layers,
                          interp_chunk_size=1000,
                          populations=None,
                          allgather=False):
    """Interpolates path lengths of cell coordinates along the dimensions of an `RBFVolume` instance.

    Parameters
    ----------
    comm : MPIComm
        mpi4py MPI communicator
    ip_dist_u : RBFInterpolant
        Interpolation function for computing arc distances along the first dimension of the volume.
    ip_dist_v : RBFInterpolant
        Interpolation function for computing arc distances along the second dimension of the volume.
    soma_coords : { population_name : coords_dict }
        A dictionary that maps each cell population name to a dictionary of coordinates. The dictionary of coordinates must have the following type:
          coords_dict : { gid : (u, v, l) }
          where:
          - gid: cell identifier
          - u, v, l: floating point coordinates
    population_layers: { population_name : layers }
        A dictionary of population count per layer
        Argument layers has the following type:
         { layer_name: count }
    allgather: boolean (default: False)
       if True, the results are gathered from all ranks and combined
    Returns
    -------
    A dictionary of the form:

      { population: { gid: (distance_U, distance_V } }

    """

    rank = comm.rank
    size = comm.size

    if populations is None:
        populations = sorted(soma_coords.keys())

    soma_distances = {}
    for pop in populations:
        coords_dict = soma_coords[pop]
        if rank == 0:
            logger.info(
                'Computing soma distances for %d cells from population %s...' %
                (len(coords_dict), pop))
        count = 0
        local_dist_dict = {}
        pop_layer = population_layers[pop]
        u_obs = []
        v_obs = []
        gids = []
        for gid, coords in viewitems(coords_dict):
            if gid % size == rank:
                soma_u, soma_v, soma_l = coords
                try:
                    assert (uvl_in_bounds(coords, layer_extents, pop_layer))
                except Exception as e:
                    logger.error("gid %i: out of limits error for coordinates: %f %f %f)" % \
                                 (gid, soma_u, soma_v, soma_l))
                    raise e

                u_obs.append(np.array([soma_u, soma_v, soma_l]).ravel())
                v_obs.append(np.array([soma_u, soma_v, soma_l]).ravel())
                gids.append(gid)
        if len(u_obs) > 0:
            u_obs_array = np.vstack(u_obs)
            v_obs_array = np.vstack(v_obs)
            distance_u_obs = ip_dist_u(u_obs_array).reshape(-1, 1)
            distance_v_obs = ip_dist_v(v_obs_array).reshape(-1, 1)
            distance_u = np.mean(distance_u_obs, axis=1)
            distance_v = np.mean(distance_v_obs, axis=1)
            try:
                assert (np.all(np.isfinite(distance_u)))
                assert (np.all(np.isfinite(distance_v)))
            except Exception as e:
                u_nan_idxs = np.where(np.isnan(distance_u))[0]
                v_nan_idxs = np.where(np.isnan(distance_v))[0]
                logger.error('Invalid distances: u: %s; v: %s',
                             str(u_obs_array[u_nan_idxs]),
                             str(v_obs_array[v_nan_idxs]))
                raise e

        for (i, gid) in enumerate(gids):
            local_dist_dict[gid] = (distance_u[i], distance_v[i])
            if rank == 0:
                logger.info('gid %i: distances: %f %f' %
                            (gid, distance_u[i], distance_v[i]))
        if allgather:
            dist_dicts = comm.allgather(local_dist_dict)
            combined_dist_dict = {}
            for dist_dict in dist_dicts:
                for k, v in viewitems(dist_dict):
                    combined_dist_dict[k] = v
            soma_distances[pop] = combined_dist_dict
        else:
            soma_distances[pop] = local_dist_dict

    return soma_distances
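
The ip_dist_u and ip_dist_v arguments are RBF interpolants fitted elsewhere. A sketch of constructing comparable interpolants with scipy, using synthetic placeholder sample coordinates and arc distances:

import numpy as np
from scipy.interpolate import RBFInterpolator

rng = np.random.RandomState(0)
# Synthetic (u, v, l) sample points and placeholder arc distances.
uvl_samples = rng.uniform([0., 0., 0.], [4000., 1250., 3.], size=(200, 3))
dist_u_samples = uvl_samples[:, 0] * 1.05
dist_v_samples = uvl_samples[:, 1] * 0.98

ip_dist_u = RBFInterpolator(uvl_samples, dist_u_samples)
ip_dist_v = RBFInterpolator(uvl_samples, dist_v_samples)

# Both interpolants map (u, v, l) coordinates to scalar arc distances,
# matching how interp_soma_distances calls them on stacked coordinate arrays.
print(ip_dist_u(uvl_samples[:5]), ip_dist_v(uvl_samples[:5]))
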
Example 25
0
    def __init__(self, destination_population, soma_coords, soma_distances,
                 extents):
        """
        Warning: This method does not produce an absolute probability. It must be normalized so that the total area
        (volume) under the distribution is 1 before sampling.
        :param destination_population: post-synaptic population name
        :param soma_distances: a dictionary that contains per-population dicts of u, v distances of cell somas
        :param extent: dict: {source: 'width': (tuple of float), 'offset': (tuple of float)}
        """
        self.destination_population = destination_population
        self.soma_coords = soma_coords
        self.soma_distances = soma_distances
        self.p_dist = defaultdict(dict)
        self.width = defaultdict(dict)
        self.offset = defaultdict(dict)
        self.scale_factor = defaultdict(dict)

        for source_population, layer_extents in viewitems(extents):

            for layer, layer_extent in viewitems(layer_extents):

                extent_width = layer_extent['width']
                if 'offset' in layer_extent:
                    extent_offset = layer_extent['offset']
                else:
                    extent_offset = (0., 0.)

                u_extent = (float(extent_width[0]) / 2.0) - float(
                    extent_offset[0])
                v_extent = (float(extent_width[1]) / 2.0) - float(
                    extent_offset[1])
                self.width[source_population][layer] = {
                    'u': u_extent,
                    'v': v_extent
                }

                self.scale_factor[source_population][layer] = \
                    {axis: self.width[source_population][layer][axis] / 3. \
                     for axis in self.width[source_population][layer]}

                self.offset[source_population][layer] = {'u': float(extent_offset[0]), \
                                                         'v': float(extent_offset[1])}

                self.p_dist[source_population][layer] = \
                    (lambda source_population, layer: \
                         np.vectorize(lambda distance_u, distance_v: \
                                          (norm.pdf(np.abs(distance_u) - self.offset[source_population][layer]['u'], \
                                                    scale=self.scale_factor[source_population][layer]['u']) * \
                                           norm.pdf(np.abs(distance_v) - self.offset[source_population][layer]['v'], \
                                                    scale=self.scale_factor[source_population][layer]['v'])), \
                                      otypes=[float]))(source_population, layer)

                logger.info('population %s: layer: %s: u ' \
                            'width: %f v width: %f u scale_factor: %f v scale_factor: %f' % \
                            (source_population, str(layer), \
                             self.width[source_population][layer]['u'], \
                             self.width[source_population][layer]['v'], \
                             self.scale_factor[source_population][layer]['u'], \
                             self.scale_factor[source_population][layer]['v']))
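
The vectorized kernel stored in p_dist is a product of two Gaussians over |distance_u| and |distance_v|. A standalone check of the same computation, with placeholder offsets and scale factors:

import numpy as np
from scipy.stats import norm

offset_u, offset_v = 0.0, 0.0
scale_u, scale_v = 500.0 / 3., 250.0 / 3.   # width / 3, as in the constructor

distance_u = np.asarray([0., 100., 400.])
distance_v = np.asarray([0., 50., 200.])

# Product of the two Gaussian densities, as in the p_dist closure above.
p = (norm.pdf(np.abs(distance_u) - offset_u, scale=scale_u) *
     norm.pdf(np.abs(distance_v) - offset_v, scale=scale_v))
print(p)   # unnormalized connection probabilities, peaked at zero distance
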