Example #1
def make_param_spec(pop_names, param_dict):
    """Constructs a flat list representation of synaptic parameters."""

    param_names = []
    param_tuples = []

    for pop_name in pop_names:
        param_specs = param_dict[pop_name]
        keyfun = lambda kv: str(kv[0])
        for source, source_dict in sorted(viewitems(param_specs), key=keyfun):
            for sec_type, sec_type_dict in sorted(viewitems(source_dict), key=keyfun):
                for syn_name, syn_mech_dict in sorted(viewitems(sec_type_dict), key=keyfun):
                    for param_fst, param_rst in sorted(viewitems(syn_mech_dict), key=keyfun):
                        if isinstance(param_rst, dict):
                            for const_name, const_range in sorted(viewitems(param_rst)):
                                param_path = (param_fst, const_name)
                                param_tuples.append(SynParam(pop_name, source, sec_type, syn_name, param_path, const_range))
                                param_key = '%s.%s.%s.%s.%s.%s' % (pop_name, str(source), sec_type, syn_name, param_fst, const_name)
                                param_names.append(param_key)
                        else:
                            param_name = param_fst
                            param_range = param_rst
                            param_tuples.append(SynParam(pop_name, source, sec_type, syn_name, param_name, param_range))
                            param_key = '%s.%s.%s.%s.%s' % (pop_name, source, sec_type, syn_name, param_name)
                            param_names.append(param_key)
                                
    return ParamSpec(param_names=param_names, param_tuples=param_tuples)
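SynParam, ParamSpec, and viewitems are not defined in this listing (viewitems is the future/six items() shim). A minimal usage sketch, with assumed namedtuple stand-ins for the two record types, shows the expected nesting of param_dict and the flattened keys that come back:

from collections import namedtuple

# Stand-ins for names this listing does not define (assumptions, not the real types).
viewitems = lambda d: d.items()
SynParam = namedtuple('SynParam', ['population', 'source', 'sec_type', 'syn_name', 'param_path', 'param_range'])
ParamSpec = namedtuple('ParamSpec', ['param_names', 'param_tuples'])

param_dict = {'GC': {'MPP':                      # source projection
                     {'apical':                  # section type
                      {'AMPA':                   # synapse mechanism
                       {'g_unit': [5e-4, 5e-3],  # plain parameter -> value range
                        'weight': {'default': [0.5, 2.0]}}}}}}  # dict -> (param, const) path

spec = make_param_spec(['GC'], param_dict)
print(spec.param_names)
# ['GC.MPP.apical.AMPA.g_unit', 'GC.MPP.apical.AMPA.weight.default']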
Example #2
def spike_density_estimate(population, spkdict, time_bins, arena_id=None, trajectory_id=None, output_file_path=None,
                            progress=False, inferred_rate_attr_name='Inferred Rate Map', **kwargs):
    """
    Calculates spike density function for the given spike trains.
    :param population:
    :param spkdict:
    :param time_bins:
    :param arena_id: str
    :param trajectory_id: str
    :param output_file_path:
    :param progress:
    :param inferred_rate_attr_name: str
    :param kwargs: dict
    :return: dict
    """
    if progress:
        from tqdm import tqdm

    analysis_options = copy.copy(default_baks_analysis_options)
    analysis_options.update(kwargs)

    def make_spktrain(lst, t_start, t_stop):
        spkts = np.asarray(lst, dtype=np.float32)
        return spkts[(spkts >= t_start) & (spkts <= t_stop)]

    
    t_start = time_bins[0]
    t_stop = time_bins[-1]

    spktrains = {ind: make_spktrain(lst, t_start, t_stop) for (ind, lst) in viewitems(spkdict)}
    baks_args = dict()
    baks_args['a'] = analysis_options['BAKS Alpha']
    baks_args['b'] = analysis_options['BAKS Beta']
    
    if progress:
        seq = tqdm(viewitems(spktrains))
    else:
        seq = viewitems(spktrains)
        
    spk_rate_dict = {ind: baks(spkts / 1000., time_bins / 1000., **baks_args)[0].reshape((-1,))
                     if len(spkts) > 1 else np.zeros(time_bins.shape)
                     for ind, spkts in seq}

    if output_file_path is not None:
        if arena_id is None or trajectory_id is None:
            raise RuntimeError('spike_density_estimate: arena_id and trajectory_id required to write Spike Density '
                               'Function namespace')
        namespace = 'Spike Density Function %s %s' % (arena_id, trajectory_id)
        attr_dict = {ind: {inferred_rate_attr_name: np.asarray(spk_rate_dict[ind], dtype='float32')}
                     for ind in spk_rate_dict}
        write_cell_attributes(output_file_path, population, attr_dict, namespace=namespace)

    result = {ind: {'rate': rate, 'time': time_bins} for ind, rate in viewitems(spk_rate_dict)}

    return result
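baks (the Bayesian Adaptive Kernel Smoother) and the NeuroH5 I/O helpers are defined elsewhere; the sketch below reproduces just the windowing and fallback-to-zeros pattern from above, substituting a plain binned-count rate for BAKS (spike times in ms):

import numpy as np

def simple_rate_estimate(spkts, time_bins):
    # spikes-per-second histogram estimate on the given bin edges (a BAKS stand-in)
    counts, edges = np.histogram(spkts, bins=time_bins)
    return counts / (np.diff(edges) / 1000.)

def make_spktrain(lst, t_start, t_stop):
    spkts = np.asarray(lst, dtype=np.float32)
    return spkts[(spkts >= t_start) & (spkts <= t_stop)]

time_bins = np.arange(0., 1000., 50.)               # 50 ms bins over 1 s
spkdict = {10: [12., 140., 141., 630.], 11: [990.]}

t_start, t_stop = time_bins[0], time_bins[-1]
spktrains = {ind: make_spktrain(lst, t_start, t_stop) for ind, lst in spkdict.items()}

# As above: cells with fewer than two in-window spikes get an all-zero rate.
rates = {ind: simple_rate_estimate(spkts, time_bins) if len(spkts) > 1
         else np.zeros(len(time_bins) - 1)
         for ind, spkts in spktrains.items()}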
Example #3
def choose_synapse_projection(ranstream_syn,
                              syn_layer,
                              swc_type,
                              syn_type,
                              population_dict,
                              projection_synapse_dict,
                              log=False):
    """
    Given a synapse projection, SWC synapse location, and synapse type,
    chooses a projection from the given projection dictionary based on
    1) whether the projection properties match the given synapse
    properties and 2) random choice between all the projections that
    satisfy the given criteria.

    :param ranstream_syn: random state object
    :param syn_layer: synapse layer
    :param swc_type: SWC location for synapse (soma, axon, apical, basal)
    :param syn_type: synapse type (excitatory, inhibitory, neuromodulatory)
    :param population_dict: mapping of population names to population indices
    :param projection_synapse_dict: mapping of projection names to a tuple of the form: <type, layers, swc sections, proportions>

    """
    ivd = {v: k for k, v in viewitems(population_dict)}
    projection_lst = []
    projection_prob_lst = []
    for k, (syn_config_type, syn_config_layers, syn_config_sections,
            syn_config_proportions,
            syn_config_contacts) in viewitems(projection_synapse_dict):
        if (syn_type == syn_config_type) and (swc_type in syn_config_sections):
            ord_indices = list_find_all(lambda x: x == swc_type,
                                        syn_config_sections)
            for ord_index in ord_indices:
                if syn_layer == syn_config_layers[ord_index]:
                    projection_lst.append(population_dict[k])
                    projection_prob_lst.append(
                        syn_config_proportions[ord_index])
    if len(projection_lst) > 1:
        candidate_projections = np.asarray(projection_lst)
        candidate_probs = np.asarray(projection_prob_lst)
        if log:
            logger.info("candidate_projections: %s candidate_probs: %s" % \
                        (str(candidate_projections), str(candidate_probs)))
        projection = ranstream_syn.choice(candidate_projections,
                                          1,
                                          p=candidate_probs)[0]
    elif len(projection_lst) > 0:
        projection = projection_lst[0]
    else:
        projection = None

    if projection is None:
        logger.error('Projection is none for syn_type = %s syn_layer = %s swc_type = %s' %
                     (str(syn_type), str(syn_layer), str(swc_type)))
        logger.error(str(projection_synapse_dict))
        return None

    return ivd[projection]
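A hedged usage sketch: list_find_all and logger are not shown in this listing, so minimal stand-ins are defined here, along with a toy projection table in the five-tuple format the function expects:

import logging
import numpy as np

logger = logging.getLogger(__name__)
viewitems = lambda d: d.items()

def list_find_all(pred, lst):
    # stand-in for the helper used above: indices of all items satisfying pred
    return [i for i, x in enumerate(lst) if pred(x)]

population_dict = {'MPP': 0, 'LPP': 1}
# projection name -> (type, layers, swc sections, proportions, contacts)
projection_synapse_dict = {
    'MPP': ('excitatory', [2], ['apical'], [0.7], 1),
    'LPP': ('excitatory', [2], ['apical'], [0.3], 1),
}

ranstream_syn = np.random.RandomState(42)
proj = choose_synapse_projection(ranstream_syn, syn_layer=2, swc_type='apical',
                                 syn_type='excitatory', population_dict=population_dict,
                                 projection_synapse_dict=projection_synapse_dict)
print(proj)  # 'MPP' or 'LPP', drawn with probabilities 0.7 / 0.3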
Example #4
def spatial_coactive_sets(population,
                          spkdict,
                          time_bins,
                          trajectory,
                          return_tree=False):
    """
    Estimates spatially co-active activity ensembles from the given spike dictionary.
    """

    from sklearn.neighbors import BallTree

    x, y, d, t = trajectory

    pch_x = interpolate.pchip(t, x)
    pch_y = interpolate.pchip(t, y)

    spatial_bins = np.column_stack(
        [pch_x(time_bins[:-1]), pch_y(time_bins[:-1])])

    acv_dict = {
        gid: np.histogram(np.asarray(lst), bins=time_bins)[0]
        for (gid, lst) in viewitems(spkdict[population]) if len(lst) > 1
    }
    n_features = len(time_bins) - 1
    n_samples = len(acv_dict)

    active_gid = {}
    active_bins = np.zeros((n_samples, n_features), dtype=bool)
    for i, (gid, acv) in enumerate(viewitems(acv_dict)):
        active_bins[i, :] = acv > 0
        active_gid[i] = gid

    tree = BallTree(active_bins, metric='jaccard')
    qbins = np.zeros((n_features, n_features), dtype=bool)
    for ibin in range(n_features):
        qbins[ibin, ibin] = True

    nnrs, nndists = tree.query_radius(qbins, r=1, return_distance=True)

    fnnrs = []
    fnndists = []
    for i, (nns, nndist) in enumerate(zip(nnrs, nndists)):
        inds = [
            inn for inn, nn in enumerate(nns)
            if np.any(np.logical_and(active_bins[nn, :], active_bins[i, :]))
        ]
        fnns = np.asarray([nns[inn] for inn in inds])
        fdist = np.asarray([nndist[inn] for inn in inds])
        fnnrs.append(fnns)
        fnndists.append(fdist)

    if return_tree:
        return n_samples, spatial_bins, fnnrs, fnndists, (tree, active_gid)
    else:
        return n_samples, spatial_bins, fnnrs, fnndists
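The core of the ensemble search is a Jaccard-metric ball tree over boolean activity vectors. A self-contained sketch of that step: each query is a one-hot bin indicator, and query_radius returns the rows within the given Jaccard distance along with the distances themselves.

import numpy as np
from sklearn.neighbors import BallTree

rng = np.random.RandomState(0)
active_bins = rng.rand(20, 8) > 0.5      # 20 cells x 8 time bins, boolean activity

tree = BallTree(active_bins, metric='jaccard')
qbins = np.eye(8, dtype=bool)            # one indicator vector per time bin

nnrs, nndists = tree.query_radius(qbins, r=1, return_distance=True)
print(len(nnrs[0]), nndists[0][:3])      # neighbors of bin 0 and their distances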
Example #5
def selectivity_optimization_params(optimization_config, pop_names,
                                    param_config_name):
    """Constructs a flat list representation of selectivity optimization parameters."""

    mask_param_tuples = []
    mask_param_names = []

    if param_config_name is None:
        return None

    for pop_name in pop_names:
        if pop_name in optimization_config['selectivity']:
            opt_params = optimization_config['selectivity'][pop_name]
            structured_weights_masks = opt_params['Structured weights masks'][
                param_config_name]
        else:
            raise RuntimeError(
                "selectivity_optimization_params: population %s does not have optimization configuration"
                % pop_name)
        keyfun = lambda kv: str(kv[0])
        for source, source_dict in sorted(viewitems(structured_weights_masks),
                                          key=keyfun):
            for sec_type, sec_type_dict in sorted(viewitems(source_dict),
                                                  key=keyfun):
                for syn_name, syn_mech_dict in sorted(viewitems(sec_type_dict),
                                                      key=keyfun):
                    for param_fst, param_rst in sorted(
                            viewitems(syn_mech_dict), key=keyfun):
                        if isinstance(param_rst, dict):
                            for const_name, const_value in sorted(
                                    viewitems(param_rst)):
                                param_path = (param_fst, const_name)
                                param_key = '%s.%s.%s.%s.%s.%s' % (
                                    pop_name, str(source), sec_type, syn_name,
                                    param_fst, const_name)
                                mask_param_names.append(param_key)
                                mask_param_tuples.append(
                                    SynParam(pop_name, source, sec_type,
                                             syn_name, param_path,
                                             const_value))
                        else:
                            param_name = param_fst
                            param_value = param_rst
                            param_key = '%s.%s.%s.%s.%s' % (pop_name, source,
                                                            sec_type, syn_name,
                                                            param_name)
                            mask_param_names.append(param_key)
                            mask_param_tuples.append(
                                SynParam(pop_name, source, sec_type, syn_name,
                                         param_name, param_value))

    return SelectivityOptConfig(mask_param_names=mask_param_names,
                                mask_param_tuples=mask_param_tuples)
Example #6
def spike_covariate(population, spkdict, time_bins, nbins_before, nbins_after):
    """
    Creates the spike covariate matrix.

    X: a matrix of size nbins x nadj x ncells
    """

    spk_matrix = np.column_stack([
        np.histogram(np.asarray(lst), bins=time_bins)[0]
        for gid, lst in viewitems(spkdict[population])
        if len(lst) > 1
    ])

    nbins = spk_matrix.shape[0]
    ncells = spk_matrix.shape[1]
    nadj = nbins_before + nbins_after + 1

    X = np.empty([nbins, nadj, ncells])
    X[:] = np.nan

    start_idx = 0
    for i in range(nbins - nbins_before - nbins_after):
        end_idx = start_idx + nadj
        X[i + nbins_before, :, :] = spk_matrix[start_idx:end_idx, :]
        start_idx = start_idx + 1

    return X
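A toy check of the lagged design matrix construction: with nbins_before = nbins_after = 1, row t of X holds the counts at t-1, t, t+1 for every cell, and edge rows without a full window stay NaN.

import numpy as np

spk_matrix = np.arange(12).reshape(6, 2)   # 6 time bins x 2 cells
nbins_before, nbins_after = 1, 1
nadj = nbins_before + nbins_after + 1

X = np.full((6, nadj, 2), np.nan)
for i in range(6 - nbins_before - nbins_after):
    X[i + nbins_before, :, :] = spk_matrix[i:i + nadj, :]

print(X[1, :, 0])   # [0. 2. 4.] -> bins 0..2 of cell 0
print(X[0, :, 0])   # [nan nan nan] -> no complete window at the edge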
Example #7
def exchange_input_features(comm, requested_gids, input_features_attr_dict):

    my_gids = list(input_features_attr_dict.keys())
    requested_gids_per_rank = comm.allgather(requested_gids)
    gid_rank_map = defaultdict(list)
    for rank, gids in enumerate(requested_gids_per_rank):
        for gid in gids:
            gid_rank_map[gid].append(rank)

    nranks = comm.size
    input_features_sendbuf = [list() for i in range(nranks)]
    for gid, features_dict in viewitems(input_features_attr_dict):
        if gid in gid_rank_map:
            for dst_rank in gid_rank_map[gid]:
                input_features_sendbuf[dst_rank].append((gid, features_dict))

    input_features_recvbuf = comm.alltoall(input_features_sendbuf)
    comm.barrier()

    result_input_features_attr_dict = {}
    for l in input_features_recvbuf:
        for gid, features_dict in l:
            result_input_features_attr_dict[gid] = features_dict

    return result_input_features_attr_dict
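A sketch of how the all-to-all exchange might be driven, assuming the function above is saved alongside it (the file name exchange_demo.py and the toy feature dicts are illustrative only); run under MPI, e.g. mpiexec -n 2 python exchange_demo.py:

from collections import defaultdict
from mpi4py import MPI

viewitems = lambda d: d.items()

comm = MPI.COMM_WORLD
rank = comm.rank

# Rank r owns gid r and requests the gid owned by the next rank.
owned = {rank: {'rate': float(rank)}}
requested = [(rank + 1) % comm.size]

result = exchange_input_features(comm, requested, owned)
print('rank %d received %s' % (rank, result))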
Example #8
    def f(pp, **kwargs):
        if problem_regime == ProblemRegime.every:
            results_dict = eval_problem_fun(pp, **kwargs)
            result = results_dict
        elif problem_regime == ProblemRegime.mean:
            mpp = { gid: pp for gid in cell_index_set }
            results_dict = eval_problem_fun(mpp, **kwargs)
            result = np.mean([ v for k,v in viewitems(results_dict) ])
        elif problem_regime == ProblemRegime.max:
            mpp = { gid: pp for gid in cell_index_set }
            results_dict = eval_problem_fun(mpp, **kwargs)
            result = np.max([ v for k,v in viewitems(results_dict) ])
        else:
            raise RuntimeError("opt_eval_fun: unknown problem regime %s" % str(problem_regime))

        return result
Example #9
    def plot(self, axes=None, show=True):
        """

        """
        import matplotlib.pyplot as plt
        from dentate.plot import clean_axes
        if len(self.recs) == 0:
            return
        if axes is None:
            fig, axes = plt.subplots()
        else:
            fig = axes.get_figure()
        for name, rec_dict in viewitems(self.recs):
            description = str(rec_dict['description'])
            axes.plot(self.tvec, rec_dict['vec'],
                      label='%s: %s(%.2f) %s' % (name, rec_dict['node'].name, rec_dict['loc'], description))
            axes.set_xlabel('Time (ms)')
            axes.set_ylabel('%s (%s)' % (rec_dict['ylabel'], rec_dict['units']))
        axes.legend(loc='best', frameon=False, framealpha=0.5)
        title = None
        if 'title' in self.parameters:
            title = self.parameters['title']
        if 'description' in self.parameters:
            if title is not None:
                title = title + '; ' + self.parameters['description']
            else:
                title = self.parameters['description']
        if title is not None:
            axes.set_title(title)
        clean_axes(axes)
        if show:
            fig.tight_layout()
            fig.show()
        else:
            return axes
Example #10
def determine_cell_participation(gid_module_assignments):

    input_config = context.env.inputConfig[context.stimulus_id]
    feature_type_dict = input_config['feature type']
    feature_seed_offset = int(
        context.env.modelConfig['Random Seeds']['Input Spatial Selectivity'])
    feature_type_random = np.random.RandomState(feature_seed_offset - 1)
    num_field_random = np.random.RandomState(feature_seed_offset - 1)

    gid_attributes = {}
    module_probabilities = [
        calculate_field_distribution(pi, pr)
        for (pi, pr) in zip(module_pi, module_pr)
    ]

    population_ranges = context.population_ranges
    total_num_fields = 0
    for population in ['MPP', 'LPP']:
        gid_attributes[population] = {}

        population_start = population_ranges[population][0]
        population_count = population_ranges[population][1]

        feature_type_values_lst = []
        feature_type_prob_lst = []
        for t, p in viewitems(feature_type_dict[population]):
            feature_type_values_lst.append(t)
            feature_type_prob_lst.append(p)
        feature_type_values = np.asarray(feature_type_values_lst)
        feature_type_probs = np.asarray(feature_type_prob_lst)
        feature_types = feature_type_random.choice(feature_type_values,
                                                   p=feature_type_probs,
                                                   size=(population_count, ))

        population_end = population_start + population_count
        gids = np.arange(population_start, population_end, 1)
        for (i, gid) in enumerate(gids):
            num_field_random.seed(feature_seed_offset + gid)
            cell = {}
            module = gid_module_assignments[gid]
            cell['Module'] = np.array([module], dtype='uint8')
            cell['Feature Type'] = np.array([feature_types[i]], dtype='uint8')
            nfields = 1
            if feature_types[i] == feature_grid:
                cell['Num Fields'] = np.array([nfields], dtype='uint8')
            elif feature_types[i] == feature_place:
                field_probabilities = module_probabilities[module - 1]
                field_set = list(range(field_probabilities.shape[0]))
                nfields = num_field_random.choice(field_set,
                                                  p=field_probabilities,
                                                  size=(10, ))[-1]
                cell['Num Fields'] = np.array([nfields], dtype='uint8')
            gid_attributes[population][gid] = cell
            total_num_fields += nfields
            logger.info('Rank %i: computed features for gid %i' %
                        (context.env.comm.rank, gid))

    return total_num_fields, gid_attributes
Example #11
def network_features(env, target_trj_rate_map_dict, t_start, t_stop,
                     target_populations):

    features_dict = dict()

    temporal_resolution = float(env.stimulus_config['Temporal Resolution'])
    time_bins = np.arange(t_start, t_stop, temporal_resolution)

    pop_spike_dict = spikedata.get_env_spike_dict(env,
                                                  include_artificial=False)

    for pop_name in target_populations:

        has_target_trj_rate_map = pop_name in target_trj_rate_map_dict

        n_active = 0
        sum_mean_rate = 0.
        spike_density_dict = spikedata.spike_density_estimate(
            pop_name, pop_spike_dict[pop_name], time_bins)
        for gid, dens_dict in utils.viewitems(spike_density_dict):
            mean_rate = np.mean(dens_dict['rate'])
            sum_mean_rate += mean_rate
            if mean_rate > 0.:
                n_active += 1

        n_total = len(env.cells[pop_name]) - len(
            env.artificial_cells[pop_name])

        n_target_rate_map = 0
        sum_target_rate_dist_residual = None
        if has_target_trj_rate_map:
            pop_target_trj_rate_map_dict = target_trj_rate_map_dict[pop_name]
            n_target_rate_map = len(pop_target_trj_rate_map_dict)
            target_rate_dist_residuals = []
            for gid in pop_target_trj_rate_map_dict:
                target_trj_rate_map = pop_target_trj_rate_map_dict[gid]
                rate_map_len = len(target_trj_rate_map)
                if gid in spike_density_dict:
                    residual = np.abs(
                        np.sum(target_trj_rate_map -
                               spike_density_dict[gid]['rate'][:rate_map_len]))
                else:
                    residual = np.abs(np.sum(target_trj_rate_map))
                target_rate_dist_residuals.append(residual)
            sum_target_rate_dist_residual = np.sum(target_rate_dist_residuals)

        pop_features_dict = {}
        pop_features_dict['n_total'] = n_total
        pop_features_dict['n_active'] = n_active
        pop_features_dict['n_target_rate_map'] = n_target_rate_map
        pop_features_dict['sum_mean_rate'] = sum_mean_rate
        pop_features_dict['sum_target_rate_dist_residual'] = sum_target_rate_dist_residual

        features_dict[pop_name] = pop_features_dict

    return features_dict
Example #12
    def parse_syn_mechparams(self, mechparams_dict):
        res = {}
        for mech_name, mech_params in viewitems(mechparams_dict):
            mech_params1 = {}
            for k, v in viewitems(mech_params):
                if isinstance(v, dict):
                    if 'expr' in v:
                        mech_params1[k] = ExprClosure([v['parameter']], v['expr'],
                                                      v.get('const', None), ['x'])
                    else:
                        raise RuntimeError('parse_syn_mechparams: unknown parameter type %s' % str(v))
                else:
                    mech_params1[k] = v
            res[mech_name] = mech_params1
        return res
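The mechparams dict mixes plain scalars with 'expr' sub-dicts that become ExprClosure objects. ExprClosure is not shown in this listing; the namedtuple stand-in below is an assumption, used only to illustrate what the parser produces for each shape:

from collections import namedtuple

ExprClosure = namedtuple('ExprClosure', ['parameters', 'expr', 'consts', 'formals'])
viewitems = lambda d: d.items()

mechparams_dict = {
    'AMPA': {
        'g_unit': 0.0005,             # plain scalar: passed through unchanged
        'weight': {'expr': 'x * w0',  # expression: wrapped in a closure
                   'parameter': 'w0',
                   'const': {'w0': 1.0}},
    }
}
# parse_syn_mechparams would map 'g_unit' to 0.0005 and 'weight' to
# ExprClosure(['w0'], 'x * w0', {'w0': 1.0}, ['x']).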
Example #13
def interspike_intervals(spkdict):
    """
    Calculates interspike intervals from the given spike dictionary.
    """
    isi_dict = {}
    for ind, lst in viewitems(spkdict):
        if len(lst) > 1:
            isi_dict[ind] = np.diff(np.asarray(lst))
        else:
            isi_dict[ind] = np.asarray([], dtype=np.float32)
    return isi_dict
Example #14
def spike_bin_counts(spkdict, time_bins):
    bin_dict = {}
    for ind, lst in viewitems(spkdict):
        if len(lst) > 0:
            spkts = np.asarray(lst, dtype=np.float32)
            bins, bin_edges = np.histogram(spkts, bins=time_bins)
            bin_dict[ind] = bins
    return bin_dict
Example #15
def generate_gap_junctions(connection_prob, coupling_coeffs, coupling_params, ranstream_gj, gids_a, gids_b, gj_probs,
                           gj_distances, cell_dict_a, cell_dict_b, gj_dict):
    k = int(round(connection_prob * len(gj_distances)))
    selected = ranstream_gj.choice(np.arange(0, len(gj_distances)), size=k, replace=False, p=gj_probs)
    count = len(selected)

    gid_dict = defaultdict(list)
    for i in selected:
        gid_a = gids_a[i]
        gid_b = gids_b[i]
        gid_dict[gid_a].append(gid_b)

    for gid_a, gids_b in viewitems(gid_dict):
        sections_a = []
        positions_a = []
        sections_b = []
        positions_b = []
        couplings_a = []
        couplings_b = []

        cell_a = cell_dict_a[gid_a]

        for gid_b in gids_b:
            cell_b = cell_dict_b[gid_b]

            section_a, position_a, distance_a, section_b, position_b, distance_b = \
                choose_gj_locations(ranstream_gj, cell_a, cell_b)

            sections_a.append(section_a)
            positions_a.append(position_a)

            sections_b.append(section_b)
            positions_b.append(position_b)

            coupling_weight_a = np.polyval(coupling_params, distance_a)
            coupling_weight_b = np.polyval(coupling_params, distance_b)

            coupling_a = coupling_coeffs * coupling_weight_a
            coupling_b = coupling_coeffs * coupling_weight_b

            couplings_a.append(coupling_a)
            couplings_b.append(coupling_b)

        if len(gids_b) > 0:
            gj_dict[gid_a] = (np.asarray(gids_b, dtype=np.uint32),
                              {'Location': {'Source section': np.asarray(sections_a, dtype=np.uint32),
                                            'Source position': np.asarray(positions_a, dtype=np.float32),
                                            'Destination section': np.asarray(sections_b, dtype=np.uint32),
                                            'Destination position': np.asarray(positions_b, dtype=np.float32)},
                               'Coupling strength': {'Source': np.asarray(couplings_a, dtype=np.float32),
                                                     'Destination': np.asarray(couplings_b, dtype=np.float32)}})

    return count
Example #16
    def parse_stimulus_config(self):
        stimulus_dict = self.model_config['Stimulus']
        stimulus_config = {}

        for k, v in viewitems(stimulus_dict):
            if k == 'Selectivity Type Probabilities':
                selectivity_type_prob_dict = {}
                for (pop, dvals) in viewitems(v):
                    pop_selectivity_type_prob_dict = {}
                    for (selectivity_type_name,
                         selectivity_type_prob) in viewitems(dvals):
                        pop_selectivity_type_prob_dict[int(self.selectivity_types[selectivity_type_name])] = \
                            float(selectivity_type_prob)
                    selectivity_type_prob_dict[pop] = pop_selectivity_type_prob_dict
                stimulus_config['Selectivity Type Probabilities'] = selectivity_type_prob_dict
            elif k == 'Peak Rate':
                peak_rate_dict = {}
                for (pop, dvals) in viewitems(v):
                    pop_peak_rate_dict = {}
                    for (selectivity_type_name, peak_rate) in viewitems(dvals):
                        pop_peak_rate_dict[int(self.selectivity_types[selectivity_type_name])] = float(peak_rate)
                    peak_rate_dict[pop] = pop_peak_rate_dict
                stimulus_config['Peak Rate'] = peak_rate_dict
            elif k == 'Arena':
                stimulus_config['Arena'] = {}
                for arena_id, arena_val in viewitems(v):
                    arena_properties = {}
                    arena_domain = None
                    arena_trajectories = {}
                    for kk, vv in viewitems(arena_val):
                        if kk == 'Domain':
                            arena_domain = self.parse_arena_domain(vv)
                        elif kk == 'Trajectory':
                            for name, trajectory_config in viewitems(vv):
                                trajectory = self.parse_arena_trajectory(
                                    trajectory_config)
                                arena_trajectories[name] = trajectory
                        else:
                            arena_properties[kk] = vv
                    stimulus_config['Arena'][arena_id] = ArenaConfig(
                        arena_id, arena_domain, arena_trajectories,
                        arena_properties)
            else:
                stimulus_config[k] = v

        self.stimulus_config = stimulus_config
Example #17
def spike_rates(spkdict):
    """
    Calculates firing rates based on interspike intervals computed from the given spike dictionary.
    """
    rate_dict = {}
    isidict = interspike_intervals(spkdict)
    for ind, isiv in viewitems(isidict):
        if isiv.size > 0:
            rate = 1.0 / (np.mean(isiv) / 1000.0)
        else:
            rate = 0.0
        rate_dict[ind] = rate
    return rate_dict
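A quick check of the two helpers above (interspike_intervals and spike_rates, both assumed in scope): a cell spiking every 100 ms has a mean ISI of 100 ms and hence a rate of 10 Hz, while a single-spike cell yields an empty ISI array and a rate of 0.

import numpy as np

viewitems = lambda d: d.items()

spkdict = {1: [100., 200., 300., 400.], 2: [250.]}
print(interspike_intervals(spkdict))  # {1: array([100., 100., 100.]), 2: array([], dtype=float32)}
print(spike_rates(spkdict))           # {1: 10.0, 2: 0.0}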
Example #18
def uvl_in_bounds(uvl_coords, layer_extents, pop_layers):
    for layer, count in viewitems(pop_layers):
        if count > 0:
            min_extent = layer_extents[layer][0]
            max_extent = layer_extents[layer][1]
            result = (uvl_coords[0] < (max_extent[0] + 0.001)) and \
                     (uvl_coords[0] > (min_extent[0] - 0.001)) and \
                     (uvl_coords[1] < (max_extent[1] + 0.001)) and \
                     (uvl_coords[1] > (min_extent[1] - 0.001)) and \
                     (uvl_coords[2] < (max_extent[2] + 0.001)) and \
                     (uvl_coords[2] > (min_extent[2] - 0.001))
            if result:
                return True
    return False
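A toy bounds check: layer extents map a layer id to (min, max) corners in (u, v, l), and the hard-coded 0.001 slack admits points just outside the box.

viewitems = lambda d: d.items()

layer_extents = {0: ((0., 0., 0.), (10., 10., 5.))}
pop_layers = {0: 120}   # the population places 120 cells in layer 0

print(uvl_in_bounds((5., 5., 2.5), layer_extents, pop_layers))       # True
print(uvl_in_bounds((10.0005, 5., 2.5), layer_extents, pop_layers))  # True (within slack)
print(uvl_in_bounds((11., 5., 2.5), layer_extents, pop_layers))      # False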
Example #19
def compute_features_firing_rate_fraction_active(x, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    update_source_contexts(x, context)
    context.env.results_file_id = '%s_%s' % \
                             (context.interface.worker_id, datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))

    network.run(context.env, output=context.output_results, shutdown=False)

    pop_spike_dict = spikedata.get_env_spike_dict(context.env, include_artificial=False)

    t_start = 250.
    t_stop = context.env.tstop
    
    time_bins = np.arange(t_start, t_stop, context.bin_size)

    for pop_name in context.target_populations:

        mean_rate_sum = 0.
        spike_density_dict = spikedata.spike_density_estimate(pop_name, pop_spike_dict[pop_name], time_bins)
        for gid, dens_dict in utils.viewitems(spike_density_dict):
            mean_rate_sum += np.mean(dens_dict['rate'])
        mean_rate_sum = context.env.comm.allreduce(mean_rate_sum, op=MPI.SUM)

        n_total = context.env.comm.allreduce(len(context.env.cells[pop_name]) - len(context.env.artificial_cells[pop_name]), op=MPI.SUM)
        n_active = context.env.comm.allreduce(len(spike_density_dict), op=MPI.SUM)

        if n_active > 0:
            mean_rate = mean_rate_sum / n_active
        else:
            mean_rate = 0.

        if n_total > 0:
            fraction_active = n_active / n_total
        else:
            fraction_active = 0.

        rank = int(context.env.pc.id())
        if rank == 0:
            context.logger.info('population %s: n_active = %d n_total = %d' % (pop_name, n_active, n_total))

        results['%s firing rate' % pop_name] = mean_rate
        results['%s fraction active' % pop_name] = fraction_active

    return results
Example #20
def write_params(output_path, pop_params_dict):

    output_pop_parameters = {}
    param_key_list = []
    for population in pop_params_dict:
        this_pop_output_parameters = {}
        for gid in pop_params_dict[population]:
            this_gid_param_dicts = pop_params_dict[population][gid]
            this_output_params = {}
            for pd in this_gid_param_dicts:
                param_key = f'{pd["population"]}.{pd["source"]}.{pd["sec_type"]}.{pd["syn_name"]}.{pd["param_path"]}'
                param_val = pd["param_val"]
                param_key_list.append(param_key)
                this_output_params[param_key] = param_val
            this_pop_output_parameters[f'{gid}'] = this_output_params
        output_pop_parameters[population] = this_pop_output_parameters

    param_keys = sorted(set(param_key_list))  # sorted for a deterministic enum mapping

    output_file = h5py.File(output_path, 'a')

    param_mapping = {name: idx for (idx, name) in enumerate(param_keys)}

    parameters_grp = h5_get_group(output_file, 'Parameters')
    if 'parameters_type' not in parameters_grp:
        dt = h5py.enum_dtype(param_mapping, basetype=np.uint16)
        parameters_grp['parameter_enum'] = dt
        dt = np.dtype([("parameter", parameters_grp['parameter_enum']),
                       ("value", np.float32)])
        parameters_grp['parameters_type'] = dt
    for population in output_pop_parameters:
        pop_grp = h5_get_group(parameters_grp, population)
        this_pop_output_parameters = output_pop_parameters[population]
        for id_str in this_pop_output_parameters:
            this_output_params = this_pop_output_parameters[id_str]
            dset = h5_get_dataset(
                pop_grp,
                id_str,
                maxshape=(len(this_output_params), ),
                dtype=parameters_grp['parameters_type'].dtype)
            dset.resize((len(this_output_params), ))
            a = np.zeros(len(this_output_params),
                         dtype=parameters_grp['parameters_type'].dtype)
            for idx, (parm, val) in enumerate(viewitems(this_output_params)):
                a[idx]["parameter"] = param_mapping[parm]
                a[idx]["value"] = val
            dset[:] = a

    output_file.close()
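A self-contained sketch of the enum/compound dtype pattern used above, simplified to a fixed-size dataset and an in-memory HDF5 file so it runs without touching disk (names are illustrative):

import h5py
import numpy as np

param_mapping = {'GC.MPP.apical.AMPA.weight': 0, 'GC.LPP.apical.AMPA.weight': 1}

with h5py.File('params.h5', 'w', driver='core', backing_store=False) as f:
    grp = f.create_group('Parameters')
    enum_dt = h5py.enum_dtype(param_mapping, basetype=np.uint16)
    grp['parameter_enum'] = enum_dt                  # commit the named enum type
    row_dt = np.dtype([('parameter', enum_dt), ('value', np.float32)])
    grp['parameters_type'] = row_dt                  # commit the compound row type

    a = np.zeros(2, dtype=row_dt)
    a[0] = (param_mapping['GC.MPP.apical.AMPA.weight'], 0.5)
    a[1] = (param_mapping['GC.LPP.apical.AMPA.weight'], 1.5)
    grp.create_dataset('42', data=a)                 # one row per parameter for gid 42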
Example #21
def assign_cells_to_module(gid_normed_distances, p_width=2. / 3, displace=0.1):

    offsets = np.linspace(-displace, 1. + displace, 10)
    positions = np.linspace(0., 1., 1000)
    p_module = lambda width, offset: lambda x: np.exp(-((x - offset) / (width / 3. / np.sqrt(2.))) ** 2.)
    p_modules = np.array([p_module(p_width, offset)(positions) for offset in offsets],
                         dtype='float32')
    p_sum = np.sum(p_modules, axis=0)
    p_density = np.divide(p_modules, p_sum)  # 10 x 1000
    #p_modules_max = np.max(p_density, axis=1)
    #mean_peak     = np.mean(p_modules_max[1:-1])

    left_offset = 0
    right_offset = len(positions)
    valid_indices = np.arange(left_offset, right_offset, 1)
    valid_positions = positions[valid_indices]
    renormalized_positions = (valid_positions - np.min(valid_positions)) / (
        np.max(valid_positions) - np.min(valid_positions))

    #plt.figure()
    #for i in xrange(len(p_density)):
    #    plt.plot(renormalized_positions, p_density[i][valid_indices])

    feature_seed_offset = int(
        context.env.modelConfig['Random Seeds']['Input Spatial Selectivity'])
    local_random = np.random.RandomState()
    gid_module_assignments = dict()

    for gid, (u, _, _, _) in viewitems(gid_normed_distances):
        local_random.seed(gid + feature_seed_offset)
        interpolated_density_values = []
        for i in range(len(p_density)):
            module_density = p_density[i][valid_indices]
            interpolated_density_values.append(
                np.interp(u, renormalized_positions, module_density))
        remaining_density = 1. - np.sum(interpolated_density_values)
        max_density_index = np.argmax(interpolated_density_values)
        interpolated_density_values[max_density_index] += remaining_density
        module = local_random.choice(np.arange(len(p_density)) + 1,
                                     p=interpolated_density_values,
                                     size=(1, ))
        gid_module_assignments[gid] = module[0]

    return gid_module_assignments
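A spot check of the density normalization above: after dividing by the column-wise sum, the ten module probabilities at every position sum to 1, so (after the residual-mass correction) each column can be fed to numpy's choice() as a distribution.

import numpy as np

offsets = np.linspace(-0.1, 1.1, 10)
positions = np.linspace(0., 1., 1000)
p_module = lambda width, offset: lambda x: np.exp(-((x - offset) / (width / 3. / np.sqrt(2.))) ** 2.)
p_modules = np.array([p_module(2. / 3, o)(positions) for o in offsets])

p_density = p_modules / np.sum(p_modules, axis=0)   # 10 x 1000
assert np.allclose(p_density.sum(axis=0), 1.0)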
Example #22
def optimize_inverse_uvl_coords(xyz_coords,
                                rotate,
                                layer_extents,
                                pop_layers,
                                optiter=100):
    import dlib
    f_uvl_distance = make_uvl_distance(xyz_coords, rotate=rotate)
    for layer, count in viewitems(pop_layers):
        if count > 0:
            min_extent = layer_extents[layer][0]
            max_extent = layer_extents[layer][1]
            uvl_coords, dist = dlib.find_min_global(f_uvl_distance, min_extent,
                                                    max_extent, optiter)
            if uvl_in_bounds(uvl_coords, layer_extents, {layer: count}):
                return uvl_coords
    return None
Example #23
def save_to_h5(cell_attributes):

    for population in cell_attributes.keys():
        place_cells, grid_cells = {}, {}
        for gid, cell in viewitems(cell_attributes[population]):

            if cell['Feature Type'][0] == feature_grid:
                grid_cells[gid] = cell
            elif cell['Feature Type'][0] == feature_place:
                place_cells[gid] = cell

        append_cell_attributes(context.output_path, population, grid_cells, namespace='Grid Selectivity',\
                               comm=context.comm, io_size=context.io_size, chunk_size=context.chunk_size,\
                               value_chunk_size=context.value_chunk_size)

        append_cell_attributes(context.output_path, population, place_cells, namespace='Place Selectivity',\
                               comm=context.comm, io_size=context.io_size, chunk_size=context.chunk_size,\
                               value_chunk_size=context.value_chunk_size)
Example #24
    def clear(self):
        self.gidset = set()
        self.gjlist = []
        self.cells = defaultdict(list)
        self.artificial_cells = defaultdict(dict)
        self.biophys_cells = defaultdict(dict)
        self.recording_sets = {}
        if self.pc is not None:
            self.pc.gid_clear()
        if self.t_vec is not None:
            self.t_vec.resize(0)
        if self.id_vec is not None:
            self.id_vec.resize(0)
        if self.t_rec is not None:
            self.t_rec.resize(0)
        self.recs_dict = {}
        for pop_name, _ in viewitems(self.Populations):
            self.recs_dict[pop_name] = defaultdict(list)
Example #25
def optimization_params(optimization_config, pop_names, param_config_name, param_type='synaptic'):

    """Constructs a flat list representation of synaptic optimization parameters based on network clamp optimization configuration."""
    
    param_bounds = {}
    param_names = []
    param_initial_dict = {}
    param_tuples = []
    opt_targets = {}

    for pop_name in pop_names:
        if param_type == 'synaptic':
            if pop_name in optimization_config['synaptic']:
                opt_params = optimization_config['synaptic'][pop_name]
                param_ranges = opt_params['Parameter ranges'][param_config_name]
            else:
                raise RuntimeError(
                    "optimization_params: population %s does not have optimization configuration" % pop_name)
            for target_name, target_val in viewitems(opt_params['Targets']):
                opt_targets['%s %s' % (pop_name, target_name)] = target_val
            keyfun = lambda kv: str(kv[0])
            for source, source_dict in sorted(viewitems(param_ranges), key=keyfun):
                for sec_type, sec_type_dict in sorted(viewitems(source_dict), key=keyfun):
                    for syn_name, syn_mech_dict in sorted(viewitems(sec_type_dict), key=keyfun):
                        for param_fst, param_rst in sorted(viewitems(syn_mech_dict), key=keyfun):
                            if isinstance(param_rst, dict):
                                for const_name, const_range in sorted(viewitems(param_rst)):
                                    param_path = (param_fst, const_name)
                                    param_tuples.append(SynParam(pop_name, source, sec_type, syn_name, param_path, const_range))
                                    param_key = '%s.%s.%s.%s.%s.%s' % (pop_name, str(source), sec_type, syn_name, param_fst, const_name)
                                    param_initial_value = (const_range[1] - const_range[0]) / 2.0
                                    param_initial_dict[param_key] = param_initial_value
                                    param_bounds[param_key] = const_range
                                    param_names.append(param_key)
                            else:
                                param_name = param_fst
                                param_range = param_rst
                                param_tuples.append(SynParam(pop_name, source, sec_type, syn_name, param_name, param_range))
                                param_key = '%s.%s.%s.%s.%s' % (pop_name, source, sec_type, syn_name, param_name)
                                param_initial_value = (param_range[1] - param_range[0]) / 2.0
                                param_initial_dict[param_key] = param_initial_value
                                param_bounds[param_key] = param_range
                                param_names.append(param_key)
                                
        else:
            raise RuntimeError("optimization_params: unknown parameter type %s" % param_type)

    return OptConfig(param_bounds=param_bounds, 
                     param_names=param_names, 
                     param_initial_dict=param_initial_dict, 
                     param_tuples=param_tuples, 
                     opt_targets=opt_targets)
Example #26
def get_total_extents(layer_extents):

    min_u = float('inf')
    max_u = 0.0

    min_v = float('inf')
    max_v = 0.0

    min_l = float('inf')
    max_l = 0.0

    for layer, extent in viewitems(layer_extents):
        min_u = min(extent[0][0], min_u)
        min_v = min(extent[0][1], min_v)
        min_l = min(extent[0][2], min_l)
        max_u = max(extent[1][0], max_u)
        max_v = max(extent[1][1], max_v)
        max_l = max(extent[1][2], max_l)

    return ((min_u, max_u), (min_v, max_v), (min_l, max_l))
Example #27
    def parse_netclamp_config(self):
        """
        Parses the 'Network Clamp' section of the model configuration.
        """
        netclamp_config_dict = self.model_config['Network Clamp']
        weight_generator_dict = netclamp_config_dict['Weight Generator']
        template_param_rules_dict = netclamp_config_dict['Template Parameter Rules']
        opt_param_rules_dict = {}
        if 'Synaptic Optimization' in netclamp_config_dict:
            opt_param_rules_dict['synaptic'] = netclamp_config_dict['Synaptic Optimization']

        template_params = {}
        for (template_name, params) in viewitems(template_param_rules_dict):
            template_params[template_name] = params

        self.netclamp_config = NetclampConfig(template_params,
                                              weight_generator_dict,
                                              opt_param_rules_dict)
Example #28
def histogram_autocorrelation(spkdata, bin_size=1., lag=1, quantity='count'):
    """Compute autocorrelation coefficients of the spike count or firing rate histogram of each population. """

    spkpoplst = spkdata['spkpoplst']
    spkindlst = spkdata['spkindlst']
    spktlst = spkdata['spktlst']
    num_cell_spks = spkdata['num_cell_spks']
    pop_active_cells = spkdata['pop_active_cells']
    tmin = spkdata['tmin']
    tmax = spkdata['tmax']

    bins = np.arange(tmin, tmax, bin_size)

    corr_dict = {}
    for subset, spkinds, spkts in zip(spkpoplst, spkindlst, spktlst):
        spk_dict = defaultdict(list)
        for spkind, spkt in zip(np.nditer(spkinds), np.nditer(spkts)):
            spk_dict[int(spkind)].append(spkt)
        x_lst = []
        for ind, lst in viewitems(spk_dict):
            spkts = np.asarray(lst)
            if quantity == 'rate':
                q = akde(spkts / 1000., bins / 1000.)[0]
            else:
                count, bin_edges = np.histogram(spkts, bins=bins)
                q = count
            x_lst.append(q)

        x_matrix = np.asarray(x_lst)

        corr_matrix = np.apply_along_axis(lambda y: autocorr(y, lag), 1,
                                          x_matrix)

        corr_dict[subset] = corr_matrix

    return corr_dict
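autocorr is not shown in this listing; a minimal lag-k (k >= 1) autocorrelation helper consistent with how it is applied row-wise above might look like:

import numpy as np

def autocorr(y, lag):
    # Pearson correlation between y[t] and y[t - lag]; 0 for degenerate series.
    y = np.asarray(y, dtype=float)
    if len(y) <= lag or np.std(y[lag:]) == 0. or np.std(y[:-lag]) == 0.:
        return 0.0
    return np.corrcoef(y[lag:], y[:-lag])[0, 1]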
Example #29
def compute_features_firing_rate(x, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    update_source_contexts(x, context)
    context.env.results_id = '%s_%s' % \
                             (context.interface.worker_id, datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))

    network.run(context.env, output=context.output_results, shutdown=False)

    pop_spike_dict = spikedata.get_env_spike_dict(context.env)

    t_start = 0.
    t_stop = context.env.tstop

    time_bins = np.arange(t_start, t_stop, context.bin_size)

    pop_name = 'GC'

    mean_rate_sum = 0.
    spike_density_dict = spikedata.spike_density_estimate(
        pop_name, pop_spike_dict[pop_name], time_bins)
    for gid, dens_dict in utils.viewitems(spike_density_dict):
        mean_rate_sum += np.mean(dens_dict['rate'])

    n = len(spike_density_dict)
    if n > 0:
        mean_rate = mean_rate_sum / n
    else:
        mean_rate = 0.

    results['firing_rate'] = mean_rate

    return results
Example #30
def assign_cells_to_normalized_position():

    rank = context.comm.rank
    population_distances = []
    gid_arc_distance = dict()
    gid_normed_distances = dict()

    for population in ['MPP', 'LPP']:
        #(population_start, population_count) = context.population_ranges[population]
        attr_gen = NeuroH5CellAttrGen(context.coords_path,
                                      population,
                                      namespace=context.distances_namespace,
                                      comm=context.comm,
                                      io_size=context.io_size,
                                      cache_size=context.cache_size)

        for (gid, distances_dict) in attr_gen:
            if gid is None:
                break
            arc_distance_u = distances_dict['U Distance'][0]
            arc_distance_v = distances_dict['V Distance'][0]
            gid_arc_distance[gid] = (arc_distance_u, arc_distance_v)
            population_distances.append((arc_distance_u, arc_distance_v))

    population_distances = np.asarray(population_distances, dtype='float32')

    min_u, max_u = np.min(population_distances[:, 0]), np.max(population_distances[:, 0])
    min_v, max_v = np.min(population_distances[:, 1]), np.max(population_distances[:, 1])
    for (gid, (arc_distance_u, arc_distance_v)) in viewitems(gid_arc_distance):
        normalized_u = (arc_distance_u - min_u) / (max_u - min_u)
        normalized_v = (arc_distance_v - min_v) / (max_v - min_v)
        gid_normed_distances[gid] = (normalized_u, normalized_v,
                                     arc_distance_u, arc_distance_v)

    return gid_normed_distances