Example 1
def make_rotate3d(rotate):
    """Creates a rotation matrix from per-axis rotation angles given in degrees."""
    rot = np.eye(3)
    for i in range(0, 3):
        if rotate[i] != 0.:
            a = float(np.deg2rad(rotate[i]))
            # compose the per-axis rotations rather than overwriting rot,
            # so rot is also well-defined when every angle is zero
            rot = np.dot(rot, rotate3d([1 if i == j else 0 for j in range(0, 3)], a))

    return rot
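A minimal usage sketch, assuming rotate3d returns the axis-angle (Rodrigues) rotation matrix; the stub below is illustrative, not the original library's implementation:

import numpy as np

def rotate3d(axis, theta):
    """Rodrigues' formula: rotation matrix for angle theta about a unit axis."""
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    K = np.array([[0., -axis[2], axis[1]],
                  [axis[2], 0., -axis[0]],
                  [-axis[1], axis[0], 0.]])
    return np.eye(3) + np.sin(theta) * K + (1. - np.cos(theta)) * (K @ K)

# 90 degrees about the z axis maps the x unit vector onto y
R = make_rotate3d([0., 0., 90.])
print(np.round(R @ np.array([1., 0., 0.]), 6))  # [0. 1. 0.]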
Example 2
    def trial_state_residuals(gid, target_outfld, t_peak_idxs, t_trough_idxs, t_infld_idxs, t_outfld_idxs, state_values, masked_state_values):

        state_value_arrays = np.vstack(state_values)
        masked_state_value_arrays = None
        if masked_state_values is not None:
            masked_state_value_arrays = np.vstack(masked_state_values)
        
        residuals_outfld = []
        peak_inflds = []
        trough_inflds = []
        mean_outflds = []
        for i in range(state_value_arrays.shape[0]):
            state_value_array = state_value_arrays[i, :]
            peak_infld = np.mean(state_value_array[t_peak_idxs])
            trough_infld = np.mean(state_value_array[t_trough_idxs])
            mean_infld = np.mean(state_value_array[t_infld_idxs])

            # masked_state_values may be None (see the check above), so only
            # compute the masked mean and residual when it is available
            mean_masked = np.nan
            if masked_state_value_arrays is not None:
                masked_state_value_array = masked_state_value_arrays[i, :]
                mean_masked = np.mean(masked_state_value_array)
            residual_masked = mean_masked - target_outfld

            mean_outfld = mean_masked
            if t_outfld_idxs is not None:
                mean_outfld = np.mean(state_value_array[t_outfld_idxs])
                
            peak_inflds.append(peak_infld)
            trough_inflds.append(trough_infld)
            mean_outflds.append(mean_outfld)
            residuals_outfld.append(residual_masked)
            logger.info(f'selectivity objective: state values of gid {gid}: '
                        f'peak/trough/mean in/mean out/masked: {peak_infld:.02f} / {trough_infld:.02f} / {mean_infld:.02f} / {mean_outfld:.02f} / residual masked: {residual_masked:.04f}')

        state_features = [np.mean(peak_inflds), np.mean(trough_inflds), np.mean(mean_outflds)]
        return (np.asarray(residuals_outfld), state_features)
Example 3
def lpt(cx, npart):
    ''' From the list of (cx, gid) pairs, return an npart-length list with each
        partition being a total_cx followed by a list of (cx, gid) pairs. '''
    cx.sort(key=lambda x: x[0], reverse=True)
    # initialize a priority queue for fast determination of current
    # partition with least complexity. The priority queue always has
    # npart items in it. At this time we do not care which partition will
    # be associated with which rank so a partition on the heap is just
    # (totalcx, [list of (cx, gid)])
    h = []
    for i in range(npart):
        heapq.heappush(h, (0.0, []))
    # each cx item goes into the current least complex partition
    for c in cx:
        lp = heapq.heappop(h)  # least partition
        lp[1].append(c)
        heapq.heappush(h, (lp[0] + c[0], lp[1]))
    parts = [heapq.heappop(h) for _ in range(len(h))]
    return parts
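A quick worked example of lpt (illustrative values; assumes heapq is imported as in the original module):

items = [(5.0, 10), (4.0, 11), (3.0, 12), (2.0, 13), (1.0, 14)]
parts = lpt(items, 2)
# Each partition is (total_cx, [(cx, gid), ...]); the greedy rule balances
# the totals to 7.0 and 8.0 (the exact grouping depends on heap tie-breaking), e.g.
# [(7.0, [(5.0, 10), (2.0, 13)]), (8.0, [(4.0, 11), (3.0, 12), (1.0, 14)])]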
Example 4
    def gid_firing_rate_vectors(spkdict, cell_index_set):
        rates_dict = defaultdict(list)
        for i in range(n_trials):
            spkdict1 = {}
            for gid in cell_index_set:
                if gid in spkdict[population]:
                    spkdict1[gid] = spkdict[population][gid][i]
                else:
                    spkdict1[gid] = np.asarray([], dtype=np.float32)
            spike_density_dict = spikedata.spike_density_estimate(population, spkdict1, time_bins)
            for gid in cell_index_set:
                rate_vector = spike_density_dict[gid]['rate']
                rate_vector[np.isclose(rate_vector, 0., atol=1e-3, rtol=1e-3)] = 0.
                rates_dict[gid].append(rate_vector)
                logger.info(f'selectivity objective: trial {i} firing rate min/max of gid {gid}: '
                            f'{np.min(rates_dict[gid]):.02f} / {np.max(rates_dict[gid]):.02f} Hz')

        return rates_dict
Example 5
def read_params(input_path):
    input_file = h5py.File(input_path, 'r')  # read-only: this function only reads

    pop_params_dict = {}
    parameters_group = h5_get_group(input_file, 'Parameters')
    for population in parameters_group.keys():
        this_pop_params_dict = {}
        pop_group = h5_get_group(parameters_group, population)
        for id_str in pop_group.keys():
            params_data = pop_group[id_str][:]
            this_id_params_dict = {}
            for i in range(len(params_data)):
                this_id_params_dict[params_data[i]["parameter"]] = params_data[i]["value"]
            this_pop_params_dict[int(id_str)] = this_id_params_dict
        pop_params_dict[population] = this_pop_params_dict
    input_file.close()
    return pop_params_dict
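A minimal sketch of the HDF5 layout read_params expects, written with plain h5py; the population name 'GC', the gid 0, and the parameter names are illustrative assumptions:

import h5py
import numpy as np

# one compound row per parameter: ("parameter", "value")
params_dtype = np.dtype([("parameter", "S32"), ("value", np.float64)])
with h5py.File("example_params.h5", "w") as f:
    pop_group = f.create_group("Parameters/GC")
    data = np.array([(b"g_pas", 1e-5), (b"e_pas", -70.0)], dtype=params_dtype)
    pop_group.create_dataset("0", data=data)  # dataset name is the cell gid

# read_params("example_params.h5") then returns (keys are bytes for S32 strings):
# {'GC': {0: {b'g_pas': 1e-05, b'e_pas': -70.0}}}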
Example 6
    def trial_snr_residuals(gid, peak_idxs, trough_idxs, infld_idxs, outfld_idxs, 
                            rate_vectors, masked_rate_vectors, target_rate_vector):

        n_trials = len(rate_vectors)
        residual_inflds = []
        trial_inflds = []
        trial_outflds = []

        target_infld = target_rate_vector[infld_idxs]
        target_max_infld = np.max(target_infld)
        target_mean_trough = np.mean(target_rate_vector[trough_idxs])
        logger.info(f'selectivity objective: target max infld/mean trough of gid {gid}: '
                    f'{target_max_infld:.02f} {target_mean_trough:.02f}')
        for trial_i in range(n_trials):

            rate_vector = rate_vectors[trial_i]
            infld_rate_vector = rate_vector[infld_idxs]
            masked_rate_vector = masked_rate_vectors[trial_i]
            if outfld_idxs is None:
                outfld_rate_vector = masked_rate_vector
            else:
                outfld_rate_vector = rate_vector[outfld_idxs]

            mean_peak = np.mean(rate_vector[peak_idxs])
            mean_trough = np.mean(rate_vector[trough_idxs])
            min_infld = np.min(infld_rate_vector)
            max_infld = np.max(infld_rate_vector)
            mean_infld = np.mean(infld_rate_vector)
            mean_outfld = np.mean(outfld_rate_vector)

            residual_infld = np.abs(np.sum(target_infld - infld_rate_vector))
            logger.info(f'selectivity objective: max infld/mean infld/mean peak/trough/mean outfld/residual_infld of gid {gid} trial {trial_i}: '
                        f'{max_infld:.02f} {mean_infld:.02f} {mean_peak:.02f} {mean_trough:.02f} {mean_outfld:.02f} {residual_infld:.04f}')
            residual_inflds.append(residual_infld)
            trial_inflds.append(mean_infld)
            trial_outflds.append(mean_outfld)

        trial_rate_features = [np.asarray(trial_inflds, dtype=np.float32).reshape((1, n_trials)), 
                               np.asarray(trial_outflds, dtype=np.float32).reshape((1, n_trials))]
        # summary rate features are taken from the final trial of the loop above
        rate_features = [mean_peak, mean_trough, max_infld, min_infld, mean_infld, mean_outfld]
        #rate_constr = [ mean_peak if max_infld > 0. else -1. ]
        rate_constr = [mean_peak - mean_trough if max_infld > 0. else -1.]
        return (np.asarray(residual_inflds), trial_rate_features, rate_features, rate_constr)
Example 7
def main(config_file, population, dt, gid, gid_selection_file, arena_id, trajectory_id, generate_weights,
         t_max, t_min,  nprocs_per_worker, n_epochs, n_initial, initial_maxiter, initial_method, optimizer_method, surrogate_method,
         population_size, num_generations, resample_fraction, mutation_rate,
         template_paths, dataset_prefix, config_prefix,
         param_config_name, selectivity_config_name, param_type, recording_profile, results_file, results_path, spike_events_path,
         spike_events_namespace, spike_events_t, input_features_path, input_features_namespaces, n_trials,
         trial_regime, problem_regime, target_features_path, target_features_namespace, target_state_variable,
         target_state_filter, use_coreneuron, cooperative_init, spawn_startup_wait):
    """
    Optimize the input stimulus selectivity of the specified cell in a network clamp configuration.
    """
    init_params = dict(locals())

    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()

    results_file_id = None
    if rank == 0:
        results_file_id = generate_results_file_id(population, gid)
        
    results_file_id = comm.bcast(results_file_id, root=0)
    comm.barrier()
    
    np.seterr(all='raise')
    verbose = True
    cache_queries = True

    config_logging(verbose)

    cell_index_set = set()
    if gid_selection_file is not None:
        with open(gid_selection_file, 'r') as f:
            lines = f.readlines()
            for line in lines:
                gid = int(line)
                cell_index_set.add(gid)
    elif gid is not None:
        cell_index_set.add(gid)
    else:
        comm.barrier()
        comm0 = comm.Split(2 if rank == 0 else 1, 0)
        if rank == 0:
            env = Env(**init_params, comm=comm0)
            attr_info_dict = read_cell_attribute_info(env.data_file_path, populations=[population],
                                                      read_cell_index=True, comm=comm0)
            attr_name, attr_cell_index = next(iter(attr_info_dict[population]['Trees']))
            cell_index_set = set(attr_cell_index)
        comm.barrier()
        cell_index_set = comm.bcast(cell_index_set, root=0)
        comm.barrier()
        comm0.Free()
    init_params['cell_index_set'] = cell_index_set
    del init_params['gid']

    params = dict(locals())
    env = Env(**params)
    if size == 1:
        configure_hoc_env(env)
        init(env, population, cell_index_set, arena_id, trajectory_id, n_trials,
             spike_events_path, spike_events_namespace=spike_events_namespace, 
             spike_train_attr_name=spike_events_t,
             input_features_path=input_features_path,
             input_features_namespaces=input_features_namespaces,
             generate_weights_pops=set(generate_weights), 
             t_min=t_min, t_max=t_max)
        
    if population in env.netclamp_config.optimize_parameters[param_type]:
        opt_params = env.netclamp_config.optimize_parameters[param_type][population]
    else:
        raise RuntimeError(f'optimize_selectivity: population {population} does not have optimization configuration')

    if target_state_variable is None:
        target_state_variable = 'v'
    
    init_params['target_features_arena'] = arena_id
    init_params['target_features_trajectory'] = trajectory_id
    opt_state_baseline = opt_params['Targets']['state'][target_state_variable]['baseline']
    init_params['state_baseline'] = opt_state_baseline
    init_params['state_variable'] = target_state_variable
    init_params['state_filter'] = target_state_filter
    init_objfun_name = 'init_selectivity_objfun'
        
    best = optimize_run(env, population, param_config_name, selectivity_config_name, init_objfun_name, problem_regime=problem_regime,
                        n_epochs=n_epochs, n_initial=n_initial, initial_maxiter=initial_maxiter, initial_method=initial_method, 
                        optimizer_method=optimizer_method, surrogate_method=surrogate_method, population_size=population_size, 
                        num_generations=num_generations, resample_fraction=resample_fraction, mutation_rate=mutation_rate, 
                        param_type=param_type, init_params=init_params, results_file=results_file, nprocs_per_worker=nprocs_per_worker, 
                        cooperative_init=cooperative_init, spawn_startup_wait=spawn_startup_wait, verbose=verbose)
    
    opt_param_config = optimization_params(env.netclamp_config.optimize_parameters, [population], param_config_name, param_type)
    if best is not None:
        if results_path is not None:
            run_ts = time.strftime("%Y%m%d_%H%M%S")
            file_path = f'{results_path}/optimize_selectivity.{run_ts}.yaml'
            param_names = opt_param_config.param_names
            param_tuples = opt_param_config.param_tuples

            if ProblemRegime[problem_regime] == ProblemRegime.every:
                results_config_dict = {}
                for gid, prms in viewitems(best):
                    n_res = prms[0][1].shape[0]
                    prms_dict = dict(prms)
                    this_results_config_dict = {}
                    for i in range(n_res):
                        results_param_list = []
                        for param_pattern, param_tuple in zip(param_names, param_tuples):
                            results_param_list.append((param_tuple.population,
                                                       param_tuple.source,
                                                       param_tuple.sec_type,
                                                       param_tuple.syn_name,
                                                       param_tuple.param_path,
                                                       float(prms_dict[param_pattern][i])))
                        this_results_config_dict[i] = results_param_list
                    results_config_dict[gid] = this_results_config_dict
                    
            else:
                prms = best[0]
                n_res = prms[0][1].shape[0]
                prms_dict = dict(prms)
                results_config_dict = {}
                for i in range(n_res):
                    results_param_list = []
                    for param_pattern, param_tuple in zip(param_names, param_tuples):
                        results_param_list.append((param_tuple.population,
                                                   param_tuple.source,
                                                   param_tuple.sec_type,
                                                   param_tuple.syn_name,
                                                   param_tuple.param_path,
                                                   float(prms_dict[param_pattern][i])))
                    results_config_dict[i] = results_param_list

            write_to_yaml(file_path, { population: results_config_dict } )

            
    comm.barrier()
Example 8
def recsout(env,
            output_path,
            t_start=None,
            clear_data=False,
            write_cell_location_data=False,
            write_trial_data=False):
    """
    Writes intracellular state traces to specified NeuroH5 output file.

    :param env:
    :param output_path:
    :param clear_data:
    :param reduce_data:
    :return:
    """
    t_rec = env.t_rec
    equilibration_duration = float(
        env.stimulus_config['Equilibration Duration'])
    reduce_data = env.recording_profile.get('reduce', None)
    n_trials = env.n_trials

    trial_time_ranges = get_trial_time_ranges(env.t_rec.to_python(),
                                              env.n_trials)
    trial_time_bins = [
        t_trial_start for t_trial_start, t_trial_end in trial_time_ranges
    ]
    trial_dur = np.asarray([env.tstop + equilibration_duration] * n_trials,
                           dtype=np.float32)

    for pop_name in sorted(env.celltypes.keys()):
        local_rec_types = list(env.recs_dict[pop_name].keys())
        rec_types = sorted(
            set(env.comm.allreduce(local_rec_types, op=mpi_op_concat)))
        for rec_type in rec_types:
            recs = env.recs_dict[pop_name][rec_type]
            attr_dict = defaultdict(lambda: {})
            for rec in recs:
                gid = rec['gid']
                data_vec = np.array(rec['vec'],
                                    copy=clear_data,
                                    dtype=np.float32)
                time_vec = np.array(t_rec, copy=clear_data, dtype=np.float32)
                if t_start is not None:
                    time_inds = np.where(time_vec >= t_start)[0]
                    time_vec = time_vec[time_inds]
                    data_vec = data_vec[time_inds]
                trial_bins = np.digitize(time_vec, trial_time_bins) - 1
                for trial_i in range(n_trials):
                    trial_inds = np.where(trial_bins == trial_i)[0]
                    time_vec[trial_inds] -= np.sum(trial_dur[:trial_i]) + equilibration_duration
                label = rec['label']
                if label in attr_dict[gid]:
                    if reduce_data is None:
                        raise RuntimeError(
                            'recsout: duplicate recorder labels and no reduce strategy specified'
                        )
                    elif reduce_data is True:
                        attr_dict[gid][label] += data_vec
                    else:
                        raise RuntimeError(
                            'recsout: unsupported reduce strategy specified')
                else:
                    attr_dict[gid][label] = data_vec
                    attr_dict[gid]['t'] = time_vec
                if write_trial_data:
                    attr_dict[gid]['trial duration'] = trial_dur
                if write_cell_location_data:
                    distance = rec.get('distance', None)
                    if distance is not None:
                        attr_dict[gid]['distance'] = np.asarray(
                            [distance], dtype=np.float32)
                    section = rec.get('section', None)
                    if section is not None:
                        attr_dict[gid]['section'] = np.asarray([section],
                                                               dtype=np.int16)
                    loc = rec.get('loc', None)
                    if loc is not None:
                        attr_dict[gid]['loc'] = np.asarray([loc],
                                                           dtype=np.float32)
                if clear_data:
                    rec['vec'].resize(0)
            if env.results_namespace_id is None:
                namespace_id = "Intracellular %s" % (rec_type)
            else:
                namespace_id = "Intracellular %s %s" % (
                    rec_type, str(env.results_namespace_id))
            append_cell_attributes(output_path,
                                   pop_name,
                                   attr_dict,
                                   namespace=namespace_id,
                                   comm=env.comm,
                                   io_size=env.io_size)
    if clear_data:
        env.t_rec.resize(0)

    env.comm.barrier()
    if env.comm.Get_rank() == 0:
        logger.info("*** Output intracellular state results to file %s" %
                    output_path)
Example 9
def spikeout(env, output_path, t_start=None, clear_data=False):
    """
    Writes spike times to specified NeuroH5 output file.

    :param env:
    :param output_path:
    :param clear_data: 
    :return:
    """
    equilibration_duration = float(
        env.stimulus_config['Equilibration Duration'])
    n_trials = env.n_trials

    t_vec = np.array(env.t_vec, dtype=np.float32)
    id_vec = np.array(env.id_vec, dtype=np.uint32)

    trial_time_ranges = get_trial_time_ranges(env.t_rec.to_python(),
                                              env.n_trials)
    trial_time_bins = [
        t_trial_start for t_trial_start, t_trial_end in trial_time_ranges
    ]
    trial_dur = np.asarray([env.tstop + equilibration_duration] * n_trials,
                           dtype=np.float32)

    binlst = []
    typelst = sorted(env.celltypes.keys())
    binvect = np.asarray([env.celltypes[k]['start'] for k in typelst])
    sort_idx = np.argsort(binvect, axis=0)
    pop_names = [typelst[i] for i in sort_idx]
    bins = binvect[sort_idx][1:]
    inds = np.digitize(id_vec, bins)

    if env.results_namespace_id is None:
        namespace_id = "Spike Events"
    else:
        namespace_id = "Spike Events %s" % str(env.results_namespace_id)

    for i, pop_name in enumerate(pop_names):
        spkdict = {}
        sinds = np.where(inds == i)[0]
        if len(sinds) > 0:
            ids = id_vec[sinds]
            ts = t_vec[sinds]
            for j in range(0, len(ids)):
                gid = ids[j]
                t = ts[j]
                if (t_start is None) or (t >= t_start):
                    if gid in spkdict:
                        spkdict[gid]['t'].append(t)
                    else:
                        spkdict[gid] = {'t': [t]}
            for gid in spkdict:
                spiketrain = np.array(spkdict[gid]['t'], dtype=np.float32)
                if gid in env.spike_onset_delay:
                    spiketrain -= env.spike_onset_delay[gid]
                trial_bins = np.digitize(spiketrain, trial_time_bins) - 1
                trial_spikes = [
                    np.copy(spiketrain[np.where(trial_bins == trial_i)[0]])
                    for trial_i in range(n_trials)
                ]
                for trial_i, trial_spiketrain in enumerate(trial_spikes):
                    # shift each trial's spikes to trial-relative time
                    trial_spiketrain -= np.sum(trial_dur[:trial_i]) + equilibration_duration
                spkdict[gid]['t'] = np.concatenate(trial_spikes)
                spkdict[gid]['Trial Duration'] = trial_dur
                spkdict[gid]['Trial Index'] = np.asarray(trial_bins,
                                                         dtype=np.uint8)
        append_cell_attributes(output_path,
                               pop_name,
                               spkdict,
                               namespace=namespace_id,
                               comm=env.comm,
                               io_size=env.io_size)
        del spkdict

    if clear_data:
        env.t_vec.resize(0)
        env.id_vec.resize(0)

    env.comm.barrier()
    if env.comm.Get_rank() == 0:
        logger.info("*** Output spike results to file %s" % output_path)
Example 10
def spatial_bin_graph(connectivity_path,
                      coords_path,
                      distances_namespace,
                      destination,
                      sources,
                      extents,
                      bin_size=20.0,
                      cache_size=100,
                      comm=None):
    """
    Obtain reduced graphs of the specified projections by binning nodes according to their spatial position.

    :param connectivity_path:
    :param coords_path:
    :param distances_namespace: 
    :param destination: 
    :param source: 

    """

    import networkx as nx

    if comm is None:
        comm = MPI.COMM_WORLD

    rank = comm.Get_rank()

    (population_ranges, _) = read_population_ranges(coords_path)

    destination_start = population_ranges[destination][0]
    destination_count = population_ranges[destination][1]

    if rank == 0:
        logger.info('reading %s distances...' % destination)

    destination_soma_distances = bcast_cell_attributes(
        coords_path,
        destination,
        namespace=distances_namespace,
        comm=comm,
        root=0)

    ((x_min, x_max), (y_min, y_max)) = extents
    u_bins = np.arange(x_min, x_max, bin_size)
    v_bins = np.arange(y_min, y_max, bin_size)

    dest_u_bins = {}
    dest_v_bins = {}
    destination_soma_distance_U = {}
    destination_soma_distance_V = {}
    for k, v in destination_soma_distances:
        dist_u = v['U Distance'][0]
        dist_v = v['V Distance'][0]
        dest_u_bins[k] = np.searchsorted(u_bins, dist_u, side='left')
        dest_v_bins[k] = np.searchsorted(v_bins, dist_v, side='left')
        destination_soma_distance_U[k] = dist_u
        destination_soma_distance_V[k] = dist_v

    del destination_soma_distances

    if not sources:
        sources = []
        for (src, dst) in read_projection_names(connectivity_path):
            if dst == destination:
                sources.append(src)

    source_soma_distances = {}
    for s in sources:
        if rank == 0:
            logger.info('reading %s distances...' % s)
        source_soma_distances[s] = bcast_cell_attributes(
            coords_path, s, namespace=distances_namespace, comm=comm, root=0)

    source_u_bins = {}
    source_v_bins = {}
    source_soma_distance_U = {}
    source_soma_distance_V = {}
    for s in sources:
        this_source_soma_distance_U = {}
        this_source_soma_distance_V = {}
        this_source_u_bins = {}
        this_source_v_bins = {}
        for k, v in source_soma_distances[s]:
            dist_u = v['U Distance'][0]
            dist_v = v['V Distance'][0]
            this_source_u_bins[k] = np.searchsorted(u_bins,
                                                    dist_u,
                                                    side='left')
            this_source_v_bins[k] = np.searchsorted(v_bins,
                                                    dist_v,
                                                    side='left')
            this_source_soma_distance_U[k] = dist_u
            this_source_soma_distance_V[k] = dist_v
        source_soma_distance_U[s] = this_source_soma_distance_U
        source_soma_distance_V[s] = this_source_soma_distance_V
        source_u_bins[s] = this_source_u_bins
        source_v_bins[s] = this_source_v_bins
    del source_soma_distances

    if rank == 0:
        logger.info('reading connections %s -> %s...' %
                    (str(sources), destination))
    gg = [
        NeuroH5ProjectionGen(connectivity_path,
                             source,
                             destination,
                             cache_size=cache_size,
                             comm=comm) for source in sources
    ]

    dist_bins = defaultdict(dict)
    dist_u_bins = defaultdict(dict)
    dist_v_bins = defaultdict(dict)

    local_u_bin_graph = defaultdict(dict)
    local_v_bin_graph = defaultdict(dict)

    for prj_gen_tuple in zip_longest(*gg):
        destination_gid = prj_gen_tuple[0][0]
        if not all([
                prj_gen_elt[0] == destination_gid
                for prj_gen_elt in prj_gen_tuple
        ]):
            raise RuntimeError(
                'destination %s: destination_gid %i not matched across multiple projection generators: '
                '%s' % (destination, destination_gid,
                        [prj_gen_elt[0] for prj_gen_elt in prj_gen_tuple]))

        if destination_gid is not None:
            dest_u_bin = dest_u_bins[destination_gid]
            dest_v_bin = dest_v_bins[destination_gid]
            for (source, (this_destination_gid,
                          rest)) in zip_longest(sources, prj_gen_tuple):
                this_source_u_bins = source_u_bins[source]
                this_source_v_bins = source_v_bins[source]
                (source_indexes, attr_dict) = rest
                source_u_bin_dict = defaultdict(int)
                source_v_bin_dict = defaultdict(int)
                for source_gid in source_indexes:
                    source_u_bin = this_source_u_bins[source_gid]
                    source_v_bin = this_source_v_bins[source_gid]
                    source_u_bin_dict[source_u_bin] += 1
                    source_v_bin_dict[source_v_bin] += 1
                local_u_bin_graph[dest_u_bin][source] = source_u_bin_dict
                local_v_bin_graph[dest_v_bin][source] = source_v_bin_dict

    local_u_bin_graphs = comm.gather(dict(local_u_bin_graph), root=0)
    local_v_bin_graphs = comm.gather(dict(local_v_bin_graph), root=0)

    u_bin_graph = None
    v_bin_graph = None
    nu = None
    nv = None

    if rank == 0:

        u_bin_edges = {destination: dict(ChainMap(*local_u_bin_graphs))}
        v_bin_edges = {destination: dict(ChainMap(*local_v_bin_graphs))}

        nu = len(u_bins)
        u_bin_graph = nx.Graph()
        for pop in [destination] + list(sources):
            for i in range(nu):
                u_bin_graph.add_node((pop, i))

        for i, ss in viewitems(u_bin_edges[destination]):
            for source, ids in viewitems(ss):
                u_bin_graph.add_weighted_edges_from([
                    ((source, j), (destination, i), count)
                    for j, count in viewitems(ids)
                ])

        nv = len(v_bins)
        v_bin_graph = nx.Graph()
        for pop in [destination] + list(sources):
            for i in range(nv):
                v_bin_graph.add_node((pop, i))

        for i, ss in viewitems(v_bin_edges[destination]):
            for source, ids in viewitems(ss):
                v_bin_graph.add_weighted_edges_from([
                    ((source, j), (destination, i), count)
                    for j, count in viewitems(ids)
                ])

    label = '%s to %s' % (str(sources), destination)

    return {
        'label': label,
        'bin size': bin_size,
        'destination': destination,
        'sources': sources,
        'U graph': u_bin_graph,
        'V graph': v_bin_graph
    }
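A minimal numpy-only sketch of the binning step above: np.searchsorted maps each soma distance onto a bin-edge index.

import numpy as np

u_bins = np.arange(0.0, 100.0, 20.0)       # bin edges: [ 0. 20. 40. 60. 80.]
dist_u = np.asarray([5.0, 20.0, 55.0, 99.0])
print(np.searchsorted(u_bins, dist_u, side='left'))  # [1 1 3 5]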
Example 11
def generate_synaptic_connections(rank,
                                  gid,
                                  ranstream_syn,
                                  ranstream_con,
                                  cluster_seed,
                                  destination_gid,
                                  synapse_dict,
                                  population_dict,
                                  projection_synapse_dict,
                                  projection_prob_dict,
                                  connection_dict,
                                  random_choice=random_choice_w_replacement):
    """
    Given a set of synapses for a particular gid, projection
    configuration, projection and connection probability dictionaries,
    generates a set of possible connections for each synapse. The
    procedure first assigns each synapse to a projection, using the
    given proportions of each synapse type, and then chooses source
    gids for each synapse using the given projection probability
    dictionary.

    :param ranstream_syn: random stream for the synapse partitioning step
    :param ranstream_con: random stream for the choosing source gids step
    :param destination_gid: destination gid
    :param synapse_dict: synapse configurations, a dictionary with fields: 1) syn_ids (synapse ids), 2) syn_types (excitatory, inhibitory, etc.),
                        3) swc_types (SWC type(s) of the synapse locations in the neuronal morphology), 4) syn_layers (synapse layer placement)
    :param population_dict: mapping of population names to population indices
    :param projection_synapse_dict: mapping of projection names to a tuple of the form: <syn_type, syn_layers, swc_types, syn_proportions, contact_counts>
    :param projection_prob_dict: mapping of presynaptic population names to sets of source probabilities and source gids
    :param connection_dict: output connection dictionary
    :param random_choice: random choice procedure (the default, random_choice_w_replacement, draws counts via the random stream's multinomial)

    """
    num_projections = len(projection_synapse_dict)
    prj_pop_index = {
        population: i
        for (i, population) in enumerate(projection_synapse_dict)
    }
    synapse_prj_counts = np.zeros((num_projections, ))
    synapse_prj_partition = defaultdict(lambda: defaultdict(list))
    maxit = 10
    it = 0
    ## assign each synapse to a projection
    while (np.count_nonzero(synapse_prj_counts) < num_projections) and (it < maxit):
        log_flag = it > 1
        if log_flag:
            logger.info("generate_synaptic_connections: gid %i: iteration %i" %
                        (gid, it))
        synapse_prj_counts.fill(0)
        synapse_prj_partition.clear()
        for (syn_id, syn_type, swc_type, syn_layer) in zip(
                synapse_dict['syn_ids'], synapse_dict['syn_types'],
                synapse_dict['swc_types'], synapse_dict['syn_layers']):
            projection = choose_synapse_projection(ranstream_syn, syn_layer, swc_type, syn_type, \
                                                   population_dict, projection_synapse_dict, log=log_flag)
            if log_flag:
                logger.info('generate_synaptic_connections: gid %i: ' \
                            'syn_id = %i syn_type = %i swc_type = %i syn_layer = %i projection = %s' % \
                            (gid, syn_id, syn_type, swc_type, syn_layer, projection))
            assert (projection is not None)
            synapse_prj_counts[prj_pop_index[projection]] += 1
            synapse_prj_partition[projection][syn_layer].append(syn_id)
        it += 1

    empty_projections = []

    for projection in projection_synapse_dict:
        logger.debug('Rank %i: gid %i: projection %s has %i synapses' %
                     (rank, destination_gid, projection,
                      len(synapse_prj_partition[projection])))
        if not (len(synapse_prj_partition[projection]) > 0):
            empty_projections.append(projection)

    if len(empty_projections) > 0:
        logger.warning('Rank %i: gid %i: projections %s have an empty synapse list; ' \
                       'swc types are %s layers are %s' % \
                       (rank, destination_gid, str(empty_projections), str(set(synapse_dict['swc_types'].flat)), \
                        str(set(synapse_dict['syn_layers'].flat))))
    assert (len(empty_projections) == 0)

    ## Choose source connections based on distance-weighted probability
    count = 0
    for projection, prj_layer_dict in viewitems(synapse_prj_partition):
        (syn_config_type, syn_config_layers, syn_config_sections, syn_config_proportions, syn_config_contacts) = \
            projection_synapse_dict[projection]
        gid_dict = connection_dict[projection]
        prj_source_vertices = []
        prj_syn_ids = []
        prj_distances = []
        for prj_layer, syn_ids in viewitems(prj_layer_dict):
            source_probs, source_gids, distances_u, distances_v = \
                projection_prob_dict[projection][prj_layer]
            distance_dict = {source_gid: distance_u + distance_v \
                             for (source_gid, distance_u, distance_v) in \
                             zip(source_gids, distances_u, distances_v)}
            if len(source_gids) > 0:
                n_syn_groups = int(
                    math.ceil(
                        float(len(syn_ids)) / float(syn_config_contacts)))
                source_gid_counts = random_choice(ranstream_con, n_syn_groups,
                                                  source_probs)
                total_count = 0
                if syn_config_contacts > 1:
                    ncontacts = int(math.ceil(syn_config_contacts))
                    for i in range(0, len(source_gid_counts)):
                        if source_gid_counts[i] > 0:
                            source_gid_counts[i] *= ncontacts
                if len(source_gid_counts) == 0:
                    logger.warning('Rank %i: source vertices list is empty for gid: %i projection: %s layer: %s ' \
                                   'source probs: %s distances_u: %s distances_v: %s' % \
                                   (rank, destination_gid, projection, str(prj_layer), \
                                    str(source_probs), str(distances_u), str(distances_v)))

                uv_distance_sums = np.add(distances_u,
                                          distances_v,
                                          dtype=np.float32)
                source_vertices = np.asarray(random_clustered_shuffle(len(source_gids), \
                                                                      source_gid_counts, \
                                                                      center_ids=source_gids, \
                                                                      cluster_std=2.0, \
                                                                      random_seed=cluster_seed), \
                                             dtype=np.uint32)[0:len(syn_ids)]
                assert (len(source_vertices) == len(syn_ids))
                distances = np.asarray([distance_dict[gid] for gid in source_vertices], \
                                       dtype=np.float32).reshape(-1, )
                prj_source_vertices.append(source_vertices)
                prj_syn_ids.append(syn_ids)
                prj_distances.append(distances)
                gid_dict[destination_gid] = (np.asarray([], dtype=np.uint32), {
                    'Synapses': {
                        'syn_id': np.asarray([], dtype=np.uint32)
                    },
                    'Connections': {
                        'distance': np.asarray([], dtype=np.float32)
                    }
                })
                cluster_seed += 1
        if len(prj_source_vertices) > 0:
            prj_source_vertices_array = np.concatenate(prj_source_vertices)
        else:
            prj_source_vertices_array = np.asarray([], dtype=np.uint32)
        del prj_source_vertices
        if len(prj_syn_ids) > 0:
            prj_syn_ids_array = np.concatenate(prj_syn_ids)
        else:
            prj_syn_ids_array = np.asarray([], dtype=np.uint32)
        del prj_syn_ids
        if len(prj_distances) > 0:
            prj_distances_array = np.concatenate(prj_distances)
        else:
            prj_distances_array = np.asarray([], dtype=np.float32)
        del prj_distances
        if len(prj_source_vertices_array) == 0:
            logger.warning(
                'Rank %i: source gid list is empty for gid: %i projection: %s'
                % (rank, destination_gid, projection))
        count += len(prj_source_vertices_array)
        gid_dict[destination_gid] = (prj_source_vertices_array,
                                     {'Synapses': {'syn_id': np.asarray(prj_syn_ids_array, \
                                                                        dtype=np.uint32)},
                                      'Connections': {'distance': prj_distances_array}
                                      })

    return count
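A minimal sketch of the default random_choice procedure (random_choice_w_replacement); per the docstring above it draws per-source counts from a multinomial over the source probabilities, so a stand-in could look like this (the exact signature in the original module is an assumption):

import numpy as np

def random_choice_w_replacement(ranstream, n, p):
    """Return, for each candidate source, how many of the n draws selected it."""
    p = np.asarray(p, dtype=float)
    return ranstream.multinomial(n, pvals=p / p.sum())

rng = np.random.RandomState(42)
counts = random_choice_w_replacement(rng, 10, [0.5, 0.3, 0.2])
print(counts, counts.sum())  # e.g. [5 3 2] 10; counts always sum to n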
Example 12
def statistics(parts):
    npart = len(parts)
    total_cx = 0
    max_part_cx = 0
    ncx = 0
    max_cx = 0
    for part in parts:
        ncx += len(part[1])
        total_cx += part[0]
        if part[0] > max_part_cx:
            max_part_cx = part[0]
        for cx in part[1]:
            if cx[0] > max_cx:
                max_cx = cx[0]
    avg_part_cx = total_cx / npart
    loadbal = 1.0
    if max_part_cx > 0.:
        loadbal = avg_part_cx / max_part_cx
    logger.info(
        "*** loadbal=%g total_cx=%g npart=%d ncx=%d max_part_cx=%g max_cx=%g" %
        (loadbal, total_cx, npart, ncx, max_part_cx, max_cx))


if __name__ == '__main__':
    for cx in ([(i, i) for i in range(10)], []):
        logger.info('%i complexity items %s' % (len(cx), cx))
        pinfo = lpt(cx, 3)
        logger.info('%i lpt partitions %s' % (len(pinfo), str(pinfo)))
        statistics(pinfo)
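A worked check of the load-balance metric (loadbal = avg_part_cx / max_part_cx), using two hand-written partitions; assumes the module's logger is configured:

parts = [(7.0, [(5.0, 10), (2.0, 13)]), (8.0, [(4.0, 11), (3.0, 12), (1.0, 14)])]
statistics(parts)
# loadbal = (15.0 / 2) / 8.0 = 0.9375, total_cx = 15, npart = 2, ncx = 5, max_cx = 5.0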
Example 13
def icp_transform(comm,
                  env,
                  soma_coords,
                  projection_ls,
                  population_extents,
                  rotate=None,
                  populations=None,
                  icp_iter=1000,
                  opt_iter=100):
    """
    Uses the iterative closest point (ICP) algorithm of the PCL library to transform soma coordinates onto a surface for a particular L value.
    http://pointclouds.org/documentation/tutorials/iterative_closest_point.php#iterative-closest-point

    """

    import dlib, pcl

    rank = comm.rank
    size = comm.size

    if populations is None:
        populations = list(soma_coords.keys())

    srf_resample = 25

    layer_extents = env.geometry['Parametric Surface']['Layer Extents']

    (extent_u, extent_v, extent_l) = get_total_extents(layer_extents)

    min_u, max_u = extent_u
    min_v, max_v = extent_v
    min_l, max_l = extent_l

    ## This parameter is used to expand the range of L and avoid
    ## situations where the endpoints of L end up outside of the range
    ## of the distance interpolant
    safety = 0.01

    extent_u = (min_u - safety, max_u + safety)
    extent_v = (min_v - safety, max_v + safety)

    # box bounds over (u, v, l) for the find_min_global search below
    limits = ([extent_u[0], extent_v[0], min_l - safety],
              [extent_u[1], extent_v[1], max_l + safety])

    projection_ptclouds = []
    for obs_l in projection_ls:
        srf = make_surface(extent_u, extent_v, obs_l, rotate=rotate)
        U, V = srf._resample_uv(srf_resample, srf_resample)
        meshpts = srf.ev(U, V)
        projection_ptcloud = pcl.PointCloud()
        projection_ptcloud.from_array(meshpts)
        projection_ptclouds.append(projection_ptcloud)

    soma_coords_dict = {}
    for pop in populations:
        coords_dict = soma_coords[pop]
        if rank == 0:
            logger.info('Computing point transformation for population %s...' %
                        pop)
        count = 0
        xyz_coords = []
        gids = []
        for gid, coords in viewitems(coords_dict):
            if gid % size == rank:
                soma_u, soma_v, soma_l = coords
                xyz_coords.append(
                    DG_volume(soma_u, soma_v, soma_l, rotate=rotate))
                gids.append(gid)
        xyz_pts = np.vstack(xyz_coords)

        cloud_in = pcl.PointCloud()
        cloud_in.from_array(xyz_pts)

        icp = cloud_in.make_IterativeClosestPoint()

        all_est_xyz_coords = []
        all_est_uvl_coords = []
        all_interp_err = []

        for (k, cloud_prj) in enumerate(projection_ptclouds):
            k_est_xyz_coords = np.zeros((len(gids), 3))
            k_est_uvl_coords = np.zeros((len(gids), 3))
            interp_err = np.zeros((len(gids), ))
            converged, transf, estimate, fitness = icp.icp(cloud_in,
                                                           cloud_prj,
                                                           max_iter=icp_iter)
            logger.info('Transformation of population %s has converged: %s score: %f' %
                        (pop, str(converged), fitness))
            for i, gid in zip(list(range(0, estimate.size)), gids):
                est_xyz_coords = estimate[i]
                k_est_xyz_coords[i, :] = est_xyz_coords
                f_uvl_distance = make_uvl_distance(est_xyz_coords,
                                                   rotate=rotate)
                uvl_coords, err = dlib.find_min_global(f_uvl_distance,
                                                       limits[0], limits[1],
                                                       opt_iter)
                k_est_uvl_coords[i, :] = uvl_coords
                interp_err[i] = err
                if rank == 0:
                    logger.info(
                        'gid %i: u: %f v: %f l: %f' %
                        (gid, uvl_coords[0], uvl_coords[1], uvl_coords[2]))
            all_est_xyz_coords.append(k_est_xyz_coords)
            all_est_uvl_coords.append(k_est_uvl_coords)
            all_interp_err.append(interp_err)

        coords_dict = {}
        for (i, gid) in enumerate(gids):
            coords_dict[gid] = {
                'X Coordinate':
                np.asarray([col[i, 0] for col in all_est_xyz_coords],
                           dtype='float32'),
                'Y Coordinate':
                np.asarray([col[i, 1] for col in all_est_xyz_coords],
                           dtype='float32'),
                'Z Coordinate':
                np.asarray([col[i, 2] for col in all_est_xyz_coords],
                           dtype='float32'),
                'U Coordinate':
                np.asarray([col[i, 0] for col in all_est_uvl_coords],
                           dtype='float32'),
                'V Coordinate':
                np.asarray([col[i, 1] for col in all_est_uvl_coords],
                           dtype='float32'),
                'L Coordinate':
                np.asarray([col[i, 2] for col in all_est_uvl_coords],
                           dtype='float32'),
                'Interpolation Error':
                np.asarray([err[i] for err in all_interp_err], dtype='float32')
            }

        soma_coords_dict[pop] = coords_dict

    return soma_coords_dict
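A minimal sketch of the dlib.find_min_global call pattern used above, on a toy 3-variable objective with box bounds (dlib only; values are illustrative):

import dlib

def f(u, v, l):
    return (u - 1.0) ** 2 + (v - 2.0) ** 2 + (l - 3.0) ** 2

# the two bound lists play the role of `limits` above; 50 caps function evaluations
xs, err = dlib.find_min_global(f, [0.0, 0.0, 0.0], [5.0, 5.0, 5.0], 50)
print(xs, err)  # xs near [1.0, 2.0, 3.0], err near 0.0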