Code Example #1
def main(arena_id, cell_selection_path, config_file, template_paths, hoc_lib_path, dataset_prefix, config_prefix,
         results_path, results_id, node_rank_file, io_size, recording_fraction, recording_profile, coredat, trajectory_id, tstop, v_init,
         stimulus_onset, max_walltime_hours, checkpoint_clear_data, checkpoint_interval, results_write_time,
         spike_input_path, spike_input_namespace, spike_input_attr, dt, ldbal, lptbal, cleanup, profile_memory, write_selection,
         verbose, debug, dry_run):

    # set to True to profile network init/run with cProfile
    profile_time = False

    comm = MPI.COMM_WORLD
    np.seterr(all='raise')  # raise exceptions on floating-point errors
    # capture all locals defined so far (the CLI arguments, comm, and
    # profile_time) as keyword arguments for the simulation environment
    params = dict(locals())

    env = Env(**params)

    if profile_time:
        from dentate.network import init, run
        import cProfile
        cProfile.runctx('init(env)', None, locals(), filename='dentate_profile_init')
        if not dry_run:
            cProfile.runctx('run(env)', None, locals(), filename='dentate_profile_run')
    else:
        network.init(env)
        if not dry_run:
            network.run(env)
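
The params = dict(locals()) idiom above snapshots every local defined so far (the CLI arguments, comm, and the profile flag) and forwards them to Env as keyword arguments. A minimal, self-contained sketch of the same pattern; Simulation is a hypothetical stand-in for dentate's Env:

class Simulation:
    def __init__(self, **kwargs):
        self.config = kwargs

def main(config_file, tstop, dt, dry_run):
    # snapshot taken before any other locals are defined, so the dict
    # contains exactly the function's parameters
    params = dict(locals())
    return Simulation(**params)

sim = main('Network.yaml', tstop=1000, dt=0.025, dry_run=True)
print(sim.config['tstop'])  # -> 1000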
Code Example #2
File: eval_network.py Project: soltesz-lab/dentate
def eval_network(env, network_config, from_param_list, from_param_dict, network_params, network_param_values, params_id, target_trj_rate_map_dict, t_start, t_stop, target_populations, output_path):

    param_tuple_values = None
    if params_id is not None:
        x = network_param_values[params_id]
        if isinstance(x, list):
            param_tuple_values = from_param_list(x)
        elif isinstance(x, dict):
            param_tuple_values = from_param_dict(x)
        else:
            raise RuntimeError(f"eval_network: invalid input parameters argument {x}")
    
        if env.comm.rank == 0:
            logger.info("*** Updating network parameters ...")
            logger.info(pprint.pformat(param_tuple_values))
        update_network_params(env, param_tuple_values)

    # disable checkpointing for this evaluation run and set the stop time
    env.checkpoint_clear_data = False
    env.checkpoint_interval = None
    env.tstop = t_stop
    network.run(env, output=network_config.get('output_results', False), shutdown=False)

    for pop_name in target_trj_rate_map_dict:
        append_cell_attributes(env.results_file_path, pop_name, target_trj_rate_map_dict[pop_name], 
                               namespace='Target Trajectory Rate Map', comm=env.comm, io_size=env.io_size)

    local_features = network_features(env, target_trj_rate_map_dict, t_start, t_stop, target_populations)
    return collect_network_features(env, local_features, target_populations, output_path, params_id, param_tuple_values)
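
Note the dispatch on the type of the input parameter vector: a list is converted with from_param_list, a dict with from_param_dict. A small stand-in sketch of that normalization (param_names and both constructors here are hypothetical; in dentate they are built from the optimization configuration):

param_names = ('weight', 'delay')

def from_param_list(values):
    # positional values, paired with parameter names in a fixed order
    return tuple(zip(param_names, values))

def from_param_dict(mapping):
    # named values, reordered into the same fixed layout
    return tuple((name, mapping[name]) for name in param_names)

for x in ([0.5, 1.2], {'weight': 0.5, 'delay': 1.2}):
    if isinstance(x, list):
        params = from_param_list(x)
    elif isinstance(x, dict):
        params = from_param_dict(x)
    else:
        raise RuntimeError(f"invalid input parameters argument {x}")
    print(params)  # (('weight', 0.5), ('delay', 1.2)) in both cases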
Code Example #3
def network_objfun(env, operational_config, opt_targets,
                   target_trj_rate_map_dict, from_param_dict, t_start, t_stop, target_populations, x):

    param_tuple_values = from_param_dict(x)
    update_network_params(env, param_tuple_values)

    env.tstop = t_stop
    network.run(env, output=False, shutdown=False)

    return network_features(env, target_trj_rate_map_dict, t_start, t_stop, target_populations)
Code Example #4
def compute_features_firing_rate_fraction_active(x, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    update_source_contexts(x, context)
    context.env.results_file_id = '%s_%s' % (context.interface.worker_id,
        datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))

    network.run(context.env, output=context.output_results, shutdown=False)

    pop_spike_dict = spikedata.get_env_spike_dict(context.env, include_artificial=False)

    t_start = 250.
    t_stop = context.env.tstop
    
    time_bins = np.arange(t_start, t_stop, context.bin_size)

    for pop_name in context.target_populations:

        mean_rate_sum = 0.
        spike_density_dict = spikedata.spike_density_estimate(pop_name, pop_spike_dict[pop_name], time_bins)
        for gid, dens_dict in utils.viewitems(spike_density_dict):
            mean_rate_sum += np.mean(dens_dict['rate'])
        mean_rate_sum = context.env.comm.allreduce(mean_rate_sum, op=MPI.SUM)

        n_total = context.env.comm.allreduce(len(context.env.cells[pop_name]) - len(context.env.artificial_cells[pop_name]), op=MPI.SUM)
        n_active = context.env.comm.allreduce(len(spike_density_dict), op=MPI.SUM)

        if n_active > 0:
            mean_rate = mean_rate_sum / n_active
        else:
            mean_rate = 0.

        if n_total > 0:
            fraction_active = n_active / n_total
        else:
            fraction_active = 0.

        rank = int(context.env.pc.id())
        if rank == 0:
            context.logger.info('population %s: n_active = %d n_total = %d' % (pop_name, n_active, n_total))

        results['%s firing rate' % pop_name] = mean_rate
        results['%s fraction active' % pop_name] = fraction_active

    return results
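
The reduction pattern used here recurs throughout these examples: each rank computes local sums, and an MPI.SUM allreduce produces identical global totals on every rank, from which the mean rate and fraction active follow. A minimal sketch assuming mpi4py (launch with e.g. mpiexec -n 4 python script.py):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
local_rates = np.random.uniform(0., 10., size=5)  # stand-in per-rank rates

rate_sum = comm.allreduce(float(np.sum(local_rates)), op=MPI.SUM)
n_active = comm.allreduce(len(local_rates), op=MPI.SUM)
mean_rate = rate_sum / n_active if n_active > 0 else 0.

if comm.rank == 0:
    print('global mean rate = %.3f Hz over %d cells' % (mean_rate, n_active))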
Code Example #5
def main(config_file, template_paths, hoc_lib_path, dataset_prefix,
         config_prefix, results_path, results_id, input_path, input_namespace,
         target_cell, tstop, v_init, stimulus_onset, max_walltime_hours,
         results_write_time, dt, ldbal, lptbal, verbose, dry_run):
    """
    :param config_file: str; model configuration file name
    :param template_paths: str; colon-separated list of paths to directories containing hoc cell templates
    :param hoc_lib_path: str; path to directory containing required hoc libraries
    :param dataset_prefix: str; path to directory containing required neuroh5 data files
    :param config_prefix: str; path to directory containing network and cell mechanism config files
    :param results_path: str; path to directory to export output files
    :param results_id: str; label for neuroh5 namespaces to write spike and voltage trace data
    :param input_path: str; path to file containing input spike trains
    :param input_namespace: str; neuroh5 namespace containing input spike trains
    :param target_cell: int; gid of the cell to simulate (passed to Env as cell_selection)
    :param tstop: int; physical time to simulate (ms)
    :param v_init: float; initialization membrane potential (mV)
    :param stimulus_onset: float; starting time of stimulus (ms)
    :param max_walltime_hours: float; maximum wall time (hours)
    :param results_write_time: float; time to write out results at end of simulation
    :param dt: float; simulation time step
    :param ldbal: bool; estimate load balance based on cell complexity
    :param lptbal: bool; calculate load balance with LPT algorithm
    :param verbose: bool; print verbose diagnostic messages while constructing the network
    :param dry_run: bool; whether to skip the simulation after building the network
    """
    comm = MPI.COMM_WORLD
    np.seterr(all='raise')
    vrecord_fraction = 1.0
    env = Env(comm,
              config_file,
              template_paths,
              hoc_lib_path,
              dataset_prefix,
              config_prefix,
              results_path,
              results_id,
              vrecord_fraction,
              target_cell,
              tstop,
              v_init,
              stimulus_onset,
              max_walltime_hours,
              results_write_time,
              dt,
              cell_selection=target_cell,
              spike_input_path=input_path,
              spike_input_ns=input_namespace,
              verbose=verbose)
    network.init(env)
    if not dry_run:
        network.run(env)
Code Example #6
def compute_features_network_walltime(x, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    start_time = time.time()
    update_source_contexts(x, context)
    results['modify_network_time'] = time.time() - start_time
    start_time = time.time()
    context.env.results_file_id = '%s_%s' % (context.interface.worker_id,
        datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))
    network.run(context.env, output=context.output_results, shutdown=False)
    results['sim_network_time'] = time.time() - start_time

    return results
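
The paired start_time / time.time() measurements above can be factored into a small context manager; a sketch using only the standard library, with the same result keys as the example:

import time
from contextlib import contextmanager

@contextmanager
def timed(results, key):
    # record wall-clock time spent inside the with-block under `key`
    start = time.time()
    yield
    results[key] = time.time() - start

results = {}
with timed(results, 'modify_network_time'):
    time.sleep(0.1)  # stands in for update_source_contexts(x, context)
with timed(results, 'sim_network_time'):
    time.sleep(0.2)  # stands in for network.run(...)
print(results)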
Code Example #7
File: dry_run.py Project: soltesz-lab/dentate
def main(cell_selection_path, config_file, template_paths, hoc_lib_path,
         dataset_prefix, config_prefix, results_path, results_id,
         node_rank_file, io_size, vrecord_fraction, coredat, tstop, v_init,
         stimulus_onset, max_walltime_hours, results_write_time,
         spike_input_path, spike_input_namespace, dt, ldbal, lptbal, cleanup,
         verbose, run_test):
    """
    :param cell_selection_path: str; name of file specifying subset of cells gids to be instantiated
    :param config_file: str; model configuration file name
    :param template_paths: str; colon-separated list of paths to directories containing hoc cell templates
    :param hoc_lib_path: str; path to directory containing required hoc libraries
    :param dataset_prefix: str; path to directory containing required neuroh5 data files
    :param config_prefix: str; path to directory containing network and cell mechanism config files
    :param results_path: str; path to directory to export output files
    :param results_id: str; label for neuroh5 namespaces to write spike and voltage trace data
    :param node_rank_file: str; name of file specifying assignment of node gids to MPI ranks
    :param io_size: int; the number of MPI ranks to be used for I/O operations
    :param vrecord_fraction: float; fraction of cells to record intracellular voltage from
    :param coredat: bool; Save CoreNEURON data
    :param tstop: int; physical time to simulate (ms)
    :param v_init: float; initialization membrane potential (mV)
    :param stimulus_onset: float; starting time of stimulus (ms)
    :param max_walltime_hours: float; maximum wall time (hours)
    :param results_write_time: float; time to write out results at end of simulation
    :param spike_input_path: str; path to file for input spikes when cell selection is specified
    :param spike_input_namespace: str; neuroh5 namespace containing input spike trains
    :param dt: float; simulation time step
    :param ldbal: bool; estimate load balance based on cell complexity
    :param lptbal: bool; calculate load balance with LPT algorithm
    :param cleanup: bool; whether to delete from memory the synapse attributes metadata after specifying connections
    :param verbose: bool; print verbose diagnostic messages while constructing the network
    :param run_test: bool; whether to actually execute simulation after building network
    """
    comm = MPI.COMM_WORLD
    np.seterr(all='raise')
    params = dict(locals())
    env = Env(**params)
    network.init(env)
    if run_test:
        network.run(env, output=False)
Code Example #8
def compute_features_firing_rate(x, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    update_source_contexts(x, context)
    context.env.results_id = '%s_%s' % (context.interface.worker_id,
        datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))

    network.run(context.env, output=context.output_results, shutdown=False)

    pop_spike_dict = spikedata.get_env_spike_dict(context.env)

    t_start = 0.
    t_stop = context.env.tstop

    time_bins = np.arange(t_start, t_stop, context.bin_size)

    pop_name = 'GC'

    mean_rate_sum = 0.
    spike_density_dict = spikedata.spike_density_estimate(
        pop_name, pop_spike_dict[pop_name], time_bins)
    for gid, dens_dict in utils.viewitems(spike_density_dict):
        mean_rate_sum += np.mean(dens_dict['rate'])

    n = len(spike_density_dict)
    if n > 0:
        mean_rate = mean_rate_sum / n
    else:
        mean_rate = 0.

    results['firing_rate'] = mean_rate

    return results
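
spike_density_estimate itself is not shown in these examples; as a rough mental model, a rate estimate over fixed time bins can be as simple as a histogram of spike times converted to Hz. A hypothetical stand-in (not dentate's implementation, which applies smoothing):

import numpy as np

def binned_rate(spike_times_ms, t_start, t_stop, bin_size_ms):
    # spike counts per bin, converted to spikes per second
    edges = np.arange(t_start, t_stop + bin_size_ms, bin_size_ms)
    counts, _ = np.histogram(spike_times_ms, bins=edges)
    return counts / (bin_size_ms / 1000.0)

spikes = np.array([12., 15., 40., 41., 43., 90.])
rates = binned_rate(spikes, t_start=0., t_stop=100., bin_size_ms=25.)
print(rates)           # [ 80. 120.   0.  40.]
print(np.mean(rates))  # 60.0, analogous to np.mean(dens_dict['rate'])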
Code Example #9
def compute_network_features(x, model_id=None, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    update_source_contexts(x, context)
    # TODO: Do you want this to be identical on all ranks in a subworld? You can use context.comm.bcast
    context.env.results_file_id = '%s_%s' % (context.interface.worker_id,
        datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))

    temporal_resolution = float(
        context.env.stimulus_config['Temporal Resolution'])
    time_bins = np.arange(context.t_start, context.t_stop, temporal_resolution)

    context.env.tstop = context.t_stop
    network.run(context.env, output=context.output_results, shutdown=False)

    pop_spike_dict = spikedata.get_env_spike_dict(context.env,
                                                  include_artificial=False)

    for pop_name in context.target_populations:

        n_active_local = 0
        mean_rate_sum_local = 0.
        spike_density_dict = spikedata.spike_density_estimate(
            pop_name, pop_spike_dict[pop_name], time_bins)
        for gid, dens_dict in utils.viewitems(spike_density_dict):
            mean_rate = np.mean(dens_dict['rate'])
            mean_rate_sum_local += mean_rate
            if mean_rate > 0.:
                n_active_local += 1
        mean_rate_sum = context.env.comm.allreduce(mean_rate_sum_local,
                                                   op=MPI.SUM)
        context.env.comm.barrier()

        n_local = len(context.env.cells[pop_name]) - len(
            context.env.artificial_cells[pop_name])
        n_total = context.env.comm.allreduce(n_local, op=MPI.SUM)
        n_active = context.env.comm.allreduce(n_active_local, op=MPI.SUM)
        context.env.comm.barrier()

        if n_active > 0:
            mean_rate = mean_rate_sum / n_active
        else:
            mean_rate = 0.

        if n_total > 0:
            fraction_active = n_active / n_total
        else:
            fraction_active = 0.

        mean_target_rate_dist_residual = None
        if n_active > 0:
            pop_name_is_in_network = pop_name in context.target_trj_rate_map_dict and \
                                     len(context.target_trj_rate_map_dict[pop_name]) > 0
            pop_name_is_in_network_list = context.env.comm.gather(
                pop_name_is_in_network, root=0)
            if context.env.comm.rank == 0:
                if any(pop_name_is_in_network_list):
                    pop_name_is_in_network = True
            pop_name_is_in_network = context.env.comm.bcast(
                pop_name_is_in_network, root=0)

            if pop_name_is_in_network:
                mean_target_rate_dist_residual = 0.
                target_rate_dist_residuals = []
                # empty dict on ranks that hold no target rate maps for this
                # population, so the loop and count below are always defined
                target_trj_rate_map_dict = context.target_trj_rate_map_dict.get(pop_name, {})
                for gid in target_trj_rate_map_dict:
                    target_trj_rate_map = target_trj_rate_map_dict[gid]
                    rate_map_len = len(target_trj_rate_map)
                    if gid in spike_density_dict:
                        residual = np.sum(
                            target_trj_rate_map -
                            spike_density_dict[gid]['rate'][:rate_map_len])
                    else:
                        residual = np.sum(target_trj_rate_map)
                    target_rate_dist_residuals.append(residual)
                residual_sum_local = np.sum(target_rate_dist_residuals)
                residual_sum = context.env.comm.allreduce(residual_sum_local,
                                                          op=MPI.SUM)
                # divide by the global number of target rate maps; residual_sum
                # is a global sum, so the local count would be wrong here
                n_target_rate_map = context.env.comm.allreduce(
                    len(target_trj_rate_map_dict), op=MPI.SUM)
                mean_target_rate_dist_residual = residual_sum / max(n_target_rate_map, 1)
            context.env.comm.barrier()

        if context.env.comm.rank == 0:
            context.logger.info(
                'population %s: n_active = %d n_total = %d mean rate = %s' %
                (pop_name, n_active, n_total, str(mean_rate)))

        results['%s firing rate' % pop_name] = mean_rate
        results['%s fraction active' % pop_name] = fraction_active
        if mean_target_rate_dist_residual is not None:
            results['%s target rate dist residual' %
                    pop_name] = mean_target_rate_dist_residual

    return results
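
The gather-then-broadcast sequence in this example computes a global logical OR: the population counts as present if any rank holds target rate-map entries for it. The same result can be obtained with a single collective; a sketch assuming mpi4py:

from mpi4py import MPI

comm = MPI.COMM_WORLD
has_local = (comm.rank % 2 == 0)                    # stand-in per-rank flag
has_global = comm.allreduce(has_local, op=MPI.LOR)  # True if True on any rank
print(comm.rank, has_global)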
Code Example #10
def compute_network_features(x, model_id=None, export=False):
    """

    :param x: array
    :param export: bool
    :return: dict
    """
    results = dict()
    update_source_contexts(x, context)
    # TODO: Do you want this to be identical on all ranks in a subworld? You can use context.comm.bcast
    context.env.results_file_id = '%s_%s' % (context.interface.worker_id,
        datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))

    temporal_resolution = float(
        context.env.stimulus_config['Temporal Resolution'])
    time_bins = np.arange(context.t_start, context.t_stop, temporal_resolution)

    context.env.tstop = context.t_stop
    network.run(context.env, output=context.output_results, shutdown=False)

    local_network_features = network_features(context.env,
                                              context.target_trj_rate_map_dict,
                                              context.t_start, context.t_stop,
                                              context.target_populations)

    for pop_name in context.target_populations:

        n_active_local = local_network_features[pop_name]['n_active']
        n_total_local = local_network_features[pop_name]['n_total']
        sum_mean_rate_local = local_network_features[pop_name]['sum_mean_rate']
        n_target_rate_map_local = local_network_features[pop_name][
            'n_target_rate_map']

        n_total = context.env.comm.allreduce(n_total_local, op=MPI.SUM)
        n_active = context.env.comm.allreduce(n_active_local, op=MPI.SUM)
        mean_rate_sum = context.env.comm.allreduce(sum_mean_rate_local,
                                                   op=MPI.SUM)
        n_target_rate_map = context.env.comm.allreduce(n_target_rate_map_local,
                                                       op=MPI.SUM)

        has_target_rate_map = n_target_rate_map > 0
        if has_target_rate_map:
            sum_target_rate_dist_residual_local = local_network_features[
                pop_name]['sum_target_rate_dist_residual']
            sum_target_rate_dist_residual = context.env.comm.allreduce(
                sum_target_rate_dist_residual_local, op=MPI.SUM)
        context.env.comm.barrier()

        if n_active > 0:
            mean_rate = mean_rate_sum / n_active
        else:
            mean_rate = 0.

        if n_total > 0:
            fraction_active = n_active / n_total
        else:
            fraction_active = 0.

        mean_target_rate_dist_residual = None
        if has_target_rate_map:
            mean_target_rate_dist_residual = sum_target_rate_dist_residual / n_target_rate_map

        if context.env.comm.rank == 0:
            context.logger.info(
                'population %s: n_active = %d n_total = %d mean rate = %s' %
                (pop_name, n_active, n_total, str(mean_rate)))

        results['%s fraction active' % pop_name] = fraction_active
        results['%s firing rate' % pop_name] = mean_rate
        if mean_target_rate_dist_residual is not None:
            results['%s target rate dist residual' %
                    pop_name] = mean_target_rate_dist_residual

    return results
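
Compared with example #9, this version pushes the per-cell bookkeeping into network_features and keeps only the MPI reductions here: each rank returns a dict of local sums, and one allreduce per field combines them. A sketch of that split, assuming mpi4py (the field names mirror the code above):

from mpi4py import MPI

comm = MPI.COMM_WORLD

def local_features():
    # stand-in: the real network_features scans locally simulated cells
    return {'n_active': comm.rank + 1, 'sum_mean_rate': 2.5 * (comm.rank + 1)}

local = local_features()
total = {key: comm.allreduce(val, op=MPI.SUM) for key, val in local.items()}
if comm.rank == 0:
    mean_rate = total['sum_mean_rate'] / total['n_active'] if total['n_active'] > 0 else 0.
    print('mean rate = %.3f' % mean_rate)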