Example #1
def main(path_to_config, parameter_name=None):
    # Read the meta-config file
    cfg = config.Config(path_to_config)

    # Get configuration related to the current pipeline stage
    stage = cfg.stage("gen_topo_db")

    # Fetch adjacency matrix and neuron infos according to configured locations/files
    adj_matrix, neuron_info = read_input(stage["inputs"])
    assert adj_matrix.shape[0] == len(neuron_info), "Neuron info and adjacency matrix have incompatible sizes!"
    topo_db_cfg = stage["config"]
    # Calculate tribes, i.e. gids of adjacent neurons
    tribes = calculate_tribes(adj_matrix, neuron_info)

    # Populate DB
    if parameter_name is None:  # Case 1: generate all columns at once
        DB = create_db_with_specified_columns(["tribe", "neuron_info"] + topo_db_cfg["parameters"],
                                              tribes, neuron_info, topo_db_cfg, adj_matrix)
        # Write output to where it's meant to go
        write_output(DB, stage["outputs"]["database"])
    else:  # Case 2: generate a single column at a time
        DB = create_db_with_specified_columns([parameter_name], tribes, neuron_info, topo_db_cfg, adj_matrix)
        suffix = "." + parameter_name.lower().replace(" ", "_")  # Use parameter name as suffix for output file
        # Write output to the 'other' directory for later merging
        if not os.path.exists(stage["other"]):
            os.makedirs(stage["other"])
        out_fn = os.path.join(stage["other"], os.path.split(stage["outputs"]["database"])[1]) + suffix
        write_output(DB, out_fn)
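All of these entry points rely on a small meta-config wrapper, config.Config, that exposes per-stage sub-configurations through a stage() accessor. The wrapper itself is not shown in the examples; the following is a minimal sketch of what it might look like, assuming a JSON meta-config with a top-level "stages" mapping (the file layout is an assumption, not something stated in the examples):

import json


class Config:
    """Hypothetical meta-config wrapper with a per-stage accessor."""

    def __init__(self, path_to_config):
        with open(path_to_config, "r") as fid:
            self._cfg = json.load(fid)

    def stage(self, stage_name):
        # Each stage entry is expected to hold "inputs", "outputs", "config",
        # and optionally "other" (a scratch directory used for later merging).
        return self._cfg["stages"][stage_name]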
Example #2
def main(path_to_config):
    # Read the meta-config file
    cfg = config.Config(path_to_config)
    # Get configuration related to the current pipeline stage
    stage = cfg.stage("sample_tribes")
    # Read the topological parameter database
    db = read_input(stage["inputs"])
    # Draw tribe samples from the database, as specified under the "Champions" config entry
    tribes = make_all_samples(db, stage["config"]["Champions"])
    write_output(tribes, stage["outputs"])
Example #3
def main(path_to_config):
    # Read the meta-config file
    cfg = config.Config(path_to_config)
    # Get configuration related to the current pipeline stage
    stage = cfg.stage("split_spikes")
    # Read spike trains and stimulus information
    spikes, stims = read_input(stage["inputs"])
    # Split the spike trains by stimulus, as configured (e.g. stim_duration_ms)
    split_spikes = execute_split(spikes, stims, stage["config"])
    write_output(split_spikes, stage["outputs"])
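The helpers read_input and write_output are provided elsewhere in the pipeline and are not shown. Purely as an illustration of the division of labour, and assuming the configured inputs form a name-to-path mapping, they could be thin wrappers around (de)serialization, for example via pickle; this is a hypothetical sketch, not the actual helpers:

import pickle


def read_input(input_cfg):
    # Illustrative only: load every configured input file, in the order listed.
    loaded = []
    for fn in input_cfg.values():
        with open(fn, "rb") as fid:
            loaded.append(pickle.load(fid))
    return tuple(loaded) if len(loaded) > 1 else loaded[0]


def write_output(data, out_fn):
    # Illustrative only: serialize the stage result to the configured path.
    with open(out_fn, "wb") as fid:
        pickle.dump(data, fid)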
Example #4
def main(path_to_config):
    # Read the meta-config file
    cfg = config.Config(path_to_config)
    # Get configuration related to the current pipeline stage
    stage = cfg.stage("struc_tribe_analysis")
    # Read the parameter database and the sampled tribes (chiefs and member gids)
    db, tribal_chiefs, tribal_gids = read_input(stage["inputs"])
    # Look up the configured structural parameters for each sampled tribe
    tribal_values = lookup_parameters(db, tribal_chiefs, tribal_gids,
                                      stage["config"])
    write_output(tribal_values, stage["outputs"])
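Since every stage exposes the same main(path_to_config) signature, the stages can be driven by a very small command-line wrapper. A hypothetical invocation, not taken from the pipeline itself:

import sys

if __name__ == "__main__":
    # Hypothetical wrapper: python run_stage.py /path/to/meta_config.json
    main(sys.argv[1])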
Example #5
def main(path_to_config, **kwargs):
    # Read the meta-config file
    cfg = config.Config(path_to_config)
    # Get configuration related to the current pipeline stage
    stage = cfg.stage("count_triads")
    # Read the sampled tribes, the adjacency matrix M and neuron info
    tribes, M, info = read_input(stage["inputs"])
    # Count triad motifs per tribe, forwarding any additional keyword arguments
    overexpression = count_triads_all(tribes, M, info, stage['config'],
                                      **kwargs)
    write_output(overexpression, stage["outputs"])
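count_triads_all itself is not shown. As a rough, self-contained illustration of the kind of motif counting involved (not the pipeline's implementation), this sketch counts fully reciprocally connected triads in a toy boolean adjacency matrix:

import numpy as np
from itertools import combinations

# Toy directed adjacency matrix with reciprocal edges 0-1, 0-2, 1-2, 2-3.
M = np.array([[0, 1, 1, 0],
              [1, 0, 1, 0],
              [1, 1, 0, 1],
              [0, 0, 1, 0]], dtype=bool)

# Count node triples in which all three pairs are reciprocally connected.
n_closed = sum(
    1 for i, j, k in combinations(range(M.shape[0]), 3)
    if M[i, j] and M[j, i] and M[j, k] and M[k, j] and M[i, k] and M[k, i]
)
print(n_closed)  # 1: nodes (0, 1, 2) form a reciprocally connected triad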
Example #6
def main(path_to_config, **kwargs):
    assert "index" not in kwargs, "Splitting by index not supported for topo_featurization!"
    # 1. Evaluate configuration.
    # Read the meta-config file
    cfg = config.Config(path_to_config)
    # Get configuration related to the current pipeline stage
    stage = cfg.stage("topological_featurization")
    topo_featurization_cfg = stage["config"]
    timebin = topo_featurization_cfg["time_bin"]
    parameter = topo_featurization_cfg[topo_featurization_cfg["topo_method"]]
    # number of time steps per trial
    stim_dur = cfg.stage('split_spikes')['config']['stim_duration_ms']
    n_t_bins = int(stim_dur / timebin)
    t_bins = np.arange(n_t_bins + 1) * timebin

    # 2. Read input data
    spiketrains, tribal_data, adj_matrix, neuron_info = read_input(
        stage["inputs"])
    tribes = tribal_data["gids"]
    tribes = tribes.filter(**kwargs)

    # 3. Analyze.
    # Create analysis function, given the spikes and time bins
    featurization_func = make_topo_features_for_tribes(
        spiketrains, t_bins, parameter, adj_matrix, GidConverter(neuron_info))
    # Unpooling with this function adds the additional condition "stimulus" (the stimulus identifier)
    tribes.unpool(
        func=featurization_func)  # shape of data: t_bins x 1 x trials
    # Now put the data into the expected format. First pooling along tribes (index).
    features_data = tribes.pool(
        ["index"],
        func=np.hstack)  # shape of data: t_bins x tribe_index x trials
    # Then pooling along different stimuli
    features_data = features_data.pool(["stimulus"],
                                       func=ordered_list,
                                       xy=True)
    # Conditions removed in the filter step used to need adding back here for the expected format.
    # Update: filter no longer destroys the associated dimension, so this is no longer necessary:
    # for k, v in kwargs.items():
    #     features_data.add_label(k, v)
    # The expected format also needs an "index" condition. We pooled that away above, so we simply label what remains with index=0.
    features_data.add_label("index", "0")
    # transform writes the data into individual HDF5 files and returns their paths.
    fn_data = features_data.transform(
        ["sampling", "specifier", "index"],  # data: str (path to .h5 file)
        func=make_write_h5(stage['other']),
        xy=True)
    # There is some additional info about the neuron samples that we want to inherit from the "tribes" structure.
    # So we add that info to fn_data
    fn_data.extended_map(lambda x, y: x.update(y[0]),
                         [get_idv_label(tribal_data)])
    write_output(TopoData.condition_collection_to_dict(fn_data),
                 stage["outputs"])
Example #7
def main(path_to_config, **kwargs):
    # Read the meta-config file
    cfg = config.Config(path_to_config)
    # Get configuration related to the current pipeline stage
    stage = cfg.stage("manifold_analysis")
    # Read trial spike trains, stimulus info and the sampled tribes
    spikes, stims, tribal_chiefs, tribal_gids = read_input(stage["inputs"])
    # Optionally restrict the analysis to a labelled subset of the samples
    if len(kwargs) > 0:
        tribal_gids = tribal_gids.filter(**kwargs)
        tribal_chiefs = tribal_chiefs.filter(**kwargs)
    res_lookup = transform_all(spikes, stims, tribal_chiefs, tribal_gids,
                               stage["config"], stage["other"])
    write_output(res_lookup, stage["outputs"])
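The **kwargs accepted by several entry points act as filters on the sampled tribes, so a single labelled subset can be processed in isolation (and the results merged later, analogous to the per-parameter case in Example #1). A hypothetical invocation; the label name "sampling" and its value mirror the conditions used in the featurization example and are assumptions, not pipeline API:

# Hypothetical invocations of the manifold_analysis entry point
main("/path/to/meta_config.json")                     # process all samples
main("/path/to/meta_config.json", sampling="radius")  # restrict to one labelled subset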
Example #8
def main(path_to_config):
    # Read the meta-config file
    cfg = config.Config(path_to_config)

    # Get configuration related to the current pipeline stage
    stage = cfg.stage("struc_tribe_analysis")
    topo_db_cfg = cfg.stage("gen_topo_db")["config"]
    stage_cfg = stage["config"]

    # Fetch adjacency matrix and neuron infos according to configured locations/files
    adj_matrix, neuron_info, tribes = read_input(stage["inputs"])
    assert adj_matrix.shape[0] == len(
        neuron_info
    ), "Neuron info and adjacency matrix have incompatible sizes!"

    param_dict = get_parameter_db_for_samples(tribes, neuron_info, topo_db_cfg,
                                              stage_cfg, adj_matrix)
    # Write output to where it's meant to go
    write_output(param_dict, stage["outputs"]["struc_parameters_volumetric"])