Example #1
def apply_chunkhandler_ssd():
    data = SuperSegmentationDataset(
        working_dir="/wholebrain/songbird/j0126/areaxfs_v6/")
    ssd_include = [491527, 1090051]
    chunk_size = 4000
    features = {'sv': 1, 'mi': 2, 'vc': 3, 'syn_ssv': 4}
    transform = clouds.Compose([clouds.Center()])

    ch = ChunkHandler(data=data,
                      sample_num=4000,
                      density_mode=False,
                      specific=False,
                      ctx_size=chunk_size,
                      obj_feats=features,
                      splitting_redundancy=1,
                      sampling=True,
                      transform=transform,
                      ssd_include=ssd_include,
                      ssd_labels='axoness',
                      label_mappings=[(3, 2), (4, 3), (5, 1), (6, 1)])

    save_path = os.path.expanduser('~/thesis/current_work/chunkhandler_tests/')
    ix = 0
    while ix < 500:
        sample1 = ch[ix]
        sample2 = ch[ix + 1]
        ix += 2
        sample = [sample1, sample2]
        with open(f'{save_path}{ix}.pkl', 'wb') as f:
            pickle.dump(sample, f)
    ch.terminate()
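For orientation, the pickled sample pairs written by the loop above could be read back as follows. This is a minimal sketch that only assumes the save_path and the two-samples-per-file layout used in the loop:

import os
import pickle

save_path = os.path.expanduser('~/thesis/current_work/chunkhandler_tests/')
for fname in sorted(os.listdir(save_path)):
    if not fname.endswith('.pkl'):
        continue
    with open(os.path.join(save_path, fname), 'rb') as f:
        # each file holds a list of two chunk samples
        sample1, sample2 = pickle.load(f)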
Example #2
def run_skeleton_generation(max_n_jobs=None):
    if max_n_jobs is None:
        max_n_jobs = global_params.NCORE_TOTAL * 2
    log = initialize_logging('skeleton_generation',
                             global_params.config.working_dir + '/logs/',
                             overwrite=False)
    ssd = SuperSegmentationDataset(
        working_dir=global_params.config.working_dir)

    # TODO: think about using create_sso_skeleton_fast if underlying RAG
    #  obeys spatial correctness (> 10x faster)
    # list of SSV IDs and SSD parameters need to be given to a single QSUB job
    multi_params = ssd.ssv_ids
    nb_svs_per_ssv = np.array(
        [len(ssd.mapping_dict[ssv_id]) for ssv_id in ssd.ssv_ids])
    ordering = np.argsort(nb_svs_per_ssv)
    multi_params = multi_params[ordering[::-1]]
    multi_params = chunkify(multi_params, max_n_jobs)

    # add ssd parameters
    multi_params = [(ssv_ids, ssd.version, ssd.version_dict, ssd.working_dir)
                    for ssv_ids in multi_params]

    # create SSV skeletons, requires SV skeletons!
    log.info('Starting skeleton generation of {} SSVs.'.format(len(
        ssd.ssv_ids)))
    qu.QSUB_script(multi_params,
                   "export_skeletons_new",
                   log=log,
                   n_max_co_processes=global_params.NCORE_TOTAL,
                   remove_jobfolder=True)

    log.info('Finished skeleton generation.')
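The argsort/chunkify pattern above sorts the SSV IDs by their number of supervoxels (largest first) before splitting them into batches, so the most expensive reconstructions are spread across the QSUB jobs early. Below is a minimal, self-contained sketch of the same idea with made-up sizes and a plain round-robin stand-in for syconn.handler.basics.chunkify (the real helper may split differently):

import numpy as np

def chunkify_stub(seq, n):
    # round-robin split into n batches; stand-in for syconn's chunkify
    return [seq[i::n] for i in range(n)]

ssv_ids = np.array([10, 11, 12, 13, 14, 15])
nb_svs = np.array([3, 50, 7, 120, 1, 9])    # hypothetical supervoxel counts per SSV
ordering = np.argsort(nb_svs)               # ascending by size
ssv_ids_sorted = ssv_ids[ordering[::-1]]    # descending: largest SSVs first
batches = chunkify_stub(ssv_ids_sorted, 2)  # e.g. two job batches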
Example #3
def run_skeleton_generation(max_n_jobs: Optional[int] = None,
                            map_myelin: Optional[bool] = None):
    """
    Generate the cell reconstruction skeletons.

    Args:
        max_n_jobs: Number of parallel jobs.
        map_myelin: Map myelin predictions at every ``skeleton['nodes']`` in
            :py:attr:`~syconn.reps.super_segmentation_object.SuperSegmentationObject.skeleton`.

    """
    if map_myelin is None:
        map_myelin = os.path.isdir(global_params.config.working_dir +
                                   '/knossosdatasets/myelin/')
    if max_n_jobs is None:
        max_n_jobs = global_params.NCORE_TOTAL * 2
    log = initialize_logging('skeleton_generation',
                             global_params.config.working_dir + '/logs/',
                             overwrite=False)
    ssd = SuperSegmentationDataset(
        working_dir=global_params.config.working_dir)

    # TODO: think about using create_sso_skeleton_fast if underlying RAG
    #  obeys spatial correctness (> 10x faster)
    # list of SSV IDs and SSD parameters need to be given to a single QSUB job
    multi_params = ssd.ssv_ids
    nb_svs_per_ssv = np.array(
        [len(ssd.mapping_dict[ssv_id]) for ssv_id in ssd.ssv_ids])
    ordering = np.argsort(nb_svs_per_ssv)
    multi_params = multi_params[ordering[::-1]]
    multi_params = chunkify(multi_params, max_n_jobs)

    # add ssd parameters
    multi_params = [(ssv_ids, ssd.version, ssd.version_dict, ssd.working_dir,
                     map_myelin) for ssv_ids in multi_params]

    # create SSV skeletons, requires SV skeletons!
    log.info('Starting skeleton generation of {} SSVs.'.format(len(
        ssd.ssv_ids)))
    qu.QSUB_script(multi_params,
                   "export_skeletons_new",
                   log=log,
                   n_max_co_processes=global_params.NCORE_TOTAL,
                   remove_jobfolder=True,
                   n_cores=2)

    log.info('Finished skeleton generation.')
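A short usage sketch for the function above, assuming the SyConn working directory is already configured via global_params; passing the flag explicitly overrides the directory-based default for map_myelin described in the docstring:

# force myelin mapping regardless of whether knossosdatasets/myelin/ exists
run_skeleton_generation(max_n_jobs=None, map_myelin=True)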
Example #4
def map_myelin_global(max_n_jobs: Optional[int] = None):
    """
    Stand-alone myelin mapping to cell reconstruction skeletons. See kwarg ``map_myelin``
    in :func:`run_skeleton_generation` for a mapping right after skeleton generation.

    Args:
        max_n_jobs: Number of parallel jobs.

    """
    if max_n_jobs is None:
        max_n_jobs = global_params.NCORE_TOTAL * 2
    log = initialize_logging('myelin_mapping',
                             global_params.config.working_dir + '/logs/',
                             overwrite=False)
    ssd = SuperSegmentationDataset(
        working_dir=global_params.config.working_dir)

    # TODO: think about using create_sso_skeleton_fast if underlying RAG
    #  obeys spatial correctness (> 10x faster)
    # list of SSV IDs and SSD parameters need to be given to a single QSUB job
    multi_params = ssd.ssv_ids
    nb_svs_per_ssv = np.array(
        [len(ssd.mapping_dict[ssv_id]) for ssv_id in ssd.ssv_ids])
    ordering = np.argsort(nb_svs_per_ssv)
    multi_params = multi_params[ordering[::-1]]
    multi_params = chunkify(multi_params, max_n_jobs)

    # add ssd parameters
    multi_params = [(ssv_ids, ssd.version, ssd.version_dict, ssd.working_dir)
                    for ssv_ids in multi_params]

    # map myelin predictions onto the existing SSV skeletons
    log.info('Starting myelin mapping of {} SSVs.'.format(len(ssd.ssv_ids)))
    qu.QSUB_script(multi_params,
                   "map_myelin2skel",
                   log=log,
                   n_max_co_processes=global_params.NCORE_TOTAL,
                   remove_jobfolder=True,
                   n_cores=2)

    log.info('Finished myelin mapping.')
Example #5
    7422544, 7724119, 4536922, 14339175, 13124207, 23760403, 1270390, 19767416,
    8367753, 11913868, 17016975, 31424660, 29199509, 5163177, 7615858, 7854770,
    8640627, 28141748, 24417974, 3879610, 27078852, 11259589, 14338767,
    31880915, 1055956, 26933461, 4318936, 27210457, 14969039, 4391644, 1720542,
    10782758, 15182572, 16583920, 11093755, 903933, 8647422, 24795273,
    21414657, 19563778, 18185991, 24096010, 10789134, 571151, 13788944,
    15650584, 26649882, 9796385, 12238629, 27470118, 1000234, 31888172,
    26014513, 8533299, 11470473, 19753798, 4156235, 9639760, 19200342, 2855767,
    1169753, 14965595, 29172062, 4533087, 23397730, 15432553, 1993580, 526189,
    8886129, 31974770, 15919989, 6183805, 17217918, 23020416, 25716101,
    7945095, 8425865, 10788746, 28798642, 14152592, 5868433, 13460392,
    10819502, 24601007, 18280371, 22335412, 1993653, 9941435, 13979071,
    1357193, 19030985, 16134612, 31887834, 31697388, 29549021, 27286495,
    18345954, 27112938, 16272875, 33843692, 2945519, 3554298
]

if __name__ == "__main__":
    ssd_new = SuperSegmentationDataset(
        working_dir="/wholebrain/scratch/areaxfs3/", version="gliagt")
    # generate SSVs from single SVs:
    for k in glia_ids:
        ssd_new.mapping_dict[k] = [k]
    # save SSVs
    ssd_new.save_dataset_deep()
    gt_dir = "/wholebrain/scratch/areaxfs3//ssv_gliagt/"
    write_obj2pkl(gt_dir + "gliagt_labels.pkl", {k: 1 for k in glia_ids})
    # Neuron GT is given by axon GT ...
    for ssv in ssd_new.ssvs:
        ssv.load_attr_dict()
        ssv._render_rawviews(nb_views=2, add_cellobjects=False)
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import os
from syconn.mp import batchjob_utils as qu
from syconn.mp.mp_utils import start_multiprocess
from syconn.reps.super_segmentation_dataset import SuperSegmentationDataset
from syconn.handler.basics import chunkify
from syconn.proc.mapping import map_glia_fraction
import numpy as np
import itertools

if __name__ == "__main__":
    script_folder = os.path.dirname(
        os.path.abspath(__file__)) + "/../../syconn/QSUB_scripts/"
    print(script_folder)
    ssds = SuperSegmentationDataset(
        working_dir="/wholebrain/scratch/areaxfs3/", version="0")
    multi_params = ssds.ssv_ids
    np.random.shuffle(multi_params)
    multi_params = chunkify(multi_params, 2000)
    path_to_out = qu.QSUB_script(
        multi_params,
        "render_sso_ortho",  #"export_skeletons_new", #"map_viewaxoness2skel",
        n_max_co_processes=100,
        pe="openmp",
        queue=None,
        script_folder=script_folder,
        suffix="",
        n_cores=1)