Example #1
from multiprocessing import Queue

import numpy as np
import networkx as nx

from syconn import global_params
from syconn.reps.super_segmentation import SuperSegmentationObject


def _run_huge_ssv_render_worker(q: Queue, q_out: Queue):
    """
    Helper function of :func:`~run_glia_rendering`.

    Args:
        q: Input queue.
        q_out: Output queue.

    """
    while True:
        inp = q.get()
        if inp == -1:
            break
        kk, g, version = inp
        # Create SSV object
        sv_ixs = np.sort(list(g.nodes()))
        sso = SuperSegmentationObject(sv_ixs[0], working_dir=global_params.config.working_dir,
                                      version=version, create=False, sv_ids=sv_ixs)
        # nodes of sso._rag need to be SV
        new_G = nx.Graph()
        for e in g.edges():
            new_G.add_edge(sso.get_seg_obj("sv", e[0]),
                           sso.get_seg_obj("sv", e[1]))
        sso._rag = new_G
        sso.render_views(add_cellobjects=False, cellobjects_only=False,
                         skip_indexviews=True, woglia=False, overwrite=True,
                         qsub_co_jobs=global_params.config.ngpu_total)
        q_out.put(0)
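
The worker above consumes (kk, g, version) work items from the input queue until it reads the -1 sentinel, reporting each finished item on the output queue. A minimal driver sketch of that protocol, assuming a list of connected-component graphs (the names drive_big_ssv_rendering, ccs and n_workers are hypothetical, not from the original code):

from multiprocessing import Process

def drive_big_ssv_rendering(ccs, version, n_workers=2):
    q_in, q_out = Queue(), Queue()
    for kk, g in enumerate(ccs):
        q_in.put((kk, g, version))  # one work item per connected component
    for _ in range(n_workers):
        q_in.put(-1)  # one sentinel per worker so every process terminates
    procs = [Process(target=_run_huge_ssv_render_worker, args=(q_in, q_out))
             for _ in range(n_workers)]
    for p in procs:
        p.start()
    for _ in ccs:
        q_out.get()  # block until every work item has reported back
    for p in procs:
        p.join()
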
Example #2
def _run_huge_ssv_render_worker(q, q_out):
    while True:
        inp = q.get()
        if inp == -1:
            break
        kk, g, version = inp
        # Create SSV object
        sv_ixs = np.sort(list(g.nodes()))
        sso = SuperSegmentationObject(
            sv_ixs[0],
            working_dir=global_params.config.working_dir,
            version=version,
            create=False,
            sv_ids=sv_ixs)
        # nodes of sso._rag need to be SV
        new_G = nx.Graph()
        for e in g.edges():
            new_G.add_edge(sso.get_seg_obj("sv", e[0]),
                           sso.get_seg_obj("sv", e[1]))
        sso._rag = new_G
        sso.render_views(add_cellobjects=False,
                         cellobjects_only=False,
                         skip_indexviews=True,
                         woglia=False,
                         overwrite=True,
                         qsub_co_jobs=global_params.NGPU_TOTAL)
        q_out.put(0)
Example #3
def generate_label_views(kzip_path,
                         ssd_version,
                         gt_type,
                         n_voting=40,
                         nb_views=2,
                         ws=(256, 128),
                         comp_window=8e3,
                         initial_run=False,
                         out_path=None,
                         verbose=False):
    """

    Parameters
    ----------
    kzip_path : str
    gt_type :  str
    ssd_version : str
    n_voting : int
        Number of collected nodes during BFS for majority vote (label smoothing)
    nb_views : int
    ws: Tuple[int]
    comp_window : float
    initial_run : bool
        if True, will copy SSV from default SSD to SSD with version=gt_type
    out_path : str
        If given, export mesh colored accoring to GT labels
    verbose : bool
        Print additional information

    Returns
    -------
    Tuple[np.array]
        raw, label and index views
    """
    assert gt_type in ["axgt",
                       "spgt"], "Currently only spine and axon GT is supported"
    n_labels = 5 if gt_type == "axgt" else 4
    palette = generate_palette(n_labels)
    sso_id = int(re.findall(r"/(\d+).", kzip_path)[0])
    sso = SuperSegmentationObject(sso_id, version=ssd_version)
    if initial_run:  # use default SSD version
        orig_sso = SuperSegmentationObject(sso_id)
        orig_sso.copy2dir(dest_dir=sso.ssv_dir, safe=False)
    if not sso.attr_dict_exists:
        msg = 'Attribute dict of original SSV was not copied successfully ' \
              'to target SSD.'
        raise ValueError(msg)
    sso.load_attr_dict()
    indices, vertices, normals = sso.mesh

    # reshape flat vertex array to (N, 3)
    vertices = vertices.reshape((-1, 3))

    # load skeleton
    skel = load_skeleton(kzip_path)
    if len(skel) == 1:
        skel = list(skel.values())[0]
    else:
        skel = skel["skeleton"]
    skel_nodes = list(skel.getNodes())

    node_coords = np.array(
        [n.getCoordinate() * sso.scaling for n in skel_nodes])
    node_labels = np.array(
        [str2intconverter(n.getComment(), gt_type) for n in skel_nodes],
        dtype=int)
    node_coords = node_coords[(node_labels != -1)]
    node_labels = node_labels[(node_labels != -1)]

    # create KD tree from skeleton node coordinates
    tree = KDTree(node_coords)
    # transfer labels from skeleton to mesh
    dist, ind = tree.query(vertices, k=1)
    vertex_labels = node_labels[ind]  # retrieving labels of vertices
    if n_voting > 0:
        vertex_labels = bfs_smoothing(vertices,
                                      vertex_labels,
                                      n_voting=n_voting)
    color_array = palette[vertex_labels].astype(np.float32) / 255.

    if out_path is not None:
        if gt_type == 'spgt':  # 4 spine labels + background
            colors = [[0.6, 0.6, 0.6, 1], [0.9, 0.2, 0.2, 1],
                      [0.1, 0.1, 0.1, 1], [0.05, 0.6, 0.6, 1],
                      [0.9, 0.9, 0.9, 1]]
        else:  # dendrite, axon, soma, bouton, terminal, background
            colors = [[0.6, 0.6, 0.6, 1], [0.9, 0.2, 0.2, 1],
                      [0.1, 0.1, 0.1, 1], [0.05, 0.6, 0.6, 1],
                      [0.6, 0.05, 0.05, 1], [0.9, 0.9, 0.9, 1]]
        colors = (np.array(colors) * 255).astype(np.uint8)
        # TODO: check why only the first element is used, maybe `colors`
        #  introduces an additional axis
        color_array_mesh = colors[vertex_labels][:, 0]
        write_mesh2kzip("{}/sso_{}_gtlabels.k.zip".format(out_path, sso.id),
                        sso.mesh[0],
                        sso.mesh[1],
                        sso.mesh[2],
                        color_array_mesh,
                        ply_fname="gtlabels.ply")

    # Initializing mesh object with ground truth coloring
    mo = MeshObject("neuron", indices, vertices, color=color_array)

    # use downsampled locations for view locations, only if they are close to a
    # labeled skeleton node
    locs = generate_rendering_locs(vertices, comp_window / 6)  # 6 rendering locations per comp. window
    dist, ind = tree.query(locs)
    locs = locs[dist[:, 0] < 2000]  # TODO: add distance threshold as parameter

    # # # To get view locations
    # dest_folder = os.path.expanduser("~") + \
    #               "/spiness_skels/{}/view_imgs_{}/".format(sso_id, n_voting)
    # if not os.path.isdir(dest_folder):
    #     os.makedirs(dest_folder)
    # loc_text = ''
    # for i, c in enumerate(locs):
    #     loc_text += str(i) + "\t" + str((c / np.array([10, 10, 20])).astype(int)) + '\n'  # rescaling to the voxel grid
    # with open("{}/viewcoords.txt".format(dest_folder), "w") as f:
    #     f.write(loc_text)
    # # # DEBUG PART END
    label_views, rot_mat = _render_mesh_coords(locs,
                                               mo,
                                               depth_map=False,
                                               return_rot_matrices=True,
                                               ws=ws,
                                               smooth_shade=False,
                                               nb_views=nb_views,
                                               comp_window=comp_window,
                                               verbose=verbose)
    label_views = remap_rgb_labelviews(label_views[..., :3], palette)[:, None]
    # TODO: dropping the alpha channel ([..., :3]) means labels >= 256**3
    #  cannot be remapped correctly
    index_views = render_sso_coords_index_views(sso,
                                                locs,
                                                rot_mat=rot_mat,
                                                verbose=verbose,
                                                nb_views=nb_views,
                                                ws=ws,
                                                comp_window=comp_window)
    raw_views = render_sso_coords(sso,
                                  locs,
                                  nb_views=nb_views,
                                  ws=ws,
                                  comp_window=comp_window,
                                  verbose=verbose,
                                  rot_mat=rot_mat)
    return raw_views, label_views, index_views
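
The heart of the label transfer above is a nearest-neighbour query: each mesh vertex inherits the label of its closest skeleton node. A self-contained sketch of just that step, here using scipy's cKDTree in place of the KDTree used above (same idea, k=1 query):

import numpy as np
from scipy.spatial import cKDTree

node_coords = np.array([[0., 0., 0.], [100., 0., 0.]])  # two labeled skeleton nodes
node_labels = np.array([0, 1])
vertices = np.array([[10., 5., 0.], [90., -3., 2.]])    # mesh vertices

tree = cKDTree(node_coords)
dist, ind = tree.query(vertices, k=1)  # nearest skeleton node per vertex
vertex_labels = node_labels[ind]       # -> array([0, 1])
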
Example #4
import sys

try:
    import cPickle as pkl
except ImportError:
    import pickle as pkl
from syconn.reps.super_segmentation import SuperSegmentationObject
from syconn.proc.sd_proc import sos_dict_fact, init_sos
from syconn.proc.rendering import render_sso_coords_generic
from syconn import global_params

path_storage_file = sys.argv[1]
path_out_file = sys.argv[2]

with open(path_storage_file, 'rb') as f:
    args = []
    while True:
        try:
            args.append(pkl.load(f))
        except EOFError:
            break
coords = args[0]
sso_kwargs = args[1]
working_dir = sso_kwargs['working_dir']
global_params.wd = working_dir
kwargs = args[2]
file_store_number = args[3]
sso = SuperSegmentationObject(**sso_kwargs)
file = 'file' + str(file_store_number)
views = render_sso_coords_generic(sso, working_dir, coords, **kwargs)

with open(path_out_file, "wb") as f:
    pkl.dump(views, f)
Example #5
def _run_neuron_rendering_big_helper(max_n_jobs=None):
    """Render views of SSVs with a huge number of SVs on the whole cluster."""
    if max_n_jobs is None:
        max_n_jobs = global_params.NNODES_TOTAL * 2
    log = initialize_logging('neuron_view_rendering_big',
                             global_params.config.working_dir + '/logs/')
    # view rendering prior to glia removal, choose SSD accordingly
    ssd = SuperSegmentationDataset(
        working_dir=global_params.config.working_dir)

    #  TODO: use actual size criteria, e.g. number of sampling locations
    nb_svs_per_ssv = np.array(
        [len(ssd.mapping_dict[ssv_id]) for ssv_id in ssd.ssv_ids])

    # render normal size SSVs
    size_mask = nb_svs_per_ssv <= global_params.RENDERING_MAX_NB_SV
    if 'example' in global_params.config.working_dir and np.sum(
            ~size_mask) == 0:
        # generate at least one (artificial) huge SSV
        size_mask[:1] = False
        size_mask[1:] = True
    if np.sum(~size_mask) > 0:
        log.info('{} huge SSVs will be rendered on the cluster.'.format(
            np.sum(~size_mask)))
        # identify huge SSVs and process them individually on whole cluster
        big_ssv = ssd.ssv_ids[~size_mask]

        # TODO: Currently high memory consumption when rendering index views!
        #  Take into account when multiprocessing.
        # TODO: refactor `render_sso_coords_multiprocessing` and then use
        #  `QSUB_render_views_egl` here!
        # render normal views only
        n_cores = global_params.NCORES_PER_NODE // global_params.NGPUS_PER_NODE
        n_parallel_jobs = global_params.NGPU_TOTAL
        render_kwargs = dict(add_cellobjects=True,
                             woglia=True,
                             overwrite=True,
                             skip_indexviews=True)
        sso_kwargs = dict(working_dir=global_params.config.working_dir,
                          nb_cpus=n_cores,
                          enable_locking_so=False,
                          enable_locking=False)

        # sort ssv ids according to their number of SVs (descending)
        ordering = np.argsort(nb_svs_per_ssv[~size_mask])
        multi_params = big_ssv[ordering[::-1]]
        multi_params = chunkify(multi_params, max_n_jobs)
        # list of SSV IDs and SSD parameters need to be given to a single QSUB job
        multi_params = [(ixs, sso_kwargs, render_kwargs)
                        for ixs in multi_params]
        path_to_out = qu.QSUB_script(multi_params,
                                     "render_views",
                                     n_max_co_processes=n_parallel_jobs,
                                     log=log,
                                     additional_flags="--gres=gpu:1",
                                     n_cores=n_cores,
                                     remove_jobfolder=True)
        # render index-views only
        for ssv_id in big_ssv:
            ssv = SuperSegmentationObject(
                ssv_id, working_dir=global_params.config.working_dir)
            render_sso_coords_multiprocessing(ssv,
                                              global_params.config.working_dir,
                                              verbose=True,
                                              return_views=False,
                                              disable_batchjob=False,
                                              n_jobs=n_parallel_jobs,
                                              n_cores=n_cores,
                                              render_indexviews=True)
        log.info('Finished rendering of {}/{} SSVs.'.format(
            len(big_ssv), len(nb_svs_per_ssv)))
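
The helper above depends on chunkify to split the sorted SSV IDs into at most max_n_jobs batches, one per QSUB job. A sketch of the splitting semantics assumed here (syconn's actual chunkify may differ in detail; chunkify_sketch is a hypothetical name):

import numpy as np

def chunkify_sketch(lst, nb_chunks):
    # split `lst` into up to `nb_chunks` contiguous, roughly equal parts
    return [list(sub) for sub in np.array_split(np.asarray(lst, dtype=object),
                                                nb_chunks) if len(sub) > 0]

print(chunkify_sketch([5, 4, 3, 2, 1], 2))  # -> [[5, 4, 3], [2, 1]]
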
Example #6
import sys

try:
    import cPickle as pkl
except ImportError:
    import pickle as pkl

from syconn import global_params
from syconn.reps.super_segmentation import SuperSegmentationObject
# NB: exact import path of render_sso_coords_multiprocessing is assumed here
from syconn.proc.rendering import render_sso_coords_multiprocessing

path_storage_file = sys.argv[1]
path_out_file = sys.argv[2]

with open(path_storage_file, 'rb') as f:
    args = []
    while True:
        try:
            args.append(pkl.load(f))
        except EOFError:
            break

ch = args[0]
wd = args[1]
render_kwargs = args[2]
ssvs_large = []
ssvs_small = []
for ssv_ix in ch:
    sso = SuperSegmentationObject(ssv_ix,
                                  working_dir=wd,
                                  enable_locking_so=True)
    if len(sso.sample_locations()) > 1000:
        ssvs_large.append(sso)
    else:
        ssvs_small.append(sso)

# render huge SSVs in parallel, multiple jobs per SSV
n_parallel_jobs = global_params.NCORES_PER_NODE // global_params.NGPUS_PER_NODE
for ssv in ssvs_large:
    render_sso_coords_multiprocessing(ssv,
                                      wd,
                                      n_parallel_jobs,
                                      render_indexviews=False,
                                      render_kwargs=render_kwargs)
    render_sso_coords_multiprocessing(ssv,
                                      wd,
                                      n_parallel_jobs,
                                      render_indexviews=True,
                                      render_kwargs=render_kwargs)
Example #7
def run_glia_rendering():
    """Render SSV views required for glia removal, based on the initial RAG."""
    log = initialize_logging('glia_view_rendering',
                             global_params.config.working_dir + '/logs/',
                             overwrite=False)
    np.random.seed(0)

    # view rendering prior to glia removal, choose SSD accordingly
    version = "tmp"  # glia removal is based on the initial RAG and does not require explicitly stored SSVs

    G = nx.Graph()  # TODO: Add factory method for initial RAG
    with open(global_params.config.init_rag_path, 'r') as f:
        for l in f.readlines():
            edges = [int(v) for v in re.findall(r'(\d+)', l)]
            G.add_edge(edges[0], edges[1])

    all_sv_ids_in_rag = np.array(list(G.nodes()), dtype=np.uint64)
    log.info("Found {} SVs in initial RAG.".format(len(all_sv_ids_in_rag)))

    # add single SV connected components to initial graph
    sd = SegmentationDataset(obj_type='sv',
                             working_dir=global_params.config.working_dir)
    sv_ids = sd.ids
    diff = np.array(list(set(sv_ids).difference(set(all_sv_ids_in_rag))))
    log.info('Found {} single connected component SVs which were missing'
             ' in initial RAG.'.format(len(diff)))

    for ix in diff:
        G.add_node(ix)

    all_sv_ids_in_rag = np.array(list(G.nodes()), dtype=np.uint64)
    log.info("Found {} SVs in initial RAG after adding size-one connected "
             "components. Writing kml text file".format(
                 len(all_sv_ids_in_rag)))

    # write out readable format for 'glia_prediction.py'
    # nx.connected_component_subgraphs was removed in networkx 2.4
    ccs = [list(cc) for cc in nx.connected_components(G)]
    kml = knossos_ml_from_ccs([np.sort(cc)[0] for cc in ccs], ccs)
    with open(global_params.config.working_dir + "/initial_rag.txt", 'w') as f:
        f.write(kml)

    # generate parameter for view rendering of individual SSV
    log.info("Starting view rendering.")
    multi_params = []
    for cc in (G.subgraph(c).copy() for c in nx.connected_components(G)):
        multi_params.append(cc)
    multi_params = np.array(multi_params, dtype=object)

    # identify huge SSVs and process them individually on whole cluster
    nb_svs = np.array([g.number_of_nodes() for g in multi_params])
    big_ssv = multi_params[nb_svs > RENDERING_MAX_NB_SV]

    for kk, g in enumerate(big_ssv[::-1]):
        # Create SSV object
        sv_ixs = np.sort(list(g.nodes()))
        log.info("Processing SSV [{}/{}] with {} SVs on whole cluster.".format(
            kk + 1, len(big_ssv), len(sv_ixs)))
        sso = SuperSegmentationObject(
            sv_ixs[0],
            working_dir=global_params.config.working_dir,
            version=version,
            create=False,
            sv_ids=sv_ixs)
        # nodes of sso._rag need to be SV
        new_G = nx.Graph()
        for e in g.edges():
            new_G.add_edge(sso.get_seg_obj("sv", e[0]),
                           sso.get_seg_obj("sv", e[1]))
        sso._rag = new_G
        sso.render_views(add_cellobjects=False,
                         cellobjects_only=False,
                         skip_indexviews=True,
                         woglia=False,
                         qsub_pe="openmp",
                         overwrite=True,
                         qsub_co_jobs=global_params.NCORE_TOTAL)

    # render small SSV without overhead and single cpus on whole cluster
    multi_params = multi_params[nb_svs <= RENDERING_MAX_NB_SV]
    np.random.shuffle(multi_params)
    multi_params = chunkify(multi_params, 2000)

    # list of SSV IDs and SSD parameters need to be given to a single QSUB job
    multi_params = [(ixs, global_params.config.working_dir, version)
                    for ixs in multi_params]
    path_to_out = qu.QSUB_script(multi_params,
                                 "render_views_glia_removal",
                                 n_max_co_processes=global_params.NCORE_TOTAL,
                                 pe="openmp",
                                 queue=None,
                                 script_folder=None,
                                 suffix="")

    # check completeness
    sd = SegmentationDataset("sv",
                             working_dir=global_params.config.working_dir)
    res = find_missing_sv_views(sd, woglia=False, n_cores=10)
    missing_not_contained_in_rag = []
    missing_contained_in_rag = []
    for el in res:
        if el not in all_sv_ids_in_rag:
            missing_not_contained_in_rag.append(el)
        else:
            missing_contained_in_rag.append(el)
    if len(missing_not_contained_in_rag):
        log.info("%d SVs were not rendered but also not part of the initial"
                 "RAG: {}".format(missing_not_contained_in_rag))
    if len(missing_contained_in_rag) != 0:
        msg = "Not all SSVs were rendered completely! Missing:\n" \
              "{}".format(missing_contained_in_rag)
        log.error(msg)
        raise RuntimeError(msg)
Example #8
import sys

try:
    import cPickle as pkl
except ImportError:
    import pickle as pkl

import numpy as np
import networkx as nx

from syconn import global_params
from syconn.reps.super_segmentation import SuperSegmentationObject
from syconn.proc.sd_proc import sos_dict_fact, init_sos

path_storage_file = sys.argv[1]
path_out_file = sys.argv[2]

with open(path_storage_file, 'rb') as f:
    args = []
    while True:
        try:
            args.append(pkl.load(f))
        except EOFError:
            break

scaling = global_params.config.entries['Dataset']['scaling']
for cc in args:
    svixs = list(cc.nodes())
    cc_ix = np.min(svixs)
    sso = SuperSegmentationObject(cc_ix, version="gliaremoval", nb_cpus=2,
                                  working_dir=global_params.config.working_dir,
                                  create=True, scaling=scaling,
                                  sv_ids=svixs)
    so_cc = nx.Graph()
    for e in cc.edges():
        so_cc.add_edge(sso.get_seg_obj("sv", e[0]),
                       sso.get_seg_obj("sv", e[1]))
    sso._rag = so_cc
    sd = sos_dict_fact(svixs)
    sos = init_sos(sd)
    sso._objects["sv"] = sos
    try:
        sso.gliasplit(verbose=False)
    except Exception as e:
        print("\n-------------------------------------\n"
              "Splitting of SSV %d failed with %s."
              "\n-------------------------------------\n" % (cc_ix, e))
Example #9
import sys

try:
    import cPickle as pkl
except ImportError:
    import pickle as pkl
from syconn.reps.super_segmentation import SuperSegmentationObject

path_storage_file = sys.argv[1]
path_out_file = sys.argv[2]

with open(path_storage_file, 'rb') as f:
    args = []
    while True:
        try:
            args.append(pkl.load(f))
        except EOFError:
            break

ch = args[0]
wd = args[1]
for ssv_ix in ch:
    sso = SuperSegmentationObject(ssv_ix,
                                  working_dir=wd,
                                  enable_locking_so=True)
    sso.load_attr_dict()
    sso.render_views(add_cellobjects=True, woglia=True, overwrite=True)

with open(path_out_file, "wb") as f:
    pkl.dump("0", f)
Example #10
import sys

try:
    import cPickle as pkl
except ImportError:
    import pickle as pkl

import numpy as np
import networkx as nx

from syconn.reps.super_segmentation import SuperSegmentationObject

path_storage_file = sys.argv[1]
path_out_file = sys.argv[2]

with open(path_storage_file, 'rb') as f:
    args = []
    while True:
        try:
            args.append(pkl.load(f))
        except EOFError:
            break

ch = args[0]
wd = args[1]
version = args[2]
for g in ch:
    # only generate SSV temporarily but do not write it to FS
    # corresponding SVs are parsed explicitly ('sv_ids=sv_ixs')
    sv_ixs = np.sort(list(g.nodes()))
    sso = SuperSegmentationObject(sv_ixs[0], working_dir=wd, version=version,
                                  create=False, sv_ids=sv_ixs,
                                  enable_locking_so=True)
    sso.load_attr_dict()
    # nodes of sso._rag need to be SV
    new_G = nx.Graph()
    for e in g.edges():
        new_G.add_edge(sso.get_seg_obj("sv", e[0]),
                       sso.get_seg_obj("sv", e[1]))
    sso._rag = new_G
    sso.render_views(add_cellobjects=False, woglia=False, overwrite=True,
                     skip_indexviews=True)

with open(path_out_file, "wb") as f:
    pkl.dump("0", f)
Example #11
import sys

try:
    import cPickle as pkl
except ImportError:
    import pickle as pkl

import numpy as np
import networkx as nx

from syconn.reps.super_segmentation import SuperSegmentationObject

path_storage_file = sys.argv[1]
path_out_file = sys.argv[2]

with open(path_storage_file, 'rb') as f:
    args = []
    while True:
        try:
            args.append(pkl.load(f))
        except EOFError:
            break

ch = args[0]
wd = args[1]
version = args[2]

ssvs_large = []
ssvs_small = []
for g in ch:
    # only generate SSV temporarily but do not write it to FS
    # corresponding SVs are parsed explicitly ('sv_ids=sv_ixs')
    sv_ixs = np.sort(list(g.nodes()))
    sso = SuperSegmentationObject(sv_ixs[0],
                                  working_dir=wd,
                                  version=version,
                                  create=False,
                                  sv_ids=sv_ixs,
                                  enable_locking_so=True)
    # nodes of sso._rag need to be SV
    new_G = nx.Graph()
    for e in g.edges():
        new_G.add_edge(sso.get_seg_obj("sv", e[0]),
                       sso.get_seg_obj("sv", e[1]))
    sso._rag = new_G
    if len(sso.sample_locations()) > np.inf:  # always False: huge-SSV branch disabled
        # TODO: Currently does not work with `version=tmp`. Add as parameter to global_params.py
        ssvs_large.append(sso)
    else:
        ssvs_small.append(sso)

# render huge SSVs in parallel, multiple jobs per SSV, use more threads than cores
Example #12
import sys

try:
    import cPickle as pkl
except ImportError:
    import pickle as pkl

from syconn import global_params
from syconn.reps.super_segmentation import SuperSegmentationObject
# NB: exact import path of render_sso_coords_multiprocessing is assumed here
from syconn.proc.rendering import render_sso_coords_multiprocessing

path_storage_file = sys.argv[1]
path_out_file = sys.argv[2]

with open(path_storage_file, 'rb') as f:
    args = []
    while True:
        try:
            args.append(pkl.load(f))
        except EOFError:
            break

ch = args[0]
wd = args[1]
if len(args) > 2:
    render_kwargs = args[2]
else:
    render_kwargs = dict(add_cellobjects=True, woglia=True, overwrite=True)

# render huge SSVs in parallel, multiple jobs per SSV
n_parallel_jobs = global_params.NCORES_PER_NODE  # // global_params.NGPUS_PER_NODE

ssvs_large = []
ssvs_small = []
for ssv_ix in ch:
    # locking is explicitly enabled when saving SV views, no need to enable it for reading data
    sso = SuperSegmentationObject(ssv_ix, working_dir=wd,
                                  enable_locking_so=False)
    if len(sso.sample_locations()) > 1e3:  # TODO: add as parameter to global_params.py
        ssvs_large.append(sso)
    else:
        ssvs_small.append(sso)

# this job is always started using half of the node and with one GPU
for ssv in ssvs_large:
    render_sso_coords_multiprocessing(ssv, wd, n_parallel_jobs,
                                      render_indexviews=False, return_views=False,
                                      render_kwargs=render_kwargs)

    render_sso_coords_multiprocessing(ssv, wd, n_parallel_jobs,
                                      render_indexviews=True, return_views=False,
                                      render_kwargs=render_kwargs)
Example #13
import sys

try:
    import cPickle as pkl
except ImportError:
    import pickle as pkl

import numpy as np

from syconn.reps.super_segmentation import SuperSegmentationObject

path_storage_file = sys.argv[1]
path_out_file = sys.argv[2]

with open(path_storage_file, 'rb') as f:
    args = []
    while True:
        try:
            args.append(pkl.load(f))
        except EOFError:
            break

ch = args[0]
sso_kwargs = args[1]
if isinstance(sso_kwargs, str):  # only the working directory is given
    sso_kwargs = dict(working_dir=sso_kwargs, enable_locking_so=False)
if len(args) == 3:
    render_kwargs = args[2]
else:
    render_kwargs = dict(add_cellobjects=True, woglia=True, overwrite=True)
for ssv_ix in ch:
    if not np.isscalar(ssv_ix):
        sv_ids = ssv_ix[1]
        ssv_ix = ssv_ix[0]
    else:
        sv_ids = None  # will be loaded from attribute dict
    sso = SuperSegmentationObject(ssv_ix, sv_ids=sv_ids, **sso_kwargs)
    sso.load_attr_dict()
    sso.render_views(**render_kwargs)

with open(path_out_file, "wb") as f:
    pkl.dump("0", f)