Example #1
import argparse
import os
import sys

from knossos_utils import knossosdataset, skeleton

# sweep_sk, get_stat and convert_to_csv are project-local helpers


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--cube',
        default='/mnt/md0/XRay/Knossos_measurements/WT_colon_380/knossos_cube/mag1/knossos.conf')
    parser.add_argument(
        '--anno',
        default='/mnt/md0/XRay/Knossos_measurements/WT_colon_380/annotation/muscle_skeleton_annotation-20180113T1805.139.k.zip')
    args = parser.parse_args()
    print(args.cube, args.anno)
    root_dir = '.'
    f_knossos = args.cube
    f_overlay = args.anno
    f_out = os.path.join(root_dir, 'output')
    if not os.path.exists(f_out):
        os.makedirs(f_out)

    kd = knossosdataset.KnossosDataset()
    kd.initialize_from_knossos_path(f_knossos)

    #raw = kd.from_raw_cubes_to_matrix(size=kd.boundary, offset=[0, 0, 0])
    print(kd._experiment_name)
    print(kd.boundary)

    #overlay= kd.from_kzip_to_matrix(path=f_overlay,size=kd.boundary, offset=[0, 0, 0], mag=1, verbose=True, alt_exp_name_kzip_path_mode=False)
    sk = skeleton.Skeleton()
    sk.fromNml(f_overlay)
    result_dict = sweep_sk(sk)
    result_dict = get_stat(sk, result_dict)
    convert_to_csv(result_dict, f_out)
    sys.exit()
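
The commented-out from_raw_cubes_to_matrix call above hints at how the raw volume could be pulled into memory. A minimal sketch under that assumption (path hypothetical):

from knossos_utils import knossosdataset

kd = knossosdataset.KnossosDataset()
kd.initialize_from_knossos_path('/path/to/mag1/knossos.conf')  # hypothetical path
# load the whole dataset bounding box as an array, mirroring the
# commented-out call in main() above
raw = kd.from_raw_cubes_to_matrix(size=kd.boundary, offset=[0, 0, 0])
print(raw.shape)
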
Example #2
import os

from knossos_utils import skeleton


def offset_annotation(anno, dx, dy, dz):
    sk = skeleton.Skeleton()
    sk.fromNml(anno, use_file_scaling=True)
    f_out = os.path.join(os.path.dirname(anno), 'offset.k.zip')
    print(f_out)
    # print(sk.getAnnotations())
    # print(sk.annotation)
    for n in sk.getNodes():
        x, y, z = n.getCoordinate()
        #print('pre: {}, {}, {}'.format(x,y,z))
        n.setCoordinate((x + dx, y + dy, z + dz))
        #print('post: {}, {}, {}'.format(x+dx,y+dy,z+dz))

    sk.to_kzip(f_out, force_overwrite=True)
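
A hypothetical invocation, shifting every node by +100 voxels in x and y and writing offset.k.zip next to the input file:

# path and offsets are illustrative
offset_annotation('/data/traces/cell01.k.zip', dx=100, dy=100, dz=0)
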
Example #3
import argparse
import glob
import os

from knossos_utils import knossosdataset, skeleton

# the sk2swc_* helpers are project-local


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cube',
                        default='../full_stack_rot_knossos/mag1/knossos.conf')
    parser.add_argument('--anno', default='../trace/')
    parser.add_argument('--output', default='.')
    parser.add_argument('--features',
                        type=str,
                        choices=['both', 'cellbody', 'dendrite', 'mask'],
                        default='both')

    args = parser.parse_args()
    f_knossos = args.cube
    f_overlay = max(glob.iglob(os.path.join(args.anno, '*.k.zip')),
                    key=os.path.getmtime)
    f_output = args.output
    if not os.path.exists(f_output):
        os.makedirs(f_output)
    f_swc = os.path.join(f_output, 'output.swc')
    f_center = os.path.join(f_output, 'center.txt')

    kd = knossosdataset.KnossosDataset()
    kd.initialize_from_knossos_path(f_knossos)
    #raw = kd.from_raw_cubes_to_matrix(size=kd.boundary, offset=[0, 0, 0])
    print(kd._experiment_name)
    print(kd.boundary)
    #overlay= kd.from_kzip_to_matrix(path=f_overlay,size=kd.boundary, offset=[0, 0, 0], mag=1, verbose=True, alt_exp_name_kzip_path_mode=False)
    sk = skeleton.Skeleton()
    sk.fromNml(f_overlay)
    if args.features == 'both':
        sk2swc_and_center(sk, f_swc, f_center)
    elif args.features == 'cellbody':
        sk2swc_cellbody_and_center(sk, f_swc, f_center)
    elif args.features == 'dendrite':
        sk2swc_dendrite_and_center(sk, f_swc, f_center)
    elif args.features == 'mask':
        sk.toSWC(f_swc)
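
Example #3 picks the most recently modified annotation in the trace folder; the same pattern in isolation (directory hypothetical):

import glob
import os

# newest *.k.zip by file modification time
latest = max(glob.iglob(os.path.join('../trace/', '*.k.zip')),
             key=os.path.getmtime)
print(latest)
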
Example #4
import copy
import glob
import time
import zipfile

import networkx as nx
import numpy as np

from knossos_utils import skeleton
# su is assumed to be knossos_utils.skeleton_utils
from knossos_utils import skeleton_utils as su

# load_best_bet_rag, analyze_j0126_reconnector_task, init_node_to_sv_id_workers,
# nx_rag_to_knossos_mergelist and the node-mapping queues are project-local


def update_RAG_with_reconnects(
    reconnect_folder='/mnt/j0126/areaxfs_v10/reconnect_tasks/final_tasks/',
    path_to_skeletons='/mnt/j0126/areaxfs_v10/reconnect_tasks/traced_skeletons/',
    path_to_reconn_rags='/mnt/j0126/areaxfs_v10/reconnect_tasks/resulting_ssv_rags/'
):
    """
    Applies the reconnect skeleton tasks to an existing RAG by adding edges. Requires a Knossos segmentation
    dataset, from which the segmentation IDs are collected.

    :param reconnect_folder:
    :param path_to_skeletons:
    :param path_to_reconn_rags:
    :return:
    """

    # load rag
    rag = load_best_bet_rag()

    # load all reconnects
    kzips = glob.glob(reconnect_folder + '*.k.zip')

    parsing_errors = []
    task_dicts = []
    for kzip in kzips:
        try:
            task_dicts.append(analyze_j0126_reconnector_task(kzip))
            print('Successfully parsed task {0}'.format(kzip))
        except Exception:
            parsing_errors.append(kzip)
            print('Error parsing task {0}'.format(kzip))

    #all_recon_tasks = flatten([t['k_annos'] for t in task_dicts])

    all_recon_tasks = []
    for task_dict in task_dicts:
        all_recon_tasks.extend(task_dict['k_annos'])

    all_src_ids = []
    for task_dict in task_dicts:
        all_src_ids.extend(task_dict['src_ids'])

    all_src_coords = []
    for task_dict in task_dicts:
        all_src_coords.extend(task_dict['src_coords'])

    print('Got in total {0} tasks'.format(len(all_recon_tasks)))
    # filter out skeletons that do not reconnect anything (5 or fewer nodes)

    positive_reconnects = [a for a in all_recon_tasks if len(a.getNodes()) > 5]
    print('Got in total {0} reconnects > 5 nodes'.format(
        len(positive_reconnects)))
    print('Total parsing errors: {0}'.format(len(parsing_errors)))


    workers = init_node_to_sv_id_workers()
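    # the workers (project-local) consume node coordinates from
    # node_to_sv_mappings_todo_queue and put (coordinate, sv_id) pairs onto
    # node_to_sv_mappings_done_queue, as consumed below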
    total_reconnects = len(positive_reconnects)
    recon_cnt = 0.
    rag_extension = []
    start = time.time()

    unmapped_nodes_cnt = 0

    for reconnect_anno, src_coords, src_id in zip(all_recon_tasks,
                                                  all_src_coords, all_src_ids):
        if len(reconnect_anno.getNodes()) < 5:
            continue

        recon_cnt += 1.
        print('Reconnects done: {0}%'.format(recon_cnt / total_reconnects *
                                             100.))
        mapped_nodes = []

        node1 = skeleton.SkeletonNode()
        node1.from_scratch(reconnect_anno, *src_coords[0])
        node1.setPureComment('source 1')
        reconnect_anno.addNode(node1)

        node2 = skeleton.SkeletonNode()
        node2.from_scratch(reconnect_anno, *src_coords[1])
        node2.setPureComment('source 2')
        reconnect_anno.addNode(node2)

        reconnect_anno.addEdge(node1, node2)

        # connect source 1 with the closest node an annotator made if there is
        # one
        kd_tree = su.KDtree(
            reconnect_anno.getNodes(),
            [n.getCoordinate() for n in reconnect_anno.getNodes()])

        nodes, dists = kd_tree.query_k_nearest([src_coords[0]],
                                               k=3,
                                               return_dists=True)
        for node, dist in zip(nodes, dists):
            # link the source 1 node with the first annotator placed node in the
            # skeleton; query k = 3 is necessary to ensure that one of the hits
            # is an annotator created node!
            if 'source' not in node.getPureComment():
                reconnect_anno.addEdge(node1, node)
                break

        orig_reconnect_anno = copy.deepcopy(reconnect_anno)

        #reconnect_anno.interpolate_nodes(max_node_dist_scaled=200)

        # push nodes onto queue
        anno_nodes = reconnect_anno.getNodes()
        all_node_cnt = len(anno_nodes) + 10
        for skel_node in anno_nodes:
            node_to_sv_mappings_todo_queue.put(skel_node.getCoordinate())

        # push the seed coordinates onto the queue - this is a hack, make sure
        # that they are mapped > 5 times, see below

        for _ in range(5):
            node_to_sv_mappings_todo_queue.put(src_coords[0])
        for _ in range(5):
            node_to_sv_mappings_todo_queue.put(src_coords[1])

        # wait for all nodes to be mapped
        done_nodes = 0
        while done_nodes < all_node_cnt:
            node_coord, sv_id = node_to_sv_mappings_done_queue.get()
            mapped_nodes.append((node_coord, sv_id))
            done_nodes += 1

        all_mapped_sv_ids = [el[1] for el in mapped_nodes]

        # rebuild the kd-tree (required if interpolate_nodes above is re-enabled)
        kd_tree = su.KDtree(
            reconnect_anno.getNodes(),
            [n.getCoordinate() for n in reconnect_anno.getNodes()])

        for mapped_node in mapped_nodes:
            anno_node, dist = kd_tree.query_k_nearest([mapped_node[0]],
                                                      return_dists=True)
            # temp storage for mapped sv_id
            anno_node.sv_id = mapped_node[1]

        # count sv_id occurrences; only ids mapped more than 4 times are kept
        very_likely = []
        keep_ids = dict()
        unique_ids, counts = np.unique(all_mapped_sv_ids, return_counts=True)
        for sv_id, cnt in zip(unique_ids, counts):
            keep_ids[sv_id] = (sv_id != 0) and (cnt > 4)
            if keep_ids[sv_id]:
                very_likely.append(sv_id)

        # prune skeleton using keep_ids to remove nodes over the background
        # and also nodes with sv_ids that were mapped to sv_ids that were too
        # infrequent
        #all_nodes = list(reconnect_anno.getNodes())

        nx_g = su.annotation_to_nx_graph(reconnect_anno)

        n_o_i = list({k for k, v in nx_g.degree() if v > 1})
        # delete in-between nodes that should not be included
        for node in n_o_i:
            try:
                if not keep_ids[node.sv_id]:
                    # remove this node and relink its neighbors to each other
                    neighbors = list(nx_g[node].keys())
                    # connect all neighbors, take first (arbitrary)
                    if len(neighbors) > 1:
                        src_neighbor = neighbors[0]
                        for neighbor in neighbors[1:]:
                            reconnect_anno.addEdge(src_neighbor, neighbor)
                            nx_g.add_edge(src_neighbor, neighbor)

                    reconnect_anno.removeNode(node)
                    nx_g.remove_node(node)
            except AttributeError:
                unmapped_nodes_cnt += 1
                print('Node {0} of src_id {1} without sv_id.'.format(
                    node, src_id))

        n_o_i = list({k for k, v in nx_g.degree() if v == 1})
        # delete end nodes that should not be included
        for node in n_o_i:
            try:
                if not keep_ids[node.sv_id]:
                    reconnect_anno.removeNode(node)
                    nx_g.remove_node(node)
            except AttributeError:
                unmapped_nodes_cnt += 1
                print('Node {0} of src_id {1} without sv_id.'.format(
                    node, src_id))

        for n in reconnect_anno.getNodes():
            try:
                n.setPureComment('{0} sv id: {1}'.format(
                    n.getPureComment(), n.sv_id))
            except AttributeError:
                n.setPureComment('{0} sv id: {1}'.format(
                    n.getPureComment(), 'not mapped'))

        # convert the skeleton to nx graph by iterating over the edges of the
        # skeleton annotation; the result is a nx graph representing the
        # topology of the skeleton, but consisting of sv_ids as nodes;

        topo_nx_g = nx.Graph()
        edges = reconnect_anno.getEdges()
        for src_node in list(edges.keys()):
            for trg_node in edges[src_node]:
                try:
                    if src_node.sv_id != trg_node.sv_id:
                        topo_nx_g.add_edge(src_node.sv_id, trg_node.sv_id)
                except AttributeError:
                    pass

        # graph of sv_ids that belong together according to this reconnect tracing
        rag_extension.append(topo_nx_g)

        # write topo_nx_g to file for later bidirectionality analysis

        # write annotation with mergelist as kzip to folder

        skel_obj = skeleton.Skeleton()

        skel_obj.add_annotation(orig_reconnect_anno)
        orig_reconnect_anno.setComment('tracing')

        skel_obj.add_annotation(reconnect_anno)
        reconnect_anno.setComment('sv topo')
        outfile = path_to_skeletons + 'reconnect_{0}.k.zip'.format(src_id)

        skel_obj.to_kzip(outfile)

        # add mergelist to the kzip
        buff = ''
        buff += '{0} 0 0 '.format('1')
        for sv_id in very_likely:
            buff += '{0} '.format(sv_id)
        buff += '\n0 0 0\n\n\n'

        with zipfile.ZipFile(outfile, "a", zipfile.ZIP_DEFLATED) as zf:
            zf.writestr('mergelist.txt', buff)

        outfile = path_to_reconn_rags + 'ssv_rag_{0}.csv'.format(src_id)
        nx.write_edgelist(topo_nx_g, outfile, delimiter=',', data=False)

    for worker in workers:
        worker.terminate()

    added_rag_edges = 0
    print('Extending global rag')
    for this_ssv_rag in rag_extension:
        new_edges = this_ssv_rag.edges()
        rag.add_edges_from(new_edges)
        added_rag_edges += len(new_edges)

    print('Done extending global rag')
    print('Added in total {0} edges to the rag'.format(added_rag_edges))

    nx_rag_to_knossos_mergelist(
        rag,
        path_to_mergelist='/mnt/j0126/areaxfs_v10/RAGs/v4b_20180214_nocb_merges_reconnected_knossos_mergelist.txt')

    nx.write_edgelist(
        rag,
        '/mnt/j0126/areaxfs_v10/RAGs/v4b_20180214_nocb_merges_reconnected.txt',
        delimiter=',',
        data=False)

    print('Total number unmapped nodes: {0}'.format(unmapped_nodes_cnt))

    print('Mapping {0} took {1}'.format(total_reconnects,
                                        (time.time() - start)))
    return
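
The mergelist buffer assembled above appears to follow the KNOSSOS mergelist layout: an object header line (object id, todo and immutable flags, then the member supervoxel ids), an anchor coordinate line, and empty category and comment lines. A hedged helper reproducing the same string:

def make_mergelist(sv_ids, obj_id=1, anchor=(0, 0, 0)):
    # assumed single-object layout, matching the buffer built in
    # update_RAG_with_reconnects above
    header = '{0} 0 0 '.format(obj_id)
    header += ''.join('{0} '.format(sv_id) for sv_id in sv_ids)
    anchor_line = '{0} {1} {2}'.format(*anchor)
    return header + '\n' + anchor_line + '\n\n\n'
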
Example #5
import os
import time

import numpy as np

# knossos_skeleton is assumed to be knossos_utils.skeleton
from knossos_utils import skeleton as knossos_skeleton


def find_job(args):
    a, t_a, bs, trees_b, rel_dist_thresh, rel_search_radius, max_dist, thresh, k, dir_name = args
    print(a)
    edges = []
    for b, t_b in zip(bs, trees_b):
        if len(t_a.joined_coords) < len(t_b.joined_coords):
            t_long = t_b
            t_short = t_a
        else:
            t_long = t_a
            t_short = t_b

        tmp = t_long.joined_kdt.get_knn(t_short.joined_coords, k=1)
        distances, indices, coords = tmp

        min_ix = distances.argsort()[:k]
        distances = distances[min_ix]
        indices = indices[min_ix]
        radii = (t_long.joined_radii[indices] +
                 t_short.joined_radii[min_ix]) / 2
        rel_distances = distances / radii.mean()
        rel_mean = rel_distances.mean()
        if rel_mean < rel_dist_thresh:
            pass  # this pair should already have been added
        elif rel_mean < rel_search_radius and distances.mean() < max_dist:
            # Warning this works on isotropic coords
            closest_point_long = coords[min_ix[0]].copy()
            closest_point_short = t_short.joined_coords[min_ix[0]].copy()
            try:
                tmp = t_long.joined_kdt.get_knn(closest_point_short, k=20)
            except ValueError:
                k_ = len(t_long.joined_coords)
                tmp = t_long.joined_kdt.get_knn(closest_point_short, k=k_)

            coords_long = tmp[2].copy()
            try:
                tmp = t_short.joined_kdt.get_knn(closest_point_long, k=20)
            except ValueError:
                k_ = len(t_short.joined_coords)
                tmp = t_short.joined_kdt.get_knn(closest_point_long, k=k_)

            coords_short = tmp[2].copy()

            # scale z by 2 to make the coordinates roughly isotropic
            # (presumably reflecting the 9 x 9 x 20 nm voxel size used below)
            closest_point_long[2] *= 2
            closest_point_short[2] *= 2
            coords_long[:, 2] *= 2
            coords_short[:, 2] *= 2

            center_long = coords_long.mean(0)
            center_short = coords_short.mean(0)

            uu, dd, vv = np.linalg.svd(coords_long - center_long)
            direc_long = vv[0]  # take largest eigenvector

            uu, dd, vv = np.linalg.svd(coords_short - center_short)
            direc_short = vv[0]  # take largest eigenvector

            direc_centers = center_long - center_short
            direc_centers /= np.linalg.norm(direc_centers)

            direc_pairs = closest_point_long - closest_point_short
            direc_pairs /= np.linalg.norm(direc_pairs)

            # align all directions:
            if np.dot(direc_short, direc_centers) < 0:
                direc_short *= -1
            if np.dot(direc_long, direc_centers) < 0:
                direc_long *= -1
            if np.dot(direc_pairs, direc_centers) < 0:
                direc_pairs *= -1

            m1 = np.dot(direc_short, direc_centers)
            m2 = np.dot(direc_long, direc_centers)
            m = (m1 + m2) / 2
            print(m1, m2, m)

            if m > thresh:
                edges.append((a, b))

            ###############################################################
            skel_obj = knossos_skeleton.Skeleton()
            anno = knossos_skeleton.SkeletonAnnotation()
            anno.scaling = (9.0, 9.0, 20.0)
            skel_obj.add_annotation(anno)
            anno.setComment("%.1f %.1f %.1f" % (
                m,
                m1,
                m2,
            ))

            def add_node(x, y, z, r=5):
                new_node = knossos_skeleton.SkeletonNode()
                # z was doubled above for isotropy; halve it again for voxel coords
                new_node.from_scratch(anno, x, y, z // 2, radius=r)
                anno.addNode(new_node)
                return new_node

            n0 = add_node(*center_long, r=10)
            n1 = add_node(*center_short, r=10)
            n0.addChild(n1)

            n2 = add_node(*closest_point_long, r=5)
            n3 = add_node(*closest_point_short, r=5)
            n2.addChild(n3)

            n4 = add_node(*(center_long + 20 * direc_long), r=3)
            n5 = add_node(*(center_long - 20 * direc_long), r=3)
            n0.addChild(n4)
            n0.addChild(n5)

            n6 = add_node(*(center_short + 20 * direc_short), r=3)
            n7 = add_node(*(center_short - 20 * direc_short), r=3)
            n1.addChild(n6)
            n1.addChild(n7)

            if not os.path.exists("merge_candidates"):
                os.makedirs("merge_candidates")

            skel_obj.to_kzip("%s/splitfix_candidates/%i.k.zip" %
                             (dir_name, time.time()))
            ###############################################################

    return edges
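
find_job estimates each fragment's local direction as the first right-singular vector of its centered neighbor coordinates (the "largest eigenvector" in the comments). A minimal standalone sketch of that step:

import numpy as np

# toy point cloud stretched along the x axis
pts = np.random.randn(50, 3) * [10.0, 1.0, 1.0]
center = pts.mean(axis=0)
# rows of vv are the principal directions; vv[0] carries the largest variance
uu, dd, vv = np.linalg.svd(pts - center)
direction = vv[0] / np.linalg.norm(vv[0])
print(direction)  # close to +/- (1, 0, 0)
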
Example #6
import os

import numpy as np
from scipy import ndimage

# knossos_skeleton is assumed to be knossos_utils.skeleton
from knossos_utils import skeleton as knossos_skeleton
from knossos_utils.knossosdataset import KnossosDataset

# ball_generator, unique_rows and utils.picklesave are project-local helpers


def make_shotgun_data_z(cube_shape, save_name, z_skip=5):
    barr_thresh = 0.7
    z_lookaround = 5
    max_footprint_S = ball_generator(9)
    max_footprint_L = ball_generator(21)
    max_footprint_L = max_footprint_L[3:-3]

    peak_thresh = 3.0
    peak_thresh_L = 9

    kds_barr = KnossosDataset()
    data_prefix = os.path.expanduser("~/lustre/sdorkenw/j0126_")
    kds_barr.initialize_from_knossos_path(data_prefix + '161012_barrier/')
    pred = kds_barr.from_raw_cubes_to_matrix(cube_shape.shape,
                                             cube_shape.offset,
                                             show_progress=False,
                                             zyx_mode=True,
                                             datatype=np.float32)
    pred /= 255

    mem_high = np.invert(pred > barr_thresh)
    seeds = []
    seed_values = []
    noise = np.random.rand(*(mem_high[0:0 + z_lookaround].shape)) * 1e-3
    running_sum = 0
    for z in range(0, mem_high.shape[0] - z_lookaround, z_skip):
        try:
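            # EDT over a thin z slab; sampling=[2, 1, 1] weights the z axis
            # (axis 0 in zyx mode) twice, presumably matching the ~2:1 voxel
            # anisotropy of the dataset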
            dt = ndimage.distance_transform_edt(mem_high[z:z + z_lookaround],
                                                sampling=[2, 1, 1])
            dt = ndimage.filters.gaussian_filter(dt, (1.0, 2.0, 2.0))
            dt += noise

            z_peaks_S = ndimage.maximum_filter(dt,
                                               footprint=max_footprint_S,
                                               mode='constant')
            z_peaks_L = ndimage.maximum_filter(dt,
                                               footprint=max_footprint_L,
                                               mode='constant')

            z_peaks_small = (z_peaks_S == dt) * ((peak_thresh_L > dt) &
                                                 (dt > peak_thresh))
            z_peaks_large = (z_peaks_L == dt) * ((peak_thresh_L <= dt))
            z_peaks = z_peaks_large + z_peaks_small
            z_peaks *= (pred[z:z + z_lookaround] < 0.5)
            seeds_z = np.array(z_peaks.nonzero()).T
            seeds_z[:, 0] += z
        except KeyboardInterrupt:
            break
        else:
            # record seeds only for slabs that completed without interruption
            seeds.append(seeds_z)
            seed_values.append(dt[z_peaks])
            running_sum += z_peaks.sum()
            print(z, running_sum, z_peaks_small.sum(), z_peaks_large.sum(),
                  z_peaks.sum())

    seeds = np.concatenate(seeds, axis=0)
    seed_values = np.concatenate(seed_values, axis=0)
    seeds, index = unique_rows(seeds)
    seed_values = seed_values[index]

    lar = np.array([4, 8, 8])
    lari = lar * [2, 1, 1]
    sz = lar * 2 + 1
    szi = lari * 2 + 1

    pred2 = kds_barr.from_raw_cubes_to_matrix(cube_shape.shape + 2 * lar,
                                              cube_shape.offset - lar,
                                              show_progress=False,
                                              zyx_mode=True,
                                              datatype=np.float32)
    pred2 /= 255

    mem_high2 = np.invert(pred2 > barr_thresh)
    dt = ndimage.distance_transform_edt(mem_high2, sampling=[2, 1, 1])
    local_grid = np.vstack([x - x.mean() for x in np.ones(szi).nonzero()])
    directions = np.zeros((len(seeds), 3))
    perm = np.random.permutation(local_grid.shape[1])[:400]
    for i, (s, v) in enumerate(zip(seeds, seed_values)):
        z, y, x = s  # np.round(s).astype(np.int)
        cavity = dt[z:z + sz[0], y:y + sz[1], x:x + sz[2]]
        cavity = ndimage.zoom(cavity, [float(sz[1]) / sz[0], 1, 1])
        s_val = dt[z + lar[0], y + lar[1], x + lar[2]]
        diff = np.abs(cavity - s_val)
        d_m = diff.mean()
        mask = (diff < d_m)
        d_max = diff[mask].max()
        um = np.zeros_like(diff)
        um[mask] = d_max - diff[mask]
        um = um.ravel()
        uu, dd, vv = np.linalg.svd((um * local_grid).T[perm])
        direc_iso = vv[0]  # take largest eigenvector
        direc_iso /= np.linalg.norm(direc_iso, axis=0)  # normalise
        directions[i] = direc_iso


    seeds += cube_shape.offset
    utils.picklesave([seeds, directions, seed_values], save_name)

    # Create skeleton with seeds and directions
    skel_obj = knossos_skeleton.Skeleton()
    anno = knossos_skeleton.SkeletonAnnotation()
    anno.scaling = (9.0, 9.0, 20.0)
    skel_obj.add_annotation(anno)

    def add_node(s, r=5):
        z, y, x = s
        new_node = knossos_skeleton.SkeletonNode()
        new_node.from_scratch(anno, x, y, z, radius=r)
        anno.addNode(new_node)
        return new_node

    for s, direc, v in zip(seeds, directions, seed_values):
        n = add_node(s, r=4)
        n.appendComment("%.1f" % v)
        direc = direc.copy()
        # halve the z component (axis 0), presumably to account for the
        # 2x larger voxel size in z
        direc[0] /= 2
        n1 = add_node((s + 11 * direc), r=1)
        n2 = add_node((s - 11 * direc), r=1)
        n1.addChild(n)
        n2.addChild(n)

    return seeds, directions, seed_values, skel_obj
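
make_shotgun_data_z seeds at local maxima of an anisotropy-aware distance transform of the barrier prediction. A minimal 2D sketch of the same peak detection, with illustrative thresholds:

import numpy as np
from scipy import ndimage

mask = np.zeros((64, 64), dtype=bool)
mask[8:56, 8:56] = True  # toy foreground region

dt = ndimage.distance_transform_edt(mask)
dt += np.random.rand(*dt.shape) * 1e-3  # tie-breaking noise, as above

# a pixel is a seed if it equals the maximum over its neighborhood and
# clears a minimum-distance threshold (size and 3.0 are illustrative)
peaks = (ndimage.maximum_filter(dt, size=9, mode='constant') == dt) & (dt > 3.0)
seeds = np.array(peaks.nonzero()).T
print(len(seeds))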