Example #1
    def setUp(self):
        os.makedirs(self.tmp_folder, exist_ok=True)
        os.makedirs(self.config_folder, exist_ok=True)

        # write the global config with shebang and block shape
        global_config = NodeLabelWorkflow.get_config()['global']
        global_config['shebang'] = self.shebang
        global_config['block_shape'] = self.block_shape
        with open(os.path.join(self.config_folder, 'global.config'), 'w') as f:
            json.dump(global_config, f)

        # run the merge step multi-threaded
        config = NodeLabelWorkflow.get_config()['merge_node_labels']
        config.update({'threads_per_job': self.n_jobs})
        with open(os.path.join(self.config_folder, 'merge_node_labels.config'),
                  'w') as f:
            json.dump(config, f)
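
For reference, these snippets are unittest methods and assume scaffolding roughly like the following. The class name, fixture values, and the import path for NodeLabelWorkflow are illustrative assumptions, not taken from the source:

import os
import json
import unittest

import numpy as np
import luigi
import z5py
import nifty.tools as nt
import nifty.distributed as ndist

# assumed import path, following the cluster_tools package layout
from cluster_tools.node_labels import NodeLabelWorkflow


class TestNodeLabels(unittest.TestCase):
    # illustrative fixture values
    tmp_folder = './tmp'
    config_folder = './tmp/configs'
    output_path = './tmp/data.n5'
    output_key = 'node_labels'
    target = 'local'
    n_jobs = 4
    block_shape = [10, 256, 256]
    shebang = '#! /usr/bin/python'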
Example #2
    def test_overlaps(self):
        task = NodeLabelWorkflow(tmp_folder=self.tmp_folder,
                                 config_dir=self.config_folder,
                                 target=self.target,
                                 max_jobs=self.n_jobs,
                                 ws_path=self.path,
                                 ws_key=self.ws_key,
                                 input_path=self.path,
                                 input_key=self.input_key,
                                 output_path=self.output_path,
                                 output_key=self.output_key,
                                 max_overlap=False)

        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)

        # load the result
        overlaps = ndist.deserializeOverlapChunk(
            os.path.join(self.output_path, self.output_key), (0, ))[0]

        # compute the expected overlaps
        ws, inp = self.load_data()
        overlaps_exp = self.compute_overlaps(ws, inp, max_overlap=False)

        # check the result
        ids = np.unique(ws)
        self.check_overlaps(ids, overlaps, overlaps_exp)
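
compute_overlaps is a helper defined elsewhere in the test class. Below is a minimal NumPy sketch that is consistent with how it is called in these examples (a dict of per-id overlap counts for max_overlap=False, a dense argmax assignment plus validity mask otherwise); it is an illustration, not the source implementation:

    def compute_overlaps(self, ws, inp, max_overlap=True):
        # count co-occurring (ws_id, inp_id) pairs over all voxels
        pairs, counts = np.unique(np.stack([ws.ravel(), inp.ravel()]),
                                  axis=1, return_counts=True)
        overlaps = {}
        for (ws_id, inp_id), count in zip(pairs.T, counts):
            overlaps.setdefault(int(ws_id), {})[int(inp_id)] = int(count)
        if not max_overlap:
            return overlaps
        # dense assignment: for each ws id the input label with the largest
        # overlap, plus a mask marking the ids present in the volume
        n_ids = int(ws.max()) + 1
        assignment = np.zeros(n_ids, dtype='uint64')
        mask = np.zeros(n_ids, dtype='bool')
        for ws_id, ovlps in overlaps.items():
            assignment[ws_id] = max(ovlps, key=ovlps.get)
            mask[ws_id] = True
        return assignment, mask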
Example #3
    def test_subresults(self):
        task = NodeLabelWorkflow(tmp_folder=self.tmp_folder,
                                 config_dir=self.config_folder,
                                 target=self.target,
                                 max_jobs=self.n_jobs,
                                 ws_path=self.path,
                                 ws_key=self.ws_key,
                                 input_path=self.path,
                                 input_key=self.input_key,
                                 output_path=self.output_path,
                                 output_key=self.output_key)

        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)

        # per-block overlap results written by the workflow
        tmp_path = os.path.join(self.output_path, 'label_overlaps_')
        ws, inp = self.load_data()

        blocking = nt.blocking([0, 0, 0], ws.shape, self.block_shape)
        for block_id in range(blocking.numberOfBlocks):
            block = blocking.getBlock(block_id)
            # chunk index of this block in the serialized overlap output
            chunk_id = tuple(start // bs
                             for start, bs in zip(block.begin, self.block_shape))
            # bounding box of the block in the volume
            bb = tuple(slice(beg, end)
                       for beg, end in zip(block.begin, block.end))

            wsb, inpb = ws[bb], inp[bb]

            overlaps, _ = ndist.deserializeOverlapChunk(tmp_path, chunk_id)
            overlaps_exp = self.compute_overlaps(wsb, inpb, max_overlap=False)

            ids = np.unique(wsb)
            self.check_overlaps(ids, overlaps, overlaps_exp)
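
check_overlaps is likewise not shown; a plausible minimal version compares the deserialized overlaps with the expected ones for every id occurring in the block (a hypothetical sketch):

    def check_overlaps(self, ids, overlaps, overlaps_exp):
        for ws_id in ids:
            ws_id = int(ws_id)
            self.assertIn(ws_id, overlaps)
            self.assertEqual(overlaps[ws_id], overlaps_exp[ws_id])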
Example #4
    def test_max_overlap(self):
        task = NodeLabelWorkflow(tmp_folder=self.tmp_folder,
                                 config_dir=self.config_folder,
                                 target=self.target,
                                 max_jobs=self.n_jobs,
                                 ws_path=self.path,
                                 ws_key=self.ws_key,
                                 input_path=self.path,
                                 input_key=self.input_key,
                                 output_path=self.output_path,
                                 output_key=self.output_key)

        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)

        # load the result
        with z5py.File(self.output_path) as f:
            overlaps = f[self.output_key][:]

        ws, inp = self.load_data()

        # compute the expected assignment (max_overlap defaults to True)
        overlaps_exp, mask = self.compute_overlaps(ws, inp)
        self.assertEqual(overlaps.shape, overlaps_exp.shape)

        overlaps = overlaps[mask]
        overlaps_exp = overlaps_exp[mask]

        # compare results
        self.assertTrue(np.allclose(overlaps, overlaps_exp))
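
The mask restricts the comparison to ids that actually occur in the watershed volume, so the test makes no assumption about how the workflow fills output entries for absent ids.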
Example #5
    def test_node_labels(self):
        # run the merge step with 8 threads
        config = NodeLabelWorkflow.get_config()['merge_node_labels']
        config.update({'threads_per_job': 8})
        with open(os.path.join(self.config_folder,
                               'merge_node_labels.config'), 'w') as f:
            json.dump(config, f)

        task = NodeLabelWorkflow(tmp_folder=self.tmp_folder,
                                 config_dir=self.config_folder,
                                 target=self.target, max_jobs=8,
                                 ws_path=self.path, ws_key=self.ws_key,
                                 input_path=self.path, input_key=self.input_key,
                                 output_path=self.output_path, output_key=self.output_key)
        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)
        self._check_result()
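
_check_result is not shown; since the task runs with the default max_overlap=True, a minimal version could mirror the checks from Example #4 (a sketch, not the source code):

    def _check_result(self):
        with z5py.File(self.output_path) as f:
            overlaps = f[self.output_key][:]
        ws, inp = self.load_data()
        overlaps_exp, mask = self.compute_overlaps(ws, inp)
        self.assertEqual(overlaps.shape, overlaps_exp.shape)
        self.assertTrue(np.allclose(overlaps[mask], overlaps_exp[mask]))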
Example #6
    def setUp(self):
        self._mkdir(self.tmp_folder)
        self._mkdir(self.config_folder)
        global_config = NodeLabelWorkflow.get_config()['global']
        global_config['shebang'] = self.shebang
        global_config['block_shape'] = [10, 256, 256]
        with open(os.path.join(self.config_folder, 'global.config'), 'w') as f:
            json.dump(global_config, f)
Example #7
    def get_config():
        configs = super(UnmergeWorkflow, UnmergeWorkflow).get_config()
        configs.update({
            'fix_merges': fix_tasks.FixMergesLocal.default_task_config(),
            'find_merges': find_tasks.FindMergesLocal.default_task_config(),
            'write': write_tasks.WriteLocal.default_task_config(),
            **NodeLabelWorkflow.get_config()
        })
        return configs
Example #8
    def nucleus_labels(self, dep, out_key):
        # map nucleus segment ids onto the cell segmentation
        dep = NodeLabelWorkflow(tmp_folder=self.tmp_folder,
                                max_jobs=self.max_jobs,
                                target=self.target,
                                config_dir=self.config_dir,
                                input_path=self.path,
                                input_key=self.nucleus_seg_key,
                                ws_path=self.path,
                                ws_key=self.seg_key,
                                output_path=self.path,
                                output_key=out_key,
                                prefix='nuclei-node-labels',
                                max_overlap=False,
                                ignore_label=0,
                                serialize_counts=True,
                                dependency=dep)
        return dep
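
Workflows built this way form a chain through the dependency parameter: each helper consumes the previous terminal task and returns the new one. Illustrative usage, where initial_task is a placeholder for whatever task precedes the node-label step:

    dep = initial_task
    dep = self.nucleus_labels(dep, out_key='nucleus_overlaps')
    luigi.build([dep], local_scheduler=True)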
Example #9
    def test_cell_nucleus_mappings(self):
        from mmpb.attributes.cell_nucleus_mapping import map_cells_to_nuclei

        seg_folder = '../../data/0.1.1/segmentations'
        seg_path = os.path.join(
            seg_folder, 'sbem-6dpf-1-whole-segmented-cells-labels.h5')
        nuc_folder = '../../data/0.0.0/segmentations'
        nuc_path = os.path.join(
            nuc_folder, 'sbem-6dpf-1-whole-segmented-nuclei-labels.h5')
        with h5py.File(seg_path, 'r') as f:
            max_id = f['t00000/s00/0/cells'].attrs['maxId']
        label_ids = np.arange(max_id + 1, dtype='uint64')

        output_path = os.path.join(self.tmp_folder, 'table-test.csv')

        config_folder = os.path.join(self.tmp_folder, 'configs')
        os.makedirs(config_folder, exist_ok=True)

        conf = NodeLabelWorkflow.get_config()['global']
        shebang = '#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python'
        conf.update({'shebang': shebang})
        with open(os.path.join(config_folder, 'global.config'), 'w') as f:
            json.dump(conf, f)

        target = 'local'
        max_jobs = 60
        map_cells_to_nuclei(label_ids,
                            seg_path,
                            nuc_path,
                            output_path,
                            tmp_folder=self.tmp_folder,
                            target=target,
                            max_jobs=max_jobs)

        table = pandas.read_csv(output_path, sep='\t')
        assert len(table) == max_id + 1

        # make sure each nucleus is mapped only once
        nucleus_ids = table['nucleus_id'].values
        nucleus_ids, id_counts = np.unique(nucleus_ids, return_counts=True)
        # drop the first entry (id 0, i.e. cells without an assigned nucleus)
        nucleus_ids, id_counts = nucleus_ids[1:], id_counts[1:]
        self.assertEqual(id_counts.sum(), id_counts.size)
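
The final assertion works because the counts are positive integers: their sum equals their number exactly when every count is one. An equivalent, more direct check would be:

        self.assertTrue((id_counts == 1).all())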
Example #10
    def test_regions(self):
        from mmpb.attributes.region_attributes import region_attributes

        image_folder = '../../data/0.0.0/images'
        segmentation_folder = '../../data/0.0.0/segmentations'
        seg_path = os.path.join(segmentation_folder,
                                'sbem-6dpf-1-whole-segmented-cells-labels.h5')
        seg_key = 't00000/s00/0/cells'
        with h5py.File(seg_path, 'r') as f:
            max_id = f[seg_key].attrs['maxId']

        output_path = os.path.join(self.tmp_folder, 'table-test.csv')
        label_ids = np.arange(max_id + 1, dtype='uint64')

        # write the global config
        config_folder = os.path.join(self.tmp_folder, 'configs')
        os.makedirs(config_folder, exist_ok=True)
        conf = NodeLabelWorkflow.get_config()['global']
        shebang = '#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python'
        conf.update({'shebang': shebang})
        with open(os.path.join(config_folder, 'global.config'), 'w') as f:
            json.dump(conf, f)

        target = 'local'
        max_jobs = 8
        region_attributes(seg_path, output_path, image_folder,
                          segmentation_folder, label_ids, self.tmp_folder,
                          target, max_jobs)

        table = pandas.read_csv(output_path, sep='\t')
        assert len(table) == max_id + 1

        base_path = '../../data/0.0.0/tables/sbem-6dpf-1-whole-segmented-cells-labels/default.csv'
        base_table = pandas.read_csv(base_path, sep='\t')

        seg_key = 't00000/s00/2/cells'
        tissue_path = '../../data/rawdata/sbem-6dpf-1-whole-segmented-tissue-labels.h5'
        tissue_key = 't00000/s00/0/cells'
        self.check_result(table, base_table, seg_path, seg_key, tissue_path,
                          tissue_key)
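
check_result is project-specific and not shown. One plausible ingredient is a column-wise comparison between the new table and the base table (a hypothetical sketch):

        shared = [col for col in base_table.columns if col in table.columns]
        for col in shared:
            self.assertTrue((table[col].values == base_table[col].values).all())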
Example #11
    def test_lifted_nh_with_labels(self):
        node_label_path = os.path.join(self.tmp_folder, 'node_labels.n5')
        node_label_key = 'node_labels'
        # node labels: max-overlap label for each watershed id
        task_labels = NodeLabelWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_folder,
                                        max_jobs=8, target='local',
                                        ws_path=self.input_path, ws_key=self.ws_key,
                                        input_path=self.input_path, input_key=self.labels_key,
                                        output_path=node_label_path, output_key=node_label_key,
                                        max_overlap=True)

        graph_path = os.path.join(self.tmp_folder, 'graph.n5')
        graph_key = 'graph'
        graph_config = GraphWorkflow.get_config()['initial_sub_graphs']
        graph_config["ignore_label"] = False
        with open(os.path.join(self.config_folder, 'initial_sub_graphs.config'), 'w') as f:
            json.dump(graph_config, f)

        task_graph = GraphWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_folder,
                                   max_jobs=8, target='local',
                                   input_path=self.input_path, input_key=self.ws_key,
                                   graph_path=graph_path, output_key=graph_key)
        ret = luigi.build([task_labels, task_graph],
                          local_scheduler=True)
        self.assertTrue(ret)

        # TODO: try different graph depths and numbers of threads
        graph_depth = 3
        out_path = os.path.join(self.tmp_folder, 'lifted_nh.h5')
        out_key = 'lifted_nh'
        task_nh = SparseLiftedNeighborhoodLocal(tmp_folder=self.tmp_folder, config_dir=self.config_folder,
                                                max_jobs=1, dependency=task_graph,
                                                graph_path=graph_path, graph_key=graph_key,
                                                node_label_path=node_label_path, node_label_key=node_label_key,
                                                output_path=out_path, output_key=out_key,
                                                prefix='', nh_graph_depth=graph_depth)
        ret = luigi.build([task_nh], local_scheduler=True)
        self.assertTrue(ret)
        self._check_result(graph_depth)
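
To inspect the lifted neighborhood written above, the HDF5 output can be read back. Assuming the edges are stored as an (n_edges, 2) array of node-id pairs (an assumption; the exact layout is defined by SparseLiftedNeighborhoodLocal):

        import h5py
        with h5py.File(out_path, 'r') as f:
            lifted_uvs = f[out_key][:]
        # each row would be a pair of nodes within graph_depth of each other
        print(lifted_uvs.shape)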