Example #1
0
    def setUp(self):
        """Create the working directories and write both workflow configs."""
        for folder in (self.tmp_folder, self.config_folder):
            os.makedirs(folder, exist_ok=True)

        # global config: execution shebang and processing block shape
        glob_conf = NodeLabelWorkflow.get_config()['global']
        glob_conf.update({'shebang': self.shebang,
                          'block_shape': self.block_shape})
        glob_path = os.path.join(self.config_folder, 'global.config')
        with open(glob_path, 'w') as f:
            json.dump(glob_conf, f)

        # task config: thread count for the merge task
        task_conf = NodeLabelWorkflow.get_config()['merge_node_labels']
        task_conf['threads_per_job'] = self.n_jobs
        task_path = os.path.join(self.config_folder,
                                 'merge_node_labels.config')
        with open(task_path, 'w') as f:
            json.dump(task_conf, f)
Example #2
0
 def setUp(self):
     """Prepare temp/config folders and persist the global workflow config."""
     self._mkdir(self.tmp_folder)
     self._mkdir(self.config_folder)
     conf = NodeLabelWorkflow.get_config()['global']
     # fixed block shape used by all tests in this case
     conf.update({'shebang': self.shebang,
                  'block_shape': [10, 256, 256]})
     conf_path = os.path.join(self.config_folder, 'global.config')
     with open(conf_path, 'w') as f:
         json.dump(conf, f)
 def get_config():
     """Return default task configs for this workflow and its sub-workflows.

     Keys from NodeLabelWorkflow take precedence on collision, matching the
     original merge order.
     """
     configs = super(UnmergeWorkflow, UnmergeWorkflow).get_config()
     configs['fix_merges'] = fix_tasks.FixMergesLocal.default_task_config()
     configs['find_merges'] = find_tasks.FindMergesLocal.default_task_config()
     configs['write'] = write_tasks.WriteLocal.default_task_config()
     # applied last so NodeLabelWorkflow entries override duplicates
     configs.update(NodeLabelWorkflow.get_config())
     return configs
Example #4
0
    def test_node_labels(self):
        """Run the NodeLabelWorkflow end-to-end and validate its output."""
        # write the task config with the desired thread count
        task_conf = NodeLabelWorkflow.get_config()['merge_node_labels']
        task_conf['threads_per_job'] = 8
        conf_path = os.path.join(self.config_folder,
                                 'merge_node_labels.config')
        with open(conf_path, 'w') as f:
            json.dump(task_conf, f)

        # build and execute the workflow with luigi's local scheduler
        task = NodeLabelWorkflow(tmp_folder=self.tmp_folder,
                                 config_dir=self.config_folder,
                                 target=self.target, max_jobs=8,
                                 ws_path=self.path, ws_key=self.ws_key,
                                 input_path=self.path,
                                 input_key=self.input_key,
                                 output_path=self.output_path,
                                 output_key=self.output_key)
        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)
        self._check_result()
Example #5
0
    def test_cell_nucleus_mappings(self):
        """Map cells to nuclei and check each nucleus is assigned at most once."""
        from mmpb.attributes.cell_nucleus_mapping import map_cells_to_nuclei

        seg_folder = '../../data/0.1.1/segmentations'
        nuc_folder = '../../data/0.0.0/segmentations'
        seg_path = os.path.join(seg_folder,
                                'sbem-6dpf-1-whole-segmented-cells-labels.h5')
        nuc_path = os.path.join(nuc_folder,
                                'sbem-6dpf-1-whole-segmented-nuclei-labels.h5')

        # label ids cover the full id range of the cell segmentation
        with h5py.File(seg_path, 'r') as f:
            max_id = f['t00000/s00/0/cells'].attrs['maxId']
        label_ids = np.arange(max_id + 1, dtype='uint64')

        output_path = os.path.join(self.tmp_folder, 'table-test.csv')

        # write the global config with the cluster-env shebang
        config_folder = os.path.join(self.tmp_folder, 'configs')
        os.makedirs(config_folder, exist_ok=True)
        conf = NodeLabelWorkflow.get_config()['global']
        shebang = '#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python'
        conf['shebang'] = shebang
        with open(os.path.join(config_folder, 'global.config'), 'w') as f:
            json.dump(conf, f)

        map_cells_to_nuclei(label_ids,
                            seg_path,
                            nuc_path,
                            output_path,
                            tmp_folder=self.tmp_folder,
                            target='local',
                            max_jobs=60)

        table = pandas.read_csv(output_path, sep='\t')
        assert len(table) == max_id + 1

        # make sure each nucleus is mapped only once
        nucleus_ids = table['nucleus_id'].values
        nucleus_ids, id_counts = np.unique(nucleus_ids, return_counts=True)
        # drop the first entry (unmapped / background id)
        nucleus_ids, id_counts = nucleus_ids[1:], id_counts[1:]
        self.assertEqual(id_counts.sum(), id_counts.size)
Example #6
0
    def test_regions(self):
        """Compute region attributes and compare against the base table."""
        from mmpb.attributes.region_attributes import region_attributes

        image_folder = '../../data/0.0.0/images'
        segmentation_folder = '../../data/0.0.0/segmentations'
        seg_path = os.path.join(segmentation_folder,
                                'sbem-6dpf-1-whole-segmented-cells-labels.h5')

        # label ids cover the full id range of the cell segmentation
        seg_key = 't00000/s00/0/cells'
        with h5py.File(seg_path, 'r') as f:
            max_id = f[seg_key].attrs['maxId']
        label_ids = np.arange(max_id + 1, dtype='uint64')

        output_path = os.path.join(self.tmp_folder, 'table-test.csv')

        # write the global config
        config_folder = os.path.join(self.tmp_folder, 'configs')
        os.makedirs(config_folder, exist_ok=True)
        conf = NodeLabelWorkflow.get_config()['global']
        shebang = '#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python'
        conf['shebang'] = shebang
        with open(os.path.join(config_folder, 'global.config'), 'w') as f:
            json.dump(conf, f)

        region_attributes(seg_path, output_path, image_folder,
                          segmentation_folder, label_ids, self.tmp_folder,
                          'local', 8)

        table = pandas.read_csv(output_path, sep='\t')
        assert len(table) == max_id + 1

        base_path = '../../data/0.0.0/tables/sbem-6dpf-1-whole-segmented-cells-labels/default.csv'
        base_table = pandas.read_csv(base_path, sep='\t')

        # compare against a lower-resolution scale and the tissue labels
        seg_key = 't00000/s00/2/cells'
        tissue_path = '../../data/rawdata/sbem-6dpf-1-whole-segmented-tissue-labels.h5'
        tissue_key = 't00000/s00/0/cells'
        self.check_result(table, base_table, seg_path, seg_key, tissue_path,
                          tissue_key)