Example #1
    def delete_model(self):
        # Delete the model entry from the custom zoo file
        self.file_to_remove = self.file_to_remove.get()
        custom_zoo_dict = yaml.load(open(custom_zoo, 'r'), Loader=yaml.FullLoader)
        if custom_zoo_dict is None:
            custom_zoo_dict = {}

        if self.file_to_remove in custom_zoo_dict:
            del custom_zoo_dict[self.file_to_remove]
        else:
            msg = f"Model {self.file_to_remove} not found." \
                  f" Please check if the name you typed is a custom model. Pre-loaded models can not be deleted."
            gui_logger.error(msg)
            self.popup.destroy()
            raise RuntimeError(msg)

        with open(custom_zoo, 'w') as f:
            yaml.dump(custom_zoo_dict, f)

        file_directory = os.path.join(home_path,
                                      PLANTSEG_MODELS_DIR,
                                      self.file_to_remove)

        if os.path.exists(file_directory):
            rmtree(file_directory)
        else:
            msg = f"Model {self.file_to_remove} not found." \
                  f" Please check if the name you typed is a custom model. Pre-loaded models can not be deleted."
            gui_logger.error(msg)
            self.popup.destroy()
            raise RuntimeError(msg)

        gui_logger.info("Model successfully removed! The effect will be visible after restarting PlantSeg")
        self.popup.destroy()
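The method boils down to a YAML round-trip (load the custom zoo, delete the key, dump it back) plus removing the model's directory. A minimal standalone sketch of that pattern, with hypothetical locations standing in for plantseg's custom_zoo, home_path and PLANTSEG_MODELS_DIR constants:

import os
import yaml
from shutil import rmtree

CUSTOM_ZOO = os.path.expanduser("~/.plantseg_models/custom_zoo.yaml")  # assumed location
MODELS_DIR = os.path.expanduser("~/.plantseg_models")                  # assumed location

def remove_custom_model(model_name):
    # load -> delete key -> dump the custom zoo file
    with open(CUSTOM_ZOO, 'r') as f:
        zoo = yaml.load(f, Loader=yaml.FullLoader) or {}

    if model_name not in zoo:
        raise RuntimeError(f"Model {model_name} is not a custom model and cannot be deleted.")

    del zoo[model_name]
    with open(CUSTOM_ZOO, 'w') as f:
        yaml.dump(zoo, f)

    # remove the copied model files as well
    model_dir = os.path.join(MODELS_DIR, model_name)
    if os.path.exists(model_dir):
        rmtree(model_dir)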
Example #2
    def read_process_write(self, input_path):
        gui_logger.info(f'Loading stack from {input_path}')
        input_data, voxel_size = self.load_stack(input_path)

        output_data = self.process(input_data)
        # TODO: voxel_size may change after pre-/post-processing (i.e. when scaling is used); adapt accordingly

        output_path = self._create_output_path(input_path)
        gui_logger.info(f'Saving results in {output_path}')
        self.save_output(output_data, output_path, voxel_size)

        if self.save_raw:
            raw_path = self._raw_path(input_path)
            if os.path.exists(raw_path):
                with h5py.File(raw_path, 'r') as f:
                    raw = f['raw'][...]
                with h5py.File(output_path, 'r+') as f:
                    f.create_dataset('raw', data=raw, compression='gzip')
                    # save voxel_size
                    f['raw'].attrs['element_size_um'] = voxel_size
            else:
                gui_logger.warning(
                    f'Cannot save raw input: {raw_path} not found')

        return output_path
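For reference, a short sketch of how the resulting H5 file can be inspected afterwards. The name of the processed dataset depends on the step's h5_output_key, so the sketch simply iterates over whatever datasets are present and prints the voxel size stored in the 'element_size_um' attribute:

import h5py

def inspect_output(output_path):
    # list every top-level dataset written by the step, with its voxel size (if any)
    with h5py.File(output_path, 'r') as f:
        for key, ds in f.items():
            if isinstance(ds, h5py.Dataset):
                voxel_size = ds.attrs.get('element_size_um', None)
                print(key, ds.shape, ds.dtype, voxel_size)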
Example #3
    def process(self, image):
        gui_logger.info(f"Preprocessing files...")

        image = self.filter(image, self.filter_param)
        image = _rescale(image, self.factor, self.order)

        return image
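`_rescale` itself is not part of the snippet. A plausible stand-in (an assumption, not plantseg's actual helper) wraps scipy.ndimage.zoom, with `factor` as the per-axis zoom and `order` as the spline interpolation order:

import numpy as np
from scipy.ndimage import zoom

def _rescale(image, factor, order):
    # skip the interpolation entirely when no rescaling is requested
    factor = np.asarray(factor, dtype=float)
    if np.allclose(factor, 1.0):
        return image
    return zoom(image, zoom=factor, order=order)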
Example #4
    def process(self, pmaps):
        gui_logger.info('Clustering with GASP...')

        # Pmaps are interpreted as affinities
        affinities = np.stack([pmaps, pmaps, pmaps], axis=0)

        offsets = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
        # Shift is required to correctly align the affinities
        affinities = shift_affinities(affinities, offsets=offsets)

        # invert affinities
        affinities = 1 - affinities

        # Run GASP:
        if self.run_ws:
            # In this case the agglomeration is initialized with superpixels:
            # use additional option 'intersect_with_boundary_pixels' to break the SP along the boundaries
            # (see CREMI-experiments script for an example)
            superpixel_gen = WatershedOnDistanceTransformFromAffinities(
                offsets,
                threshold=self.ws_threshold,
                min_segment_size=self.ws_minsize,
                preserve_membrane=True,
                sigma_seeds=self.ws_sigma,
                stacked_2d=self.ws_2d,
                used_offsets=[0, 1, 2],
                offset_weights=[1, 1, 1],
                n_threads=self.n_threads)

        else:
            superpixel_gen = None

        # start real world clock timer
        runtime = time.time()

        run_GASP_kwargs = {
            'linkage_criteria': self.gasp_linkage_criteria,
            'add_cannot_link_constraints': False,
            'use_efficient_implementations': False
        }

        # Init and run Gasp
        gasp_instance = GaspFromAffinities(offsets,
                                           superpixel_generator=superpixel_gen,
                                           run_GASP_kwargs=run_GASP_kwargs,
                                           n_threads=self.n_threads,
                                           beta_bias=self.beta)
        # running gasp
        segmentation, _ = gasp_instance(affinities)

        # init and run size threshold
        size_threshold = SizeThreshAndGrowWithWS(self.post_minsize, offsets)
        segmentation = size_threshold(affinities, segmentation)

        # stop real world clock timer
        runtime = time.time() - runtime
        gui_logger.info(f"Clustering took {runtime:.2f} s")

        return segmentation
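Before the clustering itself, the single boundary probability map is turned into a three-channel affinity volume: replicated once per offset and inverted, so that high boundary probability means low affinity. A tiny numpy illustration of that preparation step (shift_affinities, WatershedOnDistanceTransformFromAffinities and GaspFromAffinities come from the external GASP package and are not sketched here):

import numpy as np

pmaps = np.random.rand(8, 64, 64).astype('float32')  # dummy boundary probabilities in [0, 1]
affinities = np.stack([pmaps, pmaps, pmaps], axis=0)  # one channel per offset, shape (3, 8, 64, 64)
affinities = 1.0 - affinities                         # boundaries (high pmap) become low affinity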
Example #5
    def __call__(self):
        if not self.state:
            gui_logger.info(
                f"Skipping '{self.__class__.__name__}'. Disabled by the user.")
            return self.input_paths
        else:
            return [
                self.read_process_write(input_path)
                for input_path in self.input_paths
            ]
Example #6
    def process(self, image):
        gui_logger.info("Preprocessing files...")
        if self.crop is not None:
            gui_logger.info(f"Cropping input image to: {self.crop}")
            image = image[self.crop]

        image = self.filter(image, self.filter_param)
        image = _rescale(image, self.factor, self.order)

        return image
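Here `self.crop` is presumably a tuple of slice objects, one per axis, so plain numpy indexing performs the cropping before filtering and rescaling. A minimal illustration of that assumption:

import numpy as np

image = np.zeros((32, 512, 512), dtype='float32')
crop = (slice(None), slice(0, 100), slice(0, 100))  # all z-slices, 100x100 xy window
cropped = image[crop]                               # shape: (32, 100, 100)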
Example #7
    def process(self, pmaps):
        gui_logger.info('Clustering with MultiCut...')
        runtime = time.time()
        segmentation = self.segment_volume(pmaps)

        if self.post_minsize > self.ws_minsize:
            segmentation, _ = apply_size_filter(segmentation, pmaps, self.post_minsize)

        # stop real world clock timer
        runtime = time.time() - runtime
        gui_logger.info(f"Clustering took {runtime:.2f} s")

        return segmentation
Example #8
    def read_process_write(self, input_path):
        gui_logger.info(f'Loading stack from {input_path}')
        input_data, voxel_size = self.load_stack(input_path)

        output_data = self.process(input_data)
        # TODO: voxel_size may change after pre-/post-processing (i.e. when scaling is used); adapt accordingly

        output_path = self._create_output_path(input_path)
        gui_logger.info(f'Saving results in {output_path}')
        self.save_output(output_data, output_path, voxel_size)

        return output_path
Example #9
def raw2seg(config):
    input_paths = load_paths(config)
    gui_logger.info(f"Running the pipeline on: {input_paths}")

    gui_logger.info("Executing pipeline, see terminal for verbose logs.")
    all_pipeline_steps = [('preprocessing', configure_preprocessing_step),
                          ('cnn_prediction', configure_cnn_step),
                          ('cnn_postprocessing',
                           configure_cnn_postprocessing_step),
                          ('segmentation', configure_segmentation_step),
                          ('segmentation_postprocessing',
                           configure_segmentation_postprocessing_step)]

    for pipeline_step_name, pipeline_step_setup in all_pipeline_steps:
        gui_logger.info(
            f"Executing pipeline step: '{pipeline_step_name}'. Parameters: '{config[pipeline_step_name]}'. Files {input_paths}."
        )
        pipeline_step = pipeline_step_setup(input_paths,
                                            config[pipeline_step_name])
        output_paths = pipeline_step()

        # replace input_paths for all pipeline steps except DataPostProcessing3D
        if not isinstance(pipeline_step, DataPostProcessing3D):
            input_paths = output_paths

    gui_logger.info(f"Pipeline execution finished!")
Example #10
    def submit(self, config):
        """
        Executes the pipeline task described by the config. If the work_queue is full, an exception
        is raised; it is up to the caller to check whether the work_queue is full before submitting.
        """

        gui_logger.info(
            f"Executing segmentation pipeline for config: {config}")
        # add config to the queue
        self.work_queue.put_nowait(config)
        # execute segmentation pipeline
        future = self.executor.submit(raw2seg, config)
        # remove the config from the queue when finished
        future.add_done_callback(self._done_callback)
        return future
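The work_queue, executor and _done_callback are set up elsewhere and not shown. A hedged sketch of what that wiring could look like with the standard library (class name, queue size and worker count are assumptions):

import queue
from concurrent.futures import ThreadPoolExecutor

class PipelineRunner:
    def __init__(self, max_size=1):
        # bounded queue: submit() fails fast via put_nowait when a run is already pending
        self.work_queue = queue.Queue(maxsize=max_size)
        self.executor = ThreadPoolExecutor(max_workers=1)

    def _done_callback(self, future):
        # free the queue slot once raw2seg has finished (sketch of the unshown callback)
        self.work_queue.get_nowait()
        self.work_queue.task_done()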
Example #11
    def process(self, pmaps):
        gui_logger.info('Clustering with LiftedMulticut...')
        boundary_pmaps, nuclei_pmaps = pmaps
        runtime = time.time()
        segmentation = segment_volume_lmc(boundary_pmaps, nuclei_pmaps,
                                          self.ws_threshold, self.ws_sigma,
                                          self.ws_minsize)

        if self.post_minsize > self.ws_minsize:
            segmentation, _ = apply_size_filter(segmentation, boundary_pmaps,
                                                self.post_minsize)

        # stop real world clock timer
        runtime = time.time() - runtime
        gui_logger.info(f"Clustering took {runtime:.2f} s")

        return segmentation
Example #12
    def load_model(self):
        # Model path
        path = self.file_dialog.files.get()
        # Get name
        model_name = str(self.simple_entry1.tk_value.get())
        # Get resolution
        resolution = [float(value.get()) for value in self.list_entry.tk_value]
        # Get description
        description = str(self.simple_entry2.tk_value.get())

        dest_dir = os.path.join(home_path, PLANTSEG_MODELS_DIR, model_name)
        os.makedirs(dest_dir, exist_ok=True)
        all_files = glob.glob(os.path.join(path, "*"))
        all_expected_files = [
            'config_train.yml', 'last_checkpoint.pytorch',
            'best_checkpoint.pytorch'
        ]
        for file in all_files:
            if os.path.basename(file) in all_expected_files:
                copy2(file, dest_dir)
                all_expected_files.remove(os.path.basename(file))

        if len(all_expected_files) != 0:
            msg = f'Could not find the required files {all_expected_files} in the specified directory; ' \
                  f'the model cannot be loaded.'
            gui_logger.error(msg)
            self.popup.destroy()
            raise RuntimeError(msg)

        custom_zoo_dict = yaml.load(open(custom_zoo, 'r'),
                                    Loader=yaml.FullLoader)
        if custom_zoo_dict is None:
            custom_zoo_dict = {}

        custom_zoo_dict[model_name] = {}
        custom_zoo_dict[model_name]["path"] = path
        custom_zoo_dict[model_name]["resolution"] = resolution
        custom_zoo_dict[model_name]["description"] = description

        with open(custom_zoo, 'w') as f:
            yaml.dump(custom_zoo_dict, f)

        gui_logger.info("Model successfully added!")
        self.restart()
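After load_model() the custom zoo file holds one entry per added model, mirroring the dictionary written above (path, resolution, description). A short sketch of reading such an entry back, with an assumed file location and a hypothetical model name:

import os
import yaml

CUSTOM_ZOO = os.path.expanduser("~/.plantseg_models/custom_zoo.yaml")  # assumed location

with open(CUSTOM_ZOO, 'r') as f:
    zoo = yaml.load(f, Loader=yaml.FullLoader) or {}

# each entry mirrors the dictionary written by load_model()
entry = zoo['my_model']  # hypothetical model name
print(entry['path'], entry['resolution'], entry['description'])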
Example #13
    def load_stack(self, file_path):
        """
        Load data from a given file.

        Args:
            file_path (str): path to the file containing the stack

        Returns:
            tuple(np.ndarray, tuple(float)): (numpy array containing the stack's data, the stack's voxel size)
        """
        _, ext = os.path.splitext(file_path)

        if ext in TIFF_EXTENSIONS:
            # load tiff file
            data = tifffile.imread(file_path)
            # parse voxel_size
            voxel_size = read_tiff_voxel_size(file_path)
        elif ext in H5_EXTENSIONS:
            # load data from H5 file
            with h5py.File(file_path, "r") as f:
                h5_input_key = find_input_key(f)
                gui_logger.info(
                    f"Found '{h5_input_key}' dataset inside {file_path}")
                # set h5_output_key to be the same as h5_input_key if h5_output_key not defined
                if self.h5_output_key is None:
                    self.h5_output_key = h5_input_key

                ds = f[h5_input_key]
                data = ds[...]

            # Parse voxel size
            voxel_size = read_h5_voxel_size(file_path)

        else:
            raise RuntimeError("Unsupported file extension")

        # reshape data to 3D always
        data = np.nan_to_num(data)
        data = self._fix_input_shape(data)

        # normalize data according to processing type
        data = self._adjust_input_type(data)
        return data, voxel_size
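read_h5_voxel_size is not shown here; judging from Example #2, which stores the voxel size in the 'element_size_um' attribute, a plausible stand-in (an assumption, not the actual plantseg helper) could look like this:

import h5py

def read_h5_voxel_size_sketch(file_path, key='raw', default=(1.0, 1.0, 1.0)):
    # return the voxel size stored on the dataset, falling back to a default
    with h5py.File(file_path, 'r') as f:
        if key in f and 'element_size_um' in f[key].attrs:
            return tuple(f[key].attrs['element_size_um'])
    return default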
Example #14
    def read_process_write(self, input_path):
        gui_logger.info(f'Loading stack from {input_path}')
        boundary_pmaps, voxel_size = self.load_stack(input_path)
        nuclei_pmaps_path = self._find_nuclei_pmaps_path(input_path)
        if nuclei_pmaps_path is None:
            raise RuntimeError(
                f'Cannot find nuclei probability maps for: {input_path}')
        nuclei_pmaps, _ = self.load_stack(nuclei_pmaps_path)

        # pass boundary_pmaps and nuclei_pmaps to process with Lifted Multicut
        pmaps = (boundary_pmaps, nuclei_pmaps)
        output_data = self.process(pmaps)

        output_path = self._create_output_path(input_path)
        gui_logger.info(f'Saving results in {output_path}')
        self.save_output(output_data, output_path, voxel_size)

        if self.save_raw:
            self.save_raw_dataset(input_path, output_path, voxel_size)

        return output_path
Example #15
def check_models(model_name, update_files=False):
    """
    Simple helper to check for, and if necessary download, trained models
    """
    model_dir = os.path.join(os.path.expanduser("~"), PLANTSEG_MODELS_DIR,
                             model_name)

    # Check if the model directory exists; if not, create it
    if not os.path.exists(model_dir):
        os.makedirs(model_dir, exist_ok=True)

    config_exists = os.path.exists(os.path.join(model_dir, CONFIG_TRAIN_YAML))
    best_exists = os.path.exists(os.path.join(model_dir, BEST_MODEL_PYTORCH))
    last_exists = os.path.exists(os.path.join(model_dir, LAST_MODEL_PYTORCH))

    # Check if all files are there; if not (or if an update is forced), download them
    if not config_exists or not best_exists or not last_exists or update_files:

        # Read config
        model_file = os.path.join(plantseg_global_path, "resources",
                                  "models_zoo.yaml")
        config = yaml.load(open(model_file, 'r'), Loader=yaml.FullLoader)

        if model_name in config:
            url = config[model_name]["path"]

            gui_logger.info(f"Downloading model files from: '{url}' ...")
            wget.download(url + CONFIG_TRAIN_YAML, out=model_dir)
            wget.download(url + BEST_MODEL_PYTORCH, out=model_dir)
            wget.download(url + LAST_MODEL_PYTORCH, out=model_dir)
        else:
            raise RuntimeError(
                f"Custom model {model_name} corrupted. Required files not found."
            )
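check_models expects resources/models_zoo.yaml to map each model name to a base URL from which the three files are fetched (note the trailing slash, since the filenames are appended directly). Typical usage would then simply be (the model name and URL below are placeholders):

# models_zoo.yaml entry, schematically:
#   some_confocal_unet:
#     path: https://example.org/models/some_confocal_unet/
check_models("some_confocal_unet")                      # download on first use
check_models("some_confocal_unet", update_files=True)   # force re-download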
Example #16
    def __call__(self):
        logger = utils.get_logger('UNet3DPredictor')

        if not self.state:
            # skip network predictions and return input_paths
            gui_logger.info(
                f"Skipping '{self.__class__.__name__}'. Disabled by the user.")
            return self.paths
        else:
            # create config/download models only when cnn_prediction enabled
            config = create_predict_config(self.paths, self.cnn_config)

            # Create the model
            model = get_model(config)

            # Load model state
            model_path = config['model_path']
            model_name = config["model_name"]

            logger.info(f"Loading model '{model_name}' from {model_path}")
            utils.load_checkpoint(model_path, model)
            logger.info(f"Sending the model to '{config['device']}'")
            model = model.to(config['device'])

            logger.info('Loading HDF5 datasets...')

            # Run prediction
            output_paths = []
            for test_loader in get_test_loaders(config):
                gui_logger.info(
                    f"Running network prediction on {test_loader.dataset.file_path}..."
                )
                runtime = time.time()

                logger.info(f"Processing '{test_loader.dataset.file_path}'...")

                output_file = _get_output_file(test_loader.dataset, model_name)

                predictor = _get_predictor(model, test_loader, output_file,
                                           config)

                # run the model prediction on the entire dataset and save to the 'output_file' H5
                predictor.predict()

                # save resulting output path
                output_paths.append(output_file)

                runtime = time.time() - runtime
                gui_logger.info(f"Network prediction took {runtime:.2f} s")

            self._update_voxel_size(self.paths, output_paths)

            # free GPU memory after the inference is finished
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            return output_paths
Example #17
    def shutdown(self, wait=False):
        gui_logger.info("Shutting down")
        self.executor.shutdown(wait=wait)
Example #18
    def process(self, image):
        gui_logger.info("Postprocessing files...")

        image = _rescale(image, self.factor, self.order)

        return image