Example #1
def run(config_path, tiles):
    """
    The worker function that process the tiles.
    """
    # delayed load
    from pytorch3dunet.datasets.utils import get_test_loaders
    from pytorch3dunet.predict import _get_predictor

    # load config
    config = load_config(config_path)

    # load model
    model = load_model(config)

    # downsample tiles and write them to scratch
    # NOTE: the loop body is a stub in this snippet
    for tid, tile in enumerate(tiles):
        pass

    # update config path so the loaders pick up the downsampled tiles
    # (unimplemented here; Example #3 sets config["loaders"]["test"]["file_paths"])

    for test_loader in get_test_loaders(config):
        logger.info(f"Processing '{test_loader.dataset.file_path}'...")

        output_file = _get_output_file(test_loader.dataset)

        predictor = _get_predictor(model, test_loader, output_file, config)
        # run the model prediction on the entire dataset and save to the 'output_file' H5
        predictor.predict()
Example #2
def main():
    # Load configuration
    config = load_config()

    # Create the model
    model = get_model(config)

    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    # use DataParallel if more than 1 GPU available
    device = config['device']
    if torch.cuda.device_count() > 1 and device.type != 'cpu':
        model = nn.DataParallel(model)
        logger.info(f'Using {torch.cuda.device_count()} GPUs for prediction')

    logger.info(f"Sending the model to '{device}'")
    model = model.to(device)

    output_dir = config['loaders'].get('output_dir', None)
    if output_dir is not None:
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f'Saving predictions to: {output_dir}')

    for test_loader in get_test_loaders(config):
        logger.info(f"Processing '{test_loader.dataset.file_path}'...")

        output_file = _get_output_file(dataset=test_loader.dataset,
                                       output_dir=output_dir)

        predictor = _get_predictor(model, test_loader, output_file, config)
        # run the model prediction on the entire dataset and save to the 'output_file' H5
        predictor.predict()
Example #3
def run(config_path, paths):
    """
    The worker function that process the tiles.

    Args:
        config_path (str) 
        paths (Iterator)
    """
    # load config
    config = load_config(config_path)

    # load model
    model = load_model(config)

    # update config path
    config["loaders"]["test"]["file_paths"] = paths

    output_paths = []
    for test_loader in get_test_loaders(config):
        logger.info(f"Processing '{test_loader.dataset.file_path}'...")

        output_file = _get_output_file(test_loader.dataset)

        predictor = _get_predictor(model, test_loader, output_file, config)
        # run the model prediction on the entire dataset and save to the 'output_file' H5
        predictor.predict()

        output_paths.append(output_file)

    return output_paths
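
For reference, a minimal sketch of invoking the `run` worker above; the config path and H5 file names are hypothetical placeholders, not values from the source:

# Hypothetical usage of run(); the YAML path and file names are placeholders.
output_paths = run("test_config.yml", ["/data/vol_000.h5", "/data/vol_001.h5"])
for p in output_paths:
    print(f"predictions written to {p}")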
Example #4
def main():
    # Load configuration
    config = load_config()

    # Create the model
    model = get_model(config['model'])

    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    # use DataParallel if more than 1 GPU available
    device = config['device']
    if torch.cuda.device_count() > 1 and device.type != 'cpu':
        model = nn.DataParallel(model)
        logger.info(f'Using {torch.cuda.device_count()} GPUs for prediction')

    logger.info(f"Sending the model to '{device}'")
    model = model.to(device)

    output_dir = config['loaders'].get('output_dir', None)
    if output_dir is not None:
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f'Saving predictions to: {output_dir}')

    # create predictor instance
    predictor = _get_predictor(model, output_dir, config)

    for test_loader in get_test_loaders(config):
        # run the model prediction on the test_loader and save the results in the output_dir
        predictor(test_loader)
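
The `main` above assumes the loaded config exposes a handful of keys. A minimal sketch of that shape, with purely illustrative values (not taken from the source):

import torch

# Illustrative config shape consumed by main(); every value is a placeholder.
config = {
    'model': {},                               # model spec passed to get_model()
    'model_path': 'best_checkpoint.pytorch',   # checkpoint for load_checkpoint()
    'device': torch.device('cpu'),             # queried via device.type above
    'loaders': {
        'output_dir': 'predictions',           # optional; created with os.makedirs
        'test': {'file_paths': ['raw.h5']},    # inputs read by get_test_loaders()
    },
}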
Example #5
    def test_standard_predictor(self, tmpdir, test_config):
        # Add output dir
        test_config['loaders']['output_dir'] = tmpdir

        # create random dataset
        tmp = NamedTemporaryFile(delete=False)

        with h5py.File(tmp.name, 'w') as f:
            shape = (32, 64, 64)
            f.create_dataset('raw', data=np.random.rand(*shape))

        # Add input file
        test_config['loaders']['test']['file_paths'] = [tmp.name]

        # Create the model with random weights
        model = get_model(test_config)
        # Create device and update config
        device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
        test_config['device'] = device
        model = model.to(device)

        for test_loader in get_test_loaders(test_config):

            output_file = _get_output_file(dataset=test_loader.dataset,
                                           output_dir=tmpdir)

            predictor = _get_predictor(model, test_loader, output_file,
                                       test_config)
            # run the model prediction on the entire dataset and save to the 'output_file' H5
            predictor.predict()
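
After `predictor.predict()` returns, one way to sanity-check the result is to read the output H5 back; the 'predictions' dataset name is an assumption (the usual library default), not something this test asserts:

import h5py

# Hypothetical check, continuing inside the loop above after predict() returns;
# the 'predictions' dataset name is assumed, not taken from this snippet.
with h5py.File(output_file, 'r') as f:
    print(list(f.keys()))        # e.g. ['predictions']
    probs = f['predictions'][:]  # probability volume written by the predictor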
Example #6
    def __call__(self):
        logger = utils.get_logger('UNet3DPredictor')

        if not self.state:
            # skip network predictions and return input_paths
            gui_logger.info(
                f"Skipping '{self.__class__.__name__}'. Disabled by the user.")
            return self.paths
        else:
            # create config/download models only when cnn_prediction enabled
            config = create_predict_config(self.paths, self.cnn_config)

            # Create the model
            model = get_model(config)

            # Load model state
            model_path = config['model_path']
            model_name = config["model_name"]

            logger.info(f"Loading model '{model_name}' from {model_path}")
            utils.load_checkpoint(model_path, model)
            logger.info(f"Sending the model to '{config['device']}'")
            model = model.to(config['device'])

            logger.info('Loading HDF5 datasets...')

            # Run prediction
            output_paths = []
            for test_loader in get_test_loaders(config):
                gui_logger.info(
                    f"Running network prediction on {test_loader.dataset.file_path}..."
                )
                runtime = time.time()

                logger.info(f"Processing '{test_loader.dataset.file_path}'...")

                output_file = _get_output_file(test_loader.dataset, model_name)

                predictor = _get_predictor(model, test_loader, output_file,
                                           config)

                # run the model prediction on the entire dataset and save to the 'output_file' H5
                predictor.predict()

                # save resulting output path
                output_paths.append(output_file)

                runtime = time.time() - runtime
                gui_logger.info(f"Network prediction took {runtime:.2f} s")

            self._update_voxel_size(self.paths, output_paths)

            # free GPU memory after the inference is finished
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            return output_paths
Example #7
def main():
    parser = ArgumentParser()
    parser.add_argument("-r",
                        "--runconfig",
                        dest='runconfig',
                        type=str,
                        required=True,
                        help="The run config YAML file")
    parser.add_argument("-n",
                        "--numworkers",
                        dest='numworkers',
                        type=int,
                        required=True,
                        help="Number of workers")
    parser.add_argument("-d",
                        "--device",
                        dest='device',
                        type=str,
                        required=False,
                        help="Device")

    args = parser.parse_args()
    runconfig = args.runconfig
    # argparse already returns an int here (type=int), so no extra cast is needed
    nworkers = args.numworkers

    # Load configuration
    config = load_config(runconfig, nworkers, args.device)

    # Create the model
    model = get_model(config['model'])

    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    # use DataParallel if more than 1 GPU available
    device = config['device']
    if torch.cuda.device_count() > 1 and device.type != 'cpu':
        model = nn.DataParallel(model)
        logger.info(f'Using {torch.cuda.device_count()} GPUs for prediction')

    logger.info(f"Sending the model to '{device}'")
    model = model.to(device)

    output_dir = config['loaders'].get('output_dir', None)
    if output_dir is not None:
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f'Saving predictions to: {output_dir}')

    # create predictor instance
    predictor = _get_predictor(model, output_dir, config)

    for test_loader in get_test_loaders(config):
        # run the model prediction on the test_loader and save the results in the output_dir
        predictor(test_loader)
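
A hypothetical command line matching the parser above (the script name and argument values are placeholders):

# python predict.py --runconfig run_config.yml --numworkers 4 --device cuda:0
# the short flags work as well:
# python predict.py -r run_config.yml -n 4 -d cuda:0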
Example #8
def infer(h5_paths: List[str], config_path: str, dst_dir: str):
    # load model
    config = load_config(config_path)
    model = load_model(config)

    # update file path
    config["loaders"]["test"]["file_paths"] = h5_paths

    prob_h5_paths = []
    for test_loader in get_test_loaders(config):
        logger.info(f"Processing '{test_loader.dataset.file_path}'...")

        fname = os.path.basename(test_loader.dataset.file_path)
        output_file = os.path.join(dst_dir, fname)

        predictor = _get_predictor(model, test_loader, output_file, config)
        predictor.predict()

        prob_h5_paths.append(output_file)
    return prob_h5_paths
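
And a hypothetical call to `infer`; the input paths, config path, and destination directory are placeholders:

# Hypothetical usage; infer() returns the paths of the probability H5 files.
prob_paths = infer(["/data/raw_000.h5"], "predict_config.yml", "/tmp/probs")
print(prob_paths)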