Example #1
import torch
import torch.nn as nn


def main():
    # Load configuration
    config = load_config()

    # Create the logger
    logfile = config.get('logfile')
    logger = utils.get_logger('UNet3DPredictor', logfile=logfile)

    # Create the model
    model = get_model(config)

    # Use multiple GPUs if available
    if torch.cuda.device_count() > 1:
        logger.info(f'There are {torch.cuda.device_count()} GPUs available')
        model = nn.DataParallel(model)

    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])

    logger.info('Loading HDF5 datasets...')
    for test_loader in get_test_loaders(config):
        logger.info(f"Processing '{test_loader.dataset.file_path}'...")

        output_file = _get_output_file(config['output_folder'],
                                       test_loader.dataset)
        logger.info(output_file)
        predictor = _get_predictor(model, test_loader, output_file, config)
        # run the model prediction on the entire dataset and save to the 'output_file' H5
        predictor.predict()
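
The predictor object returned by `_get_predictor` is defined elsewhere in the project, so its exact behavior is not shown in this excerpt. As a rough, hypothetical sketch of what such a predictor typically does (run the model over the loader under `torch.no_grad()` and write the stacked probability maps to the output H5 file), assuming the loader yields plain input tensors; the class name and the `'predictions'` dataset key are assumptions, not the project's actual API:

import h5py
import numpy as np
import torch


class SimplePredictor:
    """Hypothetical stand-in for the project's predictor classes."""

    def __init__(self, model, loader, output_file, config):
        self.model = model
        self.loader = loader
        self.output_file = output_file
        self.device = config['device']

    def predict(self):
        self.model.eval()
        outputs = []
        with torch.no_grad():
            for batch in self.loader:
                batch = batch.to(self.device)
                # Forward pass; assumes the model outputs probability maps
                outputs.append(self.model(batch).cpu().numpy())
        # Stack the per-batch results and save them under one H5 dataset
        with h5py.File(self.output_file, 'w') as f:
            f.create_dataset('predictions', data=np.concatenate(outputs))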
Example #2
def main():
    # Load configuration
    config = load_config()

    # Create the model
    model = get_model(config)

    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])

    logger.info('Loading HDF5 datasets...')
    store_predictions_in_memory = config.get('store_predictions_in_memory',
                                             True)
    if store_predictions_in_memory:
        logger.info(
            'Predictions will be stored in memory. Make sure you have enough RAM for your dataset.'
        )

    for test_loader in get_test_loaders(config):
        logger.info(f"Processing '{test_loader.dataset.file_path}'...")

        output_file = _get_output_file(test_loader.dataset)
        # run the model prediction on the entire dataset and save to the 'output_file' H5
        if store_predictions_in_memory:
            predict_in_memory(model, test_loader, output_file, config)
        else:
            predict(model, test_loader, output_file, config)
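
Example #2 distinguishes an in-memory path from a plain `predict` path: keeping every probability map in RAM is simple but can exhaust memory on large volumes, so the alternative presumably writes results to disk as they are produced. A minimal sketch of that streaming idea, assuming (hypothetically) that batches arrive in order and can be appended to a resizable H5 dataset; the function name and dataset key are illustrative, not the project's API:

import h5py
import torch


def predict_streaming(model, loader, output_file, device):
    """Hypothetical streaming variant: write each batch straight to disk
    instead of accumulating all predictions in memory."""
    model.eval()
    with h5py.File(output_file, 'w') as f, torch.no_grad():
        out = None
        for batch in loader:
            probs = model(batch.to(device)).cpu().numpy()
            if out is None:
                # Resizable dataset, extendable along the batch axis
                out = f.create_dataset(
                    'predictions', data=probs,
                    maxshape=(None,) + probs.shape[1:], chunks=True)
            else:
                out.resize(out.shape[0] + probs.shape[0], axis=0)
                out[-probs.shape[0]:] = probs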
Example #3
def main():
    # Load configuration
    config = load_config()

    # Create the model
    model = get_model(config)

    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])

    logger.info('Loading HDF5 datasets...')
    for test_loader in get_test_loaders(config):
        logger.info(f"Processing '{test_loader.dataset.file_path}'...")

        output_file = _get_output_file(test_loader.dataset)
        # run the model prediction on the entire dataset and save to the 'output_file' H5
        predict(model, test_loader, output_file, config)
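
Every example above starts from `load_config()`. Its actual implementation lives in the project, but a minimal sketch of the usual pattern, a YAML file whose path is taken from the command line, might look like the following; the `--config` flag and the device-resolution step are assumptions inferred from the lookups these snippets perform (`model_path`, `device`, `datasets`, `logfile`):

import argparse

import torch
import yaml


def load_config():
    """Hypothetical config loader: parse a YAML file given on the CLI."""
    parser = argparse.ArgumentParser(description='UNet3D prediction')
    parser.add_argument('--config', required=True, help='Path to the YAML config')
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    # Resolve the device string, falling back to CPU when CUDA is absent
    device = config.get('device', 'cuda:0' if torch.cuda.is_available() else 'cpu')
    config['device'] = torch.device(device)
    return config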
Example #4
def main():
    # Load configuration
    config = load_config()

    # Create the model
    model = get_model(config)

    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])

    logger.info('Loading HDF5 datasets...')

    test_loader = get_test_loaders(config)['test']
    # run the model prediction separately for each loaded pair and save
    # each result to its own numbered H5 file
    for i, data_pair in enumerate(test_loader):
        output_file = f'predict_{i}.h5'
        predictor = _get_predictor(model, data_pair, output_file, config)
        predictor.predict()
Example #5
def main():
    # Load configuration
    config = load_config()

    # Create the model
    model = get_model(config)

    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])

    logger.info('Loading HDF5 datasets...')
    for test_loader in get_test_loaders(config):
        logger.info(f"Processing '{test_loader.dataset.file_path}'...")
        # run the model prediction on the entire dataset
        predictions = predict(model, test_loader, config)
        # save the resulting probability maps
        output_file = _get_output_file(test_loader.dataset)
        dataset_names = _get_dataset_names(config, len(predictions))
        save_predictions(predictions, output_file, dataset_names)
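
Example #5 computes the predictions first and then hands them to `save_predictions` together with per-output dataset names. A minimal sketch of such a helper, assuming `predictions` is a list of numpy arrays that pairs up one-to-one with `dataset_names` (the gzip compression choice is an assumption, not the project's setting):

import h5py


def save_predictions(predictions, output_file, dataset_names):
    """Hypothetical saver: write each prediction array under its own
    named dataset in a single H5 file."""
    assert len(predictions) == len(dataset_names)
    with h5py.File(output_file, 'w') as f:
        for name, prediction in zip(dataset_names, predictions):
            f.create_dataset(name, data=prediction, compression='gzip')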
Example #6
    datasets_config = config['datasets']

    logger.info('Loading HDF5 datasets...')
    test_loaders = get_test_loaders(config)

    for test_loader in test_loaders:
        logger.info(f"Processing '{test_loader.dataset.file_path}'...")

        output_file = _get_output_file(test_loader.dataset, out_path)

        # run the model prediction on the entire dataset and save to the 'output_file' H5
        predictions = predict(model, test_loader, config)
        dataset_names = _get_dataset_names(config, len(predictions))

        save_predictions(predictions, output_file, dataset_names)
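
Unlike the earlier snippets, Example #6 passes an extra `out_path` to `_get_output_file`, suggesting the output location is decoupled from the input file's directory. A hedged sketch of such a helper, assuming it derives the output name from the dataset's input path; the `_predictions` suffix and the parameter order are assumptions for illustration:

import os


def _get_output_file(dataset, out_path, suffix='_predictions'):
    """Hypothetical helper: place '<input-stem><suffix>.h5' in out_path."""
    stem = os.path.splitext(os.path.basename(dataset.file_path))[0]
    return os.path.join(out_path, stem + suffix + '.h5')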