def get_dependencies(requirements_txt=None):
    """Yield ``(package_name, version)`` pairs parsed from a requirements file.

    :param requirements_txt: Path to a pip requirements file. Defaults to the
                             SCT-bundled ``requirements.txt``.
    :yields: Tuples ``(pkg, ver)`` where ``ver`` is the ``==``-pinned version
             string, or ``None`` if the requirement is not pinned with ``==``.
    """
    if requirements_txt is None:
        requirements_txt = sct_dir_local_path("requirements.txt")

    # workaround for https://github.com/davidfischer/requirements-parser/issues/39
    warnings.filterwarnings(action='ignore', module='requirements')

    # Use a context manager so the file handle is closed deterministically
    # (the previous code leaked the open handle for the process lifetime).
    with open(requirements_txt, "r", encoding="utf-8") as fp:
        for req in requirements.parse(fp):
            if ';' in req.line:  # handle environment markers; TODO: move this upstream into requirements-parser
                condition = req.line.split(';', 1)[-1].strip()
                if not _test_condition(condition):
                    continue
            pkg = req.name
            # TODO: just return req directly and make sure caller can deal with fancier specs
            ver = dict(req.specs).get("==", None)
            yield pkg, ver
def _load_metric_value(csv_filepath, row, pos):
    """Read a single CSV cell and return it as a float.

    :param csv_filepath: Path to the CSV file.
    :param row: 0-based row index into the raw CSV rows (header included).
    :param pos: 0-based column index within that row.
    """
    with open(csv_filepath, newline='') as csvfile:
        rows = list(csv.reader(csvfile, delimiter=','))
    return float(rows[row][pos])


def test_batch_processing_results(csv_filepath, row, pos):
    """Ensure that new batch_processing.sh results are approximately equal to the cached baseline results."""
    sct_dir = pathlib.Path(sct_dir_local_path())
    csv_filepath_old = sct_dir / "unit_testing/batch_processing/cached_results" / csv_filepath
    csv_filepath_new = sct_dir / "sct_example_data" / csv_filepath
    assert csv_filepath_old.is_file(
    ), f"{csv_filepath_old} not present. Please check the SCT installation."
    assert csv_filepath_new.is_file(
    ), f"{csv_filepath_new} not present. Was batch_processing.sh run beforehand?"

    # Row/position varies depending on metric; both files are read identically.
    metric_value_old = _load_metric_value(csv_filepath_old, row, pos)
    metric_value_new = _load_metric_value(csv_filepath_new, row, pos)

    assert metric_value_new == pytest.approx(
        metric_value_old)  # Default rel_tolerance: 1e-6
示例#3
0
def detect_centerline(img, contrast, verbose=1):
    """Detect spinal cord centerline using OptiC.

    :param img: input Image() object. Left unmodified (a copy is processed).
    :param contrast: str: The type of contrast. Will define the path to Optic model.
    :param verbose: int: If >= 2, the temporary working folder is kept for debugging.
    :returns: Image(): Output centerline
    """

    # Fetch path to Optic model based on contrast
    optic_models_path = sct_dir_local_path('data', 'optic_models',
                                           '{}_model'.format(contrast))

    logger.debug('Detecting the spinal cord using OptiC')
    img_orientation = img.orientation

    temp_folder = TempFolder()
    temp_folder.chdir()

    # Work on a copy so the caller's image data is not mutated in place.
    # (The previous code zeroed NaN/inf directly inside img.data as well,
    # which silently altered the input image — and duplicated the cleanup.)
    img_int16 = img.copy()
    # convert image data type to int16, as required by opencv (backend in OptiC)
    # Replace non-numeric values by zero before rescaling/casting
    img_int16.data[np.isnan(img_int16.data)] = 0
    img_int16.data[np.isinf(img_int16.data)] = 0
    # rescale intensity to the full uint16 range
    min_out = np.iinfo('uint16').min
    max_out = np.iinfo('uint16').max
    min_in = np.min(img_int16.data)
    max_in = np.max(img_int16.data)
    # NOTE(review): a constant image (max_in == min_in) divides by zero here —
    # preserved from the original behavior; confirm whether a guard is needed.
    data_rescaled = img_int16.data.astype('float') * (max_out - min_out) / (max_in - min_in)
    # Shift so the minimum maps exactly onto min_out
    img_int16.data = data_rescaled - (data_rescaled.min() - min_out)
    # change data type
    img_int16.change_type(np.uint16)
    # reorient the input image to RPI + convert to .nii
    img_int16.change_orientation('RPI')
    file_img = 'img_rpi_uint16'
    img_int16.save(file_img + '.nii')

    # call the OptiC method to generate the spinal cord centerline
    optic_input = file_img
    optic_filename = file_img + '_optic'
    os.environ["FSLOUTPUTTYPE"] = "NIFTI_PAIR"
    cmd_optic = [
        'isct_spine_detect',
        '-ctype=dpdt',
        '-lambda=1',
        optic_models_path,
        optic_input,
        optic_filename,
    ]
    # TODO: output coordinates, for each slice, in continuous (not discrete) values.

    run_proc(cmd_optic, is_sct_binary=True, verbose=0)

    # convert .img and .hdr files to .nii.gz, reoriented back to the input orientation
    img_ctl = Image(file_img + '_optic_ctr.hdr')
    img_ctl.change_orientation(img_orientation)

    # return to initial folder
    temp_folder.chdir_undo()
    if verbose < 2:
        logger.info("Remove temporary files...")
        temp_folder.cleanup()

    return img_ctl
示例#4
0
#
# About the license: see the file LICENSE.TXT
###############################################################################

import sys
import os
import logging
from typing import Mapping
from hashlib import md5

import pytest

from spinalcordtoolbox.utils.sys import sct_dir_local_path, sct_test_path

# FIXME: APIFY
sys.path.append(sct_dir_local_path('scripts'))
import sct_download_data as downloader

logger = logging.getLogger(__name__)


def pytest_sessionstart():
    """ Download sct_testing_data prior to test collection. """
    logger.info("Downloading sct test data")
    download_args = ['-d', 'sct_testing_data', '-o', sct_test_path()]
    downloader.main(download_args)


@pytest.fixture(scope="session", autouse=True)
def test_data_integrity(request):
    files_checksums = dict()
    for root, _, files in os.walk(sct_test_path()):
#!/usr/bin/env python
# -*- coding: utf-8
# pytest unit tests to validate the results of the batch_processing.sh script

import os
import pathlib
import csv

import pytest

from spinalcordtoolbox.utils.sys import sct_dir_local_path

# Root of the SCT installation, resolved once at import time.
SCT_DIR = pathlib.Path(sct_dir_local_path())
# Baseline results committed to the repository, used as the reference.
CACHE_DIR = SCT_DIR / "testing" / "batch_processing" / "cached_results"
# Freshly-generated results from a prior run of batch_processing.sh.
OUTPUT_DIR = SCT_DIR / "data" / "sct_example_data"

# TODO: We can and should be verifying more results produced by this pipeline, but which values?
# Each entry is (relative CSV path, 0-based data-row index, column name).
TESTED_VALUES = [("t2/csa_c2c3.csv", 0, "MEAN(area)"),
                 ("t2/csa_pmj.csv", 0, "MEAN(area)"),
                 ("t2s/csa_gm.csv", 3, "MEAN(area)"),
                 ("t2s/csa_wm.csv", 3, "MEAN(area)"),
                 ("mt/mtr_in_wm.csv", 0, "MAP()"),
                 ("dmri/fa_in_cst.csv", 0, "WA()"),
                 ("dmri/fa_in_cst.csv", 1, "WA()")]


def get_csv_float_value(csv_filepath, row, column):
    """Return the float value at (``row``, ``column``) from a header-based CSV file.

    :param csv_filepath: Path to the CSV file (first line is the header).
    :param row: 0-based index of the data row (header excluded).
    :param column: Name of the column, as spelled in the header.
    :return: The cell value converted to float.
    :raises IndexError: If the file has fewer than ``row + 1`` data rows.
    """
    with open(csv_filepath, newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=',')
        # Stream the file instead of materializing every row just to index one;
        # this also avoids shadowing the `row` parameter with a loop variable.
        for idx, record in enumerate(reader):
            if idx == row:
                return float(record[column])
    # Same exception type as the previous list-indexing implementation.
    raise IndexError(f"{csv_filepath} has fewer than {row + 1} data rows")