コード例 #1
0
def test_save_as_csv_per_slice_then_per_level(dummy_metrics, dummy_vert_level):
    """Test with and without specifying perlevel. See: https://github.com/neuropoly/spinalcordtoolbox/issues/2141"""
    # First pass: aggregate per vertebral level and write a fresh CSV.
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], levels=[3, 4], perlevel=True,
        vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv')
    # Second pass: aggregate a single slice (perlevel unspecified) and append.
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], slices=[0],
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv', append=True)
    # The per-level row carries a VertLevel; the appended per-slice row does not.
    with open('tmp_file_out.csv', 'r') as csvfile:
        rows = list(csv.DictReader(csvfile, delimiter=','))
    assert rows[0]['Slice (I->S)'] == '2:3'
    assert rows[0]['VertLevel'] == '3'
    assert rows[2]['Slice (I->S)'] == '0'
    assert rows[2]['VertLevel'] == ''
コード例 #2
0
def test_dimension_mismatch_between_metric_and_vertfile(
        dummy_metrics, dummy_vert_level):
    """Test that an exception is raised only for mismatched metric and -vertfile images."""
    for name in dummy_metrics:
        if name != 'inconsistent length':
            # Every other metric must aggregate without raising.
            aggregate_slicewise.aggregate_per_slice_or_level(
                dummy_metrics[name], vert_level=dummy_vert_level)
            continue
        # The mismatched metric must raise, and the message must explain why.
        with pytest.raises(ValueError) as err:
            aggregate_slicewise.aggregate_per_slice_or_level(
                dummy_metrics[name], vert_level=dummy_vert_level)
        assert "mismatch" in str(err.value)
コード例 #3
0
def test_aggregate_per_slice(dummy_metrics):
    """Test extraction of metrics aggregation per slice: Selected slices"""
    # Restrict aggregation to slices 3 and 4, one output entry per slice.
    result = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'],
        slices=[3, 4],
        perslice=True,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    assert result[(3,)]['WA()'] == 41.0
    assert result[(4,)]['WA()'] == 50.0
コード例 #4
0
def test_aggregate_per_level(dummy_metrics, dummy_vert_level):
    """Test extraction of metrics aggregation per vertebral level"""
    # Levels 2 and 3 map to slice groups (0, 1) and (2, 3) respectively.
    result = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'],
        levels=[2, 3],
        perlevel=True,
        vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    assert result[(0, 1)] == {'VertLevel': (2,), 'WA()': 30.0}
    assert result[(2, 3)] == {'VertLevel': (3,), 'WA()': 40.0}
コード例 #5
0
def test_aggregate_across_levels(dummy_metrics, dummy_vert_level):
    """Test extraction of metrics aggregation across vertebral levels"""
    # With perslice and perlevel both off, levels 2-3 collapse to one group.
    result = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'],
        levels=[2, 3],
        perslice=False,
        perlevel=False,
        vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    assert result[(0, 1, 2, 3)] == {'VertLevel': (2, 3), 'WA()': 35.0}
コード例 #6
0
def test_aggregate_per_slice(dummy_metrics):
    """Test extraction of metrics aggregation per slice: Selected slices"""
    # Only slices 3 and 4 are aggregated; perslice keeps them separate.
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'],
        slices=[3, 4],
        perslice=True,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    assert aggregated[(3,)]['WA()'] == 41.0
    assert aggregated[(4,)]['WA()'] == 50.0
コード例 #7
0
def test_aggregate_across_all_slices(dummy_metrics):
    """Test extraction of metrics aggregation across slices: All slices by default"""
    result = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'],
        perslice=False,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    # One group spanning all slices; check its weighted average.
    first_group = next(iter(result))
    assert result[first_group]['WA()'] == 38.0
コード例 #8
0
def test_aggregate_per_level(dummy_metrics, dummy_vert_level):
    """Test extraction of metrics aggregation per vertebral level"""
    # perlevel=True yields one entry per level: level 2 -> slices (0, 1),
    # level 3 -> slices (2, 3).
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'],
        levels=[2, 3],
        perlevel=True,
        vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    assert aggregated[(0, 1)] == {'VertLevel': (2,), 'WA()': 30.0}
    assert aggregated[(2, 3)] == {'VertLevel': (3,), 'WA()': 40.0}
コード例 #9
0
def test_aggregate_across_levels(dummy_metrics, dummy_vert_level):
    """Test extraction of metrics aggregation across vertebral levels"""
    # Neither per-slice nor per-level: a single group covers levels 2-3.
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'],
        levels=[2, 3],
        perslice=False,
        perlevel=False,
        vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    assert aggregated[(0, 1, 2, 3)] == {'VertLevel': (2, 3), 'WA()': 35.0}
コード例 #10
0
def test_save_as_csv(dummy_metrics):
    """Test writing of output metric csv file"""
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], slices=[3, 4], perslice=False,
        group_funcs=(('WA', aggregate_slicewise.func_wa),
                     ('STD', aggregate_slicewise.func_std)))
    # Standard scenario: fresh file including the source filename column.
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv',
                                    fname_in='FakeFile.txt')
    expected = [sct.__version__, 'FakeFile.txt', '3:4', '', '45.5', '4.5']
    with open('tmp_file_out.csv', 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)  # skip header
        assert next(reader)[1:] == expected
    # Appending: rewrite the file, then append a second identical row.
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv')
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv', append=True)
    expected = [sct.__version__, '', '3:4', '', '45.5', '4.5']
    with open('tmp_file_out.csv', 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)  # skip header
        assert next(reader)[1:] == expected
        assert next(reader)[1:] == expected
コード例 #11
0
def test_aggregate_across_levels_perslice(dummy_metrics, dummy_vert_level):
    """Test extraction of metrics aggregation within selected vertebral levels and per slice"""
    result = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], levels=[2, 3], perslice=True,
        perlevel=False, vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    # Each slice keeps its own entry, tagged with its vertebral level.
    assert result[(0,)] == {'VertLevel': (2,), 'WA()': 29.0}
    assert result[(2,)] == {'VertLevel': (3,), 'WA()': 39.0}
コード例 #12
0
def test_save_as_csv_sorting(dummy_metrics):
    """Make sure slices are sorted in output csv file"""
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], perslice=True,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv')
    with open('tmp_file_out.csv', 'r') as csvfile:
        slice_column = [row['Slice (I->S)']
                        for row in csv.DictReader(csvfile, delimiter=',')]
    assert slice_column == ['0', '1', '2', '3', '4']
コード例 #13
0
def test_aggregate_across_levels_perslice(dummy_metrics, dummy_vert_level):
    """Test extraction of metrics aggregation within selected vertebral levels and per slice"""
    # perslice=True within levels 2-3: each slice keeps its own entry,
    # annotated with its vertebral level.
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], levels=[2, 3], perslice=True,
        perlevel=False, vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    assert aggregated[(0,)] == {'VertLevel': (2,), 'WA()': 29.0}
    assert aggregated[(2,)] == {'VertLevel': (3,), 'WA()': 39.0}
コード例 #14
0
def test_save_as_csv_sorting(dummy_metrics):
    """Make sure slices are sorted in output csv file"""
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], perslice=True,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv')
    # Rows must come out in ascending slice order regardless of dict order.
    with open('tmp_file_out.csv', 'r') as csvfile:
        written_slices = [entry['Slice (I->S)']
                          for entry in csv.DictReader(csvfile, delimiter=',')]
    assert written_slices == ['0', '1', '2', '3', '4']
コード例 #15
0
def test_save_as_csv_per_slice_then_per_level(dummy_metrics, dummy_vert_level):
    """Test with and without specifying perlevel. See: https://github.com/neuropoly/spinalcordtoolbox/issues/2141"""
    # Write a per-level aggregation first...
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], levels=[3, 4], perlevel=True,
        vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv')
    # ...then append a per-slice aggregation without perlevel.
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], slices=[0],
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv', append=True)
    # Row 0 comes from the per-level pass, row 2 from the appended pass.
    with open('tmp_file_out.csv', 'r') as csvfile:
        rows = list(csv.DictReader(csvfile, delimiter=','))
    assert rows[0]['Slice (I->S)'] == '2:3'
    assert rows[0]['VertLevel'] == '3'
    assert rows[2]['Slice (I->S)'] == '0'
    assert rows[2]['VertLevel'] == ''
コード例 #16
0
def test_save_as_csv_per_level(dummy_metrics, dummy_vert_level):
    """Make sure slices are listed in reduced form"""
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], levels=[3, 4], perlevel=True,
        vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv')
    with open('tmp_file_out.csv', 'r') as csvfile:
        first_row = next(csv.DictReader(csvfile, delimiter=','))
    # Contiguous slices 2 and 3 are written in reduced 'start:end' form.
    assert first_row['Slice (I->S)'] == '2:3'
    assert first_row['VertLevel'] == '3'
コード例 #17
0
def test_save_as_csv_per_level(dummy_metrics, dummy_vert_level):
    """Make sure slices are listed in reduced form"""
    # Aggregate per level, then verify the CSV lists the slice range compactly.
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], levels=[3, 4], perlevel=True,
        vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv')
    with open('tmp_file_out.csv', 'r') as csvfile:
        top_row = next(csv.DictReader(csvfile, delimiter=','))
    assert top_row['Slice (I->S)'] == '2:3'
    assert top_row['VertLevel'] == '3'
コード例 #18
0
def test_aggregate_slices_pmj(dummy_metrics):
    """Test extraction of metrics aggregation within selected slices at a PMJ distance"""
    result = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], slices=[2, 3, 4, 5], distance_pmj=64,
        perslice=False, perlevel=False,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    # One group spanning the requested slices, carrying the PMJ distance.
    expected = {'VertLevel': None, 'DistancePMJ': [64], 'WA()': 45.25}
    assert result[(2, 3, 4, 5)] == expected
コード例 #19
0
def test_aggregate_across_selected_slices(dummy_metrics):
    """Test extraction of metrics aggregation across slices: Selected slices"""
    # Aggregate every dummy metric over slices 1-2 with WA and STD.
    agg = {
        name: aggregate_slicewise.aggregate_per_slice_or_level(
            dummy_metrics[name], slices=[1, 2], perslice=False,
            group_funcs=(('WA', aggregate_slicewise.func_wa),
                         ('STD', aggregate_slicewise.func_std)))
        for name in dummy_metrics
    }
    assert agg['with float'][(1, 2)]['WA()'] == 35.0
    assert agg['with float'][(1, 2)]['STD()'] == 4.0
    assert agg['with int'][(1, 2)]['WA()'] == 100.5
    # check that even if there is an error in metric estimation, the function outputs a dict for specific slicegroup
    assert agg['with nan'][(1, 2)]['WA()'] == 101.0
    assert agg['inconsistent length'][(1, 2)]['WA()'] == 'index 2 is out of bounds for axis 0 with size 2'
    assert agg['with string'][(1, 2)]['WA()'] == "ufunc 'isfinite' not supported for the input types, and " \
                                                 "the inputs could not be safely coerced to any supported " \
                                                 "types according to the casting rule ''safe''"
コード例 #20
0
def test_save_as_csv_pmj(tmp_path, dummy_metrics):
    """Test writing of output metric csv file with distance from PMJ method"""
    path_out = str(tmp_path / 'tmp_file_out.csv')
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], slices=[2, 3, 4, 5], distance_pmj=64.0,
        perslice=False, perlevel=False,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(aggregated, path_out)
    with open(path_out, 'r') as csvfile:
        first_row = next(csv.DictReader(csvfile, delimiter=','))
    # PMJ-based aggregation fills DistancePMJ and leaves VertLevel empty.
    assert first_row['Slice (I->S)'] == '2:5'
    assert first_row['DistancePMJ'] == '64.0'
    assert first_row['VertLevel'] == ''
コード例 #21
0
def test_aggregate_across_selected_slices(dummy_metrics):
    """Test extraction of metrics aggregation across slices: Selected slices"""
    aggregated = {}
    for name in dummy_metrics:
        aggregated[name] = aggregate_slicewise.aggregate_per_slice_or_level(
            dummy_metrics[name], slices=[1, 2], perslice=False,
            group_funcs=(('WA', aggregate_slicewise.func_wa),
                         ('STD', aggregate_slicewise.func_std)))
    assert aggregated['with float'][(1, 2)]['WA()'] == 35.0
    assert aggregated['with float'][(1, 2)]['STD()'] == 4.0
    assert aggregated['with int'][(1, 2)]['WA()'] == 100.5
    # check that even if there is an error in metric estimation, the function outputs a dict for specific slicegroup
    assert aggregated['with nan'][(1, 2)]['WA()'] == 101.0
    assert aggregated['inconsistent length'][(1, 2)]['WA()'] == 'index 2 is out of bounds for axis 0 with size 2'
    assert aggregated['with string'][(1, 2)]['WA()'] == "ufunc 'isfinite' not supported for the input types, and " \
                                                        "the inputs could not be safely coerced to any supported " \
                                                        "types according to the casting rule ''safe''"
コード例 #22
0
def test_save_as_csv(dummy_metrics):
    """Test writing of output metric csv file"""
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], slices=[3, 4], perslice=False,
        group_funcs=(('WA', aggregate_slicewise.func_wa),
                     ('STD', aggregate_slicewise.func_std)))
    # Standard scenario: new file, with the source filename recorded.
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv', fname_in='FakeFile.txt')
    with open('tmp_file_out.csv', 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)  # skip header
        assert next(reader)[1:] == [sct.__version__, 'FakeFile.txt', '3:4', '', '45.5', '4.5']
    # Appending: overwrite, then append an identical second row.
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv')
    aggregate_slicewise.save_as_csv(aggregated, 'tmp_file_out.csv', append=True)
    expected_row = [sct.__version__, '', '3:4', '', '45.5', '4.5']
    with open('tmp_file_out.csv', 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)  # skip header
        assert next(reader)[1:] == expected_row
        assert next(reader)[1:] == expected_row
コード例 #23
0
def main(args):
    """Compute shape metrics on a segmentation, aggregate them per slice
    and/or vertebral level, write the result to a CSV file, and optionally
    generate a QC report.

    :param args: list of raw command-line arguments, parsed by get_parser().
    """
    parser = get_parser()
    arguments = parser.parse(args)

    # Initialization
    slices = ''
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I
    # BUG FIX: angle_correction was previously assigned only inside the
    # '-angle-corr' branch below, so omitting the flag (or passing a value
    # other than '0'/'1') raised NameError at the compute_shape() call.
    # Default to enabled, matching the '1' behavior.
    angle_correction = True

    fname_segmentation = sct.get_absolute_path(arguments['-i'])
    fname_vert_levels = ''
    if '-o' in arguments:
        file_out = os.path.abspath(arguments['-o'])
    else:
        file_out = ''
    if '-append' in arguments:
        append = int(arguments['-append'])
    else:
        append = 0
    if '-vert' in arguments:
        vert_levels = arguments['-vert']
    else:
        vert_levels = ''
    if '-r' in arguments:
        remove_temp_files = arguments['-r']  # NOTE(review): currently unused in this function
    if '-vertfile' in arguments:
        fname_vert_levels = arguments['-vertfile']
    if '-perlevel' in arguments:
        perlevel = arguments['-perlevel']
    else:
        perlevel = None
    if '-z' in arguments:
        slices = arguments['-z']
    if '-perslice' in arguments:
        perslice = arguments['-perslice']
    else:
        perslice = None
    if '-angle-corr' in arguments:
        if arguments['-angle-corr'] == '1':
            angle_correction = True
        elif arguments['-angle-corr'] == '0':
            angle_correction = False
    param_centerline = ParamCenterline(
        algo_fitting=arguments['-centerline-algo'],
        smooth=arguments['-centerline-smooth'],
        minmax=True)
    path_qc = arguments.get("-qc", None)
    qc_dataset = arguments.get("-qc-dataset", None)
    qc_subject = arguments.get("-qc-subject", None)

    verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    # Compute shape metrics slice-wise, then aggregate each metric.
    metrics, fit_results = process_seg.compute_shape(fname_segmentation,
                                                     angle_correction=angle_correction,
                                                     param_centerline=param_centerline,
                                                     verbose=verbose)
    for key in metrics:
        metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                        levels=parse_num_list(vert_levels), perslice=perslice,
                                                        perlevel=perlevel, vert_level=fname_vert_levels,
                                                        group_funcs=group_funcs)
    metrics_agg_merged = _merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)

    # QC report (only show CSA for clarity)
    if path_qc is not None:
        generate_qc(fname_segmentation, args=args, path_qc=os.path.abspath(path_qc), dataset=qc_dataset,
                    subject=qc_subject, path_img=_make_figure(metrics_agg_merged, fit_results),
                    process='sct_process_segmentation')

    sct.display_open(file_out)
コード例 #24
0
def main(args):
    """Compute shape metrics (bspline fitting) on a segmentation, aggregate
    them per slice and/or vertebral level, and write the result to CSV.

    :param args: list of raw command-line arguments, parsed by get_parser().
    """
    parser = get_parser()
    arguments = parser.parse(args)
    param = Param()

    # Initialization
    slices = param.slices
    group_funcs = (('MEAN', func_wa), ('STD', func_std)
                   )  # functions to perform when aggregating metrics along S-I
    # BUG FIX: angle_correction was only assigned inside the '-angle-corr'
    # branch; omitting the flag raised NameError at compute_shape().
    # Default to enabled, matching the '1' behavior.
    angle_correction = True

    fname_segmentation = sct.get_absolute_path(arguments['-i'])
    fname_vert_levels = ''
    if '-o' in arguments:
        file_out = os.path.abspath(arguments['-o'])
    else:
        file_out = ''
    if '-append' in arguments:
        append = int(arguments['-append'])
    else:
        append = 0
    if '-vert' in arguments:
        vert_levels = arguments['-vert']
    else:
        vert_levels = ''
    if '-r' in arguments:
        remove_temp_files = arguments['-r']  # NOTE(review): currently unused in this function
    if '-vertfile' in arguments:
        fname_vert_levels = arguments['-vertfile']
    if '-perlevel' in arguments:
        perlevel = arguments['-perlevel']
    else:
        perlevel = Param().perlevel
    if '-z' in arguments:
        slices = arguments['-z']
    if '-perslice' in arguments:
        perslice = arguments['-perslice']
    else:
        perslice = Param().perslice
    if '-angle-corr' in arguments:
        if arguments['-angle-corr'] == '1':
            angle_correction = True
        elif arguments['-angle-corr'] == '0':
            angle_correction = False

    verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    # Compute shape metrics slice-wise, then aggregate each metric.
    metrics = process_seg.compute_shape(fname_segmentation,
                                        algo_fitting='bspline',
                                        angle_correction=angle_correction,
                                        verbose=verbose)
    for key in metrics:
        metrics_agg[key] = aggregate_per_slice_or_level(
            metrics[key],
            slices=parse_num_list(slices),
            levels=parse_num_list(vert_levels),
            perslice=perslice,
            perlevel=perlevel,
            vert_level=fname_vert_levels,
            group_funcs=group_funcs)
    metrics_agg_merged = _merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged,
                file_out,
                fname_in=fname_segmentation,
                append=append)
    sct.display_open(file_out)
コード例 #25
0
def test_aggregate_across_all_slices(dummy_metrics):
    """Test extraction of metrics aggregation across slices: All slices by default"""
    # No slice selection: everything collapses into one group.
    aggregated = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], perslice=False,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    assert aggregated[next(iter(aggregated))]['WA()'] == 38.0
コード例 #26
0
def main(args):
    """Dispatch the requested segmentation process ('centerline', 'csa',
    'label-vert' or 'shape'), aggregating and saving metrics where relevant.

    :param args: list of raw command-line arguments, parsed by get_parser().
    """
    parser = get_parser()
    arguments = parser.parse(args)
    param = Param()

    # Initialization
    slices = param.slices
    angle_correction = True
    use_phys_coord = True
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I
    # BUG FIX: verbose and remove_temp_files were only assigned when their
    # flags were present, raising NameError otherwise (verbose is read below
    # unconditionally; remove_temp_files is read for 'csa' and 'shape').
    # Defaults assumed here; TODO confirm against the parser's declared defaults.
    verbose = 1
    remove_temp_files = 1

    fname_segmentation = sct.get_absolute_path(arguments['-i'])
    name_process = arguments['-p']
    fname_vert_levels = ''
    if '-o' in arguments:
        file_out = os.path.abspath(arguments['-o'])
    else:
        file_out = ''
    if '-append' in arguments:
        append = int(arguments['-append'])
    else:
        append = 0
    if '-vert' in arguments:
        vert_levels = arguments['-vert']
    else:
        vert_levels = ''
    if '-r' in arguments:
        remove_temp_files = arguments['-r']
    if '-vertfile' in arguments:
        fname_vert_levels = arguments['-vertfile']
    if '-perlevel' in arguments:
        perlevel = arguments['-perlevel']
    else:
        perlevel = Param().perlevel
    if '-v' in arguments:
        verbose = int(arguments['-v'])
    if '-z' in arguments:
        slices = arguments['-z']
    if '-perslice' in arguments:
        perslice = arguments['-perslice']
    else:
        perslice = Param().perslice
    if '-a' in arguments:
        param.algo_fitting = arguments['-a']
    if '-no-angle' in arguments:
        if arguments['-no-angle'] == '1':
            angle_correction = False
        elif arguments['-no-angle'] == '0':
            angle_correction = True
    if '-use-image-coord' in arguments:
        if arguments['-use-image-coord'] == '1':
            use_phys_coord = False
        if arguments['-use-image-coord'] == '0':
            use_phys_coord = True

    # update fields
    param.verbose = verbose
    metrics_agg = {}
    if not file_out:
        file_out = name_process + '.csv'

    if name_process == 'centerline':
        process_seg.extract_centerline(fname_segmentation, verbose=param.verbose,
                                       algo_fitting=param.algo_fitting, use_phys_coord=use_phys_coord,
                                       file_out=file_out)

    if name_process == 'csa':
        metrics = process_seg.compute_csa(fname_segmentation, algo_fitting=param.algo_fitting,
                                          type_window=param.type_window, window_length=param.window_length,
                                          angle_correction=angle_correction, use_phys_coord=use_phys_coord,
                                          remove_temp_files=remove_temp_files, verbose=verbose)

        for key in metrics:
            metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                            levels=parse_num_list(vert_levels), perslice=perslice,
                                                            perlevel=perlevel, vert_level=fname_vert_levels,
                                                            group_funcs=group_funcs)
        metrics_agg_merged = merge_dict(metrics_agg)
        save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)
        sct.printv('\nFile created: '+file_out, verbose=1, type='info')

    if name_process == 'label-vert':
        if '-discfile' in arguments:
            fname_discs = arguments['-discfile']
        else:
            sct.printv('\nERROR: Disc label file is mandatory (flag: -discfile).\n', 1, 'error')
        process_seg.label_vert(fname_segmentation, fname_discs, verbose=verbose)

    if name_process == 'shape':
        fname_discs = None
        if '-discfile' in arguments:
            fname_discs = arguments['-discfile']
        metrics = process_seg.compute_shape(fname_segmentation, remove_temp_files=remove_temp_files, verbose=verbose)
        for key in metrics:
            metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                            levels=parse_num_list(vert_levels), perslice=perslice,
                                                            perlevel=perlevel, vert_level=fname_vert_levels,
                                                            group_funcs=group_funcs)
        metrics_agg_merged = merge_dict(metrics_agg)
        save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)
        sct.printv('\nFile created: ' + file_out, verbose=1, type='info')
コード例 #27
0
def main(args=None):
    """Compute shape metrics on a segmentation, aggregate them per slice
    and/or vertebral level, save to CSV, and optionally generate a QC report.

    :param args: optional list of command-line arguments; when falsy, parses
                 sys.argv (showing --help if no arguments were given).
    """
    parser = get_parser()
    if args:
        arguments = parser.parse_args(args)
    else:
        # No explicit args: fall back to sys.argv, or show help when empty.
        arguments = parser.parse_args(
            args=None if sys.argv[1:] else ['--help'])

    # Initialization
    slices = ''
    group_funcs = (('MEAN', func_wa), ('STD', func_std)
                   )  # functions to perform when aggregating metrics along S-I

    fname_segmentation = get_absolute_path(arguments.i)
    fname_vert_levels = ''
    if arguments.o is not None:
        file_out = os.path.abspath(arguments.o)
    else:
        file_out = ''
    if arguments.append is not None:
        append = arguments.append
    else:
        append = 0
    if arguments.vert is not None:
        vert_levels = arguments.vert
    else:
        vert_levels = ''
    remove_temp_files = arguments.r  # NOTE(review): currently unused in this function
    if arguments.vertfile is not None:
        fname_vert_levels = arguments.vertfile
    if arguments.perlevel is not None:
        perlevel = arguments.perlevel
    else:
        perlevel = None
    if arguments.z is not None:
        slices = arguments.z
    if arguments.perslice is not None:
        perslice = arguments.perslice
    else:
        perslice = None
    angle_correction = arguments.angle_corr
    param_centerline = ParamCenterline(algo_fitting=arguments.centerline_algo,
                                       smooth=arguments.centerline_smooth,
                                       minmax=True)
    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject

    verbose = int(arguments.v)
    init_sct(log_level=verbose, update=True)  # Update log level

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    # Compute slice-wise shape metrics, then aggregate each metric.
    metrics, fit_results = compute_shape(fname_segmentation,
                                         angle_correction=angle_correction,
                                         param_centerline=param_centerline,
                                         verbose=verbose)
    for key in metrics:
        if key == 'length':
            # For computing cord length, slice-wise length needs to be summed across slices
            metrics_agg[key] = aggregate_per_slice_or_level(
                metrics[key],
                slices=parse_num_list(slices),
                levels=parse_num_list(vert_levels),
                perslice=perslice,
                perlevel=perlevel,
                vert_level=fname_vert_levels,
                group_funcs=(('SUM', func_sum), ))
        else:
            # For other metrics, we compute the average and standard deviation across slices
            metrics_agg[key] = aggregate_per_slice_or_level(
                metrics[key],
                slices=parse_num_list(slices),
                levels=parse_num_list(vert_levels),
                perslice=perslice,
                perlevel=perlevel,
                vert_level=fname_vert_levels,
                group_funcs=group_funcs)
    metrics_agg_merged = merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged,
                file_out,
                fname_in=fname_segmentation,
                append=append)

    # QC report (only show CSA for clarity)
    if path_qc is not None:
        generate_qc(fname_segmentation,
                    args=args,
                    path_qc=os.path.abspath(path_qc),
                    dataset=qc_dataset,
                    subject=qc_subject,
                    path_img=_make_figure(metrics_agg_merged, fit_results),
                    process='sct_process_segmentation')

    display_open(file_out)
コード例 #28
0
def main(args):
    """Compute shape metrics (bspline fitting) on a segmentation, aggregate
    them per slice and/or vertebral level, and write the result to CSV.

    :param args: list of raw command-line arguments, parsed by get_parser().
    """
    parser = get_parser()
    arguments = parser.parse(args)
    param = Param()

    # Initialization
    slices = param.slices
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I
    # BUG FIX: angle_correction was only assigned inside the '-angle-corr'
    # branch; omitting the flag raised NameError at compute_shape().
    # Default to enabled, matching the '1' behavior.
    angle_correction = True

    fname_segmentation = sct.get_absolute_path(arguments['-i'])
    fname_vert_levels = ''
    if '-o' in arguments:
        file_out = os.path.abspath(arguments['-o'])
    else:
        file_out = ''
    if '-append' in arguments:
        append = int(arguments['-append'])
    else:
        append = 0
    if '-vert' in arguments:
        vert_levels = arguments['-vert']
    else:
        vert_levels = ''
    if '-r' in arguments:
        remove_temp_files = arguments['-r']  # NOTE(review): currently unused in this function
    if '-vertfile' in arguments:
        fname_vert_levels = arguments['-vertfile']
    if '-perlevel' in arguments:
        perlevel = arguments['-perlevel']
    else:
        perlevel = Param().perlevel
    if '-z' in arguments:
        slices = arguments['-z']
    if '-perslice' in arguments:
        perslice = arguments['-perslice']
    else:
        perslice = Param().perslice
    if '-angle-corr' in arguments:
        if arguments['-angle-corr'] == '1':
            angle_correction = True
        elif arguments['-angle-corr'] == '0':
            angle_correction = False

    verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    # Compute shape metrics slice-wise, then aggregate each metric.
    metrics = process_seg.compute_shape(fname_segmentation,
                                        algo_fitting='bspline',
                                        angle_correction=angle_correction,
                                        verbose=verbose)
    for key in metrics:
        metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                        levels=parse_num_list(vert_levels), perslice=perslice,
                                                        perlevel=perlevel, vert_level=fname_vert_levels,
                                                        group_funcs=group_funcs)
    metrics_agg_merged = _merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)
    sct.display_open(file_out)