Code example #1
import numpy as np
from brukerapi.dataset import Dataset  # import path assumed

def test_data_save(test_data, tmp_path, WRITE_TOLERANCE):
    d_ref = Dataset(test_data[0])

    if d_ref.subtype == "":
        path_out = tmp_path / d_ref.type
    else:
        path_out = tmp_path / (d_ref.type + '.' + d_ref.subtype)

    d_ref.write(path_out)
    d_test = Dataset(path_out)

    diff = d_ref.data - d_test.data
    max_error = np.max(np.abs(diff))

    # a bit-exact round trip is ideal; otherwise the error must stay within
    # the configured tolerance
    if max_error > 0.0:
        assert max_error < WRITE_TOLERANCE
        print('Arrays are not identical, but max difference: {} is tolerated'.format(max_error))
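For context, this test leans on pytest fixtures defined elsewhere in the suite. A minimal sketch of how they might be wired up in a conftest.py; the fixture names come from the test above, while the dataset path and tolerance value are assumptions:

import pytest

# hypothetical list of (dataset_path,) tuples the suite is parametrized over
TEST_CASES = [
    ('/data/bruker/20/pdata/1/2dseq',),
]

@pytest.fixture(params=TEST_CASES)
def test_data(request):
    # each test invocation receives one tuple; element 0 is the dataset path
    return request.param

@pytest.fixture
def WRITE_TOLERANCE():
    # assumed maximum absolute round-trip error accepted by test_data_save
    return 1e-6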
Code example #2
import json
from pathlib import Path
from brukerapi.dataset import Dataset  # import path assumed

def test_parameters(test_parameters):

    dataset = Dataset(test_parameters[0], load=False)
    dataset.load_parameters()

    for jcampdx in dataset._parameters.values():
        with Path(str(jcampdx.path)+'.json').open() as file:
            reference = json.load(file)

        assert jcampdx.to_dict() == reference
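The reference JSON files compared against here sit next to the JCAMP-DX parameter files. A hedged sketch of a one-off script that could (re)generate them, assuming the same to_dict() and path attributes used by the test; the dataset path is hypothetical:

import json
from pathlib import Path
from brukerapi.dataset import Dataset  # import path assumed

dataset = Dataset('/data/bruker/20/pdata/1/2dseq', load=False)  # hypothetical path
dataset.load_parameters()
for jcampdx in dataset._parameters.values():
    # serialize each parameter file next to its source, mirroring the test's lookup
    with Path(str(jcampdx.path) + '.json').open('w') as file:
        json.dump(jcampdx.to_dict(), file, indent=2)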
Code example #3
from pathlib import Path
from brukerapi.dataset import Dataset  # import paths assumed
from brukerapi.splitters import SlicePackageSplitter, FrameGroupSplitter

def test_split(test_split_data, tmp_path):
    dataset = Dataset(Path(test_split_data[1]) / test_split_data[0]['path'])

    if test_split_data[0]['splitter'] == 'SlicePackage':
        SlicePackageSplitter().split(dataset, write=True, path_out=tmp_path)
    elif test_split_data[0]['splitter'] == 'FG_ECHO':
        FrameGroupSplitter('FG_ECHO').split(dataset,
                                            write=True,
                                            path_out=tmp_path)

    for ref in test_split_data[0]['results'].values():
        ds_split = Dataset(tmp_path / ref['path'])
        assert ds_split.shape == tuple(ref['shape'])
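The structure of one test_split_data parameter can be inferred from the keys the test reads; every concrete value in the sketch below is hypothetical:

# (case_dict, base_dir) -- shape inferred from the test above
test_case = (
    {
        'path': '20/pdata/1/2dseq',      # dataset path relative to base_dir
        'splitter': 'SlicePackage',      # or 'FG_ECHO'
        'results': {
            'sp0': {'path': 'sp0/2dseq', 'shape': [128, 128, 3]},  # hypothetical
        },
    },
    '/data/bruker',                      # base directory
)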
Code example #4
import numpy as np
from brukerapi.dataset import Dataset  # import path assumed

def test_data_load(test_data):
    dataset = Dataset(test_data[0])
    # compare the loaded data against the pre-computed .npz reference;
    # the bare except in the original silently swallowed any failure
    with np.load(str(dataset.path) + '.npz') as data:
        assert np.array_equal(dataset.data, data['data'])
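As with the parameter references, the .npz file could be produced by a small one-off script; a sketch under the assumption that the reference simply stores the array returned by dataset.data (path hypothetical):

import numpy as np
from brukerapi.dataset import Dataset  # import path assumed

dataset = Dataset('/data/bruker/20/pdata/1/2dseq')  # hypothetical path
np.savez(str(dataset.path) + '.npz', data=dataset.data)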
Code example #5
import numpy as np
from pathlib import Path
from brukerapi.dataset import Dataset  # import path assumed
# json_to_slice and generate_slices are test-suite helpers (sketched below)

def test_ra(test_ra_data):

    d1 = Dataset(Path(test_ra_data[1]) / test_ra_data[0]['path'])
    core_index = tuple(slice(None) for _ in range(d1.encoded_dim))
    d2 = Dataset(Path(test_ra_data[1]) / test_ra_data[0]['path'],
                 random_access=True)

    if "slices" in test_ra_data[0].keys():
        for s in test_ra_data[0]['slices']:
            slice_ = json_to_slice(s)
            assert np.array_equal(d1.data[slice_], d2.data[slice_])
    else:
        # test element-wise: one index tuple over the non-encoded dimensions at a time
        for index in np.ndindex(d1.shape[d1.encoded_dim:]):
            assert np.array_equal(d1.data[core_index + index],
                                  d2.data[core_index + index])

        # test all possible slices
        for slice_ in generate_slices(d1.shape[d1.encoded_dim:]):
            assert np.array_equal(d1.data[core_index + slice_],
                                  d2.data[core_index + slice_])
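json_to_slice and generate_slices are helpers from the test suite that are not shown here. Hypothetical sketches of what they might look like, assuming slices are stored in JSON as [start, stop] pairs; the real implementations may differ:

import itertools

def json_to_slice(s):
    # e.g. [[0, 4], [1, 3]] -> (slice(0, 4), slice(1, 3))
    return tuple(slice(*bounds) for bounds in s)

def generate_slices(shape):
    # yield every tuple of contiguous slices over the given dimensions
    per_dim = [
        [slice(start, stop) for start in range(n) for stop in range(start + 1, n + 1)]
        for n in shape
    ]
    yield from itertools.product(*per_dim)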
Code example #6
import os
import pkg_resources
from brukerapi.dataset import Dataset
from brukerapi.folders import Folder               # import paths assumed
from brukerapi.exceptions import FilterEvalFalse   # import path assumed
# _get_queries and _proc_dataset are module-local helpers in spec2nii

def yield_bruker(args):
    """
    Yield the data and properties of one or more Bruker datasets.

    If the path specified by args.file is:

    1/ a Bruker dataset file (2dseq) - yields the data and properties of that dataset
    2/ a directory - yields the data and properties of every contained dataset that
       complies with the queries

    """
    # get location of the spec2nii Bruker properties configuration file
    bruker_properties_path = pkg_resources.resource_filename('spec2nii', 'bruker_properties.json')

    # get a list of queries to filter datasets
    queries = _get_queries(args)

    # case of Bruker dataset
    if os.path.isfile(args.file):
        d = Dataset(args.file, property_files=[bruker_properties_path], parameter_files=['method'])
        try:
            d.query(queries)
        except FilterEvalFalse:
            raise ValueError(f'Bruker dataset {d.path} is not suitable for conversion to mrs_nifti')
        yield from _proc_dataset(d, args)

    # case of folder containing Bruker datasets
    elif os.path.isdir(args.file):

        # process individual datasets
        for dataset in Folder(args.file, dataset_state={
            "parameter_files": ['method'],
            "property_files": [bruker_properties_path]
        }).get_dataset_list_rec():
            with dataset as d:
                try:
                    d.query(queries)
                except FilterEvalFalse:
                    continue
                yield from _proc_dataset(d, args)
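Since yield_bruker is a generator, a caller iterates over it. A minimal driver sketch; in spec2nii the args object comes from the package's own CLI parser, so the Namespace below is purely illustrative:

from argparse import Namespace

# hypothetical invocation; real option names are defined by spec2nii's CLI
args = Namespace(file='/data/bruker/study')
for item in yield_bruker(args):
    # each yielded item carries one dataset's data and properties
    print(item)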
Code example #7
from brukerapi.dataset import Dataset  # import path assumed

def test_properties(test_properties):
    if test_properties:
        dataset = Dataset(test_properties[0], load=False, parameter_files=['subject'])
        dataset.load_parameters()
        dataset.load_properties()
        assert dataset.to_dict() == test_properties[1]