Example #1
def test_write_raw():
    # lcmodel needs to know the transform properties
    transform = suspect.transformation_matrix([1, 0, 0], [0, 1, 0], [0, 0, 0],
                                              [10, 10, 10])
    data = suspect.MRSData(numpy.zeros(1, 'complex'),
                           1e-3,
                           123.456,
                           transform=transform)
    mock = unittest.mock.mock_open()
    with patch.object(builtins, 'open', mock):
        suspect.io.lcmodel.save_raw("/home/ben/test_raw.raw", data)
Example #2
def extract_header_parameters(header):
    """
    Creates a dictionary with the most important header parameters from a GE
    P-file, used to create an MRSData object with the result.

    Parameters
    ----------
    header: dict
        The header dictionary loaded by the Orchestra library from a P-file

    Returns
    -------
    dict
        Dictionary containing the parameters needed to create an MRSData object
    """
    dt = 1 / header["rdb_hdr_rec"]["spectral_width"]
    f0 = header["rdb_hdr_ps"]["mps_freq"] * 1e-7
    te = header["rdb_hdr_rec"]["rdb_hdr_te"] / 1000  # convert from us to ms
    tr = header["rdb_hdr_image"]["tr"] / 1000  # convert from us to ms

    # calculating the transform is quite involved
    # GE internally uses a RAS coordinate system, so we have to convert to the
    # LPS system used here (and by the DICOM standard)
    voxel_size = numpy.array([
        header["rdb_hdr_image"]["user8"], header["rdb_hdr_image"]["user9"],
        header["rdb_hdr_image"]["user10"]
    ])
    position_vector = numpy.array([
        -header["rdb_hdr_image"]["user11"], -header["rdb_hdr_image"]["user12"],
        header["rdb_hdr_image"]["user13"]
    ])
    tl_coord_lps = numpy.array([
        -header["rdb_hdr_image"]["tlhc_R"], -header["rdb_hdr_image"]["tlhc_A"],
        header["rdb_hdr_image"]["tlhc_S"]
    ])

    tr_coord_lps = numpy.array([
        -header["rdb_hdr_image"]["trhc_R"], -header["rdb_hdr_image"]["trhc_A"],
        header["rdb_hdr_image"]["trhc_S"]
    ])

    br_coord_lps = numpy.array([
        -header["rdb_hdr_image"]["brhc_R"], -header["rdb_hdr_image"]["brhc_A"],
        header["rdb_hdr_image"]["brhc_S"]
    ])

    e1 = tr_coord_lps - tl_coord_lps
    e1 = e1 / numpy.linalg.norm(e1)
    e2 = br_coord_lps - tr_coord_lps
    e2 = e2 / numpy.linalg.norm(e2)

    transform = transformation_matrix(e1, e2, position_vector, voxel_size)

    return {"dt": dt, "f0": f0, "te": te, "tr": tr, "transform": transform}
Example #3
def test_lcmodel_all_files():
    # lcmodel needs to know the transform properties
    transform = suspect.transformation_matrix([1, 0, 0], [0, 1, 0], [0, 0, 0],
                                              [10, 10, 10])
    data = suspect.MRSData(numpy.zeros(1, 'complex'),
                           1e-3,
                           123.456,
                           transform=transform)
    mock = unittest.mock.mock_open()
    with patch.object(builtins, 'open', mock):
        suspect.io.lcmodel.write_all_files(
            os.path.join(os.getcwd(), "lcmodel"), data)
Example #4
def test_base_transform():
    position = np.array([10, 20, 30])
    voxel_size = np.array([20, 20, 20])
    transform = suspect.transformation_matrix([1, 0, 0], [0, 1, 0], position, voxel_size)
    base = suspect.base.ImageBase(np.zeros(1), transform)
    np.testing.assert_equal(base.position, position)
    np.testing.assert_equal(base.voxel_size, voxel_size)
    transformed = base.to_scanner(0, 0, 0)
    np.testing.assert_equal(transformed, position)
    transformed = base.to_scanner(np.array([[0, 0, 0], [1, 1, 1]]))
    np.testing.assert_equal(transformed, [position, position + voxel_size])
    transformed = base.from_scanner(position)
    np.testing.assert_equal((0, 0, 0), transformed)
Example #5
def create_dcm_field_dose(fp_dicom_dose):
    """
    Input:      fp_main = path to full AUTOMC output
    Output:     imagebase file of dicom dose
    Summary:    Loads dicom dose field file
    """

    dose_1 = pydicom.read_file(fp_dicom_dose)

    raw_eclip_dose = dose_1.pixel_array * dose_1.DoseGridScaling

    eclip_dose_row_vec = np.array(dose_1.ImageOrientationPatient[:3])
    eclip_dose_col_vec = np.array(dose_1.ImageOrientationPatient[3:])
    eclip_dose_pos = np.array(dose_1.ImagePositionPatient)
    eclip_dose_slice_thic = dose_1.GridFrameOffsetVector[1] - dose_1.GridFrameOffsetVector[0]
    eclip_dose_vox_spac = list(dose_1.PixelSpacing)
    eclip_dose_vox_spac.append(eclip_dose_slice_thic)

    eclip_dose_trans = suspect.transformation_matrix(eclip_dose_row_vec, eclip_dose_col_vec, eclip_dose_pos, eclip_dose_vox_spac)

    eclipse_dose = suspect.base.ImageBase(raw_eclip_dose, eclip_dose_trans)

    return eclipse_dose
Example #6
def load_siemens_dicom(filename):
    """Imports a file in the Siemens .IMA format.

    Parameters
    ----------
    filename : str
        The name of the file to import

    """
    # the .IMA format is a DICOM standard, but unfortunately most of the information is contained inside a private and
    # very complicated header with its own data storage format, so we have to get that information out along with the data
    # start by reading in the DICOM file completely
    dataset = pydicom.dicomio.read_file(filename)
    # now look through the tags (0029, 00xx) to work out which xx refers to the csa header
    # xx seems to start at 10 for Siemens
    xx = 0x0010
    header_index = 0
    while (0x0029, xx) in dataset:
        if dataset[0x0029, xx].value == "SIEMENS CSA HEADER":
            header_index = xx
        xx += 1
    # check that we have found the header
    if header_index == 0:
        raise KeyError("Could not find header index")
    # now we know which tag contains the CSA image header info: (0029, xx10)
    csa_header_bytes = dataset[0x0029, 0x0100 * header_index + 0x0010].value
    csa_header = read_csa_header(csa_header_bytes)
    # for key, value in csa_header.items():
    #    print("%s : %s" % (str(key), str(value)))
    # we can also get the series header info: (0029, xx20), but this seems to be mostly pretty boring

    # now we can work out the shape of the data (slices, rows, columns, fid_points)
    data_shape = (
        csa_header["SpectroscopyAcquisitionOut-of-planePhaseSteps"],
        csa_header["Rows"],
        csa_header["Columns"],
        csa_header["DataPointColumns"],
    )

    # now look through the tags (7fe1, 00xx) to work out which xx refers to the csa data
    # xx seems to start at 10 for Siemens
    xx = 0x0010
    data_index = 0
    while (0x7fe1, xx) in dataset:
        if dataset[0x7fe1, xx].value == "SIEMENS CSA NON-IMAGE":
            data_index = xx
        xx += 1
    # check that we have found the data
    if data_index == 0:
        raise KeyError("Could not find data index")
    # extract the actual data bytes
    csa_data_bytes = dataset[0x7fe1, 0x0100 * data_index + 0x0010].value
    # the data is stored as a list of 4 byte floats in (real, imaginary) pairs
    data_floats = struct.unpack("<%df" % (len(csa_data_bytes) / 4),
                                csa_data_bytes)
    complex_data = complex_array_from_iter(iter(data_floats),
                                           length=len(data_floats) // 2,
                                           shape=data_shape)

    in_plane_rot = csa_header["VoiInPlaneRotation"]
    x_vector = numpy.array([-1, 0, 0])
    normal_vector = numpy.array(csa_header["VoiOrientation"])
    orthogonal_x = x_vector - numpy.dot(x_vector,
                                        normal_vector) * normal_vector
    orthonormal_x = orthogonal_x / numpy.linalg.norm(orthogonal_x)
    rot_matrix = rotation_matrix(in_plane_rot, normal_vector)
    row_vector = numpy.dot(rot_matrix, orthonormal_x)
    column_vector = numpy.cross(row_vector, normal_vector)
    voxel_size = (*csa_header["PixelSpacing"], csa_header["SliceThickness"])
    transform = transformation_matrix(row_vector, column_vector,
                                      csa_header["VoiPosition"], voxel_size)

    voi_size = [
        csa_header["VoiReadoutFoV"], csa_header["VoiPhaseFoV"],
        csa_header["VoiThickness"]
    ]

    metadata = {"voi_size": voi_size}

    return MRSData(complex_data,
                   csa_header["RealDwellTime"] * 1e-9,
                   csa_header["ImagingFrequency"],
                   te=csa_header["EchoTime"],
                   tr=csa_header["RepetitionTime"],
                   transform=transform,
                   metadata=metadata)
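The comment above notes that the CSA data block stores 4-byte floats in (real, imaginary) pairs. As a point of comparison, here is a minimal sketch of decoding such a buffer with numpy alone, independent of the complex_array_from_iter helper used in the example (the function name below is illustrative only):

import numpy

def interleaved_floats_to_complex(raw_bytes, shape=None):
    # interpret the buffer as little-endian float32 and pair up (real, imaginary) values
    floats = numpy.frombuffer(raw_bytes, dtype="<f4")
    complex_data = floats[0::2] + 1j * floats[1::2]
    return complex_data.reshape(shape) if shape is not None else complex_data

# two complex points, (1+2j) and (3+4j), packed as four float32 values
buffer = numpy.array([1, 2, 3, 4], dtype="<f4").tobytes()
print(interleaved_floats_to_complex(buffer))  # [1.+2.j 3.+4.j]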
Example #7
def load_sdat(sdat_filename, spar_filename=None, spar_encoding=None):
    # if the spar filename is not supplied, assume it is in the same folder as
    # the sdat and only differs in the extension
    if spar_filename is None:
        path, ext = os.path.splitext(sdat_filename)
        # match the capitalisation of the sdat extension
        if ext == ".SDAT":
            spar_filename = path + ".SPAR"
        elif ext == ".sdat":
            spar_filename = path + ".spar"

    with open(spar_filename, 'r', encoding=spar_encoding) as fin:
        parameter_dict = {}
        for line in fin:
            # ignore empty lines and comments starting with !
            if line != "\n" and not line.startswith("!"):
                key, value = map(str.strip, line.split(":", 1))
                if key in spar_types["floats"]:
                    parameter_dict[key] = float(value)
                elif key in spar_types["integers"]:
                    parameter_dict[key] = int(value)
                elif key in spar_types["strings"]:
                    parameter_dict[key] = value
                else:
                    pass
                    #print("{} : {}".format(key, value))

    dt = 1 / parameter_dict["sample_frequency"]

    with open(sdat_filename, 'rb') as fin:
        raw_bytes = fin.read()

    floats = _vax_to_ieee_single_float(raw_bytes)
    data_iter = iter(floats)
    complex_iter = (complex(r, -i) for r, i in zip(data_iter, data_iter))
    raw_data = numpy.fromiter(complex_iter, "complex64")
    if parameter_dict["rows"] > 1:
        raw_data = numpy.reshape(
            raw_data, (parameter_dict["rows"] //
                       parameter_dict["nr_of_phase_encoding_profiles_ky"],
                       parameter_dict["nr_of_phase_encoding_profiles_ky"],
                       parameter_dict["nr_of_slices_for_multislice"],
                       parameter_dict["samples"])).squeeze()
    else:
        raw_data = numpy.reshape(
            raw_data,
            (parameter_dict["rows"], parameter_dict["samples"])).squeeze()

    # calculate transformation matrix
    voxel_size = numpy.array([
        parameter_dict["lr_size"], parameter_dict["ap_size"],
        parameter_dict["cc_size"]
    ])
    position_vector = numpy.array([
        parameter_dict["lr_off_center"], parameter_dict["ap_off_center"],
        parameter_dict["cc_off_center"]
    ])

    A = numpy.eye(3)
    for a, ang in enumerate(
        ["lr_angulation", "ap_angulation", "cc_angulation"]):
        axis = numpy.zeros(3)
        axis[a] = 1
        A = A @ rotation_matrix(parameter_dict[ang] / 180 * numpy.pi, axis)
    e1 = A[:, 0]
    e1 = e1 / numpy.linalg.norm(e1)
    e2 = A[:, 1]
    e2 = e2 / numpy.linalg.norm(e2)

    transform = transformation_matrix(e1, e2, position_vector, voxel_size)

    return MRSData(raw_data,
                   dt,
                   parameter_dict["synthesizer_frequency"] * 1e-6,
                   te=parameter_dict["echo_time"],
                   tr=parameter_dict["repetition_time"],
                   transform=transform)
Example #8
def load_dicom_volume(filename):
    """ Creates a 3D volume from all the slices in a folder and extracts useful information from a supplied image

    Parameters
    ----------
    filename : DICOM file

    Returns
    -------
    dict
        A dictionary containing values for voxel spacing, position, volume, vectors, and a transformation matrix

    """
    # load the supplied file and get the UID of the series
    ds = pydicom.read_file(filename)
    seriesUID = ds.SeriesInstanceUID

    # get the position of the image
    position = numpy.array(list(map(float, ds.ImagePositionPatient)))

    # get the direction normal to the plane of the image
    row_vector = numpy.array(ds.ImageOrientationPatient[:3])
    col_vector = numpy.array(ds.ImageOrientationPatient[3:])
    normal_vector = numpy.cross(row_vector, col_vector)

    # we order slices by their distance along the normal
    def normal_distance(coords):
        return numpy.dot(normal_vector, coords)

    # create a dictionary to hold the slices as we load them
    slices = {normal_distance(position): ds.pixel_array}

    # extract the path to the folder of the file so we can look for others from the same series
    folder, _ = os.path.split(filename)
    for name in os.listdir(folder):
        if name.lower().endswith(".ima") or name.lower().endswith(".dcm"):
            new_dicom_name = os.path.join(folder, name)
            new_ds = pydicom.read_file(new_dicom_name)

            # check that the series UID matches
            if new_ds.SeriesInstanceUID == seriesUID:
                if new_ds.pixel_array.shape != ds.pixel_array.shape:
                    continue
                new_position = list(map(float, new_ds.ImagePositionPatient))
                slices[normal_distance(new_position)] = new_ds.pixel_array

                # we set the overall position of the volume with the position
                # of the lowest slice
                if normal_distance(new_position) < normal_distance(position):
                    position = new_position

    # that is all the slices in the folder, assemble them into a 3d volume
    voxel_array = numpy.zeros(
        (len(slices), ds.pixel_array.shape[0], ds.pixel_array.shape[1]),
        dtype=ds.pixel_array.dtype)
    sorted_slice_positions = sorted(slices.keys())
    for i, slice_position in enumerate(sorted_slice_positions):
        voxel_array[i] = slices[slice_position]

    # the voxel spacing is a combination of PixelSpacing and slice separation
    voxel_spacing = list(map(float, ds.PixelSpacing))
    voxel_spacing.append(sorted_slice_positions[1] - sorted_slice_positions[0])

    # replace the initial slice z position with the lowest slice z position
    # position[2] = sorted_slice_positions[0]

    transform = suspect.transformation_matrix(row_vector, col_vector, position,
                                              voxel_spacing)

    return suspect.base.ImageBase(voxel_array, transform)
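The normal_distance helper above orders slices by projecting their ImagePositionPatient onto the slice normal, which works for any oblique orientation. A small illustration of that ordering with made-up positions (the values below are not taken from any real dataset):

import numpy

row_vector = numpy.array([1.0, 0.0, 0.0])
col_vector = numpy.array([0.0, 1.0, 0.0])
normal_vector = numpy.cross(row_vector, col_vector)

# hypothetical ImagePositionPatient values for three slices, listed out of order
positions = [[0.0, 0.0, 10.0], [0.0, 0.0, 0.0], [0.0, 0.0, 5.0]]
ordered = sorted(positions, key=lambda p: numpy.dot(normal_vector, p))
print(ordered)  # [[0.0, 0.0, 0.0], [0.0, 0.0, 5.0], [0.0, 0.0, 10.0]]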
Example #9
def load_rda(filename):
    header_dict = {}
    with open(filename, 'rb') as fin:
        header_line = fin.readline().strip()
        if header_line != b">>> Begin of header <<<":
            raise Exception("Error reading file {} as a .rda".format(filename))
        header_line = fin.readline().strip().decode('windows-1252')
        while header_line != ">>> End of header <<<":
            key, value = map(str.strip, header_line.split(":", 1))
            if key in rda_types["strings"]:
                header_dict[key] = value
            elif key in rda_types["integers"]:
                header_dict[key] = int(value)
            elif key in rda_types["floats"]:
                header_dict[key] = float(value)
            elif "[" in key and "]" in key:
                # could be a dict or a list
                key, index = re.split(r"\]|\[", key)[0:2]
                if key in rda_types["dictionaries"]:
                    if key not in header_dict:
                        header_dict[key] = {}
                    header_dict[key][index] = value
                else:
                    # not a dictionary, must be a list
                    if key in rda_types["float_arrays"]:
                        value = float(value)
                    elif key in rda_types["integer_arrays"]:
                        value = int(value)
                    index = int(index)
                    # make sure there is a list in the header_dict, with enough entries
                    if key not in header_dict:
                        header_dict[key] = []
                    while len(header_dict[key]) <= index:
                        header_dict[key].append(0)
                    header_dict[key][index] = value
            header_line = fin.readline().strip().decode('windows-1252')
        # now we can read the data
        data = fin.read()

    # the shape of the data in slice, column, row, time format
    data_shape = header_dict["CSIMatrixSize"][::-1]
    data_shape.append(header_dict["VectorSize"])
    data_shape = numpy.array(data_shape)
    data_size = numpy.prod(
        data_shape) * 16  # each data point is a complex double, 16 bytes
    if data_size != len(data):
        raise ValueError(
            "Error reading file {}: expected {} bytes of data, got {}".format(
                filename, data_size, len(data)))

    # unpack the data into complex numbers
    data_as_floats = struct.unpack("<{}d".format(numpy.prod(data_shape) * 2),
                                   data)
    float_iter = iter(data_as_floats)
    complex_iter = (complex(r, i) for r, i in zip(float_iter, float_iter))
    complex_data = numpy.fromiter(complex_iter, "complex64",
                                  int(numpy.prod(data_shape)))
    complex_data = numpy.reshape(complex_data, data_shape).squeeze()

    # some .rda files have a misnamed field, correct this here
    if "VOIReadoutFOV" not in header_dict:
        if "VOIReadoutVOV" in header_dict:
            header_dict["VOIReadoutFOV"] = header_dict.pop("VOIReadoutVOV")

    # combine positional elements in the header
    voi_size = (header_dict["VOIReadoutFOV"], header_dict["VOIPhaseFOV"],
                header_dict["VOIThickness"])
    voi_center = (header_dict["VOIPositionSag"], header_dict["VOIPositionCor"],
                  header_dict["VOIPositionTra"])
    voxel_size = (header_dict["PixelSpacingCol"],
                  header_dict["PixelSpacingRow"],
                  header_dict["PixelSpacing3D"])

    x_vector = numpy.array(header_dict["RowVector"])
    y_vector = numpy.array(header_dict["ColumnVector"])

    to_scanner = transformation_matrix(x_vector, y_vector,
                                       numpy.array(voi_center), voxel_size)

    # put useful components from the header in the metadata
    metadata = {
        "voi_size": voi_size,
        "position": voi_center,
        "voxel_size": voxel_size,
        "protocol": header_dict["ProtocolName"],
        "to_scanner": to_scanner,
        "from_scanner": numpy.linalg.inv(to_scanner)
    }

    return MRSData(complex_data,
                   header_dict["DwellTime"] * 1e-6,
                   header_dict["MRFrequency"],
                   te=header_dict["TE"],
                   tr=header_dict["TR"],
                   transform=to_scanner,
                   metadata=metadata)
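The bracketed-key branch above splits header keys such as "SomeArray[2]" into a base name and an index before deciding whether the entry belongs in a dictionary or a list. A minimal sketch of just that parsing step, using a made-up header line for illustration:

import re

line = "SomeArrayField[2] : 12.5"  # hypothetical .rda header line
key, value = map(str.strip, line.split(":", 1))
base_key, index = re.split(r"\]|\[", key)[0:2]
print(base_key, int(index), float(value))  # SomeArrayField 2 12.5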
Example #10
def parse_twix_header(header_string):
    #print(header_string)
    # get the name of the protocol being acquired
    protocol_name_string = re.search(
        r"<ParamString.\"tProtocolName\">  { \".+\"  }\n",
        header_string).group()
    protocol_name = protocol_name_string.split("\"")[3]
    # get information about the subject being scanned
    patient_id_string = re.search(
        r"<ParamString.\"PatientID\">  { \".+\"  }\n", header_string).group()
    patient_id = patient_id_string.split("\"")[3]
    patient_name = re.escape(
        re.search(r"(<ParamString.\"PatientName\">  { \")(.+)(\"  }\n)",
                  header_string).group(2))
    patient_birthday = re.search(
        r"(<ParamString.\"PatientBirthDay\">  { \")(.+)(\"  }\n)",
        header_string).group(2)
    # get the FrameOfReference to get the date and time of the scan
    frame_of_reference = re.search(
        r"(<ParamString.\"FrameOfReference\">  { )(\".+\")(  }\n)",
        header_string).group(2)
    if re.match("x*", frame_of_reference):
        exam_date = "x" * 6
        exam_time = "x" * 6
    else:
        exam_date_time = frame_of_reference.split(".")[10]
        exam_date = exam_date_time[2:8]
        exam_time = exam_date_time[8:14]
    # get the scan parameters
    frequency_matches = [
        r"<ParamLong.\"Frequency\">  { \d*  }",
        r"<ParamDouble.\"MainFrequency\">  { (.+)}\n"
    ]
    for frequency_pattern in frequency_matches:
        match = re.search(frequency_pattern, header_string)
        if match:
            frequency_string = match.group()
            number_string = re.findall(r"[0-9\.]+", frequency_string)[-1]
            frequency = float(number_string) * 1e-6
            break
    else:
        raise KeyError("Unable to identify Frequency from header")
    dwell_time_matches = [
        r"<ParamLong.\"DwellTimeSig\">  { \d*  }",
        r"<ParamDouble.\"DwellTime\">  { (.+)}"
    ]
    for dwell_time_match in dwell_time_matches:
        match = re.search(dwell_time_match, header_string)
        if match:
            dwell_time_string = match.group()
            number_string = re.findall(r"[0-9\.]+", dwell_time_string)[-1]
            dwell_time = float(number_string) * 1e-9
            break
    else:
        raise KeyError("Unable to identify Dwell Time from header")

    # get TE
    # TE is stored in us, we would prefer to use ms
    te = float(re.search(r"(alTE\[0\]\s*=\s*)(\d+)",
                         header_string).group(2)) / 1000
    # get TR
    tr = float(re.search(r"(alTR\[0\]\s*=\s*)(\d+)",
                         header_string).group(2)) / 1000

    # get voxel size
    ro_fov = read_double("VoI_RoFOV", header_string)
    pe_fov = read_double("VoI_PeFOV", header_string)
    slice_thickness = read_double("VoI_SliceThickness", header_string)

    # get position information
    pos_sag = read_double("VoI_Position_Sag", header_string)
    pos_cor = read_double("VoI_Position_Cor", header_string)
    pos_tra = read_double("VoI_Position_Tra", header_string)

    # get orientation information
    in_plane_rot = read_double("VoI_InPlaneRotAngle", header_string)
    normal_sag = read_double("VoI_Normal_Sag", header_string)
    normal_cor = read_double("VoI_Normal_Cor", header_string)
    normal_tra = read_double("VoI_Normal_Tra", header_string)

    # the orientation is stored in a somewhat strange way - a normal vector and
    # a rotation angle. to get the row vector, we first use Gram-Schmidt to
    # make [-1, 0, 0] (the default row vector) orthogonal to the normal, and
    # then rotate that vector by the rotation angle (previously done with a
    # quaternion, but the quaternion library has issues with Travis, so a
    # rotation matrix is used instead)
    x_vector = numpy.array([-1, 0, 0])
    normal_vector = numpy.array([normal_sag, normal_cor, normal_tra])
    orthogonal_x = x_vector - numpy.dot(x_vector,
                                        normal_vector) * normal_vector
    orthonormal_x = orthogonal_x / numpy.linalg.norm(orthogonal_x)
    #rotation_quaternion = quaternion.from_rotation_vector(in_plane_rot * normal_vector)
    #row_vector2 = quaternion.rotate_vectors(rotation_quaternion, orthonormal_x)
    rot_matrix = rotation_matrix(in_plane_rot, normal_vector)
    row_vector = numpy.dot(rot_matrix, orthonormal_x)
    column_vector = numpy.cross(row_vector, normal_vector)
    transform = transformation_matrix(row_vector, column_vector,
                                      [pos_sag, pos_cor, pos_tra],
                                      [ro_fov, pe_fov, slice_thickness])

    return {
        "protocol_name": protocol_name,
        "patient_name": patient_name,
        "patient_id": patient_id,
        "patient_birthdate": patient_birthday,
        "dt": dwell_time,
        "f0": frequency,
        "transform": transform,
        "te": te,
        "tr": tr,
        "exam_date": exam_date,
        "exam_time": exam_time
    }
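The comment in the function above describes how the row vector is built: use Gram-Schmidt to make a default direction orthogonal to the slice normal, then rotate it about the normal by the in-plane rotation angle. Below is a small self-contained sketch of that construction, with a generic Rodrigues rotation standing in for the rotation_matrix helper used in the example (assumed here to behave equivalently):

import numpy

def axis_angle_rotation(angle, axis):
    # Rodrigues formula: R = I + sin(a) K + (1 - cos(a)) K^2
    axis = numpy.asarray(axis, dtype=float)
    axis = axis / numpy.linalg.norm(axis)
    K = numpy.array([[0, -axis[2], axis[1]],
                     [axis[2], 0, -axis[0]],
                     [-axis[1], axis[0], 0]])
    return numpy.eye(3) + numpy.sin(angle) * K + (1 - numpy.cos(angle)) * (K @ K)

normal_vector = numpy.array([0.0, 0.0, 1.0])   # example slice normal
x_vector = numpy.array([-1.0, 0.0, 0.0])       # default row direction
# Gram-Schmidt: remove the component of the default row direction along the normal
orthogonal_x = x_vector - numpy.dot(x_vector, normal_vector) * normal_vector
orthonormal_x = orthogonal_x / numpy.linalg.norm(orthogonal_x)
# rotate in-plane by 30 degrees about the normal to get the row vector
row_vector = axis_angle_rotation(numpy.pi / 6, normal_vector) @ orthonormal_x
column_vector = numpy.cross(row_vector, normal_vector)
print(row_vector, column_vector)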
Example #11
def load_siemens_dicom(filename):
    """Imports a file in the Siemens .IMA format.

    Parameters
    ----------
    filename : str
        The name of the file to import

    """
    # the .IMA format is a DICOM standard, but unfortunately most of the information is contained inside a private and
    # very complicated header with its own data storage format, so we have to get that information out along with the data
    # start by reading in the DICOM file completely
    dataset = pydicom.dicomio.read_file(filename)
    # now look through the tags (0029, 00xx) to work out which xx refers to the csa header
    # xx seems to start at 10 for Siemens
    xx = 0x0010
    header_index = 0
    while (0x0029, xx) in dataset:
        if dataset[0x0029, xx].value == "SIEMENS CSA HEADER":
            header_index = xx
        xx += 1
    # check that we have found the header
    if header_index == 0:
        raise KeyError("Could not find header index")
    # now we know which tag contains the CSA image header info: (0029, xx10)
    csa_header_bytes = dataset[0x0029, 0x0100 * header_index + 0x0010].value
    csa_header = read_csa_header(csa_header_bytes)
    # for key, value in csa_header.items():
    #    print("%s : %s" % (str(key), str(value)))
    # we can also get the series header info: (0029, xx20), but this seems to be mostly pretty boring

    # now we can work out the shape of the data (slices, rows, columns, fid_points)
    data_shape = (csa_header["SpectroscopyAcquisitionOut-of-planePhaseSteps"],
                  csa_header["Rows"],
                  csa_header["Columns"],
                  csa_header["DataPointColumns"],
                  )

    # now look through the tags (7fe1, 00xx) to work out which xx refers to the csa data
    # xx seems to start at 10 for Siemens
    xx = 0x0010
    data_index = 0
    while (0x7fe1, xx) in dataset:
        if dataset[0x7fe1, xx].value == "SIEMENS CSA NON-IMAGE":
            data_index = xx
        xx += 1
    # check that we have found the data
    if data_index == 0:
        raise KeyError("Could not find data index")
    # extract the actual data bytes
    csa_data_bytes = dataset[0x7fe1, 0x0100 * data_index + 0x0010].value
    # the data is stored as a list of 4 byte floats in (real, imaginary) pairs
    data_floats = struct.unpack("<%df" % (len(csa_data_bytes) / 4), csa_data_bytes)
    complex_data = complex_array_from_iter(iter(data_floats),
                                           length=len(data_floats) // 2,
                                           shape=data_shape)

    in_plane_rot = csa_header["VoiInPlaneRotation"]
    x_vector = numpy.array([-1, 0, 0])
    normal_vector = numpy.array(csa_header["VoiOrientation"])
    orthogonal_x = x_vector - numpy.dot(x_vector, normal_vector) * normal_vector
    orthonormal_x = orthogonal_x / numpy.linalg.norm(orthogonal_x)
    rot_matrix = rotation_matrix(in_plane_rot, normal_vector)
    row_vector = numpy.dot(rot_matrix, orthonormal_x)
    column_vector = numpy.cross(row_vector, normal_vector)
    voxel_size = (*csa_header["PixelSpacing"],
                  csa_header["SliceThickness"])
    transform = transformation_matrix(row_vector,
                                      column_vector,
                                      csa_header["VoiPosition"],
                                      voxel_size)

    voi_size = [csa_header["VoiReadoutFoV"],
                csa_header["VoiPhaseFoV"],
                csa_header["VoiThickness"]]

    metadata = {
        "voi_size": voi_size
    }

    return MRSData(complex_data,
                   csa_header["RealDwellTime"] * 1e-9,
                   csa_header["ImagingFrequency"],
                   te=csa_header["EchoTime"],
                   transform=transform,
                   metadata=metadata)
Example #12
def parse_twix_header(header_string):
    #print(header_string)
    # get the name of the protocol being acquired
    protocol_name_matches = [r"tProtocolName\s*=\s*\"(.*)\"\s*"]
    protocol_name = get_meta_regex(protocol_name_matches, header_string)
    # get information about the subject being scanned
    patient_id_string = re.search(
        r"<ParamString.\"PatientID\">\s*{\s*\".+\"\s*}\n",
        header_string).group()
    patient_id = patient_id_string.split("\"")[3]
    patient_name = re.escape(
        re.search(r"(<ParamString.\"PatientName\">\s*{\s*\")(.+)(\"\s*}\n)",
                  header_string).group(2))
    patient_birthday = re.search(
        r"(<ParamString.\"PatientBirthDay\">\s*{\s*\")(.+)(\"\s*}\n)",
        header_string).group(2)
    # get the FrameOfReference to get the date and time of the scan
    frame_of_reference = re.search(
        r"(<ParamString.\"FrameOfReference\">\s*{\s*)(\".+\")(\s*}\n)",
        header_string).group(2)
    if re.match("x*", frame_of_reference):
        exam_date = "x" * 6
        exam_time = "x" * 6
    else:
        exam_date_time = frame_of_reference.split(".")[10]
        exam_date = exam_date_time[2:8]
        exam_time = exam_date_time[8:14]
    # get the scan parameters
    frequency_matches = [
        r"sTXSPEC\.asNucleusInfo\[0\]\.lFrequency\s*=\s*([[0-9]*[.]?[0-9]*]{0,})\s*",
        r"<ParamLong.\"Frequency\">  { (\d*)  }",
        r"<ParamDouble.\"MainFrequency\">  { (.+)}\n"
    ]
    frequency = get_meta_regex(frequency_matches, header_string, convert=1e-6)

    dwell_time_matches = [
        r"sRXSPEC\.alDwellTime\[0\]\s*=\s*([[0-9]*[.]?[0-9]*]{0,})\s*",
        r"<ParamLong.\"DwellTimeSig\">  { (\d*)  }",
        r"<ParamDouble.\"DwellTime\">  { (.+)}",
    ]
    dwell_time = get_meta_regex(dwell_time_matches,
                                header_string,
                                convert=1e-9)

    # get TE
    # TE is stored in us, we would prefer to use ms
    te = float(re.search(r"(alTE\[0\]\s*=\s*)(\d+)",
                         header_string).group(2)) / 1000
    # get TR
    tr = float(re.search(r"(alTR\[0\]\s*=\s*)(\d+)",
                         header_string).group(2)) / 1000

    # get voxel size
    ro_fov_matches = [
        r"sSpecPara\.sVoI\.dReadoutFOV\s*=\s*(-?[[0-9]*[.]?[0-9]*]{0,})\s*",
        r"<ParamDouble.\"VoI_RoFOV\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_RoFOV\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    pe_fov_matches = [
        r"sSpecPara\.sVoI\.dPhaseFOV\s*=\s*(-?[[0-9]*[.]?[0-9]*]{0,})\s*",
        r"<ParamDouble.\"VoI_PeFOV\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_PeFOV\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    slice_thickness_matches = [
        r"sSpecPara\.sVoI\.dThickness\s*=\s*(-?[[0-9]*[.]?[0-9]*]{0,})\s*",
        r"<ParamDouble.\"VoI_SliceThickness\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_SliceThickness\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    ro_fov = get_meta_regex(ro_fov_matches, header_string, default=0)
    pe_fov = get_meta_regex(pe_fov_matches, header_string, default=0)
    slice_thickness = get_meta_regex(slice_thickness_matches,
                                     header_string,
                                     default=0)

    # get position information
    pos_sag_matches = [
        r"sSpecPara\.sVoI\.sPosition\.dSag\s*=\s*(-?[[0-9]*[.]?[0-9]*]{0,})\s*",
        r"<ParamDouble\.\"VoI_Position_Sag\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_Position_Sag\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    pos_cor_matches = [
        r"sSpecPara\.sVoI\.sPosition\.dCor\s*=\s*(-?[[0-9]*[.]?[0-9]*]{0,})\s*",
        r"<ParamDouble\.\"VoI_Position_Cor\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_Position_Cor\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    pos_tra_matches = [
        r"sSpecPara\.sVoI\.sPosition\.dTra\s*=\s*(-?[[0-9]*[.]?[0-9]*]{0,})\s*",
        r"<ParamDouble\.\"VoI_Position_Tra\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_Position_Tra\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    pos_sag = get_meta_regex(pos_sag_matches, header_string, default=0)
    pos_cor = get_meta_regex(pos_cor_matches, header_string, default=0)
    pos_tra = get_meta_regex(pos_tra_matches, header_string, default=0)

    # get orientation information
    in_plane_rot_matches = [
        r"<ParamDouble\.\"VoI_InPlaneRotAngle\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoiInPlaneRot\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_InPlaneRotAngle\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    normal_sag_matches = [
        r"sSpecPara\.sVoI\.sNormal\.dSag\s*=\s*(-?[[0-9]*[.]?[0-9]*]{0,})\s*$",
        r"<ParamDouble\.\"VoI_Normal_Sag\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoiNormalSag\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_Normal_Sag\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    normal_cor_matches = [
        r"sSpecPara\.sVoI\.sNormal\.dCor\s*=\s*(-?[[0-9]*[.]?[0-9]*]{0,})\s*$",
        r"<ParamDouble\.\"VoI_Normal_Cor\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoiNormalCor\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_Normal_Cor\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    normal_tra_matches = [
        r"sSpecPara\.sVoI\.sNormal\.dTra\s*=\s*(-?[[0-9]*[.]?[0-9]*]{0,})\s*$",
        r"<ParamDouble\.\"VoI_Normal_Tra\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoiNormalTra\">  { <Precision> \d+(  -?[0-9\.]+)?  }",
        r"<ParamDouble\.\"VoI_Normal_Tra\">\s*{\s*(-?[0-9\.]+)?\s*}"
    ]
    in_plane_rot = get_meta_regex(in_plane_rot_matches,
                                  header_string,
                                  default=0)
    normal_sag = get_meta_regex(normal_sag_matches, header_string, default=0)
    normal_cor = get_meta_regex(normal_cor_matches, header_string, default=0)
    normal_tra = get_meta_regex(normal_tra_matches, header_string, default=0)

    # the orientation is stored in a somewhat strange way - a normal vector and
    # a rotation angle. to get the row vector, we first use Gram-Schmidt to
    # make the default row vector ([0, 0, 1] for sagittal volumes, otherwise
    # [-1, 0, 0]) orthogonal to the normal, and then rotate that vector by the
    # rotation angle (previously done with a quaternion, but the quaternion
    # library has issues with Travis, so a rotation matrix is used instead)
    normal_vector = numpy.array([normal_sag, normal_cor, normal_tra])
    if calculate_orientation(normal_vector) == "SAG":
        x_vector = numpy.array([0, 0, 1])
    else:
        x_vector = numpy.array([-1, 0, 0])
    orthogonal_x = x_vector - numpy.dot(x_vector,
                                        normal_vector) * normal_vector
    orthonormal_x = orthogonal_x / numpy.linalg.norm(orthogonal_x)
    #rotation_quaternion = quaternion.from_rotation_vector(in_plane_rot * normal_vector)
    #row_vector2 = quaternion.rotate_vectors(rotation_quaternion, orthonormal_x)
    rot_matrix = rotation_matrix(in_plane_rot, normal_vector)
    row_vector = numpy.dot(rot_matrix, orthonormal_x)
    column_vector = numpy.cross(row_vector, normal_vector)
    transform = transformation_matrix(row_vector, column_vector,
                                      [pos_sag, pos_cor, pos_tra],
                                      [ro_fov, pe_fov, slice_thickness])

    return {
        "protocol_name": protocol_name,
        "patient_name": patient_name,
        "patient_id": patient_id,
        "patient_birthdate": patient_birthday,
        "dt": dwell_time,
        "f0": frequency,
        "transform": transform,
        "te": te,
        "tr": tr,
        "exam_date": exam_date,
        "exam_time": exam_time
    }
Example #13
def load_rda(filename):
    header_dict = {}
    with open(filename, 'rb') as fin:
        header_line = fin.readline().strip()
        if header_line != b">>> Begin of header <<<":
            raise Exception("Error reading file {} as a .rda".format(filename))
        header_line = fin.readline().strip().decode('windows-1252')
        while header_line != ">>> End of header <<<":
            key, value = map(str.strip, header_line.split(":", 1))
            if key in rda_types["strings"]:
                header_dict[key] = value
            elif key in rda_types["integers"]:
                header_dict[key] = int(value)
            elif key in rda_types["floats"]:
                header_dict[key] = float(value)
            elif "[" in key and "]" in key:
                # could be a dict or a list
                key, index = re.split(r"\]|\[", key)[0:2]
                if key in rda_types["dictionaries"]:
                    if key not in header_dict:
                        header_dict[key] = {}
                    header_dict[key][index] = value
                else:
                    # not a dictionary, must be a list
                    if key in rda_types["float_arrays"]:
                        value = float(value)
                    elif key in rda_types["integer_arrays"]:
                        value = int(value)
                    index = int(index)
                    # make sure there is a list in the header_dict, with enough entries
                    if key not in header_dict:
                        header_dict[key] = []
                    while len(header_dict[key]) <= index:
                        header_dict[key].append(0)
                    header_dict[key][index] = value
            header_line = fin.readline().strip().decode('windows-1252')
        # now we can read the data
        data = fin.read()

    # the shape of the data in slice, column, row, time format
    data_shape = header_dict["CSIMatrixSize"][::-1]
    data_shape.append(header_dict["VectorSize"])
    data_shape = numpy.array(data_shape)
    data_size = numpy.prod(data_shape) * 16  # each data point is a complex double, 16 bytes
    if data_size != len(data):
        raise ValueError("Error reading file {}: expected {} bytes of data, got {}".format(filename, data_size, len(data)))

    # unpack the data into complex numbers
    data_as_floats = struct.unpack("<{}d".format(numpy.prod(data_shape) * 2), data)
    float_iter = iter(data_as_floats)
    complex_iter = (complex(r, i) for r, i in zip(float_iter, float_iter))
    complex_data = numpy.fromiter(complex_iter, "complex64", int(numpy.prod(data_shape)))
    complex_data = numpy.reshape(complex_data, data_shape).squeeze()

    # some .rda files have a misnamed field, correct this here
    if "VOIReadoutFOV" not in header_dict:
        if "VOIReadoutVOV" in header_dict:
            header_dict["VOIReadoutFOV"] = header_dict.pop("VOIReadoutVOV")

    # combine positional elements in the header
    voi_size = (header_dict["VOIReadoutFOV"],
                header_dict["VOIPhaseFOV"],
                header_dict["VOIThickness"])
    voi_center = (header_dict["VOIPositionSag"],
                  header_dict["VOIPositionCor"],
                  header_dict["VOIPositionTra"])
    voxel_size = (header_dict["PixelSpacingCol"],
                  header_dict["PixelSpacingRow"],
                  header_dict["PixelSpacing3D"])

    x_vector = numpy.array(header_dict["RowVector"])
    y_vector = numpy.array(header_dict["ColumnVector"])

    to_scanner = transformation_matrix(x_vector, y_vector, numpy.array(voi_center), voxel_size)

    # put useful components from the header in the metadata
    metadata = {
        "voi_size": voi_size,
        "position": voi_center,
        "voxel_size": voxel_size,
        "protocol": header_dict["ProtocolName"],
        "to_scanner": to_scanner,
        "from_scanner": numpy.linalg.inv(to_scanner)
    }

    return MRSData(complex_data,
                   header_dict["DwellTime"] * 1e-6,
                   header_dict["MRFrequency"],
                   te=header_dict["TE"],
                   transform=to_scanner,
                   metadata=metadata)
Example #14
def load_dicom_volume(filename):
    # load the supplied file and get the UID of the series
    ds = pydicom.read_file(filename)
    seriesUID = ds.SeriesInstanceUID

    # get the position of the image
    position = numpy.array(list(map(float, ds.ImagePositionPatient)))

    # get the direction normal to the plane of the image
    row_vector = numpy.array(ds.ImageOrientationPatient[:3])
    col_vector = numpy.array(ds.ImageOrientationPatient[3:])
    normal_vector = numpy.cross(row_vector, col_vector)

    # we order slices by their distance along the normal
    def normal_distance(coords):
        return numpy.dot(normal_vector, coords)

    # create a dictionary to hold the slices as we load them
    slices = {normal_distance(position): ds.pixel_array}

    # extract the path to the folder of the file so we can look for others from the same series
    folder, _ = os.path.split(filename)
    for name in os.listdir(folder):
        if name.lower().endswith(".ima") or name.lower().endswith(".dcm"):
            new_dicom_name = os.path.join(folder, name)
            new_ds = pydicom.read_file(new_dicom_name)

            # check that the series UID matches
            if new_ds.SeriesInstanceUID == seriesUID:
                if new_ds.pixel_array.shape != ds.pixel_array.shape:
                    continue
                new_position = list(map(float, new_ds.ImagePositionPatient))
                slices[normal_distance(new_position)] = new_ds.pixel_array

                # we set the overall position of the volume with the position
                # of the lowest slice
                if normal_distance(new_position) < normal_distance(position):
                    position = new_position

    # that is all the slices in the folder, assemble them into a 3d volume
    voxel_array = numpy.zeros((len(slices),
                               ds.pixel_array.shape[0],
                               ds.pixel_array.shape[1]), dtype=ds.pixel_array.dtype)
    sorted_slice_positions = sorted(slices.keys())
    for i, slice_position in enumerate(sorted_slice_positions):
        voxel_array[i] = slices[slice_position]

    # the voxel spacing is a combination of PixelSpacing and slice separation
    voxel_spacing = list(map(float, ds.PixelSpacing))
    voxel_spacing.append(sorted_slice_positions[1] - sorted_slice_positions[0])

    # replace the initial slice z position with the lowest slice z position
    # position[2] = sorted_slice_positions[0]

    transform = transformation_matrix(row_vector,
                                      col_vector,
                                      position,
                                      voxel_spacing)

    return {
        "voxel_spacing": voxel_spacing,
        "position": position,
        "volume": voxel_array,
        "vectors": [row_vector, col_vector, normal_vector],
        "transform": transform
    }
Example #15
def load_siemens_dicom(filename):
    """Imports a file in the Siemens .IMA format.

    Parameters
    ----------
    filename : str
        The name of the file to import

    """
    # the .IMA format is a DICOM standard, but unfortunately most of the information is contained inside a private and
    # very complicated header with its own data storage format, so we have to get that information out along with the data
    # start by reading in the DICOM file completely
    dataset = pydicom.dicomio.read_file(filename)
    # now look through the tags (0029, 00xx) to work out which xx refers to the csa header
    # xx seems to start at 10 for Siemens
    xx = 0x0010
    header_index = 0
    while (0x0029, xx) in dataset:
        if dataset[0x0029, xx].value == "SIEMENS CSA HEADER":
            header_index = xx
        xx += 1
    # check that we have found the header
    if header_index == 0:
        raise KeyError("Could not find header index")
    # now we know which tag contains the CSA image header info: (0029, xx10)
    csa_header_bytes = dataset[0x0029, 0x0100 * header_index + 0x0010].value
    csa_header = read_csa_header(csa_header_bytes)
    # for key, value in csa_header.items():
    #    print("%s : %s" % (str(key), str(value)))
    # we can also get the series header info: (0029, xx20), but this seems to be mostly pretty boring

    # now we can work out the shape of the data (slices, rows, columns, fid_points)
    data_shape = (
        csa_header["SpectroscopyAcquisitionOut-of-planePhaseSteps"],
        csa_header["Rows"],
        csa_header["Columns"],
        csa_header["DataPointColumns"],
    )

    # now look through the tags (7fe1, 00xx) to work out which xx refers to the csa data
    # xx seems to start at 10 for Siemens
    xx = 0x0010
    data_index = 0
    while (0x7fe1, xx) in dataset:
        if dataset[0x7fe1, xx].value == "SIEMENS CSA NON-IMAGE":
            data_index = xx
        xx += 1
    # check that we have found the data
    if data_index == 0:
        raise KeyError("Could not find data index")
    # extract the actual data bytes
    csa_data_bytes = dataset[0x7fe1, 0x0100 * data_index + 0x0010].value
    # the data is stored as a list of 4 byte floats in (real, imaginary) pairs
    data_floats = struct.unpack("<%df" % (len(csa_data_bytes) / 4),
                                csa_data_bytes)

    # a bug report (#143) has been submitted that for at least one .IMA dataset
    # created with an old Siemens VB17 WIP, the data_shape worked out above
    # does not match the actual size of the data because the
    # Out-of-planePhaseSteps value is not the number of slices. Assuming this
    # is a rare situation that is unlikely to happen often, the simplest
    # solution is to check here that the size matches and, if it does not, to
    # use the size of the available data as the shape
    available_points = len(data_floats) // 2
    if numpy.prod(data_shape) != available_points:
        warnings.warn("The calculated data shape for this file {} does not "
                      "match the size of data contained in the file {}. "
                      "Therefore the returned data shape from this function "
                      "will simply be ({},), any reshaping must be done by "
                      "the user. If you need help with this or believe this "
                      "has occurred in error, please raise an issue at "
                      "https://github.com/openmrslab/suspect/issues."
                      .format(data_shape, available_points, available_points))
        data_shape = (available_points, )

    complex_data = complex_array_from_iter(iter(data_floats),
                                           length=len(data_floats) // 2,
                                           shape=data_shape)

    in_plane_rot = csa_header["VoiInPlaneRotation"]
    x_vector = numpy.array([-1, 0, 0])
    normal_vector = numpy.array(csa_header["VoiOrientation"])
    orthogonal_x = x_vector - numpy.dot(x_vector,
                                        normal_vector) * normal_vector
    orthonormal_x = orthogonal_x / numpy.linalg.norm(orthogonal_x)
    rot_matrix = rotation_matrix(in_plane_rot, normal_vector)
    row_vector = numpy.dot(rot_matrix, orthonormal_x)
    column_vector = numpy.cross(row_vector, normal_vector)
    voxel_size = (*csa_header["PixelSpacing"], csa_header["SliceThickness"])
    transform = transformation_matrix(row_vector, column_vector,
                                      csa_header["VoiPosition"], voxel_size)

    voi_size = [
        csa_header["VoiReadoutFoV"], csa_header["VoiPhaseFoV"],
        csa_header["VoiThickness"]
    ]

    metadata = {"voi_size": voi_size}

    return MRSData(complex_data,
                   csa_header["RealDwellTime"] * 1e-9,
                   csa_header["ImagingFrequency"],
                   te=csa_header["EchoTime"],
                   tr=csa_header["RepetitionTime"],
                   transform=transform,
                   metadata=metadata)
Example #16
def parse_twix_header(header_string):
    #print(header_string)
    # get the name of the protocol being acquired
    protocol_name_string = re.search(r"<ParamString.\"tProtocolName\">  { \".+\"  }\n", header_string).group()
    protocol_name = protocol_name_string.split("\"")[3]
    # get information about the subject being scanned
    patient_id_string = re.search(r"<ParamString.\"PatientID\">  { \".+\"  }\n", header_string).group()
    patient_id = patient_id_string.split("\"")[3]
    patient_name = re.escape(re.search(r"(<ParamString.\"PatientName\">  { \")(.+)(\"  }\n)", header_string).group(2))
    patient_birthday = re.search(r"(<ParamString.\"PatientBirthDay\">  { \")(.+)(\"  }\n)", header_string).group(2)
    # get the FrameOfReference to get the date and time of the scan
    frame_of_reference = re.search(r"(<ParamString.\"FrameOfReference\">  { )(\".+\")(  }\n)", header_string).group(2)
    if re.match("x*", frame_of_reference):
        exam_date = "x" * 6
        exam_time = "x" * 6
    else:
        exam_date_time = frame_of_reference.split(".")[10]
        exam_date = exam_date_time[2:8]
        exam_time = exam_date_time[8:14]
    # get the scan parameters
    frequency_matches = [
        r"<ParamLong.\"Frequency\">  { \d*  }",
        r"<ParamDouble.\"MainFrequency\">  { (.+)}\n"
    ]
    for frequency_pattern in frequency_matches:
        match = re.search(frequency_pattern, header_string)
        if match:
            frequency_string = match.group()
            number_string = re.findall(r"[0-9\.]+", frequency_string)[-1]
            frequency = float(number_string) * 1e-6
            break
    else:
        raise KeyError("Unable to identify Frequency from header")
    dwell_time_matches = [
        r"<ParamLong.\"DwellTimeSig\">  { \d*  }",
        r"<ParamDouble.\"DwellTime\">  { (.+)}"
    ]
    for dwell_time_match in dwell_time_matches:
        match = re.search(dwell_time_match, header_string)
        if match:
            dwell_time_string = match.group()
            number_string = re.findall(r"[0-9\.]+", dwell_time_string)[-1]
            dwell_time = float(number_string) * 1e-9
            break
    else:
        raise KeyError("Unable to identify Dwell Time from header")

    # get TE
    # TE is stored in us, we would prefer to use ms
    te = float(re.search(r"(alTE\[0\]\s*=\s*)(\d+)", header_string).group(2)) / 1000

    # get voxel size
    ro_fov = read_double("VoI_RoFOV", header_string)
    pe_fov = read_double("VoI_PeFOV", header_string)
    slice_thickness = read_double("VoI_SliceThickness", header_string)

    # get position information
    pos_sag = read_double("VoI_Position_Sag", header_string)
    pos_cor = read_double("VoI_Position_Cor", header_string)
    pos_tra = read_double("VoI_Position_Tra", header_string)

    # get orientation information
    in_plane_rot = read_double("VoI_InPlaneRotAngle", header_string)
    normal_sag = read_double("VoI_Normal_Sag", header_string)
    normal_cor = read_double("VoI_Normal_Cor", header_string)
    normal_tra = read_double("VoI_Normal_Tra", header_string)

    # the orientation is stored in a somewhat strange way - a normal vector and
    # a rotation angle. to get the row vector, we first use Gram-Schmidt to
    # make [-1, 0, 0] (the default row vector) orthogonal to the normal, and
    # then rotate that vector by the rotation angle (previously done with a
    # quaternion, but the quaternion library has issues with Travis, so a
    # rotation matrix is used instead)
    x_vector = numpy.array([-1, 0, 0])
    normal_vector = numpy.array([normal_sag, normal_cor, normal_tra])
    orthogonal_x = x_vector - numpy.dot(x_vector, normal_vector) * normal_vector
    orthonormal_x = orthogonal_x / numpy.linalg.norm(orthogonal_x)
    #rotation_quaternion = quaternion.from_rotation_vector(in_plane_rot * normal_vector)
    #row_vector2 = quaternion.rotate_vectors(rotation_quaternion, orthonormal_x)
    rot_matrix = rotation_matrix(in_plane_rot, normal_vector)
    row_vector = numpy.dot(rot_matrix, orthonormal_x)
    column_vector = numpy.cross(row_vector, normal_vector)
    transform = transformation_matrix(row_vector,
                                      column_vector,
                                      [pos_sag, pos_cor, pos_tra],
                                      [ro_fov, pe_fov, slice_thickness])

    return {"protocol_name": protocol_name,
            "patient_name": patient_name,
            "patient_id": patient_id,
            "patient_birthdate": patient_birthday,
            "dt": dwell_time,
            "f0": frequency,
            "transform": transform,
            "te": te,
            "exam_date": exam_date,
            "exam_time": exam_time
            }