Example #1
def transform_axis(x, y, transform):
    transformed_x = transform @ np.vstack(
        [x, np.zeros(len(x)), np.ones(len(x))])
    transformed_y = transform @ np.vstack(
        [np.zeros(len(y)), y, np.ones(len(y))])

    return transformed_x[0:2], transformed_y[0:2]
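
A minimal usage sketch, assuming transform_axis above is in scope; the 3x3 homogeneous transform and the tick arrays below are invented for illustration:

import numpy as np

# Hypothetical 2D homogeneous transform: rotate the axis tick positions by 30 degrees.
theta = np.deg2rad(30)
transform = np.array([
    [np.cos(theta), -np.sin(theta), 0.0],
    [np.sin(theta),  np.cos(theta), 0.0],
    [0.0,            0.0,           1.0],
])

x_ticks = np.array([0.0, 1.0, 2.0, 3.0])
y_ticks = np.array([0.0, 1.0, 2.0])

new_x, new_y = transform_axis(x_ticks, y_ticks, transform)
print(new_x.shape)  # (2, 4): transformed (x, y) coordinates of the x-axis ticks
print(new_y.shape)  # (2, 3): transformed (x, y) coordinates of the y-axis ticks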
Example #2
def merge_view_horz(volume, dx, dy):
    junctions = []

    # creating merged volume
    merge_vol = np.zeros((volume.shape[0], volume.shape[1]))

    # amplitude profile through the centre column of each image
    # (one column of `amplitude` per image in the volume)
    amplitude = np.zeros((volume.shape[0], volume.shape[2]))

    y = np.linspace(0,
                    0 + (volume.shape[0] * dy),
                    volume.shape[0],
                    endpoint=False)  # definition of the distance axis
    # x = np.arange(0,) #definition of the distance axis

    # resampled amplitude profiles (10x upsampling) used for peak finding
    ampl_resamp = np.zeros(((volume.shape[0]) * 10, volume.shape[2]))
    # amp_peak = np.zeros((volume.shape[0]) * 10)

    for item in tqdm(range(0, volume.shape[2])):
        merge_vol = merge_vol + volume[:, :, item]
        amplitude[:, item] = volume[:, int(volume.shape[1] / 2), item]
        ampl_resamp[:, item] = signal.resample(
            amplitude[:, item],
            int(len(amplitude)) * 10)  # resampling the amplitude vector
        # amp_peak = amp_peak + ampl_resamp[:, item] / volume.shape[2]

    fig, ax = plt.subplots(nrows=2, squeeze=True, figsize=(6, 8))

    extent = (0, 0 + (volume.shape[1] * dx), 0, 0 + (volume.shape[0] * dy))

    ax[0].imshow(merge_vol, extent=extent, aspect="auto")
    ax[0].set_xlabel("x distance [mm]")
    ax[0].set_ylabel("y distance [mm]")

    ax[1].plot(y, amplitude, label="Amplitude profile")
    ax[1].set_ylabel("amplitude")
    ax[1].set_xlabel("y distance [mm]")
    ax[1].legend()
    fig.suptitle("Merged volume", fontsize=16)

    # peaks, peak_type, peak_figs = peak_find(ampl_resamp, dy)
    peaks, peak_type, peak_figs = pf.peak_find(ampl_resamp, dy)
    # junction_figs = minimize_junction_Y(ampl_resamp, peaks, peak_type, dy / 10)
    junction_figs = minY.minimize_junction_Y(ampl_resamp, peaks, peak_type,
                                             dy / 10)
    junctions.append(junction_figs)

    return fig, peak_figs, junctions
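
The resampling step above uses scipy.signal.resample to upsample each profile by a factor of 10; a minimal, self-contained sketch of that call on a synthetic profile:

import numpy as np
from scipy import signal

profile = np.sin(np.linspace(0, np.pi, 128))             # synthetic amplitude profile
upsampled = signal.resample(profile, len(profile) * 10)  # Fourier-based 10x upsampling
print(profile.shape, upsampled.shape)                    # (128,) (1280,)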
Example #3
def get_dose_grid_structure_mask(structure_name, dcm_struct, dcm_dose):
    x_dose, y_dose, z_dose = xyz_axes_from_dataset(dcm_dose)

    xx_dose, yy_dose = np.meshgrid(x_dose, y_dose)
    points = np.swapaxes(np.vstack([xx_dose.ravel(), yy_dose.ravel()]), 0, 1)

    x_structure, y_structure, z_structure = pull_structure(
        structure_name, dcm_struct)
    structure_z_values = np.array([item[0] for item in z_structure])

    mask = np.zeros((len(y_dose), len(x_dose), len(z_dose)), dtype=bool)

    for z_val in structure_z_values:
        structure_indices = _get_indices(z_structure, z_val)

        for structure_index in structure_indices:
            dose_index = int(np.where(z_dose == z_val)[0])

            assert z_structure[structure_index][0] == z_dose[dose_index]

            structure_polygon = matplotlib.path.Path([
                (x_structure[structure_index][i],
                 y_structure[structure_index][i])
                for i in range(len(x_structure[structure_index]))
            ])
            mask[:, :, dose_index] = mask[:, :, dose_index] | (
                structure_polygon.contains_points(points).reshape(
                    len(y_dose), len(x_dose)))

    return mask
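
The rasterisation step above relies on matplotlib.path.Path.contains_points evaluated over the dose-grid points; a minimal sketch with a made-up square contour:

import numpy as np
import matplotlib.path

# Hypothetical dose grid and a square "structure" contour.
x_dose = np.linspace(-5, 5, 11)
y_dose = np.linspace(-5, 5, 11)
xx, yy = np.meshgrid(x_dose, y_dose)
points = np.swapaxes(np.vstack([xx.ravel(), yy.ravel()]), 0, 1)

square = matplotlib.path.Path([(-2, -2), (2, -2), (2, 2), (-2, 2)])
slice_mask = square.contains_points(points).reshape(len(y_dose), len(x_dose))
print(slice_mask.sum())  # number of grid points falling inside the square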
Example #4
def calculate_anti_aliased_mask(contours, dcm_ct, expansion=5):
    transformation_params = get_image_transformation_parameters(dcm_ct)
    dx, dy, Cx, Cy, Ox, Oy = transformation_params

    x_grid, y_grid, ct_size = get_grid(
        dcm_ct, transformation_params=transformation_params)

    new_ct_size = np.array(ct_size) * expansion

    expanded_mask = np.zeros(new_ct_size)

    for xyz in contours:
        x = np.array(xyz[0::3])
        y = np.array(xyz[1::3])
        z = xyz[2::3]

        assert len(set(z)) == 1

        r = (((y - Cy) / dy * Oy)) * expansion + (expansion - 1) * 0.5
        c = (((x - Cx) / dx * Ox)) * expansion + (expansion - 1) * 0.5

        expanded_mask = np.logical_or(
            expanded_mask,
            skimage.draw.polygon2mask(new_ct_size, np.array(list(zip(r, c)))),
        )

    mask = reduce_expanded_mask(expanded_mask, ct_size[0], expansion)
    mask = 2 * mask - 1

    return x_grid, y_grid, mask
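
The per-contour rasterisation on the expanded grid is done by skimage.draw.polygon2mask; a minimal sketch with made-up triangle vertices, given in (row, column) order as the function expects:

import numpy as np
import skimage.draw

image_shape = (50, 50)
triangle = np.array([(5, 5), (45, 10), (25, 45)])  # (row, col) vertices
mask = skimage.draw.polygon2mask(image_shape, triangle)
print(mask.dtype, mask.sum())  # bool, number of pixels inside the triangle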
Example #5
def calculate_expanded_mask(contours, dcm_ct, expansion):
    dx, dy, Cx, Cy, Ox, Oy = get_image_transformation_parameters(dcm_ct)

    ct_size = np.shape(dcm_ct.pixel_array)

    new_ct_size = np.array(ct_size) * expansion

    expanded_mask = np.zeros(new_ct_size)

    for xyz in contours:
        x = np.array(xyz[0::3])
        y = np.array(xyz[1::3])
        z = xyz[2::3]

        if len(set(z)) != 1:
            raise ValueError("Expected only one z value for a given contour")

        r = (((y - Cy) / dy * Oy)) * expansion + (expansion - 1) * 0.5
        c = (((x - Cx) / dx * Ox)) * expansion + (expansion - 1) * 0.5

        expanded_mask = np.logical_or(
            expanded_mask,
            skimage.draw.polygon2mask(new_ct_size, np.array(list(zip(r, c)))),
        )

    return expanded_mask
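
reduce_expanded_mask is not shown in these examples; the sketch below assumes it block-averages the supersampled mask back to the original (square) CT grid. That behaviour is an assumption, not the actual implementation:

import numpy as np

def reduce_expanded_mask_sketch(expanded_mask, img_size, expansion):
    # Assumed behaviour: average each expansion x expansion block of the
    # supersampled boolean mask to get a fractional, anti-aliased value per
    # original CT pixel (img_size assumed square, as in the call above).
    blocks = expanded_mask.reshape(img_size, expansion, img_size, expansion)
    return blocks.mean(axis=(1, 3))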
Example #6
def _profile(self):
    """The actual profile array; private attr that is passed to MultiProfile."""
    profile = np.zeros(len(self._multi_x_locations[0]))
    for _, x, y in zip(
        self._radii, self._multi_x_locations, self._multi_y_locations
    ):
        profile += scipy.ndimage.map_coordinates(self.image_array, [y, x], order=0)
    profile /= self.num_profiles
    return profile
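
A self-contained sketch of the scipy.ndimage.map_coordinates sampling used above, pulling nearest-neighbour image values along a circle of made-up coordinates:

import numpy as np
import scipy.ndimage

image = np.arange(100.0).reshape(10, 10)            # synthetic image
angles = np.linspace(0, 2 * np.pi, 36, endpoint=False)
y = 4.5 + 3 * np.sin(angles)                        # row coordinates on a circle
x = 4.5 + 3 * np.cos(angles)                        # column coordinates on a circle
profile = scipy.ndimage.map_coordinates(image, [y, x], order=0)
print(profile.shape)  # (36,): nearest-neighbour samples along the circle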
Example #7
def create_output_mask(dcm_ct, contours_by_ct_uid, structure, ct_uid, expansion=5):
    _, _, ct_size = mask.get_grid(dcm_ct)

    contours_on_this_slice = contours_by_ct_uid[ct_uid].keys()
    if structure in contours_on_this_slice:
        original_contours = contours_by_ct_uid[ct_uid][structure]
        _, _, calculated_mask = mask.calculate_anti_aliased_mask(
            original_contours, dcm_ct, expansion=expansion
        )
    else:
        # No contours for this structure on this slice: return an all -1
        # (background) mask, matching the -1 to +1 range of calculate_anti_aliased_mask
        calculated_mask = np.zeros(ct_size) - 1

    return calculated_mask
Example #8
def calc_mu_density(
    mu,
    mlc,
    jaw,
    grid_resolution=None,
    max_leaf_gap=None,
    leaf_pair_widths=None,
    min_step_per_pixel=None,
):
    """Determine the MU Density.

    Both jaw and mlc positions are defined in bipolar format for each control
    point. A negative value indicates travel over the isocentre. All positional
    arguments are defined at the isocentre projection with the units of mm.

    Parameters
    ----------
    mu : numpy.ndarray
        1-D array containing an MU value for each control point.
    mlc : numpy.ndarray
        3-D array containing the MLC positions

            | axis 0: control point
            | axis 1: mlc pair
            | axis 2: leaf bank

    jaw : numpy.ndarray
        2-D array containing the jaw positions.

            | axis 0: control point
            | axis 1: diaphragm

    grid_resolution : float, optional
        The calc grid resolution. Defaults to 1 mm.

    max_leaf_gap : float, optional
        The maximum possible distance between opposing leaves. Defaults to
        400 mm.

    leaf_pair_widths : tuple, optional
        The widths of each leaf pair in the
        MLC limiting device. The number of entries in the tuple defines
        the number of leaf pairs. Each entry itself defines that particular
        leaf pair width. Defaults to 80 leaf pairs, each 5 mm wide.

    min_step_per_pixel : int, optional
        The minimum number of time steps
        used per pixel for each control point. Defaults to 10.

    Returns
    -------
    mu_density : numpy.ndarray
        2-D array containing the calculated mu density.

            | axis 0: jaw direction
            | axis 1: mlc direction

    Examples
    --------
    >>> import numpy as np
    >>> import pymedphys
    >>>
    >>> leaf_pair_widths = (5, 5, 5)
    >>> max_leaf_gap = 10
    >>> mu = np.array([0, 2, 5, 10])
    >>> mlc = np.array([
    ...     [
    ...         [1, 1],
    ...         [2, 2],
    ...         [3, 3]
    ...     ],
    ...     [
    ...         [2, 2],
    ...         [3, 3],
    ...         [4, 4]
    ...     ],
    ...     [
    ...         [-2, 3],
    ...         [-2, 4],
    ...         [-2, 5]
    ...     ],
    ...     [
    ...         [0, 0],
    ...         [0, 0],
    ...         [0, 0]
    ...     ]
    ... ])
    >>> jaw = np.array([
    ...     [7.5, 7.5],
    ...     [7.5, 7.5],
    ...     [-2, 7.5],
    ...     [0, 0]
    ... ])
    >>>
    >>> grid = pymedphys.mudensity.grid(
    ...    max_leaf_gap=max_leaf_gap, leaf_pair_widths=leaf_pair_widths)
    >>> grid['mlc']
    array([-5., -4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.,  5.])
    >>>
    >>> grid['jaw']
    array([-8., -7., -6., -5., -4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.,
            5.,  6.,  7.,  8.])
    >>>
    >>> mu_density = pymedphys.mudensity.calculate(
    ...    mu, mlc, jaw, max_leaf_gap=max_leaf_gap,
    ...    leaf_pair_widths=leaf_pair_widths)
    >>> pymedphys.mudensity.display(grid, mu_density)
    >>>
    >>> np.round(mu_density, 1)
    array([[0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.3, 1.9, 2.2, 1.9, 0.4, 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.4, 2.2, 2.5, 2.2, 0.6, 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.4, 2.4, 2.8, 2.5, 0.8, 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.4, 2.5, 3.1, 2.8, 1. , 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.4, 2.5, 3.4, 3.1, 1.3, 0. , 0. , 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.7, 3.7, 3.5, 1.6, 0. , 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.8, 4. , 3.8, 1.9, 0.1, 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.8, 4.3, 4.1, 2.3, 0.1, 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.9, 5.2, 4.7, 2.6, 0.2, 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.8, 5.4, 6.6, 3.8, 0.5, 0. ],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 5.1, 7.5, 6.7, 3.9, 0.5],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 4.7, 6.9, 6.7, 3.9, 0.5],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 4.5, 6.3, 6.4, 3.9, 0.5],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 4.5, 5.6, 5.7, 3.8, 0.5],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 4.5, 5.1, 5.1, 3.3, 0.5],
           [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])


    MU Density from a Mosaiq record

    >>> import pymedphys
    >>>
    >>> def mu_density_from_mosaiq(msq_server_name, field_id):
    ...     with pymedphys.mosaiq.connect(msq_server_name) as cursor:
    ...         delivery = pymedphys.Delivery.from_mosaiq(cursor, field_id)
    ...
    ...     grid = pymedphys.mudensity.grid()
    ...     mu_density = delivery.mudensity()
    ...     pymedphys.mudensity.display(grid, mu_density)
    >>>
    >>> mu_density_from_mosaiq('a_server_name', 11111) # doctest: +SKIP


    MU Density from a logfile at a given filepath

    >>> import pymedphys
    >>>
    >>> def mu_density_from_logfile(filepath):
    ...     delivery_data = pymedphys.Delivery.from_logfile(filepath)
    ...     mu_density = delivery_data.mudensity()
    ...
    ...     grid = pymedphys.mudensity.grid()
    ...     pymedphys.mudensity.display(grid, mu_density)
    >>>
    >>> mu_density_from_logfile(r"a/path/goes/here")  # doctest: +SKIP

    """

    if grid_resolution is None:
        grid_resolution = __DEFAULT_GRID_RESOLUTION

    if max_leaf_gap is None:
        max_leaf_gap = __DEFAULT_MAX_LEAF_GAP

    if leaf_pair_widths is None:
        leaf_pair_widths = __DEFAULT_LEAF_PAIR_WIDTHS

    if min_step_per_pixel is None:
        min_step_per_pixel = __DEFAULT_MIN_STEP_PER_PIXEL

    divisibility_of_max_leaf_gap = np.array(max_leaf_gap / 2 / grid_resolution)
    max_leaf_gap_is_divisible = (
        divisibility_of_max_leaf_gap.astype(int) == divisibility_of_max_leaf_gap
    )

    if not max_leaf_gap_is_divisible:
        raise ValueError(
            "The grid resolution needs to be able to divide the max leaf gap exactly by"
            " four"
        )

    leaf_pair_widths = np.array(leaf_pair_widths)

    if not np.max(np.abs(mlc)) <= max_leaf_gap / 2:  # pylint: disable = unneeded-not
        first_failing_control_point = np.where(np.abs(mlc) > max_leaf_gap / 2)[0][0]

        raise ValueError(
            "The mlc should not travel further out than half the maximum leaf gap.\n"
            "The first failing control point has the following positions:\n"
            f"{np.array(mlc)[first_failing_control_point, :, :]}"
        )

    mu, mlc, jaw = remove_irrelevant_control_points(mu, mlc, jaw)

    full_grid = get_grid(max_leaf_gap, grid_resolution, leaf_pair_widths)

    mu_density = np.zeros((len(full_grid["jaw"]), len(full_grid["mlc"])))

    for i in range(len(mu) - 1):
        control_point_slice = slice(i, i + 2, 1)
        current_mlc = mlc[control_point_slice, :, :]
        current_jaw = jaw[control_point_slice, :]
        delivered_mu = np.diff(mu[control_point_slice])

        grid, mu_density_of_slice = calc_single_control_point(
            current_mlc,
            current_jaw,
            delivered_mu,
            leaf_pair_widths=leaf_pair_widths,
            grid_resolution=grid_resolution,
            min_step_per_pixel=min_step_per_pixel,
        )
        full_grid_mu_density_of_slice = _convert_to_full_grid(
            grid, full_grid, mu_density_of_slice
        )

        mu_density += full_grid_mu_density_of_slice

    return mu_density
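
A minimal sketch of how the loop above pairs consecutive control points and uses np.diff to get the MU delivered between them; the mu values are taken from the docstring example:

import numpy as np

mu = np.array([0, 2, 5, 10])  # cumulative MU from the docstring example
for i in range(len(mu) - 1):
    control_point_slice = slice(i, i + 2, 1)
    delivered_mu = np.diff(mu[control_point_slice])
    print(i, delivered_mu)  # 0 [2], 1 [3], 2 [5]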
Example #9
def convert_dose(plan, export_path):

    # Check that the plan has a primary image, as we can't create a meaningful RTDOSE without it:
    if not plan.primary_image:
        plan.logger.error("No primary image found for plan. Unable to generate RTDOSE.")
        return

    patient_info = plan.pinnacle.patient_info
    plan_info = plan.plan_info
    trial_info = plan.trial_info
    image_info = plan.primary_image.image_info[0]

    patient_position = plan.patient_position

    # Get the UID for the Dose and the Plan
    doseInstanceUID = plan.dose_inst_uid
    planInstanceUID = plan.plan_inst_uid

    # Populate required values for file meta information
    file_meta = pydicom.dataset.Dataset()
    file_meta.MediaStorageSOPClassUID = RTDoseSOPClassUID
    file_meta.TransferSyntaxUID = GTransferSyntaxUID
    file_meta.MediaStorageSOPInstanceUID = doseInstanceUID
    file_meta.ImplementationClassUID = GImplementationClassUID

    # Create the pydicom.dataset.FileDataset instance (initially no data elements, but
    # file_meta supplied)
    RDfilename = f"RD.{file_meta.MediaStorageSOPInstanceUID}.dcm"
    ds = pydicom.dataset.FileDataset(
        RDfilename, {}, file_meta=file_meta, preamble=b"\x00" * 128
    )
    ds.SpecificCharacterSet = "ISO_IR 100"
    ds.InstanceCreationDate = time.strftime("%Y%m%d")
    ds.InstanceCreationTime = time.strftime("%H%M%S")

    ds.SOPClassUID = RTDoseSOPClassUID  # RT Dose Storage
    ds.SOPInstanceUID = doseInstanceUID
    datetimesplit = plan_info["ObjectVersion"]["WriteTimeStamp"].split()
    # Read more accurate date from trial file if it is available
    trial_info = plan.trial_info
    if trial_info:
        datetimesplit = trial_info["ObjectVersion"]["WriteTimeStamp"].split()

    ds.StudyDate = datetimesplit[0].replace("-", "")
    ds.StudyTime = datetimesplit[1].replace(":", "")
    ds.AccessionNumber = ""
    ds.Modality = RTDOSEModality
    ds.Manufacturer = Manufacturer
    ds.OperatorsName = ""
    ds.ManufacturerModelName = plan_info["ToolType"]
    ds.SoftwareVersions = [plan_info["PinnacleVersionDescription"]]
    ds.PhysiciansOfRecord = patient_info["RadiationOncologist"]
    ds.PatientName = patient_info["FullName"]
    ds.PatientBirthDate = patient_info["DOB"]
    ds.PatientID = patient_info["MedicalRecordNumber"]
    ds.PatientSex = patient_info["Gender"][0]

    ds.SliceThickness = trial_info["DoseGrid .VoxelSize .Z"] * 10
    ds.SeriesInstanceUID = doseInstanceUID
    ds.InstanceNumber = "1"

    ds.StudyInstanceUID = image_info["StudyInstanceUID"]
    ds.FrameOfReferenceUID = image_info["FrameUID"]
    ds.StudyID = plan.primary_image.image["StudyID"]

    # Assume zero struct shift for now (may not be the case for versions below Pinnacle 9)
    if patient_position in ("HFP", "FFS"):
        dose_origin_x = -trial_info["DoseGrid .Origin .X"] * 10
    elif patient_position in ("HFS", "FFP"):
        dose_origin_x = trial_info["DoseGrid .Origin .X"] * 10

    if patient_position in ("HFS", "FFS"):
        dose_origin_y = -trial_info["DoseGrid .Origin .Y"] * 10
    elif patient_position in ("HFP", "FFP"):
        dose_origin_y = trial_info["DoseGrid .Origin .Y"] * 10

    if patient_position in ("HFS", "HFP"):
        dose_origin_z = -trial_info["DoseGrid .Origin .Z"] * 10
    elif patient_position in ("FFS", "FFP"):
        dose_origin_z = trial_info["DoseGrid .Origin .Z"] * 10

    # Image Position (Patient) seems off, so calculate the shift assuming the
    # dose origin is at the centre and we want the outer edge
    ydoseshift = (
        trial_info["DoseGrid .VoxelSize .Y"] * 10 * trial_info["DoseGrid .Dimension .Y"]
        - trial_info["DoseGrid .VoxelSize .Y"] * 10
    )
    zdoseshift = (
        trial_info["DoseGrid .VoxelSize .Z"] * 10 * trial_info["DoseGrid .Dimension .Z"]
        - trial_info["DoseGrid .VoxelSize .Z"] * 10
    )

    if patient_position == "HFS":
        ds.ImagePositionPatient = [
            dose_origin_x,
            dose_origin_y - ydoseshift,
            dose_origin_z - zdoseshift,
        ]
    elif patient_position == "HFP":
        ds.ImagePositionPatient = [
            dose_origin_x,
            dose_origin_y + ydoseshift,
            dose_origin_z - zdoseshift,
        ]
    elif patient_position == "FFS":
        ds.ImagePositionPatient = [
            dose_origin_x,
            dose_origin_y - ydoseshift,
            dose_origin_z + zdoseshift,
        ]
    elif patient_position == "FFP":
        ds.ImagePositionPatient = [
            dose_origin_x,
            dose_origin_y + ydoseshift,
            dose_origin_z + zdoseshift,
        ]

    # Read this from CT DCM if available?
    if "HFS" in patient_position or "FFS" in patient_position:
        ds.ImageOrientationPatient = [1.0, 0.0, 0.0, 0.0, 1.0, -0.0]
    elif "HFP" in patient_position or "FFP" in patient_position:
        ds.ImageOrientationPatient = [-1.0, 0.0, 0.0, 0.0, -1.0, -0.0]

    # Read this from CT DCM if available
    ds.PositionReferenceIndicator = ""
    ds.SamplesPerPixel = 1
    ds.PhotometricInterpretation = "MONOCHROME2"

    ds.NumberOfFrames = int(
        trial_info["DoseGrid .Dimension .Z"]
    )  # number of dose planes, i.e. the Z dimension
    # Using y for Rows because that's what's in the exported dicom file for
    # test patient
    ds.Rows = int(trial_info["DoseGrid .Dimension .Y"])
    ds.Columns = int(trial_info["DoseGrid .Dimension .X"])
    ds.PixelSpacing = [
        trial_info["DoseGrid .VoxelSize .X"] * 10,
        trial_info["DoseGrid .VoxelSize .Y"] * 10,
    ]
    ds.BitsAllocated = 16
    ds.BitsStored = 16
    ds.HighBit = 15
    ds.PixelRepresentation = 0
    ds.DoseUnits = "GY"
    ds.DoseType = "PHYSICAL"
    ds.DoseSummationType = "PLAN"

    # Since DoseSummationType is PLAN, only need to reference RTPLAN here, no need to
    # reference fraction group.
    ds.ReferencedRTPlanSequence = pydicom.sequence.Sequence()
    ds.ReferencedRTPlanSequence.append(pydicom.dataset.Dataset())
    ds.ReferencedRTPlanSequence[0].ReferencedSOPClassUID = RTPlanSOPClassUID
    ds.ReferencedRTPlanSequence[0].ReferencedSOPInstanceUID = planInstanceUID

    ds.TissueHeterogeneityCorrection = "IMAGE"

    # Frame offsets run along the z axis, so step by the z voxel size
    grid_frame_offset_vector = []
    for p in range(0, int(trial_info["DoseGrid .Dimension .Z"])):
        grid_frame_offset_vector.append(
            p * float(trial_info["DoseGrid .VoxelSize .Z"] * 10)
        )
    ds.GridFrameOffsetVector = grid_frame_offset_vector

    # Array in which to sum the dose values of all beams
    summed_pixel_values = []

    # For each beam in the trial, convert the dose from the Pinnacle binary
    # file and sum together
    beam_list = trial_info["BeamList"] if trial_info["BeamList"] else []
    if len(beam_list) == 0:
        plan.logger.warning("No Beams found in Trial. Unable to generate RTDOSE.")
        return

    for beam in beam_list:

        plan.logger.info("Exporting Dose for beam: %s", beam["Name"])

        # Get the binary file for this beam
        binary_id = re.findall("\\d+", beam["DoseVolume"])[0]
        filled_binary_id = str(binary_id).zfill(3)
        binary_file = os.path.join(plan.path, f"plan.Trial.binary.{filled_binary_id}")

        # Get the prescription for this beam (need this for number of fractions)
        prescription = [
            p
            for p in trial_info["PrescriptionList"]
            if p["Name"] == beam["PrescriptionName"]
        ][0]

        # Get the prescription point
        plan.logger.debug("PrescriptionPointName: %s", beam["PrescriptionPointName"])
        points = plan.points
        prescription_point = []
        for p in points:
            if p["Name"] == beam["PrescriptionPointName"]:
                plan.logger.debug(
                    "Presc Point: %s %s %s %s",
                    p["Name"],
                    p["XCoord"],
                    p["YCoord"],
                    p["ZCoord"],
                )
                prescription_point = plan.convert_point(p)
                break

        if len(prescription_point) < 3:
            plan.logger.warning(
                "No valid prescription point found for beam! Beam will be ignored for "
                "Dose conversion. Dose will most likely be incorrect"
            )
            continue

        plan.logger.debug("Presc Point Dicom: %s, %s", p["Name"], prescription_point)
        total_prescription = (
            beam["MonitorUnitInfo"]["PrescriptionDose"]
            * prescription["NumberOfFractions"]
        )
        plan.logger.debug("Total Prescription %s", total_prescription)

        # Read the dose into a grid, so that we can interpolate for the prescription
        # point and determine the MU for the grid
        dose_grid = np.zeros(
            (
                trial_info["DoseGrid .Dimension .X"],
                trial_info["DoseGrid .Dimension .Y"],
                trial_info["DoseGrid .Dimension .Z"],
            )
        )
        spacing = [
            trial_info["DoseGrid .VoxelSize .X"] * 10,
            trial_info["DoseGrid .VoxelSize .Y"] * 10,
            trial_info["DoseGrid .VoxelSize .Z"] * 10,
        ]
        origin = [
            ds.ImagePositionPatient[0],
            ds.ImagePositionPatient[1],
            ds.ImagePositionPatient[2],
        ]
        if os.path.isfile(binary_file):
            with open(binary_file, "rb") as b:
                for z in range(trial_info["DoseGrid .Dimension .Z"] - 1, -1, -1):
                    for y in range(0, trial_info["DoseGrid .Dimension .Y"]):
                        for x in range(0, trial_info["DoseGrid .Dimension .X"]):
                            data_element = b.read(4)
                            value = struct.unpack(">f", data_element)[0]
                            dose_grid[x, y, z] = value
        else:
            plan.logger.warning("Dose file not found")
            plan.logger.error("Skipping generating RTDOSE")
            return

        # Get the index within that grid of the dose reference point
        idx = [0.0, 0.0, 0.0]
        for i in range(3):
            idx[i] = -(origin[i] - prescription_point[i]) / spacing[i]
        plan.logger.debug("Index of prescription point within grid: %s", idx)

        # Trilinear interpolation of that point within the dose grid
        cgy_mu = trilinear_interpolation(idx, dose_grid)
        plan.logger.debug("cgy_mu: %s", cgy_mu)

        # Now that we have the cgy/mu value of the dose reference point, we can
        # extract an accurate value for MU
        beam_mu = (total_prescription / cgy_mu) / prescription["NumberOfFractions"]
        plan.logger.debug("Beam MU: %s", beam_mu)

        pixel_data_list = []
        for z in range(trial_info["DoseGrid .Dimension .Z"] - 1, -1, -1):
            for y in range(0, trial_info["DoseGrid .Dimension .Y"]):
                for x in range(0, trial_info["DoseGrid .Dimension .X"]):
                    value = (
                        float(prescription["NumberOfFractions"])
                        * dose_grid[x, y, z]
                        * beam_mu
                        / 100
                    )
                    pixel_data_list.append(value)

        ds.FrameIncrementPointer = ds.data_element("GridFrameOffsetVector").tag

        main_pix_array = []
        for h in range(0, trial_info["DoseGrid .Dimension .Z"]):
            pixelsforframe = []
            for k in range(
                0,
                trial_info["DoseGrid .Dimension .X"]
                * trial_info["DoseGrid .Dimension .Y"],
            ):

                pixelsforframe.append(
                    float(
                        pixel_data_list[
                            h
                            * trial_info["DoseGrid .Dimension .Y"]
                            * trial_info["DoseGrid .Dimension .X"]
                            + k
                        ]
                    )
                )

            main_pix_array = main_pix_array + list(reversed(pixelsforframe))

        main_pix_array = list(reversed(main_pix_array))

        # Add the values from this beam to the summed values
        if len(summed_pixel_values) == 0:
            summed_pixel_values = main_pix_array
        else:
            for i, values in enumerate(summed_pixel_values):
                summed_pixel_values[i] = values + main_pix_array[i]

    # Compute the scaling factor
    scale = max(summed_pixel_values) / 16384
    ds.DoseGridScaling = scale
    plan.logger.debug("Dose Grid Scaling: %s", ds.DoseGridScaling)

    pixel_binary_block = bytes()

    # Scale by the scaling factor
    pixelvaluelist = []
    for element in summed_pixel_values:
        if scale != 0:
            element = round(element / scale)
        else:
            element = 0
        pixelvaluelist.append(int(element))

    # Set the PixelData
    pixel_binary_block = struct.pack("%sh" % len(pixelvaluelist), *pixelvaluelist)
    ds.PixelData = pixel_binary_block

    # Save the RTDose Dicom File
    output_file = os.path.join(export_path, RDfilename)
    plan.logger.info("Creating Dose file: %s", output_file)
    ds.save_as(output_file)
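
The inner triple loop reads the Pinnacle dose volume one big-endian 32-bit float at a time with struct.unpack; below is a hedged sketch of an equivalent vectorised read with numpy. The function name, file path and dimensions are placeholders, not part of the original code:

import numpy as np

def read_pinnacle_dose_sketch(binary_file, dim_x, dim_y, dim_z):
    # Read the whole file of big-endian float32 values in one call, then
    # reorder to match the loop in convert_dose (z descending, then y, then x)
    # and index as dose_grid[x, y, z].
    flat = np.fromfile(binary_file, dtype=">f4")
    grid = flat.reshape(dim_z, dim_y, dim_x)  # file order: z, y, x
    return grid[::-1].transpose(2, 1, 0)      # dose_grid[x, y, z], z reversed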
Example #10
def read_dicom3D(direc, i_option):
    # item = 0
    for subdir, dirs, files in os.walk(direc):  # pylint: disable = unused-variable
        k = 0
        for file in tqdm(sorted(files)):
            # print('filename=', file)
            if os.path.splitext(file)[1] == ".dcm":
                dataset = pydicom.dcmread(direc + file)
                if k == 0:
                    ArrayDicom = np.zeros(
                        (dataset.Rows, dataset.Columns, 0),
                        dtype=dataset.pixel_array.dtype,
                    )
                    tmp_array = dataset.pixel_array
                    if i_option.startswith(("y", "yeah", "yes")):
                        max_val = np.amax(tmp_array)
                        tmp_array = tmp_array / max_val
                        min_val = np.amin(tmp_array)
                        tmp_array = tmp_array - min_val
                        tmp_array = 1 - tmp_array  # inverting the range

                        # min_val = np.amin(tmp_array)  # normalizing
                        # tmp_array = tmp_array - min_val
                        # tmp_array = tmp_array / (np.amax(tmp_array))
                        tmp_array = u.norm01(tmp_array)
                    else:
                        # min_val = np.amin(tmp_array)
                        # tmp_array = tmp_array - min_val
                        # tmp_array = tmp_array / (np.amax(tmp_array))
                        tmp_array = u.norm01(tmp_array)  # just normalize
                    ArrayDicom = np.dstack((ArrayDicom, tmp_array))
                    # print("item thickness [mm]=", dataset.SliceThickness)
                    SID = dataset.RTImageSID
                    dx = 1 / (SID * (1 / dataset.ImagePlanePixelSpacing[0]) / 1000)
                    dy = 1 / (SID * (1 / dataset.ImagePlanePixelSpacing[1]) / 1000)
                    print("pixel spacing row [mm]=", dx)
                    print("pixel spacing col [mm]=", dy)
                else:
                    tmp_array = dataset.pixel_array
                    if i_option.startswith(("y", "yeah", "yes")):
                        max_val = np.amax(tmp_array)
                        tmp_array = tmp_array / max_val
                        min_val = np.amin(tmp_array)
                        tmp_array = tmp_array - min_val
                        tmp_array = 1 - tmp_array  # inverting the range

                        # min_val = np.amin(tmp_array)  # normalizing
                        # tmp_array = tmp_array - min_val
                        # tmp_array = tmp_array / (np.amax(tmp_array))
                        tmp_array = u.norm01(tmp_array)
                    else:
                        # min_val = np.amin(tmp_array)
                        # tmp_array = tmp_array - min_val
                        # tmp_array = tmp_array / (np.amax(tmp_array))  # just normalize
                        tmp_array = u.norm01(tmp_array)
                    ArrayDicom = np.dstack((ArrayDicom, tmp_array))
                k = k + 1  # count only DICOM slices so k == 0 initialises the stack

    xfield, yfield, rotfield = image_analyze(ArrayDicom, i_option)

    multi_slice_viewer(ArrayDicom, dx, dy)

    if np.shape(xfield)[2] == 2:
        fig, peak_figs, junctions_figs = merge_view_vert(xfield, dx, dy)
        with PdfPages(direc + "jaws_X_report.pdf") as pdf:
            pdf.savefig(fig)
            # for i in range(0, len(peak_figs)):
            for _, f in enumerate(peak_figs):
                pdf.savefig(f)

            # for i in range(0, len(junctions_figs)):
            for _, f in enumerate(junctions_figs):
                pdf.savefig(f)

            plt.close()

    else:
        print(
            "X jaws data analysis not completed; please verify that you have two "
            "X jaws images. For more information, see the manual."
        )

    if np.shape(yfield)[2] == 4:
        fig, peak_figs, junctions_figs = merge_view_horz(yfield, dx, dy)
        # print('peak_figs********************************************************=', len(peak_figs),peak_figs)
        with PdfPages(direc + "jaws_Y_report.pdf") as pdf:
            pdf.savefig(fig)
            # for i in range(0, len(peak_figs)):
            for _, f in enumerate(peak_figs):
                pdf.savefig(f)

            for _, f in enumerate(junctions_figs):
                pdf.savefig(f)

            plt.close()

    else:
        print(
            "Y jaws data analysis not completed; please verify that you have four "
            "Y jaws images. For more information, see the manual."
        )

    if np.shape(rotfield)[2] == 4:
        fig, peak_figs, junctions_figs = merge_view_filtrot(rotfield, dx, dy)

        with PdfPages(direc + "jaws_FR_report.pdf") as pdf:
            pdf.savefig(fig)
            for _, f in enumerate(peak_figs):
                pdf.savefig(f)

            for _, f in enumerate(junctions_figs):
                pdf.savefig(f)

            plt.close()

    else:
        print(
            "Field rotation data analysis not completed; please verify that you have "
            "four field rotation images. For more information, see the manual."
        )
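
u.norm01 is an external helper; judging from the commented-out normalisation it replaced, it presumably rescales an array to the [0, 1] range. A minimal sketch of that assumed behaviour (not the actual implementation):

import numpy as np

def norm01_sketch(arr):
    # Assumed behaviour: shift so the minimum is 0, then scale so the maximum is 1.
    arr = arr - np.amin(arr)
    return arr / np.amax(arr)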
Example #11
def merge_view_filtrot(volume, dx, dy):

    volume_resort = np.copy(
        volume
    )  # will hold the volume re-sorted so slices 0-3 run clockwise from top left
    junctions_comb = []
    peaks_figs_comb = []

    # match each slice to one of the four quadrants:
    # 0, 1, 2, 3 are tagged top left, top right, bottom right, bottom left
    for i in range(0, int(np.shape(volume)[2])):
        # sum along each diagonal from the centre; the largest sum tags which
        # quadrant this slice belongs to
        diag_stack = [0, 0, 0, 0]
        for j in range(0, int(min([np.shape(volume)[0], np.shape(volume)[1]]) / 2)):
            # print('j=',j,int(np.shape(volume)[0] / 2)+j, int(np.shape(volume)[1] / 2)+j)
            diag_stack[0] = (
                diag_stack[0]
                + volume[
                    int(np.shape(volume)[0] / 2) - j,
                    int(np.shape(volume)[1] / 2) - j,
                    i,
                ]
            )
            diag_stack[1] = (
                diag_stack[1]
                + volume[
                    int(np.shape(volume)[0] / 2) - j,
                    int(np.shape(volume)[1] / 2) + j,
                    i,
                ]
            )
            diag_stack[2] = (
                diag_stack[2]
                + volume[
                    int(np.shape(volume)[0] / 2) + j,
                    int(np.shape(volume)[1] / 2) + j,
                    i,
                ]
            )
            diag_stack[3] = (
                diag_stack[3]
                + volume[
                    int(np.shape(volume)[0] / 2) + j,
                    int(np.shape(volume)[1] / 2) - j,
                    i,
                ]
            )

        volume_resort[:, :, np.argmax(diag_stack)] = volume[:, :, i]

    # creating merged volumes
    merge_vol = np.zeros((volume_resort.shape[0], volume_resort.shape[1]))

    # creating vector for processing (1 horizontal & 1 vertical)
    amplitude_horz = np.zeros(
        (volume_resort.shape[1], volume_resort.shape[2])
    )  # one horizontal profile (row through the image) per image in the volume
    amplitude_vert = np.zeros((volume_resort.shape[0], volume_resort.shape[2]))

    # y = np.linspace(0, 0 + (volume_resort.shape[0] * dy), volume_resort.shape[0],
    #                 endpoint=False)  # definition of the distance axis
    # x = np.linspace(0, 0 + (volume_resort.shape[1] * dy), volume_resort.shape[1],
    #                 endpoint=False)  # definition of the distance axis

    ampl_resamp_y1 = np.zeros(
        ((volume_resort.shape[0]) * 10, int(volume_resort.shape[2] / 2))
    )
    ampl_resamp_y2 = np.zeros(
        ((volume_resort.shape[0]) * 10, int(volume_resort.shape[2] / 2))
    )

    ampl_resamp_x1 = np.zeros(
        ((volume_resort.shape[1]) * 10, int(volume_resort.shape[2] / 2))
    )
    ampl_resamp_x2 = np.zeros(
        ((volume_resort.shape[1]) * 10, int(volume_resort.shape[2] / 2))
    )

    amplitude_horz[:, 0] = volume_resort[
        int(volume_resort.shape[0] / 3.25), :, 0
    ]  # for profile 1
    amplitude_horz[:, 1] = volume_resort[
        int(volume_resort.shape[0] / 3.25), :, 1
    ]  # for profile 1
    amplitude_horz[:, 3] = volume_resort[
        int(volume_resort.shape[0]) - int(volume_resort.shape[0] / 3.25), :, 2
    ]  # slice indices are swapped because the overlay profile is slid to minimise the error (profile 2)
    amplitude_horz[:, 2] = volume_resort[
        int(volume_resort.shape[0]) - int(volume_resort.shape[0] / 3.25), :, 3
    ]

    amplitude_vert[:, 0] = volume_resort[
        :, int(volume_resort.shape[1]) - int(volume_resort.shape[1] / 2.8), 1
    ]  # slice indices are swapped because the overlay profile is slid to minimise the error (profile 3)
    amplitude_vert[:, 1] = volume_resort[
        :, int(volume_resort.shape[1]) - int(volume_resort.shape[1] / 2.8), 2
    ]
    amplitude_vert[:, 3] = volume_resort[
        :, int(volume_resort.shape[1] / 2.8), 3
    ]  # for profile 4
    amplitude_vert[:, 2] = volume_resort[:, int(volume_resort.shape[1] / 2.8), 0]

    plt.figure()
    for item in tqdm(range(0, int(volume.shape[2] / 2))):
        merge_vol = merge_vol + volume[:, :, item]

        data_samp = amplitude_vert[:, item]
        ampl_resamp_y1[:, item] = signal.resample(
            data_samp, int(np.shape(amplitude_vert)[0]) * 10
        )
        data_samp = amplitude_horz[:, item]
        ampl_resamp_x1[:, item] = signal.resample(
            data_samp, int(np.shape(amplitude_horz)[0]) * 10
        )

    for item in tqdm(range(int(volume.shape[2] / 2), volume.shape[2])):
        merge_vol = merge_vol + volume[:, :, item]
        data_samp = amplitude_vert[:, item]
        ampl_resamp_y2[:, item - int(volume.shape[2] / 2)] = signal.resample(
            data_samp, int(np.shape(amplitude_vert)[0]) * 10
        )
        data_samp = amplitude_horz[:, item]
        ampl_resamp_x2[:, item - int(volume.shape[2] / 2)] = signal.resample(
            data_samp, int(np.shape(amplitude_horz)[0]) * 10
        )

    fig, ax = plt.subplots(ncols=1, nrows=1, squeeze=True, figsize=(6, 8))

    extent = (0, 0 + (volume.shape[1] * dx), 0, 0 + (volume.shape[0] * dy))

    ax.imshow(merge_vol, extent=extent, aspect="auto")
    ax.set_aspect("equal", "box")
    ax.set_xlabel("x distance [mm]")
    ax.set_ylabel("y distance [mm]")
    fig.suptitle("Merged volume", fontsize=16)

    ax.hlines(dy * int(volume_resort.shape[0] / 3.25), 0, dx * volume_resort.shape[1])
    ax.text(
        dx * int(volume_resort.shape[1] / 2.25),
        dy * int(volume_resort.shape[0] / 3),
        "Profile 2",
    )

    ax.hlines(
        dy * int(volume_resort.shape[0]) - dy * int(volume_resort.shape[0] / 3.25),
        0,
        dx * volume_resort.shape[1],
    )
    ax.text(
        dx * int(volume_resort.shape[1] / 2.25),
        dy * int(volume_resort.shape[0]) - dy * int(volume_resort.shape[0] / 3.5),
        "Profile 1",
    )

    ax.vlines(dx * int(volume_resort.shape[1] / 2.8), 0, dy * volume_resort.shape[0])
    ax.text(
        dx * int(volume_resort.shape[1] / 3.1),
        dy * int(volume_resort.shape[0] / 1.8),
        "Profile 4",
        rotation=90,
    )

    ax.vlines(
        dx * int(volume_resort.shape[1]) - dx * int(volume_resort.shape[1] / 2.8),
        0,
        dy * volume_resort.shape[0],
    )
    ax.text(
        dx * int(volume_resort.shape[1]) - dx * int(volume_resort.shape[1] / 2.9),
        dy * int(volume_resort.shape[0] / 1.8),
        "Profile 3",
        rotation=90,
    )
    # plt.show()

    peaks, peak_type, peak_figs = pffr.peak_find_fieldrot(
        ampl_resamp_x1, dx, "Profile 1"
    )
    junction_figs = minFR.minimize_junction_fieldrot(
        ampl_resamp_x1, peaks, peak_type, dx / 10, "Profile 1"
    )
    peaks_figs_comb.append(peak_figs)
    junctions_comb.append(junction_figs)

    peaks, peak_type, peak_figs = pffr.peak_find_fieldrot(
        ampl_resamp_x2, dx, "Profile 2"
    )
    junction_figs = minFR.minimize_junction_fieldrot(
        ampl_resamp_x2, peaks, peak_type, dx / 10, "Profile 2"
    )
    peaks_figs_comb.append(peak_figs)
    junctions_comb.append(junction_figs)

    peaks, peak_type, peak_figs = pffr.peak_find_fieldrot(
        ampl_resamp_y1, dy, "Profile 3"
    )
    junction_figs = minFR.minimize_junction_fieldrot(
        ampl_resamp_y1, peaks, peak_type, dy / 10, "Profile 3"
    )
    peaks_figs_comb.append(peak_figs)
    junctions_comb.append(junction_figs)

    peaks, peak_type, peak_figs = pffr.peak_find_fieldrot(
        ampl_resamp_y2, dy, "Profile 4"
    )
    junction_figs = minFR.minimize_junction_fieldrot(
        ampl_resamp_y2, peaks, peak_type, dy / 10, "Profile 4"
    )
    peaks_figs_comb.append(peak_figs)
    junctions_comb.append(junction_figs)

    return fig, peaks_figs_comb, junctions_comb
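
The quadrant tagging at the top of merge_view_filtrot sums pixel values along the four diagonals running out from the image centre; a minimal, self-contained sketch of that idea on a synthetic image:

import numpy as np

image = np.zeros((100, 100))
image[60:, 60:] = 1.0  # bright field in the bottom-right quadrant

centre_r, centre_c = image.shape[0] // 2, image.shape[1] // 2
diag_sums = [0.0, 0.0, 0.0, 0.0]  # top left, top right, bottom right, bottom left
for j in range(min(image.shape) // 2):
    diag_sums[0] += image[centre_r - j, centre_c - j]
    diag_sums[1] += image[centre_r - j, centre_c + j]
    diag_sums[2] += image[centre_r + j, centre_c + j]
    diag_sums[3] += image[centre_r + j, centre_c - j]

print(np.argmax(diag_sums))  # 2: the bottom-right diagonal collects the most signal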
Example #12
def getMLCdata(ds):
    leafIndices = MLCdata.TrueBeam  # import leaf index values from MLC data file
    beamSequence = ds.BeamSequence
    beamNames = []  # getting the beam names
    mlcData = {}
    jawData = {}
    segWeightData = {}
    maxPositions = {}
    maxJawPos = {}
    maxSep = {}
    maxSepSum = {}
    for b in beamSequence:  # looping through the beams
        name = b.BeamName
        beamNames.append(name)
        cps = b.ControlPointSequence
        cp = len(cps)

        leafPositions = {}  # empty dictionary to place leaf positions into
        jawPositions = {}
        segWeight = {}
        maxA = np.zeros((60))
        maxB = np.zeros((60))
        maxJaws = np.zeros((4))
        maxPos = []
        for i in range(0, cp - 1):  # looping through control points
            if len(cps[i].BeamLimitingDevicePositionSequence
                   ) == 3:  # asymmetric jaws
                xJaws_ = cps[i].BeamLimitingDevicePositionSequence[
                    0].LeafJawPositions
                yJaws_ = cps[i].BeamLimitingDevicePositionSequence[
                    1].LeafJawPositions
                mlcPos = cps[i].BeamLimitingDevicePositionSequence[
                    2].LeafJawPositions
            elif len(cps[i].BeamLimitingDevicePositionSequence
                     ) == 2:  # symmetric jaws
                xJaws_ = cps[i].BeamLimitingDevicePositionSequence[
                    0].LeafJawPositions
                yJaws_ = cps[i].BeamLimitingDevicePositionSequence[
                    0].LeafJawPositions
                mlcPos = cps[i].BeamLimitingDevicePositionSequence[
                    1].LeafJawPositions
            leafPosA = list(map(float, mlcPos[0:60]))
            leafPosB = list(map(float, mlcPos[60:120]))
            leafPositions[i] = np.column_stack(
                (leafIndices, leafPosA, leafPosB))
            xJaws = list(map(float, xJaws_))
            yJaws = list(map(float, yJaws_))
            positions = list((xJaws[0], xJaws[1], yJaws[0], yJaws[1]))
            indices = list(("x1", "x2", "y1", "y2"))
            jawPositions[i] = np.column_stack((indices, positions))

            if i == 0:
                segWeight[i] = float(cps[i + 1].CumulativeMetersetWeight)
            else:
                segWeight[i] = float(cps[i + 1].CumulativeMetersetWeight -
                                     cps[i].CumulativeMetersetWeight)

            for l in range(0, 60):
                if leafPosA[l] < maxA[l]:
                    maxA[l] = float(leafPosA[l])
                if leafPosB[l] > maxB[l]:
                    maxB[l] = float(leafPosB[l])

            for l in range(0, 4):
                if abs(positions[l]) > abs(maxJaws[l]):
                    maxJaws[l] = float(positions[l])

        string = str(name)
        maxPos = np.column_stack((maxA, maxB))
        mlcData[string] = leafPositions
        jawData[string] = jawPositions
        segWeightData[string] = segWeight
        maxPositions[string] = maxPos
        maxJawPos[string] = maxJaws

        leaf_separation = list(abs(np.array(maxA) - np.array(maxB)))
        maxSepSum[string] = np.nansum(leaf_separation)
        maxSep[string] = leaf_separation

    return (mlcData, jawData, segWeightData, maxSep, maxSepSum, maxPositions,
            maxJawPos)
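
The per-leaf extremes (maxA, maxB) accumulated in the control-point loop above can also be computed in one vectorised step; a minimal sketch with made-up leaf positions, included for illustration rather than as part of getMLCdata:

import numpy as np

# Hypothetical leaf positions in mm: 5 control points, 60 leaves per bank.
bank_a = -np.random.rand(5, 60) * 100  # bank A travels in the negative direction
bank_b = np.random.rand(5, 60) * 100   # bank B travels in the positive direction

max_a = np.minimum(np.min(bank_a, axis=0), 0)  # most negative A position per leaf
max_b = np.maximum(np.max(bank_b, axis=0), 0)  # most positive B position per leaf
max_separation = np.abs(max_a - max_b)         # widest gap reached by each leaf pair
print(np.nansum(max_separation))               # equivalent of maxSepSum for one beam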