Example #1
def pcolormesh_grid(x, y, grid_resolution=None):
    if grid_resolution is None:
        diffs = np.hstack([np.diff(x), np.diff(y)])
        assert np.all(np.abs(diffs - diffs[0]) < 10 ** -12)

        grid_resolution = diffs[0]

    new_x = np.concatenate([x - grid_resolution / 2, [x[-1] + grid_resolution / 2]])
    new_y = np.concatenate([y - grid_resolution / 2, [y[-1] + grid_resolution / 2]])

    return new_x, new_y
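
A minimal usage sketch (not from the original source), assuming the pcolormesh_grid above is in scope and using made-up data: the function turns N cell-centre coordinates into the N + 1 cell-edge coordinates that matplotlib's pcolormesh expects.

import numpy as np
import matplotlib.pyplot as plt

x = np.arange(0.0, 5.0, 0.5)             # cell centres, uniform 0.5 mm spacing
y = np.arange(0.0, 3.0, 0.5)
values = np.random.rand(len(y), len(x))  # one value per cell

edge_x, edge_y = pcolormesh_grid(x, y)   # 11 and 7 edges for 10 and 6 cells
plt.pcolormesh(edge_x, edge_y, values)
plt.show()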
Example #2
def group_consecutive_logfiles(file_hashes, index):
    times = np.array([index[key]["local_time"]
                      for key in file_hashes]).astype(np.datetime64)

    sort_reference = np.argsort(times)
    file_hashes = file_hashes[sort_reference]
    times = times[sort_reference]

    hours_4 = np.array(60 * 60 * 4).astype(np.timedelta64)
    split_locations = np.where(np.diff(times) >= hours_4)[0] + 1

    return np.split(file_hashes, split_locations)
Example #3
    def _from_dicom_beam(cls, beam, meterset):
        leaf_boundaries = beam.BeamLimitingDeviceSequence[
            -1].LeafPositionBoundaries
        leaf_widths = np.diff(leaf_boundaries)

        assert beam.BeamLimitingDeviceSequence[-1].NumberOfLeafJawPairs == len(
            leaf_widths)
        num_leaves = len(leaf_widths)

        control_points = beam.ControlPointSequence

        mlcs = [
            control_point.BeamLimitingDevicePositionSequence[-1].
            LeafJawPositions for control_point in control_points
        ]

        mlcs = [
            np.array([
                -np.array(mlc[0:num_leaves][::-1]),
                np.array(mlc[num_leaves::][::-1])
            ][::-1]).T for mlc in mlcs
        ]

        dicom_jaw = [
            control_point.BeamLimitingDevicePositionSequence[0].
            LeafJawPositions for control_point in control_points
        ]

        jaw = np.array(dicom_jaw)

        second_col = deepcopy(jaw[:, 1])
        jaw[:, 1] = jaw[:, 0]
        jaw[:, 0] = second_col

        jaw[:, 1] = -jaw[:, 1]

        final_mu_weight = np.array(beam.FinalCumulativeMetersetWeight)

        mu = [
            meterset * np.array(control_point.CumulativeMetersetWeight) /
            final_mu_weight for control_point in control_points
        ]

        gantry_angles = convert_IEC_angle_to_bipolar(
            [control_point.GantryAngle for control_point in control_points])

        collimator_angles = convert_IEC_angle_to_bipolar([
            control_point.BeamLimitingDeviceAngle
            for control_point in control_points
        ])

        return cls(mu, gantry_angles, collimator_angles, mlcs, jaw)
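
The nested reversal and negation in the mlcs comprehension above is easiest to follow on a concrete flattened LeafJawPositions list. A purely illustrative sketch with three hypothetical leaf pairs (bank A values followed by bank B values):

import numpy as np

num_leaves = 3
mlc = [-10, -12, -11, 8, 9, 7]  # hypothetical: bank A leaves, then bank B leaves

reshaped = np.array([
    -np.array(mlc[0:num_leaves][::-1]),
    np.array(mlc[num_leaves::][::-1])
][::-1]).T

# reshaped has shape (3, 2):
# [[ 7 11]
#  [ 9 12]
#  [ 8 10]]
# one row per leaf pair (in reversed order), bank B in the first column and
# the sign-flipped bank A value in the second.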
Example #4
    def get_increment(self):
        """ minimum step-size increment

        Returns
        -------
        increment : float

        """
        steps = np.diff(self.x)
        if np.isclose(steps.min(), steps.mean()):
            return steps.mean()
        else:
            return steps.min()
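
The underlying check can be exercised on a bare array; a brief sketch with made-up values showing why the method falls back to the minimum step when spacing is non-uniform:

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.5, 5.0])  # non-uniform spacing
steps = np.diff(x)                        # [1. , 1. , 1.5, 1.5]
np.isclose(steps.min(), steps.mean())     # False, so the minimum step (1.0) is returned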
Example #5
def search_for_centre_of_largest_bounded_circle(x, y, callback=None):
    """Find the centre of the largest bounded circle within the insert."""
    insert = shapely_insert(x, y)
    boundary = insert.boundary
    centroid = insert.centroid

    furthest_distance = np.hypot(np.diff(insert.bounds[::2]),
                                 np.diff(insert.bounds[1::2]))

    def minimising_function(optimiser_input):
        x, y = optimiser_input
        point = shapely.geometry.Point(x, y)

        if insert.contains(point):
            edge_distance = point.distance(boundary)
        else:
            edge_distance = -point.distance(boundary)

        return -edge_distance

    x0 = np.squeeze(centroid.coords)
    niter = 200
    T = furthest_distance / 3
    stepsize = furthest_distance / 2
    niter_success = 50
    output = scipy.optimize.basinhopping(
        minimising_function,
        x0,
        niter=niter,
        T=T,
        stepsize=stepsize,
        niter_success=niter_success,
        callback=callback,
    )

    circle_centre = output.x

    return circle_centre
Example #6
def angle_dd2dcm(angle):
    diff = np.append(np.diff(angle), 0)
    movement = (np.empty_like(angle)).astype(str)

    movement[diff > 0] = "CW"
    movement[diff < 0] = "CC"
    movement[diff == 0] = "NONE"

    converted_angle = np.array(angle, copy=False)
    converted_angle[
        converted_angle < 0] = converted_angle[converted_angle < 0] + 360

    converted_angle = converted_angle.astype(str).tolist()

    return converted_angle, movement
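
An illustrative call with made-up angles (not from the original source). Note that, because of copy=False, the negative-angle shift also mutates an ndarray input in place:

import numpy as np

angle = np.array([-90.0, 0.0, 90.0, 90.0])
converted, movement = angle_dd2dcm(angle)
# converted -> ['270.0', '0.0', '90.0', '90.0']   (list of str)
# movement  -> ['CW', 'CW', 'NONE', 'NONE']       (numpy array of str)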
Example #7
def delivery_from_icom_stream(icom_stream):
    icom_stream_points = extract.get_data_points(icom_stream)
    delivery_raw = [
        get_delivery_data_items(single_icom_stream)
        for single_icom_stream in icom_stream_points
    ]

    mu = np.array([item[0] for item in delivery_raw])
    diff_mu = np.concatenate([[0], np.diff(mu)])
    diff_mu[diff_mu < 0] = 0
    mu = np.cumsum(diff_mu)

    gantry = np.array([item[1] for item in delivery_raw])
    collimator = np.array([item[2] for item in delivery_raw])
    mlc = np.array([item[3] for item in delivery_raw])
    jaw = np.array([item[4] for item in delivery_raw])

    return mu, gantry, collimator, mlc, jaw
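
The cumulative-MU reconstruction used here (and in several of the examples below) in isolation, with made-up numbers: take first differences, zero out any negative jumps such as a counter reset, then re-accumulate.

import numpy as np

raw_mu = np.array([0.0, 10.0, 25.0, 0.5, 12.0])   # hypothetical readout with a reset after 25
diff_mu = np.concatenate([[0], np.diff(raw_mu)])  # [0, 10, 15, -24.5, 11.5]
diff_mu[diff_mu < 0] = 0
mu = np.cumsum(diff_mu)                           # [0, 10, 25, 25, 36.5]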
Example #8
    def _from_pandas(cls: Type[DeliveryGeneric], table) -> DeliveryGeneric:
        raw_monitor_units = table["Step Dose/Actual Value (Mu)"]

        diff = np.append([0], np.diff(raw_monitor_units))
        diff[diff < 0] = 0

        monitor_units = np.cumsum(diff)

        gantry = table[GANTRY_NAME]
        collimator = table[COLLIMATOR_NAME]

        y1_bank = [table[name] for name in Y1_LEAF_BANK_NAMES]

        y2_bank = [table[name] for name in Y2_LEAF_BANK_NAMES]

        mlc = [y1_bank, y2_bank]
        mlc = np.swapaxes(mlc, 0, 2)

        jaw = [table[name] for name in JAW_NAMES]
        jaw = np.swapaxes(jaw, 0, 1)

        return cls(monitor_units, gantry, collimator, mlc, jaw)
Example #9
    def _from_mosaiq_base(cls, cursor, field_id):
        txfield_results, txfieldpoint_results = fetch_and_verify_mosaiq_sql(
            cursor, field_id)

        total_mu = np.array(txfield_results[0]).astype(float)
        cumulative_percentage_mu = txfieldpoint_results[:, 0].astype(float)

        if np.shape(cumulative_percentage_mu) == ():
            mu_per_control_point = [0, total_mu]
        else:
            cumulative_mu = cumulative_percentage_mu * total_mu / 100
            mu_per_control_point = np.concatenate([[0],
                                                   np.diff(cumulative_mu)])

        monitor_units = np.cumsum(mu_per_control_point).tolist()

        mlc_a = np.squeeze(
            decode_msq_mlc(txfieldpoint_results[:, 1].astype(bytes))).T
        mlc_b = np.squeeze(
            decode_msq_mlc(txfieldpoint_results[:, 2].astype(bytes))).T

        msq_gantry_angle = txfieldpoint_results[:, 3].astype(float)
        msq_collimator_angle = txfieldpoint_results[:, 4].astype(float)

        coll_y1 = txfieldpoint_results[:, 5].astype(float)
        coll_y2 = txfieldpoint_results[:, 6].astype(float)

        mlc, jaw = collimation_to_bipolar_mm(mlc_a, mlc_b, coll_y1, coll_y2)
        gantry = convert_IEC_angle_to_bipolar(msq_gantry_angle)
        collimator = convert_IEC_angle_to_bipolar(msq_collimator_angle)

        # TODO Tidy up this axis swap
        mlc = np.swapaxes(mlc, 0, 2)
        jaw = np.swapaxes(jaw, 0, 1)

        mosaiq_delivery_data = cls(monitor_units, gantry, collimator, mlc, jaw)

        return mosaiq_delivery_data
Example #10
    def _gantry_angle_masks(self,
                            gantry_angles,
                            gantry_tol,
                            allow_missing_angles=False):
        masks = [
            self._gantry_angle_mask(gantry_angle, gantry_tol)
            for gantry_angle in gantry_angles
        ]

        for mask in masks:
            if np.all(mask == 0):
                continue

            # TODO: Apply mask by more than just gantry angle to appropriately
            # extract beam index even when multiple beams have the same gantry
            # angle
            is_duplicate_gantry_angles = (np.sum(
                np.abs(np.diff(np.concatenate([[0], mask, [0]])))) != 2)

            if is_duplicate_gantry_angles:
                raise ValueError("Duplicate gantry angles not yet supported")

        try:
            assert np.all(np.sum(masks, axis=0) == 1), (
                "Not all beams were captured by the gantry tolerance of "
                "{}".format(gantry_tol))
        except AssertionError:
            if not allow_missing_angles:
                print("Allowable gantry angles = {}".format(gantry_angles))
                gantry = np.array(self.gantry, copy=False)
                out_of_tolerance = np.unique(
                    gantry[np.sum(masks, axis=0) == 0]).tolist()
                print("The gantry angles out of tolerance were {}".format(
                    out_of_tolerance))

                raise

        return masks
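
The duplicate-angle test above is an edge-counting trick: padding the mask with zeros and summing the absolute first differences counts the 0-to-1 and 1-to-0 transitions, so one contiguous run of matches gives exactly 2. A small sketch with hypothetical masks:

import numpy as np

def edge_count(mask):
    return np.sum(np.abs(np.diff(np.concatenate([[0], mask, [0]]))))

edge_count(np.array([0, 1, 1, 1, 0]))  # 2 -> one contiguous beam at this angle
edge_count(np.array([0, 1, 0, 1, 0]))  # 4 -> two separate runs, i.e. duplicate gantry angles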
Example #11
def find_relevant_control_points(mu):
    """Return the control points that had an MU difference on either side."""
    mu_diff = np.diff(mu)
    no_change = mu_diff == 0
    try:
        start = no_change[0]
        end = no_change[-1]
    except IndexError:
        all_true = np.empty_like(mu).astype(bool)
        all_true.fill(True)
        return all_true

    no_change_before = no_change[0:-1]
    no_change_after = no_change[1::]

    no_change_before_and_after = no_change_before & no_change_after

    irrelevant_control_point = np.hstack(
        [start, no_change_before_and_after, end])
    relevant_control_points = np.invert(irrelevant_control_point)

    return relevant_control_points
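
A quick illustrative call with made-up MU values; control points with zero MU change on both sides are dropped:

import numpy as np

mu = np.array([0, 0, 10, 10, 10, 20, 20])
find_relevant_control_points(mu)
# -> array([False,  True,  True, False,  True,  True, False])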
Example #12
    def merge(self: DeliveryGeneric,
              *args: DeliveryGeneric) -> DeliveryGeneric:
        cls = type(self)
        separate: List[DeliveryGeneric] = [self] + [*args]
        collection: Dict[str, Tuple] = {}

        for delivery_data in separate:
            for field in delivery_data._fields:  # pylint: disable=no-member
                try:
                    collection[field] = np.concatenate(
                        [collection[field],
                         getattr(delivery_data, field)],
                        axis=0)
                except KeyError:
                    collection[field] = getattr(delivery_data, field)

        mu = np.concatenate([[0], np.diff(collection["monitor_units"])])
        mu[mu < 0] = 0
        collection["monitor_units"] = np.cumsum(mu)

        merged = cls(**collection)

        return merged
Example #13
def get_mosaiq_delivery_data_bygantry(mosaiq_delivery_data):
    mu = np.array(mosaiq_delivery_data.monitor_units)
    mlc = np.array(mosaiq_delivery_data.mlc)
    jaw = np.array(mosaiq_delivery_data.jaw)
    gantry_angles = np.array(mosaiq_delivery_data.gantry)
    unique_mosaiq_gantry_angles = np.unique(gantry_angles)

    mosaiq_delivery_data_bygantry = dict()

    for mosaiq_gantry_angle in unique_mosaiq_gantry_angles:
        gantry_angle_matches = gantry_angles == mosaiq_gantry_angle

        diff_mu = np.concatenate([[0], np.diff(mu)])[gantry_angle_matches]
        gantry_angle_specific_mu = np.cumsum(diff_mu)

        mosaiq_delivery_data_bygantry[mosaiq_gantry_angle] = dict()
        mosaiq_delivery_data_bygantry[mosaiq_gantry_angle][
            "mu"] = gantry_angle_specific_mu
        mosaiq_delivery_data_bygantry[mosaiq_gantry_angle]["mlc"] = mlc[
            gantry_angle_matches]
        mosaiq_delivery_data_bygantry[mosaiq_gantry_angle]["jaw"] = jaw[
            gantry_angle_matches]

    return mosaiq_delivery_data_bygantry
Example #14
def peak_detect(
    values,
    threshold: Union[float, int] = None,
    min_distance: Union[float, int] = 10,
    max_number: int = None,
    search_region=(0.0, 1.0),
    find_min_instead: bool = False,
):
    """Find the peaks or valleys of a 1D signal.

    Uses the difference (np.diff) in signal to find peaks. Current limitations include:
        1) Only for use in 1-D data; 2D may be possible with the gradient function.
        2) Will not detect peaks at the very edges of the array (i.e. the 0 or -1 index)

    Parameters
    ----------
    values : array-like
        Signal values to search for peaks within.
    threshold : int, float
        The value a peak must exceed to be considered a peak. This removes "peaks"
        that sit in a low-value region.
        If passed an int, that value is used directly as the threshold.
        E.g. when passed 15, any peak with a value <15 is removed.
        If passed a float, it must be between 0 and 1 and is interpreted as a
        fraction of the data range above the minimum.
        E.g. when passed 0.4, any peak below 40% of the data range is removed.
    min_distance : int, float
        If passed an int, the number of elements apart a peak must be from neighboring peaks.
        If passed a float, must be between 0 and 1 and is interpreted as a fraction of the profile length.
        E.g. if passed 0.05 with a 1000-element profile, the minimum peak separation will be 0.05*1000 = 50 elements.
    max_number : int
        Specify up to how many peaks will be returned. E.g. if 3 is passed in and 5 peaks are found, only the 3 largest
        peaks will be returned.
    find_min_instead : bool
        If False (default), peaks will be returned.
        If True, valleys will be returned.

    Returns
    -------
    max_vals : numpy.array
        The values of the peaks found.
    max_idxs : numpy.array
        The x-indices (locations) of the peaks.

    Raises
    ------
    ValueError
        If a float not between 0 and 1 is passed to threshold.
    """
    peak_vals = []  # a list to hold the y-values of the peaks; converted to a numpy array later
    peak_idxs = []  # ditto for the x-values (indices) of the y data

    if find_min_instead:
        values = -values

    # """Limit search to search region"""
    left_end = search_region[0]
    if is_float_like(left_end):
        left_index = int(left_end * len(values))
    elif is_int_like(left_end):
        left_index = left_end
    else:
        raise ValueError(f"{left_end} must be a float or int")

    right_end = search_region[1]
    if is_float_like(right_end):
        right_index = int(right_end * len(values))
    elif is_int_like(right_end):
        right_index = right_end
    else:
        raise ValueError(f"{right_end} must be a float or int")

    # minimum peak spacing calc
    if isinstance(min_distance, float):
        if min_distance < 0 or min_distance >= 1:
            raise ValueError(
                "When min_distance is passed a float, value must be between 0 and 1"
            )
        min_distance = int(min_distance * len(values))

    values = values[left_index:right_index]

    # """Determine threshold value"""
    if isinstance(threshold, float) and threshold < 1:
        data_range = values.max() - values.min()
        threshold = threshold * data_range + values.min()
    elif isinstance(threshold, float) and threshold >= 1:
        raise ValueError("When threshold is passed a float, value must be less than 1")
    elif threshold is None:
        threshold = values.min()

    # """Take difference"""
    values_diff = np.diff(
        values.astype(float)
    )  # y and y_diff must be converted to signed type.

    # """Find all potential peaks"""
    for idx in range(len(values_diff) - 1):
        # For each item of the diff array, check if:
        # 1) The y-value is above the threshold.
        # 2) The value of y_diff is positive (negative for valley search), it means the y-value changed upward.
        # 3) The next y_diff value is zero or negative (or positive for valley search); a positive-then-negative diff value means the value
        # is a peak of some kind. If the diff is zero it could be a flat peak, which still counts.

        # 1)
        if values[idx + 1] < threshold:
            continue

        y1_gradient = values_diff[idx] > 0
        y2_gradient = values_diff[idx + 1] <= 0

        # 2) & 3)
        if y1_gradient and y2_gradient:
            # If the next value isn't zero it's a single-pixel peak. Easy enough.
            if values_diff[idx + 1] != 0:
                peak_vals.append(values[idx + 1])
                peak_idxs.append(idx + 1 + left_index)
            # elif idx >= len(y_diff) - 1:
            #     pass
            # Else if the diff value is zero, it could be a flat peak, or it could keep going up; we don't know yet.
            else:
                # Continue on until we find the next nonzero diff value.
                try:
                    shift = 0
                    while values_diff[(idx + 1) + shift] == 0:
                        shift += 1
                        if (idx + 1 + shift) >= (len(values_diff) - 1):
                            break
                    # If the next diff is negative (or positive for min), we've found a peak. Also put the peak at the center of the flat
                    # region.
                    is_a_peak = values_diff[(idx + 1) + shift] < 0
                    if is_a_peak:
                        peak_vals.append(values[int((idx + 1) + np.round(shift / 2))])
                        peak_idxs.append((idx + 1 + left_index) + np.round(shift / 2))
                except IndexError:
                    pass

    # convert to numpy arrays
    peak_vals = np.array(peak_vals)
    peak_idxs = np.array(peak_idxs)

    # """Enforce the min_peak_distance by removing smaller peaks."""
    # For each peak, determine if the next peak is within the min peak width range.
    index = 0
    while index < len(peak_idxs) - 1:

        # If the second peak is closer than min_peak_distance to the first peak, find the larger peak and remove the other one.
        if peak_idxs[index] > peak_idxs[index + 1] - min_distance:
            if peak_vals[index] > peak_vals[index + 1]:
                idx2del = index + 1
            else:
                idx2del = index
            peak_vals = np.delete(peak_vals, idx2del)
            peak_idxs = np.delete(peak_idxs, idx2del)
        else:
            index += 1

    # """If Maximum Number passed, return only up to number given based on a sort of peak values."""
    if max_number is not None and len(peak_idxs) > max_number:
        sorted_peak_vals = peak_vals.argsort()  # type: ignore  # sorts low to high
        peak_vals = peak_vals[
            sorted_peak_vals[
                -max_number:  # pylint: disable = invalid-unary-operand-type
            ]
        ]
        peak_idxs = peak_idxs[
            sorted_peak_vals[
                -max_number:  # pylint: disable = invalid-unary-operand-type
            ]
        ]

    # If we were looking for minimums, convert the values back to the original sign
    if find_min_instead:
        peak_vals = (
            -peak_vals  # type: ignore  # pylint: disable = invalid-unary-operand-type
        )

    return peak_vals, peak_idxs
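
A usage sketch on a synthetic signal, assuming peak_detect and its is_float_like / is_int_like helpers are importable from the same module:

import numpy as np

x = np.linspace(0, 4 * np.pi, 400)
signal = np.sin(x)

peak_vals, peak_idxs = peak_detect(signal, threshold=0.5, min_distance=0.1)
# threshold=0.5 resolves to the midpoint of the data range (about 0 here) and
# min_distance=0.1 requires peaks to be at least 0.1 * 400 = 40 elements apart.
# The two sine crests near x = pi/2 and x = 5*pi/2 are returned, values close to 1.0.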
Example #15
def gantry_tol_from_gantry_angles(gantry_angles):
    min_diff = np.min(np.diff(sorted(gantry_angles)))
    gantry_tol = np.min([min_diff / 2 - 0.1, 3])

    return gantry_tol
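
A worked example with hypothetical angles: for gantry angles of -120, 0 and 90 degrees the smallest sorted gap is 90 degrees, so half the gap minus 0.1 (44.9) is capped at the 3 degree ceiling.

gantry_tol_from_gantry_angles([-120, 0, 90])  # min(90 / 2 - 0.1, 3) -> 3.0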
Example #16
    def _from_dicom_beam(cls, beam, meterset):
        if meterset is None:
            raise ValueError("Meterset should not ever be None")

        beam_limiting_device_sequence = beam.BeamLimitingDeviceSequence

        rt_beam_limiting_device_types = {
            item.RTBeamLimitingDeviceType
            for item in beam_limiting_device_sequence
        }

        supported_configurations = [{"MLCX", "ASYMY"}]

        if rt_beam_limiting_device_types not in supported_configurations:
            raise ValueError(
                _pretty_print("""\
                        Currently only DICOM files where the beam
                        limiting devices consist of one of the following
                        combinations are supported:

                        * {supported_configurations}

                        The provided RT Plan DICOM file has the
                        following:

                            {rt_beam_limiting_device_types}

                        This is not yet supported.
                        This is due to a range of assumptions being made
                        internally that assume a single jaw system.
                        There are some cases where this restriction is
                        too tight. Currently however there is not enough
                        testing data to appropriately implement these
                        cases.
                        If you would like to have your device supported
                        please consider uploading anonymised DICOM files
                        and their TRF counterparts to the following
                        issue
                        <https://github.com/pymedphys/pymedphys/issues/1142>.
                    """).format(
                    supported_configurations="\n* ".join(
                        [str(item) for item in supported_configurations]),
                    rt_beam_limiting_device_types=rt_beam_limiting_device_types,
                ))

        mlc_sequence = [
            item for item in beam_limiting_device_sequence
            if item.RTBeamLimitingDeviceType == "MLCX"
        ]

        if len(mlc_sequence) != 1:
            raise ValueError(
                "Expected there to be only one device labelled as MLCX")

        mlc_limiting_device = mlc_sequence[0]

        leaf_boundaries = mlc_limiting_device.LeafPositionBoundaries
        leaf_widths = np.diff(leaf_boundaries)

        if mlc_limiting_device.NumberOfLeafJawPairs != len(leaf_widths):
            raise ValueError("Expected number of leaf pairs to be the same as "
                             "the length of leaf widths")

        num_leaves = len(leaf_widths)

        control_points = beam.ControlPointSequence

        beam_limiting_device_position_sequences = rtplan.get_cp_attribute_leaning_on_prior(
            control_points, "BeamLimitingDevicePositionSequence")

        dicom_mlcs = rtplan.get_leaf_jaw_positions_for_type(
            beam_limiting_device_position_sequences, "MLCX")

        mlcs = [
            np.array([
                -np.array(mlc[0:num_leaves][::-1]),
                np.array(mlc[num_leaves::][::-1])
            ][::-1]).T for mlc in dicom_mlcs
        ]

        dicom_jaw = rtplan.get_leaf_jaw_positions_for_type(
            beam_limiting_device_position_sequences, "ASYMY")

        jaw = np.array(dicom_jaw)

        second_col = deepcopy(jaw[:, 1])

        jaw[:, 1] = jaw[:, 0]
        jaw[:, 0] = second_col

        jaw[:, 1] = -jaw[:, 1]

        final_mu_weight = np.array(beam.FinalCumulativeMetersetWeight)

        if final_mu_weight is None:
            raise ValueError(
                "FinalCumulativeMetersetWeight should not be None")

        # https://dicom.innolitics.com/ciods/rt-plan/rt-beams/300a00b0/300a0111/300a0134
        # http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.8.14.html#sect_C.8.8.14.1

        cumulative_meterset_weight = [
            control_point.CumulativeMetersetWeight
            for control_point in control_points
        ]

        for weight in cumulative_meterset_weight:
            if weight is None:
                raise ValueError(
                    "Cumulative Meterset weight not set within DICOM RT plan file. "
                    "This may be due to the plan being exported from a planning system "
                    "without the dose being calculated.")

        mu = [
            meterset * np.array(weight) / final_mu_weight for control_point,
            weight in zip(control_points, cumulative_meterset_weight)
        ]

        gantry_angles = convert_IEC_angle_to_bipolar(
            rtplan.get_cp_attribute_leaning_on_prior(control_points,
                                                     "GantryAngle"))

        collimator_angles = convert_IEC_angle_to_bipolar(
            rtplan.get_cp_attribute_leaning_on_prior(
                control_points, "BeamLimitingDeviceAngle"))

        return cls(mu, gantry_angles, collimator_angles, mlcs, jaw)
Example #17
def calc_mu_density(
    mu,
    mlc,
    jaw,
    grid_resolution=None,
    max_leaf_gap=None,
    leaf_pair_widths=None,
    min_step_per_pixel=None,
):
    """Determine the MU Density.

    Both jaw and mlc positions are defined in bipolar format for each control
    point. A negative value indicates travel over the isocentre. All positional
    arguments are defined at the isocentre projection with the units of mm.

    Parameters
    ----------
    mu : numpy.ndarray
        1-D array containing an MU value for each control point.
    mlc : numpy.ndarray
        3-D array containing the MLC positions

            | axis 0: control point
            | axis 1: mlc pair
            | axis 2: leaf bank

    jaw : numpy.ndarray
        2-D array containing the jaw positions.

            | axis 0: control point
            | axis 1: diaphragm

    grid_resolution : float, optional
        The calc grid resolution. Defaults to 1 mm.

    max_leaf_gap : float, optional
        The maximum possible distance between opposing leaves. Defaults to
        400 mm.

    leaf_pair_widths : tuple, optional
        The widths of each leaf pair in the
        MLC limiting device. The number of entries in the tuple defines
        the number of leaf pairs. Each entry itself defines that particular
        leaf pair width. Defaults to 80 leaf pairs each 5 mm wide.

    min_step_per_pixel : int, optional
        The minimum number of time steps
        used per pixel for each control point. Defaults to 10.

    Returns
    -------
    mu_density : numpy.ndarray
        2-D array containing the calculated mu density.

            | axis 0: jaw direction
            | axis 1: mlc direction

    Examples
    --------
    >>> import numpy as np
    >>> import pymedphys
    >>>
    >>> leaf_pair_widths = (5, 5, 5)
    >>> max_leaf_gap = 10
    >>> mu = np.array([0, 2, 5, 10])
    >>> mlc = np.array([
    ...     [
    ...         [1, 1],
    ...         [2, 2],
    ...         [3, 3]
    ...     ],
    ...     [
    ...         [2, 2],
    ...         [3, 3],
    ...         [4, 4]
    ...     ],
    ...     [
    ...         [-2, 3],
    ...         [-2, 4],
    ...         [-2, 5]
    ...     ],
    ...     [
    ...         [0, 0],
    ...         [0, 0],
    ...         [0, 0]
    ...     ]
    ... ])
    >>> jaw = np.array([
    ...     [7.5, 7.5],
    ...     [7.5, 7.5],
    ...     [-2, 7.5],
    ...     [0, 0]
    ... ])
    >>>
    >>> grid = pymedphys.mudensity.grid(
    ...    max_leaf_gap=max_leaf_gap, leaf_pair_widths=leaf_pair_widths)
    >>> grid['mlc']
    array([-5., -4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.,  5.])
    >>>
    >>> grid['jaw']
    array([-8., -7., -6., -5., -4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.,
            5.,  6.,  7.,  8.])
    >>>
    >>> mu_density = pymedphys.mudensity.calculate(
    ...    mu, mlc, jaw, max_leaf_gap=max_leaf_gap,
    ...    leaf_pair_widths=leaf_pair_widths)
    >>> pymedphys.mudensity.display(grid, mu_density)
    >>>
    >>> np.round(mu_density, 1)
    array([[0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.3, 1.9, 2.2, 1.9, 0.4, 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.4, 2.2, 2.5, 2.2, 0.6, 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.4, 2.4, 2.8, 2.5, 0.8, 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.4, 2.5, 3.1, 2.8, 1. , 0. , 0. , 0. ],
           [0. , 0. , 0. , 0.4, 2.5, 3.4, 3.1, 1.3, 0. , 0. , 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.7, 3.7, 3.5, 1.6, 0. , 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.8, 4. , 3.8, 1.9, 0.1, 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.8, 4.3, 4.1, 2.3, 0.1, 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.9, 5.2, 4.7, 2.6, 0.2, 0. ],
           [0. , 0. , 0.4, 2.3, 3.2, 3.8, 5.4, 6.6, 3.8, 0.5, 0. ],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 5.1, 7.5, 6.7, 3.9, 0.5],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 4.7, 6.9, 6.7, 3.9, 0.5],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 4.5, 6.3, 6.4, 3.9, 0.5],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 4.5, 5.6, 5.7, 3.8, 0.5],
           [0. , 0.3, 2.2, 3. , 3.5, 4. , 4.5, 5.1, 5.1, 3.3, 0.5],
           [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])


    MU Density from a Mosaiq record

    >>> import pymedphys
    >>>
    >>> def mu_density_from_mosaiq(msq_server_name, field_id):
    ...     with pymedphys.mosaiq.connect(msq_server_name) as cursor:
    ...         delivery = pymedphys.Delivery.from_mosaiq(cursor, field_id)
    ...
    ...     grid = pymedphys.mudensity.grid()
    ...     mu_density = delivery.mudensity()
    ...     pymedphys.mudensity.display(grid, mu_density)
    >>>
    >>> mu_density_from_mosaiq('a_server_name', 11111) # doctest: +SKIP


    MU Density from a logfile at a given filepath

    >>> import pymedphys
    >>>
    >>> def mu_density_from_logfile(filepath):
    ...     delivery_data = pymedphys.Delivery.from_logfile(filepath)
    ...     mu_density = delivery_data.mudensity()
    ...
    ...     grid = pymedphys.mudensity.grid()
    ...     pymedphys.mudensity.display(grid, mu_density)
    >>>
    >>> mu_density_from_logfile(r"a/path/goes/here")  # doctest: +SKIP

    """

    if grid_resolution is None:
        grid_resolution = __DEFAULT_GRID_RESOLUTION

    if max_leaf_gap is None:
        max_leaf_gap = __DEFAULT_MAX_LEAF_GAP

    if leaf_pair_widths is None:
        leaf_pair_widths = __DEFAULT_LEAF_PAIR_WIDTHS

    if min_step_per_pixel is None:
        min_step_per_pixel = __DEFAULT_MIN_STEP_PER_PIXEL

    divisibility_of_max_leaf_gap = np.array(max_leaf_gap / 2 / grid_resolution)
    max_leaf_gap_is_divisible = (
        divisibility_of_max_leaf_gap.astype(int) == divisibility_of_max_leaf_gap
    )

    if not max_leaf_gap_is_divisible:
        raise ValueError(
            "The grid resolution needs to be able to divide the max leaf gap exactly by"
            " four"
        )

    leaf_pair_widths = np.array(leaf_pair_widths)

    if not np.max(np.abs(mlc)) <= max_leaf_gap / 2:  # pylint: disable = unneeded-not
        first_failing_control_point = np.where(np.abs(mlc) > max_leaf_gap / 2)[0][0]

        raise ValueError(
            "The mlc should not travel further out than half the maximum leaf gap.\n"
            "The first failing control point has the following positions:\n"
            f"{np.array(mlc)[first_failing_control_point, :, :]}"
        )

    mu, mlc, jaw = remove_irrelevant_control_points(mu, mlc, jaw)

    full_grid = get_grid(max_leaf_gap, grid_resolution, leaf_pair_widths)

    mu_density = np.zeros((len(full_grid["jaw"]), len(full_grid["mlc"])))

    for i in range(len(mu) - 1):
        control_point_slice = slice(i, i + 2, 1)
        current_mlc = mlc[control_point_slice, :, :]
        current_jaw = jaw[control_point_slice, :]
        delivered_mu = np.diff(mu[control_point_slice])

        grid, mu_density_of_slice = calc_single_control_point(
            current_mlc,
            current_jaw,
            delivered_mu,
            leaf_pair_widths=leaf_pair_widths,
            grid_resolution=grid_resolution,
            min_step_per_pixel=min_step_per_pixel,
        )
        full_grid_mu_density_of_slice = _convert_to_full_grid(
            grid, full_grid, mu_density_of_slice
        )

        mu_density += full_grid_mu_density_of_slice

    return mu_density
Example #18
    def _from_dicom_beam(cls, beam, meterset):
        if meterset is None:
            raise ValueError("Meterset should not ever be None")

        leaf_boundaries = beam.BeamLimitingDeviceSequence[-1].LeafPositionBoundaries
        leaf_widths = np.diff(leaf_boundaries)

        assert beam.BeamLimitingDeviceSequence[-1].NumberOfLeafJawPairs == len(
            leaf_widths
        )
        num_leaves = len(leaf_widths)

        control_points = beam.ControlPointSequence

        beam_limiting_device_position_sequences = get_cp_attribute_leaning_on_prior(
            control_points, "BeamLimitingDevicePositionSequence"
        )

        mlcs = [
            sequence[-1].LeafJawPositions
            for sequence in beam_limiting_device_position_sequences
        ]

        mlcs = [
            np.array(
                [-np.array(mlc[0:num_leaves][::-1]), np.array(mlc[num_leaves::][::-1])][
                    ::-1
                ]
            ).T
            for mlc in mlcs
        ]

        dicom_jaw = [
            sequence[0].LeafJawPositions
            for sequence in beam_limiting_device_position_sequences
        ]

        jaw = np.array(dicom_jaw)

        second_col = deepcopy(jaw[:, 1])
        jaw[:, 1] = jaw[:, 0]
        jaw[:, 0] = second_col

        jaw[:, 1] = -jaw[:, 1]

        final_mu_weight = np.array(beam.FinalCumulativeMetersetWeight)

        if final_mu_weight is None:
            raise ValueError("FinalCumulativeMetersetWeight should not be None")

        # https://dicom.innolitics.com/ciods/rt-plan/rt-beams/300a00b0/300a0111/300a0134
        # http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.8.8.14.html#sect_C.8.8.14.1

        cumulative_meterset_weight = [
            control_point.CumulativeMetersetWeight for control_point in control_points
        ]

        for weight in cumulative_meterset_weight:
            if weight is None:
                raise ValueError(
                    "Cumulative Meterset weight not set within DICOM RT plan file. "
                    "This may be due to the plan being exported from a planning system "
                    "without the dose being calculated."
                )

        mu = [
            meterset * np.array(weight) / final_mu_weight
            for control_point, weight in zip(control_points, cumulative_meterset_weight)
        ]

        gantry_angles = convert_IEC_angle_to_bipolar(
            get_cp_attribute_leaning_on_prior(control_points, "GantryAngle")
        )

        collimator_angles = convert_IEC_angle_to_bipolar(
            get_cp_attribute_leaning_on_prior(control_points, "BeamLimitingDeviceAngle")
        )

        return cls(mu, gantry_angles, collimator_angles, mlcs, jaw)
Example #19
    def _gantry_angle_mask(self, gantry_angle, gantry_angle_tol):
        near_angle = np.abs(np.array(self.gantry) -
                            gantry_angle) <= gantry_angle_tol
        assert np.all(np.diff(np.where(near_angle)[0]) == 1)

        return near_angle
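
The assertion is a contiguity check on the mask: the indices where near_angle is True must be consecutive. In isolation, with hypothetical masks:

import numpy as np

np.all(np.diff(np.where(np.array([False, True, True, True, False]))[0]) == 1)  # True: one run
np.all(np.diff(np.where(np.array([True, False, True]))[0]) == 1)               # False: gap in the run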