Example #1
def calc_comparison(logfile_mu_density, mosaiq_mu_density, normalisation=None):
    if normalisation is None:
        normalisation = np.sum(mosaiq_mu_density)

    comparison = np.sum(np.abs(logfile_mu_density - mosaiq_mu_density)) / normalisation

    return comparison
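A minimal usage sketch of calc_comparison, assuming NumPy is imported as np and using illustrative MU density grids:

import numpy as np

logfile_mu_density = np.array([[1.0, 2.0], [3.0, 4.0]])
mosaiq_mu_density = np.array([[1.0, 2.1], [2.9, 4.0]])

# With no normalisation given, the total Mosaiq MU density is used
print(calc_comparison(logfile_mu_density, mosaiq_mu_density))  # 0.02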
Example #2
    def _fraction_number(self, dicom_template, gantry_tol=3, meterset_tol=0.5):
        fractions = dicom_template.FractionGroupSequence

        if len(fractions) == 1:
            return fractions[0].FractionGroupNumber

        fraction_numbers = [
            fraction.FractionGroupNumber for fraction in fractions
        ]

        fraction_matches = np.array([
            self._matches_fraction(
                dicom_template,
                fraction_number,
                gantry_tol=gantry_tol,
                meterset_tol=meterset_tol,
            ) for fraction_number in fraction_numbers
        ])

        if np.sum(fraction_matches) < 1:
            raise ValueError(
                "No fraction group was found with metersets and gantry "
                "angles within the tolerances provided. Please manually "
                "define the fraction group number.")

        if np.sum(fraction_matches) > 1:
            raise ValueError(
                "More than one fraction group was found that had metersets "
                "and gantry angles within the tolerances provided. "
                "Please manually define the fraction group number.")

        fraction_number = np.array(fraction_numbers)[fraction_matches]

        return fraction_number
Example #3
def create_bb_to_minimise(field, bb_diameter):
    """This is a numpy vectorised version of `create_bb_to_minimise_simple`
    """

    points_to_check_edge_agreement, dist = interppoints.create_bb_points_function(
        bb_diameter)
    # Group the sample points into rings of equal distance from the centre
    dist_mask = np.unique(dist)[:, None] == dist[None, :]
    num_in_mask = np.sum(dist_mask, axis=1)
    # For each point, the number of points that share its ring
    mask_count_per_item = np.sum(num_in_mask[:, None] * dist_mask, axis=0)
    mask_mean_lookup = np.where(dist_mask)[0]

    def to_minimise_edge_agreement(centre):
        x, y = points_to_check_edge_agreement(centre)

        results = field(x, y)
        # Mean field value within each ring
        masked_results = results * dist_mask
        mask_mean = np.sum(masked_results, axis=1) / num_in_mask
        # Squared deviation of each point from its ring's mean
        diff_to_mean_square = (results - mask_mean[mask_mean_lookup]) ** 2
        mean_of_layers = np.sum(
            diff_to_mean_square[1:] /
            mask_count_per_item[1:]) / (len(mask_mean) - 1)

        return mean_of_layers

    return to_minimise_edge_agreement
Example #4
    def to_minimise(inputs):
        x_shift = inputs[0]
        y_shift = inputs[1]
        angle = inputs[2]

        interpolated = shift_and_rotate_with_interp(moving_interp,
                                                    (ref_x, ref_y),
                                                    (x_shift, y_shift), angle)

        return np.sum(
            (interpolated - ref_edge_filtered)**2) - np.sum(interpolated)
Example #5
def soft_surface_dice(reference, evaluation):
    """Non-TensorFlow implementation of a soft surface dice
    """
    edge_reference = skimage.filters.scharr(reference)
    edge_evaluation = skimage.filters.scharr(evaluation)

    score = np.sum(np.abs(edge_evaluation - edge_reference)) / np.sum(
        edge_evaluation + edge_reference
    )

    return 1 - score
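A minimal usage sketch of soft_surface_dice with illustrative float masks (numpy and skimage imported as the snippet assumes); the result is closer to 1 the more the two edge maps agree:

import numpy as np
import skimage.filters

reference = np.zeros((64, 64))
reference[16:48, 16:48] = 1.0

evaluation = np.zeros((64, 64))
evaluation[18:50, 16:48] = 1.0  # the same square shifted down two pixels

print(soft_surface_dice(reference, evaluation))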
Example #6
    def to_minimise_edge_agreement(centre):
        x, y = points_to_check_edge_agreement(centre)

        results = field(x, y)
        # Mean field value within each equal-distance ring of sample points
        masked_results = results * dist_mask
        mask_mean = np.sum(masked_results, axis=1) / num_in_mask
        # Squared deviation of each point from its ring's mean
        diff_to_mean_square = (results - mask_mean[mask_mean_lookup]) ** 2
        mean_of_layers = np.sum(
            diff_to_mean_square[1:] /
            mask_count_per_item[1:]) / (len(mask_mean) - 1)

        return mean_of_layers
Example #7
def mephysto_absolute_profiles(
    curvetype,
    depth_test,
    distance,
    relative_dose,
    scan_curvetype,
    scan_depth,
    mephysto_pdd_depth,
    mephysto_pdd_dose,
):

    choose_mephysto = (scan_curvetype == curvetype) & (scan_depth == depth_test)

    if np.sum(choose_mephysto) != 1:
        raise ValueError(
            "Can only handle exactly one scan type per mephysto file")

    mephysto_distance = distance[choose_mephysto][0]
    mephysto_normalised_dose = normalisation.normalise_profile(
        mephysto_distance,
        relative_dose[choose_mephysto][0],
        scale_to_pdd=True,
        pdd_distance=mephysto_pdd_depth,
        pdd_relative_dose=mephysto_pdd_dose,
        scan_depth=depth_test,
    )

    return mephysto_distance, mephysto_normalised_dose
Example #8
def folder_analyze(volume):
    # Classify the folder from its first slice by comparing the maxima of
    # its column-sum and row-sum profiles
    stack1 = np.sum(volume[:, :, 0], axis=0)
    maxstack1 = np.max(stack1)

    stack2 = np.sum(volume[:, :, 0], axis=1)
    maxstack2 = np.max(stack2)

    if maxstack2 / maxstack1 > 1.5:  # It is a Y field folder
        return 2
    if maxstack2 / maxstack1 < 0.5:  # It is a X field folder
        return 1
    return 3  # It is a field rotation folder
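A minimal check of folder_analyze with a synthetic stack whose first slice contains a strong horizontal band (illustrative data only):

import numpy as np

volume = np.zeros((10, 10, 1))
volume[4:6, :, 0] = 1.0  # horizontal band: row sums dominate column sums

print(folder_analyze(volume))  # 2 -- classified as a Y field folder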
Example #9
def calc_min_distance(cube_definition, contours):
    vertices = cube_vertices(cube_definition)

    vectors = cube_vectors(cube_definition)
    unit_vectors = [vector / np.linalg.norm(vector) for vector in vectors]

    plane_norms = np.array(
        [
            unit_vectors[1],
            -unit_vectors[0],
            -unit_vectors[1],
            unit_vectors[0],
            unit_vectors[2],
            -unit_vectors[2],
        ]
    )

    plane_points = np.array(
        [vertices[0], vertices[1], vertices[2], vertices[0], vertices[0], vertices[3]]
    )

    # Plane offsets d such that each face satisfies n . x + d = 0
    plane_origin_dist = -np.sum(plane_points * plane_norms, axis=1)

    # Signed distance from every contour point to each of the six face planes
    distance_to_planes = np.dot(plane_norms, contours) + plane_origin_dist[:, None]

    min_dist_squared = np.min(distance_to_planes ** 2, axis=0)

    return min_dist_squared
Example #10
    def to_minimise(cube):
        cube_definition = cubify_cube_definition(
            [tuple(cube[0:3]),
             tuple(cube[3:6]),
             tuple(cube[6:])])
        min_dist_squared = calc_min_distance(cube_definition, contour_points)
        return np.sum(min_dist_squared)
Example #11
    def distance_to(self, point):
        """Calculate the minimum distance from the line to a point.

        Equations are from here: http://mathworld.wolfram.com/Point-LineDistance2-Dimensional.html #14

        Parameters
        ----------
        point : Point, iterable
            The point to calculate distance to.
        """
        point = Point(point).as_array()
        lp1 = self.point1.as_array()
        lp2 = self.point2.as_array()
        numerator = np.sqrt(
            np.sum(np.power(np.cross((lp2 - lp1), (lp1 - point)), 2)))
        denominator = np.sqrt(np.sum(np.power(lp2 - lp1, 2)))
        return numerator / denominator
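The same formula checked directly with NumPy on a hand-picked case: the point (0, 1) sits one unit from the line through (0, 0) and (1, 0).

import numpy as np

lp1, lp2, point = np.array([0, 0]), np.array([1, 0]), np.array([0, 1])
numerator = np.sqrt(np.sum(np.power(np.cross(lp2 - lp1, lp1 - point), 2)))
denominator = np.sqrt(np.sum(np.power(lp2 - lp1, 2)))
print(numerator / denominator)  # 1.0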
Example #12
def calc_normalisation(mosaiq_delivery_data):
    # The MU density of the delivery, normalised below by the number of
    # unique gantry angles
    all_gantry_angles = mosaiq_delivery_data.mudensity
    mosaiq_gantry_angles = np.unique(mosaiq_delivery_data.gantry)
    number_of_gantry_angles = len(mosaiq_gantry_angles)

    normalisation = np.sum(all_gantry_angles) / number_of_gantry_angles

    return normalisation
Example #13
def filled_area_ratio(array) -> float:
    """Return the ratio of filled pixels to empty pixels in the ROI bounding box.

    For example a solid square would be 1.0, while a sold circle would be ~0.785.
    """
    ymin, ymax, xmin, xmax = bounding_box(array)
    box_area = (ymax - ymin) * (xmax - xmin)
    filled_area = np.sum(array)
    return float(filled_area / box_area)
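A minimal sketch of filled_area_ratio. The bounding_box helper is not shown in the snippet, so an assumed stand-in that returns the nonzero extent is included here:

import numpy as np

def bounding_box(array):
    # Assumed behaviour: ymin, ymax, xmin, xmax of the nonzero region
    ys, xs = np.nonzero(array)
    return ys.min(), ys.max() + 1, xs.min(), xs.max() + 1

square = np.zeros((10, 10))
square[2:8, 2:8] = 1
print(filled_area_ratio(square))  # 1.0 for a solid square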
Example #14
def plot_gamma_hist(gamma, percent, dist):
    valid_gamma = gamma[~np.isnan(gamma)]

    plt.hist(valid_gamma, 50, density=True)
    pass_ratio = np.sum(valid_gamma <= 1) / len(valid_gamma)

    plt.title(
        "Local Gamma ({0}%/{1}mm) | Percent Pass: {2:.2f} % | Mean Gamma: {3:.2f} | Max Gamma: {4:.2f}"
        .format(percent, dist, pass_ratio * 100, np.mean(valid_gamma),
                np.max(valid_gamma)))
Example #15
def _determine_leaf_centres(leaf_pair_widths):
    total_leaf_widths = np.sum(leaf_pair_widths)
    leaf_centres = (np.cumsum(leaf_pair_widths) - leaf_pair_widths / 2 -
                    total_leaf_widths / 2)

    reference_leaf_index = len(leaf_centres) // 2

    top_of_reference_leaf = (leaf_centres[reference_leaf_index] +
                             leaf_pair_widths[reference_leaf_index] / 2)

    return leaf_centres, top_of_reference_leaf
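A quick check of _determine_leaf_centres using the same two-leaf-pair widths that appear elsewhere in these snippets:

import numpy as np

leaf_pair_widths = np.array([2, 2])
print(_determine_leaf_centres(leaf_pair_widths))
# (array([-1.,  1.]), 2.0)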
Example #16
    def to_minimise(centre):
        (
            xx_left_right,
            yy_left_right,
            xx_top_bot,
            yy_top_bot,
        ) = transform_penumbra_points(points_at_origin, centre, rotation)

        left_right_interpolated = field(xx_left_right, yy_left_right)
        top_bot_interpolated = field(xx_top_bot, yy_top_bot)

        left_right_weighted_diff = (
            2 * (left_right_interpolated - left_right_interpolated[:, ::-1]) /
            (left_right_interpolated + left_right_interpolated[:, ::-1]))
        top_bot_weighted_diff = (
            2 * (top_bot_interpolated - top_bot_interpolated[::-1, :]) /
            (top_bot_interpolated + top_bot_interpolated[::-1, :]))

        return np.sum(left_right_weighted_diff**2) + np.sum(
            top_bot_weighted_diff**2)
Example #17
    def _gantry_angle_masks(self,
                            gantry_angles,
                            gantry_tol,
                            allow_missing_angles=False):
        masks = [
            self._gantry_angle_mask(gantry_angle, gantry_tol)
            for gantry_angle in gantry_angles
        ]

        for mask in masks:
            if np.all(mask == 0):
                continue

            # TODO: Apply mask by more than just gantry angle to appropriately
            # extract beam index even when multiple beams have the same gantry
            # angle
            is_duplicate_gantry_angles = (np.sum(
                np.abs(np.diff(np.concatenate([[0], mask, [0]])))) != 2)

            if is_duplicate_gantry_angles:
                raise ValueError("Duplicate gantry angles not yet supported")

        try:
            assert np.all(np.sum(masks, axis=0) == 1), (
                "Not all beams were captured by the gantry tolerance of "
                "{}".format(gantry_tol))
        except AssertionError:
            if not allow_missing_angles:
                print("Allowable gantry angles = {}".format(gantry_angles))
                gantry = np.array(self.gantry, copy=False)
                out_of_tolerance = np.unique(
                    gantry[np.sum(masks, axis=0) == 0]).tolist()
                print("The gantry angles out of tolerance were {}".format(
                    out_of_tolerance))

                raise

        return masks
Example #18
def pull_mephysto_item(string, file_contents):
    """Steps through each scan region searching for a mephysto parameter that
    matches the requested string. Returns an array filled with the results for
    all scans that have a match. If a scan does not have a parameter matching
    the request then np.nan is returned.
    """
    scan_index = find_scan_index(file_contents)

    # Format the input string for use in a regex search
    string_test = re.escape(string)

    result = []
    # Loop over each scan index searching for requested parameter
    for index in scan_index:
        # See if any parameters match within this scan
        match = np.array([
            re.match(r"^\t\t" + string_test + "=(.*)$", item) is not None
            for item in file_contents[index]
        ])

        # If there is a single match then return the result
        if np.sum(match) == 1:
            relevant_line = file_contents[index][match][0]
            # Return the value that is after the equal sign
            # Example -- https://regex101.com/r/lR4pS2/7
            result.append(
                re.search(r"^\t\t" + string_test + "=(.*)$",
                          relevant_line).group(1))

        # If there are no matches, return np.nan
        elif np.sum(match) == 0:
            result.append(np.nan)

        # If there is more than one match, raise an error
        else:
            raise ValueError("More than one item has this label")

    return np.array(result)
Example #19
def _calc_device_open(blocked_by_device):
    device_open = {}

    for device, value in blocked_by_device.items():
        device_sum = np.sum(
            np.concatenate(
                [np.expand_dims(blocked, axis=0) for _, blocked in value.items()],
                axis=0,
            ),
            axis=0,
        )

        device_open[device] = 1 - device_sum

    return device_open
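A minimal usage sketch of _calc_device_open with illustrative blocked fractions for a single device:

import numpy as np

blocked_by_device = {
    "mlc": {
        "left": np.array([1.0, 0.5, 0.0]),
        "right": np.array([0.0, 0.25, 0.0]),
    },
}

print(_calc_device_open(blocked_by_device))
# {'mlc': array([0.  , 0.25, 1.  ])}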
Example #20
def mephysto_absolute_depth_dose(absolute_dose, depth_of_absolute_dose_mm,
                                 distance, relative_dose, scan_curvetype):
    choose_mephysto = scan_curvetype == "PDD"
    if np.sum(choose_mephysto) != 1:
        raise ValueError("Can only handle one PDD per mephysto file")

    mephysto_pdd_depth = distance[choose_mephysto][0]
    mephysto_dose = relative_dose[choose_mephysto][0]

    interpolation = scipy.interpolate.interp1d(mephysto_pdd_depth,
                                               mephysto_dose)

    mephysto_pdd_dose = (mephysto_dose /
                         interpolation(depth_of_absolute_dose_mm) *
                         absolute_dose)

    return mephysto_pdd_depth, mephysto_pdd_dose
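A minimal usage sketch of mephysto_absolute_depth_dose with synthetic scan arrays (shapes, curve types, and values here are illustrative assumptions only):

import numpy as np
import scipy.interpolate

scan_curvetype = np.array(["PDD", "INPLANE_PROFILE"])
depths = np.linspace(0, 100, 101)
distance = np.array([depths, depths])
relative_dose = np.array([100 * np.exp(-depths / 70), np.ones(101)])

depth_mm, dose = mephysto_absolute_depth_dose(
    absolute_dose=100.0,
    depth_of_absolute_dose_mm=10.0,
    distance=distance,
    relative_dose=relative_dose,
    scan_curvetype=scan_curvetype,
)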
Example #21
def get_grid(
    max_leaf_gap=__DEFAULT_MAX_LEAF_GAP,
    grid_resolution=__DEFAULT_GRID_RESOLUTION,
    leaf_pair_widths=__DEFAULT_LEAF_PAIR_WIDTHS,
):
    """Get the MU Density grid for plotting purposes.

    Examples
    --------
    See `pymedphys.mudensity.calculate`_.
    """

    leaf_pair_widths = np.array(leaf_pair_widths)

    grid = dict()

    grid["mlc"] = np.arange(
        -max_leaf_gap / 2, max_leaf_gap / 2 + grid_resolution, grid_resolution
    ).astype("float")

    _, top_of_reference_leaf = _determine_leaf_centres(leaf_pair_widths)
    grid_reference_position = _determine_reference_grid_position(
        top_of_reference_leaf, grid_resolution
    )

    # It might be better to use round instead of ceil here.
    total_leaf_widths = np.sum(leaf_pair_widths)
    top_grid_pos = (
        np.ceil((total_leaf_widths / 2 - grid_reference_position) / grid_resolution)
        * grid_resolution
        + grid_reference_position
    )

    bot_grid_pos = (
        grid_reference_position
        - np.ceil((total_leaf_widths / 2 + grid_reference_position) / grid_resolution)
        * grid_resolution
    )

    grid["jaw"] = np.arange(
        bot_grid_pos, top_grid_pos + grid_resolution, grid_resolution
    )

    return grid
Example #22
def calc_and_merge_logfile_mudensity(filepaths, grid_resolution=1):
    logfile_results = []
    for filepath in filepaths:
        logfile_delivery_data = pymedphys.Delivery.from_logfile(filepath)
        mu_density_results = mu_density_from_delivery_data(
            logfile_delivery_data, grid_resolution=grid_resolution
        )

        logfile_results.append(mu_density_results)

    grid_xx_list = [result[0] for result in logfile_results]
    grid_yy_list = [result[1] for result in logfile_results]

    # assert np.array_equal(*grid_xx_list)
    # assert np.array_equal(*grid_yy_list)

    grid_xx = grid_xx_list[0]
    grid_yy = grid_yy_list[0]

    mu_densities = [result[2] for result in logfile_results]

    logfile_mu_density = np.sum(mu_densities, axis=0)

    return grid_xx, grid_yy, logfile_mu_density
Example #23
def image_analyze(volume, i_opt):
    """Sort the slices of ``volume`` into X field, Y field and field
    rotation stacks by comparing the row and column profiles of a centred
    square crop of each slice."""
    xfield = []
    yfield = []
    rotfield = []

    # The two branches of the user option differ only in the ratio
    # thresholds used to classify each slice
    if i_opt.startswith(("y", "yeah", "yes")):
        upper, lower = 1.1, 0.9
    else:
        upper, lower = 1.5, 0.5

    rows, cols = volume.shape[0], volume.shape[1]
    half_side = np.amin([rows, cols]) / 2
    row_slice = slice(int(rows / 2 - half_side), int(rows / 2 + half_side))
    col_slice = slice(int(cols / 2 - half_side), int(cols / 2 + half_side))

    kx = 0
    ky = 0
    krot = 0
    for item in range(0, volume.shape[2]):
        cropped = volume[row_slice, col_slice, item]
        maxstack1 = np.amax(np.sum(cropped, axis=0))
        maxstack2 = np.amax(np.sum(cropped, axis=1))

        volappend = volume[:, :, item]

        if maxstack2 / maxstack1 > upper:  # It is a Y field folder
            if ky == 0:
                yfield = volappend[:, :, np.newaxis]
            else:
                yfield = np.append(yfield, volappend[:, :, np.newaxis], axis=2)
            ky = ky + 1
        elif maxstack2 / maxstack1 < lower:  # It is a X field folder
            if kx == 0:
                xfield = volappend[:, :, np.newaxis]
            else:
                xfield = np.append(xfield, volappend[:, :, np.newaxis], axis=2)
            kx = kx + 1
        else:  # It is a field rotation folder
            if krot == 0:
                rotfield = volappend[:, :, np.newaxis]
            else:
                rotfield = np.append(rotfield, volappend[:, :, np.newaxis], axis=2)
            krot = krot + 1

    return xfield, yfield, rotfield
Example #24
def minimize_junction_X(amplitude, peaks, peak_type, dx):
    print("Analyzing X jaws...")

    amp_prev = 0
    amp_filt_prev = 0

    fig = plt.figure(figsize=(10, 6))  # create the plot

    kk = 0  # counter for figure generation
    for j in range(0, amplitude.shape[1] - 1):
        for k in range(j + 1,
                       amplitude.shape[1]):  # looping through remaining images
            amp_base_res = signal.convolve(amplitude[:, j],
                                           amplitude[:, j],
                                           mode="full")
            amp_base_res = signal.resample(
                amp_base_res / np.amax(amp_base_res),
                int(np.ceil(len(amp_base_res) / 2)),
            )

            amp_overlay_res = signal.convolve(amplitude[:, k],
                                              amplitude[:, k],
                                              mode="full")
            amp_overlay_res = signal.resample(
                amp_overlay_res / np.amax(amp_overlay_res),
                int(np.ceil(len(amp_overlay_res) / 2)),
            )
            peak1, _ = find_peaks(amp_base_res, prominence=0.5)
            peak2, _ = find_peaks(amp_overlay_res, prominence=0.5)

            if (abs(peak2 - peak1) < 2500
                ):  # if the two peaks are close together proceed to analysis
                kk = kk + 1  # incrementing the figure generator
                cumsum_prev = 1e7
                if peak2 < peak1:  # this guarantees that we always slide the overlay
                    amp_base_res = amplitude[:, k]
                    amp_overlay_res = amplitude[:, j]
                else:
                    amp_base_res = amplitude[:, j]
                    amp_overlay_res = amplitude[:, k]

                if peak_type[j] == 0:
                    inc = -1
                else:
                    inc = 1
                for i in range(0, inc * 80, inc * 1):
                    # x = np.linspace(0, 0 + (len(amp_base_res) * dx), len(amplitude),
                    #                 endpoint=False)  # definition of the distance axis
                    amp_overlay_res_roll = np.roll(amp_overlay_res, i)

                    # amplitude is the vector to analyze ±1000 samples from the center
                    amp_tot = (
                        amp_base_res[peaks[j] - 1000:peaks[j] + 1000] +
                        amp_overlay_res_roll[peaks[j] - 1000:peaks[j] + 1000]
                    )  # sum of the base and shifted overlay profiles
                    # xsel = x[peaks[j] - 1000:peaks[j] + 1000]

                    amp_filt = rm.running_mean(amp_tot, 281)
                    cumsum = np.sum(np.abs(amp_tot - amp_filt))

                    if (  # pylint: disable = no-else-break
                            cumsum > cumsum_prev):  # then we went too far
                        break
                    else:
                        amp_prev = amp_tot
                        amp_filt_prev = amp_filt
                        cumsum_prev = cumsum

                ax = fig.add_subplot(amplitude.shape[1] - 1, 1, kk)
                ax.plot(amp_prev)
                ax.plot(amp_filt_prev)
                if kk == 1:
                    ax.set_title("Minimization result", fontsize=16)
                if (kk == amplitude.shape[1] - 1
                    ):  # if we reach the final plot, add the x-axis label
                    ax.set_xlabel("distance [mm]")

                ax.set_ylabel("amplitude")
                # ax.annotate('delta=' + str(abs(i - inc * 1) * dx) + ' mm', xy=(2, 1), xycoords='axes fraction',
                #             xytext=(.35, .10))
                if peaks[kk - 1] != 0:
                    ax.annotate(
                        "delta=" + str(abs(i - inc * 1) * dx) + " mm",
                        xy=(2, 1),
                        xycoords="axes fraction",
                        xytext=(0.35, 0.10),
                    )
                else:
                    ax.annotate(
                        "delta= 0 mm (NO PEAK FOUND)",
                        xy=(2, 1),
                        xycoords="axes fraction",
                        xytext=(0.35, 0.10),
                    )

    return fig
Example #25
def _orientation_is_head_first(orientation_vector, is_decubitus):
    if is_decubitus:
        return np.abs(np.sum(orientation_vector)) != 2

    return np.abs(np.sum(orientation_vector)) == 2
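A quick check against the standard head-first supine orientation, assuming the input is the six-element DICOM ImageOrientationPatient direction-cosine vector:

import numpy as np

head_first_supine = np.array([1, 0, 0, 0, 1, 0])
print(_orientation_is_head_first(head_first_supine, is_decubitus=False))  # True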
Example #26
def main():
    st.write("""
        # Electron Insert Factors
        """)

    patient_id = st.text_input("Patient ID")

    if patient_id == "":
        st.stop()

    rccc_string_search_pattern = r"\\monacoda\FocalData\RCCC\1~Clinical\*~{}\plan\*\*tel.1".format(
        patient_id)
    rccc_filepath_list = glob(rccc_string_search_pattern)

    nbccc_string_search_pattern = r"\\tunnel-nbcc-monaco\FOCALDATA\NBCCC\1~Clinical\*~{}\plan\*\*tel.1".format(
        patient_id)
    nbccc_filepath_list = glob(nbccc_string_search_pattern)

    sash_string_search_pattern = r"\\tunnel-sash-monaco\Users\Public\Documents\CMS\FocalData\SASH\1~Clinical\*~{}\plan\*\*tel.1".format(
        patient_id)
    sash_filepath_list = glob(sash_string_search_pattern)

    filepath_list = np.concatenate(
        [rccc_filepath_list, nbccc_filepath_list, sash_filepath_list])

    electronmodel_regex = r"RiverinaAgility - (\d+)MeV"
    applicator_regex = r"(\d+)X\d+"

    insert_data = dict()  # type: ignore

    for telfilepath in filepath_list:
        insert_data[telfilepath] = dict()

        with open(telfilepath, "r") as file:
            telfilecontents = np.array(file.read().splitlines())

        insert_data[telfilepath]["reference_index"] = []
        for i, item in enumerate(telfilecontents):
            if re.search(electronmodel_regex, item):
                insert_data[telfilepath]["reference_index"] += [i]

        insert_data[telfilepath]["applicators"] = [
            re.search(applicator_regex,
                      telfilecontents[i + 12]).group(1)  # type: ignore
            for i in insert_data[telfilepath]["reference_index"]
        ]

        insert_data[telfilepath]["energies"] = [
            re.search(electronmodel_regex,
                      telfilecontents[i]).group(1)  # type: ignore
            for i in insert_data[telfilepath]["reference_index"]
        ]

    for telfilepath in filepath_list:
        with open(telfilepath, "r") as file:
            telfilecontents = np.array(file.read().splitlines())

        insert_data[telfilepath]["x"] = []
        insert_data[telfilepath]["y"] = []

        for i, index in enumerate(insert_data[telfilepath]["reference_index"]):
            insert_initial_range = telfilecontents[
                index +
                51::]  # coords start 51 lines after electron model name
            insert_stop = np.where(insert_initial_range == "0")[0][
                0]  # coords stop right before a line containing 0

            insert_coords_string = insert_initial_range[:insert_stop]
            insert_coords = np.fromstring(",".join(insert_coords_string),
                                          sep=",")
            insert_data[telfilepath]["x"].append(insert_coords[0::2] / 10)
            insert_data[telfilepath]["y"].append(insert_coords[1::2] / 10)

    for telfilepath in filepath_list:
        insert_data[telfilepath]["width"] = []
        insert_data[telfilepath]["length"] = []
        insert_data[telfilepath]["circle_centre"] = []
        insert_data[telfilepath]["P/A"] = []

        for i in range(len(insert_data[telfilepath]["reference_index"])):

            width, length, circle_centre = electronfactors.parameterise_insert(
                insert_data[telfilepath]["x"][i],
                insert_data[telfilepath]["y"][i])

            insert_data[telfilepath]["width"].append(width)
            insert_data[telfilepath]["length"].append(length)
            insert_data[telfilepath]["circle_centre"].append(circle_centre)

            insert_data[telfilepath]["P/A"].append(
                electronfactors.convert2_ratio_perim_area(width, length))

    data_filename = r"S:\Physics\RCCC Specific Files\Dosimetry\Elekta_EFacs\electron_factor_measured_data.csv"
    data = pd.read_csv(data_filename)

    width_data = data["Width (cm @ 100SSD)"]
    length_data = data["Length (cm @ 100SSD)"]
    factor_data = data["RCCC Inverse factor (dose open / dose cutout)"]

    p_on_a_data = electronfactors.convert2_ratio_perim_area(
        width_data, length_data)

    for telfilepath in filepath_list:
        insert_data[telfilepath]["model_factor"] = []

        for i in range(len(insert_data[telfilepath]["reference_index"])):
            applicator = float(insert_data[telfilepath]["applicators"][i])
            energy = float(insert_data[telfilepath]["energies"][i])
            ssd = 100

            reference = ((data["Energy (MeV)"] == energy)
                         & (data["Applicator (cm)"] == applicator)
                         & (data["SSD (cm)"] == ssd))

            number_of_measurements = np.sum(reference)

            if number_of_measurements < 8:
                insert_data[telfilepath]["model_factor"].append(np.nan)
            else:
                insert_data[telfilepath]["model_factor"].append(
                    electronfactors.spline_model_with_deformability(
                        insert_data[telfilepath]["width"],
                        insert_data[telfilepath]["P/A"],
                        width_data[reference],
                        p_on_a_data[reference],
                        factor_data[reference],
                    )[0])

    for telfilepath in filepath_list:
        st.write("---")
        st.write("Filepath: `{}`".format(telfilepath))

        for i in range(len(insert_data[telfilepath]["reference_index"])):
            applicator = float(insert_data[telfilepath]["applicators"][i])
            energy = float(insert_data[telfilepath]["energies"][i])
            ssd = 100

            st.write("Applicator: `{} cm` | Energy: `{} MeV`".format(
                applicator, energy))

            width = insert_data[telfilepath]["width"][i]
            length = insert_data[telfilepath]["length"][i]

            plt.figure()
            plot_insert(
                insert_data[telfilepath]["x"][i],
                insert_data[telfilepath]["y"][i],
                insert_data[telfilepath]["width"][i],
                insert_data[telfilepath]["length"][i],
                insert_data[telfilepath]["circle_centre"][i],
            )

            reference = ((data["Energy (MeV)"] == energy)
                         & (data["Applicator (cm)"] == applicator)
                         & (data["SSD (cm)"] == ssd))

            number_of_measurements = np.sum(reference)

            plt.figure()
            if number_of_measurements < 8:
                plt.scatter(
                    width_data[reference],
                    length_data[reference],
                    s=100,
                    c=factor_data[reference],
                    cmap="viridis",
                    zorder=2,
                )
                plt.colorbar()
            else:
                plot_model(
                    width_data[reference],
                    length_data[reference],
                    factor_data[reference],
                )

            reference_data_table = pd.concat(
                [
                    width_data[reference], length_data[reference],
                    factor_data[reference]
                ],
                axis=1,
            )
            reference_data_table.sort_values(
                ["RCCC Inverse factor (dose open / dose cutout)"],
                ascending=False,
                inplace=True,
            )

            st.write(reference_data_table)

            st.pyplot()

            factor = insert_data[telfilepath]["model_factor"][i]

            st.write(
                "Width: `{0:0.2f} cm` | Length: `{1:0.2f} cm` | Factor: `{2:0.3f}`"
                .format(width, length, factor))
Example #27
def minimize_junction_fieldrot(
    amplitude, peaks, peak_type, dx, profilename
):  # junction minimisation for field rotations is done differently given the shape of the fields
    print("Field rotation jaw analysis...")
    # print('number of peaks=', peaks)
    amp_prev = 0
    amp_filt_prev = 0

    fig = plt.figure(figsize=(10, 6))  # create the plot

    kk = 1  # counter for figure generation
    for j in range(0, amplitude.shape[1] - 1):
        for k in range(j + 1,
                       amplitude.shape[1]):  # looping through remaining images
            amp_base_res = signal.convolve(amplitude[:, j],
                                           amplitude[:, j],
                                           mode="full")
            amp_base_res = signal.resample(
                amp_base_res / np.amax(amp_base_res),
                int(np.ceil(len(amp_base_res) / 2)),
            )

            amp_overlay_res = signal.convolve(amplitude[:, k],
                                              amplitude[:, k],
                                              mode="full")
            amp_overlay_res = signal.resample(
                amp_overlay_res / np.amax(amp_overlay_res),
                int(np.ceil(len(amp_overlay_res) / 2)),
            )
            # amp_base_res = signal.savgol_filter(amplitude[:, j], 1001, 3)
            # amp_overlay_res = signal.savgol_filter(amplitude[:, k], 1001, 3)
            # peak1, _ = find_peaks(amp_base_res, prominence=0.5)
            # peak2, _ = find_peaks(amp_overlay_res, prominence=0.5)

            cumsum_prev = 1e7
            amp_base_res = amplitude[:, j]
            amp_overlay_res = amplitude[:, k]

            if peak_type[j] == 0:
                inc = -1
            else:
                inc = 1
            for i in range(0, inc * 80, inc * 1):
                # x = np.linspace(0, 0 + (len(amp_base_res) * dx), len(amplitude),
                #                 endpoint=False)  # definition of the distance axis
                amp_overlay_res_roll = np.roll(amp_overlay_res, i)

                # amplitude is the vector to analyze ±1000 samples from the center
                amp_tot = (
                    amp_base_res[peaks[j] - 1000:peaks[j] + 1000] +
                    amp_overlay_res_roll[peaks[j] - 1000:peaks[j] + 1000]
                )  # sum of the base and shifted overlay profiles
                # xsel = x[peaks[j] - 1000:peaks[j] + 1000]
                amp_filt = rm.running_mean(amp_tot, 281)

                cumsum = np.sum(np.abs(amp_tot - amp_filt))

                if (  # pylint: disable = no-else-break
                        cumsum > cumsum_prev):  # then we went too far
                    ax = fig.add_subplot(amplitude.shape[1] - 1, 1, kk)

                    ax.plot(amp_prev)
                    ax.plot(amp_filt_prev)
                    if kk == 1:
                        ax.set_title("Minimization result - " + profilename,
                                     fontsize=16)
                    if (
                            kk == amplitude.shape[1] - 1
                    ):  # if we reach the final plot, add the x-axis label
                        ax.set_xlabel("distance [mm]")

                    ax.set_ylabel("amplitude")
                    ax.annotate(
                        "delta=" + str(abs(i - inc * 1) * dx) + " mm",
                        xy=(2, 1),
                        xycoords="axes fraction",
                        xytext=(0.35, 0.10),
                    )

                    # plt.show()

                    kk = kk + 1
                    break
                else:
                    amp_prev = amp_tot
                    amp_filt_prev = amp_filt
                    cumsum_prev = cumsum

    return fig
Example #28
    def to_minimise(x):
        a, b, n = x

        cal_fit = create_cal_fit(a, b, n)
        return np.sum((cal_fit(net_od) - dose)**2)
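A hedged sketch of how such a closure is typically driven. In the original, create_cal_fit, net_od and dose come from the enclosing scope; here they are illustrative stand-ins using a common net-optical-density calibration form:

import numpy as np
import scipy.optimize

net_od = np.linspace(0.1, 1.0, 20)
dose = 300 * net_od + 80 * net_od ** 2.5  # synthetic calibration data

def create_cal_fit(a, b, n):
    return lambda od: a * od + b * od ** n

def to_minimise(x):
    a, b, n = x
    cal_fit = create_cal_fit(a, b, n)
    return np.sum((cal_fit(net_od) - dose) ** 2)

result = scipy.optimize.minimize(
    to_minimise, x0=[250.0, 100.0, 2.0], method="Nelder-Mead")
print(result.x)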
Example #29
def calculate_pass_rate(gamma_array):
    valid_gamma = gamma_array[np.invert(np.isnan(gamma_array))]
    percent_pass = 100 * np.sum(valid_gamma < 1) / len(valid_gamma)

    return percent_pass
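A quick check of calculate_pass_rate; NaN entries are excluded before the ratio is taken:

import numpy as np

gamma = np.array([0.2, 0.8, 1.3, np.nan, 0.5])
print(calculate_pass_rate(gamma))  # 75.0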
Example #30
def calc_single_control_point(
    mlc,
    jaw,
    delivered_mu=1,
    leaf_pair_widths=__DEFAULT_LEAF_PAIR_WIDTHS,
    grid_resolution=__DEFAULT_GRID_RESOLUTION,
    min_step_per_pixel=__DEFAULT_MIN_STEP_PER_PIXEL,
):
    """Calculate the MU Density for a single control point.

    Examples
    --------
    >>> from pymedphys._imports import numpy as np
    >>> from pymedphys._mudensity.mudensity import (
    ...     calc_single_control_point, display_mu_density)
    >>>
    >>> leaf_pair_widths = (2, 2)
    >>> mlc = np.array([
    ...     [
    ...         [1, 1],
    ...         [2, 2],
    ...     ],
    ...     [
    ...         [2, 2],
    ...         [3, 3],
    ...     ]
    ... ])
    >>> jaw = np.array([
    ...     [1.5, 1.2],
    ...     [1.5, 1.2]
    ... ])
    >>> grid, mu_density = calc_single_control_point(
    ...     mlc, jaw, leaf_pair_widths=leaf_pair_widths)
    >>> display_mu_density(grid, mu_density)
    >>>
    >>> grid['mlc']
    array([-3., -2., -1.,  0.,  1.,  2.,  3.])
    >>>
    >>> grid['jaw']
    array([-1.5, -0.5,  0.5,  1.5])
    >>>
    >>> np.round(mu_density, 2)
    array([[0.  , 0.07, 0.43, 0.5 , 0.43, 0.07, 0.  ],
           [0.  , 0.14, 0.86, 1.  , 0.86, 0.14, 0.  ],
           [0.14, 0.86, 1.  , 1.  , 1.  , 0.86, 0.14],
           [0.03, 0.17, 0.2 , 0.2 , 0.2 , 0.17, 0.03]])
    """

    leaf_pair_widths = np.array(leaf_pair_widths)
    leaf_division = leaf_pair_widths / grid_resolution

    if not np.all(leaf_division.astype(int) == leaf_division):
        raise ValueError(
            "The grid resolution needs to exactly divide every leaf pair width."
        )

    if (
        not np.max(np.abs(jaw))  # pylint: disable = unneeded-not
        <= np.sum(leaf_pair_widths) / 2
    ):
        raise ValueError(
            "The jaw should not travel further out than the maximum leaf limits. "
            f"Max travel was {np.max(np.abs(jaw))}"
        )

    (grid, grid_leaf_map, mlc) = _determine_calc_grid_and_adjustments(
        mlc, jaw, leaf_pair_widths, grid_resolution
    )

    positions = {
        "mlc": {
            1: (-mlc[0, :, 0], -mlc[1, :, 0]),  # left
            -1: (mlc[0, :, 1], mlc[1, :, 1]),  # right
        },
        "jaw": {
            1: (-jaw[0::-1, 0], -jaw[1::, 0]),  # bot
            -1: (jaw[0::-1, 1], jaw[1::, 1]),  # top
        },
    }

    time_steps = _calc_time_steps(positions, grid_resolution, min_step_per_pixel)
    blocked_by_device = _calc_blocked_by_device(
        grid, positions, grid_resolution, time_steps
    )
    device_open = _calc_device_open(blocked_by_device)
    mlc_open, jaw_open = _remap_mlc_and_jaw(device_open, grid_leaf_map)
    open_fraction = _calc_open_fraction(mlc_open, jaw_open)

    mu_density = open_fraction * delivered_mu

    return grid, mu_density