def create_bb_points_function(bb_diameter):
    """Build a function that generates BB search points about a centre.

    Samples concentric 2-D shells from the centre out to the BB radius and
    returns a closure that translates those shell points to any candidate
    BB centre, together with the shell distance of every point.
    """
    radius = bb_diameter * 0.5
    num_steps = 11
    # Spacing between successive shells; also used as the in-shell spacing.
    shell_spacing = (radius - 0) / num_steps
    shell_distances = np.arange(0, radius + shell_spacing, shell_spacing)

    x_coords = []
    y_coords = []
    dist = []
    for shell_distance in shell_distances:
        shell_x, shell_y = pymedphys._utilities.createshells.calculate_coordinates_shell_2d(  # pylint: disable = protected-access
            shell_distance, shell_spacing)
        x_coords.append(shell_x)
        y_coords.append(shell_y)
        dist.append(shell_distance * np.ones_like(shell_x))

    x_coords = np.concatenate(x_coords)
    y_coords = np.concatenate(y_coords)
    dist = np.concatenate(dist)

    def points_to_check(bb_centre):
        """Return the shell points translated to surround ``bb_centre``."""
        return x_coords + bb_centre[0], y_coords + bb_centre[1]

    return points_to_check, dist
def pcolormesh_grid(x, y, grid_resolution=None):
    """Convert pixel-centre axes into the cell-edge axes pcolormesh expects.

    Parameters
    ----------
    x, y
        1-D arrays of evenly spaced pixel-centre coordinates.
    grid_resolution
        Optional spacing. When ``None`` it is inferred from ``x``/``y``,
        which must then be uniformly spaced to within 1e-12.

    Returns
    -------
    new_x, new_y
        Cell-edge coordinate arrays, each one element longer than its input.
    """
    if grid_resolution is None:
        spacings = np.hstack([np.diff(x), np.diff(y)])
        # A single resolution only makes sense on a uniform grid.
        assert np.all(np.abs(spacings - spacings[0]) < 10 ** -12)
        grid_resolution = spacings[0]

    half_step = grid_resolution / 2
    edges_x = np.concatenate([x - half_step, [x[-1] + half_step]])
    edges_y = np.concatenate([y - half_step, [y[-1] + half_step]])

    return edges_x, edges_y
def collimation_to_bipolar_mm(mlc_a, mlc_b, coll_y1, coll_y2):
    """Convert Mosaiq collimation values (cm) into bipolar millimetres.

    The leaf banks are flipped so leaf ordering matches the bipolar
    convention, scaled from cm to mm, and stacked with bank B positive and
    bank A negated. The jaws are stacked the same way, Y2 positive and Y1
    negated.
    """
    bank_b_mm = 10 * mlc_b[::-1, :]
    bank_a_mm = -10 * mlc_a[::-1, :]
    mlc = np.concatenate([bank_b_mm[None, :, :], bank_a_mm[None, :, :]], axis=0)

    jaw_y2_mm = 10 * coll_y2
    jaw_y1_mm = -10 * coll_y1
    jaw = np.concatenate([jaw_y2_mm[None, :], jaw_y1_mm[None, :]], axis=0)

    return mlc, jaw
def multi_thresholds_gamma_calc(
    options: GammaInternalFixedOptions,
    current_gamma,
    min_relative_dose_difference,
    distance,
    to_be_checked,
):
    """Update the running gamma for every dose/distance threshold pair.

    Computes the gamma value achievable at the current search ``distance``
    for each combination of dose and distance thresholds, keeps the
    element-wise minimum against ``current_gamma`` for the points still
    being checked, and reports which points still need searching (a point
    can stop once its gamma can no longer improve with larger distances).
    """
    dose_term = (
        min_relative_dose_difference[:, None, None]
        / (options.dose_percent_threshold[None, :, None] / 100))
    distance_term = distance / options.distance_mm_threshold[None, None, :]
    gamma_at_distance = np.sqrt(dose_term**2 + distance_term**2)

    stacked = np.concatenate(
        [
            gamma_at_distance[None, :, :, :],
            current_gamma[None, to_be_checked, :, :],
        ],
        axis=0,
    )
    current_gamma[to_be_checked, :, :] = np.min(stacked, axis=0)

    # Once the distance term alone exceeds the best gamma found so far,
    # searching further out cannot improve the result.
    still_searching_for_gamma = current_gamma > distance_term

    if options.skip_once_passed:
        still_searching_for_gamma = still_searching_for_gamma & (current_gamma >= 1)

    return current_gamma, still_searching_for_gamma
def batch_process(image_paths, edge_lengths, bb_diameter=8, penumbra=2, display_figure=True):
    """Locate the BB and field for every image and tabulate the results.

    Returns a DataFrame with columns "BB x", "BB y", "Field x", "Field y"
    and "Rotation", one row per image path. When ``display_figure`` is
    True the per-image figure is shown as each image is processed.
    """
    rows = []
    for image_path in image_paths:
        bb_centre, field_centre, field_rotation = iview_find_bb_and_field(
            image_path,
            edge_lengths,
            bb_diameter=bb_diameter,
            penumbra=penumbra,
            display_figure=display_figure,
        )
        rows.append((bb_centre, field_centre, field_rotation))

        if display_figure:
            plt.show()

    bb_centres = [row[0] for row in rows]
    field_centres = [row[1] for row in rows]
    field_rotations = [row[2] for row in rows]

    data = np.concatenate(
        [bb_centres, field_centres, np.array(field_rotations)[:, None]], axis=1)

    return pd.DataFrame(
        data=data, columns=["BB x", "BB y", "Field x", "Field y", "Rotation"])
def _each_edge(current_edge_length, orthogonal_edge_length):
    """Build lookup coordinates bracketing both sides of one field edge.

    NOTE(review): relies on ``penumbra_range`` from the enclosing scope.
    """
    quarter_span = orthogonal_edge_length / 4
    half_field_range = np.linspace(-quarter_span, quarter_span, 51)

    half_edge = current_edge_length / 2
    # Sample the penumbra region around both the -edge/2 and +edge/2 sides.
    current_axis_lookup = np.concatenate(
        [penumbra_range - half_edge, penumbra_range + half_edge])

    return current_axis_lookup, half_field_range
def plot_model(width_data, length_data, factor_data):
    """Plot measured insert factors against the fitted factor model.

    Scatter points show the measurements; labelled contours show the model.
    Both share a single colour scale spanning the combined value range.
    """
    model_width, model_length, model_factor = create_transformed_mesh(
        width_data, length_data, factor_data)

    combined = np.concatenate([model_factor.ravel(), factor_data.ravel()])
    vmin = np.nanmin(combined)
    vmax = np.nanmax(combined)

    plt.scatter(
        width_data,
        length_data,
        s=100,
        c=factor_data,
        cmap="viridis",
        vmin=vmin,
        vmax=vmax,
        zorder=2,
    )
    plt.colorbar()

    cs = plt.contour(
        model_width, model_length, model_factor, 20, vmin=vmin, vmax=vmax)
    plt.clabel(cs, cs.levels[::2], inline=True)

    plt.title("Insert model")
    plt.xlabel("width (cm)")
    plt.ylabel("length (cm)")
def merge(self: DeliveryGeneric, *args: DeliveryGeneric) -> DeliveryGeneric:
    """Concatenate this delivery with others into a single delivery.

    Every field of each delivery is concatenated in argument order. The
    monitor-unit axis is then rebuilt as a cumulative sum with any negative
    jumps (introduced at the join points) zeroed out, so it remains
    monotonically non-decreasing.
    """
    cls = type(self)
    deliveries: List[DeliveryGeneric] = [self, *args]

    collection: Dict[str, Tuple] = {}
    for delivery in deliveries:
        for field in delivery._fields:  # pylint: disable=no-member
            value = getattr(delivery, field)
            if field in collection:
                collection[field] = np.concatenate(
                    [collection[field], value], axis=0)
            else:
                collection[field] = value

    # Rebuild monitor units: clamp negative increments, then re-accumulate.
    mu = np.concatenate([[0], np.diff(collection["monitor_units"])])
    mu[mu < 0] = 0
    collection["monitor_units"] = np.cumsum(mu)

    return cls(**collection)
def add_shells_to_ref_coords(axes_reference_to_be_checked, coordinates_at_distance_shell):
    """Offset every reference coordinate by every shell coordinate.

    Produces, per axis, every reference coordinate shifted by every shell
    offset for the current search distance. The result has shape
    ``(num_shell_points, num_reference_points, num_axes)``.
    """
    per_axis = [
        np.array(ref_coord[None, :] + shell_coord[:, None])[:, :, None]
        for shell_coord, ref_coord in zip(
            coordinates_at_distance_shell, axes_reference_to_be_checked)
    ]
    return np.concatenate(per_axis, axis=2)
def _calc_device_open(blocked_by_device): device_open = {} for device, value in blocked_by_device.items(): device_sum = np.sum( np.concatenate( [np.expand_dims(blocked, axis=0) for _, blocked in value.items()], axis=0, ), axis=0, ) device_open[device] = 1 - device_sum return device_open
def delivery_from_icom_stream(icom_stream):
    """Parse an iCOM stream into delivery axes.

    Returns ``(mu, gantry, collimator, mlc, jaw)``. The monitor units are
    re-accumulated so that they never decrease.
    """
    data_points = extract.get_data_points(icom_stream)
    delivery_raw = [get_delivery_data_items(point) for point in data_points]

    raw_mu = np.array([item[0] for item in delivery_raw])
    # Clamp negative MU jumps before re-accumulating into a monotone axis.
    diff_mu = np.concatenate([[0], np.diff(raw_mu)])
    diff_mu[diff_mu < 0] = 0
    mu = np.cumsum(diff_mu)

    gantry = np.array([item[1] for item in delivery_raw])
    collimator = np.array([item[2] for item in delivery_raw])
    mlc = np.array([item[3] for item in delivery_raw])
    jaw = np.array([item[4] for item in delivery_raw])

    return mu, gantry, collimator, mlc, jaw
def _from_mosaiq_base(cls, cursor, field_id):
    """Build a delivery object from Mosaiq TxField / TxFieldPoint SQL data.

    Parameters
    ----------
    cursor
        An open Mosaiq database cursor.
    field_id
        The Mosaiq field identifier to fetch.

    Returns
    -------
    A ``cls`` instance holding monitor units, gantry, collimator, MLC and
    jaw axes, with collimation in the bipolar millimetre convention.
    """
    txfield_results, txfieldpoint_results = fetch_and_verify_mosaiq_sql(
        cursor, field_id)

    total_mu = np.array(txfield_results[0]).astype(float)
    cumulative_percentage_mu = txfieldpoint_results[:, 0].astype(float)

    if np.shape(cumulative_percentage_mu) == ():
        # A single control point only: treat it as a 0 -> total_mu ramp.
        mu_per_control_point = [0, total_mu]
    else:
        # Convert cumulative percentage MU into per-control-point increments.
        cumulative_mu = cumulative_percentage_mu * total_mu / 100
        mu_per_control_point = np.concatenate([[0], np.diff(cumulative_mu)])

    monitor_units = np.cumsum(mu_per_control_point).tolist()

    # Leaf positions come back as encoded byte strings; decode each bank.
    mlc_a = np.squeeze(
        decode_msq_mlc(txfieldpoint_results[:, 1].astype(bytes))).T
    mlc_b = np.squeeze(
        decode_msq_mlc(txfieldpoint_results[:, 2].astype(bytes))).T

    msq_gantry_angle = txfieldpoint_results[:, 3].astype(float)
    msq_collimator_angle = txfieldpoint_results[:, 4].astype(float)

    coll_y1 = txfieldpoint_results[:, 5].astype(float)
    coll_y2 = txfieldpoint_results[:, 6].astype(float)

    mlc, jaw = collimation_to_bipolar_mm(mlc_a, mlc_b, coll_y1, coll_y2)
    gantry = convert_IEC_angle_to_bipolar(msq_gantry_angle)
    collimator = convert_IEC_angle_to_bipolar(msq_collimator_angle)

    # TODO Tidy up this axis swap
    mlc = np.swapaxes(mlc, 0, 2)
    jaw = np.swapaxes(jaw, 0, 1)

    mosaiq_delivery_data = cls(monitor_units, gantry, collimator, mlc, jaw)

    return mosaiq_delivery_data
def _gantry_angle_masks(self, gantry_angles, gantry_tol, allow_missing_angles=False):
    """Return one boolean mask per requested gantry angle.

    Each mask selects the control points whose gantry angle is within
    ``gantry_tol`` of the corresponding entry of ``gantry_angles``.

    Raises
    ------
    ValueError
        If any single angle matches more than one contiguous run of
        control points (duplicate gantry angles are not yet supported).
    AssertionError
        If, unless ``allow_missing_angles`` is True, some control points
        are not captured by exactly one mask.
    """
    masks = [
        self._gantry_angle_mask(gantry_angle, gantry_tol)
        for gantry_angle in gantry_angles
    ]
    for mask in masks:
        if np.all(mask == 0):
            continue

        # TODO: Apply mask by more than just gantry angle to appropriately
        # extract beam index even when multiple beams have the same gantry
        # angle
        # A single contiguous run of 1s padded with 0s has exactly two
        # 0<->1 transitions; more than two means the angle matched
        # multiple separate runs (i.e. duplicate gantry angles).
        is_duplicate_gantry_angles = (np.sum(
            np.abs(np.diff(np.concatenate([[0], mask, [0]])))) != 2)

        if is_duplicate_gantry_angles:
            raise ValueError("Duplicate gantry angles not yet supported")

    try:
        # Every control point must be claimed by exactly one mask.
        assert np.all(np.sum(masks, axis=0) == 1), (
            "Not all beams were captured by the gantry tolerance of "
            " {}".format(gantry_tol))
    except AssertionError:
        if not allow_missing_angles:
            # Report which recorded angles fell outside every tolerance
            # band before re-raising.
            print("Allowable gantry angles = {}".format(gantry_angles))
            gantry = np.array(self.gantry, copy=False)
            out_of_tolerance = np.unique(
                gantry[np.sum(masks, axis=0) == 0]).tolist()
            print("The gantry angles out of tolerance were {}".format(
                out_of_tolerance))

            raise

    return masks
def get_mosaiq_delivery_data_bygantry(mosaiq_delivery_data):
    """Split Mosaiq delivery data into per-gantry-angle chunks.

    For every unique gantry angle the monitor units are re-accumulated
    from zero, so each chunk's "mu" starts at 0 and carries only the MU
    delivered at that angle. The matching "mlc" and "jaw" control points
    are included alongside.
    """
    mu = np.array(mosaiq_delivery_data.monitor_units)
    mlc = np.array(mosaiq_delivery_data.mlc)
    jaw = np.array(mosaiq_delivery_data.jaw)
    gantry_angles = np.array(mosaiq_delivery_data.gantry)

    # Per-control-point MU increments; identical for every angle, so
    # computed once outside the loop.
    mu_increments = np.concatenate([[0], np.diff(mu)])

    split_by_gantry = {}
    for gantry_angle in np.unique(gantry_angles):
        at_this_angle = gantry_angles == gantry_angle
        split_by_gantry[gantry_angle] = {
            "mu": np.cumsum(mu_increments[at_this_angle]),
            "mlc": mlc[at_this_angle],
            "jaw": jaw[at_this_angle],
        }

    return split_by_gantry
def align_cube_to_structure(
    structure_name: str,
    dcm_struct: pydicom.dataset.FileDataset,
    quiet=False,
    niter=10,
    x0=None,
):
    """Align a cube to a dicom structure set.

    Designed to allow arbitrary references frames within a dicom file
    to be extracted via contouring a cube.

    Parameters
    ----------
    structure_name
        The DICOM label of the cube structure
    dcm_struct
        The pydicom reference to the DICOM structure file.
    quiet : ``bool``
        Tell the function to not print anything. Defaults to False.
    niter : ``int``
        The number of basin-hopping iterations passed through to
        ``scipy.optimize.basinhopping``. Defaults to 10.
    x0 : ``np.ndarray``, optional
        A 3x3 array with each row defining a 3-D point in space.
        These three points are used as initial conditions to search for
        a cube that fits the contours. Choosing initial values close to
        the structure set, and in the desired orientation will allow
        consistent results. See examples within
        `pymedphys.experimental.cubify`_ on what the effects of each of
        the three points are on the resulting cube. By default, this
        parameter is defined using the min/max values of the contour
        structure.

    Returns
    -------
    cube_definition_array
        Four 3-D points the define the vertices of the cube.
    vectors
        The vectors between the points that can be used to traverse the cube.

    Examples
    --------
    >>> import numpy as np
    >>> import pydicom
    >>> import pymedphys
    >>> from pymedphys.experimental import align_cube_to_structure
    >>>
    >>> struct_path = str(pymedphys.data_path('example_structures.dcm'))
    >>> dcm_struct = pydicom.dcmread(struct_path, force=True)
    >>> structure_name = 'ANT Box'
    >>> cube_definition_array, vectors = align_cube_to_structure(
    ...     structure_name, dcm_struct, quiet=True, niter=1)
    >>> np.round(cube_definition_array)
    array([[-266.,  -31.,   43.],
           [-266.,   29.,   42.],
           [-207.,  -31.,   33.],
           [-276.,  -31.,  -16.]])
    >>>
    >>> np.round(vectors, 1)
    array([[  0.7,  59.9,  -0.5],
           [ 59.2,  -0.7,  -9.7],
           [ -9.7,  -0.4, -59.2]])
    """

    contours = pull_structure(structure_name, dcm_struct)
    contour_points = contour_to_points(contours)

    def to_minimise(cube):
        # Cost: sum of squared minimum distances from the contour points
        # to the candidate cube defined by the flattened 9-vector ``cube``.
        cube_definition = cubify(
            [tuple(cube[0:3]), tuple(cube[3:6]), tuple(cube[6::])])
        min_dist_squared = calc_min_distance(cube_definition, contour_points)
        return np.sum(min_dist_squared)

    if x0 is None:
        # Derive a default starting cube from the bounding extents of the
        # contour data, one (min, max) pair per coordinate axis.
        concatenated_contours = [
            np.concatenate(contour_coord) for contour_coord in contours
        ]

        bounds = [
            (np.min(concatenated_contour), np.max(concatenated_contour))
            for concatenated_contour in concatenated_contours
        ]

        # NOTE(review): the bounds index ordering below mixes axes 0 and 1 —
        # presumably matching the contour axis ordering used by
        # ``contour_to_points``; confirm before changing.
        x0 = np.array(
            [
                (bounds[1][0], bounds[0][0], bounds[2][1]),
                (bounds[1][0], bounds[0][1], bounds[2][1]),
                (bounds[1][1], bounds[0][0], bounds[2][1]),
            ]
        )

    if quiet:

        def print_fun(x, f, accepted):  # pylint: disable = unused-argument
            pass

    else:

        def print_fun(x, f, accepted):  # pylint: disable = unused-argument
            print("at minimum %.4f accepted %d" % (f, int(accepted)))

    result = basinhopping(
        to_minimise, x0, callback=print_fun, niter=niter, stepsize=5)

    cube = result.x

    cube_definition = cubify(
        [tuple(cube[0:3]), tuple(cube[3:6]), tuple(cube[6::])])

    cube_definition_array = np.array(
        [np.array(list(item)) for item in cube_definition])

    # Edge vectors from vertex 0 to each of the other three vertices.
    vectors = [
        cube_definition_array[1] - cube_definition_array[0],
        cube_definition_array[2] - cube_definition_array[0],
        cube_definition_array[3] - cube_definition_array[0],
    ]

    return cube_definition_array, vectors
def contour_to_points(contours):
    """Resample a contour set and merge it into a single point array.

    NOTE(review): the first two contour axes are swapped before resampling —
    presumably converting between axis-ordering conventions; confirm
    against ``resample_contour_set``.
    """
    reordered = [contours[1], contours[0], contours[2]]
    resampled_contours = resample_contour_set(reordered)
    return np.concatenate(resampled_contours, axis=1)
def peak_find(ampl_resamp, dx):
    """Locate junction peaks between every pair of resampled field profiles.

    For each pair of columns in ``ampl_resamp`` (assumed to be one profile
    per column — TODO confirm), an autocorrelation-based peak of each
    profile bounds the search window; the summed pair is then smoothed and
    the most prominent positive or negative peak inside the window is taken
    as the junction position. A diagnostic figure is produced per pair.

    Parameters
    ----------
    ampl_resamp
        2-D array of resampled amplitude profiles, one per column.
    dx
        Sample spacing used to construct the distance axis
        (divided by 10 — presumably converting units; verify).

    Returns
    -------
    peaks, peak_type, peak_figs
        Junction sample indices, 1 for a positive / 0 for a negative (or
        missing) peak, and the matplotlib figures produced.
    """
    peak_figs = []
    peaks = []
    peak_type = []
    for j in range(0, ampl_resamp.shape[1] - 1):
        # Autocorrelate the base profile and halve its length so its peak
        # index is comparable with the original sampling.
        amp_base_res = signal.convolve(
            ampl_resamp[:, j], ampl_resamp[:, j], mode="full")
        amp_base_res = signal.resample(
            amp_base_res / np.amax(amp_base_res),
            int(np.ceil(len(amp_base_res) / 2)))
        for k in range(j + 1, ampl_resamp.shape[1]):
            # Same autocorrelation treatment for the overlay profile.
            amp_overlay_res = signal.convolve(
                ampl_resamp[:, k], ampl_resamp[:, k], mode="full")
            amp_overlay_res = signal.resample(
                amp_overlay_res / np.amax(amp_overlay_res),
                int(np.ceil(len(amp_overlay_res) / 2)),
            )
            # amp_overlay_res = signal.savgol_filter(ampl_resamp[:, k], 1501, 1)
            peak1, _ = find_peaks(amp_base_res, prominence=0.5)
            peak2, _ = find_peaks(amp_overlay_res, prominence=0.5)

            if (
                abs(peak2 - peak1) < 2500
            ):  # if the two peaks are separated the two fields are not adjacent.
                amp_peak = ampl_resamp[:, j] + ampl_resamp[:, k]
                x = np.linspace(
                    0, 0 + (len(amp_peak) * dx / 10), len(amp_peak),
                    endpoint=False)  # definition of the distance axis

                # Search for peaks of the smoothed sum, restricted to the
                # window between the two autocorrelation peaks.
                peak_pos, _ = find_peaks(
                    signal.savgol_filter(
                        amp_peak[min(peak1[0], peak2[0]):max(peak1[0], peak2[0])],
                        201,
                        3,
                    ),
                    prominence=0.010,
                )
                pos_prominence = signal.peak_prominences(
                    signal.savgol_filter(
                        amp_peak[min(peak1[0], peak2[0]):max(peak1[0], peak2[0])],
                        201,
                        3,
                    ),
                    peak_pos,
                )
                # print('#peaks pos det=', len(peak_pos), peak_pos)
                # print('#pos peaks prominence=', pos_prominence[0])

                # Repeat on the negated profile to find valleys.
                peak_neg, _ = find_peaks(
                    signal.savgol_filter(
                        -amp_peak[min(peak1[0], peak2[0]):max(peak1[0], peak2[0])],
                        201,
                        3,
                    ),
                    prominence=0.010,
                )
                neg_prominence = signal.peak_prominences(
                    signal.savgol_filter(
                        -amp_peak[min(peak1[0], peak2[0]):max(peak1[0], peak2[0])],
                        201,
                        3,
                    ),
                    peak_neg,
                )
                # print('#peaks neg det=',len(peak_neg),peak_neg)
                # print('#neg peaks prominence=', neg_prominence[0])

                # we now need to select the peak with the largest prominence,
                # positive or negative; we add all the peaks and prominences
                # together
                peaks_all = np.concatenate((peak_pos, peak_neg), axis=None)
                prom_all = np.concatenate(
                    (pos_prominence[0], neg_prominence[0]), axis=None)
                # print('all peaks',peaks_all,prom_all)

                if peaks_all.size != 0:
                    # Most prominent candidate wins; record whether it was
                    # a positive peak (1) or a negative one (0), offset back
                    # into full-profile coordinates.
                    peak = peaks_all[np.argmax(prom_all)]
                    if peak in peak_pos:
                        peak_type.append(1)
                        peaks.append(min(peak1[0], peak2[0]) + peak)
                        # print('pos peak')
                    elif peak in peak_neg:
                        peak_type.append(0)
                        peaks.append(min(peak1[0], peak2[0]) + peak)
                        # print('neg peak')

                    fig = plt.figure(figsize=(10, 6))
                    plt.plot(x, amp_peak, label="Total amplitude profile")
                    plt.plot(
                        x[min(peak1[0], peak2[0]) + peak],
                        amp_peak[min(peak1[0], peak2[0]) + peak],
                        "x",
                        label="Peaks detected",
                    )
                    plt.ylabel("amplitude [a.u.]")
                    plt.xlabel("distance [mm]")
                    plt.legend()
                    fig.suptitle("Junctions", fontsize=16)
                    peak_figs.append(fig)

                elif peaks_all.size == 0:
                    # No junction found: record a sentinel 0 position/type.
                    peaks.append(0)
                    peak_type.append(0)
                    print("no peak has been found")
                    fig = plt.figure(figsize=(10, 6))
                    plt.plot(x, amp_peak, label="Total amplitude profile")
                    # plt.plot(x[min(peak1[0], peak2[0]) + peak], amp_peak[min(peak1[0], peak2[0]) + peak], "x",
                    #          label='Peaks detected')
                    plt.ylabel("amplitude [a.u.]")
                    plt.xlabel("distance [mm]")
                    plt.legend()
                    fig.suptitle("Junctions", fontsize=16)
                    peak_figs.append(fig)

            # else:
            #     print(j, k, 'the data is not contiguous finding another curve in dataset')

    # print('peaks_here=',peaks)
    return peaks, peak_type, peak_figs
def main():
    """Streamlit app: compute electron insert factors for a patient's plans.

    Searches three Monaco network shares for the patient's ``tel.1`` plan
    files, extracts each electron insert's shape, parameterises it, and
    compares against measured factor data, rendering plots and tables.
    """
    st.write("""
        # Electron Insert Factors
        """)

    patient_id = st.text_input("Patient ID")

    if patient_id == "":
        st.stop()

    # Look for the patient's tel.1 plan files on each of the three centres'
    # Monaco shares.
    rccc_string_search_pattern = r"\\monacoda\FocalData\RCCC\1~Clinical\*~{}\plan\*\*tel.1".format(
        patient_id)
    rccc_filepath_list = glob(rccc_string_search_pattern)

    nbccc_string_search_pattern = r"\\tunnel-nbcc-monaco\FOCALDATA\NBCCC\1~Clinical\*~{}\plan\*\*tel.1".format(
        patient_id)
    nbccc_filepath_list = glob(nbccc_string_search_pattern)

    sash_string_search_pattern = r"\\tunnel-sash-monaco\Users\Public\Documents\CMS\FocalData\SASH\1~Clinical\*~{}\plan\*\*tel.1".format(
        patient_id)
    sash_filepath_list = glob(sash_string_search_pattern)

    filepath_list = np.concatenate(
        [rccc_filepath_list, nbccc_filepath_list, sash_filepath_list])

    electronmodel_regex = r"RiverinaAgility - (\d+)MeV"
    applicator_regex = r"(\d+)X\d+"

    insert_data = dict()  # type: ignore

    # Pass 1: find each electron-model reference line in every tel file and
    # read off the applicator size and beam energy near it.
    for telfilepath in filepath_list:
        insert_data[telfilepath] = dict()

        with open(telfilepath, "r") as file:
            telfilecontents = np.array(file.read().splitlines())

        insert_data[telfilepath]["reference_index"] = []
        for i, item in enumerate(telfilecontents):
            if re.search(electronmodel_regex, item):
                insert_data[telfilepath]["reference_index"] += [i]

        insert_data[telfilepath]["applicators"] = [
            re.search(applicator_regex,
                      telfilecontents[i + 12]).group(1)  # type: ignore
            for i in insert_data[telfilepath]["reference_index"]
        ]

        insert_data[telfilepath]["energies"] = [
            re.search(electronmodel_regex,
                      telfilecontents[i]).group(1)  # type: ignore
            for i in insert_data[telfilepath]["reference_index"]
        ]

    # Pass 2: extract the insert outline coordinates following each
    # reference line.
    for telfilepath in filepath_list:
        with open(telfilepath, "r") as file:
            telfilecontents = np.array(file.read().splitlines())

        insert_data[telfilepath]["x"] = []
        insert_data[telfilepath]["y"] = []

        for i, index in enumerate(insert_data[telfilepath]["reference_index"]):
            insert_initial_range = telfilecontents[
                index + 51::]  # coords start 51 lines after electron model name

            insert_stop = np.where(insert_initial_range == "0")[0][
                0]  # coords stop right before a line containing 0

            insert_coords_string = insert_initial_range[:insert_stop]
            # NOTE(review): np.fromstring (text mode) is deprecated in
            # recent numpy; consider np.fromiter / manual parsing.
            insert_coords = np.fromstring(",".join(insert_coords_string), sep=",")
            # Interleaved x,y values in mm -> cm (presumably; verify units).
            insert_data[telfilepath]["x"].append(insert_coords[0::2] / 10)
            insert_data[telfilepath]["y"].append(insert_coords[1::2] / 10)

    # Pass 3: parameterise each insert shape (width, length, P/A ratio).
    for telfilepath in filepath_list:
        insert_data[telfilepath]["width"] = []
        insert_data[telfilepath]["length"] = []
        insert_data[telfilepath]["circle_centre"] = []
        insert_data[telfilepath]["P/A"] = []

        for i in range(len(insert_data[telfilepath]["reference_index"])):
            width, length, circle_centre = electronfactors.parameterise_insert(
                insert_data[telfilepath]["x"][i],
                insert_data[telfilepath]["y"][i])

            insert_data[telfilepath]["width"].append(width)
            insert_data[telfilepath]["length"].append(length)
            insert_data[telfilepath]["circle_centre"].append(circle_centre)

            insert_data[telfilepath]["P/A"].append(
                electronfactors.convert2_ratio_perim_area(width, length))

    # Load the measured factor dataset used to fit the model.
    data_filename = r"S:\Physics\RCCC Specific Files\Dosimetry\Elekta_EFacs\electron_factor_measured_data.csv"
    data = pd.read_csv(data_filename)

    width_data = data["Width (cm @ 100SSD)"]
    length_data = data["Length (cm @ 100SSD)"]
    factor_data = data["RCCC Inverse factor (dose open / dose cutout)"]

    p_on_a_data = electronfactors.convert2_ratio_perim_area(
        width_data, length_data)

    # Pass 4: model the factor for each insert, provided enough (>= 8)
    # matching measurements exist for the energy/applicator/SSD combination.
    for telfilepath in filepath_list:
        insert_data[telfilepath]["model_factor"] = []

        for i in range(len(insert_data[telfilepath]["reference_index"])):
            applicator = float(insert_data[telfilepath]["applicators"][i])
            energy = float(insert_data[telfilepath]["energies"][i])
            ssd = 100

            reference = ((data["Energy (MeV)"] == energy)
                         & (data["Applicator (cm)"] == applicator)
                         & (data["SSD (cm)"] == ssd))

            number_of_measurements = np.sum(reference)

            if number_of_measurements < 8:
                insert_data[telfilepath]["model_factor"].append(np.nan)
            else:
                insert_data[telfilepath]["model_factor"].append(
                    electronfactors.spline_model_with_deformability(
                        insert_data[telfilepath]["width"],
                        insert_data[telfilepath]["P/A"],
                        width_data[reference],
                        p_on_a_data[reference],
                        factor_data[reference],
                    )[0])

    # Pass 5: render results per file and per insert.
    for telfilepath in filepath_list:
        st.write("---")
        st.write("Filepath: `{}`".format(telfilepath))

        for i in range(len(insert_data[telfilepath]["reference_index"])):
            applicator = float(insert_data[telfilepath]["applicators"][i])
            energy = float(insert_data[telfilepath]["energies"][i])
            ssd = 100

            st.write("Applicator: `{} cm` | Energy: `{} MeV`".format(
                applicator, energy))

            width = insert_data[telfilepath]["width"][i]
            length = insert_data[telfilepath]["length"][i]

            plt.figure()
            plot_insert(
                insert_data[telfilepath]["x"][i],
                insert_data[telfilepath]["y"][i],
                insert_data[telfilepath]["width"][i],
                insert_data[telfilepath]["length"][i],
                insert_data[telfilepath]["circle_centre"][i],
            )

            reference = ((data["Energy (MeV)"] == energy)
                         & (data["Applicator (cm)"] == applicator)
                         & (data["SSD (cm)"] == ssd))

            number_of_measurements = np.sum(reference)

            plt.figure()
            if number_of_measurements < 8:
                # Too few measurements for a model: show raw scatter only.
                plt.scatter(
                    width_data[reference],
                    length_data[reference],
                    s=100,
                    c=factor_data[reference],
                    cmap="viridis",
                    zorder=2,
                )
                plt.colorbar()
            else:
                plot_model(
                    width_data[reference],
                    length_data[reference],
                    factor_data[reference],
                )

            reference_data_table = pd.concat(
                [
                    width_data[reference],
                    length_data[reference],
                    factor_data[reference]
                ],
                axis=1,
            )
            reference_data_table.sort_values(
                ["RCCC Inverse factor (dose open / dose cutout)"],
                ascending=False,
                inplace=True,
            )
            st.write(reference_data_table)
            st.pyplot()

            factor = insert_data[telfilepath]["model_factor"][i]

            st.write(
                "Width: `{0:0.2f} cm` | Length: `{1:0.2f} cm` | Factor: `{2:0.3f}`"
                .format(width, length, factor))