Example #1
def get_logfile_mosaiq_info(headers):
    machine_centre_map = get_machine_centre_map()
    mosaiq_details = get_mosaiq_details()

    # Map each unique machine ID in the headers to its treatment centre,
    # then look up the Mosaiq server for each centre.
    centres = {
        machine_centre_map[machine_id]
        for machine_id in headers["machine"]
    }
    mosaiq_servers = [mosaiq_details[centre]["server"] for centre in centres]

    details = []

    # One Mosaiq database cursor per server, shared by all header rows
    cursors = {server: get_mosaiq_cursor(server) for server in mosaiq_servers}

    for _, header in headers.iterrows():
        machine_id = header["machine"]
        centre = machine_centre_map[machine_id]
        mosaiq_timezone = mosaiq_details[centre]["timezone"]
        server = mosaiq_details[centre]["server"]
        cursor = cursors[server]

        field_label = header["field_label"]
        field_name = header["field_name"]
        utc_date = header["date"]

        current_details = pmp_index.get_logfile_mosaiq_info(
            cursor, machine_id, utc_date, mosaiq_timezone, field_label,
            field_name)
        current_details = pd.Series(data=current_details)

        details.append(current_details)

    # Stack the per-row Series into a DataFrame with one row per header
    details = pd.concat(details, axis=1).T

    return details
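
The sketch below is a minimal, hypothetical call to get_logfile_mosaiq_info. The column names ("machine", "field_label", "field_name", "date") are taken from the loop above; the values are placeholders, and the call assumes the same module context as the function itself (get_machine_centre_map, get_mosaiq_details and a reachable Mosaiq server).

import pandas as pd

# Hypothetical header rows; the values are placeholders, the column names
# match those read inside the loop above.
headers = pd.DataFrame(
    {
        "machine": ["2619"],
        "field_label": ["1-1"],
        "field_name": ["BEAM 1"],
        "date": ["2020-01-01 03:14:15"],
    }
)

mosaiq_details = get_logfile_mosaiq_info(headers)
print(mosaiq_details)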
Example #2
def trf_input_method(patient_id="", key_namespace="", **_):
    indexed_trf_directory = get_indexed_trf_directory()

    patient_id = st.text_input("Patient ID",
                               patient_id,
                               key=f"{key_namespace}_patient_id")
    # Display the entered patient ID (explicit form of Streamlit's "magic" output)
    st.write(patient_id)

    filepaths = list(
        indexed_trf_directory.glob(f"*/{patient_id}_*/*/*/*/*.trf"))

    raw_timestamps = [
        "_".join(path.parent.name.split("_")[0:2]) for path in filepaths
    ]
    timestamps = list(
        pd.to_datetime(raw_timestamps, format="%Y-%m-%d_%H%M%S").astype(str))

    timestamp_filepath_map = dict(zip(timestamps, filepaths))

    timestamps = sorted(timestamps, reverse=True)

    if len(timestamps) == 0:
        if patient_id != "":
            st.write(
                NoRecordsFound(
                    f"No TRF log file found for patient ID {patient_id}"))
        return {"patient_id": patient_id}

    if len(timestamps) == 1:
        default_timestamp = timestamps[0]
    else:
        default_timestamp = []

    selected_trf_deliveries = st.multiselect(
        "Select TRF delivery timestamp(s)",
        timestamps,
        default=default_timestamp,
        key=f"{key_namespace}_trf_deliveries",
    )

    if not selected_trf_deliveries:
        return {}
    """
    #### TRF filepath(s)
    """

    selected_filepaths = [
        timestamp_filepath_map[timestamp]
        for timestamp in selected_trf_deliveries
    ]
    st.write([str(path.resolve()) for path in selected_filepaths])
    st.write("""
    #### Log file header(s)
    """)

    headers = []
    tables = []
    for path in selected_filepaths:
        header, table = read_trf(path)
        headers.append(header)
        tables.append(table)

    headers = pd.concat(headers)
    headers.reset_index(inplace=True)
    headers.drop("index", axis=1, inplace=True)

    st.write(headers)
    st.write("""
    #### Corresponding Mosaiq SQL Details
    """)

    mosaiq_details = get_logfile_mosaiq_info(headers)
    mosaiq_details = mosaiq_details.drop("beam_completed", axis=1)

    st.write(mosaiq_details)

    patient_names = set()
    for _, row in mosaiq_details.iterrows():
        st.write(row)  # display each Mosaiq record as it is processed
        patient_name = utl_patient.convert_patient_name_from_split(
            row["last_name"], row["first_name"])
        patient_names.add(patient_name)

    patient_name = filter_patient_names(patient_names)

    deliveries = cached_deliveries_loading(tables, delivery_from_trf)

    individual_identifiers = [
        f"{path.parent.parent.parent.parent.name} {path.parent.name}"
        for path in selected_filepaths
    ]

    identifier = f"TRF ({individual_identifiers[0]})"

    return {
        "patient_id": patient_id,
        "patient_name": patient_name,
        "data_paths": selected_filepaths,
        "identifier": identifier,
        "deliveries": deliveries,
    }
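
As a standalone illustration of the timestamp handling above, the sketch below parses delivery timestamps out of hypothetical directory names of the form "<date>_<time>_<suffix>", which is what the split/join and the "%Y-%m-%d_%H%M%S" format string imply; the paths themselves are made up.

import pathlib

import pandas as pd

# Hypothetical indexed TRF paths; only the parent directory name matters here.
filepaths = [
    pathlib.Path("index/rccc/123456_DOE_JOHN/a/b/2020-01-31_091500_2619/delivery.trf"),
    pathlib.Path("index/rccc/123456_DOE_JOHN/a/b/2020-02-01_143000_2619/delivery.trf"),
]

raw_timestamps = ["_".join(path.parent.name.split("_")[0:2]) for path in filepaths]
timestamps = list(
    pd.to_datetime(raw_timestamps, format="%Y-%m-%d_%H%M%S").astype(str))

timestamp_filepath_map = dict(zip(timestamps, filepaths))
print(sorted(timestamps, reverse=True))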
Example #3
File: _trf.py  Project: lc52520/pymedphys
def trf_input_method(patient_id="", key_namespace="", **_):
    """Streamlit GUI method to facilitate TRF data provision.

    Notes
    -----
    TRF files themselves have no innate patient alignment. An option
    for TRF collection is to use the CLI tool
    ``pymedphys trf orchestrate``. This connects to the SAMBA server
    hosted on the Elekta NSS and downloads the diagnostic backup zips.
    It then takes these TRF files and queries the Mosaiq database,
    using the time of delivery to match each file to a patient ID
    (Ident.Pat_ID1) and name.

    As such, all references to patient ID and name within this
    ``trf_input_method`` are actually references to their Mosaiq
    database counterparts.
    """
    FILE_UPLOAD = "File upload"
    INDEXED_TRF_SEARCH = "Search indexed TRF directory"

    import_method = st.radio(
        "TRF import method",
        [FILE_UPLOAD, INDEXED_TRF_SEARCH],
        key=f"{key_namespace}_trf_file_import_method",
    )

    if import_method == FILE_UPLOAD:
        selected_files = st.file_uploader(
            "Upload TRF files",
            key=f"{key_namespace}_trf_file_uploader",
            accept_multiple_files=True,
        )

        if not selected_files:
            return {}

        data_paths = []
        individual_identifiers = ["Uploaded TRF file(s)"]

    if import_method == INDEXED_TRF_SEARCH:
        try:
            indexed_trf_directory = _config.get_indexed_trf_directory()
        except KeyError:
            st.write(
                _exceptions.ConfigMissing(
                    "No indexed TRF directory is configured. Please use "
                    f"'{FILE_UPLOAD}' instead."))
            return {}

        patient_id = st.text_input("Patient ID",
                                   patient_id,
                                   key=f"{key_namespace}_patient_id")
        st.write(patient_id)

        filepaths = list(
            indexed_trf_directory.glob(f"*/{patient_id}_*/*/*/*/*.trf"))

        raw_timestamps = [
            "_".join(path.parent.name.split("_")[0:2]) for path in filepaths
        ]
        timestamps = list(
            pd.to_datetime(raw_timestamps,
                           format="%Y-%m-%d_%H%M%S").astype(str))

        timestamp_filepath_map = dict(zip(timestamps, filepaths))

        timestamps = sorted(timestamps, reverse=True)

        if len(timestamps) == 0:
            if patient_id != "":
                st.write(
                    _exceptions.NoRecordsFound(
                        f"No TRF log file found for patient ID {patient_id}"))
            return {"patient_id": patient_id}

        if len(timestamps) == 1:
            default_timestamp = timestamps[0]
        else:
            default_timestamp = []

        selected_trf_deliveries = st.multiselect(
            "Select TRF delivery timestamp(s)",
            timestamps,
            default=default_timestamp,
            key=f"{key_namespace}_trf_deliveries",
        )

        if not selected_trf_deliveries:
            return {}

        st.write("""
            #### TRF filepath(s)
            """)

        selected_files = [
            timestamp_filepath_map[timestamp]
            for timestamp in selected_trf_deliveries
        ]
        st.write([str(path.resolve()) for path in selected_files])

        individual_identifiers = [
            f"{path.parent.parent.parent.parent.name} {path.parent.name}"
            for path in selected_files
        ]

        data_paths = selected_files

    st.write("""
        #### Log file header(s)
        """)

    headers = []
    tables = []
    for path_or_binary in selected_files:
        try:
            # Uploaded files are in-memory buffers; rewind before reading
            path_or_binary.seek(0)
        except AttributeError:
            # Indexed search returns filesystem paths, which have no seek()
            pass

        header, table = read_trf(path_or_binary)
        headers.append(header)
        tables.append(table)

    headers = pd.concat(headers)
    headers.reset_index(inplace=True)
    headers.drop("index", axis=1, inplace=True)

    st.write(headers)

    deliveries = _deliveries.cached_deliveries_loading(
        tables, _deliveries.delivery_from_trf)

    identifier = f"TRF ({individual_identifiers[0]})"

    patient_name = _attempt_patient_name_from_mosaiq(headers)

    return {
        "site": None,
        "patient_id": patient_id,
        "patient_name": patient_name,
        "data_paths": data_paths,
        "identifier": identifier,
        "deliveries": deliveries,
    }
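
A minimal sketch of how the dictionary returned by trf_input_method might be consumed from a Streamlit page, assuming the function is importable in the page's module; the key_namespace value is arbitrary, and the keys used ("deliveries", "identifier") are those returned above.

import streamlit as st

results = trf_input_method(key_namespace="reference")  # hypothetical namespace

if results.get("deliveries"):
    st.write(
        f"Loaded {len(results['deliveries'])} delivery record(s) "
        f"for {results['identifier']}"
    )
else:
    st.write("Awaiting TRF selection...")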
Example #4
def main():
    st.write("""
        # Electron Insert Factors
        """)

    patient_id = st.text_input("Patient ID")

    if patient_id == "":
        st.stop()

    rccc_string_search_pattern = r"\\monacoda\FocalData\RCCC\1~Clinical\*~{}\plan\*\*tel.1".format(
        patient_id)
    rccc_filepath_list = glob(rccc_string_search_pattern)

    nbccc_string_search_pattern = r"\\tunnel-nbcc-monaco\FOCALDATA\NBCCC\1~Clinical\*~{}\plan\*\*tel.1".format(
        patient_id)
    nbccc_filepath_list = glob(nbccc_string_search_pattern)

    sash_string_search_pattern = r"\\tunnel-sash-monaco\Users\Public\Documents\CMS\FocalData\SASH\1~Clinical\*~{}\plan\*\*tel.1".format(
        patient_id)
    sash_filepath_list = glob(sash_string_search_pattern)

    filepath_list = np.concatenate(
        [rccc_filepath_list, nbccc_filepath_list, sash_filepath_list])

    electronmodel_regex = r"RiverinaAgility - (\d+)MeV"
    applicator_regex = r"(\d+)X\d+"

    insert_data = dict()  # type: ignore

    for telfilepath in filepath_list:
        insert_data[telfilepath] = dict()

        with open(telfilepath, "r") as file:
            telfilecontents = np.array(file.read().splitlines())

        insert_data[telfilepath]["reference_index"] = []
        for i, item in enumerate(telfilecontents):
            if re.search(electronmodel_regex, item):
                insert_data[telfilepath]["reference_index"] += [i]

        insert_data[telfilepath]["applicators"] = [
            re.search(applicator_regex,
                      telfilecontents[i + 12]).group(1)  # type: ignore
            for i in insert_data[telfilepath]["reference_index"]
        ]

        insert_data[telfilepath]["energies"] = [
            re.search(electronmodel_regex,
                      telfilecontents[i]).group(1)  # type: ignore
            for i in insert_data[telfilepath]["reference_index"]
        ]

    for telfilepath in filepath_list:
        with open(telfilepath, "r") as file:
            telfilecontents = np.array(file.read().splitlines())

        insert_data[telfilepath]["x"] = []
        insert_data[telfilepath]["y"] = []

        for index in insert_data[telfilepath]["reference_index"]:
            # Coordinates start 51 lines after the electron model name
            insert_initial_range = telfilecontents[index + 51:]
            # Coordinates stop right before a line containing "0"
            insert_stop = np.where(insert_initial_range == "0")[0][0]

            insert_coords_string = insert_initial_range[:insert_stop]
            insert_coords = np.fromstring(",".join(insert_coords_string),
                                          sep=",")
            insert_data[telfilepath]["x"].append(insert_coords[0::2] / 10)
            insert_data[telfilepath]["y"].append(insert_coords[1::2] / 10)

    for telfilepath in filepath_list:
        insert_data[telfilepath]["width"] = []
        insert_data[telfilepath]["length"] = []
        insert_data[telfilepath]["circle_centre"] = []
        insert_data[telfilepath]["P/A"] = []

        for i in range(len(insert_data[telfilepath]["reference_index"])):

            width, length, circle_centre = electronfactors.parameterise_insert(
                insert_data[telfilepath]["x"][i],
                insert_data[telfilepath]["y"][i])

            insert_data[telfilepath]["width"].append(width)
            insert_data[telfilepath]["length"].append(length)
            insert_data[telfilepath]["circle_centre"].append(circle_centre)

            insert_data[telfilepath]["P/A"].append(
                electronfactors.convert2_ratio_perim_area(width, length))

    data_filename = r"S:\Physics\RCCC Specific Files\Dosimetry\Elekta_EFacs\electron_factor_measured_data.csv"
    data = pd.read_csv(data_filename)

    width_data = data["Width (cm @ 100SSD)"]
    length_data = data["Length (cm @ 100SSD)"]
    factor_data = data["RCCC Inverse factor (dose open / dose cutout)"]

    p_on_a_data = electronfactors.convert2_ratio_perim_area(
        width_data, length_data)

    for telfilepath in filepath_list:
        insert_data[telfilepath]["model_factor"] = []

        for i in range(len(insert_data[telfilepath]["reference_index"])):
            applicator = float(insert_data[telfilepath]["applicators"][i])
            energy = float(insert_data[telfilepath]["energies"][i])
            ssd = 100

            reference = ((data["Energy (MeV)"] == energy)
                         & (data["Applicator (cm)"] == applicator)
                         & (data["SSD (cm)"] == ssd))

            number_of_measurements = np.sum(reference)

            if number_of_measurements < 8:
                insert_data[telfilepath]["model_factor"].append(np.nan)
            else:
                # Model the factor for this specific insert (index i) only,
                # rather than passing every insert's width and P/A at once
                insert_data[telfilepath]["model_factor"].append(
                    electronfactors.spline_model_with_deformability(
                        [insert_data[telfilepath]["width"][i]],
                        [insert_data[telfilepath]["P/A"][i]],
                        width_data[reference],
                        p_on_a_data[reference],
                        factor_data[reference],
                    )[0])

    for telfilepath in filepath_list:
        st.write("---")
        st.write("Filepath: `{}`".format(telfilepath))

        for i in range(len(insert_data[telfilepath]["reference_index"])):
            applicator = float(insert_data[telfilepath]["applicators"][i])
            energy = float(insert_data[telfilepath]["energies"][i])
            ssd = 100

            st.write("Applicator: `{} cm` | Energy: `{} MeV`".format(
                applicator, energy))

            width = insert_data[telfilepath]["width"][i]
            length = insert_data[telfilepath]["length"][i]

            plt.figure()
            plot_insert(
                insert_data[telfilepath]["x"][i],
                insert_data[telfilepath]["y"][i],
                insert_data[telfilepath]["width"][i],
                insert_data[telfilepath]["length"][i],
                insert_data[telfilepath]["circle_centre"][i],
            )

            reference = ((data["Energy (MeV)"] == energy)
                         & (data["Applicator (cm)"] == applicator)
                         & (data["SSD (cm)"] == ssd))

            number_of_measurements = np.sum(reference)

            plt.figure()
            if number_of_measurements < 8:
                plt.scatter(
                    width_data[reference],
                    length_data[reference],
                    s=100,
                    c=factor_data[reference],
                    cmap="viridis",
                    zorder=2,
                )
                plt.colorbar()
            else:
                plot_model(
                    width_data[reference],
                    length_data[reference],
                    factor_data[reference],
                )

            reference_data_table = pd.concat(
                [
                    width_data[reference], length_data[reference],
                    factor_data[reference]
                ],
                axis=1,
            )
            reference_data_table.sort_values(
                ["RCCC Inverse factor (dose open / dose cutout)"],
                ascending=False,
                inplace=True,
            )

            st.write(reference_data_table)

            st.pyplot()

            factor = insert_data[telfilepath]["model_factor"][i]

            st.write(
                "Width: `{0:0.2f} cm` | Length: `{1:0.2f} cm` | Factor: `{2:0.3f}`"
                .format(width, length, factor))
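
The sketch below isolates the insert parameterisation step used in main(), assuming electronfactors is pymedphys' electronfactors module (pymedphys.electronfactors), matching the calls above; the circular insert outline is a made-up example with coordinates already in cm.

import numpy as np
from pymedphys import electronfactors

# Hypothetical circular insert outline, radius 4 cm, centred on the origin
theta = np.linspace(0, 2 * np.pi, 100, endpoint=False)
x = 4 * np.cos(theta)
y = 4 * np.sin(theta)

# Equivalent ellipse parameterisation and perimeter/area ratio, as in main()
width, length, circle_centre = electronfactors.parameterise_insert(x, y)
p_on_a = electronfactors.convert2_ratio_perim_area(width, length)

print(width, length, circle_centre, p_on_a)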