def __init__(self, batch_file_name):
    """
    Create the parser and resolve the full path of the batch csv file.

    :param batch_file_name: the (possibly relative) name of the batch csv file.
    :raises RuntimeError: if the file cannot be found via the Mantid path settings.
    """
    super(BatchCsvParser, self).__init__()
    # Resolve the full file path through the Mantid search directories
    self._batch_file_name = find_full_file_path(batch_file_name)
    if not self._batch_file_name:
        # Note the trailing space: the two literals are concatenated implicitly
        raise RuntimeError("batch_csv_file_parser: Could not find specified batch file. Make sure it is available "
                           "in the Mantid path settings.")
Beispiel #2
0
def apply_calibration(calibration_file_name, workspaces, monitor_workspaces, use_loaded, publish_to_ads, parent_alg):
    """
    Apply (tube) calibration to scatter workspaces and corresponding monitor workspaces.

    :param calibration_file_name: the file name of the calibration file.
    :param workspaces: a map with scatter workspaces for sample and can
    :param monitor_workspaces: a map with scatter monitor workspaces for sample and can
    :param use_loaded: if calibration file from ADS is to be used (if it exists)
    :param publish_to_ads: if the calibration file should be published to the ADS
    :param parent_alg: a handle to the parent algorithm
    :return:
    """
    full_file_path = find_full_file_path(calibration_file_name)

    # Only the sample and can scatter entries are eligible for calibration
    scatter_types = (SANSDataType.SampleScatter, SANSDataType.CanScatter)

    # Calibrate whichever scatter workspaces are present
    scatter_to_calibrate = {data_type: workspaces[data_type]
                            for data_type in scatter_types if data_type in workspaces}
    do_apply_calibration(full_file_path, scatter_to_calibrate, use_loaded, publish_to_ads, parent_alg)

    # Calibrate the corresponding monitor workspaces that are present
    monitors_to_calibrate = {data_type: monitor_workspaces[data_type]
                             for data_type in scatter_types if data_type in monitor_workspaces}
    do_apply_calibration(full_file_path, monitors_to_calibrate, use_loaded, publish_to_ads, parent_alg)
Beispiel #3
0
    def parse_batch_file(self, batch_file_name):
        """
        Parses the batch csv file and returns the elements in a parsed form

        :param batch_file_name: the (possibly relative) name of the batch csv file.
        :return: a list of parsed csv rows.
        :raises RuntimeError: if the batch file cannot be found via the Mantid path settings.
        """
        # Get the full file path
        batch_file_name = find_full_file_path(batch_file_name)
        if not batch_file_name:
            # Trailing space added: the two literals are concatenated implicitly
            raise RuntimeError(
                "batch_csv_file_parser: Could not find specified batch file. Make sure it is available "
                "in the Mantid path settings.")

        parsed_rows = []

        with open(batch_file_name, 'r') as csvfile:
            batch_reader = reader(csvfile, delimiter=",")
            read_rows = list(batch_reader)

        for row_number, row in enumerate(read_rows):
            # Skip empty rows entirely
            if not row:
                continue

            # Skip comment rows (first cell starts with a comment keyword)
            key = row[0].strip()
            if any(key.startswith(i) for i in self.COMMENT_KEWORDS):
                continue

            # Else we perform a parse of the row
            parsed_row = self._parse_csv_row(row, row_number)
            parsed_rows.append(parsed_row)
        return parsed_rows
Beispiel #4
0
def apply_calibration(calibration_file_name, workspaces, monitor_workspaces, use_loaded, publish_to_ads, parent_alg):
    """
    Apply (tube) calibration to scatter workspaces and corresponding monitor workspaces.

    :param calibration_file_name: the file name of the calibration file.
    :param workspaces: a map with scatter workspaces for sample and can
    :param monitor_workspaces: a map with scatter monitor workspaces for sample and can
    :param use_loaded: if calibration file from ADS is to be used (if it exists)
    :param publish_to_ads: if the calibration file should be published to the ADS
    :param parent_alg: a handle to the parent algorithm
    :return:
    """
    full_file_path = find_full_file_path(calibration_file_name)

    # Only sample and can scatter data take part in the calibration
    relevant_types = (SANSDataType.SAMPLE_SCATTER, SANSDataType.CAN_SCATTER)

    # Calibrate whichever scatter workspaces are present
    scatter_map = {key: workspaces[key] for key in relevant_types if key in workspaces}
    do_apply_calibration(full_file_path, scatter_map, use_loaded, publish_to_ads, parent_alg)

    # Calibrate the matching monitor workspaces that are present
    monitor_map = {key: monitor_workspaces[key] for key in relevant_types if key in monitor_workspaces}
    do_apply_calibration(full_file_path, monitor_map, use_loaded, publish_to_ads, parent_alg)
Beispiel #5
0
 def __init__(self, batch_file_name):
     """
     Create the parser and resolve the full path of the batch csv file.

     :param batch_file_name: the (possibly relative) name of the batch csv file.
     :raises RuntimeError: if the file cannot be found via the Mantid path settings.
     """
     super(BatchCsvParser, self).__init__()
     # Resolve the full file path through the Mantid search directories
     self._batch_file_name = find_full_file_path(batch_file_name)
     if not self._batch_file_name:
         # Note the trailing space: the two literals are concatenated implicitly
         raise RuntimeError("batch_csv_file_parser: Could not find specified batch file. Make sure it is available "
                            "in the Mantid path settings.")
def SetDetectorFloodFile(filename, detector_name="REAR"):
    """
    Sets the pixel correction file for a particular detector

    @param filename: the name of the file.
    @param detector_name: the name of the detector
    """
    full_path = find_full_file_path(filename)
    detector_type = convert_bank_name_to_detector_type_isis(detector_name)
    # Queue the flood-file command on the director
    command = NParameterCommand(command_id=NParameterCommandId.flood_file,
                                values=[full_path, detector_type])
    director.add_command(command)
Beispiel #7
0
def SetDetectorFloodFile(filename, detector_name="REAR"):
    """
    Sets the pixel correction file for a particular detector

    @param filename: the name of the file.
    @param detector_name: the name of the detector
    """
    resolved_path = find_full_file_path(filename)
    detector = convert_bank_name_to_detector_type_isis(detector_name)
    # Queue the flood-file command on the director
    director.add_command(NParameterCommand(command_id=NParameterCommandId.FLOOD_FILE,
                                           values=[resolved_path, detector]))
Beispiel #8
0
def mask_with_mask_files(mask_info, workspace):
    """
    Apply mask files to the workspace

    Rolling our own MaskDetectors wrapper since masking is broken in a couple
    of places that affect us here.
    Calling MaskDetectors(Workspace=ws_name, MaskedWorkspace=mask_ws_name) is
    not something we can do because the algorithm masks by ws index rather than
    detector id, and unfortunately for SANS the detector table is not the same
    for MaskingWorkspaces as it is for the workspaces containing the data to be
    masked.  Basically, we get a mirror image of what we expect.  Instead, we
    have to extract the det IDs and use those via the DetectorList property.
    :param mask_info: a SANSStateMask object.
    :param workspace: the workspace to be masked.
    :return: the masked workspace.
    """
    mask_files = mask_info.mask_files
    if not mask_files:
        return workspace

    # Loader that turns a mask file into a masking workspace
    load_options = {"Instrument": mask_info.idf_path, "OutputWorkspace": EMPTY_NAME}
    load_alg = create_unmanaged_algorithm("LoadMask", **load_options)
    common_params = {"OutputWorkspace": EMPTY_NAME}
    mask_alg = create_unmanaged_algorithm("MaskInstrument", **common_params)
    clear_alg = create_unmanaged_algorithm("ClearMaskedSpectra", **common_params)

    for mask_file in mask_files:
        resolved_path = find_full_file_path(mask_file)

        # a) Extract the detector ids to mask from the mask file. MaskDetectors
        # with a MaskedWorkspace is avoided since it does not support MPI.
        load_alg.setProperty("InputFile", resolved_path)
        load_alg.execute()
        masking_workspace = load_alg.getProperty("OutputWorkspace").value
        det_ids = masking_workspace.getMaskedDetectors()

        # b) Mask those detector ids on the instrument
        mask_alg.setProperty("InputWorkspace", workspace)
        mask_alg.setProperty("OutputWorkspace", workspace)
        mask_alg.setProperty("DetectorIDs", det_ids)
        mask_alg.execute()
        workspace = mask_alg.getProperty("OutputWorkspace").value

    # c) Clear the data in all spectra associated with masked detectors
    clear_alg.setProperty("InputWorkspace", workspace)
    clear_alg.setProperty("OutputWorkspace", workspace)
    clear_alg.execute()
    return clear_alg.getProperty("OutputWorkspace").value
Beispiel #9
0
def mask_with_mask_files(mask_info, inst_info, workspace):
    """
    Apply mask files to the workspace

    Rolling our own MaskDetectors wrapper since masking is broken in a couple
    of places that affect us here.
    Calling MaskDetectors(Workspace=ws_name, MaskedWorkspace=mask_ws_name) is
    not something we can do because the algorithm masks by ws index rather than
    detector id, and unfortunately for SANS the detector table is not the same
    for MaskingWorkspaces as it is for the workspaces containing the data to be
    masked.  Basically, we get a mirror image of what we expect.  Instead, we
    have to extract the det IDs and use those via the DetectorList property.
    :param mask_info: a SANSStateMask object.
    :param inst_info: the instrument information (provides the IDF path).
    :param workspace: the workspace to be masked.
    :return: the masked workspace.
    """
    mask_files = mask_info.mask_files
    if not mask_files:
        return workspace

    # Resolve every mask file up front so missing files fail loudly
    # (find_full_file_path returns an empty string when a file is absent)
    resolved = [(name, find_full_file_path(name)) for name in mask_files]
    missing = [name for name, path in resolved if not path]
    if missing:
        err_str = "The following mask files are missing:"
        err_str += "\n".join(missing)
        raise FileNotFoundError(err_str)

    load_options = {"Instrument": inst_info.idf_path, "OutputWorkspace": EMPTY_NAME}
    load_alg = create_unmanaged_algorithm("LoadMask", **load_options)
    mask_alg = create_unmanaged_algorithm("MaskDetectors")

    for _, mask_path in resolved:
        # a) Extract the detector ids to mask from the mask file. MaskDetectors
        # with a MaskedWorkspace directly is avoided since it does not support MPI.
        load_alg.setProperty("InputFile", mask_path)
        load_alg.execute()
        masking_workspace = load_alg.getProperty("OutputWorkspace").value
        det_ids = masking_workspace.getMaskedDetectors()
        # b) Mask the detector ids on the instrument
        mask_alg.setProperty("Workspace", workspace)
        mask_alg.setProperty("DetectorList", det_ids)
        mask_alg.execute()
        workspace = mask_alg.getProperty("Workspace").value
    return workspace
Beispiel #10
0
def MaskFile(file_name):
    """
    Loads the user file (note that mask file is the legacy description user file)

    @param file_name: path to the user file.
    """
    print_message('#Opening "' + file_name + '"')

    # Resolve the full file path and queue the user-file command
    full_path = find_full_file_path(file_name)
    director.add_command(NParameterCommand(command_id=NParameterCommandId.user_file,
                                           values=[full_path]))
Beispiel #11
0
def mask_with_mask_files(mask_info, workspace):
    """
    Apply mask files to the workspace

    Rolling our own MaskDetectors wrapper since masking is broken in a couple
    of places that affect us here.
    Calling MaskDetectors(Workspace=ws_name, MaskedWorkspace=mask_ws_name) is
    not something we can do because the algorithm masks by ws index rather than
    detector id, and unfortunately for SANS the detector table is not the same
    for MaskingWorkspaces as it is for the workspaces containing the data to be
    masked.  Basically, we get a mirror image of what we expect.  Instead, we
    have to extract the det IDs and use those via the DetectorList property.
    :param mask_info: a SANSStateMask object.
    :param workspace: the workspace to be masked.
    :return: the masked workspace.
    """
    mask_files = mask_info.mask_files
    if not mask_files:
        return workspace

    # Loader that turns a mask file into a masking workspace
    loader = create_unmanaged_algorithm("LoadMask",
                                        **{"Instrument": mask_info.idf_path,
                                           "OutputWorkspace": EMPTY_NAME})
    shared_params = {"OutputWorkspace": EMPTY_NAME}
    masker = create_unmanaged_algorithm("MaskInstrument", **shared_params)
    clearer = create_unmanaged_algorithm("ClearMaskedSpectra", **shared_params)

    for entry in mask_files:
        mask_path = find_full_file_path(entry)

        # a) Extract the detector ids to mask from the mask file. Using
        # MaskDetectors with a MaskedWorkspace is avoided: no MPI support.
        loader.setProperty("InputFile", mask_path)
        loader.execute()
        det_ids = loader.getProperty("OutputWorkspace").value.getMaskedDetectors()

        # b) Mask those detector ids on the instrument
        masker.setProperty("InputWorkspace", workspace)
        masker.setProperty("OutputWorkspace", workspace)
        masker.setProperty("DetectorIDs", det_ids)
        masker.execute()
        workspace = masker.getProperty("OutputWorkspace").value

    # c) Clear the data in all spectra associated with masked detectors
    clearer.setProperty("InputWorkspace", workspace)
    clearer.setProperty("OutputWorkspace", workspace)
    clearer.execute()
    return clearer.getProperty("OutputWorkspace").value
Beispiel #12
0
def MaskFile(file_name):
    """
    Loads the user file (note that mask file is the legacy description user file)

    @param file_name: path to the user file.
    """
    print_message('#Opening "' + file_name + '"')

    # Resolve the full path, then queue the user-file command on the director
    resolved = find_full_file_path(file_name)
    command = NParameterCommand(command_id=NParameterCommandId.user_file, values=[resolved])
    director.add_command(command)
Beispiel #13
0
    def validate(self):
        """
        Validate the mask state, collecting every offending entry.

        :raises ValueError: if any of the mask settings are inconsistent.
        """
        is_invalid = dict()

        # --------------------
        # Radius Mask
        # --------------------
        # Radius mask rule: the min radius must be less or equal to the max radius
        if self.radius_max is not None and self.radius_min is not None and\
           self.radius_max != -1 and self.radius_min != -1:  # noqa
            if self.radius_min > 0 and self.radius_max > 0 and (
                    self.radius_min > self.radius_max):
                entry = validation_message(
                    "Incorrect radius bounds.",
                    "Makes sure that the lower radius bound is smaller than the"
                    " upper radius bound.", {
                        "radius_min": self.radius_min,
                        "radius_max": self.radius_max
                    })
                is_invalid.update(entry)

        # --------------------
        # General bin mask
        # --------------------
        range_check(self.bin_mask_general_start, self.bin_mask_general_stop,
                    is_invalid, "bin_mask_general_start",
                    "bin_mask_general_stop", "bin_mask_general")

        # --------------------
        # Mask files
        # --------------------
        if self.mask_files:
            for mask_file in self.mask_files:
                if not find_full_file_path(mask_file):
                    # Report the single file that was not found (not the whole
                    # list, which would obscure the offending entry)
                    entry = validation_message(
                        "Mask file not found.",
                        "Makes sure that the mask file is in your path",
                        {"mask_file": mask_file})
                    is_invalid.update(entry)

        # --------------------
        # Detectors
        # --------------------
        for _, value in list(self.detectors.items()):
            value.validate()

        if is_invalid:
            raise ValueError("StateMask: The provided inputs are illegal. "
                             "Please see: {0}".format(json.dumps(is_invalid)))
Beispiel #14
0
def MaskFile(file_name):
    """
    Loads the user file (note that mask file is the legacy description user file)

    @param file_name: path to the user file.
    """
    if not file_name:
        raise ValueError("An empty filename was passed to MaskFile")

    # Prefer the path as given; otherwise search the Mantid directories for it
    if os.path.exists(file_name):
        file_path = file_name
    else:
        file_path = find_full_file_path(file_name)

    if not file_path or not os.path.isfile(file_path):
        raise FileNotFoundError("Could not find MaskFile: {0}".format(file_name))

    print_message('#Opening "' + file_path + '"')
    director.add_command(NParameterCommand(command_id=NParameterCommandId.USER_FILE,
                                           values=[file_path]))
Beispiel #15
0
def SetCorrectionFile(bank, filename):
    # 10/03/15 RKH: allows changing the "direct beam file" (= correction file)
    # for a given detector, which simplifies the iterative process used to
    # adjust it. The file name must still change each iteration to stop Mantid
    # using a cached version, but a single user (=mask) file then suffices per
    # set of iterations. Modelled on SetDetectorOffsets above.
    """
        @param bank: Must be either 'front' or 'rear' (not case sensitive)
        @param filename: self explanatory
    """
    print_message("SetCorrectionFile(" + str(bank) + ', ' + filename + ')')
    detector = convert_bank_name_to_detector_type_isis(bank)
    full_path = find_full_file_path(filename)
    # Queue the wavelength-correction command on the director
    director.add_command(NParameterCommand(command_id=NParameterCommandId.WAVELENGTH_CORRECTION_FILE,
                                           values=[full_path, detector]))
Beispiel #16
0
def SetCorrectionFile(bank, filename):
    # 10/03/15 RKH: new routine allowing the "direct beam file" (= correction
    # file) of a given detector to be changed, simplifying the iterative
    # adjustment process. The file name still has to change every iteration so
    # Mantid does not reuse a cached version, but one user (=mask) file then
    # serves a whole set of iterations. Modelled on SetDetectorOffsets above.
    """
        @param bank: Must be either 'front' or 'rear' (not case sensitive)
        @param filename: self explanatory
    """
    print_message("SetCorrectionFile(" + str(bank) + ', ' + filename + ')')
    bank_detector = convert_bank_name_to_detector_type_isis(bank)
    resolved_path = find_full_file_path(filename)
    # Queue the wavelength-correction command on the director
    correction_command = NParameterCommand(command_id=NParameterCommandId.wavelength_correction_file,
                                           values=[resolved_path, bank_detector])
    director.add_command(correction_command)
Beispiel #17
0
def use_cached_workspaces_from_ads(file_information, is_transmission, period,
                                   calibration_file_name):
    """
    Use cached workspaces from the ADS. This goes through the workspaces on the ADS and check on their sample logs
    if there is an entry called sans_original_file_name and

    This optimization uses already loaded workspaces from the ADS.
    :param file_information: a SANSFileInformation object.
    :param is_transmission: true if the workspaces are of transmission type
    :param period: the selected period.
    :param calibration_file_name: the name of the calibration file
    :return: a list of workspaces and a list of monitors loaded from the ADS.
    """
    if calibration_file_name:
        full_calibration_file_path = find_full_file_path(calibration_file_name)
    else:
        full_calibration_file_path = ""

    workspaces = []
    workspace_monitors = []

    # Collect the expected sans_original_workspace tag entries and pull any
    # matching workspaces off the ADS
    file_tags = get_expected_file_tags(file_information, is_transmission, period)
    get_workspaces_from_ads_if_exist(file_tags, full_calibration_file_path, workspaces)

    if not is_transmission:
        monitor_tags = [tag + MONITOR_SUFFIX for tag in file_tags]
        get_workspaces_from_ads_if_exist(monitor_tags, full_calibration_file_path,
                                         workspace_monitors)

    # Only full loading from the ADS is allowed: either every required workspace
    # (including all children of a multi-period file) is found there, or
    # everything is reloaded. For scatter data, incomplete monitors also force a
    # reload of the regular workspaces.
    if not has_loaded_correctly_from_ads(file_information, workspaces, period):
        workspaces = []
    if not is_transmission and not has_loaded_correctly_from_ads(
            file_information, workspace_monitors, period):
        workspaces = []
        workspace_monitors = []

    return workspaces, workspace_monitors
Beispiel #18
0
def mask_with_mask_files(mask_info, workspace):
    """
    Apply mask files to the workspace

    Rolling our own MaskDetectors wrapper since masking is broken in a couple
    of places that affect us here.
    Calling MaskDetectors(Workspace=ws_name, MaskedWorkspace=mask_ws_name) is
    not something we can do because the algorithm masks by ws index rather than
    detector id, and unfortunately for SANS the detector table is not the same
    for MaskingWorkspaces as it is for the workspaces containing the data to be
    masked.  Basically, we get a mirror image of what we expect.  Instead, we
    have to extract the det IDs and use those via the DetectorList property.
    :param mask_info: a SANSStateMask object.
    :param workspace: the workspace to be masked.
    :return: the masked workspace.
    """
    mask_files = mask_info.mask_files
    if not mask_files:
        return workspace

    # Loader that turns a mask file into a masking workspace
    load_alg = create_unmanaged_algorithm("LoadMask",
                                          **{"Instrument": mask_info.idf_path,
                                             "OutputWorkspace": EMPTY_NAME})
    # Masker which applies the masking workspace to the data workspace
    mask_alg = create_unmanaged_algorithm("MaskDetectors",
                                          **{"ForceInstrumentMasking": True})

    for mask_file in mask_files:
        resolved_path = find_full_file_path(mask_file)

        # Load the mask file into a masking workspace
        load_alg.setProperty("InputFile", resolved_path)
        load_alg.execute()
        masking_workspace = load_alg.getProperty("OutputWorkspace").value

        # Mask the detector ids on the original workspace
        mask_alg.setProperty("Workspace", workspace)
        mask_alg.setProperty("MaskedWorkspace", masking_workspace)
        mask_alg.execute()
        workspace = mask_alg.getProperty("Workspace").value
    return workspace
Beispiel #19
0
    def validate(self):
        """
        Validate the mask state, collecting every offending entry.

        :raises ValueError: if any of the mask settings are inconsistent.
        """
        is_invalid = dict()

        # --------------------
        # Radius Mask
        # --------------------
        # Radius mask rule: the min radius must be less or equal to the max radius
        if self.radius_max is not None and self.radius_min is not None and\
           self.radius_max != -1 and self.radius_min != -1:  # noqa
            if self.radius_min > 0 and self.radius_max > 0 and (self.radius_min > self.radius_max):
                entry = validation_message("Incorrect radius bounds.",
                                           "Makes sure that the lower radius bound is smaller than the"
                                           " upper radius bound.",
                                           {"radius_min": self.radius_min,
                                            "radius_max": self.radius_max})
                is_invalid.update(entry)

        # --------------------
        # General bin mask
        # --------------------
        range_check(self.bin_mask_general_start, self.bin_mask_general_stop,
                    is_invalid, "bin_mask_general_start", "bin_mask_general_stop", "bin_mask_general")

        # --------------------
        # Mask files
        # --------------------
        if self.mask_files:
            for mask_file in self.mask_files:
                if not find_full_file_path(mask_file):
                    # Report the single file that was not found (not the whole
                    # list, which would obscure the offending entry)
                    entry = validation_message("Mask file not found.",
                                               "Makes sure that the mask file is in your path",
                                               {"mask_file": mask_file})
                    is_invalid.update(entry)

        # --------------------
        # Detectors
        # --------------------
        for _, value in list(self.detectors.items()):
            value.validate()

        if is_invalid:
            raise ValueError("StateMask: The provided inputs are illegal. "
                             "Please see: {0}".format(json.dumps(is_invalid)))
Beispiel #20
0
def use_cached_workspaces_from_ads(file_information,  is_transmission,  period, calibration_file_name):
    """
    Use cached workspaces from the ADS. This goes through the workspaces on the ADS and check on their sample logs
    if there is an entry called sans_original_file_name and

    This optimization uses already loaded workspaces from the ADS.
    :param file_information: a SANSFileInformation object.
    :param is_transmission: true if the workspaces are of transmission type
    :param period: the selected period.
    :param calibration_file_name: the name of the calibration file
    :return: a list of workspaces and a list of monitors loaded from the ADS.
    """
    full_calibration_file_path = ""
    if calibration_file_name:
        full_calibration_file_path = find_full_file_path(calibration_file_name)

    workspaces = []
    workspace_monitors = []

    # Collect the expected sans_original_workspace tag entries and pull any
    # matching workspaces off the ADS
    file_tags = get_expected_file_tags(file_information, is_transmission, period)
    get_workspaces_from_ads_if_exist(file_tags, full_calibration_file_path, workspaces)

    if not is_transmission:
        monitor_tags = [tag + MONITOR_SUFFIX for tag in file_tags]
        get_workspaces_from_ads_if_exist(monitor_tags, full_calibration_file_path, workspace_monitors)

    # Only full loading from the ADS is allowed: either every required workspace
    # (including all children of a multi-period file) is found there, or
    # everything is reloaded. For scatter data, incomplete monitors also force a
    # reload of the regular workspaces.
    if not has_loaded_correctly_from_ads(file_information, workspaces, period):
        workspaces = []
    if not is_transmission and not has_loaded_correctly_from_ads(file_information, workspace_monitors, period):
        workspaces = []
        workspace_monitors = []

    return workspaces, workspace_monitors
Beispiel #21
0
 def __init__(self, user_file):
     super(UserFileReader, self).__init__()
     self._user_file = find_full_file_path(user_file)