Example #1
 def test_that_non_matching_workspaces_are_detected(self):
     # Arrange
     front_name = "front"
     rear_name = "rear"
     result_name = "result"
     x1 = [1, 2, 3]
     e1 = [1, 1]
     y1 = [2, 2]
     dx1 = [1., 2., 3.]
     x2 = [1, 2, 3, 4]
     e2 = [1, 1, 1]
     y2 = [2, 2, 2]
     dx2 = [1., 2., 3., 4.]
     provide_workspace_with_x_errors(front_name, True, 1, x1, y1, e1, dx1)
     provide_workspace_with_x_errors(rear_name, True, 1, x2, y2, e2, dx2)
     provide_workspace_with_x_errors(result_name, False, 1)
     front = mtd[front_name]
     rear = mtd[rear_name]
     result = mtd[result_name]
     scale = 2.
     # Act
     su.correct_q_resolution_for_merged(front, rear, result, scale)
     # Assert
     self.assertFalse(result.hasDx(0))
     # Clean up
     DeleteWorkspace(front)
     DeleteWorkspace(rear)
     DeleteWorkspace(result)
Example #2
    def test_monitors_are_renamed_correctly(self):
        # Arrange
        ws_1 = CreateSampleWorkspace()
        ws_2 = CreateSampleWorkspace()
        ws_3 = CreateSampleWorkspace()

        ws_mon_1 = CreateSampleWorkspace()
        ws_mon_2 = CreateSampleWorkspace()
        ws_mon_3 = CreateSampleWorkspace()

        ws_group = GroupWorkspaces(InputWorkspaces=[ws_1, ws_2, ws_3])
        ws_mon_group = GroupWorkspaces(InputWorkspaces=[ws_mon_1, ws_mon_2, ws_mon_3])

        # Act
        su.rename_monitors_for_multiperiod_event_data(ws_mon_group, ws_group, self.monitor_appendix)

        # Assert
        self.assertEqual(ws_mon_1.name(), ws_1.name() + self.monitor_appendix, "Monitors should be renamed to xxxx_monitors")
        self.assertEqual(ws_mon_2.name(), ws_2.name() + self.monitor_appendix, "Monitors should be renamed to xxxx_monitors")
        self.assertEqual(ws_mon_3.name(), ws_3.name() + self.monitor_appendix, "Monitors should be renamed to xxxx_monitors")

        # Clean up
        for element in mtd.getObjectNames():
            if element in mtd:
                DeleteWorkspace(element)
Example #3
    def test_removes_zero_errors_correctly(self):
        # Arrange
        ws_name = 'test'
        ws_type = 'Histogram'
        self._setup_workspace(ws_name, ws_type)
        ws = mtd[ws_name]

        # Act and Assert
        errors = ws.dataE
        self.assertTrue(errors(0)[0] == 0.0)
        self.assertTrue(errors(1)[0] != 0.0)
        self.assertTrue(errors(2)[0] == 0.0)
        self.assertTrue(errors(3)[0] != 0.0)

        su.remove_zero_errors_from_workspace(ws)

        self.assertTrue(errors(0)[0] == su.ZERO_ERROR_DEFAULT)
        self.assertTrue(errors(1)[0] != 0.0)
        self.assertTrue(errors(1)[0] != su.ZERO_ERROR_DEFAULT)
        self.assertTrue(errors(2)[0] == su.ZERO_ERROR_DEFAULT)
        self.assertTrue(errors(3)[0] != 0.0)
        self.assertTrue(errors(3)[0] != su.ZERO_ERROR_DEFAULT)

        self._removeWorkspace(ws_name)
        self.assertTrue(ws_name not in mtd)
Example #4
    def _match_IDF(self, run):
        '''
        Compares the IDF in the stored instrument with the IDF in the workspace.
        If they are the same, all is well. If they differ, then load the appropriate
        user file.
        @param run: name of the run for which the file is to be extracted
        '''
        # We need the instrument name and the measurement time to determine
        # the IDF
        measurement_time = None
        instrument_name = self.get_instrument_name()
        # We need to be able to handle file-based and workspace-based queries
        # If we have a workspace we look at the end time, else we
        # need a sophisticated extraction mechanism
        if isinstance(run, Workspace):
            ws = None
            if isinstance(run, WorkspaceGroup):
                # Just look at the first element in a workspace group
                ws = run[0]
            else:
                ws = run
            measurement_time = str(ws.getRun().endTime()).strip()
        else:
            if run is None or run == "":
                return
            measurement_time = su.get_measurement_time_from_file(run)

        # Get the path to the instrument definition file
        idf_path_workspace = ExperimentInfo.getInstrumentFilename(instrument_name, measurement_time)
        idf_path_workspace = os.path.normpath(idf_path_workspace)

        # Get the idf from the reducer
        idf_path_reducer = self.get_idf_file_path()
        idf_path_reducer = os.path.normpath(idf_path_reducer)

        # Now check if both IDF paths and the underlying files are identical.
        # If they are, then don't do anything, else switch the underlying instrument
        if idf_path_reducer == idf_path_workspace and su.are_two_files_identical(idf_path_reducer, idf_path_workspace):
            return
        else:
            logger.notice("Updating the IDF of the Reducer. Switching from " +
                          str(idf_path_reducer) + " to " + str(idf_path_workspace))
            idf_path = os.path.basename(idf_path_workspace)
            instrument = self._get_correct_instrument(instrument_name, idf_path)

            # Get detector of the old instrument
            old_instrument = self.get_instrument()
            old_detector_selection = old_instrument.get_detector_selection()

            if instrument is not None:
                self.set_instrument(instrument)

                # We need to update the instrument, by reloading the user file.
                # This is pretty bad, but looking at the reducer architecture this
                # seems to be the only reasonable way to do this.
                self.user_settings.execute(self)

                # Now we set the correct detector, this is also being done in the GUI
                self.get_instrument().setDetector(old_detector_selection)
Example #5
    def set_run(self, run, reload, period, reducer):

        super(Can, self).set_run(run, reload, period, reducer)

        # currently, no slices will be applied to Can #8535
        for period in reversed(range(self.loader.periods_in_file)):
            self.loader.move2ws(period)
            name = self.loader.wksp_name
            if su.isEventWorkspace(name):
                su.fromEvent2Histogram(mtd[name], self.get_monitor())
Example #7
def delete_cloned_workspaces(save_names_dict):
    """
        If there are cloned workspaces in the workspace map, then they are deleted
        @param save_names_dict: a workspace name map
    """
    to_delete = []
    for key in save_names_dict:
        if key != save_names_dict[key]:
            to_delete.append(save_names_dict[key])
    for element in to_delete:
        su.delete_zero_error_free_workspace(input_workspace_name=element)
Example #9
 def test_converts_true_to_float_when_float(self):
     # Arrange
     value = 3.8
     # Act
     result = su.is_convertible_to_float(value)
     # Assert
     self.assertTrue(result)
Example #10
    def test_load_valid_added_event_data_and_monitor_file_produces_group_ws(self):
        # Arrange
        names = ['event_data', 'monitor']
        file_names = self._prepare_workspaces(names=names)
        self._cleanup_workspaces(names=names)

        # Act
        group_ws_name = 'g_ws'
        output_group_file_name = su.bundle_added_event_data_as_group(file_names[0], file_names[1])

        Load(Filename=output_group_file_name, OutputWorkspace=group_ws_name)
        group_ws = mtd[group_ws_name]

        # Assert
        self.assertTrue(isinstance(group_ws, WorkspaceGroup))
        self.assertEqual(group_ws.size(), 2)
        self.assertTrue(os.path.exists(file_names[0])) # File for group workspace exists
        self.assertFalse(os.path.exists(file_names[1]))  # File for monitors is deleted

        # Clean up
        ws_names_to_delete = []
        for ws_name in mtd.getObjectNames():
            if ws_name != group_ws_name:
                ws_names_to_delete.append(str(ws_name))
        self._cleanup_workspaces(names=ws_names_to_delete)

        if os.path.exists(file_names[0]):
            os.remove(file_names[0])
Example #11
    def test_that_error_is_transferred(self):
        # Arrange
        x1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        x2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        y1 = [2, 2, 2, 2, 2, 2, 2, 2]
        y2 = [2, 2, 2, 2, 2, 2, 2, 2]
        e1 = [1, 1, 1, 1, 1, 1, 1, 1]
        e2 = [2, 2, 2, 2, 2, 2, 2, 2]
        front, rear = self._createWorkspace(x1, y1, e1, x2, y2, e2)

        x_min = 3
        x_max = 7
        # Act
        f_return, r_return = su.get_error_corrected_front_and_rear_data_sets(front, rear, x_min, x_max)

        # Assert
        self.assertEqual(5, len(f_return.dataX(0)))
        self.assertEqual(5, len(r_return.dataX(0)))

        expected_errors_in_rear = [np.sqrt(5), np.sqrt(5), np.sqrt(5), np.sqrt(5)]
        self.assertEqual(expected_errors_in_rear[0], r_return.dataE(0)[0])
        self.assertEqual(expected_errors_in_rear[1], r_return.dataE(0)[1])
        self.assertEqual(expected_errors_in_rear[2], r_return.dataE(0)[2])
        self.assertEqual(expected_errors_in_rear[3], r_return.dataE(0)[3])

        # Clean up
        DeleteWorkspace(front)
        DeleteWorkspace(rear)
Example #12
 def test_converts_true_to_float_when_convertible_string(self):
     # Arrange
     input = "4.78"
     # Act
     result = su.is_convertible_to_float(input)
     # Assert
     self.assertTrue(result)
Example #13
 def test_converts_false_to_integer_when_non_convertible_string(self):
     # Arrange
     value = '34_gt'
     # Act
     result = su.is_convertible_to_int(value)
     # Assert
     self.assertFalse(result)
Example #14
 def test_converts_false_to_float_when_non_convertible_string(self):
     # Arrange
     input = "4.78_tg"
     # Act
     result = su.is_convertible_to_float(input)
     # Assert
     self.assertFalse(result)
Example #15
 def test_finds_invalid_xml_file_list(self):
     # Arrange
     input = ["test1.xml", "test2.ccl", "test3.xml"]
     # Act
     result =su.is_valid_xml_file_list(input)
     # Assert
     self.assertFalse(result)
Example #16
 def test_converts_true_to_integer_when_convertible_string(self):
     # Arrange
     value = '34'
     # Act
     result = su.is_convertible_to_int(value)
     # Assert
     self.assertTrue(result)
Example #17
 def test_finds_empty_list(self):
     # Arrange
     file_list = []
     # Act
     result = su.is_valid_xml_file_list(file_list)
     # Assert
     self.assertFalse(result)
Example #18
 def _do_test(self, file_name, expected_time):
     exists, full_path = get_full_path_SANS_system_test(file_name)
     if exists:
         measurement_time = su.get_measurement_time_from_file(full_path)
         self.assertEqual(measurement_time, expected_time)
     else:
         print("Missing data files. Path to system test data needs to be set.")
         self.assertTrue(False)
Example #19
    def do_test_extraction(self, event_name, monitor_name):
        out_ws_name = 'out_group'
        event_name_expect = out_ws_name
        monitor_name_expect = out_ws_name + self._appendix

        provide_group_workspace_for_added_event_data(event_ws_name=event_name, monitor_ws_name=monitor_name, out_ws_name=out_ws_name)
        out_ws_group = mtd[out_ws_name]

        # Act
        su.extract_child_ws_for_added_eventdata(out_ws_group, self._appendix)

        # Assert
        self.assertTrue(event_name_expect in mtd)
        self.assertTrue(monitor_name_expect in mtd)

        DeleteWorkspace(event_name_expect)
        DeleteWorkspace(monitor_name_expect)
Example #20
 def test_converts_from_string_to_list(self):
     # Arrange
     input = "test1.xml, test2.xml, test3.xml"
     # Act
     result = su.convert_to_string_list(input)
     # Assert
     expected = "['test1.xml','test2.xml','test3.xml']"
     self.assertEqual(expected, result)
Example #21
 def test_converts_from_list_to_string(self):
     # Arrange
     input = ["test1.xml", "test2.xml", "test3.xml"]
     # Act
     result = su.convert_from_string_list(input)
     # Assert
     expected = "test1.xml,test2.xml,test3.xml"
     self.assertEqual(expected, result)
Example #22
def setUserFileInBatchMode(new_user_file, current_user_file,
                           original_user_file, original_settings,
                           original_prop_man_settings):
    """
        Loads a specified user file. The file is loaded if
        new_user_file is different from current_user_file; otherwise the
        currently loaded user file is kept. If new_user_file is empty then we default to
        original_user_file.
        @param new_user_file: The new user file. Note that this can be merely the filename (+ extension)
        @param current_user_file: The currently loaded user file
        @param original_user_file: The originally loaded user file. This is used as a default
        @param original_settings: The original reducer
        @param original_prop_man_settings: Original properties settings
    """
    user_file_to_set = ""

    if new_user_file == '':
        user_file_to_set = original_user_file
    else:
        # Try to find the user file in the default paths
        if not os.path.isfile(new_user_file):
            # Find the user file in the Mantid path. We make sure that the user file has a txt extension.
            user_file = ""
            user_file_names_with_extension = su.get_user_file_name_options_with_txt_extension(
                new_user_file)
            for user_file_name_with_extension in user_file_names_with_extension:
                user_file = FileFinder.getFullPath(
                    user_file_name_with_extension)
                if user_file:
                    break

            if not os.path.isfile(user_file):
                message = "Error could not find specified user file {0}"\
                    .format(new_user_file)
                raise RuntimeError(message)
            else:
                user_file_to_set = user_file
        else:
            user_file_to_set = new_user_file

    # Set the user file in the reducer and load the user file
    if os.path.normpath(user_file_to_set) != os.path.normpath(
            current_user_file):
        # Need to set up a clean reducer. If we are dealing with the original user file,
        # then we should take GUI changes into account, i.e. reset to the original reducer
        if user_file_to_set == original_user_file:
            ReductionSingleton().replace(copy.deepcopy(original_settings))
            ReductionSingleton().settings = original_prop_man_settings.clone(
                REDUCTION_SETTINGS_OBJ_NAME)
        else:
            instrument = copy.deepcopy(ReductionSingleton().get_instrument())
            output_type = ReductionSingleton().to_Q.output_type
            ReductionSingleton().clean(isis_reducer.ISISReducer)
            ReductionSingleton().set_instrument(instrument)
            ReductionSingleton().user_settings = UserFile(user_file_to_set)
            ReductionSingleton().to_Q.output_type = output_type
            ReductionSingleton().user_settings.execute(ReductionSingleton())
        current_user_file = user_file_to_set
    return current_user_file
Example #23
 def test_that_deletion_of_non_existent_ws_creates_error_message(self):
     # Arrange
     ws_name = 'ws'
     # Act
     message, complete = su.delete_zero_error_free_workspace(input_workspace_name = ws_name)
     # Assert
     message = message.strip()
     self.assertTrue(message)
     self.assertFalse(complete)
Example #24
    def _create_quadrant(self, setup, reduced_ws, quadrant, xcentre, ycentre, r_min, r_max, suffix):
        out_ws = quadrant+suffix
        # Need to create a copy because we're going to mask 3/4 out and that's a one-way trip
        CloneWorkspace(InputWorkspace=reduced_ws, OutputWorkspace=out_ws)
        objxml = SANSUtility.QuadrantXML([0, 0, 0.0], r_min, r_max, quadrant)
        # Mask out everything outside the quadrant of interest
        MaskDetectorsInShape(Workspace=out_ws, ShapeXML=objxml)

        setup.to_Q.execute(setup, out_ws)
Example #25
 def test_error_is_ignored_for_more_than_one_spectrum(self):
     # Arrange
     orig_name = "orig"
     can_name = "can"
     result_name = "result"
     provide_workspace_with_x_errors(orig_name, True, 2)
     provide_workspace_with_x_errors(can_name, True, 2)
     provide_workspace_with_x_errors(result_name, False, 2)
     orig = mtd[orig_name]
     can = mtd[can_name]
     result = mtd[result_name]
     # Act
     su.correct_q_resolution_for_can(orig, can, result)
     # Assert
     self.assertFalse(result.hasDx(0))
     # Clean up
     DeleteWorkspace(orig)
     DeleteWorkspace(can)
     DeleteWorkspace(result)
Example #26
 def test_error_is_not_passed_on_when_it_did_not_exist_beforehand(self):
     # Arrange
     orig_name = "orig"
     can_name = "can"
     result_name = "result"
     provide_workspace_with_x_errors(orig_name, False, 1)
     provide_workspace_with_x_errors(can_name, False, 1)
     provide_workspace_with_x_errors(result_name, False, 1)
     orig = mtd[orig_name]
     can = mtd[can_name]
     result = mtd[result_name]
     # Act
     su.correct_q_resolution_for_can(orig, can, result)
     # Assert
     self.assertFalse(result.hasDx(0))
     # Clean up
     DeleteWorkspace(orig)
     DeleteWorkspace(can)
     DeleteWorkspace(result)
Example #27
    def post_process(self):
        # Store the mask file within the final workspace so that it is saved to the CanSAS file
        if self.user_settings is None:
            user_file = 'None'
        else:
            user_file = self.user_settings.filename
        AddSampleLog(Workspace=self.output_wksp,
                     LogName="UserFile",
                     LogText=user_file)

        # Get the value of __transmission_sample from the transmission_calculator if it has one
        if (not self.get_can()) and self.transmission_calculator.output_wksp:
            # It updates only if there was no can because, when there is a can, the __transmission_sample
            # is already correct and transmission_calculator.output_wksp points to the can transmission
            self.__transmission_sample = self.transmission_calculator.output_wksp

        # The reducer itself sometimes will be reset, and the users of the singleton
        # will not always have access to its settings. So we will add the transmission workspaces
        # to the SampleLog, to be connected to the workspace, and be available outside. These values
        # are currently being used for saving CanSAS (ticket #6929)
        if self.__transmission_sample:
            unfitted_transmission_workspace_name = su.get_unfitted_transmission_workspace_name(
                self.__transmission_sample)
            AddSampleLog(Workspace=self.output_wksp,
                         LogName="Transmission",
                         LogText=unfitted_transmission_workspace_name)
        if self.__transmission_can:
            unfitted_transmission_workspace_name = su.get_unfitted_transmission_workspace_name(
                self.__transmission_can)
            AddSampleLog(Workspace=self.output_wksp,
                         LogName="TransmissionCan",
                         LogText=unfitted_transmission_workspace_name)

        # clean these values for subsequent executions
        self.__transmission_sample = ""
        self.__transmission_can = ""

        for role in list(self._temporys.keys()):
            try:
                DeleteWorkspace(Workspace=self._temporys[role])
            except (Exception, Warning):
                # if cleaning up isn't possible there is probably nothing we can do
                pass
Example #28
 def _get_idf_path_for_workspace(self, filename, instrument_name):
     exists, full_path = get_full_path_SANS_system_test(filename)
     idf_path_workspace = None
     if exists:
         measurement_time = su.get_measurement_time_from_file(full_path)
         idf_path_workspace = ExperimentInfo.getInstrumentFilename(instrument_name, measurement_time)
     else:
         print("Missing data files. Path to system test data needs to be set.")
         self.assertTrue(False)
     return idf_path_workspace
Example #29
 def test_that_non_existent_ws_creates_error_message(self):
     # Arrange
     ws_name = 'original'
     ws_clone_name = 'clone'
     # Act
     message, complete = su.create_zero_error_free_workspace(input_workspace_name=ws_name, output_workspace_name=ws_clone_name)
     # Assert
     message = message.strip()
     self.assertTrue(message)
     self.assertFalse(complete)
Example #30
 def test_that_deletion_of_existing_ws_is_successful(self):
     # Arrange
     ws_name = 'ws'
     self._setup_workspace(ws_name, 'Histogram')
     # Act + Assert
     self.assertTrue(ws_name in mtd)
     message, complete = su.delete_zero_error_free_workspace(input_workspace_name=ws_name)
     message = message.strip()
     self.assertFalse(message)
     self.assertTrue(complete)
     self.assertTrue(ws_name not in mtd)
Example #31
 def test_error_is_ignored_when_only_one_input_has_dx(self):
     # Arrange
     front_name = "front"
     rear_name = "rear"
     result_name = "result"
     provide_workspace_with_x_errors(front_name, True, 1)
     provide_workspace_with_x_errors(rear_name, False, 1)
     provide_workspace_with_x_errors(result_name, False, 1)
     front = mtd[front_name]
     rear = mtd[rear_name]
     result = mtd[result_name]
     scale = 2.
     # Act
     su.correct_q_resolution_for_merged(front, rear, result, scale)
     # Assert
     self.assertFalse(result.hasDx(0))
     # Clean up
     DeleteWorkspace(front)
     DeleteWorkspace(rear)
     DeleteWorkspace(result)
Example #32
    def test_get_masked_det_ids(self):
        ws = CreateSampleWorkspace("Histogram", "Multiple Peaks")

        MaskDetectors(Workspace=ws, DetectorList=[100, 102, 104])

        masked_det_ids = list(su.get_masked_det_ids(ws))

        self.assertTrue(100 in masked_det_ids)
        self.assertTrue(102 in masked_det_ids)
        self.assertTrue(104 in masked_det_ids)
        self.assertEqual(len(masked_det_ids), 3)
Example #33
    def test_parse_strings(self):
        inputs = {'1-2': [[1, 2]],             # single period syntax: min < x < max
                  '1.3-5.6': [[1.3, 5.6]],     # float
                  '1-2,3-4': [[1, 2], [3, 4]], # more than one slice
                  '>1': [[1, -1]],             # just lower bound
                  '<5': [[-1, 5]],             # just upper bound
                  '<5,8-9': [[-1, 5], [8, 9]],
                  '1:2:5': [[1, 3], [3, 5]]    # syntax: start, step, stop
                  }

        for (k, v) in inputs.items():
            self.checkValues(su.sliceParser(k),v)
Example #34
def setUserFileInBatchMode(new_user_file, current_user_file, original_user_file, original_settings, original_prop_man_settings):
    """
        Loads a specified user file. The file is loaded if
        new_user_file is different from current_user_file; otherwise the
        currently loaded user file is kept. If new_user_file is empty then we default to
        original_user_file.
        @param new_user_file: The new user file. Note that this can be merely the filename (+ extension)
        @param current_user_file: The currently loaded user file
        @param original_user_file: The originally loaded user file. This is used as a default
        @param original_settings: The original reducer
        @param original_prop_man_settings: Original properties settings
    """
    user_file_to_set = ""

    if new_user_file == '':
        user_file_to_set = original_user_file
    else:
        # Try to find the user file in the default paths
        if not os.path.isfile(new_user_file):
            # Find the user file in the Mantid path. We make sure that the user file has a txt extension.
            user_file = ""
            user_file_names_with_extension = su.get_user_file_name_options_with_txt_extension(new_user_file)
            for user_file_name_with_extension in user_file_names_with_extension:
                user_file = FileFinder.getFullPath(user_file_name_with_extension)
                if user_file:
                    break

            if not os.path.isfile(user_file):
                message = "Error could not find specified user file {0}"\
                    .format(new_user_file)
                raise RuntimeError(message)
            else:
                user_file_to_set = user_file
        else:
            user_file_to_set = new_user_file

    # Set the user file in the reducer and load the user file
    if os.path.normpath(user_file_to_set) != os.path.normpath(current_user_file):
        # Need to set up a clean reducer. If we are dealing with the original user file,
        # then we should take GUI changes into account, i.e. reset to the original reducer
        if user_file_to_set == original_user_file:
            ReductionSingleton().replace(copy.deepcopy(original_settings))
            ReductionSingleton().settings = original_prop_man_settings.clone(REDUCTION_SETTINGS_OBJ_NAME)
        else:
            instrument = copy.deepcopy(ReductionSingleton().get_instrument())
            output_type = ReductionSingleton().to_Q.output_type
            ReductionSingleton().clean(isis_reducer.ISISReducer)
            ReductionSingleton().set_instrument(instrument)
            ReductionSingleton().user_settings = UserFile(user_file_to_set)
            ReductionSingleton().to_Q.output_type = output_type
            ReductionSingleton().user_settings.execute(ReductionSingleton())
        current_user_file = user_file_to_set
    return current_user_file
Example #35
    def test_non_Q1D_and_Qxy_history_is_not_valid_and_produces_error_message(self):
        # Arrange
        ws_name = 'ws'
        self._setup_workspace(ws_name, 'Histogram')
        # Act
        message, complete = su.is_valid_ws_for_removing_zero_errors(input_workspace_name=ws_name)
        # Assert
        message = message.strip()
        self.assertTrue(message)
        self.assertFalse(complete)

        self._removeWorkspace(ws_name)
        self.assertTrue(ws_name not in mtd)
Example #36
    def test_parse_strings(self):
        inputs = {
            '1-2': [[1, 2]],  # single period syntax  min < x < max
            '1.3-5.6': [[1.3, 5.6]],  # float
            '1-2,3-4': [[1, 2], [3, 4]],  # more than one slice
            '>1': [[1, -1]],  # just lower bound
            '<5': [[-1, 5]],  # just upper bound
            '<5,8-9': [[-1, 5], [8, 9]],
            '1:2:5': [[1, 3], [3, 5]]  # syntax: start, step, stop
        }

        for (k, v) in inputs.items():
            self.checkValues(su.sliceParser(k), v)
Example #37
def get_mapped_workspaces(save_names, save_as_zero_error_free):
    """
        Get a workspace name map, which maps from the original
        workspace to a zero-error-free cloned workspace if
        save_as_zero_error_free is checked; otherwise the
        workspaces are mapped to themselves.
        @param save_names: a list of workspace names
        @param save_as_zero_error_free : if the user wants the zero-errors removed
        @returns a map of workspaces
    """
    workspace_dictionary = {}
    for name in save_names:
        if save_as_zero_error_free:
            cloned_name = name + '_cloned_temp'
            dummy_message, complete = su.create_zero_error_free_workspace(input_workspace_name=name, output_workspace_name=cloned_name)
            if complete:
                workspace_dictionary[name] = cloned_name
            else:
                workspace_dictionary[name] = name
        else:
            workspace_dictionary[name] = name
    return workspace_dictionary
Example #38
def BatchReduce(
        filename,
        format,
        plotresults=False,
        saveAlgs={'SaveRKH': 'txt'},
        verbose=False,  # noqa: C901
        centreit=False,
        reducer=None,
        combineDet=None,
        save_as_zero_error_free=False):
    """
        @param filename: the CSV file with the list of runs to analyse
        @param format: type of file to load, nxs for Nexus, etc.
        @param plotresults: if true and this function is run from MantidPlot, a graph will be created for the results of each reduction
        @param saveAlgs: a dict mapping save algorithm names to file extensions; each algorithm is passed the
            name of the results workspace and the filename (default = {'SaveRKH': 'txt'}). Use multiple entries
            to save to multiple file formats
        @param verbose: set to true to write more information to the log (default=False)
        @param centreit: do centre finding (default=False)
        @param reducer: if to use the command line (default) or GUI reducer object
        @param combineDet: that will be forward to WavRangeReduction (rear, front, both, merged, None)
        @param save_as_zero_error_free: Should the reduced workspaces contain zero errors or not
        @return final_settings: A dictionary with some values of the Reduction - right now: (scale, shift)
    """
    if not format.startswith('.'):
        format = '.' + format

    # Read CSV file and store information in runinfo using format IN_FORMAT
    file_handle = open(filename, 'r')
    runinfo = []
    for line in file_handle:
        # See how many pieces of information have been provided;
        # brackets delineate the field separator (nothing for space-delimited, ',' for comma-separated)
        parts = line.rstrip().split(',')
        if addRunToStore(parts, runinfo) > 0:
            issueWarning('Incorrect structure detected in input file "' +
                         filename + '" at line \n"' + line +
                         '"\nEntry skipped\n')
    file_handle.close()

    if reducer:
        ReductionSingleton().replace(reducer)
    ins_name = ReductionSingleton().instrument.name()
    # is used for SaveCanSAS1D to give the detectors names
    detnames = ', '.join(ReductionSingleton().instrument.listDetectors())

    # LARMOR has just one detector, but it defines two because ISISInstrument is defined as two banks! #8395
    if ins_name == 'LARMOR':
        detnames = ReductionSingleton().instrument.cur_detector().name()

    scale_shift = {'scale': 1.0000, 'shift': 0.0000}
    # First copy the user settings in case running the reduction steps changes them
    settings = copy.deepcopy(ReductionSingleton().reference())
    prop_man_settings = ReductionSingleton().settings.clone("TEMP_SETTINGS")

    # Make a note of the original user file, as we want to be able to set it back
    original_user_file = ReductionSingleton().user_settings.filename
    current_user_file = original_user_file

    # Store the original combineDet, which was set by the input. This should be used whenever we are using the
    # original user file
    original_combine_det = combineDet

    # Now loop over all the lines and do a reduction (hopefully) for each
    for run in runinfo:
        # Set the user file, if it is required
        try:
            current_user_file = setUserFileInBatchMode(
                new_user_file=run['user_file'],
                current_user_file=current_user_file,
                original_user_file=original_user_file,
                original_settings=settings,
                original_prop_man_settings=prop_man_settings)

            if current_user_file == original_user_file:
                combineDet = original_combine_det
            else:
                # When we set a new user file, that means that the combineDet feature could be invalid,
                # ie if the detector under investigation changed in the user file. We need to change this
                # here too. But only if it is not None.
                if combineDet is not None:
                    new_combineDet = ReductionSingleton(
                    ).instrument.get_detector_selection()
                    combineDet = su.get_correct_combinDet_setting(
                        ins_name, new_combineDet)
        except (RuntimeError, ValueError) as e:
            raise RuntimeError(
                "Error in Batchmode user files: Could not reset the specified user file {0}. More info: {1}"
                .format(str(run['user_file']), str(e)))

        local_settings = copy.deepcopy(ReductionSingleton().reference())
        local_prop_man_settings = ReductionSingleton().settings.clone(
            "TEMP_SETTINGS")

        raw_workspaces = []
        geometry_properties = {}
        try:
            # Load in the sample runs specified in the csv file
            raw_workspaces.append(read_run(run, 'sample_sans', format))

            # Transmission runs to be applied to the sample
            raw_workspaces += read_trans_runs(run, 'sample', format)

            # Can run
            raw_workspaces.append(read_run(run, 'can_sans', format))

            # Transmission runs for the can
            raw_workspaces += read_trans_runs(run, 'can', format)

            if centreit == 1:
                if verbose == 1:
                    FindBeamCentre(50., 170., 12)

            try:
                geometry_properties = get_geometry_properties(
                    ReductionSingleton())
            except RuntimeError as e:
                message = "Could not extract geometry properties from the reducer: {0}".format(
                    str(e))
                sanslog.warning(message)

            # WavRangeReduction runs the reduction for the specified
            # wavelength range where the final argument can either be
            # DefaultTrans or CalcTrans:
            reduced = WavRangeReduction(combineDet=combineDet,
                                        out_fit_settings=scale_shift)

        except SkipEntry as reason:
            # This means that a load step failed; the warning and the fact that the results aren't there is enough for the user
            issueWarning(str(reason) + ', skipping entry')
            continue
        except SkipReduction as reason:
            # This means that a reduction step failed; the warning and the fact that the results aren't there is enough for the user
            issueWarning(str(reason) + ', skipping reduction')
            continue
        except ValueError as reason:
            issueWarning('Cannot load file :' + str(reason))
            # When we are all up to Python 2.5, replace the duplicated code below with one finally:
            delete_workspaces(raw_workspaces)
            raise

        delete_workspaces(raw_workspaces)

        if verbose:
            sanslog.notice(
                createColetteScript(run, format, reduced, centreit,
                                    plotresults, filename))
        # Rename the final workspace
        final_name = run['output_as'].strip()
        if final_name == '':
            final_name = reduced

        # Remove illegal characters
        final_name = sanitize_name(final_name)

        # Convert the names from the default one to the agreed convention.
        # This causes a renaming with the following logic
        # | combinDet      |    Name HAB    |   Name LAB   | Name Merged  |
        # | rear           |    +_rear      |     -        |     -        |
        # | front          |      -         |    +_front   |     -        |
        # | both           |    +_rear      |    +_front   |     -        |
        # | merged         |    +_rear      |    +_front   |     +_merged |
        # This is not great since it uses SANS2D terminology for all instruments
        names = [final_name]
        if combineDet == 'rear':
            new_name = su.rename_workspace_correctly(ins_name,
                                                     su.ReducedType.LAB,
                                                     final_name, reduced)
            names = [new_name]
        elif combineDet == 'front':
            new_name = su.rename_workspace_correctly(ins_name,
                                                     su.ReducedType.HAB,
                                                     final_name, reduced)
            names = [new_name]
        elif combineDet == 'both':
            if ins_name == 'SANS2D':
                rear_reduced = reduced.replace('front', 'rear')
            else:  # if ins_name == 'LOQ':
                rear_reduced = reduced.replace('HAB', 'main')
            new_name_HAB = su.rename_workspace_correctly(
                ins_name, su.ReducedType.HAB, final_name, reduced)
            new_name_LAB = su.rename_workspace_correctly(
                ins_name, su.ReducedType.LAB, final_name, rear_reduced)
            names = [new_name_HAB, new_name_LAB]
        elif combineDet == 'merged':
            if ins_name == 'SANS2D':
                rear_reduced = reduced.replace('merged', 'rear')
                front_reduced = reduced.replace('merged', 'front')
            else:
                rear_reduced = reduced.replace('merged', 'main')
                front_reduced = rear_reduced.replace('main', 'HAB')
            new_name_Merged = su.rename_workspace_correctly(
                ins_name, su.ReducedType.Merged, final_name, reduced)
            new_name_LAB = su.rename_workspace_correctly(
                ins_name, su.ReducedType.LAB, final_name, rear_reduced)
            new_name_HAB = su.rename_workspace_correctly(
                ins_name, su.ReducedType.HAB, final_name, front_reduced)
            names = [new_name_Merged, new_name_LAB, new_name_HAB]
        else:
            RenameWorkspace(InputWorkspace=reduced, OutputWorkspace=final_name)

        output_file = run['output_as']
        # Saving is optional and doesn't happen if the result workspace is left blank. Is this feature used?
        if output_file:
            save_names = []
            for n in names:
                w = mtd[n]
                if isinstance(w, WorkspaceGroup):
                    save_names.extend(w.getNames())
                else:
                    save_names.append(n)

            # If we want to remove zero-errors, we map the original workspace to a cleaned workspace clone,
            # else we map it to itself.
            save_names_dict = get_mapped_workspaces(save_names,
                                                    save_as_zero_error_free)

            for algor in list(saveAlgs.keys()):
                for workspace_name in save_names:
                    # Add the file extension; important when saving different types of file so they don't overwrite each other
                    ext = saveAlgs[algor]
                    if not ext.startswith('.'):
                        ext = '.' + ext
                    if algor == "SaveCanSAS1D":
                        # From v2, SaveCanSAS1D is able to save the Transmission workspaces related to the
                        # reduced data. The name of workspaces of the Transmission are available at the
                        # sample logs.
                        _ws = mtd[workspace_name]
                        transmission_properties = get_transmission_properties(
                            _ws)

                        # Add the geometry properties if they exist
                        if geometry_properties:
                            transmission_properties.update(geometry_properties)

                        # Call the SaveCanSAS1D with the Transmission and TransmissionCan if they are
                        # available
                        SaveCanSAS1D(save_names_dict[workspace_name],
                                     workspace_name + ext,
                                     DetectorNames=detnames,
                                     **transmission_properties)
                    elif algor == "SaveNXcanSAS":
                        _ws = mtd[workspace_name]
                        transmission_properties = get_transmission_properties(
                            _ws)
                        # Call the SaveNXcanSAS with the Transmission and TransmissionCan if they are
                        # available
                        SaveNXcanSAS(save_names_dict[workspace_name],
                                     workspace_name + ext,
                                     DetectorNames=detnames,
                                     **transmission_properties)
                    elif algor == "SaveRKH":
                        SaveRKH(save_names_dict[workspace_name],
                                workspace_name + ext,
                                Append=False)
                    else:
                        exec(algor + "('" + save_names_dict[workspace_name] +
                             "', workspace_name+ext)")
            # If we performed a zero-error correction, then we should get rid of the cloned workspaces
            if save_as_zero_error_free:
                delete_cloned_workspaces(save_names_dict)

        if plotresults == 1:
            for final_name in names:
                PlotResult(final_name)

        # The call to WavRangeReduction killed the reducer, so copy the settings back over
        ReductionSingleton().replace(copy.deepcopy(local_settings))
        ReductionSingleton().settings = local_prop_man_settings.clone(
            REDUCTION_SETTINGS_OBJ_NAME)

    # Set everything back to the initial state
    ReductionSingleton().replace(copy.deepcopy(settings))
    ReductionSingleton().settings = prop_man_settings.clone(
        REDUCTION_SETTINGS_OBJ_NAME)

    # End of the reduction of all entries of the batch file
    return scale_shift
Example #39
def unixLikePathFromWorkspace(ws):
    return su.getFilePathFromWorkspace(ws).replace('\\', '/')
# D_T for nine blocks when the DAE was playing up in cycle 09/1 or 09/2
wksp="SANS2D00001674"
wksp="5976_sans_nxs"
dimdet=192
spec1=8+1+192*192
tag="frt"
#spec1=8+1
#tag="rear"
#print speclist
namelist=["A TL","D TC","G TR","B ML","E MC","H MR","C BL","F BC","I BR"]
masklist=["h0>h127,v64>v191","h0>h127,v0>v63,v128>v191","h0>h127,v0>v127","h0>h63,h128>h191,v64>v191","h0>h63,h128>h191,v0>v63,v128>v191","h0>h63,h128>h191,v0>v127","h63>h191,v64>v191","h63>h191,v0>v63,v128>v191","h63>h191,v0>v127"]
for i, name in enumerate(namelist):
    print("i = " + str(i))
    # guess "orientation"
    spec_list = SANSUtility.ConvertToSpecList(masklist[i], spec1, dimdet, '0')
    CropWorkspace(wksp, OutputWorkspace="D_T "+name+tag, StartWorkspaceIndex=(spec1-1), EndWorkspaceIndex=spec1+192*192-2)
    SANSUtility.MaskBySpecNumber("D_T "+name+tag, spec_list)
    SumSpectra("D_T "+name+tag, "D_T "+name+tag)
print("done")
for idx, wext in enumerate(namelist):
    wsp = "D_T " + wext
    if idx == 0:
        plot2 = plotSpectrum(wsp+tag, 0)
        layer = plot2.activeLayer()
        layer.setTitle("D_T modules")
    else:
        mergePlots(plot2, plotSpectrum(wsp+tag, 0))
    layer.setCurveTitle(idx, wext)
#
Example #41
 def test_empty_string_is_valid(self):
     self.checkValues(su.sliceParser(""), [[-1, -1]])
Example #42
 def getCurrSliceLimit(self):
     if not self._slices_def:
         self._slices_def = su.sliceParser("")
         assert self._slice_index == 0
     return self._slices_def[self._slice_index]
Example #43
 def setSlicesLimits(self, str_def):
     self._slices_def = su.sliceParser(str_def)
     self._slice_index = 0
Example #44
 def test_accept_spaces(self):
     self.checkValues(su.sliceParser("1 - 2, 3 - 4"), [[1, 2], [3, 4]])
Example #45
    def _match_IDF(self, run):
        '''
        Compares the IDF in the stored instrument with the IDF in the workspace.
        If they are the same, all is well. If they differ, then load the appropriate
        user file.
        @param run: name of the run for which the file is to be extracted
        '''
        # We need the instrument name and the measurement time to determine
        # the IDF
        measurement_time = None
        instrument_name = self.get_instrument_name()
        # We need to be able to handle file-based and workspace-based queries
        # If we have a workspace we look at the end time, else we
        # need a sophisticated extraction mechanism
        if isinstance(run, Workspace):
            ws = None
            if isinstance(run, WorkspaceGroup):
                # Just look at the first element in a workspace group
                ws = run[0]
            else:
                ws = run
            measurement_time = str(ws.getRun().endTime()).strip()
        else:
            if run is None or run == "":
                return
            measurement_time = su.get_measurement_time_from_file(run)

        # Get the path to the instrument definition file
        idf_path_workspace = ExperimentInfo.getInstrumentFilename(
            instrument_name, measurement_time)
        idf_path_workspace = os.path.normpath(idf_path_workspace)

        # Get the idf from the reducer
        idf_path_reducer = self.get_idf_file_path()
        idf_path_reducer = os.path.normpath(idf_path_reducer)

        # Now check if both IDF paths and the underlying files are identical.
        # If they are, then don't do anything, else switch the underlying instrument
        if idf_path_reducer == idf_path_workspace and su.are_two_files_identical(
                idf_path_reducer, idf_path_workspace):
            return
        else:
            logger.notice("Updating the IDF of the Reducer. Switching from " +
                          str(idf_path_reducer) + " to " +
                          str(idf_path_workspace))
            idf_path = os.path.basename(idf_path_workspace)
            instrument = self._get_correct_instrument(instrument_name,
                                                      idf_path)

            # Get detector of the old instrument
            old_instrument = self.get_instrument()
            old_detector_selection = old_instrument.get_detector_selection()

            if instrument is not None:
                self.set_instrument(instrument)

                # We need to update the instrument, by reloading the user file.
                # This is pretty bad, but looking at the reducer architecture this
                # seems to be the only reasonable way to do this.
                self.user_settings.execute(self)

                # Now we set the correct detector, this is also being done in the GUI
                self.get_instrument().setDetector(old_detector_selection)