def validate_monitors(position, data):
    monitor_values = reader.read()

    combined_data = zip(self.dimensions[-1]['Monitor'],
                        self.dimensions[-1]['MonitorValue'],
                        self.dimensions[-1]['MonitorTolerance'],
                        self.dimensions[-1]['MonitorAction'],
                        self.dimensions[-1]['MonitorTimeout'],
                        monitor_values)

    for pv, expected_value, tolerance, action, timeout, value in combined_data:
        # Monitor value does not match the expected one.
        if not compare_channel_value(value, expected_value, tolerance):
            if action == "Abort":
                raise ValueError("Monitor %s, expected value %s, tolerance %s, has value %s. Aborting."
                                 % (pv, expected_value, tolerance, value))
            elif action == "WaitAndAbort":
                # Not valid yet: signal the caller to wait and re-validate.
                return False
            else:
                raise ValueError("MonitorAction %s, on PV %s, is not supported." % (action, pv))

    return True
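# For illustration only: the per-monitor lists that validate_monitors iterates over are
# parallel lists from the scan configuration. The values below mirror the monitor setup
# used in test_EmitMeasTool further down; the dict itself is an example, not part of the
# scanning code, and the retry/timeout comment is an assumption drawn from the action names.
example_monitor_config = {
    'Monitor': ['PYSCAN:TEST:MONITOR1'],   # PV to watch during the scan.
    'MonitorValue': [10],                  # Expected value of the monitor PV.
    'MonitorTolerance': [0.1],             # Allowed deviation from the expected value.
    'MonitorAction': ['WaitAndAbort'],     # 'Abort' raises immediately; 'WaitAndAbort' lets the caller retry.
    'MonitorTimeout': [15],                # Presumably the time budget for 'WaitAndAbort', in seconds.
}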
def validate_data(current_position, data):
    bs_values = iter(bs_reader.read_cached_conditions() if bs_reader else [])
    epics_values = iter(epics_condition_reader.read() if epics_condition_reader else [])
    function_values = iter(function_condition.read() if function_condition else [])

    for index, source in enumerate(conditions_order):
        if source == BS_CONDITION:
            value = next(bs_values)
        elif source == EPICS_CONDITION:
            value = next(epics_values)
        elif source == FUNCTION_CONDITION:
            value = next(function_values)
        else:
            raise ValueError("Unknown type of condition %s used." % source)

        # Function conditions are self contained.
        if source == FUNCTION_CONDITION:
            if not value:
                raise ValueError("Function condition %s returned False." % conditions[index].identifier)
        else:
            expected_value = conditions[index].value
            tolerance = conditions[index].tolerance

            if not compare_channel_value(value, expected_value, tolerance):
                raise ValueError("Condition %s, expected value %s, actual value %s, tolerance %s."
                                 % (conditions[index].identifier, expected_value, value, tolerance))

    return True
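# The loop above consumes one value per entry of conditions_order, pulling it from the
# reader that matches the declared source. A minimal, self-contained sketch of that
# interleaving pattern (the function and source names are illustrative only, not part of
# the scanning code):
def _merge_by_order(order, sources):
    """Yield one value per entry in 'order', taken from the matching source iterator."""
    iterators = {name: iter(values) for name, values in sources.items()}
    for name in order:
        yield next(iterators[name])

# Example: list(_merge_by_order(["epics", "bs", "epics"], {"epics": [1, 2], "bs": ["a"]}))
# returns [1, "a", 2].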
def set_and_match(self, values, tolerances=None, timeout=None):
    """
    Set the values and wait for the PVs to reach them, within tolerance.
    :param values: Values to set (must match the number of PVs in this group).
    :param tolerances: Tolerances for each PV (must match the number of PVs in this group).
    :param timeout: Timeout, single value, to wait until the values are reached.
    :raise ValueError if any position cannot be reached.
    """
    values = convert_to_list(values)

    if not tolerances:
        tolerances = self.tolerances
    else:
        # We do not allow tolerances to be less than the default tolerance.
        tolerances = self._setup_tolerances(tolerances)

    if not timeout:
        timeout = self.timeout

    # Verify that all provided lists are of the same size.
    validate_lists_length(self.pvs, values, tolerances)

    # Check if the timeout is an int or a float.
    if not isinstance(timeout, (int, float)):
        raise ValueError("Timeout must be int or float, but %s was provided." % timeout)

    # Write all the PV values.
    for pv, value in zip(self.pvs, values):
        pv.put(value)

    # Boolean array to represent which PVs have reached their target values.
    within_tolerance = [False] * len(self.pvs)
    initial_timestamp = time.time()

    # Read values until all PVs have reached the desired value or the time has run out.
    while (not all(within_tolerance)) and (time.time() - initial_timestamp < timeout):
        # Get only the PVs that have not yet reached the final position.
        for index, pv, tolerance in ((index, pv, tolerance)
                                     for index, pv, tolerance, values_reached
                                     in zip(count(), self.readback_pvs, tolerances, within_tolerance)
                                     if not values_reached):
            current_value = pv.get()
            expected_value = values[index]

            if compare_channel_value(current_value, expected_value, tolerance):
                within_tolerance[index] = True

        time.sleep(self.default_get_sleep)

    if not all(within_tolerance):
        error_message = ""

        # Get the indexes that did not reach the expected values.
        for index in [index for index, reached_value in enumerate(within_tolerance) if not reached_value]:
            expected_value = values[index]
            pv_name = self.pv_names[index]
            tolerance = tolerances[index]

            error_message += "Cannot achieve value %s, on PV %s, with tolerance %s.\n" \
                             % (expected_value, pv_name, tolerance)

        raise ValueError(error_message)
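# Illustrative usage (the 'group' object and the target values are hypothetical; the call
# blocks until every readback PV is within tolerance of its target, or raises ValueError):
#
#   group.set_and_match([1.0, 2.0], tolerances=[0.01, 0.01], timeout=5)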
def validate_data(current_position_index, data):
    _logger.debug("Reading data for position index %s." % current_position_index)

    bs_values = iter(bs_reader.read_cached_conditions() if bs_reader else [])
    epics_values = iter(epics_condition_reader.read(current_position_index)
                        if epics_condition_reader else [])
    function_values = iter(function_condition.read(current_position_index)
                           if function_condition else [])

    for index, source in enumerate(conditions_order):
        if source == BS_CONDITION:
            value = next(bs_values)
        elif source == EPICS_CONDITION:
            value = next(epics_values)
        elif source == FUNCTION_CONDITION:
            value = next(function_values)
        else:
            raise ValueError("Unknown type of condition %s used." % source)

        value_valid = False

        # Function conditions are self contained.
        if source == FUNCTION_CONDITION:
            if value:
                value_valid = True
        else:
            expected_value = conditions[index].value
            tolerance = conditions[index].tolerance
            operation = conditions[index].operation

            if compare_channel_value(value, expected_value, tolerance, operation):
                value_valid = True

        if not value_valid:
            if conditions[index].action == ConditionAction.Retry:
                return False

            if source == FUNCTION_CONDITION:
                raise ValueError("Function condition %s returned False." % conditions[index].identifier)
            else:
                raise ValueError("Condition %s failed, expected value %s, actual value %s, "
                                 "tolerance %s, operation %s."
                                 % (conditions[index].identifier, conditions[index].value, value,
                                    conditions[index].tolerance, conditions[index].operation))

    return True
def is_close(list1, list2, epsilon=0):
    """
    Compare two lists of floats. Since we are dealing with floats, an exact match cannot be enforced.
    :param list1: First list to compare.
    :param list2: Second list to compare.
    :param epsilon: Maximum difference we allow at each step. Defaults to 0.
    :return: True if all elements are within the specified error range.
    """
    return all(compare_channel_value(value1, value2, epsilon)
               for value1, value2 in zip(list1, list2))
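# Illustrative usage (epsilon is passed to compare_channel_value as an absolute,
# per-element tolerance, as the tests below suggest):
#
#   is_close([1.0, 2.0], [1.00001, 1.99999], epsilon=1e-4)   # True
#   is_close([1.0, 2.0], [1.0, 2.1], epsilon=1e-4)           # False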
def test_compare_channel_value(self):
    self.assertTrue(compare_channel_value(current_value=10.4, expected_value=10.4))
    self.assertTrue(compare_channel_value(current_value=10.4, expected_value=10.4,
                                          operation=ConditionComparison.EQUAL))
    self.assertTrue(compare_channel_value(current_value=10.4, expected_value=10.3,
                                          operation=ConditionComparison.EQUAL, tolerance=0.1))
    self.assertFalse(compare_channel_value(current_value=10.4, expected_value=10.29,
                                           operation=ConditionComparison.EQUAL, tolerance=0.1))

    self.assertTrue(compare_channel_value(current_value=10.4, expected_value=10.3,
                                          operation=ConditionComparison.NOT_EQUAL))
    self.assertTrue(compare_channel_value(current_value=10.4, expected_value=10.3,
                                          operation=ConditionComparison.NOT_EQUAL, tolerance=0.09))
    self.assertFalse(compare_channel_value(current_value=10.4, expected_value=10.3,
                                           operation=ConditionComparison.NOT_EQUAL, tolerance=0.1))

    self.assertTrue(compare_channel_value(current_value=10.4, expected_value=10.5,
                                          operation=ConditionComparison.LOWER))
    self.assertTrue(compare_channel_value(current_value=10.4, expected_value=10.4,
                                          operation=ConditionComparison.LOWER, tolerance=0.1))
    self.assertTrue(compare_channel_value(current_value=10.5, expected_value=10.4,
                                          operation=ConditionComparison.LOWER, tolerance=0.12))
    self.assertTrue(compare_channel_value(current_value=10.4, expected_value=10.5,
                                          operation=ConditionComparison.LOWER))
    self.assertFalse(compare_channel_value(current_value=10.51, expected_value=10.5,
                                           operation=ConditionComparison.LOWER))

    self.assertTrue(compare_channel_value(current_value=10.4, expected_value=10.4,
                                          operation=ConditionComparison.LOWER_OR_EQUAL))
    self.assertTrue(compare_channel_value(current_value=10.5, expected_value=10.4,
                                          operation=ConditionComparison.LOWER_OR_EQUAL, tolerance=0.1))
    self.assertFalse(compare_channel_value(current_value=10.5, expected_value=10.4,
                                           operation=ConditionComparison.LOWER_OR_EQUAL))
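# The assertions above pin down the tolerance semantics of compare_channel_value. The
# function below is a minimal, self-contained reconstruction of that behaviour for
# illustration only - it is NOT the library's implementation; its name, the string
# operation codes, and the default tolerance of 0.0 are assumptions (the library uses the
# ConditionComparison enum and may apply a small nonzero default tolerance).
def _compare_sketch(current_value, expected_value, tolerance=0.0, operation="EQUAL"):
    difference = current_value - expected_value
    if operation == "EQUAL":
        # Equal within tolerance: |current - expected| <= tolerance.
        return abs(difference) <= tolerance
    if operation == "NOT_EQUAL":
        return abs(difference) > tolerance
    if operation == "LOWER":
        # The tolerance relaxes the bound: current < expected + tolerance.
        return difference < tolerance
    if operation == "LOWER_OR_EQUAL":
        return difference <= tolerance
    raise ValueError("Unknown operation %s." % operation)

# Reproducing two of the test cases above with the sketch:
assert _compare_sketch(10.4, 10.3, tolerance=0.1, operation="EQUAL")
assert not _compare_sketch(10.51, 10.5, operation="LOWER")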
def test_PyScanTool(self):
    knob = "PYSCAN:TEST:MOTOR1:SET"
    instrument = "PYSCAN:TEST:OBS1"
    numberOfReps = 3
    scanValues = [0, 1, 2, 3]

    indict = {}
    indict['Knob'] = knob
    indict['KnobWaiting'] = 0.001  # later set by expert panel...
    indict['KnobWaitingExtra'] = 0.1  # later set by expert panel...
    indict['Waiting'] = 0.01  # later set by expert panel...
    indict['Observable'] = instrument
    indict['ScanValues'] = scanValues
    indict['NumberOfMeasurements'] = numberOfReps
    # Restore the initial set value after the measurement.
    indict['PostAction'] = 'Restore'

    test_dal = CurrentMockDal(pv_fixed_values={"PYSCAN:TEST:OBS1": [0.9, 1.0, 1.1]})
    pyscan = CurrentScan()

    # Check if the progress bar works.
    def monitor_scan():
        # Make sure the initialization is done.
        while pyscan.ProgDisp.Progress:
            sleep(0.001)

        current_value = 0
        while current_value < 100:
            last_value = pyscan.ProgDisp.Progress
            if last_value > current_value:
                progress_values.append(pyscan.ProgDisp.Progress)
                current_value = last_value
            else:
                nonlocal progress_completed
                progress_completed = True

    progress_values = []
    progress_completed = False
    threading.Thread(target=monitor_scan).start()

    pyscan.initializeScan(indict, dal=test_dal)
    outdict = pyscan.startScan()

    # Wait for the progress thread to terminate.
    sleep(0.2)

    self.assertTrue(progress_completed, "Progress bar did not complete.")
    self.assertListEqual(progress_values, [25, 50, 75, 100],
                         "The completed percentage is wrong.")

    scanResultKnob = outdict['KnobReadback']
    scanResultInst = outdict['Observable']

    # Remove empty fields.
    scanResultKnob_clean = [x for x in scanResultKnob if x]
    scanResultInst_clean = [y for y in scanResultInst if y]

    self.assertListEqual(scanResultKnob, scanResultKnob_clean, "Scan knob lists are not identical.")
    self.assertListEqual(scanResultInst, scanResultInst_clean, "Instrument lists are not identical.")

    # Mean and standard deviation via numpy arrays.
    scanResultKnobMean = [np.array(x).mean() for x in scanResultKnob]
    scanResultInstMean = [np.array(y).mean() for y in scanResultInst]
    scanResultInstStd = [np.array(y).std() for y in scanResultInst]

    scanResultArray = np.array(scanResultInst)
    scanResultMean = [x.mean() for x in scanResultArray]
    scanResultStd = [x.std() for x in scanResultArray]

    self.assertListEqual(scanResultKnobMean, [0, 1, 2, 3], "The scan knobs result is not the same.")
    self.assertListEqual(scanResultInstMean, [1, 1, 1, 1], "The instrument mean values do not match.")
    self.assertListEqual(scanResultMean, [1, 1, 1, 1], "The instrument mean values do not match.")
    self.assertListEqual(scanResultInstStd, scanResultStd, "Standard deviation results are not the same.")

    if numberOfReps == 3:
        self.assertTrue(all(compare_channel_value(i1, i2)
                            for i1, i2 in zip(scanResultInstStd, [0.081649] * 4)),
                        "Unexpected result for standard deviation.")
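    # Note on the reference value 0.081649 above: with pv_fixed_values the observable cycles
    # through [0.9, 1.0, 1.1], whose population standard deviation (numpy's default, ddof=0)
    # is sqrt(((0.1)**2 + 0 + (0.1)**2) / 3) ≈ 0.0816497.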
def test_EmitMeasTool(self):
    # Initialize the values.
    Images = 5
    QuadIch = ["PYSCAN:TEST:MOTOR1:SET", "PYSCAN:TEST:MOTOR2:SET"]
    QuadI = [[0, 1, 2, 3], [0, 1, 2, 3]]

    test_dal = CurrentMockDal(
        initial_values={
            "PYSCAN:TEST:OBS1": 1,
            "PYSCAN:TEST:OBS2": 2,
            "PYSCAN:TEST:OBS3": 1,
            "PYSCAN:TEST:OBS4": 1,
            "PYSCAN:TEST:MONITOR1": 9.95
        },
        pv_fixed_values={
            "PYSCAN:TEST:OBS1": [0.8, 0.9, 1.0, 1.1, 1.2],
            "PYSCAN:TEST:OBS2": [-0.8, -0.9, -1.0, -1.1, -1.2]
        })

    pyscan = CurrentScan()

    indict1 = {}

    # Knob setup.
    indict1['Knob'] = QuadIch
    indict1['KnobReadback'] = [c.replace('SET', 'GET') for c in QuadIch]
    indict1['KnobTolerance'] = [0.01] * len(QuadIch)
    indict1['KnobWaiting'] = [10] * len(QuadIch)
    indict1['KnobWaitingExtra'] = 0.01
    indict1['ScanValues'] = QuadI

    # Measurement setup.
    indict1['Observable'] = ["PYSCAN:TEST:OBS1", "PYSCAN:TEST:OBS2",
                             "PYSCAN:TEST:OBS3", "PYSCAN:TEST:OBS4"]
    indict1['Waiting'] = 0.01
    indict1['NumberOfMeasurements'] = int(Images)

    # Monitor setup.
    # Only perform the measurement while the laser is producing beam, i.e. while the
    # repetition frequency is close to 10 Hz.
    indict1['Monitor'] = ['PYSCAN:TEST:MONITOR1']
    indict1['MonitorValue'] = [10]
    indict1['MonitorTolerance'] = [0.1]
    indict1['MonitorAction'] = ['WaitAndAbort']
    indict1['MonitorTimeout'] = [15]

    # Insert the screen before measuring - to be defined as a variable depending on the PM.
    indict1['PreAction'] = [["PYSCAN:TEST:PRE1:SET", "PYSCAN:TEST:PRE1:GET", 1, 0, 10]]
    # Remove the screen after the measurement; cycling could possibly be added.
    indict1['PostAction'] = [["PYSCAN:TEST:PRE1:SET", "PYSCAN:TEST:PRE1:GET", 0, 0, 10], 'Restore']

    pyscan.initializeScan(indict1, dal=test_dal)
    outdict = pyscan.startScan()

    if int(Images) == 1:
        sigx = np.array([v[0] for v in outdict['Observable']])
        sigy = np.array([v[1] for v in outdict['Observable']])
        errx = np.ones(len(sigx)) * 0
        erry = errx
        jitx = np.ones(len(sigx)) * 0
        jity = np.ones(len(sigx)) * 0
        rel_jitx = np.ones(len(sigx)) * 0
        rel_jity = np.ones(len(sigx)) * 0
    else:
        sigx = np.zeros(len(outdict['Observable']))
        sigy = np.zeros(len(outdict['Observable']))
        errx = np.zeros(len(outdict['Observable']))
        erry = np.zeros(len(outdict['Observable']))

        for i in range(0, len(outdict['Observable'])):
            sigx[i] = np.mean(np.array([v[0] for v in outdict['Observable'][i]]), axis=0)
            errx[i] = np.std(np.array([v[0] for v in outdict['Observable'][i]]), axis=0)
            sigy[i] = np.mean(np.array([v[1] for v in outdict['Observable'][i]]), axis=0)
            erry[i] = np.std(np.array([v[1] for v in outdict['Observable'][i]]), axis=0)

        jitx = np.zeros(len(outdict['Observable']))
        jity = np.zeros(len(outdict['Observable']))

        for i in range(0, len(outdict['Observable'])):
            jitx[i] = np.std(np.array([v[2] for v in outdict['Observable'][i]]), axis=0)
            jity[i] = np.std(np.array([v[3] for v in outdict['Observable'][i]]), axis=0)

        rel_jitx = 100 * jitx / sigx
        rel_jity = 100 * jity / sigy

    errx[errx == 0] = 1e-99
    erry[erry == 0] = 1e-99

    self.assertTrue(all(compare_channel_value(i1, i2) for i1, i2 in zip(sigx, [1] * 4)),
                    "Unexpected result.")
    self.assertTrue(all(compare_channel_value(i1, i2) for i1, i2 in zip(sigy, [-1] * 4)),
                    "Unexpected result.")
    self.assertTrue(all(compare_channel_value(i1, i2) for i1, i2 in zip(errx, [0.14142] * 4)),
                    "Standard error does not match the expected one.")
    self.assertTrue(all(compare_channel_value(i1, i2) for i1, i2 in zip(erry, [0.14142] * 4)),
                    "Standard error does not match the expected one.")
    self.assertTrue(all(compare_channel_value(i1, i2) for i1, i2 in zip(jitx, [0, 0, 0, 0])),
                    "Unexpected result.")
    self.assertTrue(all(compare_channel_value(i1, i2) for i1, i2 in zip(jity, [0, 0, 0, 0])),
                    "Unexpected result.")
    self.assertTrue(all(compare_channel_value(i1, i2) for i1, i2 in zip(rel_jitx, [0, 0, 0, 0])),
                    "Unexpected result.")
    self.assertTrue(all(compare_channel_value(i1, i2) for i1, i2 in zip(rel_jity, [0, 0, 0, 0])),
                    "Unexpected result.")
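    # Note on the reference value 0.14142 used for errx/erry above: each set of five
    # measurements per position cycles through [0.8, 0.9, 1.0, 1.1, 1.2] (OBS2 through the
    # negated list), whose population standard deviation is sqrt(0.1 / 5) = sqrt(0.02) ≈ 0.141421.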