def test_measure_average(RE, one_bounce_system):
    """Averaged events from measure_average carry the expected values."""
    logger.debug("test_measure_average")
    _, mot, det = one_bounce_system
    # Buffers that the event collectors append into
    centroids = []
    readbacks = []
    col_c = collector(det.name + '_detector_stats2_centroid_x', centroids)
    col_r = collector(mot.name + '_sim_alpha', readbacks)
    # Five averaged reads with a scalar delay between them
    RE(run_wrapper(measure_average([det, mot], delay=0.1, num=5)),
       {'event': [col_c, col_r]})
    assert centroids == [250.] * 5
    assert readbacks == [0.] * 5
    # Reset the buffers before the next run
    centroids.clear()
    readbacks.clear()
    # A delay sequence of length num - 1 is also accepted
    RE(run_wrapper(measure_average([det, mot], delay=[0.1], num=2)),
       {'event': [col_c, col_r]})
    assert centroids == [250.] * 2
    assert readbacks == [0.] * 2
    # A delay sequence of the wrong length must raise ValueError
    with pytest.raises(ValueError):
        RE(run_wrapper(measure_average([det, mot], delay=[0.1], num=3)))
def test_measure_average(RE, one_bounce_system):
    """measure_average produces the expected averaged events."""
    logger.debug("test_measure_average")
    # Load motor and yag
    _, mot, det = one_bounce_system
    # Fake event storage
    centroids = []
    readbacks = []
    col_c = collector(det.name + '_detector_stats2_centroid_x', centroids)
    col_r = collector(mot.name + '_pitch', readbacks)
    # Run the plan: five averaged reads with a scalar delay
    RE(run_wrapper(measure_average([det, mot], delay=0.1, num=5)),
       subs={'event': [col_c, col_r]})
    # Check the collected events
    assert centroids == [250.] * 5
    assert readbacks == [0.] * 5
    # Clear buffers from the last run
    centroids.clear()
    readbacks.clear()
    # Run again with a sequence of delays
    RE(run_wrapper(measure_average([det, mot], delay=[0.1], num=2)),
       subs={'event': [col_c, col_r]})
    assert centroids == [250.] * 2
    assert readbacks == [0.] * 2
    # An incorrectly sized delay sequence is rejected
    with pytest.raises(ValueError):
        RE(run_wrapper(measure_average([det, mot], delay=[0.1], num=3)))
def measure(detectors, motor, step):
    """Move ``motor`` to ``step`` and return the averaged measurement.

    NOTE(review): ``detector``, ``average`` and ``filters`` are closure
    variables from the enclosing scope, not parameters of this function.
    """
    # Perform the step; checkpoint first so the move is resumable
    logger.debug("Measuring average at step %s ...", step)
    yield from checkpoint()
    yield from abs_set(motor, step, wait=True)
    # Average the readings at this position and hand them back
    result = yield from measure_average([motor, detector],
                                        num=average, filters=filters)
    return result
def per_step(detectors, motor, step):
    """One scan step: move ``motor``, average all devices, record in ``df``.

    NOTE(review): ``all_devices``, ``average``, ``filters``, ``all_fields``,
    ``df``, ``args`` and ``kwargs`` are closure variables from the
    enclosing scope.
    """
    # Checkpoint before moving so the step can be resumed/rewound
    yield from checkpoint()
    logger.debug("Measuring average at step {0} ...".format(step))
    yield from abs_set(motor, step, wait=True)
    # Averaged readings of every device at this position
    reads = yield from measure_average(all_devices, num=average,
                                       filters=filters, *args, **kwargs)
    # Store each field's averaged value in the dataframe row for this step
    for field_name in all_fields:
        df.loc[step, field_name] = reads[field_name]
def euclidean_distance(device, device_fields, targets, average=None,
                       filters=None):
    """
    Calculate the euclidean distance between the device_fields and targets.

    Parameters
    ----------
    device : :class:`.Device`
        Device from which to take the value measurements

    device_fields : iterable
        Fields of the device to measure

    targets : iterable
        Target values to calculate the distance from

    average : int, optional
        Number of averages to take for each measurement

    filters : optional
        Filters passed through to :func:`.measure_average`

    Returns
    -------
    distance : float
        The euclidean distance between the device fields and the targets.

    Raises
    ------
    ValueError
        If the number of device fields and targets differ.
    """
    average = average or 1
    # Turn things into lists
    device_fields = as_list(device_fields)
    targets = as_list(targets, len(device_fields))
    # Get the full detector fields
    prep_dev_fields = [field_prepend(fld, device) for fld in device_fields]
    # Make sure the number of device fields and targets is the same.
    # Bug fix: the message previously called len(device_fields, len(targets)),
    # which raised TypeError before the intended ValueError could be built.
    if len(device_fields) != len(targets):
        raise ValueError(
            "Number of device fields and targets must be the same. "
            "Got {0} and {1}".format(len(device_fields), len(targets)))
    # Measure the average
    read = (yield from measure_average([device], num=average,
                                       filters=filters))
    # Get the squared differences between the readings and the targets
    squared_differences = [(read[fld] - target)**2
                           for fld, target in zip(prep_dev_fields, targets)]
    # Combine into euclidean distance
    distance = math.sqrt(sum(squared_differences))
    return distance
def test_measure_average_system(RE, lcls_two_bounce_system):
    """measure_average over several devices reads each device once per save."""
    logger.debug("test_measure_average_system")
    _, m1, m2, y1, y2 = lcls_two_bounce_system
    # Buffers filled by the event collectors
    centroids = []
    readbacks = []
    col_c = collector(y1.name + '_detector_stats2_centroid_x', centroids)
    col_r = collector(m1.name + '_sim_alpha', readbacks)
    RE(run_wrapper(measure_average([y1, m1, y2, m2], delay=0.1, num=5)),
       {'event': [col_c, col_r]})
    assert centroids == [y1.detector._get_readback_centroid_x()] * 5
    assert readbacks == [m1.position] * 5
    # RE.msg_hook is a message collector; tally 'read' messages per device
    # and the number of 'save' messages
    devices = [m1, m2, y1, y2]
    read_counts = [0] * len(devices)
    saves = 0
    for msg in RE.msg_hook.msgs:
        if msg.command == 'read':
            for i, dev in enumerate(devices):
                if msg.obj == dev:
                    read_counts[i] += 1
        elif msg.command == 'save':
            saves += 1
    # At least one event bundle was saved, and every device was read
    # exactly once per saved event
    assert saves > 0
    assert all(count == saves for count in read_counts)
def detector_scaling_walk(df_scan, detector, calib_motors, first_step=0.01,
                          average=None, filters=None, tolerance=1,
                          delay=None, max_steps=5, system=None,
                          drop_missing=True, gradients=None, *args,
                          **kwargs):
    """Performs a walk to the detector value farthest from the current value
    using each of calibration motors, and then determines the motor to
    detector scaling

    Using the inputted scan dataframe, the plan loops through each detector
    field, then finds the value that is farthest from the current value, and
    then performs a walk_to_pixel to that value using the corresponding
    calibration motor. Since the final absolute position does not matter so
    long as it is recorded, if a RuntimeError or LimitError is raised, the
    plan will simply use the current motor position for the scaling
    calculation.

    Parameters
    ----------
    df_scan : pd.DataFrame
        Dataframe containing the results of a centroid scan performed using
        the detector, motor, and calibration motors.

    detector : :class:`.Detector`
        Detector from which to take the value measurements

    calib_motors : iterable, :class:`.Motor`
        Motor to calibrate each detector field with

    first_step : float, optional
        First step to take on each calibration motor when performing the
        correction

    average : int, optional
        Number of averages to take for each measurement

    filters : optional
        Filters passed through to the measurement and walk plans

    delay : float, optional
        Time to wait inbetween reads

    tolerance : float, optional
        Tolerance to use when applying the correction to detector field

    max_steps : int, optional
        Limit the number of steps the correction will take before exiting

    system : iterable, optional
        Extra devices to read along with the detector and calibration motors

    drop_missing : bool, optional
        Choice to include events where event keys are missing

    gradients : float, optional
        Assume an initial gradient for the relationship between detector
        value and calibration motor position

    Returns
    -------
    scaling : list
        List of scales in the units of motor egu / detector value

    start_positions : list
        List of the initial positions of the motors before the walk
    """
    # Detector fields are all scan columns containing the detector name;
    # calibration fields are the "_pre" columns with the suffix stripped
    detector_fields = [col for col in df_scan.columns if detector.name in
                       col]
    calib_fields = [col[:-4] for col in df_scan.columns
                    if col.endswith("_pre")]
    if len(detector_fields) != len(calib_fields):
        raise ValueError("Must have same number of calibration fields as "
                         "detector fields, but got {0} and {1}.".format(
                             len(calib_fields), len(detector_fields)))
    # Perform all the initial necessities: normalize every per-field
    # parameter into a list of length num
    num = len(detector_fields)
    average = average or 1
    calib_motors = as_list(calib_motors)
    first_step = as_list(first_step, num, float)
    tolerance = as_list(tolerance, num)
    gradients = as_list(gradients, num)
    max_steps = as_list(max_steps, num)
    system = as_list(system or []) + calib_motors
    # Define the lists that will hold the scaling and start positions
    scaling, start_positions = [], []
    # Now let's get the detector value to motor position conversion for each
    # fld
    for i, (dfld, cfld, cmotor) in enumerate(
            zip(detector_fields, calib_fields, calib_motors)):
        # Get a list of devices without the cmotor we are inputting
        inp_system = list(system)
        inp_system.remove(cmotor)
        # Store the current motor and detector value and position
        # NOTE(review): this reads the full ``system`` (which still contains
        # cmotor), while ``inp_system`` is only used for walk_to_pixel —
        # confirm that is intentional
        reads = yield from measure_average([detector] + system, num=average,
                                           filters=filters)
        motor_start = reads[cfld]
        dfld_start = reads[dfld]
        # Get the farthest detector value we know we can move to from the
        # current position
        idx_max = abs(df_scan[dfld] - dfld_start).values.argmax()
        # Walk the cmotor to the first pre-correction detector entry
        try:
            logger.debug("Beginning walk to {0} on {1} using {2}".format(
                df_scan.iloc[idx_max][dfld], detector.name, cmotor.name))
            yield from walk_to_pixel(detector, cmotor,
                                     df_scan.iloc[idx_max][dfld], *args,
                                     filters=filters,
                                     gradient=gradients[i],
                                     target_fields=[dfld, cfld],
                                     first_step=first_step[i],
                                     tolerance=tolerance[i],
                                     system=inp_system,
                                     average=average,
                                     max_steps=max_steps[i],
                                     **kwargs)
        # A failed walk is not fatal: the scaling only needs wherever the
        # motor ended up, so log and fall through to the post-walk read
        except RuntimeError:
            logger.warning(
                "walk_to_pixel raised a RuntimeError for motor '{0}'"
                ". Using its current position {1} for scale "
                "calulation.".format(cmotor.desc, cmotor.position))
        except LimitError:
            logger.warning("walk_to_pixel tried to exceed the limits of motor "
                           "'{0}'. Using current position '{1}' for scale "
                           "calculation.".format(cmotor.desc, cmotor.position))
        # Get the positions and values we moved to
        reads = (yield from measure_average([detector] + system, num=average,
                                            filters=filters))
        motor_end = reads[cfld]
        dfld_end = reads[dfld]
        # Now lets find the conversion from signal value to motor distance
        scaling.append((motor_end - motor_start) / (dfld_end - dfld_start))
        # Add the starting position to the motor start list
        start_positions.append(motor_start)
    # Return the final scaling list
    return scaling, start_positions