def test_get_progress(self):
    """
    Test getMovementProgress function behaves as expected
    """
    start = {'x': 0, 'y': 0, 'z': 0}
    end = {'x': 2, 'y': 2, 'z': 2}

    # Points on (or nearly on) the segment should report the travelled ratio
    on_path = (
        {'x': 1, 'y': 1, 'z': 1},                    # exactly halfway
        {'x': .998, 'y': .999, 'z': .999},           # slightly off the line
    )
    for point in on_path:
        progress = getMovementProgress(point, start, end)
        self.assertTrue(util.almost_equal(progress, 0.5, rtol=RTOL_PROGRESS))

    # Points away from the line should report no progress at all
    off_path = (
        {'x': 3, 'y': 3, 'z': 3},
        {'x': 1, 'y': 1, 'z': 3},
        {'x': -1, 'y': 0, 'z': 0},
    )
    for point in off_path:
        self.assertIsNone(getMovementProgress(point, start, end))
def getMovementProgress(current_pos, start_pos, end_pos):
    """
    Compute how far along the path between the start and end positions of a
    stage movement (such as LOADING to IMAGING) the current position lies.
    A position too far from the line between start and end is considered to be
    out of the path.
    :param current_pos: (dict str->float) Current position of the stage
    :param start_pos: (dict str->float) A position to start the movement from
    :param end_pos: (dict str->float) A position to end the movement to
    :return: (0<=float<=1, or None) Ratio of the progress, None if it's far
      away from the path
    """
    def _dist(p, q):
        # Euclidean distance computed only over the axes both points share
        common = p.keys() & q.keys()
        return scipy.spatial.distance.euclidean(
            numpy.array([q[a] for a in common]),
            numpy.array([p[a] for a in common]))

    travelled = _dist(start_pos, current_pos)
    remaining = _dist(current_pos, end_pos)
    total = _dist(start_pos, end_pos)
    if total == 0:  # start == end: the movement is trivially complete
        return 1
    # The current position is on the segment iff
    # d(start, current) + d(current, end) ≈ d(start, end)
    if not util.almost_equal(travelled + remaining, total, rtol=RTOL_PROGRESS):
        return None
    # Clip, in case travelled is slightly larger than total due to rounding
    return min(travelled / total, 1.0)
def _onUpdateTriggerDelayMD(self, evt):
    """
    Callback method for trigger delay ctrl GUI element. Overwrites the
    triggerDelay value in the MD after a new value was requested via the GUI.
    :param evt: (wx event) GUI event; propagated further via Skip()
    """
    evt.Skip()

    cur_timeRange = self.streak_unit.timeRange.value
    requested_triggerDelay = self.ctrl_triggerDelay.GetValue()
    # get a copy of MD
    # NOTE(review): this assumes getMetadata() returns a copy — the dict is
    # mutated below before being written back; confirm.
    trigger2delay_MD = self.streak_delay.getMetadata()[model.MD_TIME_RANGE_TO_DELAY]

    # check if key already exists (prevent creating new key due to floating point issues)
    key = util.find_closest(cur_timeRange, trigger2delay_MD.keys())
    if util.almost_equal(key, cur_timeRange):
        # Replace the current delay value with the requested for an already
        # existing timeRange in the dict. This avoids duplication of keys,
        # which are only different because of floating point issues.
        trigger2delay_MD[key] = requested_triggerDelay
    else:
        # Unknown time range: record it anyway, but warn as it should normally
        # match one of the device's .timeRange choices.
        trigger2delay_MD[cur_timeRange] = requested_triggerDelay
        logging.warning("A new entry %s was added to MD_TIME_RANGE_TO_DELAY, "
                        "which is not in the device .timeRange choices.", cur_timeRange)

    # check the number of keys in the dict is same as choices for VA
    if len(trigger2delay_MD.keys()) != len(self.streak_unit.timeRange.choices):
        logging.warning("MD_TIME_RANGE_TO_DELAY has %d entries, while the device .timeRange has %d choices.",
                        len(trigger2delay_MD.keys()), len(self.streak_unit.timeRange.choices))

    self.streak_delay.updateMetadata({model.MD_TIME_RANGE_TO_DELAY: trigger2delay_MD})
    # Note: updateMetadata should here never raise an exception as the
    # UnitFloatCtrl already catches errors regarding type and out-of-range inputs

    # update txt displayed in GUI
    self._onUpdateTriggerDelayGUI("Calibration not saved yet", odemis.gui.FG_COLOUR_WARNING)
def assert_pos_almost_equal(actual, expected, *args, **kwargs):
    """
    Assert that two stage positions cover the same axes and have almost equal
    coordinates on each of them.
    Extra positional/keyword arguments are forwarded to util.almost_equal().
    :raise AssertionError: if the axes differ, or any coordinate differs
    """
    if set(expected.keys()) != set(actual.keys()):
        raise AssertionError("Dimensions of position do not match: %s != %s" %
                             (actual.keys(), expected.keys()))
    for axis in expected.keys():
        if util.almost_equal(actual[axis], expected[axis], *args, **kwargs):
            continue
        raise AssertionError("Position %s != %s" % (actual, expected))
def test_simple(self):
    """ Check almost_equal() on pairs that should/should not compare equal. """
    cases = {
        (0., 0): True,
        (-5, -5.): True,
        (1., 1. - 1e-9): True,
        (1., 1. - 1e-3): False,
        (1., 1. + 1e-3): False,
        (-5e-8, -5e-8 + 1e-19): True,
        (5e18, 5e18 + 1): True,
    }
    for args, expected in cases.items():
        self.assertEqual(util.almost_equal(*args), expected,
                         "Failed to get correct output for %s" % (args,))
def cb_set(value, ctrl=value_ctrl, unit=unit):
    # Select the combobox entry whose client data matches the value (floats
    # are compared with tolerance); otherwise display the value as free text.
    for i in range(ctrl.GetCount()):
        data = ctrl.GetClientData(i)
        matches = data == value
        if not matches and isinstance(value, float):
            matches = util.almost_equal(data, value)
        if matches:
            logging.debug("Setting ComboBox value to %s", ctrl.Items[i])
            ctrl.SetSelection(i)
            break
    else:
        logging.warning("No existing label found for value %s", value)
        # entering value as free text
        ctrl.SetValue(value_to_str(value, unit))
def acquire_volts(volts, detector):
    """
    Acquire one spectrum per requested acceleration voltage.
    volts (list of floats > 0): voltage in kV
    detector (str): role of the spectrometer to use
    returns (list of DataArray): all the spectra, in order
    """
    ebeam = model.getComponent(role="e-beam")
    sed = model.getComponent(role="se-detector")
    spmt = model.getComponent(role=detector)
    hw_settings = save_hw_settings(ebeam)

    # Go to spot mode (ie, res = 1x1)
    if ebeam.resolution.value != (1, 1):
        ebeam.resolution.value = (1, 1)
        ebeam.translation.value = (0, 0)  # at the center of the FoV
    else:
        logging.info("Leaving the e-beam in spot mode at %s", ebeam.translation.value)

    ebeam.dwellTime.value = 0.1

    try:
        # Activate the e-beam
        sed.data.subscribe(discard_data)

        das = []
        for vstr in volts:
            v = float(vstr) * 1000  # input is kV, the VA is in V
            ebeam.accelVoltage.value = v
            # Re-reading the VA shows the value actually accepted; warn if the
            # hardware did not take the requested voltage.
            if not util.almost_equal(ebeam.accelVoltage.value, v):
                logging.warning(
                    "Voltage requested at %g kV, but e-beam set at %g kV",
                    v / 1000, ebeam.accelVoltage.value / 1000)
            else:
                logging.info("Acquiring at %g kV", v / 1000)

            # Acquire one spectrum
            spec = spmt.data.get()
            # Add dimensions to make it a spectrum (X, first dim -> C, 5th dim)
            spec.shape = (spec.shape[-1], 1, 1, 1, 1)

            # Add some useful metadata
            spec.metadata[model.MD_DESCRIPTION] = "Spectrum at %g kV" % (v / 1000)
            spec.metadata[model.MD_EBEAM_VOLTAGE] = v
            # TODO: store the spot position in MD_POS
            das.append(spec)

    finally:
        sed.data.unsubscribe(discard_data)  # Just to be sure
        resume_hw_settings(ebeam, hw_settings)

    return das
def assert_pos_almost_equal(actual, expected, match_all=True, *args, **kwargs):
    """
    Assert that two stage positions have almost equal coordinates.
    Extra positional/keyword arguments are forwarded to util.almost_equal().
    :param match_all: (bool) if False, only the expected keys are checked, and
      actual can have more keys
    :raise AssertionError: if the axes differ (match_all) or a coordinate differs
    """
    if match_all and set(expected.keys()) != set(actual.keys()):
        raise AssertionError("Dimensions of position do not match: %s != %s" %
                             (list(actual.keys()), list(expected.keys())))
    for axis in expected.keys():
        if util.almost_equal(actual[axis], expected[axis], *args, **kwargs):
            continue
        raise AssertionError("Position %s != %s" % (actual, expected))
def _updatePosition(self):
    """
    Update the position VA, snapping the physical position to the nearest
    known (supported) position.
    """
    phys = self._position[self._axis]
    snapped = util.find_closest(phys, self._positions.keys())
    # if it is an unsupported position report the nearest supported one
    if not util.almost_equal(phys, snapped):
        logging.warning("Reporting axis %s @ %s (known position), while physical axis %s @ %s",
                        self._axis, snapped, self._caxis, phys)
    reported = {self._axis: snapped}
    logging.debug("reporting position %s", reported)
    self.position._set_value(reported, force_write=True)
def _check_fov(self, das, sfov):
    """
    Check the estimated FoV against the FoV measured on the acquired data.
    das: list of DataArrays
    sfov: previous estimate for the fov
    return: the FoV to use (the measured one, if it differs from the estimate)
    """
    measured = [self._get_fov(d) for d in das]
    # NOTE: indices are swapped — presumably _get_fov() returns (Y, X) while
    # the FoV is handled as (X, Y); confirm against _get_fov().
    actual = (min(f[1] for f in measured), min(f[0] for f in measured))
    same = all(util.almost_equal(e, a) for e, a in zip(sfov, actual))
    if not same:
        logging.warning("Unexpected min FoV = %s, instead of %s", actual, sfov)
        sfov = actual
    return sfov
def get_time_range_to_trigger_delay(data, timeRange_choices, triggerDelay_range):
    """
    Reads the time range and trigger delay values from a csv object.
    Checks values for validity.
    :parameter data: (csv.reader object) calibration file
    :parameter timeRange_choices: (frozenset) choices possible for timeRange VA
    :parameter triggerDelay_range: (tuple) range possible for trigger delay values
    :return: (dict) new dictionary containing the loaded time range to trigger delay info
    :raise ValueError: if a value cannot be parsed, is out of range, or the set
      of time ranges doesn't match the hardware choices
    """
    new_dict = {}
    for timeRange, delay in data:
        try:
            timeRange = float(timeRange)
            delay = float(delay)
        except ValueError:
            raise ValueError(
                "Trigger delay %s and/or time range %s is not of type float. "
                "Please check calibration file for trigger delay." % (delay, timeRange))

        # check delay in range allowed
        if not triggerDelay_range[0] <= delay <= triggerDelay_range[1]:
            raise ValueError(
                "Trigger delay %s corresponding to time range %s is not in range %s. "
                "Please check the calibration file for the trigger delay."
                % (delay, timeRange, triggerDelay_range))

        # check timeRange is in possible choices for timeRange on HW
        choice = find_closest(timeRange, timeRange_choices)
        if not almost_equal(timeRange, choice):
            # Fixed format-spec typo: "% s" -> "%s" (the stray space was parsed
            # as a conversion flag, not as literal text)
            raise ValueError(
                "Time range %s found in calibration file is not a possible choice "
                "for the time range of the streak unit. "
                "Please modify csv file so it fits the possible choices for the "
                "time range of the streak unit. "
                "Values in file must be of format timeRange:triggerDelay (per line)." % timeRange)

        new_dict[timeRange] = delay

    # check all time ranges are there
    if len(new_dict) != len(timeRange_choices):
        raise ValueError(
            "The total number of %s time ranges in the loaded calibration file does not "
            "match the requested number of %s time ranges."
            % (len(new_dict), len(timeRange_choices)))

    return new_dict
def acquire_volts(volts, detector):
    """
    Acquire one spectrum for each requested e-beam acceleration voltage.
    volts (list of floats > 0): voltage in kV
    detector (str): role of the spectrometer to use
    returns (list of DataArray): all the spectra, in order
    """
    ebeam = model.getComponent(role="e-beam")
    sed = model.getComponent(role="se-detector")
    spmt = model.getComponent(role=detector)
    hw_settings = save_hw_settings(ebeam)

    # Go to spot mode (ie, res = 1x1)
    if ebeam.resolution.value != (1, 1):
        ebeam.resolution.value = (1, 1)
        ebeam.translation.value = (0, 0)  # at the center of the FoV
    else:
        logging.info("Leaving the e-beam in spot mode at %s", ebeam.translation.value)

    ebeam.dwellTime.value = 0.1

    try:
        # Activate the e-beam
        sed.data.subscribe(discard_data)

        das = []
        for vstr in volts:
            v = float(vstr) * 1000  # kV -> V, as the VA works in volts
            ebeam.accelVoltage.value = v
            # Re-read the VA: warn if the hardware clamped/rejected the value
            if not util.almost_equal(ebeam.accelVoltage.value, v):
                logging.warning("Voltage requested at %g kV, but e-beam set at %g kV",
                                v / 1000, ebeam.accelVoltage.value / 1000)
            else:
                logging.info("Acquiring at %g kV", v / 1000)

            # Acquire one spectrum
            spec = spmt.data.get()
            # Add dimensions to make it a spectrum (X, first dim -> C, 5th dim)
            spec.shape = (spec.shape[-1], 1, 1, 1, 1)

            # Add some useful metadata
            spec.metadata[model.MD_DESCRIPTION] = "Spectrum at %g kV" % (v / 1000)
            spec.metadata[model.MD_EBEAM_VOLTAGE] = v
            # TODO: store the spot position in MD_POS
            das.append(spec)

    finally:
        sed.data.unsubscribe(discard_data)  # Just to be sure
        resume_hw_settings(ebeam, hw_settings)

    return das
def test_simple(self):
    """ almost_equal() should tolerate rounding noise only. """
    cases = [
        ((0., 0), True),
        ((-5, -5.), True),
        ((1., 1. - 1e-9), True),
        ((1., 1. - 1e-3), False),
        ((1., 1. + 1e-3), False),
        ((-5e-8, -5e-8 + 1e-19), True),
        ((5e18, 5e18 + 1), True),
    ]
    for args, expected in cases:
        self.assertEqual(util.almost_equal(*args), expected,
                         "Failed to get correct output for %s" % (args,))
def read_trigger_delay_csv(filename, time_choices, trigger_delay_range):
    """
    Read the MD_TIME_RANGE_TO_DELAY from a CSV file, and check its validity
    based on the hardware.
    filename (str): the path to file
    time_choices (set): choices possible for timeRange VA
    trigger_delay_range (float, float): min/max value of the trigger delay
    return (dict float -> float): new dictionary containing the loaded time
      range to trigger delay info
    raise ValueError: if the data of the CSV file cannot be parsed or doesn't
      fit the hardware
    raise IOError: if the file doesn't exist
    """
    tr2d = {}
    with open(filename, 'r', newline='') as csvfile:
        calibFile = csv.reader(csvfile, delimiter=':')
        for time_range, delay in calibFile:
            try:
                time_range = float(time_range)
                delay = float(delay)
            except ValueError:
                raise ValueError(
                    "Trigger delay %s and/or time range %s is not of type float. "
                    "Please check calibration file for trigger delay." % (delay, time_range))

            # check delay in range allowed
            if not trigger_delay_range[0] <= delay <= trigger_delay_range[1]:
                raise ValueError(
                    "Trigger delay %s corresponding to time range %s is not in range %s. "
                    "Please check the calibration file for the trigger delay."
                    % (delay, time_range, trigger_delay_range))

            # check timeRange is in possible choices for timeRange on HW
            time_range_hw = find_closest(time_range, time_choices)
            if not almost_equal(time_range, time_range_hw):
                # Fixed format-spec typo: "% s" -> "%s" (the stray space was
                # parsed as a conversion flag, not as literal text)
                raise ValueError(
                    "Time range %s found in calibration file is not a possible choice "
                    "for the time range of the streak unit. "
                    "Please modify CSV file so it fits the possible choices for the "
                    "time range of the streak unit. "
                    "Values in file must be of format timeRange:triggerDelay (per line)." % time_range)

            # store under the hardware value to avoid later float mismatch
            tr2d[time_range_hw] = delay

    # check all time ranges are there
    if len(tr2d) != len(time_choices):
        raise ValueError(
            "The total number of %s time ranges in the loaded calibration file does not "
            "match the requested number of %s time ranges." % (len(tr2d), len(time_choices)))

    return tr2d
def cb_set(value, ctrl=value_ctrl, u=unit, acc=accuracy):
    # Pick the combobox entry whose client data equals the value (floats are
    # compared with tolerance); fall back to showing the value as free text.
    for i in range(ctrl.Count):
        d = ctrl.GetClientData(i)
        same = d == value
        if not same and isinstance(d, float) and isinstance(value, float):
            same = util.almost_equal(d, value)
        if same:
            logging.debug("Setting combobox value to %s", ctrl.Items[i])
            ctrl.SetSelection(i)
            break
    else:
        logging.debug("No existing label found for value %s in combobox ctrl %d",
                      value, id(ctrl))
        # entering value as free text
        ctrl.SetValue(readable_str(value, u, sig=acc))
def get_time_range_to_trigger_delay(data, timeRange_choices, triggerDelay_range):
    """
    Reads the time range and trigger delay values from a csv object.
    Checks values for validity.
    :parameter data: (csv.reader object) calibration file
    :parameter timeRange_choices: (frozenset) choices possible for timeRange VA
    :parameter triggerDelay_range: (tuple) range possible for trigger delay values
    :return: (dict) new dictionary containing the loaded time range to trigger delay info
    :raise ValueError: if a value cannot be parsed, is out of range, or the set
      of time ranges doesn't match the hardware choices
    """
    new_dict = {}
    for timeRange, delay in data:
        try:
            timeRange = float(timeRange)
            delay = float(delay)
        except ValueError:
            raise ValueError("Trigger delay %s and/or time range %s is not of type float. "
                             "Please check calibration file for trigger delay." % (delay, timeRange))

        # check delay in range allowed
        # Fixed: the message used to hard-code "(0, 1)"; report the actual
        # allowed range received as parameter instead.
        if not triggerDelay_range[0] <= delay <= triggerDelay_range[1]:
            raise ValueError("Trigger delay %s corresponding to time range %s is not in range %s. "
                             "Please check the calibration file for the trigger delay."
                             % (delay, timeRange, triggerDelay_range))

        # check timeRange is in possible choices for timeRange on HW
        choice = find_closest(timeRange, timeRange_choices)
        if not almost_equal(timeRange, choice):
            # Fixed format-spec typo: "% s" -> "%s"
            raise ValueError("Time range %s found in calibration file is not a possible choice "
                             "for the time range of the streak unit. "
                             "Please modify csv file so it fits the possible choices for the "
                             "time range of the streak unit. "
                             "Values in file must be of format timeRange:triggerDelay (per line)." % timeRange)

        new_dict[timeRange] = delay

    # check all time ranges are there
    if len(new_dict) != len(timeRange_choices):
        raise ValueError("The total number of %s time ranges in the loaded calibration file does not "
                         "match the requested number of %s time ranges."
                         % (len(new_dict), len(timeRange_choices)))

    return new_dict
def cb_set(value, va=va, ctrl=value_ctrl, u=unit, acc=accuracy):
    # The VA is re-read (instead of trusting the argument) because this runs
    # via CallAfter(): with several rapid changes the calls might arrive out
    # of chronological order, and only the VA holds the latest value.
    value = va.value
    for i in range(ctrl.GetCount()):
        d = ctrl.GetClientData(i)
        is_match = d == value
        if not is_match and isinstance(value, float) and isinstance(d, float):
            is_match = util.almost_equal(d, value)
        if is_match:
            logging.debug("Setting combobox value to %s", ctrl.Items[i])
            ctrl.SetSelection(i)
            break
    else:
        logging.debug("No existing label found for value %s in combobox ctrl %d",
                      value, id(ctrl))
        # fall back to entering the value as free text
        ctrl.SetValue(value_to_str(value, u, acc))
def assert_pos_not_almost_equal(actual, expected, match_all=True, *args, **kwargs):
    """
    Assert that two stage positions do NOT have almost equal coordinates, i.e.
    at least one of the axes has a different value.
    Extra positional/keyword arguments are forwarded to util.almost_equal().
    :param match_all: (bool) if False, only the expected keys are checked, and
      actual can have more keys
    :raise AssertionError: if the axes differ (match_all) or all coordinates
      are almost equal
    """
    if match_all and set(expected.keys()) != set(actual.keys()):
        raise AssertionError("Dimensions of position do not match: %s != %s" %
                             (list(actual.keys()), list(expected.keys())))
    # A single differing axis is enough; only all-equal is a failure
    if all(util.almost_equal(actual[a], expected[a], *args, **kwargs) for a in expected.keys()):
        raise AssertionError("Position %s == %s" % (actual, expected))
def _setTimeRange(self, value):
    """
    Setter for the timeRange VA: records the value in the metadata and updates
    the trigger delay from the calibration table, when one is available.
    :parameter value: (float) value to be set
    :return: (float) current time range
    """
    logging.debug("Reporting time range %s for streak unit.", value)
    self._metadata[model.MD_STREAK_TIMERANGE] = value

    # set corresponding trigger delay
    calib = self.parent._delaybox._metadata.get(model.MD_TIME_RANGE_TO_DELAY)
    if calib:
        nearest = util.find_closest(value, calib.keys())
        if util.almost_equal(nearest, value):
            self.parent._delaybox.triggerDelay.value = calib[nearest]
        else:
            logging.warning("Time range %s is not a key in MD for time range to "
                            "trigger delay calibration" % value)

    return value
def hfw_choices(comp, va, conf):
    """
    Return a set of HFW choices

    If the VA has predefined choices, return those. Otherwise calculate the
    choices using the range of the VA.
    """
    try:
        return va.choices
    except (NotApplicableError, AttributeError):
        pass

    # No predefined choices: generate a x2/x5/x10 series covering the range
    mn, mx = va.range
    cur_val = va.value
    choices = {mn}
    base = 10 ** int(math.log10(mn) - 1)  # starting point (might be even less than mn)
    while base < mx and max(choices) < mx:
        for factor in (2, 5, 10):
            v = base * factor
            if v >= mx:
                break
            if mn < v:
                # Reuse the current value when it is (almost) one of the steps,
                # to avoid showing twice the same entry, only different due to
                # rounding.
                choices.add(cur_val if util.almost_equal(v, cur_val) else v)
        base *= 10
    choices.add(mx)
    # The current value is deliberately not added: it's a range, so any other
    # value can happen anyway and the GUI must handle such values regardless.
    return choices
def hfw_choices(comp, va, conf):
    """
    Return a set of HFW choices

    If the VA has predefined choices, return those. Otherwise calculate the
    choices using the range of the VA.
    """
    try:
        return va.choices
    except AttributeError:
        pass

    # Build a x2/x5/x10 series spanning the VA range
    vmin, vmax = va.range
    current = va.value
    opts = {vmin}
    decade = 10 ** int(math.log10(vmin) - 1)  # starting point (may be below vmin)
    while decade < vmax and max(opts) < vmax:
        for mult in (2, 5, 10):
            candidate = decade * mult
            if candidate >= vmax:
                break
            if vmin < candidate:
                if util.almost_equal(candidate, current):
                    # Avoid listing twice (almost) the same value when the
                    # current value matches a step up to rounding.
                    opts.add(current)
                else:
                    opts.add(candidate)
        decade *= 10
    opts.add(vmax)
    # The current value is deliberately not added: it's a range, so the GUI
    # must be able to handle arbitrary values anyway.
    return opts
def getMovementProgress(current_pos, start_pos, end_pos):
    """
    Compute how far along the path between the start and end positions of a
    stage movement (such as LOADING to IMAGING) the current position lies.
    A position too far from the line between start and end is considered to be
    out of the path.
    :param current_pos: (dict str->float) Current position of the stage
    :param start_pos: (dict str->float) A position to start the movement from
    :param end_pos: (dict str->float) A position to end the movement to
    :return: (0<=float<=1, or None) Ratio of the progress, None if it's far
      away from the path
    """
    d_start = _getDistance(start_pos, current_pos)
    d_end = _getDistance(current_pos, end_pos)
    d_total = _getDistance(start_pos, end_pos)
    if d_total == 0:  # start and end are the same position
        return 1
    # The current position is on the segment iff
    # d(start, current) + d(current, end) ≈ d(start, end)
    if not util.almost_equal(d_start + d_end, d_total, rtol=RTOL_PROGRESS):
        return None
    # Clip, in case d_start is slightly larger than d_total due to rounding
    return min(d_start / d_total, 1.0)
def getMovementProgress(current_pos, start_pos, end_pos):
    """
    Compute the position on the path between start and end positions of a stage
    movement (such as LOADING to IMAGING).
    If it's too far from the line between the start and end positions, then
    it's considered out of the path.
    :param current_pos: (dict str->float) Current position of the stage
    :param start_pos: (dict str->float) A position to start the movement from
    :param end_pos: (dict str->float) A position to end the movement to
    :return: (0<=float<=1, or None) Ratio of the progress, None if it's far
      away from the path
    :raise ValueError: if any of the positions misses one of the x, y, z axes
    """
    def get_distance(start, end):
        # Calculate the euclidean distance between two 3D points
        sp = numpy.array([start['x'], start['y'], start['z']])
        ep = numpy.array([end['x'], end['y'], end['z']])
        return scipy.spatial.distance.euclidean(ep, sp)

    def check_axes(pos):
        if not {'x', 'y', 'z'}.issubset(set(pos.keys())):
            raise ValueError(
                "Missing x,y,z axes in {} for correct distance measurement.".format(pos))

    # Check we have the x,y,z axes in all points
    check_axes(current_pos)
    check_axes(start_pos)
    check_axes(end_pos)

    # Get distance for current point in respect to start and end
    from_start = get_distance(start_pos, current_pos)
    to_end = get_distance(current_pos, end_pos)
    total_length = get_distance(start_pos, end_pos)
    # Fixed: guard against start == end, which previously caused a
    # ZeroDivisionError below (the other variants of this function have the
    # same guard). A zero-length movement is trivially complete.
    if total_length == 0:
        return 1

    # Check if current position is on the line from start to end position
    # That would happen if start_to_current + current_to_end = total distance from start to end
    if util.almost_equal((from_start + to_end), total_length, rtol=RTOL_PROGRESS):
        return min(from_start / total_length, 1.0)  # Clip in case from_start slightly > total_length
    else:
        return None
def _get_center_pxs(self, rep, sub_shape, datatl, pxs): """ Computes the center and pixel size of the entire data based on the top-left data acquired. rep (int, int): number of pixels (tiles) in X, Y sub_shape (int, int): number of sub-pixels in a pixel datatl (DataArray): first data array acquired pxs (float, float): the pixel in m of a pixel return: center (tuple of floats): position in m of the whole data pxs (tuple of floats): pixel size in m of the sub-pixels """ # Compute center of area, based on the position of the first point (the # position of the other points can be wrong due to drift correction) center_tl = datatl.metadata[model.MD_POS] dpxs = datatl.metadata[model.MD_PIXEL_SIZE] tl = (center_tl[0] - (dpxs[0] * (datatl.shape[-1] - 1)) / 2, center_tl[1] + (dpxs[1] * (datatl.shape[-2] - 1)) / 2) logging.debug("Computed center of top-left pixel at at %s", tl) # Note: we don't rely on the MD_PIXEL_SIZE, because if the e-beam was in # spot mode (res 1x1), the scale is not always correct, which gives an # incorrect metadata. sub_pxs = pxs[0] / sub_shape[0], pxs[1] / sub_shape[1] trep = rep[0] * sub_shape[0], rep[1] * sub_shape[1] center = (tl[0] + (sub_pxs[0] * (trep[0] - 1)) / 2, tl[1] - (sub_pxs[1] * (trep[1] - 1)) / 2) logging.debug("Computed data width to be %s x %s, with center at %s", pxs[0] * rep[0], pxs[1] * rep[1], center) if numpy.prod(datatl.shape) > 1: # pxs and dpxs ought to be identical if not util.almost_equal(sub_pxs[0], dpxs[0]): logging.warning("Expected pixel size of %s, but data has %s", sub_pxs, dpxs) return center, sub_pxs
def __init__(self, name, data, *args, **kwargs):
    """
    name (string)
    data (model.DataArray(Shadow) of shape (YX) or list of such DataArray(Shadow)).
      The metadata MD_POS, MD_AR_POLE and MD_POL_MODE should be provided
    """
    # NOTE(review): collections.Iterable was removed in Python 3.10; modern
    # code should use collections.abc.Iterable — confirm the supported Python
    # versions.
    if not isinstance(data, collections.Iterable):
        data = [data]  # from now it's just a list of DataArray

    # TODO: support DAS, as a "delayed loading" by only calling .getData()
    # when the projection for the particular data needs to be computed (or
    # .raw needs to be accessed?)
    # Ensure all the data is a DataArray, as we don't handle (yet) DAS
    data = [d.getData() if isinstance(d, model.DataArrayShadow) else d for d in data]

    # find positions of each acquisition
    # (float, float, str or None) -> DataArray: position on SEM + polarization -> data
    self._pos = {}
    sempositions = set()  # (float, float): all distinct e-beam positions seen
    polpositions = set()  # str: all distinct polarization modes seen
    for d in data:
        try:
            sempos_cur = d.metadata[MD_POS]
            # When reading data: floating point error (slightly different keys
            # for same ebeam pos) -> check if there is already a position
            # specified, which is very close by (and therefore the same ebeam
            # pos) and replace with that ebeam position (e.g. all polarization
            # positions for the same ebeam positions will have exactly the
            # same ebeam pos)
            for sempos in sempositions:
                if almost_equal(sempos_cur[0], sempos[0]) and almost_equal(sempos_cur[1], sempos[1]):
                    sempos_cur = sempos
                    break
            self._pos[sempos_cur + (d.metadata.get(MD_POL_MODE, None),)] = img.ensure2DImage(d)
            sempositions.add(sempos_cur)
            if MD_POL_MODE in d.metadata:
                polpositions.add(d.metadata.get(MD_POL_MODE))
        except KeyError:
            # data without MD_POS cannot be placed -> drop it
            logging.info("Skipping DataArray without known position")

    # Cached conversion of the CCD image to polar representation
    # TODO: automatically fill it in a background thread
    self._polar = {}  # dict tuple (float, float, str or None) -> DataArray

    # SEM position VA
    # SEM position displayed, (None, None) == no point selected (x, y)
    self.point = model.VAEnumerated((None, None),
                                    choices=frozenset([(None, None)] + list(sempositions)))

    if self._pos:
        # Pick one point, e.g., top-left
        bbtl = (min(x for x, y in sempositions if x is not None),
                min(y for x, y in sempositions if y is not None))

        # top-left point is the closest from the bounding-box top-left
        def dis_bbtl(v):
            try:
                return math.hypot(bbtl[0] - v[0], bbtl[1] - v[1])
            except TypeError:
                return float("inf")  # for None, None
        self.point.value = min(sempositions, key=dis_bbtl)

    # no need for init=True, as Stream.__init__ will update the image
    self.point.subscribe(self._onPoint)

    # polarization VA
    # check if any polarization analyzer data, (None) == no analyzer data (pol)
    # NOTE(review): dict.keys() is not subscriptable in Python 3 (and raises
    # IndexError in Python 2 when self._pos is empty) — this looks
    # Python-2-only; confirm. Also the two identical conditions below could be
    # merged into one.
    if self._pos.keys()[0][-1]:
        # use first entry in acquisition to populate VA (acq could have 1 or 6 pol pos)
        self.polarization = model.VAEnumerated(self._pos.keys()[0][-1], choices=polpositions)
    if self._pos.keys()[0][-1]:
        self.polarization.subscribe(self._onPolarization)

    if "acq_type" not in kwargs:
        kwargs["acq_type"] = model.MD_AT_AR

    super(StaticARStream, self).__init__(name, list(self._pos.values()), *args, **kwargs)
def getFullImage(self):
    """
    return (2D DataArray): same dtype as the tiles, with shape corresponding
      to the bounding box.
    """
    tiles = self.tiles

    # Compute the bounding box of each tile and the global bounding box
    # Get a fixed pixel size by using the first one
    # TODO: use the mean, in case they are all slightly different due to
    # correction?
    pxs = tiles[0].metadata[model.MD_PIXEL_SIZE]
    tbbx_phy = []  # tuples of ltrb in physical coordinates
    for t in tiles:
        c = t.metadata[model.MD_POS]
        w = t.shape[-1], t.shape[-2]  # (X, Y) size in pixels
        if not util.almost_equal(pxs[0], t.metadata[model.MD_PIXEL_SIZE][0], rtol=0.01):
            logging.warning("Tile @ %s has a unexpected pixel size (%g vs %g)",
                            c, t.metadata[model.MD_PIXEL_SIZE][0], pxs[0])
        bbx = (c[0] - (w[0] * pxs[0] / 2), c[1] - (w[1] * pxs[1] / 2),
               c[0] + (w[0] * pxs[0] / 2), c[1] + (w[1] * pxs[1] / 2))
        tbbx_phy.append(bbx)
    gbbx_phy = (min(b[0] for b in tbbx_phy), min(b[1] for b in tbbx_phy),
                max(b[2] for b in tbbx_phy), max(b[3] for b in tbbx_phy))

    # Compute the bounding-boxes in pixel coordinates
    tbbx_px = []
    # that's the origin (Y is max as Y is inverted)
    glt = gbbx_phy[0], gbbx_phy[3]
    for bp, t in zip(tbbx_phy, tiles):
        lt = (int(round((bp[0] - glt[0]) / pxs[0])),
              int(round(-(bp[3] - glt[1]) / pxs[1])))
        w = t.shape[-1], t.shape[-2]
        bbx = (lt[0], lt[1], lt[0] + w[0], lt[1] + w[1])
        tbbx_px.append(bbx)
    gbbx_px = (min(b[0] for b in tbbx_px), min(b[1] for b in tbbx_px),
               max(b[2] for b in tbbx_px), max(b[3] for b in tbbx_px))

    # by construction the global left/top corner is (0, 0)
    assert gbbx_px[0] == gbbx_px[1] == 0
    # NOTE(review): tbbx_px[-2:] is the last two bounding boxes, so
    # numpy.sum() adds all 8 coordinates — this heuristic looks suspicious;
    # confirm the intended operands.
    if numpy.greater(gbbx_px[-2:], 4 * numpy.sum(tbbx_px[-2:])).any():
        # Overlap > 50% or missing tiles
        logging.warning("Global area much bigger than sum of tile areas")

    # Paste each tile
    logging.debug("Generating global image of size %dx%d px", gbbx_px[-2], gbbx_px[-1])
    im = numpy.empty((gbbx_px[-1], gbbx_px[-2]), dtype=tiles[0].dtype)
    # Use minimum of the values in the tiles for background
    im[:] = numpy.amin(tiles)
    for b, t in zip(tbbx_px, tiles):
        im[b[1]:b[1] + t.shape[0], b[0]:b[0] + t.shape[1]] = t
        # TODO: border

    # Update metadata
    # TODO: check this is also correct based on lt + half shape * pxs
    c_phy = ((gbbx_phy[0] + gbbx_phy[2]) / 2, (gbbx_phy[1] + gbbx_phy[3]) / 2)
    md = tiles[0].metadata.copy()
    md[model.MD_POS] = c_phy

    return model.DataArray(im, md)
def getFullImage(self):
    """
    Assemble all the tiles into one image, blending the overlapping regions.
    return (2D DataArray): same dtype as the tiles, with shape corresponding
      to the bounding box.
    """
    tiles = self.tiles

    # Compute the bounding box of each tile and the global bounding box
    # Get a fixed pixel size by using the first one
    # TODO: use the mean, in case they are all slightly different due to
    # correction?
    pxs = tiles[0].metadata[model.MD_PIXEL_SIZE]
    tbbx_phy = []  # tuples of ltrb in physical coordinates
    for t in tiles:
        c = t.metadata[model.MD_POS]
        w = t.shape[-1], t.shape[-2]  # (X, Y) size in pixels
        if not util.almost_equal(pxs[0], t.metadata[model.MD_PIXEL_SIZE][0], rtol=0.01):
            logging.warning("Tile @ %s has a unexpected pixel size (%g vs %g)",
                            c, t.metadata[model.MD_PIXEL_SIZE][0], pxs[0])
        bbx = (c[0] - (w[0] * pxs[0] / 2), c[1] - (w[1] * pxs[1] / 2),
               c[0] + (w[0] * pxs[0] / 2), c[1] + (w[1] * pxs[1] / 2))
        tbbx_phy.append(bbx)
    gbbx_phy = (min(b[0] for b in tbbx_phy), min(b[1] for b in tbbx_phy),
                max(b[2] for b in tbbx_phy), max(b[3] for b in tbbx_phy))

    # Compute the bounding-boxes in pixel coordinates
    tbbx_px = []
    # that's the origin (Y is max as Y is inverted)
    glt = gbbx_phy[0], gbbx_phy[3]
    for bp, t in zip(tbbx_phy, tiles):
        lt = (int(round((bp[0] - glt[0]) / pxs[0])),
              int(round(-(bp[3] - glt[1]) / pxs[1])))
        w = t.shape[-1], t.shape[-2]
        bbx = (lt[0], lt[1], lt[0] + w[0], lt[1] + w[1])
        tbbx_px.append(bbx)
    gbbx_px = (min(b[0] for b in tbbx_px), min(b[1] for b in tbbx_px),
               max(b[2] for b in tbbx_px), max(b[3] for b in tbbx_px))

    assert gbbx_px[0] == gbbx_px[1] == 0
    # NOTE(review): tbbx_px[-2:] is the last two bounding boxes, so numpy.sum()
    # adds all 8 coordinates — confirm the intended operands of this heuristic.
    if numpy.greater(gbbx_px[-2:], 4 * numpy.sum(tbbx_px[-2:])).any():
        # Overlap > 50% or missing tiles
        logging.warning("Global area much bigger than sum of tile areas")

    # Weave tiles by using a smooth gradient. The part of a tile which does
    # not overlap any previously inserted tile is copied as-is into the parts
    # of the image that are still empty (tracked by a boolean mask). In the
    # overlapping parts, the tile is blended with the current image using
    # weights forming a gradient which is maximal at the center of the tile
    # and smoothly decreases towards its edges. The weight function is a
    # distance measure resembling the maximum-norm (equidistant points lie on
    # a rectangle instead of a circle as for the euclidean norm), with x and y
    # raised to the power of 6 to create a steeper gradient. The value 6 is
    # quite arbitrary and was found to give good results experimentally.

    # Paste each tile
    logging.debug("Generating global image of size %dx%d px", gbbx_px[-2], gbbx_px[-1])
    im = numpy.empty((gbbx_px[-1], gbbx_px[-2]), dtype=tiles[0].dtype)
    # Use minimum of the values in the tiles for background
    im[:] = numpy.amin(tiles)
    # True where the image already contains data from a previous tile.
    # Fixed: use the builtin bool — the numpy.bool alias is deprecated since
    # NumPy 1.20 and removed in 1.24.
    mask = numpy.zeros((gbbx_px[-1], gbbx_px[-2]), dtype=bool)

    for b, t in zip(tbbx_px, tiles):
        # Part of image overlapping with tile
        roi = im[b[1]:b[1] + t.shape[0], b[0]:b[0] + t.shape[1]]
        moi = mask[b[1]:b[1] + t.shape[0], b[0]:b[0] + t.shape[1]]
        # Insert image at positions that are still empty
        roi[~moi] = t[~moi]

        # Create weight matrix with decreasing values from its center that
        # has the same size as the tile (dealing with even/odd sizes)
        hh, hw = numpy.divide(roi.shape, 2)  # half-height, half-width
        sz = roi.shape
        if sz[1] % 2 == 0:
            x = numpy.arange(-hw, hw, 1)
        else:
            x = numpy.arange(-hw, hw + 1, 1)
        if sz[0] % 2 == 0:
            y = numpy.arange(-hh, hh, 1)
        else:
            y = numpy.arange(-hh, hh + 1, 1)
        xx, yy = numpy.meshgrid((x / hw) ** 6, (y / hh) ** 6)
        weights = numpy.maximum(xx, yy)  # 0 at the center, ~1 at the edges
        # Hardcoding a weight function is quite arbitrary and might result in
        # suboptimal solutions in some cases. An alternative would be to apply
        # a (linear) gradient only on a fixed border (e.g. 20%, the expected
        # overlap), or to adaptively select that region in the future.

        # Use weights to create gradient in overlapping region
        roi[moi] = (t * (1 - weights))[moi] + (roi * weights)[moi]
        # Update mask
        mask[b[1]:b[1] + t.shape[0], b[0]:b[0] + t.shape[1]] = True

    # Update metadata
    # TODO: check this is also correct based on lt + half shape * pxs
    c_phy = ((gbbx_phy[0] + gbbx_phy[2]) / 2, (gbbx_phy[1] + gbbx_phy[3]) / 2)
    md = tiles[0].metadata.copy()
    md[model.MD_POS] = c_phy

    return model.DataArray(im, md)
def man_calib(logpath, keep_loaded=False):
    """
    Run the interactive Delphi manual calibration procedure.

    Guides the operator through (1) sample holder hole detection,
    (2) SEM image calibration, (3) twin stage calibration and (4) fine
    alignment, asking for confirmation before each step and before saving
    the computed values to the calibration file.

    logpath (str): path where the calibration acquisitions are logged, and
      where a copy of the final calibration file is stored.
    keep_loaded (bool): if True and the chamber is already under vacuum,
      the optical lens detection is skipped, and the sample is not ejected
      at the end of the procedure.
    raises KeyError: if the e-beam, bs-detector or ccd components are missing
    """
    escan = None
    detector = None
    ccd = None
    # find components by their role
    # NOTE(review): components such as sem_stage, opt_stage, focus, chamber,
    # overview_ccd are only bound if a component with that role exists; if one
    # is missing, a NameError will occur later — presumably the microscope
    # file always provides them. TODO confirm.
    for c in model.getComponents():
        if c.role == "e-beam":
            escan = c
        elif c.role == "bs-detector":
            detector = c
        elif c.role == "ccd":
            ccd = c
        elif c.role == "sem-stage":
            sem_stage = c
        elif c.role == "align":
            opt_stage = c
        elif c.role == "ebeam-focus":
            ebeam_focus = c
        elif c.role == "overview-focus":
            navcam_focus = c
        elif c.role == "focus":
            focus = c
        elif c.role == "overview-ccd":
            overview_ccd = c
        elif c.role == "chamber":
            chamber = c
    if not all([escan, detector, ccd]):
        logging.error("Failed to find all the components")
        raise KeyError("Not all components found")

    # Save the current hardware settings, to restore them at the end
    hw_settings = aligndelphi.list_hw_settings(escan, ccd)

    try:
        # Get pressure values
        pressures = chamber.axes["pressure"].choices
        vacuum_pressure = min(pressures.keys())
        vented_pressure = max(pressures.keys())
        if overview_ccd:
            for p, pn in pressures.items():
                if pn == "overview":
                    overview_pressure = p
                    break
            else:
                raise IOError("Failed to find the overview pressure in %s" % (pressures,))

        # Read the calibration values already stored for this sample holder (if any)
        calibconf = get_calib_conf()
        shid, sht = chamber.sampleHolder.value
        calib_values = calibconf.get_sh_calib(shid)
        if calib_values is None:
            # No previous calibration: start from neutral defaults, and run
            # every step without asking for confirmation (force_calib).
            first_hole = second_hole = offset = resa = resb = hfwa = scaleshift = (0, 0)
            scaling = iscale = iscale_xy = (1, 1)
            rotation = irot = ishear = 0
            hole_focus = aligndelphi.SEM_KNOWN_FOCUS
            opt_focus = aligndelphi.OPTICAL_KNOWN_FOCUS
            print_col(ANSI_RED,
                      "Calibration values missing! All the steps will be performed anyway...")
            force_calib = True
        else:
            first_hole, second_hole, hole_focus, opt_focus, offset, scaling, rotation, iscale, irot, iscale_xy, ishear, resa, resb, hfwa, scaleshift = calib_values
            force_calib = False
        # Display the current calibration values for each of the 4 steps
        print_col(ANSI_CYAN,
                  "**Delphi Manual Calibration steps**\n"
                  "1.Sample holder hole detection\n"
                  " Current values: 1st hole: " + str(first_hole) + "\n"
                  " 2st hole: " + str(second_hole) + "\n"
                  " hole focus: " + str(hole_focus) + "\n"
                  "2.SEM image calibration\n"
                  " Current values: resolution-a: " + str(resa) + "\n"
                  " resolution-b: " + str(resb) + "\n"
                  " hfw-a: " + str(hfwa) + "\n"
                  " spot shift: " + str(scaleshift) + "\n"
                  "3.Twin stage calibration\n"
                  " Current values: offset: " + str(offset) + "\n"
                  " scaling: " + str(scaling) + "\n"
                  " rotation: " + str(rotation) + "\n"
                  " optical focus: " + str(opt_focus) + "\n"
                  "4.Fine alignment\n"
                  " Current values: scale: " + str(iscale) + "\n"
                  " rotation: " + str(irot) + "\n"
                  " scale-xy: " + str(iscale_xy) + "\n"
                  " shear: " + str(ishear))
        print_col(ANSI_YELLOW,
                  "Note that you should not perform any stage move during the process.\n"
                  "Instead, you may zoom in/out while focusing.")
        print_col(ANSI_BLACK, "Now initializing, please wait...")

        # Default value for the stage offset
        position = (offset[0] * scaling[0], offset[1] * scaling[1])

        if keep_loaded and chamber.position.value["pressure"] == vacuum_pressure:
            logging.info("Skipped optical lens detection, will use previous value %s",
                         position)
        else:
            # Move to the overview position first
            f = chamber.moveAbs({"pressure": overview_pressure})
            f.result()

            # Reference the (optical) stage
            f = opt_stage.reference({"x", "y"})
            f.result()

            f = focus.reference({"z"})
            f.result()

            # SEM stage to (0,0)
            f = sem_stage.moveAbs({"x": 0, "y": 0})
            f.result()

            # Calculate offset approximation
            try:
                f = aligndelphi.LensAlignment(overview_ccd, sem_stage, logpath)
                position = f.result()
            except IOError as ex:
                logging.warning("Failed to locate the optical lens (%s), will use previous value %s",
                                ex, position)

            # Just to check if move makes sense
            f = sem_stage.moveAbs({"x": position[0], "y": position[1]})
            f.result()

            # Move to SEM
            f = chamber.moveAbs({"pressure": vacuum_pressure})
            f.result()

        # Set basic e-beam settings
        escan.spotSize.value = 2.7
        escan.accelVoltage.value = 5300  # V
        # Without automatic blanker, the background subtraction doesn't work
        if (model.hasVA(escan, "blanker") and  # For simulator
            None in escan.blanker.choices and
            escan.blanker.value is not None):
            logging.warning("Blanker set back to automatic")
            escan.blanker.value = None

        # --- Step 1: detect the holes/markers of the sample holder ---
        while True:
            ans = "Y" if force_calib else None
            while ans not in YES_NO_CHARS:
                ans = input_col(ANSI_MAGENTA,
                                "Do you want to execute the sample holder hole detection? [Y/n]")
            if ans in YES_CHARS:
                # Move Phenom sample stage next to expected hole position
                sem_stage.moveAbsSync(aligndelphi.SHIFT_DETECTION)
                ebeam_focus.moveAbsSync({"z": hole_focus})
                # Set the FoV to almost 2mm
                escan.horizontalFoV.value = escan.horizontalFoV.range[1]
                input_col(ANSI_BLUE,
                          "Please turn on the SEM stream and focus the SEM image. Then turn off the stream and press Enter...")
                print_col(ANSI_BLACK, "Trying to detect the holes/markers, please wait...")
                try:
                    hole_detectionf = aligndelphi.HoleDetection(detector, escan, sem_stage,
                                                                ebeam_focus, manual=True, logpath=logpath)
                    new_first_hole, new_second_hole, new_hole_focus = hole_detectionf.result()
                    print_col(ANSI_CYAN,
                              "Values computed: 1st hole: " + str(new_first_hole) + "\n"
                              " 2st hole: " + str(new_second_hole) + "\n"
                              " hole focus: " + str(new_hole_focus))
                    ans = "Y" if force_calib else None
                    while ans not in YES_NO_CHARS:
                        ans = input_col(ANSI_MAGENTA,
                                        "Do you want to update the calibration file with these values? [Y/n]")
                    if ans in YES_CHARS:
                        first_hole, second_hole, hole_focus = new_first_hole, new_second_hole, new_hole_focus
                        calibconf.set_sh_calib(shid, first_hole, second_hole, hole_focus, opt_focus, offset,
                                               scaling, rotation, iscale, irot, iscale_xy, ishear,
                                               resa, resb, hfwa, scaleshift)
                        print_col(ANSI_BLACK, "Calibration file is updated.")
                    break
                except IOError:
                    print_col(ANSI_RED, "Sample holder hole detection failed.")
            else:
                break

        # --- Step 2: SEM image calibration ---
        while True:
            ans = "Y" if force_calib else None
            while ans not in YES_NO_CHARS:
                ans = input_col(ANSI_MAGENTA,
                                "Do you want to execute the SEM image calibration? [Y/n]")
            if ans in YES_CHARS:
                # Resetting shift parameters, to not take them into account during calib
                blank_md = dict.fromkeys(aligndelphi.MD_CALIB_SEM, (0, 0))
                escan.updateMetadata(blank_md)

                # We measure the shift in the area just behind the hole where there
                # are always some features plus the edge of the sample carrier. For
                # that reason we use the focus measured in the hole detection step
                sem_stage.moveAbsSync(aligndelphi.SHIFT_DETECTION)
                ebeam_focus.moveAbsSync({"z": hole_focus})
                try:
                    # Compute spot shift percentage
                    print_col(ANSI_BLACK, "Spot shift measurement in progress, please wait...")
                    f = aligndelphi.ScaleShiftFactor(detector, escan, logpath)
                    new_scaleshift = f.result()

                    # Compute resolution-related values.
                    print_col(ANSI_BLACK, "Calculating resolution shift, please wait...")
                    resolution_shiftf = aligndelphi.ResolutionShiftFactor(detector, escan, logpath)
                    new_resa, new_resb = resolution_shiftf.result()

                    # Compute HFW-related values
                    print_col(ANSI_BLACK, "Calculating HFW shift, please wait...")
                    hfw_shiftf = aligndelphi.HFWShiftFactor(detector, escan, logpath)
                    new_hfwa = hfw_shiftf.result()

                    print_col(ANSI_CYAN,
                              "Values computed: resolution-a: " + str(new_resa) + "\n"
                              " resolution-b: " + str(new_resb) + "\n"
                              " hfw-a: " + str(new_hfwa) + "\n"
                              " spot shift: " + str(new_scaleshift))
                    ans = "Y" if force_calib else None
                    while ans not in YES_NO_CHARS:
                        ans = input_col(ANSI_MAGENTA,
                                        "Do you want to update the calibration file with these values? [Y/n]")
                    if ans in YES_CHARS:
                        resa, resb, hfwa, scaleshift = new_resa, new_resb, new_hfwa, new_scaleshift
                        calibconf.set_sh_calib(shid, first_hole, second_hole, hole_focus, opt_focus, offset,
                                               scaling, rotation, iscale, irot, iscale_xy, ishear,
                                               resa, resb, hfwa, scaleshift)
                        print_col(ANSI_BLACK, "Calibration file is updated.")
                    break
                except IOError:
                    print_col(ANSI_RED, "SEM image calibration failed.")
            else:
                break

        # Update the SEM metadata to have the spots already at corrected place
        escan.updateMetadata({
            model.MD_RESOLUTION_SLOPE: resa,
            model.MD_RESOLUTION_INTERCEPT: resb,
            model.MD_HFW_SLOPE: hfwa,
            model.MD_SPOT_SHIFT: scaleshift
        })

        f = sem_stage.moveAbs({"x": position[0], "y": position[1]})
        f.result()

        f = opt_stage.moveAbs({"x": 0, "y": 0})
        f.result()

        if hole_focus is not None:
            good_focus = hole_focus - aligndelphi.GOOD_FOCUS_OFFSET
        else:
            good_focus = aligndelphi.SEM_KNOWN_FOCUS - aligndelphi.GOOD_FOCUS_OFFSET
        f = ebeam_focus.moveAbs({"z": good_focus})
        f.result()

        # Set min fov
        # We want to be as close as possible to the center when we are zoomed in
        escan.horizontalFoV.value = escan.horizontalFoV.range[0]
        pure_offset = None

        # Start with the best optical focus known so far
        f = focus.moveAbs({"z": opt_focus})
        f.result()

        # --- Step 3: twin stage calibration ---
        while True:
            ans = "Y" if force_calib else None
            while ans not in YES_NO_CHARS:
                ans = input_col(ANSI_MAGENTA,
                                "Do you want to execute the twin stage calibration? [Y/n]")
            if ans in YES_CHARS:
                # Configure CCD and e-beam to write CL spots
                ccd.binning.value = ccd.binning.clip((4, 4))
                ccd.resolution.value = ccd.resolution.range[1]
                ccd.exposureTime.value = 900e-03
                escan.scale.value = (1, 1)
                escan.resolution.value = (1, 1)
                escan.translation.value = (0, 0)
                if not escan.rotation.readonly:
                    escan.rotation.value = 0
                escan.shift.value = (0, 0)
                escan.dwellTime.value = 5e-06
                detector.data.subscribe(_discard_data)
                print_col(ANSI_BLUE,
                          "Please turn on the Optical stream, set Power to 0 Watt "
                          "and focus the image so you have a clearly visible spot.\n"
                          "Use the up and down arrows or the mouse to move the "
                          "optical focus and right and left arrows to move the SEM focus. "
                          "Then turn off the stream and press Enter...")
                if not force_calib:
                    print_col(ANSI_YELLOW,
                              "If you cannot see the whole source background (bright circle) "
                              "you may try to move to the already known offset position. \n"
                              "To do this press the R key at any moment and use I to go back "
                              "to the initial position.")
                    rollback_pos = (offset[0] * scaling[0], offset[1] * scaling[1])
                else:
                    rollback_pos = None
                ar = ArrowFocus(sem_stage, focus, ebeam_focus, ccd.depthOfField.value, 10e-6)
                ar.focusByArrow(rollback_pos)

                # Did the user adjust the ebeam-focus? If so, let's use this,
                # as it's probably better than the focus for the hole.
                new_ebeam_focus = ebeam_focus.position.value.get('z')
                new_hole_focus = new_ebeam_focus + aligndelphi.GOOD_FOCUS_OFFSET
                if not util.almost_equal(new_hole_focus, hole_focus, atol=10e-6):
                    print_col(ANSI_CYAN,
                              "Updating e-beam focus: %s (ie, hole focus: %s)" % (new_ebeam_focus, new_hole_focus))
                    good_focus = new_ebeam_focus
                    hole_focus = new_hole_focus

                detector.data.unsubscribe(_discard_data)
                print_col(ANSI_BLACK, "Twin stage calibration starting, please wait...")
                try:
                    # TODO: the first point (at 0,0) isn't different from the next 4 points,
                    # excepted it might be a little harder to focus.
                    # => use the same code for all of them
                    align_offsetf = aligndelphi.AlignAndOffset(ccd, detector, escan, sem_stage,
                                                               opt_stage, focus, logpath)
                    align_offset = align_offsetf.result()
                    new_opt_focus = focus.position.value.get('z')

                    # If the offset is large, it can prevent the SEM stage to follow
                    # the optical stage. If it's really large (eg > 1mm) it will
                    # even prevent from going to the calibration locations.
                    # So warn about this, as soon as we detect it. It could be
                    # caused either due to a mistake in the offset detection, or
                    # because the reference switch of the optical axis is not
                    # centered (enough) on the axis. In such case, a technician
                    # should open the sample holder and move the reference switch.
                    # Alternatively, we could try to be more clever and support
                    # the range of the tmcm axes to be defined per sample
                    # holder, and set some asymmetric range (to reflect the fact
                    # that 0 is not at the center).
                    for a, trans in zip(("x", "y"), align_offset):
                        # SEM pos = Opt pos + offset
                        rng_sem = sem_stage.axes[a].range
                        rng_opt = opt_stage.axes[a].range
                        if (rng_opt[0] + trans < rng_sem[0] or
                            rng_opt[1] + trans > rng_sem[1]):
                            logging.info("Stage align offset = %s, which could cause "
                                         "moves on the SEM stage out of range (on axis %s)",
                                         align_offset, a)
                            input_col(ANSI_RED,
                                      "Twin stage offset on axis %s is %g mm, which could cause moves out of range.\n"
                                      "Check that the reference switch in the sample holder is properly at the center."
                                      % (a, trans * 1e3))

                    def ask_user_to_focus(n):
                        # Callback used by RotationAndScaling, for each of the 4 points
                        detector.data.subscribe(_discard_data)
                        input_col(ANSI_BLUE,
                                  "About to calculate rotation and scaling (%d/4). " % (n + 1,) +
                                  "Please turn on the Optical stream, "
                                  "set Power to 0 Watt and focus the image using the mouse "
                                  "so you have a clearly visible spot. \n"
                                  "If you do not see a spot nor the source background, "
                                  "move the sem-stage from the command line by steps of 200um "
                                  "in x and y until you can see the source background at the center. \n"
                                  "Then turn off the stream and press Enter...")
                        # TODO: use ArrowFocus() too?
                        print_col(ANSI_BLACK,
                                  "Calculating rotation and scaling (%d/4), please wait..." % (n + 1,))
                        detector.data.unsubscribe(_discard_data)

                    f = aligndelphi.RotationAndScaling(ccd, detector, escan, sem_stage,
                                                       opt_stage, focus, align_offset,
                                                       manual=ask_user_to_focus, logpath=logpath)
                    acc_offset, new_rotation, new_scaling = f.result()

                    # Offset is divided by scaling, since Convert Stage applies scaling
                    # also in the given offset
                    pure_offset = acc_offset
                    new_offset = ((acc_offset[0] / new_scaling[0]), (acc_offset[1] / new_scaling[1]))

                    print_col(ANSI_CYAN,
                              "Values computed: offset: " + str(new_offset) + "\n"
                              " scaling: " + str(new_scaling) + "\n"
                              " rotation: " + str(new_rotation) + "\n"
                              " optical focus: " + str(new_opt_focus))
                    ans = "Y" if force_calib else None
                    while ans not in YES_NO_CHARS:
                        ans = input_col(ANSI_MAGENTA,
                                        "Do you want to update the calibration file with these values? [Y/n]")
                    if ans in YES_CHARS:
                        offset, scaling, rotation, opt_focus = new_offset, new_scaling, new_rotation, new_opt_focus
                        calibconf.set_sh_calib(shid, first_hole, second_hole, hole_focus, opt_focus, offset,
                                               scaling, rotation, iscale, irot, iscale_xy, ishear,
                                               resa, resb, hfwa, scaleshift)
                        print_col(ANSI_BLACK, "Calibration file is updated.")
                    break
                except IOError:
                    print_col(ANSI_RED, "Twin stage calibration failed.")
            else:
                break

        # --- Step 4: fine alignment ---
        while True:
            ans = "Y" if force_calib else None
            while ans not in YES_NO_CHARS:
                ans = input_col(ANSI_MAGENTA,
                                "Do you want to execute the fine alignment? [Y/n]")
            if ans in YES_CHARS:
                # Return to the center so fine alignment can be executed just after calibration
                f = opt_stage.moveAbs({"x": 0, "y": 0})
                f.result()
                if pure_offset is not None:
                    f = sem_stage.moveAbs({"x": pure_offset[0], "y": pure_offset[1]})
                elif offset is not None:
                    f = sem_stage.moveAbs({"x": offset[0] * scaling[0], "y": offset[1] * scaling[1]})
                else:
                    f = sem_stage.moveAbs({"x": position[0], "y": position[1]})

                fof = focus.moveAbs({"z": opt_focus})
                fef = ebeam_focus.moveAbs({"z": good_focus})
                f.result()
                fof.result()
                fef.result()

                # Run the optical fine alignment
                # TODO: reuse the exposure time
                # Configure e-beam to write CL spots
                escan.horizontalFoV.value = escan.horizontalFoV.range[0]
                escan.scale.value = (1, 1)
                escan.resolution.value = (1, 1)
                escan.translation.value = (0, 0)
                if not escan.rotation.readonly:
                    escan.rotation.value = 0
                escan.shift.value = (0, 0)
                escan.dwellTime.value = 5e-06
                detector.data.subscribe(_discard_data)
                print_col(ANSI_BLUE,
                          "Please turn on the Optical stream, set Power to 0 Watt "
                          "and focus the image so you have a clearly visible spot.\n"
                          "Use the up and down arrows or the mouse to move the "
                          "optical focus and right and left arrows to move the SEM focus. "
                          "Then turn off the stream and press Enter...")
                ar = ArrowFocus(sem_stage, focus, ebeam_focus, ccd.depthOfField.value, 10e-6)
                ar.focusByArrow()
                detector.data.unsubscribe(_discard_data)

                print_col(ANSI_BLACK, "Fine alignment in progress, please wait...")

                # restore CCD settings (as the GUI/user might have changed them)
                ccd.binning.value = (1, 1)
                ccd.resolution.value = ccd.resolution.range[1]
                ccd.exposureTime.value = 900e-03
                # Center (roughly) the spot on the CCD
                f = spot.CenterSpot(ccd, sem_stage, escan, spot.ROUGH_MOVE, spot.STAGE_MOVE, detector.data)
                dist, vect = f.result()
                if dist is None:
                    logging.warning("Failed to find a spot, twin stage calibration might have failed")

                try:
                    escan.horizontalFoV.value = 80e-06
                    f = align.FindOverlay((4, 4),
                                          0.5,  # s, dwell time
                                          10e-06,  # m, maximum difference allowed
                                          escan,
                                          ccd,
                                          detector,
                                          skew=True,
                                          bgsub=True)
                    trans_val, cor_md = f.result()
                    trans_md, skew_md = cor_md
                    new_iscale = trans_md[model.MD_PIXEL_SIZE_COR]
                    new_irot = -trans_md[model.MD_ROTATION_COR] % (2 * math.pi)
                    new_ishear = skew_md[model.MD_SHEAR_COR]
                    new_iscale_xy = skew_md[model.MD_PIXEL_SIZE_COR]

                    print_col(ANSI_CYAN,
                              "Values computed: scale: " + str(new_iscale) + "\n"
                              " rotation: " + str(new_irot) + "\n"
                              " scale-xy: " + str(new_iscale_xy) + "\n"
                              " shear: " + str(new_ishear))
                    ans = "Y" if force_calib else None
                    while ans not in YES_NO_CHARS:
                        ans = input_col(ANSI_MAGENTA,
                                        "Do you want to update the calibration file with these values? [Y/n]")
                    if ans in YES_CHARS:
                        iscale, irot, iscale_xy, ishear = new_iscale, new_irot, new_iscale_xy, new_ishear
                        calibconf.set_sh_calib(shid, first_hole, second_hole, hole_focus, opt_focus, offset,
                                               scaling, rotation, iscale, irot, iscale_xy, ishear,
                                               resa, resb, hfwa, scaleshift)
                        print_col(ANSI_BLACK, "Calibration file is updated.")
                    break
                except ValueError:
                    print_col(ANSI_RED, "Fine alignment failed.")
            else:
                break

    except Exception:
        logging.exception("Unexpected failure during calibration")
    finally:
        aligndelphi.restore_hw_settings(escan, ccd, hw_settings)

        # Store the final version of the calibration file in the log folder
        try:
            shutil.copy(calibconf.file_path, logpath)
        except Exception:
            logging.info("Failed to log calibration file", exc_info=True)

        if not keep_loaded:
            # Eject the sample holder
            print_col(ANSI_BLACK, "Calibration ended, now ejecting sample, please wait...")
            f = chamber.moveAbs({"pressure": vented_pressure})
            f.result()

    ans = input_col(ANSI_MAGENTA, "Press Enter to close")
def __init__(self, name, data, *args, **kwargs): """ name (string) data (model.DataArray(Shadow) of shape (YX) or list of such DataArray(Shadow)). The metadata MD_POS, MD_AR_POLE and MD_POL_MODE should be provided """ if not isinstance(data, collections.Iterable): data = [data] # from now it's just a list of DataArray # TODO: support DAS, as a "delayed loading" by only calling .getData() # when the projection for the particular data needs to be computed (or # .raw needs to be accessed?) # Ensure all the data is a DataArray, as we don't handle (yet) DAS data = [ d.getData() if isinstance(d, model.DataArrayShadow) else d for d in data ] # find positions of each acquisition # (float, float, str or None)) -> DataArray: position on SEM + polarization -> data self._pos = {} sempositions = set() polpositions = set() for d in data: try: sempos_cur = d.metadata[MD_POS] # When reading data: floating point error (slightly different keys for same ebeam pos) # -> check if there is already a position specified, which is very close by # (and therefore the same ebeam pos) and replace with that ebeam position # (e.g. 
all polarization positions for the same ebeam positions will have exactly the same ebeam pos) for sempos in sempositions: if almost_equal(sempos_cur[0], sempos[0]) and almost_equal( sempos_cur[1], sempos[1]): sempos_cur = sempos break self._pos[sempos_cur + (d.metadata.get(MD_POL_MODE, None), )] = img.ensure2DImage(d) sempositions.add(sempos_cur) if MD_POL_MODE in d.metadata: polpositions.add(d.metadata.get(MD_POL_MODE)) except KeyError: logging.info("Skipping DataArray without known position") # Cached conversion of the CCD image to polar representation # TODO: automatically fill it in a background thread self._polar = {} # dict tuple (float, float, str or None) -> DataArray # SEM position VA # SEM position displayed, (None, None) == no point selected (x, y) self.point = model.VAEnumerated( (None, None), choices=frozenset([(None, None)] + list(sempositions))) if self._pos: # Pick one point, e.g., top-left bbtl = (min(x for x, y in sempositions if x is not None), min(y for x, y in sempositions if y is not None)) # top-left point is the closest from the bounding-box top-left def dis_bbtl(v): try: return math.hypot(bbtl[0] - v[0], bbtl[1] - v[1]) except TypeError: return float("inf") # for None, None self.point.value = min(sempositions, key=dis_bbtl) # no need for init=True, as Stream.__init__ will update the image self.point.subscribe(self._onPoint) # polarization VA # check if any polarization analyzer data, set([]) == no analyzer data (pol) if polpositions: # use first entry in acquisition to populate VA (acq could have 1 or 6 pol pos) self.polarization = model.VAEnumerated(list(polpositions)[0], choices=polpositions) self.polarization.subscribe(self._onPolarization) if "acq_type" not in kwargs: kwargs["acq_type"] = model.MD_AT_AR super(StaticARStream, self).__init__(name, list(self._pos.values()), *args, **kwargs)
def __init__(self, name, data, *args, **kwargs): """ :param name: (string) :param data: (model.DataArray(Shadow) of shape (YX) or list of such DataArray(Shadow)). The metadata MD_POS, MD_AR_POLE and MD_POL_MODE should be provided """ if not isinstance(data, collections.Iterable): data = [data] # from now it's just a list of DataArray # TODO: support DAS, as a "delayed loading" by only calling .getData() # when the projection for the particular data needs to be computed (or # .raw needs to be accessed?) # Ensure all the data is a DataArray, as we don't handle (yet) DAS data = [d.getData() if isinstance(d, model.DataArrayShadow) else d for d in data] # find positions of each acquisition # (float, float, str or None)) -> DataArray: position on SEM + polarization -> data self._pos = {} sempositions = set() polpositions = set() for d in data: try: sempos_cur = d.metadata[MD_POS] # When reading data: floating point error (slightly different keys for same ebeam pos) # -> check if there is already a position specified, which is very close by # (and therefore the same ebeam pos) and replace with that ebeam position # (e.g. 
all polarization positions for the same ebeam positions will have exactly the same ebeam pos) for sempos in sempositions: if almost_equal(sempos_cur[0], sempos[0]) and almost_equal(sempos_cur[1], sempos[1]): sempos_cur = sempos break self._pos[sempos_cur + (d.metadata.get(MD_POL_MODE, None),)] = img.ensure2DImage(d) sempositions.add(sempos_cur) if MD_POL_MODE in d.metadata: polpositions.add(d.metadata[MD_POL_MODE]) except KeyError: logging.info("Skipping DataArray without known position") # SEM position VA # SEM position displayed, (None, None) == no point selected (x, y) self.point = model.VAEnumerated((None, None), choices=frozenset([(None, None)] + list(sempositions))) if self._pos: # Pick one point, e.g., top-left bbtl = (min(x for x, y in sempositions if x is not None), min(y for x, y in sempositions if y is not None)) # top-left point is the closest from the bounding-box top-left def dis_bbtl(v): try: return math.hypot(bbtl[0] - v[0], bbtl[1] - v[1]) except TypeError: return float("inf") # for None, None self.point.value = min(sempositions, key=dis_bbtl) # check if any polarization analyzer data, (None) == no analyzer data (pol) if polpositions: # Check that for every position, all the polarizations are available, # as the GUI expects all the combinations possible, and weird errors # will happen when one is missing. for pos in sempositions: for pol in polpositions: if pos + (pol,) not in self._pos: logging.warning("Polarization data is not complete: missing %s,%s/%s", pos[0], pos[1], pol) # use first entry in acquisition to populate VA (acq could have 1 or 6 pol pos) current_pol = util.sorted_according_to(polpositions, POL_POSITIONS)[0] self.polarization = model.VAEnumerated(current_pol, choices=polpositions) # Add a polarimetry VA containing the polarimetry image results. # Note: Polarimetry analysis are only possible if all 6 images per ebeam pos exist. # Also check if arpolarimetry package can be imported as might not be installed. 
if polpositions >= set(POL_POSITIONS) and arpolarimetry: self.polarimetry = model.VAEnumerated(MD_POL_S0, choices=set(POL_POSITIONS_RESULTS)) if "acq_type" not in kwargs: kwargs["acq_type"] = model.MD_AT_AR super(StaticARStream, self).__init__(name, list(self._pos.values()), *args, **kwargs)
def _DoBinaryFocus(future, detector, emt, focus, dfbkg, good_focus, rng_focus):
    """
    Iteratively acquires an optical image, measures its focus level and adjusts
    the optical focus with respect to the focus level.
    future (model.ProgressiveFuture): Progressive future provided by the wrapper
    detector: model.DigitalCamera or model.Detector
    emt (None or model.Emitter): In case of a SED this is the scanner used
    focus (model.Actuator): The focus actuator (with a "z" axis)
    dfbkg (model.DataFlow): dataflow of se- or bs- detector
    good_focus (float): if provided, an already known good focus position to be
      taken into consideration while autofocusing
    rng_focus (tuple of floats): if provided, the search of the best focus position is limited
      within this range
    returns:
        (float): Focus position (m)
        (float): Focus level
    raises:
        CancelledError if cancelled
        IOError if procedure failed
    """
    # TODO: dfbkg is mis-named, as it's the dataflow to use to _activate_ the
    # emitter. It's necessary to acquire the background, as otherwise we assume
    # the emitter is always active, but during background acquisition, that
    # emitter is explicitly _disabled_.
    # => change emt to "scanner", and "dfbkg" to "emitter". Or pass a stream?
    # Note: the emt is almost not used, only to estimate completion time,
    # and read the depthOfField.

    # It does a dichotomy search on the focus level. In practice, it means it
    # will start going into the direction that increase the focus with big steps
    # until the focus decreases again. Then it'll bounce back and forth with
    # smaller and smaller steps.
    # The tricky parts are:
    # * it's hard to estimate the focus level (on an arbitrary image)
    # * two acquisitions at the same focus position can have (slightly) different
    #   focus levels (due to noise and sample degradation)
    # * if the focus actuator is not precise (eg, open loop), it's hard to
    #   even go back to the same focus position when wanted
    logging.debug("Starting binary autofocus on detector %s...", detector.name)

    try:
        # Big timeout, most important being that it's shorter than eternity
        timeout = 3 + 2 * estimateAcquisitionTime(detector, emt)

        # use the .depthOfField on detector or emitter as maximum stepsize
        avail_depths = (detector, emt)
        if model.hasVA(emt, "dwellTime"):
            # Hack in case of using the e-beam with a DigitalCamera detector.
            # All the digital cameras have a depthOfField, which is updated based
            # on the optical lens properties... but the depthOfField in this
            # case depends on the e-beam lens.
            # TODO: or better rely on which component the focuser affects? If it
            # affects (also) the emitter, use this one first? (but in the
            # current models the focusers affects nothing)
            avail_depths = (emt, detector)
        for c in avail_depths:
            if model.hasVA(c, "depthOfField"):
                dof = c.depthOfField.value
                break
        else:
            logging.debug("No depth of field info found")
            dof = 1e-6  # m, not too bad value
        logging.debug("Depth of field is %f", dof)
        min_step = dof / 2

        # adjust to rng_focus if provided
        rng = focus.axes["z"].range
        if rng_focus:
            rng = (max(rng[0], rng_focus[0]), min(rng[1], rng_focus[1]))

        max_step = (rng[1] - rng[0]) / 2
        if max_step <= 0:
            raise ValueError("Unexpected focus range %s" % (rng,))

        max_reached = False  # True once we've passed the maximum level (ie, start bouncing)
        # It's used to cache the focus level, to avoid reacquiring at the same
        # position. We do it only for the 'rough' max search because for the fine
        # search, the actuator and acquisition delta are likely to play a role
        focus_levels = {}  # focus pos (float) -> focus level (float)

        best_pos = focus.position.value['z']
        best_fm = 0
        last_pos = None

        # Pick measurement method based on the heuristics that SEM detectors
        # are typically just a point (ie, shape == data depth).
        # TODO: is this working as expected? Alternatively, we could check
        # MD_DET_TYPE.
        if len(detector.shape) > 1:
            logging.debug("Using Optical method to estimate focus")
            Measure = MeasureOpticalFocus
        else:
            logging.debug("Using SEM method to estimate focus")
            Measure = MeasureSEMFocus

        step_factor = 2 ** 7
        if good_focus is not None:
            # Measure the focus level at the current position, then at the
            # known-good position, and keep the better of the two as start point
            current_pos = focus.position.value['z']
            image = AcquireNoBackground(detector, dfbkg, timeout)
            fm_current = Measure(image)
            logging.debug("Focus level at %f is %f", current_pos, fm_current)
            focus_levels[current_pos] = fm_current

            focus.moveAbsSync({"z": good_focus})
            good_focus = focus.position.value["z"]
            image = AcquireNoBackground(detector, dfbkg, timeout)
            fm_good = Measure(image)
            logging.debug("Focus level at %f is %f", good_focus, fm_good)
            focus_levels[good_focus] = fm_good
            last_pos = good_focus

            if fm_good < fm_current:
                # Move back to current position if good_pos is not that good
                # after all
                focus.moveAbsSync({"z": current_pos})
                # it also means we are pretty close
                step_factor = 2 ** 4

        if step_factor * min_step > max_step:
            # Large steps would be too big. We can reduce step_factor and/or
            # min_step. => let's take our time, and maybe find finer focus
            min_step = max_step / step_factor
            logging.debug("Reducing min step to %g", min_step)

        # TODO: to go a bit faster, we could use synchronised acquisition on
        # the detector (if it supports it)
        # TODO: we could estimate the quality of the autofocus by looking at the
        # standard deviation of the the focus levels (and the standard deviation
        # of the focus levels measured for the same focus position)
        logging.debug("Step factor used for autofocus: %g", step_factor)
        step_cntr = 1
        while step_factor >= 1 and step_cntr <= MAX_STEPS_NUMBER:
            # TODO: update the estimated time (based on how long it takes to
            # move + acquire, and how many steps are approximately left)

            # Start at the current focus position
            center = focus.position.value['z']
            # Don't redo the acquisition either if we've just done it, or if it
            # was already done and we are still doing a rough search
            if (not max_reached or last_pos == center) and center in focus_levels:
                fm_center = focus_levels[center]
            else:
                image = AcquireNoBackground(detector, dfbkg, timeout)
                fm_center = Measure(image)
                logging.debug("Focus level (center) at %f is %f", center, fm_center)
                focus_levels[center] = fm_center
                last_pos = center

            # Move to right position
            right = center + step_factor * min_step
            right = max(rng[0], min(right, rng[1]))  # clip
            if not max_reached and right in focus_levels:
                fm_right = focus_levels[right]
            else:
                focus.moveAbsSync({"z": right})
                right = focus.position.value["z"]
                last_pos = right
                image = AcquireNoBackground(detector, dfbkg, timeout)
                fm_right = Measure(image)
                logging.debug("Focus level (right) at %f is %f", right, fm_right)
                focus_levels[right] = fm_right

            # Move to left position
            left = center - step_factor * min_step
            left = max(rng[0], min(left, rng[1]))  # clip
            if not max_reached and left in focus_levels:
                fm_left = focus_levels[left]
            else:
                focus.moveAbsSync({"z": left})
                left = focus.position.value["z"]
                last_pos = left
                image = AcquireNoBackground(detector, dfbkg, timeout)
                fm_left = Measure(image)
                logging.debug("Focus level (left) at %f is %f", left, fm_left)
                focus_levels[left] = fm_left

            fm_range = (fm_left, fm_center, fm_right)
            if all(almost_equal(fm_left, fm, rtol=1e-6) for fm in fm_range[1:]):
                logging.debug("All focus levels identical, picking the middle one")
                # Most probably the images are all noise, or they are not affected
                # by the focus. In any case, the best is to not move the focus,
                # so let's "center" on it. That's better than the default behaviour
                # which would tend to pick "left" because that's the first one.
                i_max = 1
                best_pos, best_fm = center, fm_center
            else:
                pos_range = (left, center, right)
                best_fm = max(fm_range)
                i_max = fm_range.index(best_fm)
                best_pos = pos_range[i_max]

            if future._autofocus_state == CANCELLED:
                raise CancelledError()

            if left == right:
                # Both moves clipped to the same position: cannot refine further
                logging.info("Seems to have reached minimum step size (at %g m)",
                             2 * step_factor * min_step)
                break

            # if best focus was found at the center
            if i_max == 1:
                step_factor /= 2
                if not max_reached:
                    logging.debug("Now zooming in on improved focus")
                max_reached = True
            elif (rng[0] > best_pos - step_factor * min_step or
                  rng[1] < best_pos + step_factor * min_step):
                # The next move would get (partly) out of the allowed range:
                # reduce the step more gently
                step_factor /= 1.5
                logging.debug("Reducing step factor to %g because the focus (%g) is near range limit %s",
                              step_factor, best_pos, rng)
                if step_factor <= 8:
                    max_reached = True  # Force re-checking data

            focus.moveAbsSync({"z": best_pos})
            step_cntr += 1

        worst_fm = min(focus_levels.values())
        if step_cntr == MAX_STEPS_NUMBER:
            logging.info("Auto focus gave up after %d steps @ %g m", step_cntr, best_pos)
        elif (best_fm - worst_fm) < best_fm * 0.5:
            # We can be confident of the data if there is a "big" (50%) difference
            # between the focus levels.
            logging.info("Auto focus indecisive but picking level %g @ %g m (lowest = %g)",
                         best_fm, best_pos, worst_fm)
        else:
            logging.info("Auto focus found best level %g @ %g m", best_fm, best_pos)

        return best_pos, best_fm

    except CancelledError:
        # Go to the best position known so far
        focus.moveAbsSync({"z": best_pos})
    finally:
        # The CancelledError is re-raised from here (after the focus has been
        # moved back to the best known position)
        with future._autofocus_lock:
            if future._autofocus_state == CANCELLED:
                raise CancelledError()
            future._autofocus_state = FINISHED
def getFullImage(self):
    """
    Stitch all the tiles of self.tiles into one large image by pasting each
    tile at its position (no blending of overlaps: later tiles overwrite
    earlier ones where they overlap).
    return (2D DataArray): same dtype as the tiles, with shape corresponding
      to the bounding box of all the tiles.
    """
    tiles = self.tiles
    # Compute the bounding box of each tile and the global bounding box
    # Get a fixed pixel size by using the first one
    # TODO: use the mean, in case they are all slightly different due to
    # correction?
    pxs = tiles[0].metadata[model.MD_PIXEL_SIZE]
    tbbx_phy = []  # tuples of ltrb in physical coordinates
    for t in tiles:
        c = t.metadata[model.MD_POS]  # physical center of the tile
        w = t.shape[-1], t.shape[-2]  # tile width, height in px
        if not util.almost_equal(pxs[0], t.metadata[model.MD_PIXEL_SIZE][0], rtol=0.01):
            logging.warning("Tile @ %s has a unexpected pixel size (%g vs %g)",
                            c, t.metadata[model.MD_PIXEL_SIZE][0], pxs[0])
        # left/top/right/bottom, from the center +- half the physical extent
        bbx = (c[0] - (w[0] * pxs[0] / 2), c[1] - (w[1] * pxs[1] / 2),
               c[0] + (w[0] * pxs[0] / 2), c[1] + (w[1] * pxs[1] / 2))
        tbbx_phy.append(bbx)
    gbbx_phy = (min(b[0] for b in tbbx_phy), min(b[1] for b in tbbx_phy),
                max(b[2] for b in tbbx_phy), max(b[3] for b in tbbx_phy))

    # Compute the bounding-boxes in pixel coordinates
    tbbx_px = []

    # that's the origin (Y is max as Y is inverted)
    glt = gbbx_phy[0], gbbx_phy[3]
    for bp, t in zip(tbbx_phy, tiles):
        # pixel left-top corner of the tile, relative to the global origin
        # (Y axis is flipped between physical and pixel coordinates)
        lt = (int(round((bp[0] - glt[0]) / pxs[0])),
              int(round(-(bp[3] - glt[1]) / pxs[1])))
        w = t.shape[-1], t.shape[-2]
        bbx = (lt[0], lt[1], lt[0] + w[0], lt[1] + w[1])
        tbbx_px.append(bbx)
    gbbx_px = (min(b[0] for b in tbbx_px), min(b[1] for b in tbbx_px),
               max(b[2] for b in tbbx_px), max(b[3] for b in tbbx_px))

    assert gbbx_px[0] == gbbx_px[1] == 0
    # NOTE(review): tbbx_px[-2:] selects the last two tile bounding boxes (full
    # ltrb tuples), not the global width/height — this sanity-check heuristic
    # looks suspect; confirm the intended comparison.
    if numpy.greater(gbbx_px[-2:], 4 * numpy.sum(tbbx_px[-2:])).any():
        # Overlap > 50% or missing tiles
        logging.warning("Global area much bigger than sum of tile areas")

    # Paste each tile
    logging.debug("Generating global image of size %dx%d px", gbbx_px[-2], gbbx_px[-1])
    im = numpy.empty((gbbx_px[-1], gbbx_px[-2]), dtype=tiles[0].dtype)
    # Use minimum of the values in the tiles for background
    im[:] = numpy.amin(tiles)
    for b, t in zip(tbbx_px, tiles):
        im[b[1]:b[1] + t.shape[0], b[0]:b[0] + t.shape[1]] = t
        # TODO: border

    # Update metadata
    # TODO: check this is also correct based on lt + half shape * pxs
    c_phy = ((gbbx_phy[0] + gbbx_phy[2]) / 2, (gbbx_phy[1] + gbbx_phy[3]) / 2)
    md = tiles[0].metadata.copy()
    md[model.MD_POS] = c_phy
    md[model.MD_DIMS] = "YX"
    return model.DataArray(im, md)
def getFullImage(self):
    """
    Stitch all the tiles of self.tiles into one large image, blending the
    overlapping regions with a smooth weight gradient.
    return (2D DataArray): same dtype as the tiles, with shape corresponding
      to the bounding box of all the tiles.
    """
    tiles = self.tiles
    # Compute the bounding box of each tile and the global bounding box
    # Get a fixed pixel size by using the first one
    # TODO: use the mean, in case they are all slightly different due to
    # correction?
    pxs = tiles[0].metadata[model.MD_PIXEL_SIZE]
    tbbx_phy = []  # tuples of ltrb in physical coordinates
    for t in tiles:
        c = t.metadata[model.MD_POS]  # physical center of the tile
        w = t.shape[-1], t.shape[-2]  # tile width, height in px
        if not util.almost_equal(pxs[0], t.metadata[model.MD_PIXEL_SIZE][0], rtol=0.01):
            logging.warning("Tile @ %s has a unexpected pixel size (%g vs %g)",
                            c, t.metadata[model.MD_PIXEL_SIZE][0], pxs[0])
        bbx = (c[0] - (w[0] * pxs[0] / 2), c[1] - (w[1] * pxs[1] / 2),
               c[0] + (w[0] * pxs[0] / 2), c[1] + (w[1] * pxs[1] / 2))
        tbbx_phy.append(bbx)
    gbbx_phy = (min(b[0] for b in tbbx_phy), min(b[1] for b in tbbx_phy),
                max(b[2] for b in tbbx_phy), max(b[3] for b in tbbx_phy))

    # Compute the bounding-boxes in pixel coordinates
    tbbx_px = []

    # that's the origin (Y is max as Y is inverted)
    glt = gbbx_phy[0], gbbx_phy[3]
    for bp, t in zip(tbbx_phy, tiles):
        lt = (int(round((bp[0] - glt[0]) / pxs[0])),
              int(round(-(bp[3] - glt[1]) / pxs[1])))
        w = t.shape[-1], t.shape[-2]
        bbx = (lt[0], lt[1], lt[0] + w[0], lt[1] + w[1])
        tbbx_px.append(bbx)
    gbbx_px = (min(b[0] for b in tbbx_px), min(b[1] for b in tbbx_px),
               max(b[2] for b in tbbx_px), max(b[3] for b in tbbx_px))

    assert gbbx_px[0] == gbbx_px[1] == 0
    # NOTE(review): tbbx_px[-2:] selects the last two tile bounding boxes (full
    # ltrb tuples), not widths/heights — this heuristic looks suspect; confirm.
    if numpy.greater(gbbx_px[-2:], 4 * numpy.sum(tbbx_px[-2:])).any():
        # Overlap > 50% or missing tiles
        logging.warning("Global area much bigger than sum of tile areas")

    # Weave tiles by using a smooth gradient. The part of the tile that does
    # not overlap with any previous tiles is inserted into the part of the
    # ovv image that is still empty. This part is determined by a mask, which
    # indicates the parts of the image that already contain image data (True)
    # and the ones that are still empty (False). For the overlapping parts,
    # the tile is multiplied with weights corresponding to a gradient that has
    # its maximum at the center of the tile and smoothly decreases toward the
    # edges. The function for creating the weights is a distance measure
    # resembling the maximum-norm, i.e. equidistant points lie on a rectangle
    # (instead of a circle like for the euclidean norm). Additionally, the x
    # and y values generating this norm are raised to the power of 6 to create
    # a steeper gradient. The value 6 is quite arbitrary and was found to give
    # good results during experimentation.
    # The part of the overview image that overlaps with the new tile is
    # multiplied with the complementary weights (1 - weights) and the weighted
    # overlapping parts of the new tile and the ovv image are added, so the
    # resulting image contains a gradient in the overlapping regions between
    # all the tiles that have been inserted before and the newly inserted tile.

    # Paste each tile
    logging.debug("Generating global image of size %dx%d px", gbbx_px[-2], gbbx_px[-1])
    im = numpy.empty((gbbx_px[-1], gbbx_px[-2]), dtype=tiles[0].dtype)
    # Use minimum of the values in the tiles for background
    im[:] = numpy.amin(tiles)
    # The mask is multiplied with the tile, thereby creating a tile with a gradient
    # Note: dtype=bool (the numpy.bool alias was deprecated in NumPy 1.20 and
    # removed in 1.24, so it must not be used anymore)
    mask = numpy.zeros((gbbx_px[-1], gbbx_px[-2]), dtype=bool)
    for b, t in zip(tbbx_px, tiles):
        # Part of image overlapping with tile
        roi = im[b[1]:b[1] + t.shape[0], b[0]:b[0] + t.shape[1]]
        moi = mask[b[1]:b[1] + t.shape[0], b[0]:b[0] + t.shape[1]]

        # Insert image at positions that are still empty
        roi[~moi] = t[~moi]

        # Create gradient in overlapping region. Ratio between old image and
        # new tile values determined by distance to the center of the tile.
        # Create weight matrix with decreasing values from its center that
        # has the same size as the tile.
        sz = numpy.array(roi.shape)
        hh, hw = sz / 2  # half-height, half-width
        x = numpy.linspace(-hw, hw, sz[1])
        y = numpy.linspace(-hh, hh, sz[0])
        xx, yy = numpy.meshgrid((x / hw) ** 6, (y / hh) ** 6)
        w = numpy.maximum(xx, yy)  # 0 at the tile center, ~1 at the edges

        # Hardcoding a weight function is quite arbitrary and might result in
        # suboptimal solutions in some cases. Alternatively, different weights
        # might be used. One option would be to select a fixed region on the
        # sides of the image, e.g. 20% (expected overlap), and only apply a
        # (linear) gradient to these parts, while keeping the new tile for the
        # rest of the region. However, this approach does not solve the
        # hardcoding problem since the overlap region is still arbitrary.
        # Future solutions might adaptively select this region.

        # Use weights to create gradient in overlapping region
        roi[moi] = (t * (1 - w))[moi] + (roi * w)[moi]

        # Update mask
        mask[b[1]:b[1] + t.shape[0], b[0]:b[0] + t.shape[1]] = True

    # Update metadata
    # TODO: check this is also correct based on lt + half shape * pxs
    c_phy = ((gbbx_phy[0] + gbbx_phy[2]) / 2, (gbbx_phy[1] + gbbx_phy[3]) / 2)
    md = tiles[0].metadata.copy()
    md[model.MD_POS] = c_phy
    md[model.MD_DIMS] = "YX"
    return model.DataArray(im, md)
def move_abs(comp_name, moves, check_distance=True):
    """
    move (in absolute) the axis of the given component to the specified position
    comp_name (str): name of the component
    moves (dict str -> str): axis -> position (as text)
    check_distance (bool): if the axis is in meters, check that the move is
      not too big.
    raises ValueError: if the component is not an actuator, has no such axis,
      or the position cannot be parsed
    raises IOError: if the move is too big, or the move itself fails
    """
    # collections.Iterable was a deprecated alias removed in Python 3.10
    from collections.abc import Iterable

    component = get_component(comp_name)
    act_mv = {}  # axis -> value
    for axis_name, str_position in moves.items():
        try:
            if axis_name not in component.axes:
                raise ValueError("Actuator %s has no axis '%s'" % (comp_name, axis_name))
            ad = component.axes[axis_name]
        except (TypeError, AttributeError):
            raise ValueError("Component %s is not an actuator" % comp_name)

        # Allow the user to indicate the position via the user-friendly choice entry
        position = None
        if hasattr(ad, "choices") and isinstance(ad.choices, dict):
            for key, value in ad.choices.items():
                if value == str_position:
                    logging.info("Converting '%s' into %s", str_position, key)
                    position = key
                    # Even if it's a big distance, we don't complain as it's likely
                    # that all choices are safe
                    break

        if position is None:
            if ad.unit == "m":
                try:
                    # Parse via convert_to_object first (consistent with the
                    # sibling move_abs variant), then force to float
                    position = float(convert_to_object(str_position))
                except ValueError:
                    raise ValueError("Position '%s' cannot be converted to a number" % str_position)

                # compare to the current position, to see if the new position sounds reasonable
                cur_pos = component.position.value[axis_name]
                if check_distance and abs(cur_pos - position) > MAX_DISTANCE:
                    raise IOError("Distance of %f m is too big (> %f m), use '--big-distance' to allow the move."
                                  % (abs(cur_pos - position), MAX_DISTANCE))
            else:
                position = convert_to_object(str_position)

            # If only a couple of positions are possible, and asking for a float,
            # avoid the rounding error by looking for the closest possible
            if (isinstance(position, numbers.Real) and
                hasattr(ad, "choices") and
                isinstance(ad.choices, Iterable) and
                position not in ad.choices):
                closest = util.find_closest(position, ad.choices)
                if util.almost_equal(closest, position, rtol=1e-3):
                    logging.debug("Adjusting value %.15g to %.15g", position, closest)
                    position = closest

        act_mv[axis_name] = position
        # Choice-based positions may be non-numeric (e.g. strings), which
        # units.readable_str cannot format
        if isinstance(position, numbers.Real):
            pos_pretty = units.readable_str(position, ad.unit, sig=3)
        else:
            pos_pretty = "%s" % (position,)
        logging.info(u"Will move %s.%s to %s", comp_name, axis_name, pos_pretty)

    try:
        m = component.moveAbs(act_mv)
        try:
            m.result(120)
        except KeyboardInterrupt:
            logging.warning("Cancelling absolute move of component %s", comp_name)
            m.cancel()
            raise
    except Exception as exc:
        raise IOError("Failed to move component %s to %s: %s" % (comp_name, act_mv, exc))
def move_abs(comp_name, moves, check_distance=True):
    """
    move (in absolute) the axis of the given component to the specified position
    comp_name (str): name of the component
    moves (dict str -> str): axis -> position (as text)
    check_distance (bool): if the axis is in meters, check that the move is
      not too big.
    raises ValueError: if the component is not an actuator, has no such axis,
      or the position cannot be parsed
    raises IOError: if the move is too big, or the move itself fails
    """
    # collections.Iterable was a deprecated alias removed in Python 3.10
    from collections.abc import Iterable

    component = get_component(comp_name)
    act_mv = {}  # axis -> value
    for axis_name, str_position in moves.items():
        try:
            if axis_name not in component.axes:
                raise ValueError("Actuator %s has no axis '%s'" % (comp_name, axis_name))
            ad = component.axes[axis_name]
        except (TypeError, AttributeError):
            raise ValueError("Component %s is not an actuator" % comp_name)

        # Allow the user to indicate the position via the user-friendly choice entry
        position = None
        if hasattr(ad, "choices") and isinstance(ad.choices, dict):
            for key, value in ad.choices.items():
                if value == str_position:
                    logging.info("Converting '%s' into %s", str_position, key)
                    position = key
                    # Even if it's a big distance, we don't complain as it's likely
                    # that all choices are safe
                    break

        if position is None:
            if ad.unit == "m":
                try:
                    position = float(convert_to_object(str_position))
                except ValueError:
                    raise ValueError("Position '%s' cannot be converted to a number" % str_position)

                # compare to the current position, to see if the new position sounds reasonable
                cur_pos = component.position.value[axis_name]
                if check_distance and abs(cur_pos - position) > MAX_DISTANCE:
                    raise IOError("Distance of %f m is too big (> %f m), use '--big-distance' to allow the move."
                                  % (abs(cur_pos - position), MAX_DISTANCE))
            else:
                position = convert_to_object(str_position)

            # If only a couple of positions are possible, and asking for a float,
            # avoid the rounding error by looking for the closest possible
            if (isinstance(position, numbers.Real) and
                hasattr(ad, "choices") and
                isinstance(ad.choices, Iterable) and
                position not in ad.choices):
                closest = util.find_closest(position, ad.choices)
                if util.almost_equal(closest, position, rtol=1e-3):
                    logging.debug("Adjusting value %.15g to %.15g", position, closest)
                    position = closest

        act_mv[axis_name] = position
        # Choice-based positions may be non-numeric (e.g. strings), which
        # units.readable_str cannot format
        if isinstance(position, numbers.Real):
            pos_pretty = units.readable_str(position, ad.unit, sig=3)
        else:
            pos_pretty = "%s" % (position,)
        logging.info(u"Will move %s.%s to %s", comp_name, axis_name, pos_pretty)

    try:
        m = component.moveAbs(act_mv)
        try:
            m.result(120)
        except KeyboardInterrupt:
            logging.warning("Cancelling absolute move of component %s", comp_name)
            m.cancel()
            raise
    except Exception as exc:
        raise IOError("Failed to move component %s to %s: %s" % (comp_name, act_mv, exc))