def move_to_feature(self, feature, ignore_position=False, ignore_z_pos=False,
                    margin=50, tolerance=0.5, max_iterations=10):
    """Bring the feature in the supplied image to the centre of the camera.

    Strictly, this moves the sample such that the datum pixel of the
    "feature" image lands on the datum pixel of the camera.  Unless
    instructed not to, it first moves to the datum point stored in the
    image's metadata, then iteratively compares the camera image with the
    feature and corrects the position.

    Parameters
    ----------
    feature : ImageWithLocation or numpy.ndarray
        The feature that we want to move to.
    ignore_position : bool, optional (default False)
        Set True to skip the initial move using the image's metadata.
    ignore_z_pos : bool, optional (default False)
        Set True to keep the current Z position during the initial move.
    margin : int, optional
        The maximum error, in pixels, that we can cope with (this sets the
        size of the search area we use to look for the feature in the
        camera image; it is (2*margin + 1) in both X and Y).  Set to 0 to
        use the maximum possible search area (given by the difference in
        size between the feature image and the camera image).
    tolerance : float, optional
        Once the error between our current position and the feature's
        position is below this threshold, we stop.
    max_iterations : int, optional
        The maximum number of moves we make to fine-tune the position.

    Returns
    -------
    bool
        True if the final move was within ``tolerance``, False otherwise.
    """
    # Sanity-check the feature's datum pixel lies inside the feature image.
    if (feature.datum_pixel[0] < 0 or feature.datum_pixel[0] > np.shape(feature)[0]
            or feature.datum_pixel[1] < 0 or feature.datum_pixel[1] > np.shape(feature)[1]):
        self.log('The datum picture of the feature is outside of the image!', level='WARN')
    if not ignore_position:
        try:
            if ignore_z_pos:
                self.move(feature.datum_location[:2])  # ignore original z value
            else:
                self.move(feature.datum_location)  # initial move to where we recorded the feature was
        except (AttributeError, KeyError):
            # A plain ndarray (or an image without location metadata) has no
            # datum_location; skip the coarse move and rely on fine-tuning.
            print("Warning: no position data in feature image, skipping initial move.")
    image = self.color_image()
    assert isinstance(image, ImageWithLocation), "CameraWithLocation should return an ImageWithLocation...?"
    last_move = np.inf  # np.infty was removed in NumPy 2.0
    for i in range(max_iterations):
        try:
            self.settle()
            image = self.color_image()
            pixel_position = locate_feature_in_image(image, feature, margin=margin, restrict=margin > 0)
            new_position = image.pixel_to_location(pixel_position)
            self.move(new_position)
            # Distance of the correction we just made; small == converged.
            last_move = np.sqrt(np.sum((new_position - image.datum_location) ** 2))
            self.log("Centering on feature, iteration {}, moved by {}".format(i, last_move))
            if last_move < tolerance:
                break
        except Exception as e:
            self.log("Error centering on feature, iteration {} raised an exception:\n{}\n".format(i, e)
                     + "The feature size was {}\n".format(feature.shape)
                     + "The image size was {}\n".format(image.shape))
    if last_move > tolerance:
        self.log("Error centering on feature, final move was too large.")
    return last_move < tolerance
def calibrate_xy(self, update_progress=lambda p: p, step=None, min_step=1e-5, max_step=1000):
    """Make a series of moves in X and Y to determine the XY components of the pixel-to-sample matrix.

    Parameters
    ----------
    update_progress : callable, optional
        Called with an integer progress value (1-7) as calibration proceeds.
    step : float, optional (default None)
        The amount to move the stage by.  This should move the sample by
        approximately 1/10th of the field of view.  If it is left as None,
        we will attempt to auto-determine the step size (see below).
    min_step : float, optional
        If we auto-determine the step size, start with this step size.
        It's deliberately tiny.
    max_step : float, optional
        If we're auto-determining the step size, fail if it looks like
        it's more than this.

    The auto-determination gingerly moves the stage a tiny amount,
    repeating with exponentially increasing distance until we see a
    reasonable movement, so we shouldn't need to worry too much about
    setting the calibration distance.  NB this currently assumes the
    stage deals with backlash correction for us.

    Returns
    -------
    tuple
        (pixel_to_sample_displacement, location_shifts, pixel_shifts,
        fractional_error).
    """
    # First, acquire a template image:
    self.settle()
    starting_image = self.color_image()
    starting_location = self.datum_location
    w, h = starting_image.shape[:2]
    # Use the central 50% x 50% of the starting image as the template.
    template = starting_image[int(w / 4):int(3 * w / 4), int(h / 4):int(3 * h / 4), ...]
    threshold_shift = w * 0.02  # Require a shift of at least 2% of the image's width
    target_shift = w * 0.1  # Aim for a shift of about 10%
    assert np.sum((locate_feature_in_image(starting_image, template) - self.datum_pixel) ** 2) < 1, \
        "Template's not centred!"
    update_progress(1)

    if step is None:
        # Auto-determine the calibration distance: move exponentially
        # increasing amounts until the image shifts appreciably.
        step = min_step
        shift = np.zeros(2)
        while np.linalg.norm(shift) < threshold_shift:
            assert step < max_step, "Error, we hit the maximum step before we saw the sample move."
            self.move(starting_location + np.array([step, 0, 0]))
            image = self.color_image()
            shift = locate_feature_in_image(image, template) - image.datum_pixel
            if np.linalg.norm(shift) > threshold_shift:
                break
            step *= 10 ** 0.5
        # Scale the stage step to aim for ~10% image shift.  NB we must
        # divide by the *magnitude* of the shift: dividing by the raw
        # 2-vector would silently turn `step` into an array.
        step *= target_shift / np.linalg.norm(shift)
    update_progress(2)

    # Move the stage in a square, recording the displacement from both the
    # stage and the camera.
    pixel_shifts = []
    images = []
    for i, p in enumerate([[-step, -step, 0], [-step, step, 0], [step, step, 0], [step, -step, 0]]):
        self.move(starting_location + np.array(p))
        self.settle()
        image = self.color_image()
        # NB the minus sign here: we want the position of the image we just
        # took relative to the datum point of the template, not the other
        # way around.
        pixel_shifts.append(-locate_feature_in_image(image, template) + image.datum_pixel)
        images.append(image)
        update_progress(3 + i)

    # Least-squares fit of the XY part of the matrix relating pixels to
    # distance.  NOTE(review): this uses the raw stage position rather than
    # im.datum_location — will this really work when the stage has not
    # previously been calibrated?  Verify against the caller.
    location_shifts = np.array([ensure_2d(im.attrs['stage_position'] - starting_location)
                                for im in images])
    pixel_shifts = np.array(pixel_shifts)
    # Solve pixel_shifts * A = location_shifts.  rcond=None opts in to the
    # modern default cutoff and silences NumPy's FutureWarning.
    A, res, rank, s = np.linalg.lstsq(pixel_shifts, location_shifts, rcond=None)
    self.pixel_to_sample_displacement = np.zeros((3, 3))
    self.pixel_to_sample_displacement[2, 2] = 1  # just pass Z through unaltered
    self.pixel_to_sample_displacement[:2, :2] = A  # A deals with xy only
    fractional_error = np.sqrt(np.sum(res) / np.prod(pixel_shifts.shape)) / np.std(pixel_shifts)
    if fractional_error > 0.02:  # Check it was a reasonably good fit
        print("Warning: the error fitting measured displacements was %.1f%%"
              % (fractional_error * 100))
    self.log("Calibrated the pixel-location matrix.\nResiduals were {}% of the shift.\nStage positions:\n{}\n"
             "Pixel shifts:\n{}\nResulting matrix:\n{}".format(
                 fractional_error * 100, location_shifts, pixel_shifts,
                 self.pixel_to_sample_displacement))
    update_progress(7)
    self.update_config('pixel_to_sample_displacement', self.pixel_to_sample_displacement)
    return self.pixel_to_sample_displacement, location_shifts, pixel_shifts, fractional_error