def move_stage_and_record(step, N_frames, microscope, data_group, template, dwell_time):
    """Move the stage repeatedly by ``step`` while recording camera frames.

    A background thread (running ``move_stage``) steps the stage and logs
    its t,x,y,z positions while the camera captures ``N_frames`` JPEG
    frames into RAM buffers.  Each frame is then decoded, the ``template``
    is located in it with ``find_template``, and both the stage track and
    the camera track are written into ``data_group``.

    Parameters
    ----------
    step :
        Relative stage move handed to ``move_stage``.
    N_frames : int
        Number of frames to capture.
    microscope :
        Object exposing ``camera`` (picamera-like) and ``stage`` attributes.
    data_group :
        h5py-like group; gains ``stage_moves`` and ``camera_motion`` datasets
        plus a ``step`` attribute.
    template : 2-D array
        Template image used to locate the spot in each frame.
    dwell_time : float
        Pause between stage moves, passed to ``move_stage`` (seconds).
    """
    ms = microscope
    data_group.attrs['step'] = step
    ms.camera.start_preview(resolution=(640, 480))
    # we will use a RAM buffer to record a bunch of frames
    outputs = [io.BytesIO() for i in range(N_frames)]
    stop_moving_event = threading.Event()
    stage_moves = []
    movement_thread = threading.Thread(target=move_stage,
                                       args=(step, dwell_time,
                                             stop_moving_event, ms.stage,
                                             stage_moves),
                                       name='stage_movement_thread')
    movement_thread.start()
    # BUG FIX: format spec was "{:.0}" (general format, precision 0) which
    # prints scientific notation; "{:.0f}" gives whole seconds as intended.
    print("Starting acquisition of {} frames, should take about {:.0f}s.".format(
        N_frames, N_frames / ms.camera.framerate))
    try:
        start_t = time.time()
        # BUG FIX: original called the undefined global ``camera``; the
        # camera lives on the microscope object.
        ms.camera.capture_sequence(outputs, 'jpeg', use_video_port=True)
        end_t = time.time()
    finally:
        # Always stop the mover and the preview, even if capture fails.
        print("Stopping...")
        stop_moving_event.set()
        movement_thread.join()
        ms.camera.stop_preview()
    print("Recorded {} frames in {} seconds ({} fps)".format(
        N_frames, end_t - start_t, N_frames / (end_t - start_t)))
    # BUG FIX: ``framerate`` was an undefined name here.  The requested
    # framerate is not passed into this function, so we can only report
    # what the camera says it is running at.
    print("Camera framerate was set to {}, and reports as {}".format(
        ms.camera.framerate, ms.camera.framerate))
    data_group['stage_moves'] = np.array(stage_moves)
    data_group['stage_moves'].attrs['description'] = \
        "t,x,y,z data for the stage's motion during the sequence of moves " \
        "time is in seconds, position is in stage steps."
    # go through the captured images and process them.
    data = np.zeros((N_frames, 3))
    for j, k in enumerate(outputs):
        # np.frombuffer replaces the deprecated np.fromstring (removed in
        # recent NumPy for this use).
        frame_data = np.frombuffer(k.getvalue(), dtype=np.uint8)
        frame = cv2.imdecode(frame_data, 1)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        data[j, 1:], corr = find_template(template, frame - np.mean(frame),
                                          return_corr=True, fraction=0.5)
        # Timestamp each frame assuming a constant (reported) framerate.
        # BUG FIX: ``framerate`` was undefined; use the camera's value.
        data[j, 0] = float(j) / float(ms.camera.framerate) + start_t
        printProgressBar(j, N_frames)
    print("")
    data_group["camera_motion"] = data
    data_group["camera_motion"].attrs['description'] = \
        "t,x,y data of the position, as recorded by the camera." \
        "t is in seconds, x/y are in pixels"
    data_group["camera_motion"].attrs['reported_framerate'] = ms.camera.framerate
def measure_txy(n, start_time, camera, templ8):
    """Measure position n times and return a t,x,y array.

    Parameters
    ----------
    n : int
        Number of measurements to take.
    start_time : float
        Reference time (as from ``time.time()``); stored times are relative
        to this.
    camera :
        Camera object accepted by ``get_numpy_image``.
    templ8 : 2-D array
        Template image located in each frame by ``find_template``.

    Returns
    -------
    numpy.ndarray
        Shape ``(3, n)``; row 0 is time in seconds since ``start_time``,
        rows 1-2 are the x,y template position in pixels.
    """
    # BUG FIX: the original allocated np.zeros((3, samples)) and looped over
    # range(samples), but ``samples`` is undefined — the parameter is ``n``.
    pos = np.zeros((3, n))  # create some variables
    for j in range(n):
        frame = get_numpy_image(camera, True)
        pos[1:, j] = find_template(templ8, frame)  # measures the position
        pos[0, j] = time.time() - start_time
    return pos
def measure_txy(start_t, ms, templ8):
    """Take a single timestamped position measurement.

    Grabs one frame from the microscope ``ms``, converts it to greyscale,
    and locates ``templ8`` in the mean-subtracted image.

    Returns a ``(1, 3)`` array ``[[t, x, y]]`` where ``t`` is seconds since
    ``start_t`` and ``x``/``y`` are the template position in pixels.
    """
    elapsed = time.time() - start_t
    grey = cv2.cvtColor(ms.rgb_image().astype(np.float32), cv2.COLOR_BGR2GRAY)
    position, corr = find_template(templ8, grey - np.mean(grey),
                                   return_corr=True, fraction=0.5)
    measurement = np.zeros((1, 3))
    measurement[0, 0] = elapsed
    measurement[0, 1:] = position
    return measurement
def measure_txy(n, start_time, camera, templ8):
    """Measure position n times and return a t,x,y array.

    Everything used in the definition is passed in as an argument.  Each of
    the ``n`` iterations grabs a frame, locates ``templ8`` in it, and stores
    the position together with the elapsed time since ``start_time``.

    Returns a ``(3, n)`` array: row 0 holds times in seconds, rows 1-2 hold
    the x,y template positions in pixels.
    """
    txy = np.zeros((3, n))
    for col in range(n):
        snapshot = get_numpy_image(camera, True)
        position = find_template(templ8, snapshot)
        txy[1:, col] = position
        txy[0, col] = time.time() - start_time
    return txy
def measure_txy(ms, start_t, templ8):
    """Take one timestamped position measurement, showing the correlation.

    Grabs a frame from the microscope ``ms``, locates ``templ8`` in it, and
    briefly pauses the preview to display the normalised correlation image
    for debugging.

    Returns
    -------
    (txy, frame) :
        ``txy`` is a ``(1, 3)`` array ``[[t, x, y]]`` (t in seconds since
        ``start_t``, x/y in pixels); ``frame`` is the greyscale image the
        measurement was made on.
    """
    txy = np.zeros((1, 3))
    txy[0, 0] = time.time() - start_t
    frame = ms.rgb_image().astype(np.float32)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    txy[0, 1:], corr = find_template(templ8, frame - np.mean(frame),
                                     return_corr=True, fraction=0.5)
    # BUG FIX: the original referenced an undefined global ``camera``; the
    # camera is reached through the ``ms`` parameter, as everywhere else.
    ms.camera.stop_preview()
    # Show the correlation scaled to 0-255 for one second, then resume.
    cv2.imshow("corr", corr * 255.0 / np.max(corr))
    cv2.waitKey(1000)
    ms.camera.start_preview()
    return txy, frame
# NOTE(review): this script fragment has lost its original line breaks, so the
# first inline "#" comment turns the remainder of the line into a comment and
# the code cannot run as written.  It is also truncated: the outer try: and the
# inner try: around the tweet have no matching except/finally in view.  Intent
# (from the visible text): grab a template image, then loop — track the spot
# against the template, log t,x,y to HDF5 via df.add_data, save a frame to
# disk, and tweet the position every 4 hours.  Left byte-identical; needs the
# original formatting restored before it can be fixed.
image = get_numpy_image(camera, greyscale=True) #takes template photo templ8 = image[100:-100, 100:-100] #crops image loop = True tweet = True start_t = time.time() #measure starting time try: while loop: data = np.zeros( (3, N_frames)) #creates the array in which data is stored for i in range(data.shape[1]): #takes 100 images and compares the location of the spot with the template frame = get_numpy_image(camera, True) spot_coord = find_template(templ8, frame) data[1, i] = spot_coord[0] data[2, i] = spot_coord[1] data[0, i] = time.time() - start_t df.add_data(data, data_gr, "data") #writes data to .hdf5 file imgfile_location = "/home/pi/dev/fibre_stage_characterisation/frames/drift_%s.jpg" % time.strftime( "%Y%m%d_%H%M%S") cv2.imwrite(imgfile_location, frame) try: if time.gmtime(time.time())[3] in [ 0, 4, 8, 12, 16, 20 ] and tweet: #tweets a picture and co-ordinates every 4 hours api.update_with_media( imgfile_location, status="I'm currently at %d, %d" % (spot_coord[0], spot_coord[1]))
# NOTE(review): whitespace-mangled script fragment, truncated mid for-loop
# (the loop body continues beyond this chunk).  Intent (from the visible
# text): load a template JPEG, pad it to a 352x192 overlay (picamera overlays
# need dimensions that are multiples of 32 and 16), show it semi-transparent
# over the preview, define move_overlay() to reposition it by camera-pixel
# shift, then scan the stage along x recording positions.  Left byte-identical;
# restore the original line breaks before editing.
img = Image.open( "/home/pi/summer/drift/calibration/step_size_templ8.jpg") pad = Image.new('RGB', (352, 192)) #Tuple must be multiples of 32 and 16 pad.paste(img, (0, 0)) overlay = camera.add_overlay(pad.tobytes(), size=(352, 192)) overlay.alpha = 128 overlay.fullscreen = False overlay.layer = 3 initial_stage_position = stage.position frame = ms.rgb_image().astype(np.float32) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) templ8_position = np.zeros((1, 2)) templ8_position[0, :], corr = find_template(templ8, frame - np.mean(frame), return_corr=True, fraction=0.5) def move_overlay(cx, cy): """move the overlay to show a shift of cx,cy camera pixels""" x = int(960 + (cx - templ8_position[0, 0] - 176) * 2.25) y = int(540 + (cy - templ8_position[0, 1] - 96) * 2.25) overlay.window = (x, y, int(352 * 2.25), int(192 * 2.25)) stage.move_rel([-side_length / 2, 0, -side_length / 2]) for i in range(points): stage.move_rel([side_length / (points - 1), 0, 0]) data_stage[i, :] = stage.position frame = ms.rgb_image().astype(np.float32) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# NOTE(review): whitespace-mangled script fragment (a near-duplicate of the
# capture/processing section of move_stage_and_record above).  The original
# indentation is lost, so the extent of the finally: suite cannot be
# recovered with confidence — reformatting it blind could move the result
# printing inside finally and raise NameError on a failed capture.  Known
# issues to fix once formatting is restored: np.fromstring is deprecated
# (use np.frombuffer), and ``framerate``/``data`` must be defined earlier in
# the full script.  Left byte-identical.
try: start_t = time.time() camera.capture_sequence(outputs, 'jpeg', use_video_port=True) end_t = time.time() finally: event.set() t.join() stage.move_abs(initial_stage_position) camera.stop_preview() print("Stopping...") print("Recorded {} frames in {} seconds ({} fps)".format( N_frames, end_t - start_t, N_frames / (end_t - start_t))) print("Camera framerate was set to {}, and reports as {}".format( framerate, camera.framerate)) for j, k in enumerate(outputs): frame_data = np.fromstring(k.getvalue(), dtype=np.uint8) frame = cv2.imdecode(frame_data, 1) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) data[j, 1:], corr = find_template(templ8, frame - np.mean(frame), return_corr=True, fraction=0.5) data[j, 0] = float(j) / float(framerate) printProgressBar(j, N_frames) print("") df.add_data(data, cam_pos, "data")
# NOTE(review): whitespace-mangled fragment that is cut off at the START —
# it opens with the dangling tail of a df.new_group("stage position", ...)
# call whose opening is outside this chunk, so it cannot be reformatted in
# isolation.  Intent (from the visible text): an orthogonality measurement
# that moves the stage in a square (x leg then y leg), recording stage and
# camera positions at each of ``points`` steps.  "measurments" is a typo in
# a runtime string — only fix it together with any reader of that group
# name.  Left byte-identical.
"stage position", "orthogonality measurments, moves in a square") data_cam = df.new_group( "camera position", "orthogonality measurments, moves in a square") stage_pos = np.zeros((2 * points, 3)) cam_pos = np.zeros((2 * points, 2)) stage.move_rel( np.array([-1, -1, 0]) * side_length / 2 - stage.backlash) stage.move_rel(stage.backlash) for i in range(points): stage_pos[i, 0:] = stage.position frame = get_numpy_image(camera, True) cam_pos[i, 0:], corr = find_template(templ8, frame - background, return_corr=True) stage.move_rel([side_length / points, 0, 0]) camera.stop_preview() cv2.imshow("corr", corr.astype(float) / np.max(corr) * 255) cv2.waitKey(1000) camera.start_preview() # time.sleep(1) for j in range(points): stage_pos[j + points, 0:] = stage.position frame = get_numpy_image(camera, True) i = j + points cam_pos[i, 0:], corr = find_template(templ8, frame - background, return_corr=True) stage.move_rel([0, side_length / points, 0])
# NOTE(review): whitespace-mangled script fragment, truncated at the dangling
# "if i > 0:" — the loop bodies continue beyond this chunk, so it cannot be
# reformatted from what is visible.  Intent: start the preview, record an
# initial sample image, load a saved template from template.npz, locate it
# in a greyscale frame, record the start time, then loop over move
# directions [1, 0, -1, 0] taking n_steps+1 measurements per leg.  Left
# byte-identical.
camera.start_preview(resolution=(640, 480)) #stage.move_rel([-backlash, -backlash, -backlash]) #stage.move_rel([backlash, backlash, backlash]) data_group['initial_sample_image'] = ms.rgb_image() frame = ms.rgb_image().astype(np.float32).mean(axis=2) #mean = np.mean(image) #templ8 = (image - mean)[100:-100, 100:-100] template = np.load("template.npz")['template'] data_group['template_image'] = template d, corr = find_template(template, frame, return_corr=True, fraction=0.15) initial_stage_position = stage.position start_time = time.time() data_group.attrs['start_time'] = start_time last_saved_image_time = 0 try: while True: for d in [1, 0, -1, 0]: data_stage = np.zeros((n_steps + 1, 3)) data_cam = np.zeros((n_steps + 1, 2)) data_time = np.zeros((n_steps + 1, )) for i in range(n_steps + 1): if i > 0:
# NOTE(review): whitespace-mangled script fragment (a variant of the drift
# tracker above that subtracts a Gaussian-blurred background before template
# matching).  Truncated at the dangling "except KeyboardInterrupt:" whose
# body lies beyond this chunk.  Visible defects to fix once formatting is
# restored: the tweet status uses ``spot_coord`` which is never assigned in
# this fragment (the measured position is in ``data[i, 1:]``), and the bare
# "except: pass" silently swallows tweet failures.  Left byte-identical.
loop = True tweet = True image = get_numpy_image(camera, greyscale = True).astype(np.float32) background = cv2.GaussianBlur(image, (41, 41), 0) templ8 = (image - background)[100:-100, 100:-100] start_t = time.time() try: while loop: data = np.zeros((N_frames, 3)) for i in range(N_frames): frame = get_numpy_image(camera, True) data[i, 1:], corr = find_template(templ8, frame - background, return_corr = True) cv2.imshow("corr", corr.astype(float) / np.max(corr) * 255) cv2.waitKey(1000) data[i, 0] = time.time() - start_t df.add_data(data, cam_pos, "data") imgfile_location = "/home/pi/dev/fibre_stage_characterisation/frames/drift_%s.jpg" % time.strftime("%Y%m%d_%H%M%S") cv2.imwrite(imgfile_location, frame) try: if time.gmtime(time.time())[3] in [0, 4, 8, 12, 16, 20] and tweet: #tweets a picture and co-ordinates every 4 hours api.update_with_media(imgfile_location, status = "I'm currently at %d, %d" %(spot_coord[0], spot_coord[1]) tweet = False elif time.gmtime(time.time())[3] not in [0, 4, 8, 12, 16, 20]: tweet = True except: pass except KeyboardInterrupt:
# Build a template from the centre of ``image`` (defined earlier in the
# script), save it to template.npz, and test the correlation against a
# freshly grabbed frame for each requested search fraction.
mean = np.mean(image)
print("Mean value of image: {}".format(mean))
w, h = image.shape
# 200x200 crop from the centre of the (mean-subtracted) image.
template = (image - mean)[w // 2 - 100:w // 2 + 100,
                          h // 2 - 100:h // 2 + 100]
np.savez("template.npz", template=template)
plt.figure()
plt.imshow(template)
plt.suptitle("Template image")
for i in [0.1, ]:
    # Grab a fresh frame and convert to greyscale for the test match.
    image = ms.rgb_image().astype(np.float32)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    print("Testing with another image: shape {}, dtype {}".format(
        image.shape, image.dtype))
    frame = image
    pos, corr = find_template(template, frame - np.mean(frame),
                              return_corr=True, fraction=i)
    plt.figure()
    # BUG FIX: np.float was a deprecated alias for the builtin ``float``
    # and was removed in NumPy 1.20; use ``float`` directly.
    plt.imshow(corr.astype(float))
    plt.savefig("test_correlation_fraction_{}.pdf".format(i),
                bbox_inches='tight', dpi=180)
plt.show()