def on_post_run_armed(self, context: scanning.hooks.AContext) -> None:
    """Stop the acquisition, mirroring the behaviour of post_run_ready."""
    # TODO: this should call configure too, will fail for 3D scans at
    # present
    detector = context.block_view(self.mri)
    detector.stop()
    # Block until the outstanding start request has completed
    context.wait_all_futures(self.start_future)
def setup_plugin(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    initial_configure=True,
) -> None:
    """Prepare the position labeller plugin for a (re)start.

    Args:
        context: Context used to talk to the child block
        completed_steps: Number of steps already completed in the scan
        steps_to_do: Number of steps that will be done this run
        initial_configure: True on the first configure of a scan; False
            when rewinding or setting up another batch
    """
    # Work out the last expected ID
    if initial_configure:
        # This is an initial configure, so reset start ID to 1
        id_start = 1
        self.done_when_reaches = steps_to_do
    else:
        # This is rewinding or setting up for another batch,
        # skip to a uniqueID that has not been produced yet.
        # Guard against an unset target, matching the sibling
        # on_configure which asserts before using it.
        assert self.done_when_reaches, "Done when reaches not assigned"
        id_start = self.done_when_reaches + 1
        self.done_when_reaches += steps_to_do
    # Delete any remaining old positions
    child = context.block_view(self.mri)
    futures = [child.delete_async()]
    futures += child.put_attribute_values_async(
        dict(enableCallbacks=True, idStart=id_start,
             arrayCounter=id_start - 1))
    xml, self.end_index = self._make_xml(completed_steps)
    # Wait for the previous puts to finish
    context.wait_all_futures(futures)
    # Put the xml
    child.xml.put_value(xml)
    # Start the plugin
    self.start_future = child.start_async()
def on_post_run_ready(self, context: scanning.hooks.AContext) -> None:
    """Stop the detector once the file writer has finished."""
    # The AD support cannot tell how many frames the detector has taken,
    # so Acquire never finishes on its own. The file writer completing at
    # post run tells us it is now safe to stop the AD acquisition.
    block = context.block_view(self.mri)
    block.stop()
    context.wait_all_futures(self.start_future)
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Wait for writing to complete, flushing and watching for stalls."""
    # Wait for the first array to arrive in the plugin without a timeout
    context.wait_all_futures(self.first_array_future)
    writer = context.block_view(self.mri)
    # Update progress based on number of frames written
    writer.numCapturedReadback.subscribe_value(self.update_completed_steps)
    # Future completing when the captured frame count reaches the target
    done_future = writer.when_value_matches_async(
        "numCapturedReadback", self.done_when_captured)
    self.last_capture_update = None
    finished = False
    while not finished:
        try:
            # Regular 1s timeout so we can request a manual flush each second
            context.wait_all_futures(done_future, timeout=1)
            finished = True
        except TimeoutError:
            # This is ok, means we aren't done yet, so flush
            self._flush_if_still_writing(writer)
            # Raise if it has been too long since the last frame was written
            if self._has_file_writing_stalled():
                raise TimeoutError(
                    self._get_file_writing_stalled_error_message(writer))
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Step through the scan, moving every mover point by point."""
    # Everything is timed relative to when we start
    deadline = time.time()
    first = self._completed_steps
    for step in range(first, first + self._steps_to_do):
        # The point we are meant to be scanning
        point = self._generator.get_point(step)
        # Update when this point is due, hence how long the moves may take
        deadline += point.duration
        duration = deadline - time.time()
        # Jump (instantly) to the beginning of the point, then start the
        # move to its end taking `duration` seconds, collecting futures
        futures: List[Future] = []
        for axis, mover in self._movers.items():
            mover.maybe_move_async(futures, point.lower[axis])
            mover.maybe_move_async(futures, point.upper[axis], duration)
        # Wait for the moves to complete
        context.wait_all_futures(futures)
        # Report this point as complete
        assert self.registrar, "Part has no registrar"
        self.registrar.report(scanning.infos.RunProgressInfo(step + 1))
        # Deliberately blow up if this is the configured exception step
        assert step + 1 != self._exception_step, (
            "Raising exception at step %s" % self._exception_step)
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Enable the sequencer if required, then run the sequence table."""
    # When there are no MotorInfo's the first row will have
    # Trigger.IMMEDIATE, so seqTableA must stay disabled until running.
    if not self.axis_mapping:
        self.panda.seqSetEnable()
    context.wait_all_futures(self.db_seq_table.run())
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    generator: scanning.hooks.AGenerator,
    fileDir: scanning.hooks.AFileDir,
    formatName: scanning.hooks.AFormatName = "odin",
    fileTemplate: scanning.hooks.AFileTemplate = "%s.h5",
) -> scanning.hooks.UInfos:
    """Configure the Odin writer and return its dataset information."""
    self.exposure_time = generator.duration
    # On initial configure, expect to get the demanded number of frames
    self.done_when_reaches = completed_steps + steps_to_do
    self.unique_id_offset = 0
    writer = context.block_view(self.mri)
    stripped_dir = fileDir.rstrip(os.sep)
    # Derive the file path from the template as AreaDetector would do
    vds_file_name = fileTemplate.replace("%s", formatName)
    # This is the path to the requested file, which will be a VDS
    vds_path = os.path.join(fileDir, vds_file_name)
    # This is the path to the underlying file the odin writer writes to
    raw_base, _ = os.path.splitext(
        fileTemplate.replace("%s", formatName + "_raw_data"))
    assert "." in vds_path, (
        "File extension for %r should be supplied" % vds_path)
    pending = writer.put_attribute_values_async(
        dict(
            numCapture=steps_to_do,
            filePath=stripped_dir + os.sep,
            fileName=raw_base,
        ))
    context.wait_all_futures(pending)
    # Start the plugin
    self.start_future = writer.start_async()
    # Start a future waiting for the first array
    self.array_future = writer.when_value_matches_async(
        "numCaptured", greater_than_zero)
    create_vds(
        generator,
        raw_base,
        vds_path,
        writer,
        self.uid_name,
        self.sum_name,
    )
    add_nexus_nodes(generator, vds_path)
    # Return the dataset information
    return list(
        create_dataset_infos(
            formatName, generator, vds_file_name, self.secondary_set))
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    generator: scanning.hooks.AGenerator,
) -> None:
    """Configure the plugin, working out which uniqueID to start from."""
    # Clear out old subscriptions
    context.unsubscribe_all()
    self.generator = generator
    # Work out the offset between the generator index and uniqueID
    if completed_steps == 0:
        # The detector will reset, so the first uniqueId (for index 0)
        # will be 1
        first_id = 1
        self.done_when_reaches = steps_to_do
    else:
        # This is rewinding or setting up for another batch, so the
        # detector will skip to a uniqueID not yet produced
        assert self.done_when_reaches, "Done when reaches not assigned"
        first_id = self.done_when_reaches + 1
        self.done_when_reaches += steps_to_do
    # Delete any remaining old positions
    plugin = context.block_view(self.mri)
    pending = [plugin.delete_async()]
    pending += plugin.put_attribute_values_async(
        dict(enableCallbacks=True, idStart=first_id))
    self.steps_up_to = completed_steps + steps_to_do
    xml, self.end_index = self._make_xml(completed_steps)
    # Wait for the previous puts to finish
    context.wait_all_futures(pending)
    # Put the xml
    plugin.xml.put_value(xml)
    # Start the plugin
    self.start_future = plugin.start_async()
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Block until the expected number of frames has been captured."""
    context.wait_all_futures(self.array_future)
    context.unsubscribe_all()
    writer = context.block_view(self.mri)
    # Report progress as each frame is captured
    writer.numCaptured.subscribe_value(self.update_completed_steps)
    # Allow one exposure plus a grace period between value updates
    writer.when_value_matches(
        "numCaptured",
        self.done_when_reaches,
        event_timeout=self.exposure_time + FRAME_TIMEOUT,
    )
def on_configure(
    self,
    context: scanning.hooks.AContext,
    part_info: scanning.hooks.APartInfo,
    fileDir: scanning.hooks.AFileDir,
) -> None:
    """Write the NDAttributes XML file and point the plugin at it."""
    stats = context.block_view(self.mri)
    pending = stats.put_attribute_values_async(
        dict(enableCallbacks=True, computeStatistics=True))
    # Build the XML first, then write it next to the scan files
    xml_text = self._make_attributes_xml()
    self.attributes_filename = make_xml_filename(fileDir, self.mri)
    with open(self.attributes_filename, "w") as f:
        f.write(xml_text)
    # On Windows the IOC sees the file through a translated path
    if self.runs_on_windows:
        pv_path = FilePathTranslatorInfo.translate_filepath(
            part_info, self.attributes_filename)
    else:
        pv_path = self.attributes_filename
    pending.append(stats.attributesFile.put_value_async(pv_path))
    context.wait_all_futures(pending)
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Wait for the HDF writer, flushing and checking for stalls."""
    context.wait_all_futures(self.array_future)
    context.unsubscribe_all()
    self.last_id_update = None
    writer = context.block_view(self.mri)
    writer.uniqueId.subscribe_value(self.update_completed_steps)
    done = writer.when_value_matches_async(
        "uniqueId", self.done_when_reaches)
    while True:
        try:
            # Short timeout so we can flush roughly once a second
            context.wait_all_futures(done, timeout=1)
        except TimeoutError:
            # This is ok, means we aren't done yet, so flush
            self._flush_if_still_writing(writer)
            # Check it hasn't been too long since the last id update
            if self.last_id_update:
                stalled_after = self.last_id_update + self.frame_timeout
                if time.time() > stalled_after:
                    raise TimeoutError(
                        "HDF writer stalled, last updated at %s"
                        % (self.last_id_update))
            # TODO: what happens if we miss the last frame?
        else:
            return
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    # The following were passed from user calling configure()
    generator: scanning.hooks.AGenerator,
    axesToMove: scanning.hooks.AAxesToMove,
    exceptionStep: AExceptionStep = 0,
) -> None:
    """Store the scan parameters and move every axis to the start."""
    child = context.block_view(self.mri)
    # Remember the generator and where we need to start from
    self._generator = generator
    self._completed_steps = completed_steps
    self._steps_to_do = steps_to_do
    self._exception_step = exceptionStep
    self._axes_to_move = axesToMove
    self._movers = {axis: MaybeMover(child, axis) for axis in axesToMove}
    # Move to start (instantly)
    start_point = generator.get_point(completed_steps)
    pending: List[Future] = []
    for axis, mover in self._movers.items():
        mover.maybe_move_async(pending, start_point.lower[axis])
    context.wait_all_futures(pending)
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    part_info: scanning.hooks.APartInfo,
    generator: scanning.hooks.AGenerator,
    axesToMove: scanning.hooks.AAxesToMove,
) -> None:
    """Prepare the motion controller trajectory for this scan.

    Decides whether this part takes part in the scan, resets the GPIO
    outputs, starts the move to the scan start position and builds and
    writes the first batch of profile points.
    """
    context.unsubscribe_all()
    child = context.block_view(self.mri)
    # Store what sort of triggers we need to output
    self.output_triggers = get_motion_trigger(part_info)
    # Reset tail off state
    self.tail_off_added = False
    # Check if we should be taking part in the scan
    motion_axes = get_motion_axes(generator, axesToMove)
    if self.taking_part_in_scan(part_info, motion_axes):
        self.generator = generator
    else:
        # Not taking part: clear the generator and do nothing else
        self.generator = None
        return
    # Set minimum turnaround information
    self.min_turnaround = get_min_turnaround(part_info)
    # Work out the cs_port
    cs_port = self.get_cs_port(context, motion_axes)
    # Reset GPIOs by writing and executing a single zero-program point
    # TODO: we might need to put this in pause if the PandA logic doesn't
    # cope with a trigger staying high
    child.writeProfile(
        csPort=cs_port,
        timeArray=[MIN_TIME],
        userPrograms=[UserPrograms.ZERO_PROGRAM],
    )
    child.executeProfile()
    fs: Union[List, Future]
    if motion_axes:
        # Start off the move to the start
        fs = self.move_to_start(child, cs_port, completed_steps)
    else:
        fs = []
    # Set how far we should be going and the completed steps lookup
    self.steps_up_to = completed_steps + steps_to_do
    self.completed_steps_lookup = []
    # Reset the profiles that still need to be sent
    self.profile = dict(
        timeArray=[],
        velocityMode=[],
        userPrograms=[],
    )
    self.time_since_last_pvt = 0
    # One profile array per CS axis we are mapped to
    for info in self.axis_mapping.values():
        self.profile[info.cs_axis.lower()] = []
    self.calculate_generator_profile(completed_steps, do_run_up=True)
    self.write_profile_points(child, cs_port)
    # Wait for the motors to have got to the start
    context.wait_all_futures(fs)
def on_post_run_ready(self, context: scanning.hooks.AContext) -> None:
    """Wait for the start future so the file is closed before returning."""
    # If this is the last one, wait until the file is closed
    context.wait_all_futures(self.start_future)
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    part_info: scanning.hooks.APartInfo,
    generator: scanning.hooks.AGenerator,
    fileDir: scanning.hooks.AFileDir,
    formatName: scanning.hooks.AFormatName = "det",
    fileTemplate: scanning.hooks.AFileTemplate = "%s.h5",
) -> scanning.hooks.UInfos:
    """Configure the HDF writer plugin and return dataset information.

    Sets up the file path/name/layout attributes, starts the plugin and
    arms futures for completion and for the arrival of the first array.
    """
    # On initial configure, expect to get the demanded number of frames
    self.done_when_reaches = completed_steps + steps_to_do
    self.uniqueid_offset = 0
    # Calculate how long to wait before marking this scan as stalled
    self.frame_timeout = FRAME_TIMEOUT
    if generator.duration > 0:
        self.frame_timeout += generator.duration
    else:
        # Double it to be safe
        self.frame_timeout += FRAME_TIMEOUT
    child = context.block_view(self.mri)
    # For first run then open the file
    # Enable position mode before setting any position related things
    child.positionMode.put_value(True)
    # Setup our required settings
    file_dir = fileDir.rstrip(os.sep)
    # On Windows the IOC sees the directory through a translated path
    if self.runs_on_windows:
        h5_file_dir = FilePathTranslatorInfo.translate_filepath(
            part_info, file_dir)
    else:
        h5_file_dir = file_dir
    filename = fileTemplate % formatName
    assert "." in filename, \
        "File extension for %r should be supplied" % filename
    futures = child.put_attribute_values_async(
        dict(
            enableCallbacks=True,
            fileWriteMode="Stream",
            swmrMode=True,
            storeAttr=True,
            dimAttDatasets=True,
            lazyOpen=True,
            arrayCounter=0,
            filePath=h5_file_dir + os.sep,
            fileName=formatName,
            fileTemplate="%s" + fileTemplate,
        ))
    futures += set_dimensions(child, generator)
    # Write the layout XML describing the dataset structure
    xml = make_layout_xml(generator, part_info,
                          self.write_all_nd_attributes.value)
    self.layout_filename = make_xml_filename(
        file_dir, self.mri, suffix="layout")
    assert self.layout_filename, "No layout filename"
    with open(self.layout_filename, "w") as f:
        f.write(xml)
    # The PV value may need translating for a Windows IOC
    layout_filename_pv_value = self.layout_filename
    if self.runs_on_windows:
        layout_filename_pv_value = FilePathTranslatorInfo.translate_filepath(
            part_info, self.layout_filename)
    futures += child.put_attribute_values_async(
        dict(
            xmlLayout=layout_filename_pv_value,
            flushDataPerNFrames=steps_to_do,
            flushAttrPerNFrames=0,
        ))
    # Wait for the previous puts to finish
    context.wait_all_futures(futures)
    # Reset numCapture back to 0
    child.numCapture.put_value(0)
    # Start the plugin
    self.start_future = child.start_async()
    # Start a future waiting for the first array
    self.array_future = child.when_value_matches_async(
        "arrayCounterReadback", greater_than_zero)
    self._check_xml_is_valid(child)
    # Return the dataset information
    dataset_infos = list(
        create_dataset_infos(formatName, part_info, generator, filename))
    return dataset_infos
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Stream positions into the plugin as it consumes them."""
    self.loading = False
    plugin = context.block_view(self.mri)
    # Top up the position queue whenever the consumed count changes
    plugin.qty.subscribe_value(self.load_more_positions, plugin)
    context.wait_all_futures(self.start_future)
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    part_info: scanning.hooks.APartInfo,
    generator: scanning.hooks.AGenerator,
    axesToMove: scanning.hooks.AAxesToMove,
) -> None:
    """Prepare the motion controller trajectory for this scan.

    Decides whether this part takes part in the scan, picks the CS port,
    resets the GPIO outputs, starts the move to the scan start position
    and builds and writes the first batch of profile points.
    """
    context.unsubscribe_all()
    child = context.block_view(self.mri)
    # Store what sort of triggers we need to output
    self.output_triggers = get_motion_trigger(part_info)
    # Check if we should be taking part in the scan
    motion_axes = get_motion_axes(generator, axesToMove)
    need_gpio = self.output_triggers != scanning.infos.MotionTrigger.NONE
    if motion_axes or need_gpio:
        # Taking part, so store generator
        self.generator = generator
    else:
        # Flag as not taking part
        self.generator = None
        return
    # See if there is a minimum turnaround
    infos: List[
        MinTurnaroundInfo] = scanning.infos.MinTurnaroundInfo.filter_values(
            part_info)
    if infos:
        assert len(
            infos
        ) == 1, "Expected 0 or 1 MinTurnaroundInfos, got %d" % len(infos)
        self.min_turnaround = max(MIN_TIME, infos[0].gap)
        self.min_interval = infos[0].interval
    else:
        # No info supplied, fall back to the defaults
        self.min_turnaround = MIN_TIME
        self.min_interval = MIN_INTERVAL
    # Work out the cs_port we should be using
    layout_table = child.layout.value
    if motion_axes:
        self.axis_mapping = cs_axis_mapping(
            context, layout_table, motion_axes)
        # Check units for everything in the axis mapping
        # TODO: reinstate this when GDA does it properly
        # for axis_name, motor_info in sorted(self.axis_mapping.items()):
        #     assert motor_info.units == generator.units[axis_name], \
        #         "%s: Expected scan units of %r, got %r" % (
        #         axis_name, motor_info.units, generator.units[axis_name])
        # Guaranteed to have an entry in axis_mapping otherwise
        # cs_axis_mapping would fail, so pick its cs_port
        cs_port = list(self.axis_mapping.values())[0].cs_port
    else:
        # No axes to move, but if told to output triggers we still need to
        # do something
        self.axis_mapping = {}
        # Pick the first cs we find that has an axis assigned
        cs_port = cs_port_with_motors_in(context, layout_table)
    # Reset GPIOs by writing and executing a single zero-program point
    # TODO: we might need to put this in pause if the PandA logic doesn't
    # cope with a trigger staying high
    child.writeProfile(
        csPort=cs_port,
        timeArray=[MIN_TIME],
        userPrograms=[UserPrograms.ZERO_PROGRAM],
    )
    child.executeProfile()
    fs: Union[List, Future]
    if motion_axes:
        # Start off the move to the start
        fs = self.move_to_start(child, cs_port, completed_steps)
    else:
        fs = []
    # Set how far we should be going and the completed steps lookup
    self.steps_up_to = completed_steps + steps_to_do
    self.completed_steps_lookup = []
    # Reset the profiles that still need to be sent
    self.profile = dict(
        timeArray=[],
        velocityMode=[],
        userPrograms=[],
    )
    self.time_since_last_pvt = 0
    # One profile array per CS axis we are mapped to
    for info in self.axis_mapping.values():
        self.profile[info.cs_axis.lower()] = []
    self.calculate_generator_profile(completed_steps, do_run_up=True)
    self.write_profile_points(child, cs_port)
    # Wait for the motors to have got to the start
    context.wait_all_futures(fs)