def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    generator: scanning.hooks.AGenerator,
    fileDir: scanning.hooks.AFileDir,
    formatName: scanning.hooks.AFormatName = "odin",
    fileTemplate: scanning.hooks.AFileTemplate = "%s.h5",
) -> scanning.hooks.UInfos:
    """Configure the Odin file writer for the scan.

    Points the writer at a raw data file derived from fileTemplate,
    starts the plugin, builds a VDS mapping the raw data back onto the
    scan shape, and returns dataset infos describing what will be written.
    """
    self.exposure_time = generator.duration
    # On initial configure, expect to get the demanded number of frames
    self.done_when_reaches = completed_steps + steps_to_do
    self.unique_id_offset = 0
    child = context.block_view(self.mri)
    # Normalise away any trailing separator so exactly one is re-added below
    file_dir = fileDir.rstrip(os.sep)
    # derive file path from template as AreaDetector would normally do
    file_name = fileTemplate.replace("%s", formatName)
    # this is path to the requested file which will be a VDS
    # (use the normalised file_dir so all paths are built consistently)
    vds_full_filename = os.path.join(file_dir, file_name)
    # this is the path to underlying file the odin writer will write to
    raw_file_name = fileTemplate.replace("%s", formatName + "_raw_data")
    raw_file_basename, _ = os.path.splitext(raw_file_name)
    assert "." in vds_full_filename, (
        "File extension for %r should be supplied" % vds_full_filename)
    futures = child.put_attribute_values_async(
        dict(
            numCapture=steps_to_do,
            filePath=file_dir + os.sep,
            fileName=raw_file_basename,
        ))
    context.wait_all_futures(futures)
    # Start the plugin
    self.start_future = child.start_async()
    # Start a future waiting for the first array
    self.array_future = child.when_value_matches_async(
        "numCaptured", greater_than_zero)
    # Build the VDS that maps raw frames back onto the scan dimensions
    create_vds(
        generator,
        raw_file_basename,
        vds_full_filename,
        child,
        self.uid_name,
        self.sum_name,
    )
    add_nexus_nodes(generator, vds_full_filename)
    # Return the dataset information
    dataset_infos = list(
        create_dataset_infos(formatName, generator, file_name, self.secondary_set))
    return dataset_infos
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Wait for the file writer to capture all frames, flushing periodically.

    Raises:
        TimeoutError: if file writing stalls (no new frame for too long)
    """
    # Wait for the first array to arrive in the plugin without a timeout
    # NOTE(review): this waits self.first_array_future while sibling parts in
    # this file use self.array_future — confirm the attribute name is correct
    # for this part's configure hook
    context.wait_all_futures(self.first_array_future)
    # Get the child block
    child = context.block_view(self.mri)
    # Update progress based on number of frames written
    child.numCapturedReadback.subscribe_value(self.update_completed_steps)
    # Wait for the number of captured frames to reach the target
    f_done = child.when_value_matches_async("numCapturedReadback",
                                            self.done_when_captured)
    # Reset stall tracking; presumably refreshed as capture updates arrive —
    # confirm against update_completed_steps
    self.last_capture_update = None
    while True:
        try:
            # Use a regular timeout so we can request a manual flush every second
            context.wait_all_futures(f_done, timeout=1)
        except TimeoutError:
            # This is ok, means we aren't done yet, so flush
            self._flush_if_still_writing(child)
            # Check it hasn't been too long since the last frame was written
            if self._has_file_writing_stalled():
                timeout_message = self._get_file_writing_stalled_error_message(
                    child)
                raise TimeoutError(timeout_message)
        else:
            # All frames captured; we are done
            break
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    generator: scanning.hooks.AGenerator,
) -> None:
    """Configure the position-labelling plugin for the coming scan.

    Works out the uniqueID the detector will start from, uploads the
    position XML for the frames to be taken, then starts the plugin.
    """
    # clear out old subscriptions
    context.unsubscribe_all()
    self.generator = generator
    # Work out the offset between the generator index and uniqueID
    if completed_steps == 0:
        # The detector will reset, so the first uniqueId (for index 0)
        # will be 1
        id_start = 1
        self.done_when_reaches = steps_to_do
    else:
        # This is rewinding or setting up for another batch, so the detector
        # will skip to a uniqueID that has not been produced yet
        assert self.done_when_reaches, "Done when reaches not assigned"
        id_start = self.done_when_reaches + 1
        self.done_when_reaches += steps_to_do
    # Delete any remaining old positions
    child = context.block_view(self.mri)
    futures = [child.delete_async()]
    futures += child.put_attribute_values_async(
        dict(enableCallbacks=True, idStart=id_start))
    self.steps_up_to = completed_steps + steps_to_do
    xml, self.end_index = self._make_xml(completed_steps)
    # Wait for the previous puts to finish before uploading the XML
    context.wait_all_futures(futures)
    # Put the xml
    child.xml.put_value(xml)
    # Start the plugin
    self.start_future = child.start_async()
def on_post_run_armed(self, context: scanning.hooks.AContext) -> None:
    """Stop the acquisition between inner scans, as in post_run_ready."""
    # TODO: this should call configure too, will fail for 3D scans at
    # present
    block = context.block_view(self.mri)
    block.stop()
    # Wait for the start future so the acquisition is fully finished
    context.wait_all_futures(self.start_future)
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Enable the sequencer if needed and run the double-buffered table."""
    # With no MotorInfos the first row has Trigger.IMMEDIATE, so the
    # sequencer enable must be deferred until now
    if not self.axis_mapping:
        self.panda.seqSetEnable()
    run_futures = self.db_seq_table.run()
    context.wait_all_futures(run_futures)
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Run the simulated scan, moving each axis through every point."""
    # All timing is relative to this start time
    next_point_due = time.time()
    first = self._completed_steps
    for step in range(first, first + self._steps_to_do):
        # The point we are meant to be scanning
        point = self._generator.get_point(step)
        # When this point should be finished, and how long the moves take
        next_point_due += point.duration
        move_duration = next_point_due - time.time()
        # Jump (instantly) to the start of the point, then move to its end
        # over move_duration seconds, collecting futures to wait on
        futures: List[Future] = []
        for axis, mover in self._movers.items():
            mover.maybe_move_async(futures, point.lower[axis])
            mover.maybe_move_async(futures, point.upper[axis], move_duration)
        context.wait_all_futures(futures)
        # Report this point as complete
        assert self.registrar, "Part has no registrar"
        self.registrar.report(scanning.infos.RunProgressInfo(step + 1))
        # Deliberately blow up if this is the configured exception step
        assert step + 1 != self._exception_step, (
            "Raising exception at step %s" % self._exception_step)
def setup_plugin(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    initial_configure=True,
) -> None:
    """Set up the position-labelling plugin and start it.

    Args:
        context: Context to get the child block from
        completed_steps: Steps already completed before this batch
        steps_to_do: Number of steps in this batch
        initial_configure: True on first configure (uniqueID restarts at 1),
            False on rewind/batch so IDs continue from where they left off
    """
    # Work out the last expected ID
    if initial_configure:
        # This is an initial configure, so reset start ID to 1
        id_start = 1
        self.done_when_reaches = steps_to_do
    else:
        # This is rewinding or setting up for another batch,
        # skip to a uniqueID that has not been produced yet
        id_start = self.done_when_reaches + 1
        self.done_when_reaches += steps_to_do
    # Delete any remaining old positions
    child = context.block_view(self.mri)
    futures = [child.delete_async()]
    futures += child.put_attribute_values_async(
        dict(enableCallbacks=True, idStart=id_start, arrayCounter=id_start - 1))
    xml, self.end_index = self._make_xml(completed_steps)
    # Wait for the previous puts to finish before uploading the XML
    context.wait_all_futures(futures)
    # Put the xml
    child.xml.put_value(xml)
    # Start the plugin
    self.start_future = child.start_async()
def on_post_run_ready(self, context: scanning.hooks.AContext) -> None:
    """Stop the acquisition once the file writer has finished.

    The AD support does not know how many frames the detector has taken
    and never finishes the Acquire, but the file writer completing at
    post run means it is safe to stop the acquisition here.
    """
    block = context.block_view(self.mri)
    block.stop()
    context.wait_all_futures(self.start_future)
def on_run(self, context: scanning.hooks.AContext) -> None:
    """On `RunHook` record where to next take data"""
    # Everything is timed relative to the end of the first exposure
    end_of_exposure = time.time() + self._exposure
    last_flush = end_of_exposure
    assert self.registrar, "Part has no registrar"
    first = self._completed_steps
    for i in range(first, first + self._steps_to_do):
        # The point we are meant to be scanning
        point = self._generator.get_point(i)
        # Pretend to expose, then write the frame's data
        context.sleep(end_of_exposure - time.time())
        self.log.debug(f"Writing data for point {i}")
        self._write_data(point, i)
        # Flush periodically rather than on every frame
        if time.time() - last_flush > FLUSH_PERIOD:
            last_flush = time.time()
            self._flush_datasets()
        # Schedule when the next exposure should end
        end_of_exposure += point.duration
        # Report this point as complete
        self.registrar.report(scanning.infos.RunProgressInfo(i + 1))
    # One final flush so no data is left unwritten
    self._flush_datasets()
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Wait for capture to finish, reporting progress as frames arrive."""
    # Block until the first frame has arrived
    context.wait_all_futures(self.array_future)
    context.unsubscribe_all()
    block = context.block_view(self.mri)
    # Report progress on every captured-frame update
    block.numCaptured.subscribe_value(self.update_completed_steps)
    # Each frame must arrive within an exposure plus a grace period
    timeout = self.exposure_time + FRAME_TIMEOUT
    block.when_value_matches(
        "numCaptured",
        self.done_when_reaches,
        event_timeout=timeout,
    )
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    part_info: scanning.hooks.APartInfo,
    generator: scanning.hooks.AGenerator,
    fileDir: scanning.hooks.AFileDir,
    **kwargs: Any,
) -> None:
    """Configure the detector driver for the scan.

    Checks the driver version, works out whether the detector is hardware
    triggered, sets it up, arms it if hardware triggered, and writes an
    NDAttributes XML file if extra attributes were requested.
    """
    context.unsubscribe_all()
    child = context.block_view(self.mri)
    self.check_driver_version(child)
    # If detector can be soft triggered, then we might need to defer
    # starting it until run. Check triggerMode to find out
    # NOTE(review): when soft_trigger_modes is empty the previous value of
    # is_hardware_triggered is kept — presumably defaulted elsewhere; confirm
    if self.soft_trigger_modes:
        mode = child.triggerMode.value
        self.is_hardware_triggered = mode not in self.soft_trigger_modes
    # Set up the detector
    self.setup_detector(
        context,
        completed_steps,
        steps_to_do,
        generator.duration,
        part_info,
        **kwargs,
    )
    # Calculate how long to wait before marking this scan as stalled
    self.frame_timeout = FRAME_TIMEOUT
    if generator.duration > 0:
        self.frame_timeout += generator.duration
    else:
        # Double it to be safe
        self.frame_timeout += FRAME_TIMEOUT
    if self.is_hardware_triggered:
        # Start now if we are hardware triggered
        self.arm_detector(context)
    # Tell detector to store NDAttributes if table given
    if len(self.extra_attributes.value.sourceId) > 0:
        attribute_xml = self.build_attribute_xml()
        self.attributes_filename = make_xml_filename(fileDir, self.mri)
        with open(self.attributes_filename, "w") as xml:
            xml.write(attribute_xml)
        assert hasattr(child, "attributesFile"), (
            "Block doesn't have 'attributesFile' attribute "
            "(was it instantiated properly with adbase_parts?)"
        )
        attributes_filename = self.attributes_filename
        if self.runs_on_windows:
            # The IOC runs on windows, so it needs a translated path
            attributes_filename = FilePathTranslatorInfo.translate_filepath(
                part_info, self.attributes_filename
            )
        child.attributesFile.put_value(attributes_filename)
def on_seek(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
) -> None:
    """Re-arm the plugin after a rewind or between batches."""
    # Drop stale subscriptions before reconfiguring
    context.unsubscribe_all()
    # Pausing requires the plugin to be set up from scratch, but without
    # resetting the uniqueID counter
    self.setup_plugin(
        context, completed_steps, steps_to_do, initial_configure=False
    )
def report_status(
        self, context: scanning.hooks.AContext) -> scanning.hooks.UInfos:
    """Report the PMAC's current I, P and M variable settings."""
    block = context.block_view(self.mri)
    return PmacVariablesInfo(
        block.iVariables.value,
        block.pVariables.value,
        block.mVariables.value,
    )
def on_validate(
    self,
    context: scanning.hooks.AContext,
) -> None:
    """Check the driver version against the required one, if any."""
    if self.required_version is None:
        # No version requirement, nothing to check
        return
    driver_version = context.block_view(self.mri).driverVersion.value
    check_driver_version(driver_version, self.required_version)
def on_report_status(
        self, context: scanning.hooks.AContext) -> scanning.hooks.UInfos:
    """Report that our detector supports multiple frames per step."""
    detector_mri = context.block_view(self.mri).detector.value
    return scanning.infos.DetectorMutiframeInfo(detector_mri)
def get_cs_port(
    self,
    context: scanning.hooks.AContext,
    motion_axes: List[str],
) -> str:
    """Work out the cs_port and motion_axes we should be using"""
    layout_table = context.block_view(self.mri).layout.value
    if not motion_axes:
        # No axes to move, but if told to output triggers we still need to
        # do something, so pick the first cs we find that has an axis assigned
        self.axis_mapping = {}
        return cs_port_with_motors_in(context, layout_table)
    self.axis_mapping = cs_axis_mapping(context, layout_table, motion_axes)
    # Check units for everything in the axis mapping
    # TODO: reinstate this when GDA does it properly
    # for axis_name, motor_info in sorted(self.axis_mapping.items()):
    #     assert motor_info.units == generator.units[axis_name], \
    #         "%s: Expected scan units of %r, got %r" % (
    #         axis_name, motor_info.units, generator.units[axis_name])
    # Guaranteed to have an entry in axis_mapping otherwise
    # cs_axis_mapping would fail, so pick its cs_port
    return next(iter(self.axis_mapping.values())).cs_port
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Execute the trajectory profile, reporting progress as points pass."""
    if not self.generator:
        # Nothing configured, nothing to run
        return
    self.loading = False
    block = context.block_view(self.mri)
    # Progress updates come from pointsScanned while the profile executes
    block.pointsScanned.subscribe_value(self.update_step, block)
    # TODO: we should return at the end of the last point for PostRun
    block.executeProfile()
def on_reset(self, context: scanning.hooks.AContext) -> None:
    """Reset the part, removing any attribute XML file it created."""
    super().on_reset(context)
    # Tidy up the attribute XML file written during configure
    filename = self.attributes_filename
    if filename and os.path.isfile(filename):
        os.remove(filename)
        context.block_view(self.mri).attributesFile.put_value("")
def on_run(self, context: scanning.hooks.AContext) -> None:
    """Stream remaining positions and wait for all frames to be produced."""
    self.loading = False
    block = context.block_view(self.mri)
    # Top up the position table whenever the plugin consumes entries
    block.qty.subscribe_value(self.load_more_positions, block)
    # Wait until the expected number of frames have been counted
    block.when_value_matches(
        "arrayCounterReadback",
        self.done_when_reaches,
        event_timeout=self.frame_timeout,
    )
def on_post_configure(self, context: scanning.hooks.AContext) -> None:
    """Set TetrAMM valuesPerRead so each frame averages the target samples.

    A target of 0 selects the hardware minimum; a negative target leaves
    the setting untouched.
    """
    child = context.block_view(self.mri)
    # Read the attribute once so the branch test and the calculation are
    # guaranteed to use the same value
    target = self.targetSamplesPerFrame.value
    if target == 0:
        child.valuesPerRead.put_value(TETRAMM_MIN_VALUES_PER_READ)
    elif target > 0:
        values_per_read = ceil(
            TETRAMM_BASE_FREQ * child.exposure.value / target)
        # Never go below the hardware minimum
        values_per_read = max(values_per_read, TETRAMM_MIN_VALUES_PER_READ)
        child.valuesPerRead.put_value(values_per_read)
def on_configure(
    self,
    context: scanning.hooks.AContext,
    part_info: scanning.hooks.APartInfo,
    fileDir: scanning.hooks.AFileDir,
) -> None:
    """Configure the stats plugin and point it at its attribute XML file."""
    block = context.block_view(self.mri)
    pending = block.put_attribute_values_async(
        dict(enableCallbacks=True, computeStatistics=True))
    # Write out the NDAttributes XML telling the plugin what to compute
    self.attributes_filename = make_xml_filename(fileDir, self.mri)
    with open(self.attributes_filename, "w") as f:
        f.write(self._make_attributes_xml())
    attributes_filename = self.attributes_filename
    if self.runs_on_windows:
        # The IOC sees a windows path, so translate before handing it over
        attributes_filename = FilePathTranslatorInfo.translate_filepath(
            part_info, self.attributes_filename)
    pending.append(block.attributesFile.put_value_async(attributes_filename))
    context.wait_all_futures(pending)
def setup_detector(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    num_images: int,
    duration: float,
    part_info: scanning.hooks.APartInfo,
    initial_configure: bool = True,
    **kwargs: Any,
):
    """Set up the Andor detector, accounting for its readout time.

    In frame transfer mode the exposure happens between triggers, so the
    EPICS exposure is zeroed and the accumulate period is used as the
    readout time; otherwise readout time is approximated from the
    difference between acquire period and exposure.
    """
    # Calculate the readout time
    child = context.block_view(self.mri)
    if child.andorFrameTransferMode.value:
        # Set exposure to zero and use accumulation period for readout time
        exposure = 0.0
        child.exposure.put_value(exposure)
        child.acquirePeriod.put_value(exposure)
        readout_time = child.andorAccumulatePeriod.value
        # With frame transfer mode enabled, the exposure is the time between
        # triggers. The epics 'acquireTime' (exposure) actually becomes the
        # User Defined delay before acquisition start after the trigger. The
        # duration floor becomes the readout time
        assert duration > readout_time, (
            "The duration: %s has to be longer than the Andor 2 readout "
            "time: %s." % (duration, readout_time))
        period = readout_time
    else:
        # Behaves like a "normal" detector
        child.exposure.put_value(duration)
        child.acquirePeriod.put_value(duration)
        # Readout time can be approximated from difference in exposure time
        # and acquire period
        readout_time = child.acquirePeriod.value - child.exposure.value
        # Calculate the adjusted exposure time
        (exposure, period) = self.get_adjusted_exposure_time_and_acquire_period(
            duration, readout_time, kwargs.get("exposure", 0))
    # The real exposure
    self.exposure.set_value(exposure)
    kwargs["exposure"] = exposure
    super().setup_detector(
        context,
        completed_steps,
        steps_to_do,
        num_images,
        duration,
        part_info,
        initial_configure=initial_configure,
        **kwargs,
    )
    # NOTE(review): period is re-applied after super() — presumably the base
    # class setup overwrites acquirePeriod; confirm before reordering
    child.acquirePeriod.put_value(period)
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    generator: scanning.hooks.AGenerator,
) -> None:
    """Configure the plugin for a new scan."""
    # Stale subscriptions from a previous scan must not fire now
    context.unsubscribe_all()
    self.generator = generator
    # A scan is stalled if no frame arrives within a frame's duration plus
    # a fixed grace period (doubled grace when the duration is unknown)
    if generator.duration > 0:
        self.frame_timeout = FRAME_TIMEOUT + generator.duration
    else:
        self.frame_timeout = 2 * FRAME_TIMEOUT
    # Set up the plugin
    self.setup_plugin(context, completed_steps, steps_to_do)
def on_validate( self, context: scanning.hooks.AContext, generator: scanning.hooks.AGenerator, axesToMove: scanning.hooks.AAxesToMove, part_info: scanning.hooks.APartInfo, ) -> scanning.hooks.UParameterTweakInfos: child = context.block_view(self.mri) # Check that we can move all the requested axes available = set(child.layout.value.name) motion_axes = get_motion_axes(generator, axesToMove) assert available.issuperset( motion_axes ), "Some of the requested axes %s are not on the motor list %s" % ( list(axesToMove), sorted(available), ) # Find the duration duration = generator.duration assert duration >= 0.0, f"{self.name}: negative duration is not supported" # Check if we should guess the duration if duration == 0.0: # We need to tweak the duration if we are going to take part if self.taking_part_in_scan(part_info, motion_axes): duration = self.calculate_generator_duration( context, generator, part_info, motion_axes) # We may have not been able to tweak duration if axis mappings are # missing. if not duration: return None else: return None # If GPIO is demanded for every point we need to align to the servo # cycle trigger = get_motion_trigger(part_info) if trigger == scanning.infos.MotionTrigger.EVERY_POINT: servo_freq = child.servoFrequency() duration = self.get_aligned_duration_with_servo_frequency( servo_freq, duration) # Check if the duration was tweaked and return if duration != generator.duration: self.log.debug( f"{self.name}: tweaking duration from {generator.duration} to " f"{duration}") serialized = generator.to_dict() new_generator = CompoundGenerator.from_dict(serialized) new_generator.duration = duration return scanning.infos.ParameterTweakInfo("generator", new_generator) else: return None
def on_reset(self, context: scanning.hooks.AContext) -> None:
    """Reset the writer, waiting for it to finish and removing its XML."""
    super().on_reset(context)
    self.on_abort(context)
    # The HDFWriter might still have been writing, so stop doesn't
    # guarantee all frames were flushed; start_future is in a different
    # context so can't be waited on — wait for "running" to go false instead
    block = context.block_view(self.mri)
    block.when_value_matches("running", False)
    # Remove the layout XML file created at configure time
    if self.layout_filename and os.path.isfile(self.layout_filename):
        os.remove(self.layout_filename)
        block.xmlLayout.put_value("")
def on_report_status(
        self, context: scanning.hooks.AContext) -> scanning.hooks.UInfos:
    """Report which trigger mode the motor controller should use."""
    block = context.block_view(self.mri)
    if doing_pcomp(block.rowTrigger.value):
        # Position compare generates its own triggers, so none are needed
        trigger = scanning.infos.MotionTrigger.NONE
    else:
        # Otherwise wait on a gate signal at the start of each row
        trigger = scanning.infos.MotionTrigger.ROW_GATE
    return scanning.infos.MotionTriggerInfo(trigger)
def on_seek(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    part_info: scanning.hooks.APartInfo,
    generator: scanning.hooks.AGenerator,
    fileDir: scanning.hooks.AFileDir,
    breakpoints: scanning.controllers.ABreakpoints = None,
    **kwargs: Any,
) -> None:
    """Reconfigure the detector after a rewind or between batches."""
    context.unsubscribe_all()
    # A hardware-triggered detector with no breakpoints can be configured
    # for every remaining frame now, instead of configuring and arming for
    # each inner scan, which saves some time
    if self.is_hardware_triggered and not breakpoints:
        num_images = generator.size - completed_steps
    else:
        num_images = steps_to_do
    # Set up the detector
    self.setup_detector(
        context,
        completed_steps,
        steps_to_do,
        num_images,
        generator.duration,
        part_info,
        initial_configure=False,
        **kwargs,
    )
    # Hardware-triggered detectors are armed now; soft ones wait for run
    if self.is_hardware_triggered:
        self.arm_detector(context)
def on_configure(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
    # The following were passed from user calling configure()
    generator: scanning.hooks.AGenerator,
    axesToMove: scanning.hooks.AAxesToMove,
    exceptionStep: AExceptionStep = 0,
) -> None:
    """Record the scan parameters and move all axes to the first point."""
    block = context.block_view(self.mri)
    # Remember everything on_run will need
    self._generator = generator
    self._completed_steps = completed_steps
    self._steps_to_do = steps_to_do
    self._exception_step = exceptionStep
    self._axes_to_move = axesToMove
    self._movers = {axis: MaybeMover(block, axis) for axis in axesToMove}
    # Move (instantly) to the lower bound of the first point
    start_point = generator.get_point(completed_steps)
    futures: List[Future] = []
    for axis, mover in self._movers.items():
        mover.maybe_move_async(futures, start_point.lower[axis])
    context.wait_all_futures(futures)
def on_seek(
    self,
    context: scanning.hooks.AContext,
    completed_steps: scanning.hooks.ACompletedSteps,
    steps_to_do: scanning.hooks.AStepsToDo,
) -> None:
    """Prepare for a rewind or another batch of frames.

    The detector will skip to a uniqueID that has not been produced yet,
    so track the offset between it and the generator index.
    """
    self.unique_id_offset = completed_steps - self.done_when_reaches
    self.done_when_reaches += steps_to_do
    block = context.block_view(self.mri)
    # Only the array counter needs resetting
    block.arrayCounter.put_value(0)
    # Arrange to be notified when the first new frame arrives
    self.array_future = block.when_value_matches_async(
        "numCaptured", greater_than_zero)
def on_validate(
    self,
    context: scanning.hooks.AContext,
    detectors: scanning.util.ADetectorTable = None,
) -> None:
    """Check our detector is configured for exactly 2 frames per step."""
    detector_mri = context.block_view(self.mri).detector.value
    assert (
        detectors
    ), f"{detector_mri}: requires a detector table with 2 frames per step"
    if detectors:
        # Find our detector's row and check its frames-per-step setting
        for enable, _, mri, _, frames in detectors.rows():
            if mri == detector_mri and enable and frames != 2:
                raise ValueError(
                    f"{detector_mri}: frames per step has to be equal to 2"
                )