def kickoff_and_collect(block=False, magic=False):
    """Make a flyer or magic_flyer object and maybe block

    Returns
    -------
    msg1 : Msg
        kickoff message for flyer or magic_flyer created in this function
    msg2 : Msg
        collect message for flyer or magic_flyer created in this function
    """
    args = []
    kwargs = {}
    if magic:
        flyer = get_magic_flyer()
    else:
        flyer = get_flyer()
        args = [random.randrange(-5, -1),  # start
                random.randrange(0, 5),    # stop
                random.randint(1, 10)]     # step

    if block:
        kwargs = {'group': unique_name()}
    return [Msg('kickoff', flyer, *args, **kwargs), Msg('collect', flyer)]
def checkpoint_forever():
    # simplest pauseable scan
    yield Msg('open_run')
    while True:
        ttime.sleep(0.1)
        yield Msg('checkpoint')
    yield Msg('close_run')
def do_nothing(timeout=5):
    "Generate 'checkpoint' messages until timeout."
    t = ttime.time()
    yield Msg('open_run')
    while True:
        if ttime.time() > t + timeout:
            break
        ttime.sleep(0.1)
        yield Msg('checkpoint')
    yield Msg('close_run')
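# The two plans above only emit bookkeeping messages ('open_run',
# 'checkpoint', 'close_run'), so they can be run directly against a bare
# RunEngine. A minimal usage sketch, assuming only that bluesky is
# installed; nothing here is specific to this module.
from bluesky import RunEngine

RE = RunEngine({})
RE(do_nothing(timeout=1))  # emits 'checkpoint' roughly every 0.1 s, then closes the run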
def move():
    yield Msg("checkpoint")
    grp = _short_uid("set")
    for motor, pos in step.items():
        if pos == pos_cache[motor]:
            # This step does not move this motor.
            continue
        yield Msg("set", motor, pos, group=grp)
        pos_cache[motor] = pos
    yield Msg("wait", None, group=grp)
def conditional_break(det, motor, threshold):
    """Set, trigger, read until the detector reads intensity < threshold"""
    i = 0
    yield Msg('open_run')
    while True:
        print("LOOP %d" % i)
        yield Msg('set', motor, i)
        yield Msg('trigger', det)
        reading = yield Msg('read', det)
        if reading[det.name]['value'] < threshold:
            print('DONE')
            yield Msg('close_run')
            break
        i += 1
def cautious_stepscan(det, motor):
    yield Msg('open_run')
    for i in range(-5, 5):
        yield Msg('checkpoint')
        yield Msg('create')
        yield Msg('set', motor, i)
        yield Msg('trigger', det)
        ret_m = yield Msg('read', motor)
        ret_d = yield Msg('read', det)
        yield Msg('save')
        print("Value at {m} is {d}. Pausing.".format(
            m=ret_m[motor.name]['value'], d=ret_d[det.name]['value']))
        yield Msg('pause', None, defer=True)
    yield Msg('close_run')
def test_single_msg_to_gen():
    m = Msg('set', None, 0)

    m_list = [m for m in ensure_generator(m)]

    assert len(m_list) == 1
    assert m_list[0] == m
def simple_scan_saving(det, motor):
    "Set, trigger, read"
    yield Msg('open_run')
    yield Msg('create')
    yield Msg('set', motor, 5)
    yield Msg('read', motor)
    yield Msg('trigger', det)
    yield Msg('read', det)
    yield Msg('save')
    yield Msg('close_run')
def stepscan(det, motor):
    yield Msg('open_run')
    for i in range(-5, 5):
        yield Msg('create')
        yield Msg('set', motor, i)
        yield Msg('trigger', det)
        yield Msg('read', motor)
        yield Msg('read', det)
        yield Msg('save')
    yield Msg('close_run')
def stepscan(det, motor): """Old example scan from bluesky deprecated submodule.""" yield Msg('open_run') for i in range(-5, 5): yield Msg('create', name='primary') yield Msg('set', motor, i) yield Msg('trigger', det) yield Msg('read', motor) yield Msg('read', det) yield Msg('save') yield Msg('close_run')
def wait_complex(det, motors):
    "Set motors, trigger motors, wait for all motors to move in groups."
    # Same as above...
    yield Msg('open_run')
    for motor in motors[:-1]:
        yield Msg('set', motor, 5, group='A')

    # ...but put the last motor in a separate group.
    yield Msg('set', motors[-1], 5, group='B')

    # Wait for everything in group 'A' to report done.
    yield Msg('wait', None, group='A')
    yield Msg('trigger', det)
    yield Msg('read', det)

    # Wait for everything in group 'B' to report done.
    yield Msg('wait', None, group='B')
    yield Msg('trigger', det)
    yield Msg('read', det)
    yield Msg('close_run')
def annotate(message):
    """
    Plan stub to add messages to a plan inspection routine.

    Parameters
    ----------
    message : str
        The message to include.
    """
    return (yield Msg('annotate', message))
def restore_value(obj):
    """
    Plan stub that restores a cached value using .put.

    Parameters
    ----------
    obj : OphydObject
        The object we will call .put() on.
    """
    return (yield Msg('restore_value', obj))
def cache_value(obj):
    """
    Plan stub that caches an object's .get for later restoration.

    Parameters
    ----------
    obj : OphydObject
        The object we will call .get() on.
    """
    return (yield Msg('cache_value', obj))
def multi_sample_temperature_ramp(detector, sample_names, sample_positions,
                                  scan_motor, start, stop, step,
                                  temp_controller, tstart, tstop, tstep):
    def read_and_store_temp():
        yield Msg('create')
        yield Msg('read', temp_controller)
        yield Msg('save')

    peak_centers = [-1 + 3 * n for n in range(len(sample_names))]
    detector.noise = True

    for idx, temp in enumerate(np.arange(tstart, tstop, tstep)):
        # TODO: it would be cute to have the temperature reduce peak noise
        yield Msg('set', temp_controller, temp)
        for sample, sample_position, peak_pos in zip(sample_names,
                                                     sample_positions,
                                                     peak_centers):
            yield Msg('open_run', sample_name=sample, target_temp=temp)
            detector.center = peak_pos
            detector.sigma = .5 + .25 * idx
            detector.noise_factor = .05 + idx * 0.1
            for scan_pos in np.arange(start, stop, step):
                yield Msg('set', scan_motor, scan_pos)
                # Be super paranoid about the temperature. Grab it before and
                # after each trigger!
                # Capturing the temperature data before and after each
                # trigger is resulting in unintended behavior. Uncomment the
                # two `yield from` statements and run this example to see
                # what I'm talking about.
                # yield from read_and_store_temp()
                yield Msg('trigger', detector)
                # yield from read_and_store_temp()
                yield Msg('create')
                yield Msg('read', scan_motor)
                yield Msg('read', detector)
                yield Msg('read', temp_controller)
                # yield Msg('sleep', None, .1)
                yield Msg('save')
            # generate the end of the run document
            yield Msg('close_run')
def sleepy(det, motor):
    "Set, trigger motor, sleep for a fixed time, trigger detector, read"
    yield Msg('open_run')
    yield Msg('set', motor, 5)
    yield Msg('sleep', None, 2)  # units: seconds
    yield Msg('trigger', det)
    yield Msg('read', det)
    yield Msg('close_run')
def conditional_pause(det, motor, defer, include_checkpoint):
    yield Msg('open_run')
    for i in range(5):
        if include_checkpoint:
            yield Msg('checkpoint')
        yield Msg('set', motor, i)
        yield Msg('trigger', det)
        reading = yield Msg('read', det)
        if reading['det']['value'] < 0.2:
            yield Msg('pause', defer=defer)
        print("I'm not pausing yet.")
    yield Msg('close_run')
def wait_one(det, motor):
    "Set, trigger, read"
    yield Msg('open_run')
    yield Msg('set', motor, 5, group='A')  # Add to group 'A'.
    yield Msg('wait', None, group='A')  # Wait for everything in group 'A' to finish.
    yield Msg('trigger', det)
    yield Msg('read', det)
    yield Msg('close_run')
def tweak_core():
    nonlocal step
    while True:
        yield Msg('create', None, name='primary')
        ret_mot = yield Msg('read', motor)
        if ret_mot is None:
            return
        key = list(ret_mot.keys())[0]
        pos = ret_mot[key]['value']
        yield Msg('trigger', d, group='A')
        yield Msg('wait', None, 'A')
        reading = yield Msg('read', d)
        val = reading[target_field]['value']
        yield Msg('save')
        prompt = prompt_str.format(motor.name, float(pos), float(val), step)
        # print(prompt)
        # wait until user click
        while obj.wait:
            yield Msg('sleep', None, 0.1)
        # change tweak step
        new_step = obj.step
        if new_step:
            try:
                step = float(new_step)
            except ValueError:
                print("step is not valid input")
                break
        yield Msg('set', motor, pos + step, group='A')
        print('Motor moving...')
        sys.stdout.flush()
        yield Msg('wait', None, 'A')
        clear_output(wait=True)
        # stackoverflow.com/a/12586667/380231
        # print('\x1b[1A\x1b[2K\x1b[1A')
        obj.wait = True
def wait_multiple(det, motors):
    "Set motors, trigger all motors, wait for all motors to move."
    yield Msg('open_run')
    for motor in motors:
        yield Msg('set', motor, 5, group='A')
    # Wait for everything in group 'A' to report done.
    yield Msg('wait', None, group='A')
    yield Msg('trigger', det)
    yield Msg('read', det)
    yield Msg('close_run')
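# Usage sketch for the group-based waiting above, assuming the stock
# simulated devices from ophyd.sim. Both motors are set in group 'A', and
# the single 'wait' message blocks until every status in that group
# reports done before the detector is triggered.
from bluesky import RunEngine
from ophyd.sim import det, motor1, motor2

RE = RunEngine({})
RE(wait_multiple(det, [motor1, motor2]))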
def _inner_Escan_list():
    yield from moveE(energy_list[0] + 0.001)
    for energy, factor in zip(energy_list, factor_list):
        # Change counting time
        for detector, original_preset in zip(detectors, dets_preset):
            yield from mv(detector.preset_monitor, factor * original_preset)
        # Move and scan
        grp = short_uid('set')
        yield Msg('checkpoint')
        yield from moveE(energy, group=grp)
        if dichro:
            yield from dichro_steps(detectors, _positioners, trigger_and_read)
        else:
            yield from trigger_and_read(list(detectors) + _positioners)
def assert_equals(obj, value, error_message='', halt=True):
    """
    Plan stub that asserts an object's .get equals a particular value.

    Parameters
    ----------
    obj : OphydObject
        The object we will call .get() on.

    value : any
        The value we will compare against.

    error_message : str, optional
        The error to present in the test suite if this fails.

    halt : bool, optional
        If True, end the test on failure. Otherwise, bundle for assertion at
        the end of the test with use of this module's fixtures.
    """
    return (yield Msg('assert_equals', obj, value,
                      error_message=error_message, halt=halt))
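# The stubs above emit custom commands ('annotate', 'cache_value',
# 'restore_value', 'assert_equals') that a stock RunEngine does not know
# about. A hedged sketch of wiring one of them up with
# RunEngine.register_command; the handler below is illustrative only and
# ignores the 'halt' bundling behavior described in the docstring.
from bluesky import RunEngine

RE = RunEngine({})

async def _handle_assert_equals(msg):
    # Msg('assert_equals', obj, value, error_message=..., halt=...)
    expected = msg.args[0]
    assert msg.obj.get() == expected, msg.kwargs.get('error_message', '')

RE.register_command('assert_equals', _handle_assert_equals)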
def fitwalk(detectors, motor, models, target, naive_step=None, average=120,
            filters=None, drop_missing=True, tolerance=10, delay=None,
            max_steps=10):
    """
    Parameters
    ----------
    detectors : list
        List of detectors to read at each step of walk

    motor : ``ophyd.Object``
        Ophyd object that supports the set and wait. This should have a one
        to one relationship with the independent variable in the model you
        plan to optimize

    models : list
        List of models to evaluate during the walk

    target : float
        Desired end position for the walk

    naive_step : bluesky.plan, optional
        Plan to execute when there is not an accurate enough model available.
        By default this is a relative move of 0.01.

    average : int, optional
        Number of readings to take and average at each event. Models are
        allowed to take a subset of each reading and average, however if the
        two settings will create an average over multiple steps in the walk
        the :attr:`.LiveBuild.average` setting is automatically updated. For
        example, if the walk is told to average over 10 events, your model
        can either average over 10, 5, 2, or 1 shots.

    filters : dict, optional
        Key, callable pairs of event keys and single input functions that
        evaluate to True or False. For more information see
        :meth:`.apply_filters`

    drop_missing : bool, optional
        Choice to include events where event keys are missing

    tolerance : float, optional
        Maximum distance from target considered successful

    delay : float, optional
        Minimum time between consecutive readings

    max_steps : int, optional
        Maximum number of steps the scan will attempt before faulting. There
        is a max of 10 by default, but you may disable this by setting this
        option to None. Note that this may cause the walk to run
        indefinitely.
    """
    # Check that all models fit the same key
    if len(set([model.y for model in models])) > 1:
        raise RuntimeError("Provided models must predict "
                           "the same dependent variable.")

    # Prepare model callbacks
    for model in models:
        # Modify averaging
        if average % model.average != 0:
            logger.warning("Model {} was set to an incompatible averaging "
                           "setting, changing setting to {}"
                           "".format(model.name, average))
            model.average = average
        # Subscribe callbacks
        yield Msg('subscribe', None, model, 'all')

    # Target field
    target_field = models[0].y

    # Install filters
    filters = filters or {}
    [m.install_filters(filters) for m in models]

    # Link motor to independent variables
    detectors.insert(1, motor)
    field_names = list(set(var for model in models
                           for var in model.independent_vars.values()))
    motors = dict((key, motor) for key in field_names
                  if key in motor.read_attrs)

    # Initialize variables
    steps = 0

    if not naive_step:
        def naive_step():
            return (yield from rel_set(motor, 0.01, wait=True))

    # Measurement method
    def model_measure():
        # Take averaged measurement
        avg = yield from measure_average(detectors, num=average,
                                         delay=delay,
                                         drop_missing=drop_missing,
                                         filters=filters)
        # Save current target position
        last_shot = avg.pop(target_field)
        logger.debug("Averaged data yielded {} is at {}"
                     "".format(target_field, last_shot))
        # Rank models based on accuracy of fit
        model_ranking = rank_models(models, last_shot, **avg)
        # Determine if any models are accurate enough
        if len(model_ranking):
            model = model_ranking[0]
        else:
            model = None
        return avg, last_shot, model

    # Make first measurements
    averaged_data, last_shot, accurate_model = yield from model_measure()

    # Begin walk
    while not np.isclose(last_shot, target, atol=tolerance):
        # Log error
        if not steps:
            logger.debug("Initial error before fitwalk is {}"
                         "".format(int(target - last_shot)))
        else:
            logger.debug("fitwalk is reporting an error of {} after step #{}"
                         "".format(int(target - last_shot), steps))

        # Break on maximum step count
        if max_steps and steps >= max_steps:
            raise RuntimeError("fitwalk failed to converge after {} steps"
                               "".format(steps))

        # Use the naive step plan if no model is accurate enough
        # or we have not made a step yet
        if not accurate_model or steps == 0:
            logger.debug("No model yielded accurate prediction, "
                         "using naive plan")
            yield from naive_step()
        else:
            logger.debug("Using model {} to determine next step."
                         "".format(accurate_model.name))
            # Calculate estimate of next step from accurate model
            fixed_motors = dict((key, averaged_data[key])
                                for key in field_names
                                if key not in motors.keys())
            # Try and step off model prediction
            try:
                estimates = accurate_model.backsolve(target, **fixed_motors)
            # Report model faults
            except Exception as e:
                logger.warning("Accurate model {} was unable to backsolve "
                               "for target {}".format(accurate_model.name,
                                                      target))
                logger.warning(e)
                # Reuse naive step
                logger.debug("Reusing naive step due to lack of "
                             "accurate model")
                yield from naive_step()
            else:
                # Move system to match estimate
                for param, pos in estimates.items():
                    # Watch for NaN
                    if pd.isnull(pos) or np.isinf(pos):
                        raise RuntimeError("Invalid position returned by fit")
                    # Attempt to move
                    try:
                        logger.debug("Adjusting motor {} to position {:.1f}"
                                     "".format(motor.name, pos))
                        yield from mv(motor, pos)
                    except KeyError as e:
                        logger.debug("No motor found to adjust variable {}"
                                     "".format(e))

        # Count our steps
        steps += 1

        # Take a new measurement
        logger.debug("Resampling after successful move")
        averaged_data, last_shot, accurate_model = yield from model_measure()

    # Report a successful run
    logger.info("Successfully walked to value {} (target={}) after {} steps."
                "".format(int(last_shot), target, steps))

    return last_shot, accurate_model
def measure(detectors, num=1, delay=None, filters=None, drop_missing=True,
            max_dropped=50):
    """
    Gather a fixed number of measurements from a group of detectors

    Parameters
    ----------
    detectors : list
        List of detector objects to read and bundle

    num : int
        Number of measurements that pass filters

    delay : float
        Minimum time between consecutive reads of the detectors.

    filters : dict, optional
        Key, callable pairs of event keys and single input functions that
        evaluate to True or False. For more information see
        :meth:`.apply_filters`

    drop_missing : bool, optional
        Choice to include events where event keys are missing

    max_dropped : int, optional
        Maximum number of events to drop before raising a ValueError

    Returns
    -------
    data : list
        List of mock-event documents
    """
    # Log setup
    logger.debug("Running measure")
    logger.debug("Arguments passed: detectors: {0}, "
                 "num: {1}, delay: {2}, drop_missing: {3}"
                 "".format([d.name for d in detectors],
                           num, delay, drop_missing))

    # If a scalar was given, repeat it forever
    if not isinstance(delay, Iterable):
        delay = itertools.repeat(delay)
    else:
        # Number of supplied delays
        try:
            num_delays = len(delay)
        # Invalid delay
        except TypeError as err:
            err_msg = "Supplied delay must be scalar or iterable"
            logger.error(err_msg)
            raise ValueError(err_msg) from err
        # Handle provided iterable
        else:
            # Invalid number of delays for shot counts
            if num - 1 > num_delays:
                err = ("num={:} but delays only provides "
                       "{:} entries".format(num, num_delays))
                logger.error(err, stack_info=True)
                raise ValueError(err)
        # Ensure it is an iterator
        delay = iter(delay)

    # Gather shots
    logger.debug("Gathering shots..")
    shots = 0
    dropped = 0
    data = list()
    filters = filters or dict()

    # Gather a fixed number of shots
    while shots < num:
        # Timestamp earliest possible moment
        now = time.time()
        # Trigger detectors
        for det in detectors:
            yield Msg('trigger', det, group='B')
        # Wait for completion and start bundling
        yield Msg('wait', None, 'B')
        yield Msg('create', None, name='primary')
        # Mock-event document
        det_reads = dict()
        # Gather shots
        for det in detectors:
            cur_det = yield Msg('read', det)
            det_reads.update(dict([(k, v['value'])
                                   for k, v in cur_det.items()]))
        # Emit Event doc to callbacks
        yield Msg('save')
        # Apply filters
        unfiltered = apply_filters(det_reads, filters=filters,
                                   drop_missing=drop_missing)
        # Increment shots if filters are passed
        shots += int(unfiltered)
        # Do not delay if we have not passed the filters
        if unfiltered:
            # Append recent read to data list
            data.append(det_reads)
            # Gather next delay
            try:
                d = next(delay)
            # Out of delays
            except StopIteration:
                # If this was our last measurement, that is fine
                if shots == num:
                    break
                # Otherwise raise an exception
                else:
                    err = ("num={:} but delays only provides "
                           "{:} entries".format(num, shots))
                    logger.error(err, stack_info=True)
                    raise ValueError(err)
            # If we have a delay, sleep
            if d is not None:
                d = d - (time.time() - now)
                if d > 0:
                    yield Msg('sleep', None, d)
        # Report filtered event
        else:
            dropped += 1
            logger.debug('Ignoring inadequate measurement, '
                         'attempting to gather again...')
            if dropped > max_dropped:
                dropped_dict = {}
                for key in filters.keys():
                    dropped_dict[key] = det_reads[key]
                logger.debug(('Dropped too many events, raising exception. '
                              'Latest bad values were %s'), dropped_dict)
                raise FilterCountError

    # Report finished
    logger.debug("Finished taking {} measurements, "
                 "filters removed {} events"
                 "".format(len(data), dropped))

    return data
def read_and_store_temp():
    yield Msg('create')
    yield Msg('read', temp_controller)
    yield Msg('save')
def fly_gen(flyer):
    yield Msg('open_run')
    yield Msg('kickoff', flyer, group='fly-kickoff')
    yield Msg('wait', None, group='fly-kickoff')
    yield Msg('complete', flyer, group='fly-complete')
    yield Msg('wait', None, group='fly-complete')
    yield Msg('collect', flyer)
    yield Msg('kickoff', flyer, group='fly-kickoff2')
    yield Msg('wait', None, group='fly-kickoff2')
    yield Msg('complete', flyer, group='fly-complete2')
    yield Msg('wait', None, group='fly-complete2')
    yield Msg('collect', flyer)
    yield Msg('close_run')
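# Usage sketch for the fly scan above. It assumes ophyd.sim still ships
# TrivialFlyer, a do-nothing object implementing the kickoff/complete/
# collect/describe_collect flyer interface; any real flyer could be
# substituted. Because fly_gen kicks off twice, the flyer must tolerate
# being reused within a single run.
from bluesky import RunEngine
from ophyd.sim import TrivialFlyer

RE = RunEngine({})
RE(fly_gen(TrivialFlyer()))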
def unload_sample():
    # TODO: I think this can be simpler.
    return (yield from single_gen(Msg("unload_sample",
                                      xpd_configuration["robot"])))
def adaptive_core():
    next_pos = start
    step = (stop - start) / 2
    past_I = None
    cur_I = target_val + 99  # start the while loop
    repeat = 0
    largest_out_of_bound = stop
    cnt = 0

    if stop >= start:
        direction_sign = 1
    else:
        direction_sign = -1

    while cur_I > target_val or (
            (largest_out_of_bound - next_pos) > accuracy) or repeat < 3:
        debug = False
        if debug:
            print('Current value = {}; largest_out_of_bound = {}; repeat = {}'
                  .format(cur_I, largest_out_of_bound, repeat))
        yield Msg('checkpoint')
        yield from bps.mv(motor, next_pos)
        yield Msg('create', None, name='primary')
        for det in detectors:
            yield Msg('trigger', det, group='B')
        yield Msg('wait', None, 'B')
        for det in utils.separate_devices(detectors + [motor] +
                                          [outer_motor]):
            cur_det = yield Msg('read', det)
            if target_field in cur_det:
                cur_I = calc_function(value=cur_det[target_field]['value'])
        yield Msg('save')

        # special-case the first loop
        if past_I is None:
            past_I = cur_I
            next_pos += step * direction_sign
            continue

        # binary search
        if cur_I > past_I:
            if cur_I < target_val:
                direction_sign = 1
            else:
                direction_sign = -1
                largest_out_of_bound = np.min(
                    [largest_out_of_bound, next_pos])
        elif cur_I <= past_I:
            if cur_I < target_val:
                direction_sign = 1
            else:
                direction_sign = -1
                largest_out_of_bound = np.min(
                    [largest_out_of_bound, next_pos])
        else:
            print('binary search error')

        # The logic here is non-conventional: once the dynamic reserve is
        # exceeded, measurements jump around significantly, so we search for
        # the largest interfering amplitude that does NOT cause a deviation
        # over 5% (target_val) and that is a specified maximum voltage away
        # from an interferer amplitude that did cause a deviation greater
        # than the target value.
        past_I = cur_I
        step = np.max([step / 2, min_step])

        if (cur_I < target_val) and (
                abs(largest_out_of_bound - next_pos) < accuracy):
            repeat += 1
            next_pos = next_pos  # hold position
        else:
            repeat = 0
            next_pos += step * direction_sign

        # By the binary search this shouldn't be needed; just in case.
        next_pos = np.clip(next_pos, start, stop)

        # Handle conditions that might get stuck; bail out after too many
        # loops.
        if ((next_pos == start) or (next_pos == stop)
                and (repeat == 0) and (cnt > 8)) or (cnt > 50):
            break
        cnt += 1
def outer_loop():
    for step in outer_steps:
        yield Msg('checkpoint')
        yield from bps.mv(outer_motor, step)
        yield from adaptive_core()
def simple_scan(motor):
    yield Msg('open_run')
    yield Msg('set', motor, 5)
    yield Msg('read', motor)
    yield Msg('close_run')