def print_report(self):
    """print a validation report"""
    print("data file: " + self.fname)
    print("NeXus definitions ({}): {}, dated {}, sha={}\n".format(
        self.manager.nxdl_file_set.ref_type,
        self.manager.nxdl_file_set.ref,
        self.manager.nxdl_file_set.last_modified,
        self.manager.nxdl_file_set.sha,
    ))

    def sort_validations(f):
        value = f.h5_address
        value += " %3d" % -f.status.value  # sort from best to worst
        value += " " + f.status.description
        value = value.replace("@", " @")  # keep attributes with group or dataset
        return value

    print("findings")
    t = pyRestTable.Table()
    for label in "address status test comments".split():
        t.addLabel(label)
    for f in sorted(self.validations, key=sort_validations):
        if f.status == finding.OPTIONAL:
            continue  # enable if you like verbose reports
        row = []
        row.append(f.h5_address)
        row.append(f.status)
        row.append(f.test_name)
        row.append(f.comment)
        t.addRow(row)
    print(str(t))

    summary = self.finding_summary()
    t = pyRestTable.Table()
    for label in "status count description (value)".split():
        t.addLabel(label)
    for s, c in summary.items():
        row = [s.key, c, s.description, s.value]
        t.addRow(row)
    t.addRow(["", "--", "", ""])
    t.addRow(["TOTAL", sum(summary.values()), "", ""])
    print("\nsummary statistics")
    print(str(t))
    total, count, average = self.finding_score()
    print("<finding>=%f of %d items reviewed" % (average, count))
def object_explorer(obj, sortby=None, fmt='simple', printing=True):
    """print the contents of obj"""
    t = pyRestTable.Table()
    t.addLabel("name")
    t.addLabel("PV reference")
    t.addLabel("value")
    items = _ophyd_structure_walker(obj)
    logger.debug(f"number of items: {len(items)}")

    def sorter(obj):
        if sortby is None:
            key = obj.dotted_name
        elif str(sortby).lower() == "pv":
            key = _get_pv(obj) or "--"
        else:
            raise ValueError(
                "sortby should be None or 'PV',"
                f" found sortby='{sortby}'"
            )
        return key

    for item in sorted(items, key=sorter):
        t.addRow((item.dotted_name, _get_pv(item), item.get()))
    if printing:
        print(t.reST(fmt=fmt))
    return t
def device_read2table(device, show_ancient=True, use_datetime=True, printing=True):
    """
    read an ophyd device and return a pyRestTable Table

    Include an option to suppress ancient values identified
    by timestamp from 1989.  These are values only defined in
    the original ``.db`` file.
    """
    table = pyRestTable.Table()
    table.labels = "name value timestamp".split()
    ANCIENT_YEAR = 1989
    for k, rec in device.read().items():
        value = rec["value"]
        ts = rec["timestamp"]
        dt = datetime.datetime.fromtimestamp(ts)
        if dt.year > ANCIENT_YEAR or show_ancient:
            if use_datetime:
                ts = dt
            table.addRow((k, value, ts))
    if printing:
        print(table)
    return table
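# Hedged usage sketch for device_read2table(), using a simulated device from
# ophyd (assumes ophyd is installed); any connected ophyd Device works the
# same way.
from ophyd.sim import motor

device_read2table(motor)                        # print and return the table
tbl = device_read2table(motor, printing=False)  # return the table only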
def table_of_caches(self):
    """
    return a pyRestTable table describing all known file sets in both source and user caches

    :returns obj: instance of pyRestTable.Table with all known file sets

    **Example**::

        ============= ======= ====== =================== ======= ===================================
        NXDL file set type    cache  date & time         commit  path
        ============= ======= ====== =================== ======= ===================================
        v3.2          tag     source 2017-01-18 23:12:44 e888dac /home/user/punx/src/punx/cache/v3.2
        NXroot-1.0    tag     user   2016-10-24 14:58:10 e0ad63d /home/user/.config/punx/NXroot-1.0
        master        branch  user   2016-12-20 18:30:29 85d056f /home/user/.config/punx/master
        Schema-3.3    release user   2017-05-02 12:33:19 4aa4215 /home/user/.config/punx/Schema-3.3
        a4fd52d       commit  user   2016-11-19 01:07:45 a4fd52d /home/user/.config/punx/a4fd52d
        ============= ======= ====== =================== ======= ===================================

    """
    t = pyRestTable.Table()
    fs = self.find_all_file_sets()
    t.labels = [
        'NXDL file set', 'type', 'cache', 'date & time', 'commit', 'path'
    ]
    for k, v in fs.items():
        # print(k, str(v))
        row = [k, ]
        v.short_sha = get_short_sha(v.sha)
        for w in 'ref_type cache last_modified short_sha path'.split():
            row.append(str(v.__getattribute__(w)))
        t.rows.append(row)
    return t
def makeText(self):
    '''
    generate the text of the panel
    '''
    tbl = pyRestTable.Table()
    tbl.labels = ['GUP ID', ] + [rvwr.getFullName() for rvwr in self.agup.reviewers]
    # TODO: call code in auto_assignment module
    for prop in self.agup.proposals:
        prop_id = prop.getKey('proposal_id')
        row = [prop_id, ]
        assigned = prop.getAssignedReviewers()
        for rvwr in self.agup.reviewers:
            full_name = rvwr.getFullName()
            score = int(100.0 * prop.topics.dotProduct(rvwr.topics) + 0.5)
            if full_name in assigned:
                role = assigned.index(full_name)
                if role == 0:
                    text = '1: ' + str(score)
                elif role == 1:
                    text = '2: ' + str(score)
            else:
                text = score
            row.append(text)
        tbl.rows.append(row)
    return tbl.reST()
def table_of_caches(self):
    """
    return a pyRestTable table describing all known file sets in both source and user caches

    :returns obj: instance of pyRestTable.Table with all known file sets

    **Example**::

        ============= ====== =================== ======= ==================================================================
        NXDL file set cache  date & time         commit  path
        ============= ====== =================== ======= ==================================================================
        a4fd52d       source 2016-11-19 01:07:45 a4fd52d /home/prjemian/Documents/projects/prjemian/punx/punx/cache/a4fd52d
        v3.3          source 2017-07-12 10:41:12 9285af9 /home/prjemian/Documents/projects/prjemian/punx/punx/cache/v3.3
        v2018.5       source 2018-05-15 16:34:19 a3045fd /home/prjemian/Documents/projects/prjemian/punx/punx/cache/v2018.5
        Schema-3.4    user   2018-05-15 08:24:34 aa1ccd1 /home/prjemian/.config/punx/Schema-3.4
        main          user   2021-12-17 13:09:18 041c2c0 /home/prjemian/.config/punx/main
        ============= ====== =================== ======= ==================================================================

    """

    def sorter(kv):
        return kv[-1].last_modified

    t = pyRestTable.Table()
    t.labels = ["NXDL file set", "cache", "date & time", "commit", "path"]
    for k, v in sorted(self.all_file_sets.items(), key=sorter):
        v.short_sha = get_short_sha(v.sha)
        row = [k]
        for w in "cache last_modified short_sha path".split():
            row.append(str(getattr(v, w)))
        t.rows.append(row)
    return t
def object_explorer(obj, sortby=None):
    """print the contents of obj"""
    t = pyRestTable.Table()
    t.addLabel("name")
    t.addLabel("PV reference")
    t.addLabel("value")
    items = miner(obj)
    # print(len(items))

    def sorter(obj):
        if sortby is None:
            key = obj.dotted_name
        elif str(sortby).lower() == "pv":
            key = get_pv(obj) or "--"
        else:
            raise ValueError(
                "sortby should be None or 'PV',"
                f" found sortby='{sortby}'"
            )
        return key

    for item in sorted(items, key=sorter):
        # t.addRow((full_dotted_name(item), get_pv(item), item.get()))
        t.addRow((item.dotted_name, get_pv(item), item.get()))
    print(t)
    return t
def report(db):
    '''report the results in a table to stdout'''
    pprint.pprint(db)
    dates = []
    for m in db.values():
        dates += m.keys()
    dates = unique_list(dates)
    modules = sorted(db.keys())
    table = pyRestTable.Table()
    table.addLabel('released')
    for m in sorted(modules):
        table.addLabel(m)
    # get unique list of datetimes
    for ts in sorted(dates):
        row = [ts]
        for module in sorted(modules):
            if ts in db[module]:
                row.append(db[module][ts])
            else:
                row.append('')
        table.addRow(row)
    try:
        print(table.reST())
    except Exception:
        pass
def cmd_cycles(args):
    """
    Handle ``cycles`` command.

    PARAMETERS

    args (obj):
        Object returned by ``argparse``
    """
    if args.full:
        table = pyRestTable.Table()
        table.labels = "cycle start end".split()

        def sorter(entry):
            return entry["startTime"]

        for entry in sorted(api_bss.listRuns(), key=sorter, reverse=args.ascending):
            table.addRow((
                entry["name"],
                entry["startTime"],
                entry["endTime"],
            ))
        logger.debug("%s", str(table))
    else:
        printColumns(listAllRuns())
def showConstraints(self):
    tbl = pyRestTable.Table()
    tbl.labels = "axis low_limit high_limit value fit".split()
    for m in self.real_positioners._fields:
        tbl.addRow((
            m,
            *self.calc[m].limits,
            self.calc[m].value,
            self.calc[m].fit,
        ))
    print(tbl)
def forwardSolutionsTable(self, reflections, full=False):
    """
    return table of computed solutions for each (hkl) in the supplied reflections list

    The solutions are calculated using the current UB matrix & constraints
    """
    _table = pyRestTable.Table()
    motors = self.real_positioners._fields
    _table.labels = "(hkl) solution".split() + list(motors)
    for reflection in reflections:
        try:
            solutions = self.calc.forward(reflection)
        except ValueError as exc:
            solutions = exc
        if isinstance(solutions, ValueError):
            row = [reflection, "none"]
            row += ["" for m in motors]
            _table.addRow(row)
        else:
            for i, s in enumerate(solutions):
                row = [reflection, i]
                row += [f"{getattr(s, m):.5f}" for m in motors]
                _table.addRow(row)
                if not full:
                    break  # only show the first (default) solution
    return _table
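# Hedged usage sketch: assumes a diffractometer instance (e.g. a hypothetical
# hklpy E4CV geometry named "e4cv") with sample, UB matrix, and constraints
# already configured.
# print(e4cv.forwardSolutionsTable([(1, 0, 0), (0, 1, 1)]))  # first solution per (hkl)
# print(e4cv.forwardSolutionsTable([(1, 1, 1)], full=True))  # every allowed solution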
def show_sample(sample_name=None, verbose=True):
    """Print the default sample name and crystal lattice."""
    _check_geom_selected()
    sample_name = sample_name or _geom_.calc.sample_name
    sample = _geom_.calc._samples[sample_name]

    title = sample_name
    if sample_name == _geom_.calc.sample.name:
        title += " (*)"

    # Print Lattice more simply (than as a namedtuple).
    lattice = [
        getattr(sample.lattice, parm) for parm in sample.lattice._fields
    ]
    if verbose:
        tbl = pyRestTable.Table()
        tbl.addLabel("key")
        tbl.addLabel("value")
        tbl.addRow(("name", sample_name))
        tbl.addRow(("lattice", lattice))
        for i, r in enumerate(sample.reflections):
            tbl.addRow((f"reflection {i+1}", r))
        tbl.addRow(("U", numpy.round(sample.U, 5)))
        tbl.addRow(("UB", numpy.round(sample.UB, 5)))

        print(f"Sample: {title}\n")
        print(tbl)
    else:
        print(f"{title}: {lattice}")
def _scan(width=1, step_factor=10, num=10, snake=True):
    for _pass_number in range(pass_max):
        _md = {
            'pass': _pass_number + 1,
            'pass_max': pass_max,
            'plan_name': self.__class__.__name__ + '.multi_pass_tune',
        }
        _md.update(md or {})
        yield from self.tune(width=width, num=num, md=_md)
        if not self.tune_ok:
            break
        width /= step_factor
        if snake:
            width *= -1

    t = pyRestTable.Table()
    t.labels = "pass Ok? center width max.X max.Y".split()
    for i, stat in enumerate(self.stats):
        row = [i + 1, ]
        row.append(stat.tune_ok.get())
        row.append(stat.cen.get())
        row.append(stat.fwhm.get())
        x, y = stat.max.get()
        row += [x, y]
        t.addRow(row)
    logger.info("Results\n%s", str(t))
    logger.info("Final tune position: %s = %f", self.axis.name, self.axis.position)
def plan_catalog(db):
    """
    make a table of all scans known in the databroker

    Example::

        from apstools.examples import plan_catalog
        plan_catalog(db)

    """
    import pyRestTable

    t = pyRestTable.Table()
    t.labels = "date/time short_uid id plan args".split()
    for h in db.hs.find_run_starts():
        row = []
        dt = datetime.datetime.fromtimestamp(h["time"])
        row.append(str(dt).split(".")[0])
        row.append(h['uid'][:8])
        command = _rebuild_scan_command(h)
        scan_id = command.split()[0]
        command = command[len(scan_id):].strip()
        plan = command.split("(")[0]
        args = command[len(plan) + 1:].rstrip(")")
        row.append(scan_id)
        row.append(plan)
        row.append(args)
        t.addRow(row)
    t.rows = t.rows[::-1]  # reverse the list
    return t
def print_report(self, statuses=None):
    """Print a validation report."""
    reported_statuses = statuses or list(finding.VALID_STATUS_DICT.keys())

    print("data file: " + self.fname)
    print(
        f"NeXus definitions: {self.manager.nxdl_file_set.ref}"
        f", dated {self.manager.nxdl_file_set.last_modified}"
        f", sha={self.manager.nxdl_file_set.sha}\n"
    )

    def sort_validations(f):
        value = f.h5_address
        value += " %3d" % -f.status.value  # sort from best to worst
        value += " " + f.status.description
        value = value.replace("@", " @")  # keep attributes with group or dataset
        return value

    print("findings")
    t = pyRestTable.Table()
    for label in "address status test comments".split():
        t.addLabel(label)
    for f in sorted(self.validations, key=sort_validations):
        if str(f.status) in reported_statuses:
            row = []
            row.append(f.h5_address)
            row.append(f.status)
            row.append(f.test_name)
            row.append(f.comment)
            t.addRow(row)
    print(str(t))

    summary = self.finding_summary()
    t = pyRestTable.Table()
    for label in "status count description (value)".split():
        t.addLabel(label)
    for s, c in summary.items():
        row = [s.key, c, s.description, s.value]
        t.addRow(row)
    t.addRow(["", "--", "", ""])
    t.addRow(["TOTAL", sum(summary.values()), "", ""])
    print("\nsummary statistics")
    print(str(t))
    total, count, average = self.finding_score()
    print("<finding>=%f of %d items reviewed" % (average, count))
def test_summary_table():
    """
    This summary table was used as developer test code.

    It was re-developed into a unit test and could be converted
    into some useful report for the user interface.
    """
    cm = cache_manager.CacheManager()
    with pytest.raises(KeyError):
        # Search for a file set (branch, release, commit, tag)
        # using a name that will not be found.
        cm.select_NXDL_file_set("no-such-fileset-reference")
    cm.select_NXDL_file_set(FILE_SET)
    assert cm is not None
    assert cm.default_file_set is not None

    manager = nxdl_manager.NXDL_Manager(cm.default_file_set)
    counts_keys = 'attributes fields groups links symbols'.split()
    total_counts = {k: 0 for k in counts_keys}

    def count_group(g, counts):
        for k in counts_keys:
            if hasattr(g, k):
                n = len(g.__getattribute__(k))
                if n > 0:
                    counts[k] += n
        for group in g.groups.values():
            counts = count_group(group, counts)
        return counts

    import pyRestTable

    t = pyRestTable.Table()
    t.labels = 'class category'.split() + counts_keys
    for v in manager.classes.values():
        row = [v.title, v.category]
        counts = {k: 0 for k in counts_keys}
        counts = count_group(v, counts)
        for k in counts_keys:
            n = counts[k]
            total_counts[k] += n
            if n == 0:
                n = ""
            row.append(n)
        t.addRow(row)

    t.addRow(["TOTAL", "-" * 4] + ["-" * 4 for k in counts_keys])
    row = [len(manager.classes), 3]
    for k in counts_keys:
        n = total_counts[k]
        if n == 0:
            n = ""
        row.append(n)
    t.addRow(row)

    report_lines = t.reST().strip().splitlines()
    for r, e in zip(report_lines, EXPECTED_SUMMARY_TABLE):
        assert r.strip() == e.strip()

    assert str(manager) == SUMMARY_STRING_REPRESENTATION
def showConstraints(self, fmt="simple"):
    """print the current constraints in a table"""
    tbl = pyRestTable.Table()
    tbl.labels = "axis low_limit high_limit value fit".split()
    for m in self.real_positioners._fields:
        tbl.addRow((
            m,
            *self.calc[m].limits,
            self.calc[m].value,
            self.calc[m].fit,
        ))
    print(tbl.reST(fmt=fmt))
def machine_learning_model(model_size=None, train_frac=None):
    model_size = model_size or 10000  # number of frames to select
    train_frac = train_frac or 0.5  # fraction of selected frames used to train the net
    test_frac = 1.0 - train_frac  # fraction of selected frames used to test the net

    for k, r in recordings.items():
        print(f"Recording: {k}")
        r["blocks"] = categorize_by_blocks(
            r["events"], rate=r["rate"], exclusion=r["exclude_frames"])
        total_frames = sum([
            block[1] - block[0]
            for block in r["blocks"]
        ])

        def identify_category(fr_num):
            for b in r["blocks"]:
                if b[0] <= fr_num < b[1]:
                    return b[2]

        selected_frames = sorted([
            fr_num
            for fr_num in np.random.randint(0, total_frames, model_size)
            if identify_category(fr_num) != "validate"
        ])
        r["selected_frames"] = selected_frames

        # REPORTS
        # table = pyRestTable.Table()
        # table.labels = "fr_start fr_end category".split()
        # for fr_start, fr_end, category in r["blocks"]:
        #     table.addRow((fr_start, fr_end, category))
        # print(table)

        # summary = {k: 0 for k in categories}
        # for block in r["blocks"]:
        #     v = block[1] - block[0]
        #     summary[block[2]] += v
        # table = pyRestTable.Table()
        # table.labels = "category number_frames".split()
        # for k, v in summary.items():
        #     table.addRow((k, v))
        # table.addRow(("TOTAL", total_frames))
        # print(table)

        selected_count = len(selected_frames)
        selected_summary = {k: 0 for k in categories}
        for fr_num in selected_frames:
            selected_summary[identify_category(fr_num)] += 1
        table = pyRestTable.Table()
        table.labels = "category number_frames".split()
        for k, v in selected_summary.items():
            table.addRow((k, v))
        table.addRow(("TOTAL", selected_count))
        print(table)
def summarize_runs(since=None, db=None):
    """
    Report bluesky run metrics from the databroker.

    * How many different plans?
    * How many runs?
    * How many times each run was used?
    * How frequently?  (TODO:)

    PARAMETERS

    since (str) :
        Report all runs since this ISO8601 date & time
        (default: ``1995``)
    db (object) :
        Instance of ``databroker.Broker()``
        (default: ``db`` from the IPython shell)
    """
    db = db or ipython_shell_namespace()["db"]
    since = since or "1995"  # no APS X-ray experiment data before 1995!
    cat = db.v2.search(databroker.queries.TimeRange(since=since))
    plans = defaultdict(list)
    t0 = time.time()
    for n, uid in enumerate(cat):
        t1 = time.time()
        run = cat[uid]  # this step is very slow (0.01 - 0.5 seconds each!)
        t2 = time.time()
        plan_name = run.metadata["start"].get("plan_name", "unknown")
        dt = datetime.datetime.fromtimestamp(
            run.metadata["start"]["time"]).isoformat()
        scan_id = run.metadata["start"].get("scan_id", "unknown")
        plans[plan_name].append(
            dict(
                plan_name=plan_name,
                dt=dt,
                time_start=dt,
                uid=uid,
                scan_id=scan_id,
            ))
        logger.debug(
            "%s %s dt1=%4.01fus dt2=%5.01fms %s",
            scan_id,
            dt,
            (t1 - t0) * 1e6,
            (t2 - t1) * 1e3,
            plan_name,
        )
        t0 = time.time()

    def sorter(plan_name):
        return len(plans[plan_name])

    table = pyRestTable.Table()
    table.labels = "plan quantity".split()
    for k in sorted(plans.keys(), key=sorter, reverse=True):
        table.addRow((k, sorter(k)))
    table.addRow(("TOTAL", n + 1))
    print(table)
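# Hedged usage sketch: call from an IPython/bluesky session in which "db"
# (a databroker.Broker instance) is defined; both arguments are optional.
# summarize_runs()                    # every run since 1995, via the session's db
# summarize_runs(since="2022-01-01")  # narrow the TimeRange query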
def tune_guard_slit_motor(motor, width, steps):
    if steps < 10:
        raise GuardSlitTuneError(
            f"Not enough points ({steps}) to tune guard slits.")

    x_c = motor.position
    x_0 = x_c - abs(width)/2
    x_n = x_c + abs(width)/2

    scaler0.select_channels([UPD_SIGNAL.chname.get()])
    scaler0.channels.chan01.kind = Kind.config

    tuner = TuneAxis([scaler0], motor, signal_name=UPD_SIGNAL.chname.get())
    yield from tuner.tune(width=-width, num=steps+1)

    bluesky_runengine_running = RE.state != "idle"
    if bluesky_runengine_running:
        found = tuner.peak_detected()
        center = tuner.peaks.com  # center of mass

        table = pyRestTable.Table()
        table.addLabel("tune parameter")
        table.addLabel("fitted value")
        table.addRow(("peak detected?", found))
        table.addRow(("center of mass", center))
        table.addRow(("center from half max", tuner.peaks.cen))
        table.addRow(("peak max (x,y)", tuner.peaks.max))
        table.addRow(("FWHM", tuner.peaks.fwhm))
        logger.info(table)

        def cleanup_then_GuardSlitTuneError(msg):
            logger.warning(f"{motor.name}: move to {x_c} (initial position)")
            scaler0.select_channels(None)
            yield from bps.mv(
                motor, x_c,
                scaler0.preset_time, old_preset_time,
                ti_filter_shutter, "close"
            )
            raise GuardSlitTuneError(msg)

        if not found:
            yield from cleanup_then_GuardSlitTuneError(
                f"{motor.name} Peak not found.")
        if center < x_0:  # sanity check that start <= COM
            msg = f"{motor.name}: Computed center too low: {center} < {x_0}"
            yield from cleanup_then_GuardSlitTuneError(msg)
        if center > x_n:  # sanity check that COM <= end
            msg = f"{motor.name}: Computed center too high: {center} > {x_n}"
            yield from cleanup_then_GuardSlitTuneError(msg)
        if max(tuner.peaks.y_data) <= guard_slit.tuning_intensity_threshold:
            msg = f"{motor.name}: Peak intensity not strong enough to tune."
            msg += f" {max(tuner.peaks.y_data)} < {guard_slit.tuning_intensity_threshold}"
            yield from cleanup_then_GuardSlitTuneError(msg)

        logger.info(f"{motor.name}: move to {center} (center of mass)")
        yield from bps.mv(motor, center)
def activity_report(self):
    config_file = self._build_test_catalog()

    table = pyRestTable.Table()
    table.addLabel("Mongodb server")
    table.addLabel("Databroker repository")
    table.addLabel("total runs")
    table.addLabel(f"runs since {self.since}")
    table.addLabel("first run")
    table.addLabel("last run")

    # Empirical: Needed to add a pause here.
    # Without the pause, the YAML config we just wrote is not found.
    # 0.5 s was too short, 1 s worked.
    time.sleep(1)

    cat_list = list(databroker.yaml_catalogs)
    for bs_repo in cat_list:
        if not bs_repo.startswith(DB_PREFIX):
            continue
        repo = bs_repo[len(DB_PREFIX):]
        _p = repo.find("-")
        repo = repo[_p + 1:]
        # print(repo)
        cat = databroker.catalog[bs_repo]
        host = cat._metadatastore_db.client.address[0]
        total_runs = len(cat)
        if total_runs:
            first_run = cat[-total_runs]
            last_run = cat[-1]
            first_date = ts2isotime(first_run.metadata["start"]["time"])
            last_date = ts2isotime(last_run.metadata["start"]["time"])
        else:
            first_date, last_date = "", ""
        search_period = databroker.queries.TimeRange(
            since=self.since,
            # until=until
        )
        recent_cat = cat.search(search_period)
        recent_runs = len(recent_cat)
        # fmt: off
        table.addRow((
            host, repo,
            total_runs, recent_runs,
            first_date, last_date,
        ))
        # fmt: on

    os.remove(config_file)
    return table
def repository_report(self):
    table = pyRestTable.Table()
    table.addLabel("host")
    table.addLabel("repository")
    table.addLabel("runs database")
    table.addLabel("file references db")
    for host, server in self.registry.items():
        for repo, pair in server.repositories.items():
            table.addRow((host, repo, pair.runs, pair.refs))
    return table
def command_list_as_table(commands):
    """Format a command list as a :class:`pyRestTable.Table()` object."""
    tbl = pyRestTable.Table()
    tbl.addLabel("line #")
    tbl.addLabel("action")
    tbl.addLabel("parameters")
    for command in commands:
        action, args, line_number = command[:3]
        row = [line_number, action, ", ".join(map(str, args))]
        tbl.addRow(row)
    return tbl
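# Hedged usage sketch for command_list_as_table(): each entry supplies at
# least (action, args, line_number).  The sample commands below are
# illustrative, not from a real command file.
commands = [
    ("setSampleName", ["blank"], 1),
    ("acquire", [5, 0.1], 2),
]
print(command_list_as_table(commands))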
def report(self, title=None):
    keys = self.peakstats_attrs + "tune_ok center initial_position final_position".split()
    t = pyRestTable.Table()
    t.addLabel("key")
    t.addLabel("result")
    for key in keys:
        v = getattr(self, key).get()
        t.addRow((key, str(v)))
    if title is not None:
        print(title)
    print(t)
def multi_pass_tune_summary(self):
    t = pyRestTable.Table()
    t.labels = "pass Ok? center width max.X max.Y".split()
    for i, stat in enumerate(self.stats):
        row = [i + 1, ]
        row.append(stat.tune_ok.get())
        row.append(stat.cen.get())
        row.append(stat.fwhm.get())
        x, y = stat.max.get()
        row += [x, y]
        t.addRow(row)
    return t
def to_table(self, fmt=None):
    """Render as a pyRestTable reST string.  (backwards compatible)"""
    self._check_keys()
    dd = self.parse_runs()

    table = pyRestTable.Table()
    rows = []
    for label, values in dd.items():
        table.addLabel(label)
        rows.append(values)
    table.rows = list(zip(*rows))
    return table.reST(fmt=fmt or "simple")
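# Hedged sketch of the column-to-row transposition that to_table() relies on:
# a dict of equal-length columns becomes one labeled table.  The dict below
# is hypothetical, for illustration only.
import pyRestTable

dd = {"scan_id": [1, 2], "plan": ["count", "rel_scan"]}
demo = pyRestTable.Table()
columns = []
for label, values in dd.items():
    demo.addLabel(label)
    columns.append(values)
demo.rows = list(zip(*columns))  # transpose: columns -> rows
print(demo.reST(fmt="simple"))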
def dictionary_table(dictionary, fmt="simple", printing=True):
    """
    return a table object from ``dictionary``

    PARAMETERS

    dictionary : dict
        Python dictionary
    fmt : str
        Any of the format names provided by
        `pyRestTable <https://pyresttable.readthedocs.io/en/latest/examples/index.html#examples>`_
        One of these: ``simple | plain | grid | complex | markdown | list-table | html``

        default: ``simple``
    printing : bool
        Should this function print to stdout?

        default: ``True``

    RETURNS

    table : obj or `None`
        multiline text table (pyRestTable object) with dictionary
        contents in chosen format or ``None`` if dictionary has no contents

    EXAMPLE::

        In [8]: RE.md
        Out[8]:
        {'login_id': 'jemian:wow.aps.anl.gov',
         'beamline_id': 'developer',
         'proposal_id': None,
         'pid': 19072,
         'scan_id': 10,
         'version': {'bluesky': '1.5.2',
          'ophyd': '1.3.3',
          'apstools': '1.1.5',
          'epics': '3.3.3'}}

        In [9]: print(dictionary_table(RE.md, printing=False))
        =========== =============================================================================
        key         value
        =========== =============================================================================
        beamline_id developer
        login_id    jemian:wow.aps.anl.gov
        pid         19072
        proposal_id None
        scan_id     10
        version     {'bluesky': '1.5.2', 'ophyd': '1.3.3', 'apstools': '1.1.5', 'epics': '3.3.3'}
        =========== =============================================================================

    """
    if len(dictionary) == 0:
        return
    _t = pyRestTable.Table()
    _t.addLabel("key")
    _t.addLabel("value")
    for k, v in sorted(dictionary.items()):
        _t.addRow((k, str(v)))
    if printing:
        print(_t.reST(fmt=fmt))
    return _t
def stop(self, doc):
    if self.xref is None:  # not from a snapshot plan
        return

    t = pyRestTable.Table()
    t.addLabel("timestamp")
    t.addLabel("source")
    t.addLabel("name")
    t.addLabel("value")
    for k, v in sorted(self.xref.items()):
        p = k.find(":")
        t.addRow((v["timestamp"], k[:p], k[p + 1:], v["value"]))
    print(t)
    for k, v in sorted(doc.items()):
        print(f"{k}: {v}")
def peak_analysis():
    aligned = False
    if counter.name in bec.peaks["cen"]:
        table = pyRestTable.Table()
        table.labels = ("key", "value")
        table.addRow(("axis", axis.name))
        table.addRow(("detector", counter.name))
        table.addRow(("starting position", old_position))
        for key in bec.peaks.ATTRS:
            table.addRow((key, bec.peaks[key][counter.name]))
        logger.info(f"alignment scan results:\n{table}")

        lo = bec.peaks["min"][counter.name][-1]  # [-1] means detector
        hi = bec.peaks["max"][counter.name][-1]  # [0] means axis
        fwhm = bec.peaks["fwhm"][counter.name]
        final = bec.peaks["cen"][counter.name]

        ps = list(bec._peak_stats.values())[0][counter.name]  # PeakStats object
        # get the X data range as received by PeakStats
        x_range = abs(max(ps.x_data) - min(ps.x_data))

        if final is None:
            logger.error("centroid is None")
            final = old_position
        elif fwhm is None:
            logger.error("FWHM is None")
            final = old_position
        elif hi < peak_factor * lo:
            logger.error(f"no clear peak: {hi} < {peak_factor}*{lo}")
            final = old_position
        elif fwhm > width_factor * x_range:
            logger.error(f"FWHM too large: {fwhm} > {width_factor}*{x_range}")
            final = old_position
        else:
            aligned = True

        logger.info(f"moving {axis.name} to {final}  (aligned: {aligned})")
        yield from bps.sleep(PRE_MOVE_DELAY_S)
        yield from bps.mv(axis, final)
    else:
        logger.error("no statistical analysis of scan peak!")
        yield from bps.null()

    # too sneaky?  We're modifying this structure locally
    bec.peaks.aligned = aligned
    bec.peaks.ATTRS = ('com', 'cen', 'max', 'min', 'fwhm')
def makeText(self):
    '''
    generate the text of the panel
    '''
    tbl = pyRestTable.Table()
    tbl.labels = ['GUP#', 'reviewer 1', 'reviewer 2', 'excluded reviewer(s)', 'title']
    for prop in self.agup.proposals:
        prop_id = prop.getKey('proposal_id')
        text = prop.getKey('proposal_title')
        prop_title = tools.text_encode(text).strip()
        r1, r2 = prop.getAssignedReviewers()
        r1 = r1 or ''
        r2 = r2 or ''
        excluded = prop.getExcludedReviewers(self.agup.reviewers)
        tbl.rows.append([prop_id, r1, r2, ', '.join(excluded), prop_title])
    return tbl.reST()