def _show_plot(self, data):
    """Plot the selected columns of ``data`` and open the result in a window.

    ``data`` is a sequence of columns parallel to ``self.column_names``; each
    entry in ``self.data_selectors`` names an index column, a value column, a
    fit and a plot type.  With ``self.as_series`` set, all series share one
    plot; otherwise each selector gets its own stacked plot.  If any plotted
    series carries a fit, the graph is wrapped in a StatsGraph so regression
    statistics are shown alongside it.
    """
    cd = dict(padding=5, stack_order='top_to_bottom')
    csnames = self.column_names

    # Running x-range across every plotted series.  Seed with np.inf (the
    # np.Inf alias was removed in NumPy 2.0) so any real datum replaces it.
    xmin = np.inf
    xmax = -np.inf

    if self.as_series:
        # single plot; every series lands on plotid 0
        g = RegressionGraph(container_dict=cd)
        p = g.new_plot(padding=[50, 5, 5, 50], xtitle='')
        p.value_range.tight_bounds = False
        p.value_range.margin = 0.1
    else:
        g = StackedRegressionGraph(container_dict=cd)

    regressable = False
    for i, csi in enumerate(self.data_selectors):
        if not self.as_series:
            # one stacked plot per selector
            p = g.new_plot(padding=[50, 5, 5, 50])
            p.value_range.tight_bounds = False
            p.value_range.margin = 0.1
            plotid = i
        else:
            plotid = 0

        try:
            # list.index raises ValueError for an unknown column name, the
            # data lookup raises IndexError for a short table.  Either way
            # skip the selector (best-effort plotting) rather than crash.
            x = data[csnames.index(csi.index)]
            y = data[csnames.index(csi.value)]
            xmin = min(xmin, min(x))
            xmax = max(xmax, max(x))

            fit = csi.fit if csi.fit != NULL_STR else None
            g.new_series(x, y,
                         fit=fit,
                         filter_outliers=csi.use_filter,
                         type=csi.plot_type,
                         plotid=plotid)
            g.set_x_title(csi.index, plotid=plotid)
            g.set_y_title(csi.value, plotid=plotid)
            if fit:
                regressable = True
        except (IndexError, ValueError):
            pass

    g.set_x_limits(xmin, xmax, pad='0.1')
    self._graph_count += 1

    # only wrap with the stats display when at least one series is regressed
    gii = StatsGraph(graph=g) if regressable else g
    g._update_graph()

    def show(gi):
        # cascade successive graph windows so they do not stack exactly
        gi.window_title = '{} Graph {}'.format(self.short_name, self._graph_count)
        gi.window_x = self._graph_count * 20 + 400
        gi.window_y = self._graph_count * 20 + 20
        gi.edit_traits()

    show(gii)
def _graph_linear_j(self, x, y, r, reg, refresh):
    """Graph a 1D J (flux) model: the regression ``reg`` with error bars,
    optionally overlaid with the individual analyses, and record the fitted
    J extrema on ``self.min_j``/``self.max_j``.

    ``x``, ``y``, ``r`` and ``refresh`` are accepted for signature parity
    with the other _graph_* methods; only ``reg`` is read here.
    """
    g = self.graph
    if not isinstance(g, RegressionGraph):
        # first call (or graph of the wrong kind): build a fresh graph
        g = RegressionGraph(
            container_dict={'bgcolor': self.plotter_options.bgcolor})
        self.graph = g

    po = self.plotter_options
    g.clear()

    plot = g.new_plot(padding=po.get_paddings())

    # fit flavor is driven by the configured model kind
    if po.model_kind == WEIGHTED_MEAN_1D:
        fit = 'weighted mean'
    else:
        fit = po.least_squares_fit

    _, scatter, line = g.new_series(x=reg.xs,
                                    y=reg.ys,
                                    yerror=reg.yserr,
                                    fit=fit)

    # attach y error bars as an underlay on the scatter renderer
    ebo = ErrorBarOverlay(component=scatter, orientation='y')
    scatter.underlays.append(ebo)
    scatter.error_bars = ebo

    add_inspector(scatter, self._additional_info)
    add_axes_tools(g, plot)

    g.set_x_title(po.one_d_axis)
    g.set_y_title('J')
    g.add_statistics()

    # sentinel y-limits; overwritten below when individual analyses are
    # plotted.  NOTE(review): when _individual_analyses_enabled is False the
    # limits stay (100, -1), i.e. inverted -- presumably the graph tolerates
    # this; confirm before relying on it.
    miy = 100
    may = -1
    if self._individual_analyses_enabled:
        # indices of omitted analyses become the selection metadata
        sel = [
            i for i, (a, x, y, e) in enumerate(zip(*self._analyses))
            if a.is_omitted()
        ]

        # plot the individual analyses
        iscatter, iys = self._graph_individual_analyses(fit=None,
                                                        add_tools=False)
        miy = min(iys)
        may = max(iys)

        # set metadata last because it will trigger a refresh
        self.suppress_metadata_change = True
        iscatter.index.metadata['selections'] = sel
        self.suppress_metadata_change = False

    g.set_y_limits(min_=miy, max_=may, pad='0.1')
    g.set_x_limits(pad='0.1')
    g.refresh()

    # cache the fitted-line extrema for downstream consumers
    fys = line.value.get_data()
    self.max_j = fys.max()
    self.min_j = fys.min()
def _graph_grid(self, x, y, z, ze, r, reg, refresh):
    """Graph J values on a filled grid of plots, one per monitor position.

    ``x``/``y`` are position coordinates, ``z``/``ze`` the J values and
    errors at each position.  On ``refresh`` only the cached values are
    recomputed; otherwise a new plot is created per grid cell and the
    individual analyses for that irradiation position are plotted with a
    mean fit and error bars.
    """
    self.min_j = min(z)
    self.max_j = max(z)

    g = self.graph
    layout = FigureLayout(fixed='filled_grid')
    nrows, ncols = layout.calculate(len(x))
    if not isinstance(g, Graph):
        g = RegressionGraph(container_dict={
            'bgcolor': 'gray',
            'kind': 'g',
            'shape': (nrows, ncols)
        })
        self.graph = g

    def get_ip(xi, yi):
        # nearest monitor position within 0.01 of (xi, yi), else None
        return next((ip for ip in self.monitor_positions
                     if ((ip.x - xi) ** 2 + (ip.y - yi) ** 2) ** 0.5 < 0.01),
                    None)

    opt = self.plotter_options
    monage = opt.monitor_age * 1e6  # age in years -> model_j units
    lk = opt.lambda_k
    ans = self._analyses[0]
    scale = opt.flux_scalar

    # NOTE(review): the loop variable shadows the ``r`` parameter, which is
    # not read beforehand -- appears intentional but confirm.
    for r in range(nrows):
        for c in range(ncols):
            # row-major cell index into the position/value arrays
            idx = c + ncols * r
            if refresh:
                try:
                    yy = z[idx] * scale
                    ye = ze[idx] * scale
                except IndexError:
                    # grid has more cells than positions
                    continue

                # if hasattr(g, 'rules'):
                #     if idx in g.rules:
                #         l1, l2, l3 = g.rules[idx]
                #         l1.value = yy
                #         l2.value = yy + ye
                #         l3.value = yy - ye
            else:
                plot = g.new_plot(padding_left=65,
                                  padding_right=5,
                                  padding_top=30,
                                  padding_bottom=5)
                try:
                    # get ip via x,y
                    ip = get_ip(x[idx], y[idx])
                except IndexError:
                    continue

                add_axes_tools(g, plot)

                yy = z[idx] * scale
                ye = ze[idx] * scale
                plot.title = 'Identifier={} Position={}'.format(
                    ip.identifier, ip.hole_id)
                plot.x_axis.visible = False
                # label the y axis only on the left-middle cell of the grid
                if c == 0 and r == nrows // 2:
                    plot.y_axis.title = 'J x{}'.format(scale)

                if not ip.use:
                    continue

                # analyses belonging to this irradiation position
                ais = [
                    a for a in ans if a.irradiation_position == ip.hole_id
                ]
                n = len(ais)

                # plot mean value
                # l1 = g.add_horizontal_rule(yy, color='black', line_style='solid', plotid=idx)
                # l2 = g.add_horizontal_rule(yy + ye, plotid=idx)
                # l3 = g.add_horizontal_rule(yy - ye, plotid=idx)
                # rs = (l1, l2, l3)
                # d = {idx: rs}
                # if hasattr(g, 'rules'):
                #     g.rules.update(d)
                # else:
                #     g.rules = d

                # plot individual analyses (sorted by J for display)
                fs = [a.model_j(monage, lk) * scale for a in ais]
                fs = sorted(fs)
                iys = array([nominal_value(fi) for fi in fs])
                ies = array([std_dev(fi) for fi in fs])

                if self.plotter_options.use_weighted_fit:
                    fit = 'weighted mean'
                else:
                    fit = 'average'

                ek = self.plotter_options.error_kind
                if ek == MSEM:
                    ek = 'msem'
                fit = '{}_{}'.format(fit, ek)

                p_, s, l_ = g.new_series(linspace(0, n - 1, n),
                                         iys,
                                         yerror=ies,
                                         type='scatter',
                                         fit=fit,
                                         add_point_inspector=False,
                                         add_inspector=False,
                                         marker='circle',
                                         marker_size=3)
                g.set_x_limits(0, n - 1, pad='0.1', plotid=idx)
                g.set_y_limits(min(iys - ies),
                               max(iys + ies),
                               pad='0.1',
                               plotid=idx)
                g.add_statistics(plotid=idx)

                # error bars on the per-analysis scatter
                ebo = ErrorBarOverlay(component=s, orientation='y')
                s.underlays.append(ebo)
                s.error_bars = ebo

                add_analysis_inspector(s, ais)
                s.index.on_trait_change(
                    self._grid_update_graph_metadata(ais), 'metadata_changed')

                # set selection metadata last; guard against the refresh it
                # triggers
                self.suppress_metadata_change = True
                sel = [i for i, a in enumerate(ais) if a.is_omitted()]
                s.index.metadata['selections'] = sel
                self.suppress_metadata_change = False

    g.refresh()
#=============================================================================== # Copyright 2011 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #=============================================================================== from traits.etsconfig.etsconfig import ETSConfig ETSConfig.toolkit = 'qt4' from pychron.graph.regression_graph import RegressionGraph import numpy as np if __name__ == '__main__': reg = RegressionGraph() reg.new_plot() # xs = np.linspace(0, 100, 100) # ys = xs * 0.02 + np.random.random(100) xs = [0, 1, 2, 3, 4, 5] ys = [0, 3, 5, 6, 7, 5] reg.new_series(xs, ys, fit='linear') reg.configure_traits()
def cup_deflection_calibration(self, mass):
    """Calibrate the reference detector's deflection.

    Steps the reference detector's deflection voltage from ``dc_start`` to
    ``dc_stop`` in ``dc_step`` increments.  At each step runs
    ``dc_npeak_centers`` peak centers at ``mass``, writing every scan to a
    CSV under a unique ``magfield`` data directory and accumulating the
    deflection-vs-DAC results in a regression graph.  Aborts cleanly
    whenever ``isAlive()`` goes False.
    """
    self.info('{} deflection calibration'.format(self.reference_detector))

    rgraph = RegressionGraph(window_x=100, window_y=50)
    rgraph.new_plot()
    # NOTE(review): 'yer' looks like a typo for 'yerror' -- confirm against
    # RegressionGraph.new_series before changing; left as-is here.
    rgraph.new_series(yer=[])

    root_dir = unique_dir(os.path.join(paths.data_dir, 'magfield'),
                          '{}_def_calibration'.format(self.reference_detector))

    dm = self.data_manager

    p = os.path.join(root_dir, 'defl_vs_dac.csv')
    deflection_frame_key = dm.new_frame(path=p)
    dm.write_to_frame(['Deflection (V)',
                       '40{} DAC'.format(self.reference_detector)],
                      frame_key=deflection_frame_key)

    start = self.dc_start
    stop = self.dc_stop
    width = self.dc_step
    # np.linspace requires an integer sample count; the former float
    # expression raises TypeError on modern NumPy
    nstep = int((stop - start) / width) + 1

    npeak_centers = self.dc_npeak_centers
    self.info('Deflection scan parameters start={}, stop={}, stepwidth={}, nstep={}'.format(
        start, stop, width, nstep))
    self.info('Reference detector {}'.format(self.reference_detector))
    self.info('Peak centers per step n={}'.format(npeak_centers))

    for i, ni in enumerate(np.linspace(start, stop, nstep)):
        if not self.isAlive():
            break

        self.info('Deflection step {} {} (V)'.format(i + 1, ni))
        self._detectors[self.reference_detector].deflection = ni

        ds = []
        for n in range(npeak_centers):
            if not self.isAlive():
                break

            self.info('Peak center ni = {}'.format(n + 1))
            p = os.path.join(root_dir,
                             'peak_scan_{:02d}_{:02d}.csv'.format(int(ni), n))
            dm.new_frame(path=p)
            dm.write_to_frame(['DAC (V)', 'Intensity (fA)'])

            graph = Graph(window_title='Peak Centering',
                          window_x=175 + i * 25 + n * 5,
                          window_y=25 + i * 25 + n * 5)

            self.peak_center(graph=graph,
                             update_mftable=True,
                             update_pos=False,
                             center_pos=mass)

            if self.isAlive():
                # write scan to file
                dm.write_to_frame(
                    list(zip(graph.get_data(), graph.get_data(axis=1))))

                if npeak_centers > 1:
                    if not self.simulation:
                        time.sleep(1)

                if self.peak_center_results:
                    d = (ni, self.peak_center_results[0][1])
                    ds.append(self.peak_center_results[0][1])
                    dm.write_to_frame(list(d), frame_key=deflection_frame_key)

                # write the centering results to the centering file
                dm.write_to_frame([('#{}'.format(x), y)
                                   for x, y in zip(graph.get_data(series=1),
                                                   graph.get_data(series=1, axis=1))])

        if self.peak_center_results:
            rgraph.add_datum((ni, np.mean(ds), np.std(ds)))

        if i == 2:
            invoke_in_main_thread(rgraph.edit_traits)

        # delay so we can view graph momentarily
        if not self.simulation and self.isAlive():
            time.sleep(2)

    self.info('deflection calibration finished')
# =============================================================================== # Copyright 2011 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== from traits.etsconfig.etsconfig import ETSConfig ETSConfig.toolkit = 'qt4' from pychron.graph.regression_graph import RegressionGraph if __name__ == '__main__': reg = RegressionGraph() reg.new_plot() # xs = np.linspace(0, 100, 100) # ys = xs * 0.02 + np.random.random(100) xs = [0, 1, 2, 3, 4, 5] ys = [0, 3, 5, 6, 7, 5] reg.new_series(xs, ys, fit='linear') reg.configure_traits()
def _make_correlation(self, idx, ytitle):
    """Open a correlation view plotting age against the ``idx``-th aux
    plot's value for every figure's analysis group."""
    first = self.figures[0]
    aux_plots = list(first.options.get_plotable_aux_plots())
    tag = aux_plots[idx].plot_name

    nfigs = len(self.figures)
    nr, nc = filled_grid(nfigs)
    graph = RegressionGraph(container_dict={
        'kind': 'g',
        'shape': (nr, nc)
    }, window_title='Correlation')

    for pid, figure in enumerate(self.figures):
        group = figure.analysis_group

        # one plot per figure, titled by sample/identifier
        graph.new_plot(xtitle='age',
                       ytitle=ytitle,
                       title='{}({})'.format(group.sample, group.identifier))

        xs = [nominal_value(a.uage) for a in group.clean_analyses()]
        ys = [nominal_value(a.get_value(tag)) for a in group.clean_analyses()]

        graph.new_series(xs, ys,
                         fit='linear',
                         use_error_envelope=False,
                         plotid=pid)
        graph.add_correlation_statistics(plotid=pid)
        graph.set_x_limits(pad='0.1', plotid=pid)
        graph.set_y_limits(pad='0.1', plotid=pid)

    graph.refresh()
    open_view(graph)
def cup_deflection_calibration(self, mass):
    """Calibrate the reference detector's deflection.

    Steps the reference detector's deflection voltage from ``dc_start`` to
    ``dc_stop`` in ``dc_step`` increments.  At each step runs
    ``dc_npeak_centers`` peak centers at ``mass``, writing every scan to a
    CSV under a unique ``magfield`` data directory and accumulating the
    deflection-vs-DAC results in a regression graph.  Aborts cleanly
    whenever ``isAlive()`` goes False.
    """
    self.info('{} deflection calibration'.format(self.reference_detector))

    rgraph = RegressionGraph(window_x=100, window_y=50)
    rgraph.new_plot()
    # NOTE(review): 'yer' looks like a typo for 'yerror' -- confirm against
    # RegressionGraph.new_series before changing; left as-is here.
    rgraph.new_series(yer=[])

    root_dir = unique_dir(
        os.path.join(paths.data_dir, 'magfield'),
        '{}_def_calibration'.format(self.reference_detector))

    dm = self.data_manager

    p = os.path.join(root_dir, 'defl_vs_dac.csv')
    deflection_frame_key = dm.new_frame(path=p)
    dm.write_to_frame(
        ['Deflection (V)', '40{} DAC'.format(self.reference_detector)],
        frame_key=deflection_frame_key)

    start = self.dc_start
    stop = self.dc_stop
    width = self.dc_step
    # np.linspace requires an integer sample count; the former float
    # expression raises TypeError on modern NumPy
    nstep = int((stop - start) / width) + 1

    npeak_centers = self.dc_npeak_centers
    self.info(
        'Deflection scan parameters start={}, stop={}, stepwidth={}, nstep={}'
        .format(start, stop, width, nstep))
    self.info('Reference detector {}'.format(self.reference_detector))
    self.info('Peak centers per step n={}'.format(npeak_centers))

    for i, ni in enumerate(np.linspace(start, stop, nstep)):
        if not self.isAlive():
            break

        self.info('Deflection step {} {} (V)'.format(i + 1, ni))
        self._detectors[self.reference_detector].deflection = ni

        ds = []
        for n in range(npeak_centers):
            if not self.isAlive():
                break

            self.info('Peak center ni = {}'.format(n + 1))
            p = os.path.join(
                root_dir, 'peak_scan_{:02d}_{:02d}.csv'.format(int(ni), n))
            dm.new_frame(path=p)
            dm.write_to_frame(['DAC (V)', 'Intensity (fA)'])

            graph = Graph(window_title='Peak Centering',
                          window_x=175 + i * 25 + n * 5,
                          window_y=25 + i * 25 + n * 5)

            self.peak_center(graph=graph,
                             update_mftable=True,
                             update_pos=False,
                             center_pos=mass)

            if self.isAlive():
                # write scan to file
                dm.write_to_frame(
                    list(zip(graph.get_data(), graph.get_data(axis=1))))

                if npeak_centers > 1:
                    if not self.simulation:
                        time.sleep(1)

                if self.peak_center_results:
                    d = (ni, self.peak_center_results[0][1])
                    ds.append(self.peak_center_results[0][1])
                    dm.write_to_frame(list(d), frame_key=deflection_frame_key)

                # write the centering results to the centering file
                dm.write_to_frame([
                    ('#{}'.format(x), y)
                    for x, y in zip(graph.get_data(series=1),
                                    graph.get_data(series=1, axis=1))
                ])

        if self.peak_center_results:
            rgraph.add_datum((ni, np.mean(ds), np.std(ds)))

        if i == 2:
            invoke_in_main_thread(rgraph.edit_traits)

        # delay so we can view graph momentarily
        if not self.simulation and self.isAlive():
            time.sleep(2)

    self.info('deflection calibration finished')
def _make_correlation(self, refplot, xtitle):
    """Open a correlation view plotting ``refplot``'s y-data against every
    other stacked plot's y-data."""
    first = self.figures[0]
    naux = len(list(first.options.get_plotable_aux_plots()))
    stacked = list(reversed(first.graph.plots))
    xs = refplot.data.get_data('y1')

    # one grid cell per non-reference plot
    nr, nc = filled_grid(naux - 1)
    graph = RegressionGraph(container_dict={
        'kind': 'g',
        'shape': (nr, nc)
    }, window_title='Correlation')

    # skip the reference plot itself; correlate everything else against it
    targets = [p for p in stacked if p.y_axis.title != xtitle]
    for pid, plot in enumerate(targets):
        graph.new_plot(xtitle=xtitle,
                       ytitle=plot.y_axis.title,
                       padding=[80, 10, 10, 40])

        ys = plot.data.get_data('y1')
        graph.new_series(xs, ys,
                         fit='linear',
                         use_error_envelope=False,
                         plotid=pid)
        graph.add_correlation_statistics(plotid=pid)
        graph.set_x_limits(pad='0.1', plotid=pid)
        graph.set_y_limits(pad='0.1', plotid=pid)

    graph.refresh()
    open_view(graph)