def get_beam_pos_report(run_dir, data):
    """Return the beam position history plot read from the EPICS monitor log.

    Raises sirepo.util.UserAlert when the log is missing or holds no usable
    history or plots.
    """
    log = run_dir.join('../epicsServerAnimation/').join(MONITOR_LOGFILE)
    if not log.exists():
        raise sirepo.util.UserAlert(
            'no beam position history',
            'monitor file={} does not exist',
            log,
        )
    readings, _, t0 = _read_monitor_file(log, True)
    if len(readings) <= 0:
        raise sirepo.util.UserAlert('no beam position history', 'history length <= 0')
    x, plots, colors = _beam_pos_plots(data, readings, t0)
    if not len(plots):
        raise sirepo.util.UserAlert('no beam position history', 'no plots')
    return template_common.parameter_plot(
        x.tolist(),
        plots,
        PKDict(),
        PKDict(
            title='',
            y_label='[m]',
            x_label='z [m]',
            summaryData={},
        ),
        colors,
    )
def _extract_report_twissEllipseReport(data, run_dir):
    """Trace a twiss phase-space ellipse for the report's dimension.

    Uses the bunch's alpha/beta and the first beam command's emittance,
    rotating the parametric ellipse by the twiss tilt angle.
    """
    #TODO(pjm): use bunch twiss values, not command_twiss values
    beam = _first_beam_command(data)
    r_model = data.models[data.report]
    dim = r_model.dim
    n_pts = 100
    # theta covers one full revolution; the (n_pts / (n_pts - 1)) factor
    # extends the range slightly so the curve closes
    theta = np.arange(0, 2. * np.pi * (n_pts / (n_pts - 1)), 2. * np.pi / n_pts)
    #TODO(pjm): get code_var value for alf, bet, d
    a = float(data.models.bunch[f'alf{dim}']) or 0
    b = float(data.models.bunch[f'bet{dim}']) or 0
    assert b > 0, f'TWISS parameter "bet{dim}" must be > 0'
    # twiss gamma from the standard identity bet * gam - alf^2 = 1
    g = (1. + a * a) / b
    # emittance defaults to 1 when unset so the ellipse still renders
    e = (beam[f'e{dim}'] or 1)
    phi = _twiss_ellipse_rotation(a, b)
    # major, minor axes of ellipse
    mj = np.sqrt(e * b)
    mn = np.sqrt(e * g)
    # apply rotation
    x = mj * np.cos(theta) * np.sin(phi) + mn * np.sin(theta) * np.cos(phi)
    y = mj * np.cos(theta) * np.cos(phi) - mn * np.sin(theta) * np.sin(phi)
    return template_common.parameter_plot(
        x.tolist(),
        [PKDict(field=dim, points=y.tolist(), label=f'{dim}\' [rad]')],
        {},
        PKDict(title=f'a{dim} = {a} b{dim} = {b} g{dim} = {g}', y_label='', x_label=f'{dim} [m]'))
def get_settings_report(run_dir, data):
    """Plot corrector setting history, ordered by time or by position.

    Raises sirepo.util.UserAlert when the monitor log is missing or no
    plots can be built from it.
    """
    log = run_dir.join('../epicsServerAnimation/').join(MONITOR_LOGFILE)
    if not log.exists():
        raise sirepo.util.UserAlert('no settings history', 'monitor file')
    readings, _, t0 = _read_monitor_file(log, True)
    order = data.models.correctorSettingReport.plotOrder
    if order is None:
        order = 'time'
    if order == 'time':
        x, plots, colors = _setting_plots_by_time(data, readings, t0)
        x_label = 't [s]'
    else:
        x, plots, colors = _setting_plots_by_position(data, readings, t0)
        x_label = 'z [m]'
    if not len(plots):
        raise sirepo.util.UserAlert('no settings history', 'no plots')
    return template_common.parameter_plot(
        x.tolist(),
        plots,
        PKDict(),
        PKDict(
            title='',
            y_label='[rad]',
            x_label=x_label,
            summaryData=PKDict(),
        ),
        colors,
    )
def sim_frame_plot2Animation(frame_args):
    """Extract the requested SDDS columns and build a parameter plot.

    Returns the column error string if any extraction fails.
    """
    from sirepo.template import sdds_util

    sdds_path = str(frame_args.run_dir.join(_OPAL_SDDS_FILE))
    x = None
    plots = []
    for axis in ('x', 'y1', 'y2', 'y3'):
        name = frame_args[axis].replace(' ', '_')
        if name == 'none':
            continue
        col = sdds_util.extract_sdds_column(sdds_path, name, 0)
        if col.err:
            return col.err
        field = PKDict(points=col['values'], label=frame_args[axis])
        _field_units(col.column_def[1], field)
        if axis == 'x':
            x = field
        else:
            plots.append(field)
    # independent reads of file may produce more columns, trim to match x length
    n = len(x.points)
    for p in plots:
        if n < len(p.points):
            p.points = p.points[:n]
    return template_common.parameter_plot(x.points, plots, {}, {
        'title': '',
        'y_label': '',
        'x_label': x.label,
    })
def extract_parameter_report(data, run_dir, filename=_TWISS_OUTPUT_FILE):
    """Build a parameter plot from a parsed TFS twiss file.

    Args:
        data: simulation data; data.models[data.report] selects x/y fields
        run_dir: directory containing the twiss output file
        filename: name of the TFS file to parse

    Returns:
        template_common.parameter_plot result; when the file carries the
        betx/bety/alfx/alfy columns, attaches initialTwissParameters.
    """
    t = madx_parser.parse_tfs_file(run_dir.join(filename))
    plots = []
    m = data.models[data.report]
    for f in ('y1', 'y2', 'y3'):
        if m[f] == 'None':
            continue
        plots.append(
            PKDict(field=m[f], points=to_floats(t[m[f]]), label=_field_label(m[f])),
        )
    # default the x axis to the longitudinal position column
    x = m.get('x') or 's'
    res = template_common.parameter_plot(
        to_floats(t[x]),
        plots,
        m,
        PKDict(
            y_label='',
            x_label=_field_label(x),
        ))
    if 'betx' in t and 'bety' in t and 'alfx' in t and 'alfy' in t:
        res.initialTwissParameters = PKDict(
            betx=t.betx[0],
            bety=t.bety[0],
            alfx=t.alfx[0],
            # bug fix: was t.alfx[0], which reported the x-plane alpha for y
            alfy=t.alfy[0],
        )
    return res
def _field_lineout_plot(sim_id, name, f_type, f_path, beam_axis, v_axis, h_axis):
    """Plot the field components transverse to the beam axis along a path.

    Extracts the field vectors computed along f_path and plots the
    horizontal and vertical components against position on the beam axis.
    """
    g_id = _get_g_id(sim_id)
    v = _generate_field_data(g_id, name, f_type, [f_path]).data[0].vectors
    # vertices is a flat [x0, y0, z0, x1, ...] list; reshape to (n, 3)
    pts = numpy.array(v.vertices).reshape(-1, 3)
    plots = []
    labels = {h_axis: 'Horizontal', v_axis: 'Vertical'}
    x = pts[:, _AXES.index(beam_axis)]
    # NOTE(review): y and z are computed but never used below
    y = pts[:, _AXES.index(h_axis)]
    z = pts[:, _AXES.index(v_axis)]
    # directions scaled by magnitudes give the field component values
    f = numpy.array(v.directions).reshape(-1, 3)
    m = numpy.array(v.magnitudes)
    for c in (h_axis, v_axis):
        plots.append(
            PKDict(
                points=(m * f[:, _AXES.index(c)]).tolist(),
                label=f'{labels[c]} ({c}) [{radia_util.FIELD_UNITS[f_type]}]',
                style='line'
            )
        )
    return template_common.parameter_plot(
        x.tolist(),
        plots,
        PKDict(),
        PKDict(
            title=f'{f_type} on {f_path.name}',
            y_label=f_type,
            x_label=f'{beam_axis} [mm]',
            summaryData=PKDict(),
        ),
    )
def get_analysis_report(run_dir, data):
    """Build the analysis plot; dispatches to fit or cluster actions."""
    report, col_info, plot_data = _report_info(run_dir, data)
    clusters = None
    action = report.action if 'action' in report else None
    if action == 'fit':
        return _get_fit_report(report, plot_data, col_info)
    if action == 'cluster':
        clusters = _compute_clusters(report, plot_data, col_info)
    x_idx = _safe_index(col_info, report.x)
    x = (plot_data[:, x_idx] * col_info['scale'][x_idx]).tolist()
    plots = []
    #TODO(pjm): determine if y2 or y3 will get used
    for f in ('y1', 'y2', 'y3'):
        if f != 'y1':
            continue
        if f not in report or report[f] == 'none':
            continue
        y_idx = _safe_index(col_info, report[f])
        y = plot_data[:, y_idx]
        if len(y) <= 0 or math.isnan(y[0]):
            continue
        plots.append({
            'points': (y * col_info['scale'][y_idx]).tolist(),
            'label': _label(col_info, y_idx),
            'style': 'line' if action == 'fft' else 'scatter',
        })
    return template_common.parameter_plot(x, plots, {}, {
        'title': '',
        'y_label': '',
        'x_label': _label(col_info, x_idx),
        'clusters': clusters,
        'summaryData': {},
    })
def sim_frame_plot2Animation(frame_args):
    """Plot selected SDDS columns from the OPAL output file."""
    path = str(frame_args.run_dir.join(_OPAL_SDDS_FILE))
    x = None
    plots = []
    for axis in ('x', 'y1', 'y2', 'y3'):
        name = frame_args[axis].replace(' ', '_')
        if name == 'none':
            continue
        col = sdds_util.extract_sdds_column(path, name, 0)
        if col.err:
            return col.err
        field = PKDict(points=col['values'], label=frame_args[axis])
        _field_units(col.column_def[1], field)
        if axis == 'x':
            x = field
        else:
            plots.append(field)
    return template_common.parameter_plot(x.points, plots, {}, {
        'title': '',
        'y_label': '',
        'x_label': x.label,
    })
def get_centroid_report(run_dir, data):
    """Plot the beam centroid (x, y) readings at one watch element.

    Reads the EPICS monitor log and plots the most recent centroid
    positions recorded at the element's z position; when no history is
    available, a single point is drawn outside the visible plot range.
    """
    report = data.models[data.report]
    monitor_file = run_dir.join('../epicsServerAnimation/').join(MONITOR_LOGFILE)
    bpms = None
    if monitor_file.exists():
        history, num_records, start_time = _read_monitor_file(monitor_file, True)
        if len(history):
            bpms = _bpm_readings_for_plots(data, history, start_time)
    x = []
    y = []
    t = []
    z = _position_of_element(data, report['_id'])
    if bpms:
        cx = bpms['x']
        cy = bpms['y']
        cz = bpms['z']
        ct = bpms['t']
        # locate this element's column by its z position
        c_idx = cz.index(z)
        for t_idx, time in enumerate(ct):
            xv = cx[t_idx][c_idx]
            yv = cy[t_idx][c_idx]
            t.append(time)
            x.append(xv)
            y.append(yv)
        #TODO(pjm): set reasonable history limit
        _MAX_BPM_POINTS = 10
        # keep only the most recent readings
        if len(t) > _MAX_BPM_POINTS:
            cutoff = len(t) - _MAX_BPM_POINTS
            t = t[cutoff:]
            x = x[cutoff:]
            y = y[cutoff:]
    else:
        # put the point outside the plot
        x = [1]
        y = [1]
    c_idx = _watch_index(data, report['_id'])
    color = _SETTINGS_PLOT_COLORS[c_idx % len(_SETTINGS_PLOT_COLORS)]
    c_mod = _hex_color_to_rgb(color)
    # NOTE(review): index 3 implies _hex_color_to_rgb returns an RGBA list;
    # confirm it actually has a 4th (alpha) component
    c_mod[3] = 0.2
    plots = [
        {
            'points': y,
            'label': 'y [m]',
            'style': 'line',
            'symbol': 'circle',
            'colorModulation': c_mod,
        },
    ]
    return template_common.parameter_plot(
        x, plots, data.models[data.report],
        {
            'title': 'z = {}m'.format(z),
            'y_label': '',
            'x_label': 'x [m]',
            'aspectRatio': 1.0,
        },
        [color])
def extract_tunes_report(run_dir, data):
    """Plot tune spectra from the tunes output file.

    Either one axis for all particles (one plot per particle) or both x/y
    axes for a single selected particle. Linear scale normalizes each plot
    to 1.0 and records the original amplitude in its label.
    """
    report = data.models.tunesReport
    assert report.turnStart < data.models.simulationSettings.npass, \
        'Turn Start is greater than Number of Turns'
    col_names, rows = _read_data_file(py.path.local(run_dir).join(_TUNES_FILE), mode='header')
    # actual columns appear at end of each data line, not in file header
    col_names = ['qx', 'amp_x', 'qy', 'amp_y', 'ql', 'amp_l', 'kpa', 'kpb', 'kt', 'nspec']
    plots = []
    x = []
    if report.particleSelector == 'all':
        axis = report.plotAxis
        title = template_common.enum_text(_SCHEMA, 'TunesAxis', axis)
        x_idx = col_names.index('q{}'.format(axis))
        y_idx = col_names.index('amp_{}'.format(axis))
        p_idx = col_names.index('nspec')
        current_p = -1
        for row in rows:
            p = row[p_idx]
            # a change in the particle id starts a new plot; x is rebuilt
            # per particle (particles are assumed to share the q values)
            if current_p != p:
                current_p = p
                plots.append(PKDict(
                    points=[],
                    label='Particle {}'.format(current_p),
                ))
                x = []
            x.append(float(row[x_idx]))
            plots[-1].points.append(float(row[y_idx]))
    else:
        title = 'Tunes, Particle {}'.format(report.particleSelector)
        for axis in ('x', 'y'):
            x_idx = col_names.index('q{}'.format(axis))
            y_idx = col_names.index('amp_{}'.format(axis))
            points = []
            for row in rows:
                # x is populated from the x axis pass only; y shares it
                if axis == 'x':
                    x.append(float(row[x_idx]))
                points.append(float(row[y_idx]))
            plots.append(PKDict(
                label=template_common.enum_text(_SCHEMA, 'TunesAxis', axis),
                points=points,
            ))
        # annotate each label with the frequency at its peak amplitude
        for plot in plots:
            plot.label += ', {}'.format(_peak_x(x, plot.points))
    if report.plotScale == 'linear':
        # normalize each plot to 1.0 and show amplitude in label
        for plot in plots:
            maxp = max(plot.points)
            if maxp != 0:
                plot.points = (np.array(plot.points) / maxp).tolist()
                plot.label += ', amplitude: {}'.format(_format_exp(maxp))
    return template_common.parameter_plot(x, plots, {}, PKDict(
        title=title,
        y_label='',
        x_label='',
    ), plot_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'])
def _extract_report_twissEllipseReport(data, run_dir):
    """Trace the twiss phase-space ellipse for the report's dimension.

    Derives the ellipse from the first twiss command's alpha/beta (and
    emittance when present) using the polar form of a rotated ellipse.
    """
    util = LatticeUtil(data, _SCHEMA)
    m = util.find_first_command(data, 'twiss')
    # must an initial twiss always exist?
    if not m:
        return template_common.parameter_plot([], [], None, PKDict())
    r_model = data.models[data.report]
    dim = r_model.dim
    plots = []
    n_pts = 200
    # theta covers a full revolution; the (n_pts / (n_pts - 1)) factor
    # extends the range slightly so the curve closes
    theta = np.arange(0, 2. * np.pi * (n_pts / (n_pts - 1)), 2. * np.pi / n_pts)
    alf = 'alf{}'.format(dim)
    bet = 'bet{}'.format(dim)
    a = float(m[alf])
    b = float(m[bet])
    # twiss gamma from the identity bet * gam - alf^2 = 1
    g = (1. + a * a) / b
    eta = 'e{}'.format(dim)
    # emittance defaults to 1.0 when the twiss command does not define it
    e = m[eta] if eta in m else 1.0
    phi = _twiss_ellipse_rotation(a, b)
    th = theta - phi
    mj = math.sqrt(e * b)
    # NOTE(review): mn = 1/mj looks like a unit-area normalization of the
    # minor axis — confirm against the intended ellipse parameterization
    mn = 1.0 / mj
    # polar radius of the axis-aligned ellipse, evaluated at rotated angle
    r = np.power(
        mn * np.cos(th) * np.cos(th) + mj * np.sin(th) * np.sin(th),
        -0.5
    )
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    p = PKDict(field=dim, points=y.tolist(), label=f'{dim}\' [rad]')
    plots.append(
        p
    )
    return template_common.parameter_plot(
        x.tolist(),
        plots,
        {},
        PKDict(
            title=f'a{dim} = {a} b{dim} = {b} g{dim} = {g}',
            y_label='',
            x_label=f'{dim} [m]'
        )
    )
def _extract_report_data(xFilename, frame_args, page_count=0):
    """Extract plot data from an SDDS file as a parameter plot or heatmap.

    Histogram files become heatmaps; everything else becomes a parameter
    plot of the y1/y2/y3 columns against the x column.
    """
    page_index = frame_args.frameIndex
    xfield = frame_args.x if 'x' in frame_args else frame_args[_X_FIELD]
    # x, column_names, x_def, err
    x_col = sdds_util.extract_sdds_column(xFilename, xfield, page_index)
    if x_col['err']:
        return x_col['err']
    x = x_col['values']
    if _is_histogram_file(xFilename, x_col['column_names']):
        yfield = frame_args['y1'] if 'y1' in frame_args else frame_args['y']
        y_col = sdds_util.extract_sdds_column(xFilename, yfield, page_index)
        if y_col['err']:
            return y_col['err']
        return template_common.heatmap(
            [x, y_col['values']],
            frame_args,
            PKDict(
                x_label=_field_label(xfield, x_col['column_def'][1]),
                y_label=_field_label(yfield, y_col['column_def'][1]),
                title=_plot_title(xfield, yfield, page_index, page_count),
            ))
    # parameter plot
    plots = []
    #TODO(pjm): y2Filename, y3Filename are not currently used. Would require rescaling x value across files.
    filename = PKDict(y1=xFilename, y2=xFilename, y3=xFilename)
    for f in ('y1', 'y2', 'y3'):
        yfield = frame_args[f]
        if re.search(r'^none$', yfield, re.IGNORECASE) or yfield == ' ':
            continue
        y_col = sdds_util.extract_sdds_column(filename[f], yfield, page_index)
        if y_col['err']:
            return y_col['err']
        plots.append(
            PKDict(
                field=yfield,
                points=y_col['values'],
                label=_field_label(yfield, y_col['column_def'][1]),
            ))
    title = 'Plot {} of {}'.format(page_index + 1, page_count) if page_count > 1 else ''
    return template_common.parameter_plot(
        x, plots, frame_args,
        PKDict(
            title=title,
            y_label='',
            x_label=_field_label(xfield, x_col['column_def'][1]),
        ))
def extract_report_data(xFilename, data, page_index, page_count=0):
    """Extract an SDDS page as a parameter plot, or a heatmap for histograms."""
    xfield = data['x'] if 'x' in data else data[_X_FIELD]
    # x, column_names, x_def, err
    x_col = sdds_util.extract_sdds_column(xFilename, xfield, page_index)
    if x_col['err']:
        return x_col['err']
    x = x_col['values']
    if _is_histogram_file(xFilename, x_col['column_names']):
        yfield = data['y1'] if 'y1' in data else data['y']
        y_col = sdds_util.extract_sdds_column(xFilename, yfield, page_index)
        if y_col['err']:
            return y_col['err']
        return template_common.heatmap(
            [x, y_col['values']],
            data,
            {
                'x_label': _field_label(xfield, x_col['column_def'][1]),
                'y_label': _field_label(yfield, y_col['column_def'][1]),
                'title': _plot_title(xfield, yfield, page_index, page_count),
            })
    # parameter plot
    plots = []
    #TODO(pjm): y2Filename, y3Filename are not currently used. Would require rescaling x value across files.
    filename = {'y1': xFilename, 'y2': xFilename, 'y3': xFilename}
    for f in ('y1', 'y2', 'y3'):
        yfield = data[f]
        if re.search(r'^none$', yfield, re.IGNORECASE) or yfield == ' ':
            continue
        y_col = sdds_util.extract_sdds_column(filename[f], yfield, page_index)
        if y_col['err']:
            return y_col['err']
        plots.append({
            'field': yfield,
            'points': y_col['values'],
            'label': _field_label(yfield, y_col['column_def'][1]),
        })
    title = 'Plot {} of {}'.format(page_index + 1, page_count) if page_count > 1 else ''
    return template_common.parameter_plot(
        x, plots, data,
        {
            'title': title,
            'y_label': '',
            'x_label': _field_label(xfield, x_col['column_def'][1]),
        })
def get_beam_pos_report(run_dir, data):
    """Build the beam position plot from the EPICS monitor log.

    Raises:
        AssertionError: when the monitor log is missing or has no history.
    """
    monitor_file = run_dir.join('../epicsServerAnimation/').join(MONITOR_LOGFILE)
    # raise explicitly rather than via assert so the validation survives
    # python -O (asserts are stripped); same exception type for callers
    if not monitor_file.exists():
        raise AssertionError('no beam position history')
    history, num_records, start_time = _read_monitor_file(monitor_file, True)
    if not len(history) > 0:
        raise AssertionError('no beam position history')
    x_label = 'z [m]'
    x, plots, colors = _beam_pos_plots(data, history, start_time)
    return template_common.parameter_plot(x.tolist(), plots, {}, {
        'title': '',
        'y_label': '',
        'x_label': x_label,
        'summaryData': {},
    }, colors)
def _sdds_report(frame_args, filename, x_field):
    """Build a parameter plot from SDDS columns for the requested fields.

    Handles the forceScale option (negating force columns and their
    precomputed ranges) and the V_trans x axis, which requires
    re-sorting the data by x value. Mutates frame_args in place.
    """
    xfield = _map_field_name(x_field)
    x_col = sdds_util.extract_sdds_column(filename, xfield, 0)
    if x_col.err:
        return x_col.err
    x = x_col['values']
    if 'fieldRange' in frame_args.sim_in.models.particleAnimation:
        frame_args.fieldRange = frame_args.sim_in.models.particleAnimation.fieldRange
    plots = []
    for f in ('y1', 'y2', 'y3'):
        if f not in frame_args or frame_args[f] == 'none':
            continue
        yfield = _map_field_name(frame_args[f])
        y_col = sdds_util.extract_sdds_column(filename, yfield, 0)
        if y_col.err:
            return y_col.err
        y = y_col['values']
        label_prefix = ''
        #TODO(pjm): the forceScale feature makes the code unmanageable
        # it might be simpler if this was done on the client
        if 'forceScale' in frame_args \
                and yfield in ('f_x', 'f_long') \
                and frame_args.forceScale == 'negative':
            y = [-v for v in y]
            label_prefix = '-'
            # negate the precomputed range to match the negated values
            if 'fieldRange' in frame_args:
                r = frame_args.fieldRange[frame_args[f]]
                frame_args.fieldRange[frame_args[f]] = [-r[1], -r[0]]
        plots.append(
            PKDict(
                field=frame_args[f],
                points=y,
                label='{}{}{}'.format(
                    label_prefix,
                    _field_label(yfield, y_col.column_def),
                    _field_description(yfield, frame_args.sim_in),
                ),
            ))
    # V_trans values are not monotonic; re-sort x and plots together
    if xfield == 'V_trans':
        x = _resort_vtrans(x, plots)
    frame_args.x = x_field
    return template_common.parameter_plot(
        x, plots, frame_args,
        PKDict(
            y_label='',
            x_label=_field_label(xfield, x_col.column_def),
        ))
def _laser_pulse_report(value_index, filename, title, label):
    """Plot one saved laser-pulse series against s (row 0 of the file)."""
    values = np.load(filename)
    plot = PKDict(points=values[value_index].tolist(), label=label)
    return template_common.parameter_plot(
        values[0].tolist(),
        [plot],
        PKDict(),
        PKDict(title=title, y_label='', x_label='s [m]'),
    )
def _get_fit_report(report, plot_data, col_info):
    """Fit the selected columns to the report equation and plot data,
    fit curve, and confidence bands."""
    x_idx = _safe_index(col_info, report.x)
    y_idx = _safe_index(col_info, report.y1)
    x_vals = plot_data[:, x_idx] * col_info['scale'][x_idx]
    y_vals = plot_data[:, y_idx] * col_info['scale'][y_idx]
    fit_x, fit_y, fit_y_min, fit_y_max, param_vals, param_sigmas, latex_label = _fit_to_equation(
        x_vals,
        y_vals,
        report.fitEquation,
        report.fitVariable,
        report.fitParameters,
    )
    plots = [
        PKDict(points=y_vals.tolist(), label='data', style='scatter'),
        PKDict(points=fit_y.tolist(), x_points=fit_x.tolist(), label='fit'),
        PKDict(points=fit_y_min.tolist(), x_points=fit_x.tolist(), label='confidence', _parent='confidence'),
        PKDict(points=fit_y_max.tolist(), x_points=fit_x.tolist(), label='', _parent='confidence'),
    ]
    return template_common.parameter_plot(
        x_vals.tolist(),
        plots,
        PKDict(),
        PKDict(
            title='',
            x_label=_label(col_info, x_idx),
            y_label=_label(col_info, y_idx),
            summaryData=PKDict(
                p_vals=param_vals.tolist(),
                p_errs=param_sigmas.tolist(),
            ),
            latex_label=latex_label,
        ),
    )
def _get_fit_report(report, plot_data, col_info):
    """Fit y1 vs x to the report equation; plot data, fit, and confidence."""
    x_idx = _safe_index(col_info, report.x)
    y_idx = _safe_index(col_info, report.y1)
    x_vals = plot_data[:, x_idx] * col_info['scale'][x_idx]
    y_vals = plot_data[:, y_idx] * col_info['scale'][y_idx]
    fit_y, fit_y_min, fit_y_max, param_vals, param_sigmas, latex_label = _fit_to_equation(
        x_vals,
        y_vals,
        report.fitEquation,
        report.fitVariable,
        report.fitParameters,
    )
    plots = [
        {'points': y_vals.tolist(), 'label': 'data', 'style': 'scatter'},
        {'points': fit_y.tolist(), 'label': 'fit'},
        {'points': fit_y_min.tolist(), 'label': 'confidence', '_parent': 'confidence'},
        {'points': fit_y_max.tolist(), 'label': '', '_parent': 'confidence'},
    ]
    return template_common.parameter_plot(x_vals.tolist(), plots, {}, {
        'title': '',
        'x_label': _label(col_info, x_idx),
        'y_label': _label(col_info, y_idx),
        'summaryData': {
            'p_vals': param_vals.tolist(),
            'p_errs': param_sigmas.tolist(),
        },
        'latex_label': latex_label,
    })
def get_analysis_report(run_dir, data):
    """Build the analysis plot; dispatches to fit or cluster actions."""
    import math
    report, col_info, plot_data = _report_info(run_dir, data)
    clusters = None
    if 'action' in report:
        if report.action == 'fit':
            return _get_fit_report(report, plot_data, col_info)
        elif report.action == 'cluster':
            clusters = _compute_clusters(report, plot_data, col_info)
    x_idx = _set_index_within_cols(col_info, report.x)
    x = (plot_data[:, x_idx] * col_info['scale'][x_idx]).tolist()
    plots = []
    for f in ('y1', 'y2', 'y3'):
        #TODO(pjm): determine if y2 or y3 will get used
        if f != 'y1':
            continue
        if f not in report or report[f] == 'none':
            continue
        y_idx = _set_index_within_cols(col_info, report[f])
        y = plot_data[:, y_idx]
        # skip empty columns or columns whose first value is NaN
        if len(y) <= 0 or math.isnan(y[0]):
            continue
        plots.append(
            PKDict(
                points=(y * col_info['scale'][y_idx]).tolist(),
                label=_label(col_info, y_idx),
                # fft output is continuous; everything else is scattered data
                style='line' if 'action' in report and report.action == 'fft' else 'scatter',
            ))
    return template_common.parameter_plot(
        x, plots, PKDict(), PKDict(
            title='',
            y_label='',
            x_label=_label(col_info, x_idx),
            clusters=clusters,
            summaryData=PKDict(),
        ),
    )
def _extract_report_twissReport(data, run_dir, filename=_TWISS_OUTPUT_FILE):
    """Plot twiss columns from the parsed TFS output against the x column."""
    table = madx_parser.parse_tfs_file(run_dir.join(filename))
    m = data.models[data.report]
    plots = [
        PKDict(field=m[f], points=_to_floats(table[m[f]]), label=_FIELD_LABEL[m[f]])
        for f in ('y1', 'y2', 'y3')
        if m[f] != 'none'
    ]
    x = m.get('x') or 's'
    return template_common.parameter_plot(
        _to_floats(table[x]),
        plots,
        m,
        PKDict(
            y_label='',
            x_label=_FIELD_LABEL[x],
        )
    )
def get_settings_report(run_dir, data):
    """Plot corrector setting history, ordered by time or by position.

    Raises:
        AssertionError: when the EPICS monitor log does not exist.
    """
    monitor_file = run_dir.join('../epicsServerAnimation/').join(MONITOR_LOGFILE)
    # raise explicitly (was "assert False") so the check survives python -O;
    # same exception type for any caller catching it
    if not monitor_file.exists():
        raise AssertionError('no settings history')
    history, num_records, start_time = _read_monitor_file(monitor_file, True)
    o = data.models.correctorSettingReport.plotOrder
    plot_order = o if o is not None else 'time'
    if plot_order == 'time':
        x, plots, colors = _setting_plots_by_time(data, history, start_time)
        x_label = 't [s]'
    else:
        x, plots, colors = _setting_plots_by_position(data, history, start_time)
        x_label = 'z [m]'
    return template_common.parameter_plot(x.tolist(), plots, {}, {
        'title': '',
        'y_label': 'rad',
        'x_label': x_label,
        'summaryData': {},
    }, colors)
def sim_frame_plotAnimation(frame_args):
    """Plot scalar attributes across all steps of the OPAL HDF5 output.

    Each selected field is "name index" (the optional second token picks
    a component of a vector attribute); values are collected step by step.
    """
    def _walk_file(h5file, key, step, res):
        # a falsy key signals the units pass rather than a data step —
        # confirm against _iterate_hdf5_steps' callback contract
        if key:
            for field in res.values():
                field.points.append(h5file[key].attrs[field.name][field.index])
        else:
            for field in res.values():
                _units_from_hdf5(h5file, field)
    res = PKDict()
    for dim in 'x', 'y1', 'y2', 'y3':
        parts = frame_args[dim].split(' ')
        if parts[0] == 'none':
            continue
        res[dim] = PKDict(
            label=frame_args[dim],
            dim=dim,
            points=[],
            name=parts[0],
            # second token selects the component of a vector attribute
            index=_DIM_INDEX[parts[1]] if len(parts) > 1 else 0,
        )
    _iterate_hdf5_steps(frame_args.run_dir.join(_OPAL_H5_FILE), _walk_file, res)
    plots = []
    for field in res.values():
        if field.dim != 'x':
            plots.append(field)
    return template_common.parameter_plot(
        res.x.points,
        plots,
        PKDict(),
        PKDict(
            title='',
            y_label='',
            x_label=res.x.label,
        ),
    )
def get_fft(run_dir, data):
    """Compute the FFT of the report's y1 column against its x column.

    Returns a parameter plot of the normalized amplitude spectrum against
    angular frequency, with detected peaks in summaryData.
    Raises sirepo.util.UserAlert when the sample period is zero.
    """
    import scipy.fftpack
    import scipy.optimize
    import scipy.signal
    data.report = _SIM_DATA.webcon_analysis_report_name_for_fft(data)
    report, col_info, plot_data = _report_info(run_dir, data)
    col1 = _safe_index(col_info, report.x)
    col2 = _safe_index(col_info, report.y1)
    t_vals = plot_data[:, col1] * col_info['scale'][col1]
    y_vals = plot_data[:, col2] * col_info['scale'][col2]
    # fft takes the y data only and assumes it corresponds to equally-spaced x values.
    fft_out = scipy.fftpack.fft(y_vals)
    num_samples = len(y_vals)
    half_num_samples = num_samples // 2
    # should all be the same - this will normalize the frequencies
    sample_period = abs(t_vals[1] - t_vals[0])
    if sample_period == 0:
        raise sirepo.util.UserAlert(
            'Data error',
            'FFT sample period could not be determined from data. Ensure x has equally spaced values',
        )
    #sample_period = numpy.mean(numpy.diff(t_vals))
    # the first half of the fft data (taking abs() folds in the imaginary part)
    y = 2.0 / num_samples * numpy.abs(fft_out[0:half_num_samples])
    # get the frequencies found
    # fftfreq just generates an array of equally-spaced values that represent the x-axis
    # of the fft of data of a given length. It includes negative values
    freqs = scipy.fftpack.fftfreq(len(fft_out), d=sample_period) #/ sample_period
    # convert to angular frequency, positive half only
    w = 2. * numpy.pi * freqs[0:half_num_samples]
    # is signal to noise useful?
    m = y.mean()
    sd = y.std()
    s2n = numpy.where(sd == 0, 0, m / sd)
    coefs = (2.0 / num_samples) * numpy.abs(fft_out[0:half_num_samples])
    # NOTE(review): peaks[0]/peaks[-1] below raise IndexError when
    # find_peaks returns no peaks (e.g. a flat signal)
    peaks, props = scipy.signal.find_peaks(coefs)
    found_freqs = [v for v in zip(peaks, numpy.around(w[peaks], 3))]
    #pkdlog('!FOUND {} FREQS {}, S2N {}, MEAN {}', len(found_freqs), found_freqs, s2n, m)
    # focus in on the peaks?
    # maybe better in browser
    bin_spread = 10
    min_bin = max(0, peaks[0] - bin_spread)
    max_bin = min(half_num_samples, peaks[-1] + bin_spread)
    # NOTE(review): yy, yy_norm and ww are computed but unused; they feed
    # only the commented-out zoomed plot below
    yy = 2.0 / num_samples * numpy.abs(fft_out[min_bin:max_bin])
    max_yy = numpy.max(yy)
    yy_norm = yy / (max_yy if max_yy != 0 else 1)
    ww = 2. * numpy.pi * freqs[min_bin:max_bin]
    #plots = [
    #    {
    #        'points': yy_norm.tolist(),
    #        'label': 'fft',
    #    },
    #]
    # normalize the full spectrum to 1.0 for plotting
    max_y = numpy.max(y)
    y_norm = y / (max_y if max_y != 0 else 1)
    plots = [PKDict(points=y_norm.tolist(), label='fft')]
    #TODO(mvk): figure out appropriate labels from input
    w_list = w.tolist()
    return template_common.parameter_plot(
        w_list, plots, PKDict(), PKDict(
            title='',
            y_label=_label(col_info, 1),
            x_label='f[Hz]',
            preserve_units=False,
            #'x_label': _label(col_info, 0) + '^-1',
            summaryData=PKDict(freqs=found_freqs, minFreq=w_list[0], maxFreq=w_list[-1]),
            #'latex_label': latex_label
        ),
    )
def get_fft(run_dir, data):
    """Compute the FFT of the report's y1 column against its x column.

    Plots the positive-frequency amplitude spectrum and reports the bins
    whose coefficient exceeds a signal-to-noise threshold in summaryData.
    """
    data.report = _analysis_report_name_for_fft_report(data.report, data)
    report, col_info, plot_data = _report_info(run_dir, data)
    col1 = _safe_index(col_info, report.x)
    col2 = _safe_index(col_info, report.y1)
    t_vals = plot_data[:, col1] * col_info['scale'][col1]
    y_vals = plot_data[:, col2] * col_info['scale'][col2]
    # fft takes the y data only and assumes it corresponds to equally-spaced x values.
    fft_out = scipy.fftpack.fft(y_vals)
    num_samples = len(y_vals)
    half_num_samples = num_samples // 2
    # should all be the same - this will normalize the frequencies
    sample_period = abs(t_vals[1] - t_vals[0])
    if sample_period == 0:
        assert False, 'FFT sample period could not be determined from data. Ensure x has equally spaced values'
    #sample_period = np.mean(np.diff(t_vals))
    # the first half of the fft data (taking abs() folds in the imaginary part)
    y = 2.0 / num_samples * np.abs(fft_out[0:half_num_samples])
    # get the frequencies found
    # fftfreq just generates an array of equally-spaced values that represent the x-axis
    # of the fft of data of a given length. It includes negative values
    freqs = scipy.fftpack.fftfreq(len(fft_out)) / sample_period
    w = freqs[0:half_num_samples]
    found_freqs = []
    # is signal to noise useful?
    m = y.mean()
    sd = y.std()
    s2n = np.where(sd == 0, 0, m / sd)
    # We'll say we found a frequency peak when the size of the coefficient divided by the average is
    # greater than this. A crude indicator - one presumes better methods exist
    found_sn_thresh = 10
    ci = 0
    max_bin = -1
    min_bin = half_num_samples
    bin_spread = 10
    # scan the positive-frequency coefficients, recording bins above threshold
    for coef, freq in zip(fft_out[0:half_num_samples], freqs[0:half_num_samples]):
        #pkdp('{c:>6} * exp(2 pi i t * {f}) : vs thresh {t}', c=(2.0 / N) * np.abs(coef), f=freq, t=(2.0 / N) * np.abs(coef) / m)
        if (2.0 / num_samples) * np.abs(coef) / m > found_sn_thresh:
            found_freqs.append((ci, freq))
            max_bin = ci
            if ci < min_bin:
                min_bin = ci
        ci += 1
    #pkdp('!FOUND FREQS {}, MIN {}, MAX {}, P2P {}, S2N {}, MEAN {}', found_freqs, min_coef, max_coef, p2p, s2n, m)
    # focus in on the peaks?
    min_bin = max(0, min_bin - bin_spread)
    max_bin = min(half_num_samples, max_bin + bin_spread)
    # NOTE(review): yy and ww are computed but unused; the full spectrum
    # (y against w) is what gets plotted below
    yy = 2.0 / num_samples * np.abs(fft_out[min_bin:max_bin])
    ww = freqs[min_bin:max_bin]
    plots = [
        {
            'points': y.tolist(),
            'label': 'fft',
        },
    ]
    #TODO(mvk): figure out appropriate labels from input
    return template_common.parameter_plot(w.tolist(), plots, {}, {
        'title': '',
        'y_label': _label(col_info, 1),
        'x_label': 'ω[s-1]',
        #'x_label': _label(col_info, 0) + '^-1',
        'summaryData': {
            'freqs': found_freqs,
        },
        #'latex_label': latex_label
    })