def create_report(G, constraint_stats=False):
    r = Report(G.graph['name'])
    f = r.figure("Graph plots")
    report_add_coordinates_and_edges(r, 'graph', G, f,
                                     plot_edges=True, plot_vertices=True)
    report_add_coordinates_and_edges(r, 'graph-edges', G, f,
                                     plot_edges=True, plot_vertices=False)
    report_add_coordinates_and_edges(r, 'graph-vertices', G, f,
                                     plot_edges=False, plot_vertices=True)
    r.text('node_statistics', graph_degree_stats(G))

    if constraint_stats:
        f = r.figure("Constraints statistics")
        print('Creating statistics')
        stats = graph_errors(G, G)
        print(' (done)')
        report_add_distances_errors_plot(r, nid='statistics', stats=stats, f=f)
        r.text('constraints_stats', graph_errors_print('constraints', stats))
    return r
def report_results_single(func, objspec_name, results):
    def get_string_result(res):
        if res is None:
            s = 'ok'
        elif isinstance(res, Skipped):
            s = 'skipped'
        elif isinstance(res, PartiallySkipped):
            parts = res.get_skipped_parts()
            s = 'no ' + ','.join(parts)
        else:
            print('how to interpret %s? ' % describe_value(res))
            s = '?'
        return s

    r = Report()
    if not results:
        r.text('warning', 'no test objects defined')
        return r

    rows = []
    data = []
    for id_object, res in list(results.items()):
        rows.append(id_object)
        data.append([get_string_result(res)])
    r.table('summary', rows=rows, data=data)
    return r
def table_by_rows(id_report, samples, rows_field, cols_fields, source_descs):
    samples2 = StoreResultsDict(samples)

    class Missing(dict):
        def __missing__(self, key):
            logger.warning('Description for %r missing.' % key)
            d = WithDescription(name=key, symbol='\\text{%s}' % key, desc=None)
            self[key] = d
            return d

    source_descs = Missing(source_descs)
    r = Report(id_report)
    data_views = [DataView.from_string(x, source_descs) for x in cols_fields]

    # data: list of list of list
    rows_field, data, reduced, display = summarize_data(samples2, rows_field,
                                                        data_views)
    rows = ['$%s$' % source_descs[x].get_symbol() for x in rows_field]
    cols = ['$%s$' % x.get_symbol() for x in data_views]
    r.table('table', data=display, cols=cols, rows=rows)
    r.data('table_data', data=reduced,
           caption="Data without presentation applied.")
    r.data('table_data_source', data=data,
           caption="Source data, before reduction.")

    row_desc = "\n".join(['- $%s$: %s' % (x.get_symbol(), x.get_desc())
                          for x in map(source_descs.__getitem__, rows_field)])
    col_desc = "\n".join(['- $%s$: %s' % (x.get_symbol(), x.get_desc())
                          for x in data_views])
    r.text('row_desc', rst_escape_slash(row_desc), mime=MIME_RST)
    r.text('col_desc', rst_escape_slash(col_desc), mime=MIME_RST)
    return r
def get_optim_state_report(s, opt):
    r = Report()
    from mcdp_opt_tests.test_basic import plot_ndp
    plot_ndp(r, 'current', s.get_current_ndp(), opt.library)
    r.text('order', 'creation order: %s' % s.creation_order)
    r.text('msg', s.get_info())
    return r
class ReprepPublisher(Publisher):

    default_max_cols = 5

    def __init__(self, rid=None, report=None, cols=default_max_cols):
        # TODO: clear up this interface
        if report is None:
            self.r = Report(rid)
        else:
            self.r = report
        self.cols = cols
        self._f = None

    def fig(self):
        ''' Returns a reference to the current RepRep figure. '''
        if self._f is None:
            self._f = self.r.figure(cols=self.cols)
        return self._f

    @contract(name='str', value='array', caption='None|str')
    def array(self, name, value, caption=None):
        # XXX to change
        self.r.data(name, value, mime=MIME_PYTHON, caption=caption)

    @contract(name='str', value='array', filter='str', caption='None|str')
    def array_as_image(self, name, value,
                       filter='posneg',  # @ReservedAssignment  # XXX: config
                       filter_params={},
                       caption=None):
        # try image  XXX check uint8
        # If this is RGB
        if len(value.shape) == 3 and value.shape[2] == 3:
            # zoom images smaller than 50
            # if value.shape[0] < 50:
            #     value = zoom(value, 10)
            self.fig().data_rgb(name, value, caption=caption)
        else:
            node = self.r.data(name, value, mime=MIME_PYTHON, caption=caption)
            m = node.display(filter, **filter_params)
            if caption is None:
                caption = name
            self.fig().sub(m, caption=caption)

    @contract(name='str', value='str')
    def text(self, name, value):
        self.r.text(name, value)

    @contextmanager
    @contract(name='str', caption='None|str')
    def plot(self, name, caption=None, **args):
        f = self.fig()
        # TODO: make a child of myself
        with f.plot(name, caption=caption, **args) as pylab:
            yield pylab

    def section(self, section_name, cols=default_max_cols, caption=None):
        child = self.r.node(section_name, caption=caption)
        return ReprepPublisher(report=child, cols=cols)
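# A minimal usage sketch for the ReprepPublisher class above. The report id,
# function name, and output filename here are hypothetical; it assumes reprep
# and numpy are installed and that ReprepPublisher is importable. Only methods
# defined by the class are exercised.
def _reprep_publisher_usage_sketch():
    import numpy as np

    pub = ReprepPublisher('demo')
    pub.text('intro', 'Example publisher output')
    # Non-RGB arrays go through Report.data() plus a display filter.
    pub.array_as_image('noise', np.random.rand(64, 64))
    # plot() is a context manager yielding a pylab-like object.
    with pub.plot('sine') as pylab:
        x = np.linspace(0, 1, 100)
        pylab.plot(x, np.sin(2 * np.pi * x))
    # section() wraps a child node in a new ReprepPublisher.
    sub = pub.section('details', caption='A child section')
    sub.text('note', 'Nested content goes here.')
    pub.r.to_html('reprep_publisher_demo.html')  # underlying reprep Report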
def create_report_delayed(exp_id, delayed, description):
    delays = numpy.array(sorted(delayed.keys()))

    r = Report(exp_id)
    r.text("description", description)
    f = r.figure(cols=3)

    # max and sum of correlation for each delay
    # corr_max = []
    corr_mean = []

    for delay in delays:
        data = delayed[delay]
        a = data["action_image_correlation"]
        id = "delay%d" % delay
        # rr = r.node('delay%d' % delay)
        r.data(id, a).data_rgb("retina", add_reflines(posneg(values2retina(a))))
        corr_mean.append(numpy.abs(a).mean())
        caption = "delay: %d (max: %.3f, sum: %f)" % (delay,
                                                      numpy.abs(a).max(),
                                                      numpy.abs(a).sum())
        f.sub(id, caption=caption)

    timestamp2ms = lambda x: x * (1.0 / 60) * 1000

    peak = numpy.argmax(corr_mean)
    peak_ms = timestamp2ms(delays[peak])

    with r.data_pylab("mean") as pylab:
        T = timestamp2ms(delays)
        pylab.plot(T, corr_mean, "o-")
        pylab.ylabel("mean correlation field")
        pylab.xlabel("delay (ms) ")

        a = pylab.axis()
        pylab.plot([0, 0], [a[2], a[3]], "k-")

        y = a[2] + (a[3] - a[2]) * 0.1
        pylab.text(+5, y, "causal", horizontalalignment="left")
        pylab.text(-5, y, "non causal", horizontalalignment="right")

        pylab.plot([peak_ms, peak_ms], [a[2], max(corr_mean)], "b--")

        y = a[2] + (a[3] - a[2]) * 0.2
        pylab.text(peak_ms + 10, y, "%d ms" % peak_ms,
                   horizontalalignment="left")

    f = r.figure("stats")
    f.sub("mean")

    a = delayed[int(delays[peak])]["action_image_correlation"]
    r.data_rgb("best_delay", add_reflines(posneg(values2retina(a))))

    return r
def stat_report(stats_def, stats):
    # pdb.set_trace()
    report = Report('OnlinePlanning_statistics')
    report.text('summary', 'Result report for online planning')

    for job in stats_def:
        function = eval(job['type'])
        job['args']['plot_id'] = job['id']
        function(report, stats, **job['args'])

    return report
def report_learner(id_report, learner):
    r = Report(id_report)
    if learner is None:
        msg = 'Not displaying %r because it is not initialized.' % id_report
        logger.info(msg)
        r.text('notice', 'Not initialized')
    else:
        learner.display(r)
    return r
def report_example(param2, samples):
    print('report_example(%s, %s)' % (param2, samples))
    if param2 == -1:
        print('generating exception')
        raise Exception('fake exception')
    r = Report()
    r.text('samples', str(samples))
    print('creating report')
    return r
def create_report(self):
    report = Report('OnlinePlanning')
    report.text('summary', 'Result report for online planning')

    # Plot images
    for job in self.plots['line_graph_mean']:
        graph_errorbar(report, self.all_stats, job['x_axis'],
                       job['function'], job['categorize'])

    filename = '/home/adam/public_html/testrep.html'
    report.to_html(filename)
def get_agent_report_from_state(agent, state, progress):
    rid = "%s-%s-%s" % (state.id_agent, state.id_robot, progress)
    report = Report(rid)
    stats = "Num episodes: %s\nNum observations: %s" % (len(state.id_episodes),
                                                        state.num_observations)
    report.text("learning_statistics", stats)
    agent.publish(report)
    return report
def create_report_execution(exc_id, tcid, tc, algo_class, algo_params, results):
    r = Report(exc_id)
    f = r.figure('misc', cols=3)

    for w in ['gstats', 'lgstats']:
        if w in results:
            r.text(w, graph_errors_print(w, results[w]))

    G = tc.G
    landmarks = results['landmarks']
    G_all = results.get('G_all', None)
    G_landmarks = results.get('G_landmarks', None)
    lgstats = results.get('lgstats', None)

    if G_landmarks is not None:
        print('plotting landmark positions %s' % G_landmarks.number_of_nodes())
        report_add_coordinates_and_edges(r, 'G_landmarks', G=G_landmarks, f=f,
                                         caption='landmarks positions')
        if lgstats is not None:
            report_add_distances_errors_plot(r, nid='lgstats',
                                             stats=lgstats, f=f)
    else:
        print("could not find G_landmarks")

    if G_all is not None:
        for u, v in G.edges():
            G_all.add_edge(u, v, **G[u][v])
        print('plotting full solution %s' % G_all.number_of_nodes())
        report_add_coordinates_and_edges(r, 'G_all', G=G_all,
                                         landmarks=landmarks,
                                         # plot_edges=True,
                                         f=f,
                                         caption='all nodes positions')
        report_add_distances_errors_plot(r, nid='gstats',
                                         stats=results['gstats'], f=f)
    else:
        print("could not find G_all")

    r.text('phases_as_text', results['phases_as_text'])
    return r
def display_current_results(learner, name, dirname, iteration):
    dds = learner.summarize(prefix=name)
    r = Report('%s-it%s' % (name, iteration))
    r.text('summary', 'Iteration: %s' % iteration)

    base = '%s-current.html' % (name)
    filename = os.path.join(dirname, 'iterations', base)

    # TODO: add file
    f = '/opt/EPD/7.3/lib/python2.7/site-packages/PIL/Images/lena.jpg'
    lena = imread(f)
    image = UncertainImage(lena)
    dds.display(r, image)

    logger.info('Writing to %r.' % filename)
    r.to_html(filename)
def create_report(outdir, combination_id, saccades):
    r = Report(combination_id)
    stats = 'Combination %r has %d saccades' % (combination_id, len(saccades))
    r.text('stats', stats)

    desc = ""
    # r.add_child(create_report_subset(combination_id, desc, saccades))
    # r.add_child(create_report_randomness(combination_id, desc, saccades))
    r.add_child(create_report_axis_angle(combination_id, desc, saccades))

    rd = os.path.join(outdir, 'images')
    out = os.path.join(outdir, 'combinations', '%s.html' % combination_id)
    print('Writing to %r' % out)
    r.to_html(out, resources_dir=rd)
def __init__(self):
    report = Report('id', caption='env1d')
    self.N = 1000
    self.res = 10
    x = np.linspace(0, 10, self.N * self.res)
    self.E = scipy.convolve(np.random.ranf(len(x)),
                            np.ones(self.res * 20) / self.res * 20,
                            mode='same')
    plot_env(report, x, self.E)

    self.commands = [-2.5, 2.0]
    self.n_sampels = [0, 0]
    self.sensels = [30, 31]
    self.state = self.N // 2
    self.plot_y = False
    self.plot_e = False

    self.size = 60
    self.area = 9
    self.s = range(self.size)
    self.clean()

    lsize = 20
    sensor_noise = 0
    actuator_noise = 0
    self.run_learning(lsize, actuator_noise=actuator_noise,
                      sensor_noise=sensor_noise)
    report.text('info0', ('Learning size: \t\t%g \nActuator noise: \t%g ' +
                          '\nSensor noise: \t\t%g') %
                (lsize, actuator_noise, sensor_noise))
    report.text('commands', str(self.commands))
    self.summarize(report, 0)

    self.state = self.N // 2
    self.clean()
    lsize = 100
    sensor_noise = 0
    actuator_noise = 2
    self.run_learning(lsize, actuator_noise=actuator_noise,
                      sensor_noise=sensor_noise)
    report.text('info1', ('Learning size: \t\t%g \nActuator noise: \t%g ' +
                          '\nSensor noise: \t\t%g') %
                (lsize, actuator_noise, sensor_noise))
    self.summarize(report, 1)

    self.state = self.N // 2
    self.clean()
    # lsize = 1000
    sensor_noise = 2
    actuator_noise = 0
    self.run_learning(lsize, actuator_noise=actuator_noise,
                      sensor_noise=sensor_noise)
    report.text('info2', ('Learning size: \t\t%g \nActuator noise: \t%g ' +
                          '\nSensor noise: \t\t%g') %
                (lsize, actuator_noise, sensor_noise))
    self.summarize(report, 2)

    report.to_html('env1d.html')
def test_consistency_uncertainty():
    contracts.disable_all()
    symdds = 'sym-dpchain1-120'
    print('instancing dds %s' % symdds)
    dds = get_conftools_discdds().instance(symdds)
    shape = dds.get_shape()
    d1f = dds.actions[0].get_diffeo2d_forward()
    d1b = dds.actions[0].get_diffeo2d_backward()
    fb = Diffeomorphism2D.compose(d1f, d1b)
    bf = Diffeomorphism2D.compose(d1b, d1f)
    identity = Diffeomorphism2D.identity(shape)
    print(Diffeomorphism2D.distance_L2_infow(d1f, identity))
    print(Diffeomorphism2D.distance_L2_infow(d1b, identity))
    print(Diffeomorphism2D.distance_L2_infow(fb, identity))
    print(Diffeomorphism2D.distance_L2_infow(bf, identity))

    action = dds.actions[0]
    action2 = consistency_based_uncertainty(action, None)

    r = Report(symdds)
    r.text('symdds', symdds)
    with r.subsection('action') as sub:
        action.display(sub)
    with r.subsection('action2') as sub:
        action2.display(sub)

    # with r.subsection('misc') as sub:
    #     d = d1f.get_discretized_diffeo()
    #     f = sub.figure()
    #     f.array_as_image('d0', d[:, :, 0])
    #     f.array_as_image('d1', d[:, :, 1])
    #
    # with r.subsection('d1f') as sub:
    #     d1f.display(sub)
    # with r.subsection('d1b') as sub:
    #     d1b.display(sub)
    #
    # with r.subsection('fb') as sub:
    #     fb.display(sub)
    # with r.subsection('bf') as sub:
    #     bf.display(sub)

    r.to_html('test_consistency_uncertainty.html')
def visualize_result(id_tc, id_algo, stats):
    """ Returns a report. """
    result = stats['result']
    r = Report('%s-%s' % (id_tc, id_algo))
    # tc = config.testcases.instance(id_tc)
    # discdds = config.discdds.instance(tc.id_discdds)
    algo = stats['algo']
    tc = stats['tc']
    discdds = algo.get_dds()

    tc.display(r.section('testcase'), discdds=discdds)

    if not result.success:
        r.text('warning', 'Planning unsuccessful')
    else:
        rsol = r.section('solution')
        rsol.text('plan', 'Plan: %s' % str(result.plan))

        y0 = tc.y0
        y1 = tc.y1
        y1plan = discdds.predict(y0, result.plan)
        mismatch = np.abs(y1.get_values() - y1plan.get_values()).sum(axis=2)

        f = rsol.figure(cols=4)
        zoom = lambda x: rgb_zoom(x, 8)

        f.data_rgb('y1plan', zoom(y1plan.get_rgb()),
                   caption='plan prediction (certain)')
        f.data_rgb('y1plan_certain', zoom(y1plan.get_rgb_uncertain()),
                   caption='certainty of prediction')
        f.data_rgb('mismatch', zoom(scale(mismatch)),
                   caption='Mismatch value pixel by pixel '
                           '(zero for synthetic testcases...)')

    algo.plan_report(r.section('planning'), result, tc)

    extra = result.extra
    write_log_lines(r, extra)
    return r
def make_report(learners):
    print('make_report(learners) in diffeomorphism2d_continuous is used')
    for i, name in enumerate(learners):
        # init report
        report = Report(learners[i])

        with open(name, 'rb') as fp:
            learner = pickle.load(fp)
        diffeo = learner.estimators[0].summarize()
        learner.estimators[0].show_areas(report, diffeo.d)
        cmd = learner.command_list[0]
        # pdb.set_trace()
        report.text('learner' + str(i), name)
        report.text('cmd' + str(i), str(cmd))
        diffeo.display(report, nbins=500)

        # Save report
        report.to_html(learners[i] + '.html')
def report_results_pairs(func, objspec1_name, objspec2_name, results):
    reason2symbol = {}

    def get_string_result(res):
        if res is None:
            s = 'ok'
        elif isinstance(res, Skipped):
            s = 'skipped'
            reason = res.get_reason()
            if reason not in reason2symbol:
                reason2symbol[reason] = len(reason2symbol) + 1
            s += '(%s)' % reason2symbol[reason]
        elif isinstance(res, PartiallySkipped):
            parts = res.get_skipped_parts()
            s = 'no ' + ','.join(parts)
        else:
            print('how to interpret %s? ' % describe_value(res))
            s = '?'
        return s

    r = Report()
    if not results:
        r.text('warning', 'no test objects defined')
        return r

    rows = sorted(set([a for a, _ in results]))
    cols = sorted(set([b for _, b in results]))
    data = [[None for a in range(len(cols))] for b in range(len(rows))]
    # a nice bug: data = [[None] * len(cols)] * len(rows)

    for ((i, id_object1), (j, id_object2)) in itertools.product(enumerate(rows),
                                                                enumerate(cols)):
        res = results[(id_object1, id_object2)]
        data[i][j] = get_string_result(res)

    r.table('summary', rows=rows, data=data, cols=cols)

    expl = ""
    for reason, symbol in list(reason2symbol.items()):
        expl += '(%s): %s\n' % (symbol, reason)
    r.text('notes', expl)

    return r
def report_traj_sample(rid, rows):
    r = Report(rid)
    print(rows.dtype)
    x = rows['position'][:, 0]
    y = rows['position'][:, 1]
    f = r.figure()
    with f.plot('xy') as pl:
        pl.plot(x, y, '.', markersize=0.8)
        center = [0.18, 0.45]
        plot_arena_with_circles(pl, center, radius=1, col='g-')
        pl.axis('equal')
    r.text('length', '%s' % len(rows))
    return r
def visualize_result(config, id_tc, id_algo, stats):
    """ Returns a report. """
    set_current_config(config)
    result = stats["result"]
    r = Report("%s-%s" % (id_tc, id_algo))
    # tc = config.testcases.instance(id_tc)
    # discdds = config.discdds.instance(tc.id_discdds)
    algo = stats["algo"]
    tc = stats["tc"]
    discdds = algo.get_dds()

    tc.display(r.section("testcase"), discdds=discdds)

    if not result.success:
        r.text("warning", "Planning unsuccessful")
    else:
        rsol = r.section("solution")
        rsol.text("plan", "Plan: %s" % str(result.plan))

        y0 = tc.y0
        y1 = tc.y1
        y1plan = discdds.predict(y0, result.plan)
        mismatch = np.abs(y1.get_values() - y1plan.get_values()).sum(axis=2)

        f = rsol.figure(cols=4)
        zoom = lambda x: rgb_zoom(x, 8)

        f.data_rgb("y1plan", zoom(y1plan.get_rgb()),
                   caption="plan prediction (certain)")
        f.data_rgb("y1plan_certain", zoom(y1plan.get_rgb_uncertain()),
                   caption="certainty of prediction")
        f.data_rgb("mismatch", zoom(scale(mismatch)),
                   caption="Mismatch value pixel by pixel "
                           "(zero for synthetic testcases...)")

    algo.plan_report(r.section("planning"), result, tc)

    extra = result.extra
    write_log_lines(r, extra)
    return r
def run_report(stat):
    # def write_report(result, metric):
    report = Report('OnlinePlanningTest')
    report.text('summary', 'Visualization of online planning test')

    labels = stat.labels
    for key in labels.keys():
        report.text(key, str(labels[key]))

    images = {'y_start': stat.y_start,
              'y_goal': stat.y_goal,
              'y_result': stat.y_result,
              'y_pred': stat.y_goal_pred,
              'y_found_pred': stat.y_found_pred}
    keys = ['y_start', 'y_goal', 'y_result', 'y_pred', 'y_found_pred']

    # Plot images
    f = report.figure(cols=len(images))
    for key in keys:
        with f.plot(key, caption=key) as pylab:
            pylab.imshow(images[key].get_rgb(), interpolation='nearest')

    data = []
    for key in keys:
        yr = images[key]
        this_row = []
        for key2 in keys:
            yc = images[key2]
            yr_yc = stat.metric_goal.distance(yr, yc)
            this_row.append(yr_yc)
        data.append(this_row)
    # Use the same ordering for headers as for the rows/columns of data.
    report.table('table', data=data, cols=keys, rows=keys,
                 caption='Distances')
    return report
def allformats_report(id_ndp, ndp, libname, which):
    from mcdp_web.images.images import get_mime_for_format
    r = Report(id_ndp + '-' + which)
    from mcdp_library_tests.tests import get_test_library
    library = get_test_library(libname)
    mf = MakeFiguresNDP(ndp=ndp, library=library, yourname=id_ndp)
    formats = mf.available_formats(which)
    try:
        res = mf.get_figure(which, formats)
    except DPSemanticError as e:
        if 'Cannot abstract' in str(e):
            r.text('warning', 'Not connected. \n\n %s' % e)
            return r
        raise  # re-raise unexpected semantic errors
    print('%s -> %s %s ' % (which, formats,
                            list(map(len, [res[f] for f in formats]))))
    fig = r.figure()
    for f in formats:
        data = res[f]
        mime = get_mime_for_format(f)
        dn = DataNode(f, data=data, mime=mime)
        fig.add_child(dn)
    return r
def report_mdp_display(mdp):
    r = Report()
    if not is_uniform(mdp):
        r.text('warn', 'Cannot create the simulation because not all actions'
                       ' are available in all states.')
        return r
    states = list(mdp.states())
    actions = all_actions(mdp)
    p = {states[0]: 1.0}
    N = 10
    plan = [actions[j] for j in np.random.randint(0, len(actions) - 1, N)]
    f = r.figure()
    for i, a in enumerate(plan):
        with f.plot('p%d' % i) as pylab:
            mdp.display_state_dist(pylab, p)
        p = mdp.evolve(p, a)
    return r
def allformats_report(id_ndp, ndp, libname, which):
    from mcdp_web.images.images import get_mime_for_format
    from mcdp_library_tests.tests import get_test_library
    r = Report(id_ndp + '-' + which)
    library = get_test_library(libname)
    image_source = ImagesFromPaths(library.get_images_paths())
    mf = MakeFiguresNDP(ndp=ndp, image_source=image_source, yourname=id_ndp)
    formats = mf.available_formats(which)
    try:
        res = mf.get_figure(which, formats)
    except DPSemanticError as e:
        if 'Cannot abstract' in str(e):
            r.text('warning', 'Not connected. \n\n %s' % e)
            return r
        raise  # re-raise unexpected semantic errors
    print('%s -> %s %s ' % (which, formats,
                            list(map(len, [res[f] for f in formats]))))
    fig = r.figure()
    for f in formats:
        data = res[f]
        mime = get_mime_for_format(f)
        dn = DataNode(f, data=data, mime=mime)
        fig.add_child(dn)
    return r
def report_dp1(dp, imp=None):
    r = Report()
    gg = dp_graph_flow(dp, imp=imp)
    gg_figure(r, 'graph', gg)
    r.text('long', dp.repr_long())

    # try:
    #     S, alpha, beta = dp.get_normal_form()
    #
    #     s = ""
    #     s += 'S: %s' % S
    #     s += '\nα: %s' % alpha
    #     s += '\nβ: %s' % beta
    #
    #     r.text('normalform', s)
    #     r.text('tree_long', dp.tree_long())
    # except Exception as e:
    #     warnings.warn('Normal form not implemented %s' % e)

    M = dp.get_imp_space()
    r.text('I', str(M))

    if False:
        R = dp.get_res_space()
        F = dp.get_fun_space()
        Rinf = R.get_top()
        Fbot = F.get_bottom()

        if M == PosetProduct((R_dimensionless,)):
            s = ""
            ms = [0.0, 0.25, 0.5, 0.75, 1.0]
            for m in ms:
                feasible = dp.is_feasible(Fbot, (m,), Rinf)
                s += '\n m = %s = %s' % (m, feasible)
            r.text('scalarres', s)
        else:
            m = M.witness()
            print(Fbot, m, Rinf)
            feasible = dp.is_feasible(Fbot, m, Rinf)
            r.text('some', 'bot feasible( %s, %s,%s): %s'
                   % (Fbot, m, Rinf, feasible))
    return r
def create_report_subset(id, desc, saccades):
    report = Report('subset_' + id)
    report.text('description',
                '''%s\n%d saccades total.''' % (desc, len(saccades)))
    # f = report.figure(cols=3)

    saccade_angle = saccades['saccade_angle']
    approach_angle = saccades['approach_angle']

    with report.data_pylab('distance_from_center') as pylab:
        distance = saccades['distance_from_center']
        pylab.hist(distance, 100)
        pylab.xlabel('meters')
        pylab.ylabel('number of saccades')
        pylab.title('Distance from center (%s)' % id)
        a = pylab.axis()
        pylab.axis([0, 1, 0, a[3]])
    # report.last().add_to(f)

    with report.data_pylab('distance_from_wall') as pylab:
        distance = saccades['distance_from_wall']
        pylab.hist(distance, 100)
        pylab.xlabel('meters')
        pylab.ylabel('number of saccades')
        pylab.title('Distance from wall (%s)' % id)
        a = pylab.axis()
        pylab.axis([0, 1, 0, a[3]])
    # report.last().add_to(f)

    with report.data_pylab('saccade_angle') as pylab:
        pylab.hist(saccade_angle, range(-180, 185, 5))
        pylab.xlabel('degrees')
        pylab.ylabel('number of saccades')
        pylab.title('Saccade angle (%s)' % id)
        a = pylab.axis()
        pylab.axis([-180, 180, 0, a[3]])
    # report.last().add_to(f)

    with report.data_pylab('approach_angle') as pylab:
        pylab.hist(approach_angle, range(-60, 65, 5))
        pylab.xlabel('degrees')
        pylab.ylabel('number of saccades')
        pylab.title('Approach angle (%s)' % id)
        a = pylab.axis()
        pylab.axis([-60, 60, 0, a[3]])
    # report.last().add_to(f)

    with report.data_pylab('approach_vs_saccade') as pylab:
        pylab.plot(approach_angle, saccade_angle, '.')
        pylab.xlabel('approach angle (deg)')
        pylab.ylabel('saccade angle (deg)')
        pylab.title('Approach vs saccade angle (%s)' % id)
        a = pylab.axis()
        pylab.axis([-60, 60, -180, 180])
    # report.last().add_to(f)

    # compute probability
    approach, probability_left, probability_right, margin_left, margin_right = \
        compute_turning_probability(approach_angle=approach_angle,
                                    saccade_angle=saccade_angle)

    with report.data_pylab('turning_probability') as pylab:
        n = len(approach)
        el = np.zeros((2, n))
        el[0, :] = +(margin_left[0, :] - probability_left)
        el[1, :] = -(margin_left[1, :] - probability_left)
        pylab.errorbar(approach, probability_left, el, None, None,
                       ecolor='g', label='left', capsize=8, elinewidth=1)
        er = np.zeros((2, n))
        er[0, :] = +(margin_right[0, :] - probability_right)
        er[1, :] = -(margin_right[1, :] - probability_right)
        pylab.errorbar(approach, probability_right, er, None, None,
                       ecolor='r', label='right', capsize=8, elinewidth=1)
        pylab.plot(approach, probability_left, 'g-', label='left')
        pylab.plot(approach, probability_right, 'r-', label='right')
        pylab.xlabel('approach angle (deg)')
        pylab.ylabel('probability of turning')
        pylab.title('Probability of turning (%s)' % id)
        a = pylab.axis()
        pylab.plot([0, 0], [0.2, 0.8], 'k--')
        pylab.axis([-60, 60, 0, 1])
        pylab.legend()
    # report.last().add_to(f)

    bin_size = 10
    saccade_bin_centers = np.array(range(-180, 185, bin_size))
    n = len(saccade_bin_centers)
    saccade_bins = np.zeros(shape=(n + 1))
    saccade_bins[0:n] = saccade_bin_centers - bin_size
    saccade_bins[n] = saccade_bin_centers[-1]

    bin_centers = np.array(range(-50, 55, 5))
    bin_size = 15
    distributions = []
    for angle in bin_centers:
        indices, = np.nonzero(np.logical_and(approach_angle > angle - bin_size,
                                             approach_angle < angle + bin_size))
        x = saccade_angle[indices]
        if len(indices) > 0:
            # Otherwise histogram divides by 0
            hist, edges = np.histogram(x, bins=saccade_bins, normed=True)  # @UnusedVariable
        else:
            hist, edges = np.histogram(x, bins=saccade_bins, normed=False)  # @UnusedVariable
        distributions.append(hist)

    with report.data_pylab('distribution_vs_approach2') as pylab:
        for k in range(len(bin_centers)):
            label = '%d' % bin_centers[k]
            pylab.plot(saccade_bin_centers, distributions[k], '-', label=label)
        # a = pylab.axis()
        pylab.legend()
        pylab.xlabel('saccade angle')
        pylab.ylabel('density')

    with report.data_pylab('distribution_vs_approach',
                           figsize=(8, 20)) as pylab:
        # get the maximum density
        # max_density = max(map(max, distributions))
        num_plots = len(bin_centers)
        for k in range(num_plots):
            rect = [0.1, k * 1.0 / num_plots, 0.8, 1.0 / num_plots]
            pylab.axes(rect)
            label = '%d' % bin_centers[k]
            pylab.plot(saccade_bin_centers, distributions[k], '-', label=label)
            # pylab.axis([-180, 180, 0, max_density])
            # a = pylab.axis()
            pylab.legend()
            pylab.xlabel('saccade angle')
            pylab.ylabel('density')

    f = report.figure(cols=3)
    f.sub('distance_from_center', caption='Distance from center')
    f.sub('saccade_angle', caption='Saccade angle')
    f.sub('approach_angle', caption='Approach angle')
    f.sub('approach_vs_saccade', caption='Approach vs saccade angle')
    f.sub('turning_probability', caption='Probability of turning')
    f.sub('distribution_vs_approach2',
          caption='Saccade distribution vs approach angle')
    f.sub('distribution_vs_approach',
          caption='Saccade distribution vs approach angle')
    return report
def report_results_pairs_jobs(context, func, objspec1_name, objspec2_name, jobs):
    """ This version gets the job IDs. """
    reason2symbol = {}

    def get_string_result(res):
        if res is None:
            s = 'ok'
        elif isinstance(res, Skipped):
            s = 'skipped'
            reason = res.get_reason()
            if reason not in reason2symbol:
                reason2symbol[reason] = len(reason2symbol) + 1
            s += '(%s)' % reason2symbol[reason]
        elif isinstance(res, PartiallySkipped):
            parts = res.get_skipped_parts()
            s = 'no ' + ','.join(parts)
        else:
            print('how to interpret %s? ' % describe_value(res))
            s = '?'
        return s

    r = Report()
    if not jobs:
        r.text('warning', 'no test objects defined')
        return r

    rows = sorted(set([a for a, _ in jobs]))
    cols = sorted(set([b for _, b in jobs]))
    data = [[None for a in range(len(cols))] for b in range(len(rows))]
    # a nice bug: data = [[None] * len(cols)] * len(rows)

    db = context.get_compmake_db()

    comb = itertools.product(enumerate(rows), enumerate(cols))
    for ((i, id_object1), (j, id_object2)) in comb:
        job_id = jobs[(id_object1, id_object2)]
        cache = get_job_cache(job_id, db)
        if cache.state == Cache.DONE:
            res = get_job_userobject(job_id, db)
            s = get_string_result(res)
        elif cache.state == Cache.FAILED:
            s = 'FAIL'
        elif cache.state == Cache.BLOCKED:
            s = 'blocked'
        # elif cache.state == Cache.IN_PROGRESS:
        #     s = '(in progress)'
        elif cache.state == Cache.NOT_STARTED:
            s = ' '
        else:
            s = '?'  # any other state (e.g. in progress)
        data[i][j] = s

    r.table('summary', rows=rows, data=data, cols=cols)

    expl = ""
    for reason, symbol in list(reason2symbol.items()):
        expl += '(%s): %s\n' % (symbol, reason)
    r.text('notes', expl)
    return r
def report_example1(param1, param2):
    r = Report()
    r.text('type', 'This is one report')
    r.text('param1', '%s' % param1)
    r.text('param2', '%s' % param2)
    return r
def report_example2(param1, param2):
    r = Report()
    r.text('type', 'This is another report')
    r.text('param1', '%s' % param1)
    r.text('param2', '%s' % param2)
    return r
def report_stats(records, id_ddss, id_streams, id_distances):
    r = Report('precision-stats')
    r.data('records', records)

    colors = list(islice(cycle(['r', 'g', 'b', 'k', 'y', 'm']), 50))
    perc = 10
    W = 0.2

    for i, id_dds in enumerate(id_ddss):
        r.text('dds%s' % i, id_dds)

    streams_sets = generate_stream_sets(id_streams)

    for stream_set, id_distance in itertools.product(streams_sets, id_distances):
        f = r.figure(cols=2)

        with f.plot('distance_legend',
                    caption='Streams: %s, Distance: %s'
                            % (stream_set['label'], id_distance),
                    **dp_predstats_fig) as pylab:
            ax = pylab.subplot(111)
            for i, id_dds in enumerate(id_ddss):
                which = (records['id_discdds'] == id_dds).astype('int')
                for id_stream in stream_set['id_streams']:
                    which += (records['id_stream'] == id_stream).astype('int')
                which = (which / (len(stream_set['id_streams']) + 1)).astype('bool')

                delta = records[which]['delta']
                distance = records[which][id_distance]

                step = float(i) / max(len(id_ddss) - 1, 1)
                xstep = W * 2 * (step - 0.5)
                fancy_error_display(ax, delta + xstep, distance,
                                    colors[i], perc=perc, label='%s' % i)

        with f.plot('distance',
                    caption='Streams: %s, Distance: %s'
                            % (stream_set['label'], id_distance),
                    **dp_predstats_fig) as pylab:
            ax = pylab.subplot(111)
            for i, id_dds in enumerate(id_ddss):
                which = (records['id_discdds'] == id_dds).astype('int')
                for id_stream in stream_set['id_streams']:
                    which += (records['id_stream'] == id_stream).astype('int')
                which = (which / (len(stream_set['id_streams']) + 1)).astype('bool')

                delta = records[which]['delta']
                distance = records[which][id_distance]

                step = float(i) / max(len(id_ddss) - 1, 1)
                xstep = W * 2 * (step - 0.5)
                fancy_error_display(ax, delta + xstep, distance,
                                    colors[i], perc=perc, label='%s' % i)
            legend_put_below(ax)

        with f.plot('difference',
                    caption='Difference from learner_0, Streams: %s, Distance: %s'
                            % (stream_set['label'], id_distance),
                    **dp_predstats_fig) as pylab:
            ax = pylab.subplot(111)
            which0 = (records['id_discdds'] == id_ddss[0])
            _ = records[which0]['delta']  # delta0
            distance0 = records[which0][id_distance]
            for i, id_dds in enumerate(id_ddss[1:]):
                which = (records['id_discdds'] == id_dds).astype('int')
                for id_stream in stream_set['id_streams']:
                    which += (records['id_stream'] == id_stream).astype('int')
                which = (which / (len(stream_set['id_streams']) + 1)).astype('bool')

                delta = records[which]['delta']
                distance = records[which][id_distance]
                difference = distance0 - distance

                step = float(i) / max(len(id_ddss) - 1, 1)
                xstep = W * 2 * (step - 0.5)
                fancy_error_display(ax, delta + xstep, difference,
                                    colors[i + 1], perc=perc, label='%s' % i)
    return r
def main():
    if not vehicles_has_cairo:
        logger.error('This program cannot be run if Cairo is not installed.')
        return

    from vehicles_cairo import (vehicles_cairo_display_pdf,
                                vehicles_cairo_display_png,
                                vehicles_cairo_display_svg)

    parser = OptionParser(usage=usage)
    parser.disable_interspersed_args()

    parser.add_option("--vehicle", default='d_SE2_rb_v-rf360',
                      help="ID vehicle [%default].")
    parser.add_option("--world", default='stochastic_box_10',
                      help="ID world [%default].")
    parser.add_option("-n", default=1, type='int',
                      help="number of simulations [%default].")
    parser.add_option("--outdir", "-o", default='display_demo',
                      help="output directory [%default]")
    parser.add_option("--figsize", default=10, type='float',
                      help="figsize (inches) [%default]")
    parser.add_option("-z", "--zoom", default=0, type='float',
                      help="zoom in meters; 0 for full view [%default]")
    parser.add_option("-g", "--grid", default=1, type='float',
                      help="grid size in meters; 0 for no grid [%default]")
    parser.add_option("--cairo", default=False, action='store_true')
    parser.add_option("--seed", default=None, type='int')

    (options, args) = parser.parse_args()
    if args:
        raise Exception()  # no positional arguments expected

    id_vehicle = options.vehicle
    id_world = options.world

    logger.info('id_vehicle: %s' % id_vehicle)
    logger.info(' id_world: %s' % id_world)

    if options.seed is None:
        options.seed = np.random.randint(1000000)

    np.random.seed(seed=options.seed)
    logger.info('Using seed %s (your lucky number is %s)'
                % (options.seed, np.random.randint(1000)))

    vehicle = VehiclesConfig.vehicles.instance(id_vehicle)  # @UndefinedVariable
    world = VehiclesConfig.worlds.instance(id_world)  # @UndefinedVariable
    simulation = VehicleSimulation(vehicle, world)

    from reprep import Report, MIME_PDF, MIME_PNG, MIME_SVG

    basename = 'display-%s-%s' % (id_vehicle, id_world)
    r = Report(basename)
    r.text('seed', 'Seed = %s' % options.seed)

    for i in range(options.n):
        sec = r.node('simulation%d' % i)
        f = sec.figure()

        simulation.new_episode()
        simulation.compute_observations()
        sim_state = simulation.to_yaml()

        plot_params = dict(grid=options.grid,
                           zoom=options.zoom,
                           show_sensor_data=True)

        # with f.plot('start', figsize=(options.figsize,
        #                               options.figsize)) as pylab:
        #     display_all(pylab, sim_state, **plot_params)

        with f.data_file('start_cairo_png', MIME_PNG) as filename:
            vehicles_cairo_display_png(filename, width=800, height=800,
                                       sim_state=sim_state, **plot_params)

        with f.data_file('start_cairo_pdf', MIME_PDF) as filename:
            vehicles_cairo_display_pdf(filename, width=800, height=800,
                                       sim_state=sim_state, **plot_params)

        with f.data_file('start_cairo_svg', MIME_SVG) as filename:
            vehicles_cairo_display_svg(filename, width=800, height=800,
                                       sim_state=sim_state, **plot_params)

    filename = os.path.join(options.outdir, 'index.html')
    logger.info('Writing to %r.' % filename)
    r.to_html(filename)
def empty_report():
    report = Report('OnlinePlanning')
    report.text('summary', 'Empty report')
    return report
def testText(self):
    r = Report("test")
    r.text("ciao", "come va?")
def testText2(self):
    r = Report("test")
    r.text("ciao", "come va?", MIME_PLAIN)
def make_rep1(title):
    report = Report(title)
    report.text('Summary', 'Test report 1')
    return report
# Merge img to one channel
gray = np.mean(img, 2)

# Calculate differences
# pdb.set_trace()
for i in range(len(ovars)):
    [S, ker, _] = ovars[i]
    Si = convolve2d(gray, ker, boundary='wrap', mode='same')
    S += Si
n += 1

# Refresh counter
print('Bag Processed')
# pdb.set_trace()

# Initiate report
report = Report('ImageStatistics')
report.text('summary', 'Statistics of images in processed bag')

vmax = 20
vmin = -20

for [S, _, name] in ovars:
    # Calculate mean value from sum
    Sn = S[2:-2, 2:-2] / n

    # Estimate parameters and plot analytic prob function
    # gkde = stats.gaussian_kde(Sn.flatten())
    # S_clip = np.zeros(Sn.shape)
    # S_clip[Sn < vmax] = Sn[Sn < vmax]
    # S_clip[Sn > vmin] = Sn[Sn > vmin]
    # pdb.set_trace()
def report_class1(ob1):
    from reprep import Report
    r = Report()
    r.text('ob1', ob1)
    return r
def report_class2(ob2):
    from reprep import Report
    r = Report()
    r.text('ob2', ob2)
    return r
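# A minimal end-to-end sketch of the reprep Report pattern shared by the
# functions above: create a Report, attach text/figure/data/table nodes, then
# write HTML. The report id and output filename are hypothetical; it assumes
# reprep and numpy are installed. Only calls already used in this file appear.
def _report_pattern_sketch():
    import numpy as np
    from reprep import Report

    r = Report('pattern-demo')
    r.text('summary', 'Demonstrates text, figure, data, and table nodes.')

    f = r.figure(cols=2)
    with f.plot('parabola', caption='y = x^2') as pylab:
        x = np.linspace(-1, 1, 50)
        pylab.plot(x, x ** 2)

    r.data('raw_x', np.linspace(-1, 1, 50), caption='Raw sample points.')
    r.table('results', data=[['ok'], ['skipped']],
            rows=['case1', 'case2'], cols=['result'])

    r.to_html('pattern_demo.html')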