def validate_report(file_name):
    """Validate a Report JSON file for structural completeness.

    Checks that the report, its tables/columns, and its plot groups/plots
    have titles/headers, that table columns are the same length, and that
    referenced plot images and thumbnails exist on disk (paths are resolved
    relative to the report file's directory).

    All problems are collected and raised together as a single ValueError;
    on success the loaded report object is returned.

    :param file_name: path to the Report JSON file
    :return: the loaded report object
    :raises ValueError: listing every validation problem found (one per line)
    """
    errors = []
    base_path = os.path.dirname(file_name)
    r = load_report_from_json(file_name)
    if r.title is None:
        errors.append("Report {i} is missing a title".format(i=r.id))
    for t in r.tables:
        if t.title is None:
            errors.append("Table {r}.{t} is missing a title".format(
                r=r.id, t=t.id))
        for col in t.columns:
            if col.header is None:
                errors.append("Column {r}.{t}.{c} is missing a header".format(
                    r=r.id, t=t.id, c=col.id))
        # all columns in a table must have the same number of values
        lengths = {len(col.values) for col in t.columns}
        if len(lengths) != 1:
            errors.append("Inconsistent column sizes in table {r}.{t}: {s}".format(
                r=r.id, t=t.id,
                s=",".join(str(x) for x in sorted(lengths))))
    for pg in r.plotGroups:
        if pg.title is None:
            errors.append("Plot group {r}.{g} is missing a title".format(
                r=r.id, g=pg.id))
        for plot in pg.plots:
            if plot.image is None:
                # FIX: previously fell through to os.path.join(base_path, None),
                # raising TypeError and masking the collected errors
                errors.append("Plot {r}.{g}.{p} does not have an image".format(
                    r=r.id, g=pg.id, p=plot.id))
            else:
                img_path = os.path.join(base_path, plot.image)
                if not os.path.exists(img_path):
                    errors.append(
                        "The plot image {f} does not exist".format(f=img_path))
            # plot thumbnails are optional; only validated when present
            if plot.thumbnail is not None:
                thumbnail = os.path.join(base_path, plot.thumbnail)
                if not os.path.exists(thumbnail):
                    # FIX: message previously reported img_path instead of the
                    # thumbnail path
                    errors.append(
                        "The thumbnail image {f} does not exist".format(
                            f=thumbnail))
        if pg.thumbnail is not None:
            thumbnail = os.path.join(base_path, pg.thumbnail)
            if not os.path.exists(thumbnail):
                # FIX: message previously reported img_path (possibly unbound
                # here) instead of the plot-group thumbnail path
                errors.append(
                    "The thumbnail image {f} does not exist".format(
                        f=thumbnail))
    if errors:
        raise ValueError("\n".join(errors))
    return r
def _validate_against_spec(self, path):
    """Validate a Report JSON file against its registered report spec.

    FIX: the signature was ``(path)`` but the body uses ``self.INTERNAL_REPORTS``,
    ``self._specs`` and ``self.fail`` — this is a TestCase method and would
    raise NameError without ``self``.

    :param path: path to the Report JSON file
    :return: the result of ``spec.validate_report`` for the loaded report
    :raises unittest.SkipTest: for internal report types or when no specs
        are loaded
    """
    # always load the Report JSON to make sure it is valid with respect to
    # the Report core schema
    rpt = load_report_from_json(path)
    if rpt.id in self.INTERNAL_REPORTS:
        raise unittest.SkipTest(
            "Ignoring internal report type '{}'".format(rpt.id))
    if self._specs is None:
        raise unittest.SkipTest("Can't find report specs.")
    spec = self._specs.get(rpt.id, None)
    if spec is None:
        self.fail("No spec found for report {r}".format(r=rpt.id))
    else:
        return spec.validate_report(rpt)
def validate_report(file_name):
    """Validate a Report JSON file for structural completeness.

    Checks report/table/column/plot-group titles and headers, consistent
    column lengths, and existence of plot images and thumbnails (resolved
    relative to the report file's directory). All problems are collected and
    raised as one ValueError; the loaded report is returned on success.

    :param file_name: path to the Report JSON file
    :return: the loaded report object
    :raises ValueError: listing every validation problem found (one per line)
    """
    errors = []
    base_path = os.path.dirname(file_name)
    r = load_report_from_json(file_name)
    if r.title is None:
        errors.append("Report {i} is missing a title".format(i=r.id))
    for t in r.tables:
        # FIX: placeholders like "{r.t}" performed attribute lookup ".t" on
        # the id string (AttributeError); use separate "{r}.{t}" fields as in
        # the other validate_report variant
        if t.title is None:
            errors.append("Table {r}.{t} is missing a title".format(
                r=r.id, t=t.id))
        for col in t.columns:
            if col.header is None:
                errors.append("Column {r}.{t}.{c} is missing a header".format(
                    r=r.id, t=t.id, c=col.id))
        # all columns in a table must have the same number of values
        lengths = {len(col.values) for col in t.columns}
        if len(lengths) != 1:
            errors.append(
                "Inconsistent column sizes in table {r}.{t}: {s}".format(
                    r=r.id, t=t.id,
                    s=",".join(str(x) for x in sorted(lengths))))
    for pg in r.plotGroups:
        if pg.title is None:
            errors.append("Plot group {r}.{g} is missing a title".format(
                r=r.id, g=pg.id))
        for plot in pg.plots:
            if plot.image is None:
                # FIX: previously fell through to os.path.join(base_path, None),
                # raising TypeError and masking the collected errors
                errors.append("Plot {r}.{g}.{p} does not have an image".format(
                    r=r.id, g=pg.id, p=plot.id))
            else:
                img_path = os.path.join(base_path, plot.image)
                if not os.path.exists(img_path):
                    errors.append(
                        "The plot image {f} does not exist".format(f=img_path))
            # plot thumbnails are optional; only validated when present
            if plot.thumbnail is not None:
                thumbnail = os.path.join(base_path, plot.thumbnail)
                if not os.path.exists(thumbnail):
                    # FIX: message previously reported img_path instead of the
                    # thumbnail path
                    errors.append(
                        "The thumbnail image {f} does not exist".format(
                            f=thumbnail))
        if pg.thumbnail is not None:
            thumbnail = os.path.join(base_path, pg.thumbnail)
            if not os.path.exists(thumbnail):
                # FIX: message previously reported img_path (possibly unbound
                # here) instead of the plot-group thumbnail path
                errors.append(
                    "The thumbnail image {f} does not exist".format(
                        f=thumbnail))
    if errors:
        raise ValueError("\n".join(errors))
    return r
def test_datastore_report_file_uuid(self):
    """Test that the DataStore file and the Underlying Report have the same UUID"""
    datastore = DataStore.load_from_json(_to_ds_json(self.job_dir))
    # only Report-typed datastore files are subject to the UUID convention
    report_files = [
        f for f in datastore.files.values()
        if f.file_type_id == FileTypes.REPORT.file_type_id
    ]
    if not report_files:
        raise unittest.SkipTest(
            "Warning. No Report JSON files in datastore.")
    for ds_file in report_files:
        report = load_report_from_json(ds_file.path)
        emsg = "{p}: {u1} != {u2}".format(
            p=ds_file.path, u1=report.uuid, u2=ds_file.uuid)
        # by convention the DS UUID and the Report UUID should the same value
        self.assertEqual(report.uuid, ds_file.uuid, emsg)
def loadRtcs(cls):
    """Populate class-level lists of resolved tool contracts and runnable tasks.

    Reads the workflow's report-tasks.json, extracts the 'task_id' column of
    the 'tasks' table, and for each task directory found under
    ``<job_dir>/tasks`` loads its resolved-tool-contract.json and
    runnable-task.json into ``cls.resolved_tool_contracts`` and
    ``cls.runnable_tasks`` respectively. Tasks whose directory or RTC file is
    missing are skipped (a warning is logged for a missing RTC).

    :param cls: test-case class providing ``job_dir``; mutated in place
    """
    cls.tasks_dir = op.join(cls.job_dir, "tasks")
    cls.resolved_tool_contracts = []
    cls.runnable_tasks = []
    tasks_rpt = op.join(cls.job_dir, "workflow", "report-tasks.json")
    rpt = load_report_from_json(tasks_rpt)
    table = {t.id: t for t in rpt.tables}['tasks']
    tasks = {c.id: c.values for c in table.columns}['task_id']
    for task_id_str in tasks:
        fields = task_id_str.split()
        # second whitespace-separated field holds the namespaced task name;
        # the final ':'-separated component is the directory name
        task_name = fields[1].split(":")[-1]
        task_dir = op.join(cls.tasks_dir, task_name)
        if not op.isdir(task_dir):
            continue
        task_id, job_id = task_name.split("-")
        rtc_json = op.join(task_dir, "resolved-tool-contract.json")
        if not op.isfile(rtc_json):
            # FIX: log.warn is deprecated in favor of log.warning
            log.warning("Can't find %s" % rtc_json)
            continue
        rtc = load_resolved_tool_contract_from(rtc_json)
        cls.resolved_tool_contracts.append(rtc)
        rt_json = op.join(task_dir, "runnable-task.json")
        rt = RunnableTask.from_manifest_json(rt_json)
        cls.runnable_tasks.append(rt)
def _args_to_render_report(args):
    """Render the report at args.report_path to HTML at args.output_file.

    Uses the extras-enabled writer when args.with_extras is set.
    """
    if args.with_extras:
        writer = R.write_report_with_html_extras
    else:
        writer = R.write_report_to_html
    report = load_report_from_json(args.report_path)
    return writer(report, args.output_file)
def _validate_report(path):
    """Validate the file resource, then confirm it parses as a Report JSON.

    Returns the result of validate_file; the report load is only a
    parse/schema check whose result is discarded.
    """
    validated_path = validate_file(path)
    load_report_from_json(path)
    return validated_path
def _to_report(name):
    """Load the named serialized Report JSON fixture and return the report."""
    json_path = get_data_file_from_subdir(_SERIALIZED_JSON_DIR, name)
    log.info("loading json report from {f}".format(f=json_path))
    return load_report_from_json(json_path)
def _get_report_uuid(path):
    """Get UUID from the file resource or return None"""
    report = load_report_from_json(path)
    return report.uuid