def ImportTestcasesFromDirectory(
  session: db.session_t, testcases_dir: pathlib.Path
) -> None:
  """Import Testcases from a directory of protos.

  Args:
    session: A database session.
    testcases_dir: Directory containing (only) Testcase protos.
  """
  files_to_delete = []
  last_commit_time = time.time()
  if not testcases_dir.is_dir():
    app.Fatal("directory %s does not exist", testcases_dir)
  for path in progressbar.ProgressBar()(testcases_dir.iterdir()):
    deeplearning.deepsmith.testcase.Testcase.FromFile(session, path)
    files_to_delete.append(path)
    app.Log(1, "Imported testcase %s", path)
    # Commit at most once every ten seconds so that an interrupted import
    # loses only a few seconds of work.
    if time.time() - last_commit_time > 10:
      session.commit()
      if FLAGS.delete_after_import:
        for path in files_to_delete:
          path.unlink()
      files_to_delete = []
      last_commit_time = time.time()
      app.Log(1, "Committed database")
  session.commit()
  if FLAGS.delete_after_import:
    for path in files_to_delete:
      path.unlink()
def ImportResultsFromDirectory(
  session: db.session_t, results_dir: pathlib.Path
) -> None:
  """Import Results from a directory of protos.

  Args:
    session: A database session.
    results_dir: Directory containing (only) Result protos.
  """
  files_to_delete = []
  last_commit_time = time.time()
  if not results_dir.is_dir():
    logging.fatal('directory %s does not exist', results_dir)
  for path in progressbar.ProgressBar()(results_dir.iterdir()):
    deeplearning.deepsmith.result.Result.FromFile(session, path)
    files_to_delete.append(path)
    logging.info('Imported result %s', path)
    # Commit at most once every ten seconds so that an interrupted import
    # loses only a few seconds of work.
    if time.time() - last_commit_time > 10:
      session.commit()
      if FLAGS.delete_after_import:
        for path in files_to_delete:
          path.unlink()
      files_to_delete = []
      last_commit_time = time.time()
      logging.info('Committed database')
  session.commit()
  if FLAGS.delete_after_import:
    for path in files_to_delete:
      path.unlink()
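# --- Hypothetical usage sketch; not part of the original module. ---
# Both import functions above share the same pattern: walk a directory of
# proto files, add each one to the session, and commit every ~10 seconds so
# that an interrupted run loses at most a few seconds of work. The driver
# below is a minimal sketch of how they might be wired together;
# MakeDatabaseSession() is an assumed, hypothetical factory standing in for
# however this project actually constructs its db.session_t.
def _SketchImportMain(testcases_dir: pathlib.Path,
                      results_dir: pathlib.Path) -> None:
  session = MakeDatabaseSession()  # Assumption: project-specific factory.
  try:
    ImportTestcasesFromDirectory(session, testcases_dir)
    ImportResultsFromDirectory(session, results_dir)
    session.commit()
  finally:
    session.close()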
def PrintRandomResult(session: db.session_t) -> None:
  """Pretty print a random result.

  Args:
    session: A database session.
  """
  query = session.query(deeplearning.deepsmith.result.Result)
  result = _SelectRandomRow(query)
  print(pbutil.PrettyPrintJson(result.ToProto()))
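# --- Hypothetical helper sketch; not part of the original module. ---
# PrintRandomResult() depends on a _SelectRandomRow() helper that is not shown
# in this excerpt. One plausible implementation using only standard SQLAlchemy
# Query methods (count / offset / first) is sketched below; the real helper
# may differ.
import random  # Stdlib; imported here only for this illustrative sketch.

def _SelectRandomRowSketch(query):
  """Return a uniformly random row from a SQLAlchemy query, or None if empty."""
  num_rows = query.count()
  if not num_rows:
    return None
  return query.offset(random.randrange(num_rows)).first()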
def DifftestTestcase(s: db.session_t, t: testcase.Testcase,
                     outdir: pathlib.Path) -> None:
  """Difftest a testcase."""
  results = list(s.query(result.Result).filter(result.Result.testcase == t))
  for r in results:
    r.output_class = GetResultOutputClass(r)
  majority = GetMajorityOutput(results)

  def OutputPath(result_class: str) -> pathlib.Path:
    try:
      if r.testbed.opts['opencl_opt'] == 'enabled':
        opt = '+'
      elif r.testbed.opts['opencl_opt'] == 'disabled':
        opt = '-'
      else:
        raise KeyError
    except KeyError:
      raise LookupError(str(r.testbed))
    testbeds = sorted(x[0] for x in s.query(testbed.Testbed.name))
    dir = outdir / result_class / str(testbeds.index(r.testbed.name)) / opt
    dir.mkdir(parents=True, exist_ok=True)
    return dir / (str(r.id) + '.pbtxt')

  for r in results:
    if r.output_class == 'Build crash':
      pbutil.ToFile(r.ToProto(), OutputPath('bc'))
    elif r.output_class == 'Build timeout':
      pbutil.ToFile(r.ToProto(), OutputPath('bto'))
    elif (majority.majority_outcome == 'Pass' and
          r.output_class == 'Build failure'):
      pbutil.ToFile(r.ToProto(), OutputPath('abf'))
    elif (majority.majority_outcome == 'Pass' and
          r.output_class == 'Runtime crash'):
      pbutil.ToFile(r.ToProto(), OutputPath('arc'))
    elif (r.outputs['stdout'] != majority.majority_stdout and
          majority.majority_outcome == 'Pass' and
          majority.stdout_majority_size >= math.ceil(
              2 * majority.outcome_majority_size / 3)):
      pbutil.ToFile(r.ToProto(), OutputPath('awo'))
    else:
      pbutil.ToFile(r.ToProto(), OutputPath('pass'))
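# --- Hypothetical helper sketch; not part of the original module. ---
# DifftestTestcase() above compares each result against the majority outcome
# and majority stdout across all results for the testcase, using the fields
# majority_outcome, outcome_majority_size, majority_stdout and
# stdout_majority_size. The sketch below shows one way such a summary could be
# computed with collections.Counter; the real GetMajorityOutput() may count a
# different field (e.g. the raw outcome rather than output_class).
import collections  # Stdlib; imported here only for this illustrative sketch.

MajorityOutputSketch = collections.namedtuple(
    'MajorityOutputSketch',
    ['majority_outcome', 'outcome_majority_size',
     'majority_stdout', 'stdout_majority_size'])

def _GetMajorityOutputSketch(results) -> 'MajorityOutputSketch':
  """Summarize the most common outcome and stdout among a list of results."""
  outcomes = collections.Counter(r.output_class for r in results)
  stdouts = collections.Counter(r.outputs['stdout'] for r in results)
  majority_outcome, outcome_majority_size = outcomes.most_common(1)[0]
  majority_stdout, stdout_majority_size = stdouts.most_common(1)[0]
  return MajorityOutputSketch(majority_outcome, outcome_majority_size,
                              majority_stdout, stdout_majority_size)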
def GetOrAdd(cls, session: db.session_t,
             proto: deepsmith_pb2.Result) -> 'Result':
  """Instantiate a Result from a protocol buffer.

  Args:
    session: A database session.
    proto: A Result message.

  Returns:
    A Result instance.
  """
  testcase = deeplearning.deepsmith.testcase.Testcase.GetOrAdd(
      session, proto.testcase)
  testbed = deeplearning.deepsmith.testbed.Testbed.GetOrAdd(
      session, proto.testbed)

  # Only add the result if the <testcase, testbed> tuple is unique. This is
  # to prevent duplicate results where only the output differs.
  result = session.query(Result).filter(
      Result.testcase == testcase, Result.testbed == testbed).first()
  if result:
    return result

  # Build the list of outputs, and md5sum the key value strings.
  outputs = []
  md5 = hashlib.md5()
  for proto_output_name in sorted(proto.outputs):
    proto_output_value = proto.outputs[proto_output_name]
    md5.update((proto_output_name + proto_output_value).encode('utf-8'))
    output = labm8.sqlutil.GetOrAdd(
        session, ResultOutput,
        name=ResultOutputName.GetOrAdd(session, string=proto_output_name),
        value=ResultOutputValue.GetOrAdd(session, string=proto_output_value))
    outputs.append(output)

  # Create output set table entries.
  outputset_id = md5.digest()
  for output in outputs:
    labm8.sqlutil.GetOrAdd(session, ResultOutputSet, id=outputset_id,
                           output=output)

  # Create a new result only if everything *except* the profiling events is
  # unique. This means that if a harness produced the same result twice (on
  # separate occasions), only the first is added to the datastore.
  result = labm8.sqlutil.Get(session, cls, testcase=testcase, testbed=testbed,
                             returncode=proto.returncode,
                             outputset_id=outputset_id,
                             outcome_num=proto.outcome)
  if not result:
    result = cls(testcase=testcase, testbed=testbed,
                 returncode=proto.returncode, outputset_id=outputset_id,
                 outcome_num=proto.outcome)
    session.add(result)

  # Add profiling events.
  for event in proto.profiling_events:
    deeplearning.deepsmith.profiling_event.ResultProfilingEvent.GetOrAdd(
        session, event, result)

  return result
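# --- Illustrative sketch; not part of the original module. ---
# Result.GetOrAdd() above derives a content-addressed id for a set of outputs
# by hashing the sorted name+value strings. The standalone function below
# reproduces just that keying scheme on plain Python dicts so the
# deduplication behaviour is easy to see in isolation.
import typing  # Stdlib; imported here only for this illustrative sketch.

def _OutputSetIdSketch(outputs: typing.Dict[str, str]) -> bytes:
  """Return the 16-byte md5 id that a set of outputs would map to."""
  md5 = hashlib.md5()
  for name in sorted(outputs):
    md5.update((name + outputs[name]).encode('utf-8'))
  return md5.digest()

# Identical outputs map to the same id regardless of insertion order, so two
# results with the same outputs share one ResultOutputSet.
assert (_OutputSetIdSketch({'stdout': 'a', 'stderr': ''}) ==
        _OutputSetIdSketch({'stderr': '', 'stdout': 'a'}))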
def GetOrAdd(cls, session: db.session_t,
             proto: deepsmith_pb2.Testcase) -> 'Testcase':
  """Instantiate a Testcase from a protocol buffer.

  Args:
    session: A database session.
    proto: A Testcase message.

  Returns:
    A Testcase instance.
  """
  toolchain = deeplearning.deepsmith.toolchain.Toolchain.GetOrAdd(
      session, proto.toolchain)
  generator = deeplearning.deepsmith.generator.Generator.GetOrAdd(
      session, proto.generator)
  harness = deeplearning.deepsmith.harness.Harness.GetOrAdd(
      session, proto.harness)

  # Build the list of inputs, and md5sum the key value strings.
  inputs = []
  md5 = hashlib.md5()
  for proto_input_name in sorted(proto.inputs):
    proto_input_value = proto.inputs[proto_input_name]
    md5.update((proto_input_name + proto_input_value).encode('utf-8'))
    input_ = TestcaseInput.GetOrAdd(session, proto_input_name,
                                    proto_input_value)
    inputs.append(input_)

  # Create input set table entries.
  inputset_id = md5.digest()
  for input in inputs:
    phd.lib.labm8.sqlutil.GetOrAdd(session, TestcaseInputSet, id=inputset_id,
                                   input=input)

  # Build the list of invariant options, and md5sum the key value strings.
  invariant_opts = []
  md5 = hashlib.md5()
  for proto_invariant_opt_name in sorted(proto.invariant_opts):
    proto_invariant_opt_value = proto.invariant_opts[proto_invariant_opt_name]
    md5.update((proto_invariant_opt_name +
                proto_invariant_opt_value).encode('utf-8'))
    invariant_opt = TestcaseInvariantOpt.GetOrAdd(
        session, proto_invariant_opt_name, proto_invariant_opt_value)
    invariant_opts.append(invariant_opt)

  # Create invariant optset table entries.
  invariant_optset_id = md5.digest()
  for invariant_opt in invariant_opts:
    phd.lib.labm8.sqlutil.GetOrAdd(session, TestcaseInvariantOptSet,
                                   id=invariant_optset_id,
                                   invariant_opt=invariant_opt)

  # Create a new testcase only if everything *except* the profiling events is
  # unique. This means that if a generator produced the same testcase twice
  # (on separate occasions), only the first is added to the datastore.
  testcase = phd.lib.labm8.sqlutil.Get(
      session, cls, toolchain=toolchain, generator=generator, harness=harness,
      inputset_id=inputset_id, invariant_optset_id=invariant_optset_id)
  if not testcase:
    testcase = cls(toolchain=toolchain, generator=generator, harness=harness,
                   inputset_id=inputset_id,
                   invariant_optset_id=invariant_optset_id)
    session.add(testcase)

  # Add profiling events.
  for event in proto.profiling_events:
    deeplearning.deepsmith.profiling_event.TestcaseProfilingEvent.GetOrAdd(
        session, event, testcase)

  return testcase
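# --- Hypothetical usage sketch; not part of the original module. ---
# Testcase.GetOrAdd() above deduplicates on everything except profiling
# events, so importing the same proto twice resolves to a single row. The
# snippet below sketches that behaviour; MakeDatabaseSession() is an assumed,
# hypothetical session factory and the proto is supplied by the caller.
def _SketchTestcaseDedup(proto: deepsmith_pb2.Testcase) -> None:
  session = MakeDatabaseSession()  # Assumption: project-specific factory.
  first = Testcase.GetOrAdd(session, proto)
  session.commit()
  second = Testcase.GetOrAdd(session, proto)
  session.commit()
  # The second call finds the row created by the first; no duplicate is added.
  assert first.id == second.id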