def blackbox_pyteal_example5():
    """Example 5: run a plain ``@Subroutine`` (cubing its input) through
    ``PyTealDryRunExecutor`` in both Application and Signature mode, and
    check stack/log results over the inputs 1..10."""
    from graviton.blackbox import DryRunEncoder

    from pyteal import abi, Subroutine, TealType, Int, Mode

    # FIX: PyTealDryRunExecutor was used below but never imported (NameError).
    from tests.blackbox import Blackbox, PyTealDryRunExecutor

    # FIX: pass input_types by keyword for consistency with example 4.
    @Blackbox(input_types=[None])
    @Subroutine(TealType.uint64)
    def cubed(n: abi.Uint64):
        return n.get() ** Int(3)

    # Run the same subroutine both as an application and as a logic sig:
    app_pytealer = PyTealDryRunExecutor(cubed, Mode.Application)
    lsig_pytealer = PyTealDryRunExecutor(cubed, Mode.Signature)

    inputs = [[i] for i in range(1, 11)]

    app_inspect = app_pytealer.dryrun_on_sequence(inputs)
    lsig_inspect = lsig_pytealer.dryrun_on_sequence(inputs)

    for index, inspect in enumerate(app_inspect):
        input_var = inputs[index][0]
        assert inspect.stack_top() == input_var**3, inspect.report(
            args=inputs[index], msg="stack_top() gave unexpected results from app"
        )
        assert inspect.last_log() == DryRunEncoder.hex(input_var**3), inspect.report(
            args=inputs[index], msg="last_log() gave unexpected results from app"
        )

    for index, inspect in enumerate(lsig_inspect):
        input_var = inputs[index][0]
        # FIX: this loop checks the logic-sig run; the message wrongly said "from app".
        assert inspect.stack_top() == input_var**3, inspect.report(
            args=inputs[index], msg="stack_top() gave unexpected results from lsig"
        )
def blackbox_pyteal_example4():
    # Example 4: Using PyTealDryRunExecutor to debug an ABIReturnSubroutine with an app, logic sig and csv report
    from pathlib import Path
    import random

    from graviton.blackbox import DryRunInspector

    from pyteal import (
        abi,
        ABIReturnSubroutine,
        Expr,
        For,
        Int,
        Mode,
        ScratchVar,
        Seq,
        TealType,
    )

    from tests.blackbox import Blackbox, PyTealDryRunExecutor

    # ABI subroutine that sums a dynamic uint64 array element by element.
    @Blackbox(input_types=[None])
    @ABIReturnSubroutine
    def abi_sum(toSum: abi.DynamicArray[abi.Uint64], *, output: abi.Uint64) -> Expr:
        idx = ScratchVar(TealType.uint64)
        element = abi.Uint64()
        return Seq(
            output.set(0),
            For(
                idx.store(Int(0)),
                idx.load() < toSum.length(),
                idx.store(idx.load() + Int(1)),
            ).Do(
                Seq(
                    toSum[idx.load()].store_into(element),
                    output.set(output.get() + element.get()),
                )
            ),
        )

    # Executors for running the same subroutine as an app and as a logic sig:
    app_pytealer = PyTealDryRunExecutor(abi_sum, Mode.Application)
    lsig_pytealer = PyTealDryRunExecutor(abi_sum, Mode.Signature)

    # Fix the randomness with a seed so both report runs see identical inputs:
    random.seed(42)

    N = 50  # the number of dry runs for each experiment
    choices = range(10_000)
    # input n is a single-argument tuple wrapping a random sample of size n:
    inputs = [tuple([random.sample(choices, n)]) for n in range(N)]

    app_inspectors = app_pytealer.dryrun_on_sequence(inputs)
    lsig_inspectors = lsig_pytealer.dryrun_on_sequence(inputs)

    for i, args in enumerate(inputs):
        app_inspector = app_inspectors[i]
        lsig_inspector = lsig_inspectors[i]

        def message(insp):
            return insp.report(args, f"failed for {args}", row=i)

        # the app should pass exactly when its cost stayed within the 700 budget:
        assert app_inspector.passed() == (app_inspector.cost() <= 700), message(
            app_inspector
        )

        # the lsig always passes (never goes over budget):
        assert lsig_inspector.passed(), message(lsig_inspector)

        expected = sum(args[0])
        assert expected == app_inspector.last_log(), message(app_inspector)

        # skip i == 0 (empty sample):
        if i > 0:
            assert expected in app_inspector.final_scratch().values(), message(
                app_inspector
            )
            assert expected in lsig_inspector.final_scratch().values(), message(
                lsig_inspector
            )

    def report(kind):
        # write a CSV report of every dry run for the given experiment kind:
        assert kind in ("app", "lsig")
        insps = app_inspectors if kind == "app" else lsig_inspectors
        csv_report = DryRunInspector.csv_report(inputs, insps)
        with open(Path.cwd() / f"abi_sum_{kind}.csv", "w") as f:
            f.write(csv_report)

    report("app")
    report("lsig")