Example #1
def proto_db_10(request) -> unlabelled_graph_database.Database:
    """A test fixture which yields a database with 10 protos."""
    with testing_databases.DatabaseContext(unlabelled_graph_database.Database,
                                           request.param) as db:
        with db.Session(commit=True) as session:
            session.add_all([
                unlabelled_graph_database.ProgramGraph.Create(proto,
                                                              ir_id=i + 1)
                for i, proto in enumerate(
                    list(random_programl_generator.EnumerateTestSet())[:10])
            ])
        yield db
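A minimal usage sketch for the fixture above, assuming it is registered as a parametrized fixture (the decorator is not shown in the snippet); the test name and assertion are illustrative and not part of the original example:

def test_proto_db_10_row_count(proto_db_10: unlabelled_graph_database.Database):
    """Illustrative test: the fixture database should hold exactly 10 protos."""
    with proto_db_10.Session() as session:
        assert session.query(
            unlabelled_graph_database.ProgramGraph).count() == 10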
Example #2
def PopulateDatabaseWithTestSet(db: unlabelled_graph_database.Database,
                                graph_count: Optional[int] = None):
    """Populate a database with "real" programs."""
    inputs = itertools.islice(
        itertools.cycle(
            random_programl_generator.EnumerateTestSet(n=graph_count)),
        graph_count,
    )

    with db.Session(commit=True) as session:
        session.add_all([
            unlabelled_graph_database.ProgramGraph.Create(proto, ir_id=i + 1)
            for i, proto in enumerate(inputs)
        ])
    return db
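An illustrative call, assuming unlabelled_graph_database.Database accepts a database URL in its constructor (as labm8-style databases typically do); the URL and graph count are placeholders:

db = unlabelled_graph_database.Database("sqlite:////tmp/unlabelled_graphs.db")
PopulateDatabaseWithTestSet(db, graph_count=50)  # Cycles the test set up to 50 graphs.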
Example #3
"""Benchmarks for comparing annotator performance."""
from deeplearning.ml4pl.graphs.labelled.dataflow import annotate
from deeplearning.ml4pl.graphs.labelled.dataflow import data_flow_graphs
from deeplearning.ml4pl.testing import random_programl_generator
from labm8.py import prof
from labm8.py import test

FLAGS = test.FLAGS

MODULE_UNDER_TEST = None

# Real programs to test over.
PROTOS = list(random_programl_generator.EnumerateTestSet(n=20))

# The annotators to test.
ANNOTATORS = {
    analysis: annotate.ANALYSES[analysis]
    for analysis in annotate.AVAILABLE_ANALYSES
}


def AnnotatorBenchmark(annotator_class):
    """A micro-benchmark that runs annotator over a list of test graphs."""
    with prof.Profile(f"Completed benchmark of {len(PROTOS) * 5} annotations "
                      f"using {annotator_class.__name__}"):
        for graph in PROTOS:
            annotator = annotator_class(graph)
            annotator.MakeAnnotated(5)
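An illustrative driver that exercises the benchmark for every registered annotator; it uses only names defined in this example:

def test_benchmark_all_annotators():
    """Illustrative: run AnnotatorBenchmark over each annotator in ANNOTATORS."""
    for annotator_class in ANNOTATORS.values():
        AnnotatorBenchmark(annotator_class)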
Example #4
from deeplearning.ml4pl.graphs import programl_pb2
from deeplearning.ml4pl.graphs.labelled.dataflow.domtree import dominator_tree
from deeplearning.ml4pl.testing import random_networkx_generator
from deeplearning.ml4pl.testing import random_programl_generator
from labm8.py import test

FLAGS = test.FLAGS

###############################################################################
# Fixtures.
###############################################################################


@test.Fixture(
    scope="session",
    params=list(random_programl_generator.EnumerateTestSet()),
)
def real_proto(request) -> programl_pb2.ProgramGraph:
    """A test fixture which yields one of 100 "real" graphs."""
    return request.param


@test.Fixture(scope="session")
def one_real_graph() -> programl_pb2.ProgramGraph:
    """A test fixture which yields one of 100 "real" graphs."""
    return next(random_networkx_generator.EnumerateTestSet())


@test.Fixture(scope="function")
def g1() -> programl_pb2.ProgramGraph:
    """A four statement graph with one data element."""
Example #5
def test_EnumerateTestSet():
    """Test the "real" protos."""
    protos = list(random_programl_generator.EnumerateTestSet())
    assert len(protos) == 100
Example #6
def EnumerateTestSet(n: Optional[int] = None) -> Iterable[nx.MultiDiGraph]:
    """Enumerate a test set of "real" graphs."""
    for proto in random_programl_generator.EnumerateTestSet(n=n):
        yield programl.ProgramGraphToNetworkX(proto)
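Illustrative usage of the generator above, relying only on names already present in the snippet:

graphs = list(EnumerateTestSet(n=5))
assert len(graphs) == 5
assert all(isinstance(g, nx.MultiDiGraph) for g in graphs)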
Example #7
def one_proto() -> programl_pb2.ProgramGraph:
  """A test fixture which enumerates a single real proto."""
  return next(random_programl_generator.EnumerateTestSet())
Example #8
from deeplearning.ml4pl.graphs import programl_pb2
from deeplearning.ml4pl.graphs.labelled.dataflow import annotate
from deeplearning.ml4pl.graphs.labelled.dataflow import data_flow_graphs
from deeplearning.ml4pl.testing import random_programl_generator
from labm8.py import test

FLAGS = test.FLAGS


###############################################################################
# Fixtures.
###############################################################################


@test.Fixture(
  scope="session", params=list(random_programl_generator.EnumerateTestSet()),
)
def real_proto(request) -> programl_pb2.ProgramGraph:
  """A test fixture which enumerates one of 100 "real" protos."""
  return request.param


@test.Fixture(scope="session")
def one_proto() -> programl_pb2.ProgramGraph:
  """A test fixture which enumerates a single real proto."""
  return next(random_programl_generator.EnumerateTestSet())


@test.Fixture(scope="session", params=list(programl.InputOutputFormat))
def stdin_fmt(request) -> programl.InputOutputFormat:
  """A test fixture which enumerates stdin formats."""
Example #9
def EnumerateTestSet(
    n: Optional[int] = None,
) -> Iterable[graph_tuple.GraphTuple]:
    """Enumerate a test set of "real" graph tuples."""
    for graph in random_programl_generator.EnumerateTestSet(n=n):
        yield graph_tuple.GraphTuple.CreateFromProgramGraph(graph)
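Illustrative usage mirroring the previous example; the assertions use only names from the snippet:

graph_tuples = list(EnumerateTestSet(n=5))
assert len(graph_tuples) == 5
assert all(isinstance(t, graph_tuple.GraphTuple) for t in graph_tuples)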