Example 1
def testSpeedGenerateTests():
    """This method produces json-files, each containing one statetest, with _one_ poststate.
    It stores each test with a filename that is unique per user and per process, so that two
    paralell executions should not interfere with eachother.

    returns (filename, object)
    """

    import json
    import time

    from evmlab.tools.statetests import templates
    from evmlab.tools.statetests import randomtest
    t = templates.new(templates.object_based.TEMPLATE_RandomStateTest)
    test = {}
    test.update(t)
    counter = 0
    start = time.time()
    while True:
        x0 = time.time()
        # test.update(t)
        test_obj = json.loads(
            json.dumps(t, cls=randomtest.RandomTestsJsonEncoder))
        x = str(test_obj)  # force full string rendering (values randomize lazily)
        print(test_obj["randomStatetest"]["transaction"]["to"])
        x1 = time.time()
        print("%d %f (tot %f/s)" % (counter, x1 - x0, counter / (x1 - start)))
        counter = counter + 1
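
A minimal driver for this benchmark could simply run it until interrupted; this sketch assumes only that the function above is in scope:

if __name__ == "__main__":
    try:
        # prints per-test timing and throughput until Ctrl+C
        testSpeedGenerateTests()
    except KeyboardInterrupt:
        print("benchmark stopped")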
Example 2
    def _test_template(self, template):
        new_template = templates.new(template)

        pprint(new_template)
        randomtest.process_template(new_template)
        pprint(new_template)

        for _ in range(self.num_samples):
            # values randomize every time the template is stringified;
            # assert the rendered template contains no leftover list markers
            self.assertNotIn("'[", str(new_template))
            self.assertNotIn("]'", str(new_template))

        pprint(template)
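
Since _test_template is a helper method, a minimal unittest harness around it might look like the following sketch; the class name and the num_samples value are assumptions, as only the helper itself is shown above:

import unittest

from evmlab.tools.statetests import templates

class TemplateTest(unittest.TestCase):
    # _test_template from Example 2 is assumed to be defined on this class
    num_samples = 10  # assumed sample count read by _test_template

    def test_random_statetest_template(self):
        # drive the helper with the library's random statetest template
        self._test_template(templates.object_based.TEMPLATE_RandomStateTest)

if __name__ == "__main__":
    unittest.main()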
Example 3
def generateTests():
    """This method produces json-files, each containing one statetest, with _one_ poststate. 
    It stores each test with a filename that is unique per user and per process, so that two
    paralell executions should not interfere with eachother. 
    
    returns (filename, object) 
    """

    import json

    from evmlab.tools.statetests import templates
    from evmlab.tools.statetests import randomtest

    t = templates.new(templates.object_based.TEMPLATE_RandomStateTest)
    test = {}
    counter = 0

    while True: 
        test.update(t)
        test_obj = json.loads(json.dumps(t, cls=randomtest.RandomTestsJsonEncoder))
        s = StateTest(test_obj, counter)
        counter = counter + 1
        yield s
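
Because generateTests() never terminates on its own, callers typically slice the generator; a minimal consumer, assuming nothing about the StateTest API beyond what is shown above:

import itertools

# pull the first three StateTest objects from the infinite generator
for statetest in itertools.islice(generateTests(), 3):
    print(type(statetest).__name__)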
Example 4
def configFuzzer():
    # relies on module-level imports (argparse, json, logging, signal, sys)
    # and module-level names: logger, Fuzzer, Config

    ### setup logging
    logger.setLevel(logging.DEBUG)

    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    ### setup cmdline parser
    parser = argparse.ArgumentParser(description='Ethereum consensus fuzzer')
    loglevels = [
        'CRITICAL', 'FATAL', 'ERROR', 'WARNING', 'WARN', 'INFO', 'DEBUG',
        'NOTSET'
    ]
    parser.add_argument("-v",
                        "--verbosity",
                        default="info",
                        help="available loglevels: %s" %
                        ','.join(l.lower() for l in loglevels))

    # configuration file: statetests.ini
    parser.add_argument(
        "-c",
        "--configfile",
        default="statetests.ini",
        help="path to configuration file (default: statetests.ini)")
    parser.add_argument(
        "-s",
        "--set-config",
        default=[],
        nargs='*',
        help="override settings in ini as <section>.<value>=<value>")
    parser.add_argument(
        "-D",
        "--dry-run",
        default=False,
        action="store_true",
        help=
        "Simulate and print the output instead of running it with the docker backend (default: False)"
    )
    parser.add_argument("-B",
                        "--benchmark",
                        default=False,
                        action="store_true",
                        help="Benchmark test generation (default: False)")

    grp_artefacts = parser.add_argument_group(
        'Configure Output Artefacts and Reporting')
    grp_artefacts.add_argument(
        "-x",
        "--preserve-files",
        default=False,
        action="store_true",
        help=
        "Keep tracefiles/logs/testfiles for non-failing testcases (watch disk space!) (default: False)"
    )
    grp_artefacts.add_argument(
        "-r",
        "--enable-reporting",
        default=False,
        action="store_true",
        help=
        "Output testrun statistics (num of passes/fails and speed) (default: False)"
    )

    grp_docker = parser.add_argument_group('Docker Settings')
    grp_docker.add_argument(
        "-y",
        "--docker-force-update-image",
        default=[],
        action="append",
        help=
        "Remove specified docker images before starting the fuzzer to force docker to download new versions of the image (default: [])"
    )

    ### parse args
    args = parser.parse_args()

    if args.verbosity.upper() in loglevels:
        logger.debug("setting loglevel to %s" % args.verbosity)
        args.verbosity = getattr(logging, args.verbosity.upper())
        logger.setLevel(args.verbosity)
    else:
        parser.error("invalid verbosity selected. please check --help")

    ### create fuzzer instance and pass settings; the caller starts the tests

    fuzzer = Fuzzer(config=Config(args))

    if args.benchmark:
        duration = 10
        logger.info("running benchmark for new and old method")
        # benchmark old or new method?

        # benchmark new method
        logger.info("new method: %ssec duration" % duration)
        avg_new = fuzzer.benchmark(duration=duration)

        # benchmark old method
        from evmlab.tools.statetests import templates
        from evmlab.tools.statetests import randomtest
        t = templates.new(templates.object_based.TEMPLATE_RandomStateTest)

        def old_method():
            return json.loads(
                json.dumps(t, cls=randomtest.RandomTestsJsonEncoder))

        logger.info("old method: %ssec duration" % duration)
        avg_old = fuzzer.benchmark(old_method, duration=duration)
        logger.info("old method avg generation time: %f (%f tests/s)" %
                    (avg_old, 1 / avg_old))
        logger.info("new method avg generation time: %f (%f tests/s)" %
                    (avg_new, 1 / avg_new))

        sys.exit(0)

    ### setup signal handler (catches ctrl+c SIGINT)
    def signal_handler(*args, **kwargs):
        logger.warning(
            "SIGINT - Aborting execution. please stand by until the docker instances are shut down."
        )
        fuzzer.stop_daemons()
        logger.info("BYE BYE.")
        sys.exit(1)

    signal.signal(signal.SIGINT, signal_handler)

    return fuzzer
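
configFuzzer() returns a configured Fuzzer rather than starting it, so a plausible entry point could look like this sketch (the start() call is an assumption; the Fuzzer API is not shown above):

if __name__ == "__main__":
    fuzzer = configFuzzer()
    fuzzer.start()  # hypothetical run method; depends on the actual Fuzzer API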