Example #1
def run_all_bots_for_session_config(session_config_name, num_participants,
                                    export_path):
    """
    this means all test cases are in 1 big test case.
    so if 1 fails, the others will not get run.
    """
    if session_config_name:
        session_config_names = [session_config_name]
    else:
        session_config_names = SESSION_CONFIGS_DICT.keys()

    for config_name in session_config_names:
        try:
            config = SESSION_CONFIGS_DICT[config_name]
        except KeyError:
            # Important to alert the user, since they may have entered
            # an app name instead of a session config name.
            msg = f"No session config with name '{config_name}'."
            raise Exception(msg) from None

        num_bot_cases = config.get_num_bot_cases()
        for case_number in range(num_bot_cases):
            logger.info("Creating '{}' session (test case {})".format(
                config_name, case_number))

            session = otree.session.create_session(
                session_config_name=config_name,
                num_participants=(num_participants
                                  or config['num_demo_participants']),
            )
            session_id = session.id

            run_bots(session_id, case_number=case_number)

            logger.info('Bots completed session')
    if export_path:

        now = datetime.datetime.now()

        if export_path == AUTO_NAME_BOTS_EXPORT_FOLDER:
            # oTree convention: prefix all temp folder names with __temp.
            export_path = now.strftime(
                '__temp_bots_%b%d_%Hh%Mm%S.%f')[:-5] + 's'

        os.makedirs(export_path, exist_ok=True)

        for app in settings.OTREE_APPS:
            model_module = otree.common.get_models_module(app)
            if model_module.Player.objects_exists():
                fpath = Path(export_path, "{}.csv".format(app))
                with fpath.open("w", encoding="utf8") as fp:
                    otree.export.export_app(app, fp)
        fpath = Path(export_path, "all_apps_wide.csv")
        with fpath.open("w", encoding="utf8") as fp:
            otree.export.export_wide(fp)

        logger.info('Exported CSV to folder "{}"'.format(export_path))
    else:
        logger.info('Tip: Run this command with the --export flag'
                    ' to save the data generated by bots.')
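
The strftime/slice trick above keeps only one digit of the microseconds when naming the export folder. A quick standalone check of the resulting format (the timestamp value is illustrative):

import datetime

now = datetime.datetime(2021, 3, 5, 14, 30, 12, 123456)  # illustrative timestamp
folder = now.strftime('__temp_bots_%b%d_%Hh%Mm%S.%f')[:-5] + 's'
# '%f' yields six microsecond digits; slicing off the last five keeps one,
# so this prints '__temp_bots_Mar05_14h30m12.1s'
print(folder)
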
Example #2
def test_all_bots_for_session_config(
        session_config_name, num_participants, export_path):
    """
    this means all configs and test cases are in 1 big test case.
    so if 1 fails, the others will not get run.
    to separate them, we would need to move some of this code
    to pytest_generate_tests in conftest.py
    """
    if session_config_name:
        session_config_names = [session_config_name]
    else:
        session_config_names = SESSION_CONFIGS_DICT.keys()

    for config_name in session_config_names:
        try:
            config = SESSION_CONFIGS_DICT[config_name]
        except KeyError:
            # Important to alert the user, since they may have entered
            # an app name instead of a session config name.
            raise Exception(f"No session config with name '{config_name}'.") from None

        bot_modules = [f'{app_name}.tests' for app_name in config['app_sequence']]
        pytest.register_assert_rewrite(*bot_modules)

        num_bot_cases = config.get_num_bot_cases()
        for case_number in range(num_bot_cases):
            logger.info("Creating '{}' session (test case {})".format(
                config_name, case_number))

            session = otree.session.create_session(
                session_config_name=config_name,
                num_participants=(num_participants or config['num_demo_participants']),
            )

            run_bots(session, case_number=case_number)
            logger.info('Bots completed session')
    if export_path:

        now = datetime.datetime.now()

        if export_path == AUTO_NAME_BOTS_EXPORT_FOLDER:
            # oTree convention: prefix all temp folder names with __temp.
            export_path = now.strftime('__temp_bots_%b%d_%Hh%Mm%S.%f')[:-5] + 's'

        os.makedirs(export_path, exist_ok=True)


        for app in settings.INSTALLED_OTREE_APPS:
            model_module = otree.common_internal.get_models_module(app)
            if model_module.Player.objects.exists():
                fpath = Path(export_path, "{}.csv".format(app))
                with fpath.open("w", encoding="utf8") as fp:
                    otree.export.export_app(app, fp, file_extension='csv')
        fpath = Path(export_path, "all_apps_wide.csv")
        with fpath.open("w", encoding="utf8") as fp:
            otree.export.export_wide(fp, 'csv')

        logger.info('Exported CSV to folder "{}"'.format(export_path))
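
Both variants write one CSV per app plus all_apps_wide.csv into export_path. A minimal sketch for reading that wide export back for inspection (standard library only; the filename is taken from the code above):

import csv
from pathlib import Path

def load_wide_export(export_path):
    # Parse the combined export written by otree.export.export_wide above.
    with Path(export_path, "all_apps_wide.csv").open(encoding="utf8") as fp:
        return list(csv.DictReader(fp))
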
Example #3
    def run(self):
        options = self.options

        self.check_browser()
        self.set_urls()
        self.client = requests.session()
        self.ping_server()
        self.server_configuration_check()

        sessions_to_create = []

        if options["session_config_name"]:
            session_config_name = options["session_config_name"]
            if session_config_name not in SESSION_CONFIGS_DICT:
                raise ValueError(
                    'No session config named "{}"'.format(
                        session_config_name)
                )
            session_config_names = [session_config_name]

        else:
            # default to all session configs
            session_config_names = SESSION_CONFIGS_DICT.keys()

        self.max_name_length = max(
            len(config_name) for config_name in session_config_names
        )

        for session_config_name in session_config_names:
            session_config = SESSION_CONFIGS_DICT[session_config_name]
            num_bot_cases = session_config.get_num_bot_cases()
            for case_number in range(num_bot_cases):
                num_participants = (options.get('num_participants') or
                                    session_config['num_demo_participants'])
                sessions_to_create.append({
                    'session_config_name': session_config_name,
                    'num_participants': num_participants,
                    'case_number': case_number,
                })

        total_time_spent = 0
        # Run this in a separate loop so that all session configs are
        # validated upfront, rather than the command failing halfway through.
        for session_to_create in sessions_to_create:
            total_time_spent += self.run_session(**session_to_create)

        print('Total: {} seconds'.format(
            round(total_time_spent, 1)
        ))
Example #4
    def run(self):
        options = self.options

        self.check_browser()
        self.set_urls()
        self.client = requests.session()
        self.ping_server()
        self.server_configuration_check()

        sessions_to_create = []

        if options["session_config_name"]:
            session_config_name = options["session_config_name"]
            if session_config_name not in SESSION_CONFIGS_DICT:
                raise ValueError(
                    'No session config named "{}"'.format(
                        session_config_name)
                )
            session_config_names = [session_config_name]

        else:
            # default to all session configs
            session_config_names = SESSION_CONFIGS_DICT.keys()

        self.max_name_length = max(
            len(config_name) for config_name in session_config_names
        )

        for session_config_name in session_config_names:
            session_config = SESSION_CONFIGS_DICT[session_config_name]
            num_bot_cases = session_config.get_num_bot_cases()
            for bot_case_number in range(num_bot_cases):
                num_participants = (options.get('num_participants') or
                                    session_config['num_demo_participants'])
                sessions_to_create.append({
                    'session_config_name': session_config_name,
                    'num_participants': num_participants,
                    'bot_case_number': bot_case_number,
                })

        total_time_spent = 0
        # Run this in a separate loop so that all session configs are
        # validated upfront, rather than the command failing halfway through.
        for session_to_create in sessions_to_create:
            total_time_spent += self.run_session(**session_to_create)

        print('Total: {} seconds'.format(
            round(total_time_spent, 1)
        ))
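
In Examples #3 and #4 each dict in sessions_to_create is unpacked with ** into run_session, so its keys must match run_session's parameter names exactly (case_number in #3, bot_case_number in #4). A sketch of the signature Example #4 implies; the body and the timing logic are assumptions:

import time

def run_session(self, session_config_name, num_participants, bot_case_number):
    # Sketch only: create the session, drive the browser bots through it,
    # and return elapsed seconds so run() can accumulate total_time_spent.
    start_time = time.time()
    # ... session creation and bot driving would happen here ...
    return time.time() - start_time
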
Example #5
    def run(self):

        self.check_browser()
        self.set_urls()
        self.client = requests_session()
        self.client.headers.update({'otree-rest-key': REST_KEY})

        sessions_to_create = []

        session_config_name = self.session_config_name
        if session_config_name:
            if session_config_name not in SESSION_CONFIGS_DICT:
                msg = 'No session config named "{}"'.format(session_config_name)
                raise ValueError(msg)
            session_config_names = [session_config_name]

        else:
            # default to all session configs
            session_config_names = SESSION_CONFIGS_DICT.keys()

        self.max_name_length = max(
            len(config_name) for config_name in session_config_names
        )

        for session_config_name in session_config_names:
            session_config = SESSION_CONFIGS_DICT[session_config_name]
            num_bot_cases = session_config.get_num_bot_cases()
            for case_number in range(num_bot_cases):
                num_participants = (
                    self.num_participants or session_config['num_demo_participants']
                )
                sessions_to_create.append(
                    {
                        'session_config_name': session_config_name,
                        'num_participants': num_participants,
                        'case_number': case_number,
                    }
                )

        total_time_spent = 0
        # Run this in a separate loop so that all session configs are
        # validated upfront, rather than the command failing halfway through.
        for session_to_create in sessions_to_create:
            total_time_spent += self.run_session(**session_to_create)

        print('Total: {} seconds'.format(round(total_time_spent, 1)))
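
Example #5 authenticates every request by setting the otree-rest-key header on a shared requests session. A hedged sketch of creating a session through the server's REST API with that client; the '/api/sessions' path and the response field name are assumptions for illustration:

def create_session_via_rest(client, server_url, config_name, num_participants):
    # 'client' is the requests session that already carries the otree-rest-key header.
    resp = client.post(
        server_url + '/api/sessions',  # endpoint path assumed
        json=dict(session_config_name=config_name, num_participants=num_participants),
    )
    resp.raise_for_status()
    return resp.json().get('code')  # session code field assumed
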
Example #6
def pytest_generate_tests(metafunc):
    # if the test function has a parameter called session_config_name
    if 'session_config_name' in metafunc.fixturenames:
        option = metafunc.config.option
        session_config_name = option.session_config_name
        if session_config_name:
            session_config_names = [session_config_name]
        else:
            session_config_names = SESSION_CONFIGS_DICT.keys()
        num_participants = option.num_participants
        if num_participants:
            num_participants = int(num_participants)
        preserve_data = option.preserve_data

        params = [[name, num_participants, preserve_data]
                  for name in session_config_names]
        metafunc.parametrize(
            "session_config_name,num_participants,preserve_data", params)
Example #7
def pytest_generate_tests(metafunc):
    # if the test function has a parameter called session_config_name
    if 'session_config_name' in metafunc.fixturenames:
        option = metafunc.config.option
        session_config_name = option.session_config_name
        if session_config_name:
            session_config_names = [session_config_name]
        else:
            session_config_names = SESSION_CONFIGS_DICT.keys()
        num_participants = option.num_participants
        if num_participants:
            num_participants = int(num_participants)
        params = [[name, num_participants, False]
                  for name in session_config_names]
        if option.preserve_data and len(params) >= 1:
            params[-1][2] = True
        metafunc.parametrize("session_config_name,num_participants,run_export",
                             params)
Example #8
def pytest_generate_tests(metafunc):
    # if the test function has a parameter called session_config_name
    if 'session_config_name' in metafunc.fixturenames:
        option = metafunc.config.option
        session_config_name = option.session_config_name
        if session_config_name:
            session_config_names = [session_config_name]
        else:
            session_config_names = SESSION_CONFIGS_DICT.keys()
        num_participants = option.num_participants
        if num_participants:
            num_participants = int(num_participants)
        params = [
            [name, num_participants, False]
            for name in session_config_names]
        if option.preserve_data and len(params) >= 1:
            params[-1][2] = True
        metafunc.parametrize(
            "session_config_name,num_participants,run_export", params)
Example #9
    def handle(self, **options):
        session_config_names = options["session_name"]
        if not session_config_names:
            # default to all session configs
            session_config_names = SESSION_CONFIGS_DICT.keys()

        if options['verbosity'] == 0:
            level = logging.ERROR
        elif options['verbosity'] == 1:
            level = logging.WARNING
        elif options['verbosity'] == 2:
            level = logging.INFO
        else:  # 3
            level = logging.DEBUG

        options['verbosity'] = (
            options['verbosity'] if options['verbosity'] > 2 else 1)

        logging.basicConfig(level=level)
        logging.getLogger("otree").setLevel(level)
        runner.logger.setLevel(level)
        client.logger.setLevel(level)

        export_path = options["export"] or options["save"]
        preserve_data = bool(export_path)

        test_runner = runner.OTreeExperimentTestRunner(**options)

        coverage = options["coverage"]

        if coverage:
            with runner.covering(session_config_names) as coverage_report:
                failures, data = test_runner.run_tests(
                    session_config_names, preserve_data=preserve_data)
        else:
            failures, data = test_runner.run_tests(
                session_config_names, preserve_data=preserve_data)
        if coverage:
            logger.info("Coverage Report")
            if coverage in [COVERAGE_CONSOLE, COVERAGE_ALL]:
                coverage_report.report()
            if coverage in [COVERAGE_HTML, COVERAGE_ALL]:
                html_coverage_results_dir = '_coverage_results'
                coverage_report.html_report(
                    directory=html_coverage_results_dir)
                msg = ("See '{}/index.html' for detailed results.").format(
                    html_coverage_results_dir)
                logger.info(msg)

        if preserve_data:
            now = datetime.datetime.now()

            if export_path == 'auto_name':
                export_path = now.strftime('_bots_%b%d_%Hh%Mm%S.%f')[:-5] + 's'

            if os.path.isdir(export_path):
                msg = "Directory '{}' already exists".format(export_path)
                raise IOError(msg)

            os.makedirs(export_path)

            metadata = dict(options)
            metadata.update({
                "timestamp": now.isoformat(),
                "versions": otree_and_django_version(),
                "failures": failures, "error": bool(failures)})

            sizes = {}
            for session_name, session_data in data.items():
                session_data = session_data or ""
                sizes[session_name] = len(session_data.splitlines())
                fname = "{}.csv".format(session_name)
                fpath = os.path.join(export_path, fname)
                with codecs.open(fpath, "w", encoding="utf8") as fp:
                    fp.write(session_data)

                metainfo = "\n".join(
                    ["{}: {}".format(k, v) for k, v in metadata.items()] +
                    ["sizes:"] +
                    ["\t{}: {}".format(k, v) for k, v in sizes.items()] + [""])
                fpath = os.path.join(export_path, "meta.txt")
                with codecs.open(fpath, "w", encoding="utf8") as fp:
                    fp.write(metainfo)
            logger.info('Exported CSV to folder "{}"'.format(export_path))
        else:
            logger.info('Tip: Run this command with the --export flag'
                        ' to save the data generated by bots.')

        if failures:
            sys.exit(bool(failures))
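
runner.covering(...) in Examples #9 and #10 is used as a context manager that yields a report object exposing .report() and .html_report(). A minimal sketch of such a helper built on coverage.py; the real implementation (including any per-config source filtering) is assumed, not shown:

import contextlib
import coverage

@contextlib.contextmanager
def covering(session_config_names=None):
    # Measure coverage while the bots run; the caller decides how to report.
    cov = coverage.Coverage()
    cov.start()
    try:
        yield cov
    finally:
        cov.stop()
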
Example #10
    def handle(self, **options):
        session_config_names = options["session_name"]
        if not session_config_names:
            # default to all session configs
            session_config_names = SESSION_CONFIGS_DICT.keys()

        if options['verbosity'] == 0:
            level = logging.ERROR
        elif options['verbosity'] == 1:
            level = logging.WARNING
        elif options['verbosity'] == 2:
            level = logging.INFO
        else:  # 3
            level = logging.DEBUG

        options['verbosity'] = (options['verbosity']
                                if options['verbosity'] > 2 else 1)

        logging.basicConfig(level=level)
        logging.getLogger("otree").setLevel(level)
        runner.logger.setLevel(level)
        client.logger.setLevel(level)

        export_path = options["export"] or options["save"]
        preserve_data = bool(export_path)

        test_runner = runner.OTreeExperimentTestRunner(**options)

        coverage = options["coverage"]

        if coverage:
            with runner.covering(session_config_names) as coverage_report:
                failures, data = test_runner.run_tests(
                    session_config_names, preserve_data=preserve_data)
        else:
            failures, data = test_runner.run_tests(session_config_names,
                                                   preserve_data=preserve_data)
        if coverage:
            logger.info("Coverage Report")
            if coverage in [COVERAGE_CONSOLE, COVERAGE_ALL]:
                coverage_report.report()
            if coverage in [COVERAGE_HTML, COVERAGE_ALL]:
                html_coverage_results_dir = '_coverage_results'
                coverage_report.html_report(
                    directory=html_coverage_results_dir)
                msg = ("See '{}/index.html' for detailed results."
                       ).format(html_coverage_results_dir)
                logger.info(msg)

        if preserve_data:
            now = datetime.datetime.now()

            if export_path == 'auto_name':
                export_path = now.strftime('_bots_%b%d_%Hh%Mm%S.%f')[:-5] + 's'

            if os.path.isdir(export_path):
                msg = "Directory '{}' already exists".format(export_path)
                raise IOError(msg)

            os.makedirs(export_path)

            metadata = dict(options)
            metadata.update({
                "timestamp": now.isoformat(),
                "versions": otree_and_django_version(),
                "failures": failures,
                "error": bool(failures)
            })

            sizes = {}
            for session_name, session_data in data.items():
                session_data = session_data or ""
                sizes[session_name] = len(session_data.splitlines())
                fname = "{}.csv".format(session_name)
                fpath = os.path.join(export_path, fname)
                with codecs.open(fpath, "w", encoding="utf8") as fp:
                    fp.write(session_data)

                metainfo = "\n".join(
                    ["{}: {}".format(k, v)
                     for k, v in metadata.items()] + ["sizes:"] +
                    ["\t{}: {}".format(k, v) for k, v in sizes.items()] + [""])
                fpath = os.path.join(export_path, "meta.txt")
                with codecs.open(fpath, "w", encoding="utf8") as fp:
                    fp.write(metainfo)
            logger.info('Exported CSV to folder "{}"'.format(export_path))
        else:
            logger.info('Tip: Run this command with the --export flag'
                        ' to save the data generated by bots.')

        if failures:
            sys.exit(bool(failures))
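
The verbosity-to-log-level ladder at the top of handle() can be expressed as a table lookup with the same behavior; a small equivalent sketch for reference:

import logging

VERBOSITY_TO_LEVEL = {
    0: logging.ERROR,
    1: logging.WARNING,
    2: logging.INFO,
}

def level_for_verbosity(verbosity):
    # Verbosity 3 (or anything unexpected) falls through to DEBUG,
    # matching the else branch above.
    return VERBOSITY_TO_LEVEL.get(verbosity, logging.DEBUG)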