import importlib
import traceback
import unittest

import testing_utils  # project-local test helpers (git queries, CI checks)

# Import paths below are assumed from the surrounding project layout:
# setup_args() and verify() come from the data-verification script; KEYS
# (the error counters verify() reports) and BASE_TEACHERS (generic teacher
# classes that are not real subtasks) are assumed to be importable from the
# same place. Adjust if the project defines them elsewhere.
from parlai.scripts.verify_data import setup_args, verify
from parlai.scripts.verify_data import KEYS, BASE_TEACHERS
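
# For orientation, a minimal sketch of the git helpers this module relies on.
# This is an assumption about their behavior, not the project's real
# implementation:
#
#     import subprocess
#
#     def git_commit_messages():
#         return subprocess.check_output(
#             ['git', 'log', 'master...HEAD', '--format=%B']
#         ).decode('utf-8')
#
#     def git_changed_files():
#         out = subprocess.check_output(
#             ['git', 'diff', '--name-only', 'master...HEAD']
#         ).decode('utf-8')
#         return out.split()

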
def detect_data():
    """Check if we should run data tests."""
    commit_msg = '[data]' in testing_utils.git_commit_messages().lower()
    test_changed = any(
        testing_utils.is_new_task_filename(fn)
        for fn in testing_utils.git_changed_files())
    return commit_msg or test_changed


def detect_gpu():
    """Check if we should run GPU tests."""
    commit_msg = '[gpu]' in testing_utils.git_commit_messages().lower()
    test_changed = any('tests/nightly/gpu' in fn
                       for fn in testing_utils.git_changed_files())
    return commit_msg or test_changed


def detect_mturk():
    """Check if we should run MTurk tests."""
    commit_msg = '[mturk]' in testing_utils.git_commit_messages().lower()
    mturk_changed = any('parlai/mturk' in fn
                        for fn in testing_utils.git_changed_files())
    return commit_msg or mturk_changed
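

# A hedged usage sketch (not in the original file): detectors like the ones
# above typically gate expensive CI suites, e.g. by skipping a TestCase
# unless the commit message or changed files opt in. The class name here is
# hypothetical:
#
#     @unittest.skipUnless(detect_gpu(), 'commit does not request GPU tests')
#     class TestNightlyGpu(unittest.TestCase):
#         ...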


# The enclosing TestCase is assumed from context (the method calls self.fail()).
class TestNewTasks(unittest.TestCase):
    def test_verify_data(self):
        changed_task_files = [
            fn
            for fn in testing_utils.git_changed_files()
            if testing_utils.is_new_task_filename(fn)
        ]
        if not changed_task_files:
            return

        found_errors = False
        for file in changed_task_files:
            # The task name is the directory containing the changed file.
            task = file.split('/')[-2]
            module_name = 'parlai.tasks.%s.agents' % task
            task_module = importlib.import_module(module_name)
            subtasks = [
                ':'.join([task, x])
                for x in dir(task_module)
                if x.endswith('Teacher') and x not in BASE_TEACHERS
            ]

            if testing_utils.is_this_circleci():
                if not subtasks:
                    continue

                self.fail(
                    'test_verify_data plays poorly with CircleCI. Please run '
                    '`python tests/datatests/test_new_tasks.py` locally and '
                    'paste the output in your pull request.'
                )

            for subt in subtasks:
                parser = setup_args()
                opt = parser.parse_args(args=['--task', subt])
                opt['task'] = subt
                try:
                    with testing_utils.capture_output():
                        text, log = verify(opt)
                except Exception:
                    found_errors = True
                    traceback.print_exc()
                    print("Got above exception in {}".format(subt))
                    # verify() raised, so there is no log to inspect; without
                    # this continue, the loop below would read a stale or
                    # undefined log.
                    continue
                for key in KEYS:
                    if log[key] != 0:
                        print('There are {} {} in {}.'.format(log[key], key, subt))
                        found_errors = True

        if found_errors:
            self.fail(
                "Please fix the above listed errors, or describe in the PR why "
                "you do not expect them to pass."
            )
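

if __name__ == '__main__':
    # The CircleCI failure message above tells contributors to run this file
    # directly, so a standard unittest entry point is assumed here.
    unittest.main()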