Example #1
    def test_verify_data(self):
        parser = setup_args()
        opt = parser.parse_args(print_args=False)
        changed_files = testing_utils.git_changed_files()
        changed_task_files = []
        for file in changed_files:
            if ('parlai/tasks' in file and 'README' not in file
                    and 'task_list.py' not in file):
                changed_task_files.append(file)

        if not changed_task_files:
            return

        for file in changed_task_files:
            task = file.split('/')[-2]
            module_name = "%s.tasks.%s.agents" % ('parlai', task)
            task_module = importlib.import_module(module_name)
            subtasks = [
                ':'.join([task, x]) for x in dir(task_module)
                if ('teacher' in x.lower() and x not in BASE_TEACHERS)
            ]

            for subt in subtasks:
                opt['task'] = subt
                with testing_utils.capture_output():
                    text, log = verify(opt, print_parser=False)
                for key in KEYS:
                    self.assertEqual(
                        log[key], 0,
                        'There are {} {} in this task.'.format(log[key], key))
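To make the subtask construction concrete: suppose a changed file lives under parlai/tasks/squad/, so that parlai.tasks.squad.agents defines DefaultTeacher and an IndexTeacher (illustrative names, not taken from the snippet). With generic base classes filtered out via BASE_TEACHERS, the comprehension would yield:

# Hypothetical result of the subtasks comprehension for the 'squad' task
subtasks = ['squad:DefaultTeacher', 'squad:IndexTeacher']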
Example #2
def detect_data():
    """Check if we should run data tests."""
    commit_msg = '[data]' in testing_utils.git_commit_messages().lower()
    test_changed = any(
        testing_utils.is_new_task_filename(fn)
        for fn in testing_utils.git_changed_files())
    return commit_msg or test_changed
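A detector like this is presumably consumed by the runner to decide whether a whole suite executes. As a minimal sketch (not shown in these examples), the same check can gate a test class with unittest's skipUnless; the class name and skip message here are hypothetical:

import unittest

# Hypothetical gating: skip the class unless the commit message carries
# a '[data]' tag or a new task file was touched.
@unittest.skipUnless(detect_data(), 'no [data] tag and no new task files')
class TestNewTasks(unittest.TestCase):
    def test_verify_data(self):
        ...  # test body as in Example #1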
Example #3
def detect_mturk():
    """Check if we should run mturk tests."""
    commit_msg = '[mturk]' in testing_utils.git_commit_messages().lower()
    mturk_changed = any(
        'parlai/mturk' in fn
        for fn in testing_utils.git_changed_files()
    )
    return commit_msg or mturk_changed
Example #4
def detect_gpu():
    """Check if we should run GPU tests."""
    commit_msg = '[gpu]' in testing_utils.git_commit_messages().lower()
    test_changed = any(
        'tests/nightly/gpu' in fn
        for fn in testing_utils.git_changed_files()
    )
    return commit_msg or test_changed
Example #5
def unittests():
    """Tests needed to pass Continuous Integration."""
    test_suite = unittest.TestSuite()
    test_suite.addTests(short())
    changed_files = testing_utils.git_changed_files(skip_nonexisting=False)
    if any('parlai/mturk' in fn for fn in changed_files):
        # if any mturk stuff changed, run those tests too
        test_suite.addTests(mturk())
    return test_suite
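A sketch of how the suite builder might be driven, assuming short() and mturk() return populated unittest suites as the snippet implies; the exit-code handling is an assumption rather than something shown above:

import sys
import unittest

if __name__ == '__main__':
    # Run the CI suite and surface failures through the process exit
    # code so a CI job can fail the build.
    result = unittest.TextTestRunner(verbosity=2).run(unittests())
    sys.exit(0 if result.wasSuccessful() else 1)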
Example #6
    def test_verify_data(self):
        parser = setup_args()
        opt = parser.parse_args(print_args=False)
        changed_files = testing_utils.git_changed_files()
        changed_task_files = []
        for file in changed_files:
            if ('parlai/tasks' in file and 'README' not in file
                    and 'task_list.py' not in file):
                changed_task_files.append(file)

        if not changed_task_files:
            return

        found_errors = False
        for file in changed_task_files:
            task = file.split('/')[-2]
            module_name = "%s.tasks.%s.agents" % ('parlai', task)
            task_module = importlib.import_module(module_name)
            subtasks = [
                ':'.join([task, x]) for x in dir(task_module)
                if ('teacher' in x.lower() and x not in BASE_TEACHERS)
            ]

            if testing_utils.is_this_circleci():
                if len(subtasks) == 0:
                    continue

                self.fail(
                    'test_verify_data plays poorly with CircleCI. Please run '
                    '`python tests/datatests/test_new_tasks.py` locally and '
                    'paste the output in your pull request.')

            for subt in subtasks:
                parser = setup_args()
                opt = parser.parse_args(args=['--task', subt],
                                        print_args=False)
                opt['task'] = subt
                try:
                    with testing_utils.capture_output():
                        text, log = verify(opt, print_parser=False)
                except Exception:
                    found_errors = True
                    traceback.print_exc()
                    print("Got above exception in {}".format(subt))
                    # log is undefined if verify() raised, so skip the
                    # per-key checks for this subtask
                    continue
                for key in KEYS:
                    if log[key] != 0:
                        print('There are {} {} in {}.'.format(
                            log[key],
                            key,
                            subt,
                        ))
                        found_errors = True

        self.assertFalse(found_errors, "Errors were found.")
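testing_utils.capture_output() is used throughout these examples but never defined in them. If all it needs to do is silence stdout and stderr while verify() runs, a minimal stand-in can be built from the standard library; this is a sketch under that assumption, not ParlAI's actual helper:

import contextlib
import io

@contextlib.contextmanager
def capture_output():
    # Buffer stdout and stderr so dataset downloads and teacher setup
    # don't clutter the test output; yield the buffer so callers can
    # inspect what was printed.
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer), \
            contextlib.redirect_stderr(buffer):
        yield buffer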
Example #7
    def test_verify_data(self):
        parser = setup_args()
        opt = parser.parse_args(print_args=False)
        changed_files = testing_utils.git_changed_files()
        changed_task_files = []
        for file in changed_files:
            if ('parlai/tasks' in file and 'README' not in file
                    and 'task_list.py' not in file):
                changed_task_files.append(file)

        if not changed_task_files:
            return

        for file in changed_task_files:
            task = file.split('/')[-2]
            module_name = "%s.tasks.%s.agents" % ('parlai', task)
            task_module = importlib.import_module(module_name)
            subtasks = [
                ':'.join([task, x]) for x in dir(task_module)
                if ('teacher' in x.lower() and x not in BASE_TEACHERS)
            ]

            if testing_utils.is_this_circleci():
                if len(subtasks) == 0:
                    continue

                self.fail(
                    'test_verify_data plays poorly with CircleCI. Please run '
                    '`python tests/data/test_new_tasks.py` locally and paste the '
                    'output in your pull request.')

            for subt in subtasks:
                parser = setup_args()
                opt = parser.parse_args(args=['--task', subt],
                                        print_args=False)
                opt['task'] = subt
                with testing_utils.capture_output():
                    text, log = verify(opt, print_parser=False)
                for key in KEYS:
                    self.assertEqual(
                        log[key], 0,
                        'There are {} {} in this task.'.format(log[key], key))

def detect_mturk():
    """Check if we should run mturk tests."""
    commit_msg = '[mturk]' in testing_utils.git_commit_messages().lower()
    mturk_changed = any('parlai/mturk' in fn
                        for fn in testing_utils.git_changed_files())
    return commit_msg or mturk_changed


def detect_data():
    """Check if we should run data tests."""
    commit_msg = '[data]' in testing_utils.git_commit_messages().lower()
    test_changed = any(
        testing_utils.is_new_task_filename(fn)
        for fn in testing_utils.git_changed_files())
    return commit_msg or test_changed


def detect_gpu():
    """Check if we should run GPU tests."""
    commit_msg = '[gpu]' in testing_utils.git_commit_messages().lower()
    test_changed = any('tests/nightly/gpu' in fn
                       for fn in testing_utils.git_changed_files())
    return commit_msg or test_changed
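Putting the three detectors together, a dispatcher in the style of unittests() above might look as follows; data(), mturk(), and gpu() are assumed suite builders that do not appear in these examples:

def conditional_suites():
    # Hypothetical: include each optional suite only when its detector
    # fires, mirroring how unittests() conditionally adds mturk tests.
    suite = unittest.TestSuite()
    if detect_data():
        suite.addTests(data())
    if detect_mturk():
        suite.addTests(mturk())
    if detect_gpu():
        suite.addTests(gpu())
    return suite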