def build_tests(license_tests, clazz):
    """
    Dynamically build test methods from a sequence of LicenseTest and attach
    these methods to the clazz test class.
    """
    for test in license_tests:
        # path relative to the data directory
        tfn = 'licenses/' + test.test_file_name
        test_name = 'test_detection_%(tfn)s' % locals()
        test_name = text.python_safe_name(test_name)
        # closure on the test params
        test_method = make_test_function(test.licenses, tfn, test_name, sort=test.sort)
        if test.expected_failure:
            test_method = expectedFailure(test_method)
        # attach that method to our test class
        setattr(clazz, test_name, test_method)
def build_tests(license_tests, clazz):
    """
    Dynamically build test methods from a sequence of LicenseTest and attach
    these methods to the clazz test class.
    """
    for test in license_tests:
        # absolute path
        tfn = test.test_file_name
        tf = test.test_file
        test_name = 'test_detection_%(tfn)s' % locals()
        test_name = text.python_safe_name(test_name)
        # closure on the test params
        test_method = make_license_test_function(test.licenses, tf, test_name)

        if test.expected_failure:
            test_method = expectedFailure(test_method)
        # attach that method to our test class
        setattr(clazz, test_name, test_method)
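
Both variants of build_tests above follow the same pattern: build one closure per data-driven test, optionally wrap it with expectedFailure, and attach it to a test class under a generated name. A minimal, self-contained sketch of that wiring; the tuple rows and the stub builder are illustrative stand-ins for the project's LicenseTest and make_license_test_function:

import unittest
from unittest import expectedFailure


def make_stub_test_function(expected_licenses, detected_licenses, test_name):
    # closure on the test params; a real builder would run license detection
    # instead of receiving detected_licenses directly
    def closure_test_function(self):
        assert expected_licenses == detected_licenses
    closure_test_function.__name__ = test_name
    return closure_test_function


class TestLicenseDetection(unittest.TestCase):
    pass


# (file name, expected, detected, known-broken?) rows standing in for LicenseTest objects
cases = [
    ('mit.LICENSE', ['mit'], ['mit'], False),
    ('gpl-2.0.LICENSE', ['gpl-2.0'], [], True),  # known gap, marked as expected failure
]
for file_name, expected, detected, known_failure in cases:
    test_name = 'test_detection_' + file_name.replace('.', '_').replace('-', '_')
    test_method = make_stub_test_function(expected, detected, test_name)
    if known_failure:
        test_method = expectedFailure(test_method)
    # attach the generated method to the test class
    setattr(TestLicenseDetection, test_name, test_method)

if __name__ == '__main__':
    unittest.main()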
def make_license_test_function(
    expected_licenses,
    test_file,
    test_data_file,
    test_name,
    detect_negative=True,
    min_score=0,
    expected_failure=False,
    # if not False, a reason string must be provided
    skip_test=False,
    # if True detailed traces including matched texts will be returned
    trace_text=False):
    """
    Build and return a test function closing on the test arguments.
    """
    if isinstance(test_name, unicode):
        test_name = test_name.encode('utf-8')

    if not isinstance(expected_licenses, list):
        expected_licenses = [expected_licenses]

    def closure_test_function(*args, **kwargs):
        idx = cache.get_index()
        matches = idx.match(
            location=test_file,
            min_score=min_score,
            # detect_negative is False only when testing negative rules, so
            # that negative rule detection does not hide the rule under test
            detect_negative=detect_negative)

        if not matches:
            matches = []

        # TODO: we should expect matches properly, not with a grab bag of flat license keys
        # flattened list of all detected license keys across all matches.
        detected_licenses = functional.flatten(
            map(unicode, match.rule.licenses) for match in matches)
        try:
            if not detect_negative:
                # we skipped negative detection for a negative rule:
                # we just want to ensure that the rule was matched properly
                assert matches and not expected_licenses and not detected_licenses
            else:
                assert expected_licenses == detected_licenses
        except AssertionError:
            # On failure, we compare against more result data to get additional
            # failure details, including the test_file and full match details
            match_failure_trace = []

            if trace_text:
                for match in matches:
                    qtext, itext = get_texts(match,
                                             location=test_file,
                                             idx=idx)
                    rule_text_file = match.rule.text_file
                    rule_data_file = match.rule.data_file
                    match_failure_trace.extend([
                        '', '', '======= MATCH ====', match,
                        '======= Matched Query Text for:',
                        'file://{test_file}'.format(**locals())
                    ])
                    if test_data_file:
                        match_failure_trace.append(
                            'file://{test_data_file}'.format(**locals()))
                    match_failure_trace.append(qtext.splitlines())
                    match_failure_trace.extend([
                        '',
                        '======= Matched Rule Text for:',
                        'file://{rule_text_file}'.format(**locals()),
                        'file://{rule_data_file}'.format(**locals()),
                        itext.splitlines(),
                    ])
            # this assert will always fail and provide a detailed failure trace
            assert expected_licenses == detected_licenses + [
                test_name, 'test file: file://' + test_file
            ] + match_failure_trace

    closure_test_function.__name__ = test_name
    closure_test_function.funcname = test_name

    if skip_test:
        skipper = skip(repr(skip_test))
        closure_test_function = skipper(closure_test_function)

    if expected_failure:
        closure_test_function = expectedFailure(closure_test_function)

    return closure_test_function
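
The skip_test and expected_failure branches above apply the standard unittest decorators in their plain-function form, which is what makes them composable with dynamically built tests. Their reporting semantics are the usual ones:

import unittest


class DecoratorSemanticsDemo(unittest.TestCase):

    @unittest.expectedFailure
    def test_known_bug(self):
        # fails, but is reported as an expected failure ('x'), not a failure
        self.assertEqual(1, 2)

    @unittest.skip('detection index not built yet')
    def test_not_ready(self):
        # never runs; the reason string appears in the report ('s')
        self.fail('unreachable')

if __name__ == '__main__':
    unittest.main()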
def make_copyright_test_functions(test,
                                  test_data_dir=test_env.test_data_dir,
                                  regen=False):
    """
    Build and return a test function closing on the test arguments and the
    function name. Create only a single function for multiple tests (e.g.
    copyrights and holders together).
    """
    from summarycode.copyright_summary import summarize_copyrights
    from summarycode.copyright_summary import summarize_holders
    from summarycode.copyright_summary import Text

    def closure_test_function(*args, **kwargs):
        copyrights, holders, authors = copyright_detector(test_file)

        holders_summary = []
        if 'holders_summary' in test.what:
            holders_summary = as_sorted_mapping(summarize_holders(holders))

        copyrights_summary = []
        if 'copyrights_summary' in test.what:
            copyrights_summary = as_sorted_mapping(
                summarize_copyrights(copyrights))

        authors_summary = []
        if 'authors_summary' in test.what:
            authors_summary = as_sorted_mapping(summarize_holders(authors))

        results = dict(
            copyrights=copyrights,
            authors=authors,
            holders=holders,
            holders_summary=holders_summary,
            copyrights_summary=copyrights_summary,
            authors_summary=authors_summary,
        )

        if regen:
            for wht in test.what:
                setattr(test, wht, results.get(wht))
            test.dump()

        failing = []
        all_expected = []
        all_results = []
        for wht in test.what:
            expected = getattr(test, wht, [])
            result = results[wht]
            if wht.endswith('_summary'):
                expected.sort()
                result.sort()
            try:
                assert expected == result
            except AssertionError:
                # On failure, we compare against more result data to get additional
                # failure details, including the test_file and full results
                # this assert will always fail and provide a more detailed failure trace
                if wht.endswith('_summary'):
                    all_expected.append([e.items() for e in expected])
                    all_results.append([r.items() for r in result])
                else:
                    all_expected.append(expected)
                    all_results.append(result)
                failing.append(wht)

        if all_expected:
            all_expected += [
                'failing tests: ' + ', '.join(failing),
                'data file: file://' + data_file,
                'test file: file://' + test_file
            ]

            assert all_expected == all_results

    data_file = test.data_file
    test_file = test.test_file
    what = test.what

    tfn = test_file.replace(test_data_dir, '').strip('/\\')
    whats = '_'.join(what)
    test_name = 'test_%(whats)s_%(tfn)s' % locals()
    test_name = python_safe_name(test_name)
    if isinstance(test_name, unicode):
        test_name = test_name.encode('utf-8')

    closure_test_function.__name__ = test_name
    closure_test_function.funcname = test_name

    if test.expected_failures:
        closure_test_function = expectedFailure(closure_test_function)

    return closure_test_function, test_name
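
The regen branch above is a common data-driven testing pattern: when enabled, observed results overwrite the stored expectations so they can be regenerated in bulk after an intentional behavior change. A minimal sketch of the idea, with a hypothetical computation and file layout:

import json
import os


def make_regen_test_function(test_file, expected_file, regen=False):
    # hypothetical builder: compare a computed result against a JSON
    # expectation file, optionally regenerating that file first
    def closure_test_function(self):
        result = {'size': os.path.getsize(test_file)}  # stand-in computation
        if regen:
            with open(expected_file, 'w') as out:
                json.dump(result, out, indent=2)
        with open(expected_file) as inp:
            expected = json.load(inp)
        assert expected == result
    return closure_test_function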
class TenantTest(TransactionTestCase):
    def assertSwapFailure(self, tenant_model, expected_message):
        with self.assertRaisesMessage(ImproperlyConfigured, expected_message):
            with self.settings(TENANCY_TENANT_MODEL=tenant_model):
                get_tenant_model()

    def test_invalid_tenant_user_model_format(self):
        stream = StringIO()
        handler = logging.StreamHandler(stream)
        logger.addHandler(handler)
        with self.settings(TENANCY_TENANT_MODEL='invalid'):
            pass
        logger.removeHandler(handler)
        stream.seek(0)
        self.assertIn(
            "TENANCY_TENANT_MODEL must be of the form 'app_label.model_name'",
            stream.read())

    def test_swap_failures(self):
        """
        Make sure tenant swap failures raise the correct exception
        """
        self.assertSwapFailure(
            'not.Installed',
            "TENANCY_TENANT_MODEL refers to model 'not.Installed' that has not been installed"
        )
        self.assertSwapFailure(
            'contenttypes.ContentType',
            "TENANCY_TENANT_MODEL refers to models 'contenttypes.ContentType' "
            "which is not a subclass of 'tenancy.AbstractTenant'")

    def test_content_types_deleted(self):
        """
        Make sure content types of tenant models are deleted upon their related
        tenant deletion.
        """
        tenant = Tenant.objects.create(name='tenant')
        model = tenant.specificmodels.model
        content_type = ContentType.objects.get_for_model(model)
        tenant.delete()
        self.assertFalse(
            ContentType.objects.filter(pk=content_type.pk).exists())

    def test_model_garbage_collection(self):
        """
        Make sure tenant models are correctly garbage collected upon deletion.
        """
        tenant = Tenant.objects.create(name='tenant')

        # Keep weak references to the tenant and its associated models to make
        # sure they are collected.
        tenant_wref = weakref.ref(tenant)
        models_wrefs = []
        for model in TenantModelBase.references:
            # Make sure all models have their relation tree populated.
            getattr(model._meta, '_relation_tree')
            models_wrefs.append(weakref.ref(model.for_tenant(tenant)))

        # Delete the tenant and all its associated models.
        tenant.delete()
        del tenant

        # Force a garbage collection for the benefit of non-reference counting
        # implementations.
        gc.collect()

        # Make sure all references have been removed.
        self.assertIsNone(tenant_wref())
        for model_wref in models_wrefs:
            self.assertIsNone(model_wref())

    if sys.version_info >= (3, 5):
        # Models with relationships are not correctly garbage collected
        # on Python 3.5. It looks like circular references between Model._meta,
        # Options.model, Field.model and Manager.model might be the cause.
        test_model_garbage_collection = expectedFailure(
            test_model_garbage_collection)
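
The weakref-plus-gc.collect() assertion used above is a general technique for verifying that objects are actually garbage collected; a stripped-down, self-contained version:

import gc
import unittest
import weakref


class GarbageCollectionTest(unittest.TestCase):

    def test_instance_is_collected(self):
        class Disposable(object):
            pass

        obj = Disposable()
        wref = weakref.ref(obj)
        del obj
        # force a collection pass for non-reference-counting implementations
        gc.collect()
        self.assertIsNone(wref())

if __name__ == '__main__':
    unittest.main()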
class UniqueTogetherDefinitionTest(BaseModelDefinitionTestCase):
    @classmethod
    def setUpTestData(cls):
        super(UniqueTogetherDefinitionTest, cls).setUpTestData()
        with cls.assertChecksumChange():
            cls.f1_pk = CharFieldDefinition.objects.create(
                model_def_id=cls.model_def_pk, name='f1', max_length=25).pk
        with cls.assertChecksumChange():
            cls.f2_pk = CharFieldDefinition.objects.create(
                model_def_id=cls.model_def_pk, name='f2', max_length=25).pk
        cls.ut_pk = UniqueTogetherDefinition.objects.create(
            model_def_id=cls.model_def_pk).pk

    def setUp(self):
        super(UniqueTogetherDefinitionTest, self).setUp()
        self.f1 = CharFieldDefinition.objects.get(pk=self.f1_pk)
        self.f2 = CharFieldDefinition.objects.get(pk=self.f2_pk)
        self.ut = UniqueTogetherDefinition.objects.get(pk=self.ut_pk)
        self.model_class = self.model_def.model_class()

    def test_repr(self):
        """Make sure UniqueTogetherDefinition objects are always
        repr()-able."""
        repr(self.ut)
        repr(UniqueTogetherDefinition())

    def test_clean(self):
        """Make sure we can't create a unique key with two fields of two
        different models"""
        other_model_def = ModelDefinition.objects.create(
            app_label='mutant', object_name='OtherModel')
        with self.assertChecksumChange(other_model_def):
            f2 = CharFieldDefinition.objects.create(model_def=other_model_def,
                                                    name='f2',
                                                    max_length=25)
        many_to_many_set(self.ut, 'field_defs', [self.f1, f2])
        self.assertRaises(ValidationError, self.ut.clean)

    def test_db_column(self):
        """Make sure a unique index creation works correctly when using a
        custom `db_column`. This is needed for unique FK's columns."""
        self.f2.db_column = 'f2_column'
        self.f2.save()
        many_to_many_set(self.ut, 'field_defs', [self.f1, self.f2])
        self.f2.db_column = 'f2'
        self.f2.save()
        self.ut.delete()

    def test_cannot_create_unique(self):
        """Creating a unique key on a table with duplicate rows
        shouldn't work"""
        self.model_class.objects.create(f1='a', f2='b')
        self.model_class.objects.create(f1='a', f2='b')
        with captured_stderr():
            with self.assertRaises(IntegrityError):
                with transaction.atomic():
                    many_to_many_set(self.ut, 'field_defs', [self.f1, self.f2])

    if connection.settings_dict['ENGINE'] == 'django.db.backends.sqlite3':
        # TODO: Figure out why this is failing for Django 1.9+ against SQLite
        # on TravisCI.
        test_cannot_create_unique = expectedFailure(test_cannot_create_unique)

    def test_cannot_insert_duplicate_row(self):
        """Inserting a duplicate rows shouldn't work."""
        self.model_class.objects.create(f1='a', f2='b')
        many_to_many_set(self.ut, 'field_defs', [self.f1, self.f2])
        with captured_stderr():
            with self.assertRaises(IntegrityError):
                with transaction.atomic():
                    self.model_class.objects.create(f1='a', f2='b')

    def test_cannot_remove_unique(self):
        """Removing a unique constraint that cause duplicate rows shouldn't
        work."""
        many_to_many_set(self.ut, 'field_defs', [self.f1, self.f2])
        self.model_class.objects.create(f1='a', f2='b')
        self.model_class.objects.create(f1='a', f2='c')
        with captured_stderr():
            with self.assertRaises(IntegrityError):
                with transaction.atomic():
                    self.ut.field_defs.remove(self.f2)

    if connection.settings_dict['ENGINE'] == 'django.db.backends.sqlite3':
        # TODO: Figure out why this is failing for Django 1.9+ against SQLite
        # on TravisCI.
        test_cannot_remove_unique = expectedFailure(test_cannot_remove_unique)

    def test_clear_removes_unique(self):
        """
        Removing a unique constraint should relax duplicate row
        validation
        """
        self.model_class.objects.create(f1='a', f2='b')
        many_to_many_set(self.ut, 'field_defs', [self.f1, self.f2])
        self.ut.field_defs.clear()
        self.model_class.objects.create(f1='a', f2='b')
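
The class-body reassignment used twice above (test_x = expectedFailure(test_x) under an if) is the standard way to mark a test as expected to fail only on certain backends or versions, since expectedFailure simply takes and returns the test function. The shape of the pattern, with a hypothetical condition:

import sys
import unittest
from unittest import expectedFailure


class BackendSensitiveTest(unittest.TestCase):

    def test_wide_integers(self):
        # hypothetical backend-sensitive assertion
        self.assertEqual(2 ** 63 - 1, sys.maxsize)

    if sys.maxsize < 2 ** 63 - 1:
        # e.g. on 32-bit builds this is a known failure
        test_wide_integers = expectedFailure(test_wide_integers)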
    def run(self, result=None):
        """
        Override the exception-catching behavior:
        - failureExceptions are treated as robot ExecutionFailed and
            won't fail the following test cases
        - other exceptions are bubbled up to a higher level
        """
        rv = None
        orig_result = result
        if result is None:
            result = self.defaultTestResult()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()

        self._resultForDoCleanups = result
        result.startTest(self)

        testMethod = getattr(self, self._testMethodName)
        if (getattr(self.__class__, "__unittest_skip__", False)
                or getattr(testMethod, "__unittest_skip__", False)):
            # If the class or method was skipped.
            try:
                skip_why = (
                    getattr(self.__class__, '__unittest_skip_why__', '')
                    or getattr(testMethod, '__unittest_skip_why__', ''))
                self._addSkip(result, skip_why)
            finally:
                result.stopTest(self)
            return
        try:
            success = False
            try:
                self.setUp()
            except SkipTest as e:
                self._addSkip(result, str(e))
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, sys.exc_info())
                raise
            else:
                try:
                    rv = testMethod()
                except KeyboardInterrupt:
                    raise
                except self.failureException as ex:
                    result.addFailure(self, sys.exc_info())

                    import traceback

                    tb = traceback.format_tb(sys.exc_info()[-1])
                    msg = 'Within "<b>{}</b>" AUC,<p> <b>Code Stack:</b>\n{}'.format(
                        self._name, ''.join(tb[1:-1]))
                    logger.debug(msg=msg, html=True)

                    logger.error(ex.message)
                    raise ExecutionFailed(ex.message, continue_on_failure=True)
                except _ExpectedFailure as e:
                    addExpectedFailure = getattr(result, 'addExpectedFailure',
                                                 None)
                    if addExpectedFailure is not None:
                        addExpectedFailure(self, e.exc_info)
                    else:
                        warnings.warn(
                            "TestResult has no addExpectedFailure method, reporting as passes",
                            RuntimeWarning)
                        result.addSuccess(self)
                except _UnexpectedSuccess:
                    addUnexpectedSuccess = getattr(result,
                                                   'addUnexpectedSuccess',
                                                   None)
                    if addUnexpectedSuccess is not None:
                        addUnexpectedSuccess(self)
                    else:
                        warnings.warn(
                            "TestResult has no addUnexpectedSuccess method, reporting as failures",
                            RuntimeWarning)
                        result.addFailure(self, sys.exc_info())
                except SkipTest as e:
                    self._addSkip(result, str(e))
                except:
                    result.addError(self, sys.exc_info())
                    raise
                else:
                    success = True

                try:
                    self.tearDown()
                except KeyboardInterrupt:
                    raise
                except:
                    result.addError(self, sys.exc_info())
                    success = False
                    raise

            cleanUpSuccess = self.doCleanups()
            success = success and cleanUpSuccess
            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self)
            if orig_result is None:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
        # return outside the finally block so re-raised exceptions actually
        # bubble up instead of being swallowed by the return
        return rv