Example #1
    def test_is_invalid_with_duplicated_unique_data(self):
        if getattr(self, "unique_fields"):
            self.create_instance_to_be_duplicated_on_post()

            serializer = self.get_serializer_class()(data=self.get_full_data(),
                                                     many=False)
            serializer.is_valid(raise_exception=False)

            self.assertRaises(ValidationError, serializer.save)
        else:
            skip("Not applicable, model does not have any unique field.")
Example #2
 def test_is_invalid_without_required_data(self):
     _required = getattr(self, "required_data")
     if _required:
         _data = {
             key: value
             for key, value in self.get_full_data().items()
             if key not in _required
         }
         serializer = self.get_serializer_class()(data=_data, many=False)
         result = serializer.is_valid(raise_exception=False)
         self.assertFalse(result)
     else:
         skip("Not applicable, model does not have any required field.")
Example #3
 def test_create_missing_required_bad_request_if_any_required(self):
     self._login()
     _data = {
         key: value
         for key, value in self.post_data.items()
         if key not in self.required_fields
     }
     response = self.client.post(
         self.base_url,
         data=_data,
         format="json",
     )
     if self.required_fields:
         self.assertEqual(HTTP_400_BAD_REQUEST, response.status_code)
     else:
         skip("Not applicable, model does not have any required field.")
Example #4
    def test_create_duplicated_unique_bad_request_if_any(self):
        self._login()

        _duplicated_data = copy(self.post_data)
        for key in self.unique_fields:
            _duplicated_data.update(
                {key: getattr(self.first_element, key, None)})

        response = self.client.post(
            self.base_url,
            data=_duplicated_data,
            format="json",
        )
        if self.unique_fields:
            self.assertEqual(HTTP_400_BAD_REQUEST, response.status_code)
        else:
            skip("Not applicable, model does not have any unique field.")
Example #5
File: __init__.py  Project: rahulroxx/coala
def skip_if_no_clang():
    """
    Decorate your test with this to skip it if clang isn't present.
    """
    try:
        Index.create()
        return skipIf(False, '')
    except LibclangError as error:
        return skip(str(error))
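Example #5 is a decorator factory: skipIf(False, '') is effectively a pass-through decorator when libclang loads, and skip(reason) is returned when it does not (Index and LibclangError presumably come from clang.cindex). A sketch of how it might be applied, assuming skip_if_no_clang from the snippet above is importable; the test class and method are hypothetical.

import unittest


class ClangDependentTest(unittest.TestCase):

    # The factory is called; the decorator it returns either passes the test
    # through (skipIf(False, ...)) or skips it with the libclang error message.
    @skip_if_no_clang()
    def test_parse_translation_unit(self):
        self.assertTrue(True)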
Example #6
def generate_skip_decorator(bear):
    """
    Creates a skip decorator for a `unittest` module test from a bear.

    `check_prerequisites` is used to determine a test skip.

    :param bear: The bear whose prerequisites determine the test skip.
    :return:     A decorator that skips the test if appropriate.
    """
    result = bear.check_prerequisites()

    return skip(result) if isinstance(result, str) else skipIf(not result, "(No reason given.)")
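Here check_prerequisites() may return True (fulfilled), False (not fulfilled, no reason given), or a reason string, which is why the string case maps to skip(result) and the boolean case to skipIf(not result, ...). Since unittest's skip decorators also work on classes, a sketch of typical use follows; FakeBear and its test class are hypothetical stand-ins, not coala's API.

import unittest


class FakeBear:
    """Hypothetical stand-in; a real coala bear provides this classmethod."""
    @classmethod
    def check_prerequisites(cls):
        return 'required executable not found'  # a reason string triggers skip()


@generate_skip_decorator(FakeBear)
class FakeBearTest(unittest.TestCase):

    def test_run(self):
        # Only runs when check_prerequisites() reported success.
        ...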
Example #7
import unittest
from unittest.case import skip

from shinkenplugins.test import TestPlugin
from shinkenplugins.plugins.postgresql_lag import Plugin

SKIP = skip('find a way to either have a postgres valid account '
            'or to simulate a postgres server.')


class Test(TestPlugin):
    def test_version(self):
        args = ['-v']
        self.execute(Plugin, args, 3, 'version ' + Plugin.VERSION)

    def test_help(self):
        args = ['-h']
        self.execute(Plugin, args, 3, 'Usage:')

    @SKIP
    def test_ok(self):
        args = [
            "-H", "127.0.0.1", "-p", "5432", "-u", "postgres", "-P", "1234",
Example #8
def todo_test():
    return skip("TODO")
Example #9
def only_full_test():
    if not settings.IS_FULL_TEST:
        return skip("only_full_test")
    return _id
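Example #9 is another decorator factory: it returns skip(...) unless the project's settings.IS_FULL_TEST flag is set, in which case it returns _id, presumably an identity decorator defined alongside it. A sketch of the intended use; the test class and method are illustrative only.

import unittest


class SlowIntegrationTest(unittest.TestCase):

    @only_full_test()  # skipped unless settings.IS_FULL_TEST is truthy
    def test_full_pipeline(self):
        ...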
Example #10
def make_license_test_function(
    expected_licenses,
    test_file,
    test_data_file,
    test_name,
    detect_negative=True,
    min_score=0,
    expected_failure=False,
    # if not False, a reason string must be provided
    skip_test=False,
    # if True detailed traces including matched texts will be returned
    trace_text=False):
    """
    Build and return a test function closing on tests arguments.
    """
    if isinstance(test_name, unicode):
        test_name = test_name.encode('utf-8')

    if not isinstance(expected_licenses, list):
        expected_licenses = [expected_licenses]

    def closure_test_function(*args, **kwargs):
        idx = cache.get_index()
        matches = idx.match(
            location=test_file,
            min_score=min_score,
            # if negative, do not detect negative rules when testing negative rules
            detect_negative=detect_negative)

        if not matches:
            matches = []

        # TODO: we should expect matches properly, not with a grab bag of flat license keys
        # flattened list of all detected license keys across all matches.
        detected_licenses = functional.flatten(
            map(unicode, match.rule.licenses) for match in matches)
        try:
            if not detect_negative:
                # we skipped negative detection for a negative rule
                # we just want to ensure that the rule was matched proper
                assert matches and not expected_licenses and not detected_licenses
            else:
                assert expected_licenses == detected_licenses
        except:
            # On failure, we compare against more result data to get additional
            # failure details, including the test_file and full match details
            match_failure_trace = []

            if trace_text:
                for match in matches:
                    qtext, itext = get_texts(match,
                                             location=test_file,
                                             idx=idx)
                    rule_text_file = match.rule.text_file
                    rule_data_file = match.rule.data_file
                    match_failure_trace.extend([
                        '', '', '======= MATCH ====', match,
                        '======= Matched Query Text for:',
                        'file://{test_file}'.format(**locals())
                    ])
                    if test_data_file:
                        match_failure_trace.append(
                            'file://{test_data_file}'.format(**locals()))
                    match_failure_trace.append(qtext.splitlines())
                    match_failure_trace.extend([
                        '',
                        '======= Matched Rule Text for:'
                        'file://{rule_text_file}'.format(**locals()),
                        'file://{rule_data_file}'.format(**locals()),
                        itext.splitlines(),
                    ])
            # this assert will always fail and provide a detailed failure trace
            assert expected_licenses == detected_licenses + [
                test_name, 'test file: file://' + test_file
            ] + match_failure_trace

    closure_test_function.__name__ = test_name
    closure_test_function.funcname = test_name

    if skip_test:
        skipper = skip(repr(skip_test))
        closure_test_function = skipper(closure_test_function)

    if expected_failure:
        closure_test_function = expectedFailure(closure_test_function)

    return closure_test_function
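The returned closure is meant for data-driven suites: one function is generated per license fixture and attached to a TestCase subclass under its test_name, so the unittest loader picks each fixture up as a separate test. A sketch of that wiring follows; the fixture paths, license key, and class name are hypothetical, not taken from the project.

import unittest


class TestLicenseDetection(unittest.TestCase):
    pass


_test = make_license_test_function(
    expected_licenses=['mit'],
    test_file='data/licenses/mit.txt',
    test_data_file='data/licenses/mit.yml',
    test_name='test_detection_mit',
    # any non-False value is treated as the skip reason and passed (repr'd) to skip()
    skip_test='requires a rebuilt license index',
)
setattr(TestLicenseDetection, _test.__name__, _test)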