def list_workitems_by_query(self, query, wi_type):
        """Return work items matching *query* for the given work-item type.

        :param query: Polarion lucene query string
        :param wi_type: ``'testcase'`` or ``'requirement'`` (case-insensitive),
            or ``''`` to search both types
        :return: list of TestCase and/or Requirement objects
        """
        fields = ['work_item_id', 'title', 'author', 'created']

        # Normalize once so 'TestCase', 'TESTCASE', etc. all match.
        wi_type_key = wi_type.lower()
        if wi_type_key == "testcase":
            workitem_list = TestCase.query(query, fields)
        elif wi_type_key == "requirement":
            workitem_list = Requirement.query(query, fields)
        elif wi_type_key == '':
            workitem_list = TestCase.query(query, fields) + \
                            Requirement.query(query, fields)
        else:
            print("'%s' is invalid. Use testcase or requirement" % wi_type)
            # BUG FIX: exit with a non-zero status on invalid input -- the
            # original called exit(0), reporting success to the caller.
            exit(1)

        return workitem_list
예제 #2
0
 def test_001_query(self):
     """Verify how ``fields`` controls TestCase.query population.

     Without an explicit ``fields`` list the title attribute is not
     populated; passing ``fields`` populates exactly the requested
     attributes.
     """
     results = TestCase.query("project.id:%s AND title:regression" %
                              (DEFAULT_PROJ))
     tc = results[0]
     self.assertIsNone(tc.title)
     results2 = TestCase.query("project.id:%s AND title:regression" %
                               (DEFAULT_PROJ),
                               fields=["work_item_id", "title"])
     tc = results2[0]
     self.assertIsNotNone(tc.title)
     results3 = TestCase.query("project.id:%s AND title:regression" %
                               (DEFAULT_PROJ),
                               fields=["work_item_id", "caseautomation"])
     tc = results3[0]
     # assertEquals is a deprecated alias; use assertEqual.
     self.assertEqual(tc.caseautomation, "notautomated")
예제 #3
0
def test_run(path, test_run_id, test_template_id, user, project):
    """Execute a test run based on jUnit XML file."""
    records = parse_junit(path)

    # Reuse the test run when it already exists; otherwise create it
    # from the given template.
    try:
        test_run = TestRun(test_run_id, project_id=project)
        click.echo('Test run {0} found.'.format(test_run_id))
    except PylarionLibException as err:
        click.echo(err, err=True)
        click.echo('Creating test run {0}.'.format(test_run_id))
        test_run = TestRun.create(project, test_run_id, test_template_id)

    for record in records:
        case_id = '{0}.{1}'.format(record['classname'], record['name'])
        matches = TestCase.query(case_id)
        if not matches:
            click.echo(
                'Was not able to find test case with id {0}, skipping...'.
                format(case_id))
            continue
        record_status = POLARION_STATUS[record['status']]
        work_item_id = matches[0].work_item_id
        click.echo(
            'Adding test record for test case {0} with status {1}.'.format(
                work_item_id, record_status))
        try:
            test_run.add_test_record_by_fields(
                test_case_id=work_item_id,
                test_result=record_status,
                test_comment=record.get('message'),
                executed_by=user,
                executed=datetime.datetime.now(),
                duration=float(record.get('time', '0')))
        except PylarionLibException as err:
            click.echo('Skipping test case {0}.'.format(work_item_id))
            click.echo(err, err=True)
예제 #4
0
파일: core.py 프로젝트: RedHatQE/pong
 def query_test_case(self, query=None):
     """Run a Polarion TestCase query.

     When *query* is omitted, fall back to matching this item's title.
     """
     from pylarion.work_item import TestCase as PylTestCase
     search = "title:{}".format(self.title) if query is None else query
     return PylTestCase.query(search)
예제 #5
0
 def query_test_case(self, query=None):
     """Return pylarion TestCase objects matching *query*.

     Defaults to a title search against ``self.title`` when no query
     is given.
     """
     from pylarion.work_item import TestCase as PylTestCase
     effective_query = query
     if effective_query is None:
         effective_query = "title:{}".format(self.title)
     return PylTestCase.query(effective_query)
예제 #6
0
def add_test_record(result):
    """Task that adds a test result to a test run.

    This task relies on ``OBJ_CACHE`` to get the test run and user objects. The
    object cache is needed since suds objects are not able to be pickled and it
    is not possible to pass them to processes.

    :param result: dict parsed from jUnit XML with at least ``classname``,
        ``name`` and ``status`` keys; ``message`` and ``time`` are optional.
    """
    test_run = OBJ_CACHE['test_run']
    user = OBJ_CACHE['user']
    testcases = OBJ_CACHE['testcases']
    junit_test_case_id = '{0}.{1}'.format(result['classname'], result['name'])
    test_case_id = testcases.get(junit_test_case_id)
    if not test_case_id:
        click.echo(
            'Missing ID information for test {0}, using junit test case id...'.
            format(junit_test_case_id))
        test_case_id = junit_test_case_id
    test_case = TestCase.query(test_case_id)
    if len(test_case) == 0:
        click.echo(
            'Was not able to find test case {0} with id {1}, skipping...'.
            format(junit_test_case_id, test_case_id))
        return
    status = POLARION_STATUS[result['status']]
    work_item_id = test_case[0].work_item_id
    click.echo('Adding test record for test case {0} with status {1}.'.format(
        work_item_id, status))
    message = result.get('message', '')
    # suds/Polarion chokes on non-ASCII text; escape it as XML character
    # references (Python 2 ``unicode`` check; isinstance is the idiom).
    if message and isinstance(message, unicode):
        message = message.encode('ascii', 'xmlcharrefreplace')
    try:
        test_run.add_test_record_by_fields(test_case_id=work_item_id,
                                           test_result=status,
                                           test_comment=message,
                                           executed_by=user,
                                           executed=datetime.datetime.now(),
                                           duration=float(
                                               result.get('time', '0')))
    except PylarionLibException as err:
        click.echo('Skipping test case {0}.'.format(work_item_id))
        click.echo(err, err=True)
    except Exception:
        # BUG FIX: the duration line was missing its trailing '\n', gluing
        # it onto the executed line in the diagnostic output.
        click.echo(
            'Error when adding test record for "{test_case_id}" with the '
            'following information:\n'
            'duration="{duration}"\n'
            'executed="{executed}"\n'
            'executed_by="{executed_by}"\n'
            'test_result="{test_result}"\n'
            'test_comment="{test_comment}"\n'.format(
                test_case_id=work_item_id,
                test_result=status,
                test_comment=message,
                executed_by=user,
                executed=datetime.datetime.now(),
                duration=float(result.get('time', '0'))))
        click.echo(traceback.format_exc(), err=True)
        raise
예제 #7
0
def add_test_record(result):
    """Task that adds a test result to a test run.

    This task relies on ``OBJ_CACHE`` to get the test run and user objects. The
    object cache is needed since suds objects are not able to be pickled and it
    is not possible to pass them to processes.

    :param result: dict parsed from jUnit XML with at least ``classname``,
        ``name`` and ``status`` keys; ``message`` and ``time`` are optional.
    """
    test_run = OBJ_CACHE['test_run']
    user = OBJ_CACHE['user']
    test_case_id = '{0}.{1}'.format(result['classname'], result['name'])
    test_case = TestCase.query(test_case_id)
    if len(test_case) == 0:
        click.echo(
            'Was not able to find test case with id {0}, skipping...'
            .format(test_case_id)
        )
        return
    status = POLARION_STATUS[result['status']]
    work_item_id = test_case[0].work_item_id
    click.echo(
        'Adding test record for test case {0} with status {1}.'
        .format(work_item_id, status)
    )
    message = result.get('message', '')
    # suds/Polarion chokes on non-ASCII text; escape it as XML character
    # references (Python 2 ``unicode`` check; isinstance is the idiom).
    if message and isinstance(message, unicode):
        message = message.encode('ascii', 'xmlcharrefreplace')
    try:
        test_run.add_test_record_by_fields(
            test_case_id=work_item_id,
            test_result=status,
            test_comment=message,
            executed_by=user,
            executed=datetime.datetime.now(),
            duration=float(result.get('time', '0'))
        )
    except PylarionLibException as err:
        click.echo('Skipping test case {0}.'.format(work_item_id))
        click.echo(err, err=True)
    # BUG FIX: was a bare ``except:`` which also swallows SystemExit and
    # KeyboardInterrupt; narrow to Exception (the error is re-raised below).
    except Exception:
        # BUG FIX: the duration line was missing its trailing '\n', gluing
        # it onto the executed line in the diagnostic output.
        click.echo(
            'Error when adding test record for "{test_case_id}" with the '
            'following information:\n'
            'duration="{duration}"\n'
            'executed="{executed}"\n'
            'executed_by="{executed_by}"\n'
            'test_result="{test_result}"\n'
            'test_comment="{test_comment}"\n'
            .format(
                test_case_id=work_item_id,
                test_result=status,
                test_comment=message,
                executed_by=user,
                executed=datetime.datetime.now(),
                duration=float(result.get('time', '0'))
            )
        )
        raise
예제 #8
0
파일: utils.py 프로젝트: RedHatQE/pong
def query_test_case(query, fields=None, **kwargs):
    """Run a Polarion query and return the matching pylarion TestCase objects.

    :param query: lucene query string passed straight to Polarion
    :param fields: optional list of field names to populate on the returned
                   TestCase objects; defaults to ``work_item_id`` and
                   ``title`` only
    :param kwargs: forwarded verbatim to ``PylTestCase.query``
    :return: list of pylarion TestCase objects
    """
    from pylarion.work_item import TestCase as PylTestCase
    wanted_fields = ["work_item_id", "title"] if fields is None else fields
    return PylTestCase.query(query, fields=wanted_fields, **kwargs)
예제 #9
0
def get_automation_statuses_from_polarion():

    polarion_items = TestCase.query('project.id:{}'.format(
        conf().polarion.project))

    LUT = {stat: set() for stat in POLARION_CASE_AUTOMATION_STATUSES}

    for item in polarion_items:
        test_case = TestCase(uri=item.uri)
        # TODO: Check whether we have the ID as property of test_case
        item_id = extract_polarion_case_ids(item.uri).pop()
        caseautomation = test_case.caseautomation
        print '{} - {}'.format(item_id, caseautomation)
        if caseautomation not in LUT:
            LUT[caseautomation] = set()
        LUT[caseautomation].add(item_id)

    return LUT
예제 #10
0
def test_run(path, test_run_id, test_template_id, user, project):
    """Execute a test run based on jUnit XML file."""
    junit_results = parse_junit(path)

    # Look up an existing test run first, falling back to creating one.
    try:
        test_run = TestRun(test_run_id, project_id=project)
        click.echo('Test run {0} found.'.format(test_run_id))
    except PylarionLibException as err:
        click.echo(err, err=True)
        click.echo('Creating test run {0}.'.format(test_run_id))
        test_run = TestRun.create(project, test_run_id, test_template_id)

    for junit_result in junit_results:
        tc_id = '{0}.{1}'.format(
            junit_result['classname'], junit_result['name'])
        found = TestCase.query(tc_id)
        if not found:
            click.echo(
                'Was not able to find test case with id {0}, skipping...'
                .format(tc_id)
            )
            continue
        record_status = POLARION_STATUS[junit_result['status']]
        work_item_id = found[0].work_item_id
        click.echo(
            'Adding test record for test case {0} with status {1}.'
            .format(work_item_id, record_status)
        )
        try:
            test_run.add_test_record_by_fields(
                test_case_id=work_item_id,
                test_result=record_status,
                test_comment=junit_result.get('message'),
                executed_by=user,
                executed=datetime.datetime.now(),
                duration=float(junit_result.get('time', '0'))
            )
        except PylarionLibException as err:
            click.echo('Skipping test case {0}.'.format(work_item_id))
            click.echo(err, err=True)
예제 #11
0
def test_case(path, collect_only, project):
    """Sync test cases with Polarion.

    :param path: path to the Python test module(s) to collect with testimony
    :param collect_only: when True, only report what would be done without
        writing anything to Polarion
    :param project: Polarion project id used when creating work items
    """
    testcases = testimony.get_testcases([path])
    for path, tests in testcases.items():
        requirement = None
        for test in tests:
            # Expect test_case_id to be path.test_name or
            # path.ClassName.test_name.
            test_case_id_parts = [
                path.replace('/', '.').replace('.py', ''), test.name
            ]
            if test.parent_class is not None:
                test_case_id_parts.insert(-1, test.parent_class)
            test_case_id = '.'.join(test_case_id_parts)
            if requirement is None:
                requirement_name = parse_requirement_name(test_case_id)
                results = Requirement.query('{0}'.format(requirement_name),
                                            fields=['title', 'work_item_id'])
                if len(results) > 0:
                    # As currently is not possible to get a single
                    # match for the title, make sure to not use a
                    # not intended Requirement.
                    for result in results:
                        if result.title == requirement_name:
                            requirement = result

                if requirement is None:
                    click.echo(
                        'Creating requirement {0}.'.format(requirement_name))
                    if not collect_only:
                        requirement = Requirement.create(project,
                                                         requirement_name,
                                                         '',
                                                         reqtype='functional')

            results = TestCase.query(test_case_id,
                                     fields=['description', 'work_item_id'])
            if len(results) == 0:
                click.echo(
                    'Creating test case {0} for requirement {1}.'.format(
                        test.name, requirement_name))
                if not collect_only:
                    test_case = TestCase.create(
                        project,
                        test.name,
                        test.docstring if test.docstring else '',
                        caseautomation='automated',
                        casecomponent='-',
                        caseimportance='medium',
                        caselevel='component',
                        caseposneg='positive',
                        subtype1='-',
                        test_case_id=test_case_id,
                        testtype='functional',
                    )
                # BUG FIX: message previously read 'Liking test case'.
                click.echo(
                    'Linking test case {0} to verify requirement {1}.'.format(
                        test.name, requirement_name))
                if not collect_only:
                    test_case.add_linked_item(requirement.work_item_id,
                                              'verifies')
            else:
                click.echo(
                    'Updating test case {0} for requirement {1}.'.format(
                        test.name, requirement_name))
                # Ensure that a single match for the Test Case is
                # returned.
                assert len(results) == 1
                test_case = results[0]
                if (not collect_only
                        and test_case.description != test.docstring):
                    test_case = TestCase(project, test_case.work_item_id)
                    test_case.description = (test.docstring
                                             if test.docstring else '')
                    test_case.update()
예제 #12
0
def add_test_case(args):
    """Task that creates or updates Test Cases and manages their Requirement.

    This task relies on ``OBJ_CACHE`` to get the collect_only and project
    objects.

    :param args: A tuple where the first element is a path and the second is a
        list of ``TestFunction`` objects mapping the tests from that path.
    """
    path, tests = args
    collect_only = OBJ_CACHE['collect_only']
    project = OBJ_CACHE['project']

    for test in tests:
        # Fetch the test case id if the @Id tag is present otherwise generate a
        # test_case_id based on the test Python import path
        test_case_id = test.unexpected_tags.get('id')
        if not test_case_id:
            # Generate the test_case_id. It could be either path.test_name or
            # path.ClassName.test_name if the test methods is defined within a
            # class.
            test_case_id_parts = [
                path.replace('/', '.').replace('.py', ''),
                test.name
            ]
            if test.parent_class is not None:
                test_case_id_parts.insert(-1, test.parent_class)
            test_case_id = '.'.join(test_case_id_parts)

        if test.docstring:
            # Python 2: normalize the raw docstring to unicode before
            # rendering it from reStructuredText.
            if not type(test.docstring) == unicode:
                test.docstring = test.docstring.decode('utf8')
            test.docstring = RST_PARSER.parse(test.docstring)

        # Is the test automated? Acceptable values are:
        # automated, manualonly, and notautomated
        auto_status = test.unexpected_tags.get(
            'caseautomation',
            'automated' if test.automated else 'notautomated'
        ).lower()
        caseposneg = test.unexpected_tags.get(
            'caseposneg',
            'negative' if 'negative' in test.name else 'positive'
        ).lower()
        subtype1 = test.unexpected_tags.get(
            'subtype1',
            '-'
        ).lower()
        casecomponent = test.unexpected_tags.get('casecomponent', '-').lower()
        caseimportance = test.unexpected_tags.get(
            'caseimportance', 'medium').lower()
        caselevel = test.unexpected_tags.get('caselevel', 'component').lower()
        setup = test.setup if test.setup else None
        status = test.unexpected_tags.get('status', 'approved').lower()
        testtype = test.unexpected_tags.get(
            'testtype',
            'functional'
        ).lower()
        upstream = test.unexpected_tags.get('upstream', 'no').lower()

        # Only hit Polarion when actually syncing; in collect-only mode the
        # empty result list forces the "create" branch below (which is also
        # guarded, so nothing is written).
        results = []
        if not collect_only:
            results = TestCase.query(
                test_case_id,
                fields=[
                    'caseautomation',
                    'caseposneg',
                    'description',
                    'work_item_id'
                ]
            )
        requirement_name = test.unexpected_tags.get(
            'requirement', parse_requirement_name(path))
        if len(results) == 0:
            # No matching Test Case: create it and link it to its
            # Requirement (both guarded by collect_only).
            click.echo(
                'Creating test case {0} for requirement {1}.'
                .format(test.name, requirement_name)
            )
            if not collect_only:
                test_case = TestCase.create(
                    project,
                    test.name,
                    test.docstring if test.docstring else '',
                    caseautomation=auto_status,
                    casecomponent=casecomponent,
                    caseimportance=caseimportance,
                    caselevel=caselevel,
                    caseposneg=caseposneg,
                    subtype1=subtype1,
                    test_case_id=test_case_id,
                    testtype=testtype,
                    setup=setup,
                    upstream=upstream,
                )
                # Status cannot be passed to create(); set it afterwards.
                test_case.status = status
                test_case.update()
            click.echo(
                'Linking test case {0} to verify requirement {1}.'
                .format(test.name, requirement_name)
            )
            if not collect_only:
                requirement = fetch_requirement(
                    requirement_name, project, collect_only)
                test_case.add_linked_item(
                    requirement.work_item_id, 'verifies')
        else:
            # Existing Test Case: push field updates only when at least one
            # tracked field actually differs, to avoid no-op round trips.
            click.echo(
                'Updating test case {0} for requirement {1}.'
                .format(test.name, requirement_name)
            )
            # Ensure that a single match for the Test Case is
            # returned.
            assert len(results) == 1
            test_case = results[0]
            if not collect_only and any((
                    test_case.caseautomation != auto_status,
                    test_case.casecomponent != casecomponent,
                    test_case.caseimportance != caseimportance,
                    test_case.caselevel != caselevel,
                    test_case.caseposneg != caseposneg,
                    test_case.description != test.docstring,
                    test_case.setup != setup,
                    test_case.subtype1 != subtype1,
                    test_case.testtype != testtype,
                    test_case.upstream != upstream,
                    test_case.status != status,
            )):
                test_case.description = (
                    test.docstring if test.docstring else '')
                test_case.caseautomation = auto_status
                test_case.casecomponent = casecomponent
                test_case.caseimportance = caseimportance
                test_case.caselevel = caselevel
                test_case.caseposneg = caseposneg
                test_case.setup = setup
                test_case.status = status
                test_case.subtype1 = subtype1
                test_case.testtype = testtype
                test_case.upstream = upstream
                test_case.update()
예제 #13
0
import sys
from pylarion.test_run import TestRun
from pylarion.test_record import TestRecord
from pylarion.work_item import TestCase, Requirement
from pylarion.document import Document
from pylarion.text import Text
import unicodedata


# id:(RHELOpenStackPlatform/RHELOSP-32630 RHELOpenStackPlatform/RHELOSP-32628 RHELOpenStackPlatform/RHELOSP-32626 RHELOpenStackPlatform/RHELOSP-32624 RHELOpenStackPlatform/RHELOSP-32622 RHELOpenStackPlatform/RHELOSP-32616 RHELOpenStackPlatform/RHELOSP-29359 RHELOpenStackPlatform/RHELOSP-29358 RHELOpenStackPlatform/RHELOSP-29356 RHELOpenStackPlatform/RHELOSP-28167 RHELOpenStackPlatform/RHELOSP-28137 RHELOpenStackPlatform/RHELOSP-27970 RHELOpenStackPlatform/RHELOSP-27969 RHELOpenStackPlatform/RHELOSP-27968 RHELOpenStackPlatform/RHELOSP-27953 RHELOpenStackPlatform/RHELOSP-27952 RHELOpenStackPlatform/RHELOSP-27951 RHELOpenStackPlatform/RHELOSP-27930 RHELOpenStackPlatform/RHELOSP-27927 RHELOpenStackPlatform/RHELOSP-27926 RHELOpenStackPlatform/RHELOSP-27925 RHELOpenStackPlatform/RHELOSP-27924 RHELOpenStackPlatform/RHELOSP-27923 RHELOpenStackPlatform/RHELOSP-27922 RHELOpenStackPlatform/RHELOSP-27921 RHELOpenStackPlatform/RHELOSP-27919 RHELOpenStackPlatform/RHELOSP-27918 RHELOpenStackPlatform/RHELOSP-27917 RHELOpenStackPlatform/RHELOSP-27916 RHELOpenStackPlatform/RHELOSP-27915 RHELOpenStackPlatform/RHELOSP-27914 RHELOpenStackPlatform/RHELOSP-27913 RHELOpenStackPlatform/RHELOSP-27912 RHELOpenStackPlatform/RHELOSP-27911 RHELOpenStackPlatform/RHELOSP-27910 RHELOpenStackPlatform/RHELOSP-27909 RHELOpenStackPlatform/RHELOSP-27908 RHELOpenStackPlatform/RHELOSP-27907 RHELOpenStackPlatform/RHELOSP-27906 RHELOpenStackPlatform/RHELOSP-27905 RHELOpenStackPlatform/RHELOSP-27904 RHELOpenStackPlatform/RHELOSP-27901 RHELOpenStackPlatform/RHELOSP-27899 RHELOpenStackPlatform/RHELOSP-27896 RHELOpenStackPlatform/RHELOSP-27895 RHELOpenStackPlatform/RHELOSP-27893 RHELOpenStackPlatform/RHELOSP-27891 RHELOpenStackPlatform/RHELOSP-27882 RHELOpenStackPlatform/RHELOSP-27881 RHELOpenStackPlatform/RHELOSP-27880 RHELOpenStackPlatform/RHELOSP-27879 RHELOpenStackPlatform/RHELOSP-27878 RHELOpenStackPlatform/RHELOSP-27859 RHELOpenStackPlatform/RHELOSP-27858 RHELOpenStackPlatform/RHELOSP-27856 
RHELOpenStackPlatform/RHELOSP-27852 RHELOpenStackPlatform/RHELOSP-27835 RHELOpenStackPlatform/RHELOSP-27834 RHELOpenStackPlatform/RHELOSP-27833 RHELOpenStackPlatform/RHELOSP-27831 RHELOpenStackPlatform/RHELOSP-27830 RHELOpenStackPlatform/RHELOSP-27829 RHELOpenStackPlatform/RHELOSP-27828 RHELOpenStackPlatform/RHELOSP-27827 RHELOpenStackPlatform/RHELOSP-27825 RHELOpenStackPlatform/RHELOSP-27824 RHELOpenStackPlatform/RHELOSP-27820 RHELOpenStackPlatform/RHELOSP-27819 RHELOpenStackPlatform/RHELOSP-27818 RHELOpenStackPlatform/RHELOSP-27817 RHELOpenStackPlatform/RHELOSP-27811 RHELOpenStackPlatform/RHELOSP-27810 RHELOpenStackPlatform/RHELOSP-27809 RHELOpenStackPlatform/RHELOSP-27807 RHELOpenStackPlatform/RHELOSP-27798 RHELOpenStackPlatform/RHELOSP-27784 RHELOpenStackPlatform/RHELOSP-27701 RHELOpenStackPlatform/RHELOSP-27396 RHELOpenStackPlatform/RHELOSP-27073 RHELOpenStackPlatform/RHELOSP-27072 RHELOpenStackPlatform/RHELOSP-27071 RHELOpenStackPlatform/RHELOSP-27070 RHELOpenStackPlatform/RHELOSP-27045 RHELOpenStackPlatform/RHELOSP-27044 RHELOpenStackPlatform/RHELOSP-27038 RHELOpenStackPlatform/RHELOSP-27037 RHELOpenStackPlatform/RHELOSP-27035 RHELOpenStackPlatform/RHELOSP-27033 RHELOpenStackPlatform/RHELOSP-27028 RHELOpenStackPlatform/RHELOSP-26981 RHELOpenStackPlatform/RHELOSP-26980 RHELOpenStackPlatform/RHELOSP-26979 RHELOpenStackPlatform/RHELOSP-26978 RHELOpenStackPlatform/RHELOSP-26977 RHELOpenStackPlatform/RHELOSP-26976 RHELOpenStackPlatform/RHELOSP-26975 RHELOpenStackPlatform/RHELOSP-26974 RHELOpenStackPlatform/RHELOSP-26973 RHELOpenStackPlatform/RHELOSP-26972 RHELOpenStackPlatform/RHELOSP-26967 RHELOpenStackPlatform/RHELOSP-26966 RHELOpenStackPlatform/RHELOSP-26965 RHELOpenStackPlatform/RHELOSP-26962 RHELOpenStackPlatform/RHELOSP-26961 RHELOpenStackPlatform/RHELOSP-26960 RHELOpenStackPlatform/RHELOSP-26959 RHELOpenStackPlatform/RHELOSP-26958 RHELOpenStackPlatform/RHELOSP-26957 RHELOpenStackPlatform/RHELOSP-26881 RHELOpenStackPlatform/RHELOSP-26431 
RHELOpenStackPlatform/RHELOSP-26430 RHELOpenStackPlatform/RHELOSP-26429 RHELOpenStackPlatform/RHELOSP-26290 RHELOpenStackPlatform/RHELOSP-26288 RHELOpenStackPlatform/RHELOSP-26283 RHELOpenStackPlatform/RHELOSP-26281 RHELOpenStackPlatform/RHELOSP-26279 RHELOpenStackPlatform/RHELOSP-26277 RHELOpenStackPlatform/RHELOSP-26275 RHELOpenStackPlatform/RHELOSP-26092 RHELOpenStackPlatform/RHELOSP-26091 RHELOpenStackPlatform/RHELOSP-26090 RHELOpenStackPlatform/RHELOSP-26088 RHELOpenStackPlatform/RHELOSP-26087 RHELOpenStackPlatform/RHELOSP-26086 RHELOpenStackPlatform/RHELOSP-26084 RHELOpenStackPlatform/RHELOSP-26083 RHELOpenStackPlatform/RHELOSP-26082 RHELOpenStackPlatform/RHELOSP-26081 RHELOpenStackPlatform/RHELOSP-26073 RHELOpenStackPlatform/RHELOSP-26071 RHELOpenStackPlatform/RHELOSP-26070 RHELOpenStackPlatform/RHELOSP-26069 RHELOpenStackPlatform/RHELOSP-26068 RHELOpenStackPlatform/RHELOSP-26063 RHELOpenStackPlatform/RHELOSP-26061 RHELOpenStackPlatform/RHELOSP-25152 RHELOpenStackPlatform/RHELOSP-25149 RHELOpenStackPlatform/RHELOSP-25148 RHELOpenStackPlatform/RHELOSP-24647)
#items = TestCase.query('project.id:RHELOpenStackPlatform')

# items = TestCase.query('NOT status:approved AND id:(RHELOpenStackPlatform/RHELOSP-32630 RHELOpenStackPlatform/RHELOSP-32628 RHELOpenStackPlatform/RHELOSP-32626 RHELOpenStackPlatform/RHELOSP-32624 RHELOpenStackPlatform/RHELOSP-32622 RHELOpenStackPlatform/RHELOSP-32616 RHELOpenStackPlatform/RHELOSP-29359 RHELOpenStackPlatform/RHELOSP-29358 RHELOpenStackPlatform/RHELOSP-29356 RHELOpenStackPlatform/RHELOSP-28167 RHELOpenStackPlatform/RHELOSP-28137 RHELOpenStackPlatform/RHELOSP-27970 RHELOpenStackPlatform/RHELOSP-27969 RHELOpenStackPlatform/RHELOSP-27968 RHELOpenStackPlatform/RHELOSP-27953 RHELOpenStackPlatform/RHELOSP-27952 RHELOpenStackPlatform/RHELOSP-27951 RHELOpenStackPlatform/RHELOSP-27930 RHELOpenStackPlatform/RHELOSP-27927 RHELOpenStackPlatform/RHELOSP-27926 RHELOpenStackPlatform/RHELOSP-27925 RHELOpenStackPlatform/RHELOSP-27924 RHELOpenStackPlatform/RHELOSP-27923 RHELOpenStackPlatform/RHELOSP-27922 RHELOpenStackPlatform/RHELOSP-27921 RHELOpenStackPlatform/RHELOSP-27919 RHELOpenStackPlatform/RHELOSP-27918 RHELOpenStackPlatform/RHELOSP-27917 RHELOpenStackPlatform/RHELOSP-27916 RHELOpenStackPlatform/RHELOSP-27915 RHELOpenStackPlatform/RHELOSP-27914 RHELOpenStackPlatform/RHELOSP-27913 RHELOpenStackPlatform/RHELOSP-27912 RHELOpenStackPlatform/RHELOSP-27911 RHELOpenStackPlatform/RHELOSP-27910 RHELOpenStackPlatform/RHELOSP-27909 RHELOpenStackPlatform/RHELOSP-27908 RHELOpenStackPlatform/RHELOSP-27907 RHELOpenStackPlatform/RHELOSP-27906 RHELOpenStackPlatform/RHELOSP-27905 RHELOpenStackPlatform/RHELOSP-27904 RHELOpenStackPlatform/RHELOSP-27901 RHELOpenStackPlatform/RHELOSP-27899 RHELOpenStackPlatform/RHELOSP-27896 RHELOpenStackPlatform/RHELOSP-27895 RHELOpenStackPlatform/RHELOSP-27893 RHELOpenStackPlatform/RHELOSP-27891 RHELOpenStackPlatform/RHELOSP-27882 RHELOpenStackPlatform/RHELOSP-27881 RHELOpenStackPlatform/RHELOSP-27880 RHELOpenStackPlatform/RHELOSP-27879 RHELOpenStackPlatform/RHELOSP-27878 RHELOpenStackPlatform/RHELOSP-27859 RHELOpenStackPlatform/RHELOSP-27858 
RHELOpenStackPlatform/RHELOSP-27856 RHELOpenStackPlatform/RHELOSP-27852 RHELOpenStackPlatform/RHELOSP-27835 RHELOpenStackPlatform/RHELOSP-27834 RHELOpenStackPlatform/RHELOSP-27833 RHELOpenStackPlatform/RHELOSP-27831 RHELOpenStackPlatform/RHELOSP-27830 RHELOpenStackPlatform/RHELOSP-27829 RHELOpenStackPlatform/RHELOSP-27828 RHELOpenStackPlatform/RHELOSP-27827 RHELOpenStackPlatform/RHELOSP-27825 RHELOpenStackPlatform/RHELOSP-27824 RHELOpenStackPlatform/RHELOSP-27820 RHELOpenStackPlatform/RHELOSP-27819 RHELOpenStackPlatform/RHELOSP-27818 RHELOpenStackPlatform/RHELOSP-27817 RHELOpenStackPlatform/RHELOSP-27811 RHELOpenStackPlatform/RHELOSP-27810 RHELOpenStackPlatform/RHELOSP-27809 RHELOpenStackPlatform/RHELOSP-27807 RHELOpenStackPlatform/RHELOSP-27798 RHELOpenStackPlatform/RHELOSP-27784 RHELOpenStackPlatform/RHELOSP-27701 RHELOpenStackPlatform/RHELOSP-27396 RHELOpenStackPlatform/RHELOSP-27073 RHELOpenStackPlatform/RHELOSP-27072 RHELOpenStackPlatform/RHELOSP-27071 RHELOpenStackPlatform/RHELOSP-27070 RHELOpenStackPlatform/RHELOSP-27045 RHELOpenStackPlatform/RHELOSP-27044 RHELOpenStackPlatform/RHELOSP-27038 RHELOpenStackPlatform/RHELOSP-27037 RHELOpenStackPlatform/RHELOSP-27035 RHELOpenStackPlatform/RHELOSP-27033 RHELOpenStackPlatform/RHELOSP-27028 RHELOpenStackPlatform/RHELOSP-26981 RHELOpenStackPlatform/RHELOSP-26980 RHELOpenStackPlatform/RHELOSP-26979 RHELOpenStackPlatform/RHELOSP-26978 RHELOpenStackPlatform/RHELOSP-26977 RHELOpenStackPlatform/RHELOSP-26976 RHELOpenStackPlatform/RHELOSP-26975 RHELOpenStackPlatform/RHELOSP-26974 RHELOpenStackPlatform/RHELOSP-26973 RHELOpenStackPlatform/RHELOSP-26972 RHELOpenStackPlatform/RHELOSP-26967 RHELOpenStackPlatform/RHELOSP-26966 RHELOpenStackPlatform/RHELOSP-26965 RHELOpenStackPlatform/RHELOSP-26962 RHELOpenStackPlatform/RHELOSP-26961 RHELOpenStackPlatform/RHELOSP-26960 RHELOpenStackPlatform/RHELOSP-26959 RHELOpenStackPlatform/RHELOSP-26958 RHELOpenStackPlatform/RHELOSP-26957 RHELOpenStackPlatform/RHELOSP-26881 
RHELOpenStackPlatform/RHELOSP-26431 RHELOpenStackPlatform/RHELOSP-26430 RHELOpenStackPlatform/RHELOSP-26429 RHELOpenStackPlatform/RHELOSP-26290 RHELOpenStackPlatform/RHELOSP-26288 RHELOpenStackPlatform/RHELOSP-26283 RHELOpenStackPlatform/RHELOSP-26281 RHELOpenStackPlatform/RHELOSP-26279 RHELOpenStackPlatform/RHELOSP-26277 RHELOpenStackPlatform/RHELOSP-26275 RHELOpenStackPlatform/RHELOSP-26092 RHELOpenStackPlatform/RHELOSP-26091 RHELOpenStackPlatform/RHELOSP-26090 RHELOpenStackPlatform/RHELOSP-26088 RHELOpenStackPlatform/RHELOSP-26087 RHELOpenStackPlatform/RHELOSP-26086 RHELOpenStackPlatform/RHELOSP-26084 RHELOpenStackPlatform/RHELOSP-26083 RHELOpenStackPlatform/RHELOSP-26082 RHELOpenStackPlatform/RHELOSP-26081 RHELOpenStackPlatform/RHELOSP-26073 RHELOpenStackPlatform/RHELOSP-26071 RHELOpenStackPlatform/RHELOSP-26070 RHELOpenStackPlatform/RHELOSP-26069 RHELOpenStackPlatform/RHELOSP-26068 RHELOpenStackPlatform/RHELOSP-26063 RHELOpenStackPlatform/RHELOSP-26061 RHELOpenStackPlatform/RHELOSP-25152 RHELOpenStackPlatform/RHELOSP-25149 RHELOpenStackPlatform/RHELOSP-25148 RHELOpenStackPlatform/RHELOSP-24647)')
# query=caseimportance.KEY%3Acritical%20AND%20NOT%20status%3A(approved%20inactive)%20AND%20caseautomation.KEY%3Aautomated
# Fetch every test case currently flagged as needing an update.
# NOTE(review): this is a Python 2 script (print statements below).
items = TestCase.query("status:needsupdate")

print "Number of items %s" % len(items)

# Load each full work item by URI and print its identity for inspection.
for item in items:
	# try:
	print item.uri
	tc = TestCase(uri=item.uri)
	print tc.title

	# NOTE(review): the commented-out block below looks like an abandoned
	# attempt at re-encoding/replacing descriptions -- confirm before removing.
	# if tc.description:
	# 	tc.description = tc._check_encode(tc.description)
	# else:

	# tc.description = "TBD"
	# tc.description.decode('utf-8')
예제 #14
0
def test_case(path, collect_only, project):
    """Sync test cases with Polarion.

    For every test collected by testimony under ``path``: ensure a
    Requirement named after the test module exists (creating it when
    missing), then create a TestCase work item for each new test and link
    it to the requirement with a ``verifies`` link, or update an existing
    TestCase's description when it changed.  When ``collect_only`` is
    true, nothing is written to Polarion; intended actions are only
    echoed.
    """
    testcases = testimony.get_testcases([path])
    for path, tests in testcases.items():
        requirement = None
        for test in tests:
            # Expect test_case_id to be path.test_name or
            # path.ClassName.test_name.
            test_case_id_parts = [
                path.replace('/', '.').replace('.py', ''),
                test.name
            ]
            if test.parent_class is not None:
                test_case_id_parts.insert(-1, test.parent_class)
            test_case_id = '.'.join(test_case_id_parts)
            if requirement is None:
                requirement_name = parse_requirement_name(test_case_id)
                results = Requirement.query(
                    '{0}'.format(requirement_name),
                    fields=['title', 'work_item_id']
                )
                if len(results) > 0:
                    # As currently is not possible to get a single
                    # match for the title, make sure to not use a
                    # not intended Requirement.
                    for result in results:
                        if result.title == requirement_name:
                            requirement = result

                if requirement is None:
                    click.echo(
                        'Creating requirement {0}.'.format(requirement_name))
                    if not collect_only:
                        requirement = Requirement.create(
                            project,
                            requirement_name,
                            '',
                            reqtype='functional'
                        )

            results = TestCase.query(
                test_case_id, fields=['description', 'work_item_id'])
            if len(results) == 0:
                click.echo(
                    'Creating test case {0} for requirement {1}.'
                    .format(test.name, requirement_name)
                )
                if not collect_only:
                    test_case = TestCase.create(
                        project,
                        test.name,
                        test.docstring if test.docstring else '',
                        caseautomation='automated',
                        casecomponent='-',
                        caseimportance='medium',
                        caselevel='component',
                        caseposneg='positive',
                        subtype1='-',
                        test_case_id=test_case_id,
                        testtype='functional',
                    )
                # BUG FIX: message previously read "Liking"; corrected to
                # "Linking" to match the sibling add_test_case task.
                click.echo(
                    'Linking test case {0} to verify requirement {1}.'
                    .format(test.name, requirement_name)
                )
                if not collect_only:
                    test_case.add_linked_item(
                        requirement.work_item_id, 'verifies')
            else:
                click.echo(
                    'Updating test case {0} for requirement {1}.'
                    .format(test.name, requirement_name)
                )
                # Ensure that a single match for the Test Case is
                # returned.
                assert len(results) == 1
                test_case = results[0]
                # Re-fetch the full work item before updating: query
                # results only carry the requested fields.
                if (not collect_only and
                        test_case.description != test.docstring):
                    test_case = TestCase(project, test_case.work_item_id)
                    test_case.description = (
                        test.docstring if test.docstring else '')
                    test_case.update()
예제 #15
0
def main():
    """Push Polarion RHOS 16 test-run results into the tracking spreadsheet.

    Searches Polarion for all non-invalid test runs planned in RHOS16,
    tallies each run's record results (pass/fail/blocked/pending, plus
    optional automation/criticality metrics), and writes the totals into
    the spreadsheet row whose test-run-id column matches the run.  Test
    runs with no matching row are reported at the end.
    """
    # When True, also count automation/criticality metrics per record and
    # write them to the PQI columns (V:X).
    isUpdateAutomationValue = False
    # access the Google spreadsheet and update results
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?version=v4')
    service = discovery.build('sheets',
                              'v4',
                              http=http,
                              discoveryServiceUrl=discoveryUrl)

    # https://docs.google.com/spreadsheets/d/1y4eBJhcZ0HsB5JUH5MPcFXWtc2zbP9XbgdCIF0S4iHs/edit#gid=1503195790
    spreadsheetId = '1y4eBJhcZ0HsB5JUH5MPcFXWtc2zbP9XbgdCIF0S4iHs'

    # Get all test runs by Polarion query, extract test run id and test run results (pass, fail, pending block, total...)

    test_runs_uris = TestRun.search(
        'NOT status:invalid AND plannedin.KEY:RHOS16'
    )  #updated:[20190627 TO 20190630]') #
    # test_runs_uris = TestRun.search('20180625-0836')
    print("Number of items %s" % len(test_runs_uris))
    loop_counter = 1
    missing_test_run_in_excel = ''
    non_test_cases_item = 0

    for test_run_uri in test_runs_uris:
        # for i in range(106,130):
        #     test_run_uri = test_runs_uris[i]

        # Re-read the sheet values on every iteration so earlier updates in
        # this run are reflected.
        rangeName = 'RHOS 16!A2:X'
        result = service.spreadsheets().values().get(
            spreadsheetId=spreadsheetId, range=rangeName).execute()
        values = result.get('values', [])
        value_input_option = 'RAW'

        print('Updating test run number: ' + str(loop_counter))
        loop_counter += 1

        print(test_run_uri.uri)
        test_run = TestRun(uri=test_run_uri.uri)
        test_run_id = test_run.test_run_id

        print('Test run title: ' + test_run.title)
        print('Test run ID: ' + test_run.test_run_id)

        records = test_run.records
        pass_counter = 0
        fail_counter = 0
        pending_counter = 0
        automation_counter = 0.0
        critical_counter = 0
        critical_auto_counter = 0
        #automation_percentage = 0
        blocked_counter = 0
        total_counter = 0

        # Collect information about test runs: how many tests pass.
        # Acceptance runs are tallied without per-record work-item lookups.
        if test_run.TestRunType == 'Acceptance':

            for record in records:
                if record.result == 'passed':
                    pass_counter += 1
                elif record.result == 'failed':
                    fail_counter += 1
                elif record.result == 'blocked':
                    blocked_counter += 1
                else:
                    pending_counter += 1
        else:
            for record in records:
                # print record.result
                # check if the record is really a test case (and not a
                # heading/requirement work item embedded in the run)

                test = TestCase.query(record.test_case_id)

                # print('Test case ID: ' + record.test_case_id)
                # Check if the object type is a testcase and not a header for example!
                if test and not Requirement.query(record.test_case_id):

                    # calculate critical automated and rest automated
                    if isUpdateAutomationValue:
                        if test[0].caseautomation.lower() == 'automated':
                            automation_counter += 1
                            if test[0].caseimportance.lower() == 'critical':
                                critical_auto_counter += 1
                        # count number of critical cases
                        if test[0].caseimportance.lower() == 'critical':
                            critical_counter += 1

                    if record.result == 'passed':
                        pass_counter += 1
                    elif record.result == 'failed':
                        fail_counter += 1
                    elif record.result == 'blocked':
                        blocked_counter += 1
                    else:
                        pending_counter += 1
                else:
                    non_test_cases_item += 1

        total_counter = pass_counter + fail_counter + blocked_counter + pending_counter
        # if total_counter > 0:
        #     automation_percentage = int(float(automation_counter)/float(total_counter)) #*100

        # BUG FIX: this line was corrupted in the original source
        # ("print('Total pass:'******'Total fail:', fail_counter)" is not
        # valid Python); reconstructed as two separate prints matching the
        # surrounding output style.
        print('Total pass:', pass_counter)
        print('Total fail:', fail_counter)
        print('Total blocked:', blocked_counter)
        print('Total pending:', pending_counter)
        print('Total automated:', automation_counter)
        print('Number of critical:', critical_counter)
        print('Number of critical auto:', critical_auto_counter)
        #print ('Automation percentage:', automation_percentage)
        print('Total number of test cases:', total_counter)

        # column numbers in the sheet and their representation as hard coded values
        row_counter = 1  # offset due to headers
        title_column_number = 2
        total_column_number = 8
        pass_column_number = 9
        fail_column_number = 10
        blocked_column_number = 11
        test_run_id_column_number = 20
        automation_percentage_column_number = 18
        critical_test_number = 22
        is_test_run_exist_in_excel = None

        if not values:
            print('No data found.')
        else:
            for row in values:
                is_test_run_exist_in_excel = False
                row_counter += 1
                # print(row_counter)
                # if(row_counter==134):
                #     print('stop')

                # The row must actually contain the test-run-id cell:
                # indexing row[n] requires len(row) > n.
                # BUG FIX: the original used ">=", which raised IndexError
                # for rows of exactly test_run_id_column_number cells.
                if len(row) > test_run_id_column_number and row[
                        test_run_id_column_number] == test_run_id:
                    print('Row number is: ' + str(row_counter))
                    is_test_run_exist_in_excel = True
                    #  print('%s, %s, %s, %s, %s, %s, %s :' % (row[title_column_number], row[total_column_number], row[pass_column_number], row[fail_column_number], row[blocked_column_number],row[automation_percentage_column_number], row[critical_test_number]))
                    values = [[
                        total_counter, total_counter, pass_counter,
                        fail_counter, blocked_counter
                    ]]
                    body = {'values': values}

                    # NOTE(review): values are read from the 'RHOS 16'
                    # sheet above but written back to 'RHOS 15' here and
                    # in the PQI update below -- confirm the sheet name is
                    # intentional.
                    rangeName = 'RHOS 15!H' + str(row_counter) + ':L' + str(
                        row_counter)
                    result = service.spreadsheets().values().update(
                        spreadsheetId=spreadsheetId,
                        range=rangeName,
                        valueInputOption=value_input_option,
                        body=body).execute()

                    # # update automation percentage field
                    # values = [
                    #     [automation_percentage]
                    # ]
                    # body = {
                    #     'values': values
                    # }
                    # rangeName = 'RHOS 13!S' + str(row_counter)
                    # result = service.spreadsheets().values().update(spreadsheetId=spreadsheetId, range=rangeName, valueInputOption='USER_ENTERED',body=body).execute()

                    # update PQI values...
                    if isUpdateAutomationValue and test_run.TestRunType != 'Acceptance':
                        values = [[
                            automation_counter, critical_counter,
                            critical_auto_counter
                        ]]
                        body = {'values': values}
                        rangeName = 'RHOS 15!V' + str(
                            row_counter) + ':X' + str(row_counter)
                        result = service.spreadsheets().values().update(
                            spreadsheetId=spreadsheetId,
                            range=rangeName,
                            valueInputOption=value_input_option,
                            body=body).execute()

                    # done with update, move to next test run
                    break
        # Check if test run exists in the spreadsheet and was updated
        if not is_test_run_exist_in_excel:
            missing_test_run_in_excel += test_run_id + ", "

    print("Missing Test Runs in Excel: " + missing_test_run_in_excel)
    print("Number of headers or requirements in test runs: ",
          non_test_cases_item)
예제 #16
0
def add_test_case(args):
    """Task that creates or updates Test Cases and manages their Requirement.

    This task relies on ``OBJ_CACHE`` to get the collect_only and project
    objects.

    :param args: A tuple where the first element is a path and the second is a
        list of ``TestFunction`` objects mapping the tests from that path.
    """
    path, tests = args
    collect_only = OBJ_CACHE['collect_only']
    project = OBJ_CACHE['project']

    # Fetch or create a Requirement
    requirement = None
    requirement_name = parse_requirement_name(path)
    click.echo(
        'Fetching requirement {0}.'.format(requirement_name))
    if not collect_only:
        results = Requirement.query(
            '{0}'.format(requirement_name),
            fields=['title', 'work_item_id']
        )
        if len(results) > 0:
            # As currently is not possible to get a single
            # match for the title, make sure to not use a
            # not intended Requirement.
            for result in results:
                if result.title == requirement_name:
                    requirement = result
    if requirement is None:
        click.echo(
            'Creating requirement {0}.'.format(requirement_name))
        if not collect_only:
            requirement = Requirement.create(
                project,
                requirement_name,
                '',
                reqtype='functional'
            )

    for test in tests:
        # Generate the test_case_id. It could be either path.test_name or
        # path.ClassName.test_name if the test methods is defined within a
        # class.
        test_case_id_parts = [
            path.replace('/', '.').replace('.py', ''),
            test.name
        ]
        if test.parent_class is not None:
            test_case_id_parts.insert(-1, test.parent_class)
        test_case_id = '.'.join(test_case_id_parts)

        if test.docstring:
            # Decode byte strings to unicode before RST parsing.
            # isinstance is the idiomatic check (and accepts unicode
            # subclasses, unlike the original type() == comparison).
            if not isinstance(test.docstring, unicode):
                test.docstring = test.docstring.decode('utf8')
            test.docstring = RST_PARSER.parse(test.docstring)

        # Is the test automated? Acceptable values are:
        # automated, manualonly, and notautomated
        auto_status = 'automated' if test.automated else 'notautomated'
        caseposneg = 'negative' if 'negative' in test.name else 'positive'
        setup = test.setup if test.setup else None

        results = []
        if not collect_only:
            results = TestCase.query(
                test_case_id,
                fields=[
                    'caseautomation',
                    'caseposneg',
                    'description',
                    'work_item_id'
                ]
            )
        if len(results) == 0:
            click.echo(
                'Creating test case {0} for requirement {1}.'
                .format(test.name, requirement_name)
            )
            if not collect_only:
                test_case = TestCase.create(
                    project,
                    test.name,
                    test.docstring if test.docstring else '',
                    caseautomation=auto_status,
                    casecomponent='-',
                    caseimportance='medium',
                    caselevel='component',
                    caseposneg=caseposneg,
                    subtype1='-',
                    test_case_id=test_case_id,
                    testtype='functional',
                    setup=setup,
                )
            click.echo(
                'Linking test case {0} to verify requirement {1}.'
                .format(test.name, requirement_name)
            )
            if not collect_only:
                test_case.add_linked_item(
                    requirement.work_item_id, 'verifies')
        else:
            click.echo(
                'Updating test case {0} for requirement {1}.'
                .format(test.name, requirement_name)
            )
            # Ensure that a single match for the Test Case is
            # returned.
            assert len(results) == 1
            test_case = results[0]
            # Only push an update when at least one tracked field differs.
            if (not collect_only and
                (test_case.description != test.docstring or
                    test_case.caseautomation != auto_status or
                    test_case.caseposneg != caseposneg or
                    test_case.setup != setup)):
                test_case.description = (
                    test.docstring if test.docstring else '')
                test_case.caseautomation = auto_status
                test_case.caseposneg = caseposneg
                test_case.setup = setup
                test_case.update()
예제 #17
0
 def test_020_query_with_URI_field(self):
     """A query requesting the URI-backed 'author' field should populate it."""
     query = "project.id:%s AND title:regression" % (DEFAULT_PROJ)
     matches = TestCase.query(query, fields=["work_item_id", "author"])
     first_match = matches[0]
     self.assertIsNotNone(first_match.author)
예제 #18
0
def add_test_case(args):
    """Task that creates or updates Test Cases and manages their Requirement.

    This task relies on ``OBJ_CACHE`` to get the collect_only and project
    objects.

    :param args: A tuple where the first element is a path and the second is a
        list of ``TestFunction`` objects mapping the tests from that path.
    """
    path, tests = args
    collect_only = OBJ_CACHE['collect_only']
    project = OBJ_CACHE['project']

    for test in tests:
        # Fetch the test case id if the @Id tag is present otherwise generate a
        # test_case_id based on the test Python import path
        test_case_id = test.tokens.get('id', generate_test_id(test))
        if test.docstring:
            # Decode byte strings to unicode before use. isinstance is the
            # idiomatic check (and accepts unicode subclasses, unlike the
            # original type() == comparison).
            if not isinstance(test.docstring, unicode):
                test.docstring = test.docstring.decode('utf8')

        # Is the test automated? Acceptable values are:
        # automated, manualonly, and notautomated
        auto_status = test.tokens.get(
            'caseautomation',
            'notautomated' if test.tokens.get('status') else 'automated'
        ).lower()
        caseposneg = test.tokens.get(
            'caseposneg',
            'negative' if 'negative' in test.name else 'positive'
        ).lower()
        subtype1 = test.tokens.get(
            'subtype1',
            '-'
        ).lower()
        casecomponent = test.tokens.get('casecomponent', '-').lower()
        caseimportance = test.tokens.get(
            'caseimportance', 'medium').lower()
        caselevel = test.tokens.get('caselevel', 'component').lower()
        description = test.tokens.get(
            'description', test.docstring if test.docstring else '')
        description = RST_PARSER.parse(description)
        setup = test.tokens.get('setup')
        status = test.tokens.get('status', 'approved').lower()
        testtype = test.tokens.get(
            'testtype',
            'functional'
        ).lower()
        title = test.tokens.get('title', test.name)
        upstream = test.tokens.get('upstream', 'no').lower()
        steps = test.tokens.get('steps')
        expectedresults = test.tokens.get('expectedresults')

        # Steps and expected results must come in matched pairs.
        if steps and expectedresults:
            test_steps = generate_test_steps(
                map_steps(steps, expectedresults))
        else:
            test_steps = None

        results = []
        if not collect_only:
            results = TestCase.query(
                test_case_id,
                fields=[
                    'caseautomation',
                    'caseposneg',
                    'description',
                    'work_item_id'
                ]
            )
        requirement_name = test.tokens.get(
            'requirement', parse_requirement_name(path))
        if len(results) == 0:
            click.echo(
                'Creating test case {0} for requirement: {1}.'
                .format(title, requirement_name)
            )
            if not collect_only:
                test_case = TestCase.create(
                    project,
                    title,
                    description,
                    caseautomation=auto_status,
                    casecomponent=casecomponent,
                    caseimportance=caseimportance,
                    caselevel=caselevel,
                    caseposneg=caseposneg,
                    setup=setup,
                    subtype1=subtype1,
                    test_case_id=test_case_id,
                    testtype=testtype,
                    upstream=upstream,
                )
                # status and test steps cannot be passed to create();
                # they require a follow-up update.
                test_case.status = status
                if test_steps:
                    test_case.test_steps = test_steps
                test_case.update()
            click.echo(
                'Linking test case {0} to requirement: {1}.'
                .format(title, requirement_name)
            )
            if not collect_only:
                requirement = fetch_requirement(
                    requirement_name, project, collect_only)
                test_case.add_linked_item(
                    requirement.work_item_id, 'verifies')
        else:
            click.echo(
                'Updating test case {0} for requirement {1}.'
                .format(title, requirement_name)
            )
            # Ensure that a single match for the Test Case is
            # returned.
            assert len(results) == 1
            test_case = results[0]
            # Only push an update when at least one tracked field differs.
            if not collect_only and any((
                    test_case.caseautomation != auto_status,
                    test_case.casecomponent != casecomponent,
                    test_case.caseimportance != caseimportance,
                    test_case.caselevel != caselevel,
                    test_case.caseposneg != caseposneg,
                    test_case.description != description,
                    test_case.setup != setup,
                    test_case.status != status,
                    test_case.subtype1 != subtype1,
                    test_case.test_steps != test_steps,
                    test_case.testtype != testtype,
                    test_case.title != title,
                    test_case.upstream != upstream,
            )):
                test_case.caseautomation = auto_status
                test_case.casecomponent = casecomponent
                test_case.caseimportance = caseimportance
                test_case.caselevel = caselevel
                test_case.caseposneg = caseposneg
                test_case.description = description
                test_case.setup = setup
                test_case.status = status
                test_case.subtype1 = subtype1
                test_case.testtype = testtype
                test_case.title = title
                test_case.upstream = upstream
                if test_steps:
                    test_case.test_steps = test_steps
                test_case.update()
예제 #19
0
class PolarionMapping(collections.MutableMapping):
   lucene_special_chars = ['+', '-', '&&', '||', '!', '(', ')', '{', '}', '[', ']', '^', '\"', '~', '*', '?', ':', '\\']
   
   def __init__(self, project_name=None, *args, **kwargs):
      if project_name == None:
         self.polarion = TestCase()
         self.project_name = self.polarion.default_project
      else:
         self.project_name = project_name
      # Open a persistent shelf for the default or specified project
      self.shelf = shelve.open(self.project_name)

   def __del__(self):
      # Closing the shelf flushes and saves it
      self.shelf.close()

   def __getitem__(self, key):
      if self.shelf.has_key(key):
         return self.shelf.__getitem__(key)
      else:
         # Query Polarion for the test case if it isn't in the shelf
         tests = self.polarion.query("testCaseID:%s OR title:%s" % (self.escape_query(key), self.escape_query(key)), fields=["work_item_id", "title"], project_id=self.project_name)
         if len(tests) == 0:
            print "Test case '%s' does not exist in Polarion" % key
            return None
         elif len(tests) == 1:
            self.shelf[str(key)] = str(tests[0].work_item_id)
            self.shelf.sync()
            return self.shelf.__getitem__(key)
         else:
            err_str = "Found multiple test cases with testCaseID '%s' in Polarion\n" % key
            for tc in tests:
               if tc.test_case_id is not None:
                  err_str += "testCaseID: '%s'; workItemID: '%s'\n" % (tc.test_case_id, tc.work_item_id)
               elif tc.title is not None:
                  err_str += "title: '%s'; workItemID: '%s'\n" % (tc.title, tc.work_item_id)
            raise RuntimeError(err_str)

   def __setitem__(self, key, val):
      self.shelf[str(key)] = str(val)
      self.shelf.sync()
      
   def __delitem__(self, key):
      self.shelf.pop(key, None)
   
   def __iter__(self):
      return self.shelf.__iter__()
   
   def __len__(self):
      return len(self.shelf)

   def __repr__(self):
      return self.shelf.__repr__()

   def escape_query(self, query_str):
      '''Escape Lucene Query Syntax Special Characters'''
      for char in self.lucene_special_chars:
         escape_str = query_str.replace(char, '\%s' % char)
      return escape_str

   def sync(self, full=False):
      '''Sync the mapping with Polarion. If full is True, then do a complete re-sync.'''
      tests = self.polarion.query("", fields=["work_item_id", "title"], project_id=self.project_name)
      if full:
         self.shelf.clear()
      for test in tests:
         if test.test_case_id is not None and not self.shelf.has_key(str(test.test_case_id)):
            print "Adding '%s' with ID '%s'" % (test.test_case_id, test.work_item_id)
            self.shelf[str(test.test_case_id)] = str(test.work_item_id)
         elif test.title is not None and not self.shelf.has_key(str(test.title)):
            print "Adding '%s' with ID '%s'" % (test.title, test.work_item_id)
            self.shelf[str(test.title)] = str(test.work_item_id)
      self.shelf.sync()