Example #1
0
def process_results(results):
    """Insert converted subunit results into the database.

    Creates a run row (totals, artifacts and optional metadata come from
    CONF), then one test row and one test_run row per test in *results*,
    including any per-test metadata and attachments.

    :param results: dict of parsed subunit data; must contain a
        'run_time' key plus one entry per test id.
    """
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    # CONF.run_at is a string timestamp when set; parse it into a datetime.
    if CONF.run_at:
        run_at = date_parser.parse(CONF.run_at)
    else:
        run_at = None
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, run_at=run_at, session=session)
    if CONF.run_meta:
        api.add_run_metadata(CONF.run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            # First sighting of this test id: seed its counters from
            # this result's status (anything else, e.g. skip, counts 0/0).
            if results[test]['status'] == 'success':
                success = 1
                fails = 0
            elif results[test]['status'] == 'fail':
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time,
                                      session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            if CONF.test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value) for meta in
                                 test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        # Only record attrs not already stored for this test.
                        if ('attr', attr) not in test_metadata:
                            test_meta_dict = {'attr': attr}
                            # BUG FIX: keyword was misspelled 'sesion',
                            # which raised TypeError at call time.
                            api.add_test_metadata(test_meta_dict, db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'], test_run.id,
                                      session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
Example #2
0
def process_results(results):
    """Store parsed subunit results in the database.

    Creates the run record (totals, artifacts and run metadata come
    from CONF), then a test row and a test_run row for every test in
    *results*, attaching per-test metadata and attachments.

    :param results: dict of parsed subunit data; must contain a
        'run_time' key plus one entry per test id.
    """
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    run_at = date_parser.parse(CONF.run_at) if CONF.run_at else None
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, run_at=run_at, session=session,
                            metadata=CONF.run_meta)
    for test_id in results:
        test_data = results[test_id]
        db_test = api.get_test_by_test_id(test_id, session)
        if not db_test:
            # New test id: seed success/fail counters from this result
            # (any other status, e.g. a skip, contributes 0/0).
            status = test_data['status']
            success = 1 if status == 'success' else 0
            fails = 1 if status == 'fail' else 0
            run_time = subunit.get_duration(test_data['start_time'],
                                            test_data['end_time'])
            db_test = api.create_test(test_id, (success + fails), success,
                                      fails, run_time, None,
                                      session)
        else:
            test_values = increment_counts(db_test, test_data)
            # A skipped test yields no counter updates, so no DB write.
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       test_data['status'],
                                       test_data['start_time'],
                                       test_data['end_time'],
                                       session=session)
        if test_data['metadata']:
            api.update_test_run(test_data['metadata'], test_run.id, session)
        if test_data['attachments']:
            api.add_test_run_attachments(test_data['attachments'],
                                         test_run.id, session)
    session.close()
Example #3
0
    def update_tests(self, job):
        """Create or update a Test row for every test attached to *job*.

        Each test's run_count is incremented; the failure/success
        counters are bumped according to the reported status. SKIPPED
        tests only get the run_count bump.

        :param job: object with a ``tests`` list of dicts carrying
            'name' and 'status' keys.
        """
        self.log.debug("Entering update_tests for job %s" % job)

        for test in job.tests:
            # BUG FIX: 'print test' is Python 2-only syntax and a
            # SyntaxError under Python 3; use the function form.
            print(test)
            t = api.get_test_by_test_id(test['name'], self.session)

            if t is None:
                t = api.create_test(test['name'], session=self.session)

            values = {'run_count': t.run_count + 1}

            if test['status'] in ("FAILED", "REGRESSION"):
                values['failure'] = t.failure + 1
            elif test['status'] in ("PASSED", "FIXED"):
                values['success'] = t.success + 1
            elif test['status'] == "SKIPPED":
                # Skips still count as a run but change no other counter.
                pass

            api.update_test(values, t.id, session=self.session)
def upgrade():
    """Backfill Test.run_time for tests with successes but no run_time.

    Averages the duration of each test's successful TestRun rows and
    writes the mean back onto the Test row.
    """
    # NOTE: '== None' is deliberate — SQLAlchemy renders it as IS NULL.
    # FIX: the join condition is now explicit so SQLAlchemy never has to
    # infer the Test<->TestRun relationship (consistent with the sibling
    # migration in this file).
    query = db_utils.model_query(
        models.Test, db_api.get_session()).filter(
            models.Test.success > 0, models.Test.run_time == None).join(
                models.TestRun,
                models.Test.id == models.TestRun.test_id).filter_by(
                    status='success').values(models.Test.id,
                                             models.TestRun.start_time,
                                             models.TestRun.stop_time)

    # Group the per-run durations by test id.
    results = {}
    for test_run in query:
        delta = read_subunit.get_duration(test_run[1], test_run[2])
        if test_run[0] in results:
            results[test_run[0]].append(delta)
        else:
            results[test_run[0]] = [delta]

    # Write the mean duration back for each test.
    for test in results:
        avg = float(sum(results[test])) / float(len(results[test]))
        db_api.update_test({'run_time': avg}, test)
def upgrade():
    """Populate Test.run_time from the average of its successful runs.

    For every test with at least one success and a NULL run_time,
    compute the mean duration of its successful test runs and store it.
    """
    # '== None' is deliberate: SQLAlchemy translates it to IS NULL.
    query = db_utils.model_query(
        models.Test, db_api.get_session()).filter(
            models.Test.success > 0, models.Test.run_time == None).join(
                models.TestRun,
                models.Test.id == models.TestRun.test_id).filter_by(
                    status='success').values(models.Test.id,
                                             models.TestRun.start_time,
                                             models.TestRun.stop_time)

    # Collect every successful run's duration, keyed by test id.
    durations = {}
    for test_id, start, stop in query:
        durations.setdefault(test_id, []).append(
            read_subunit.get_duration(start, stop))

    # Persist the mean duration for each test.
    for test_id, deltas in durations.items():
        mean = float(sum(deltas)) / float(len(deltas))
        db_api.update_test({'run_time': mean}, test_id)
Example #6
0
 def _update_test(self, test_dict, session, start_time, stop_time):
     """Create or update the Test row for a single subunit test.

     New tests get counters seeded from this result's status; existing
     tests get their counters incremented (a skip changes nothing).

     :return: the created or updated Test DB object.
     """
     test_id = utils.cleanup_test_name(test_dict['id'])
     db_test = db_api.get_test_by_test_id(test_id, session)
     if db_test:
         # Known test: bump its counters based on this run's outcome.
         test_dict['start_time'] = start_time
         test_dict['end_time'] = stop_time
         test_values = shell.increment_counts(db_test, test_dict)
         # A skip yields no counter changes, so avoid the DB write.
         if test_values:
             db_api.update_test(test_values, db_test.id, session)
     else:
         # First time this test id is seen: seed its counters
         # (statuses other than success/fail contribute 0/0).
         status = test_dict['status']
         success = 1 if status == 'success' else 0
         fails = 1 if status == 'fail' else 0
         run_time = read_subunit.get_duration(start_time, stop_time)
         db_test = db_api.create_test(test_id, (success + fails), success,
                                      fails, run_time, session)
     return db_test
Example #7
0
def process_results(results, run_at=None, artifacts=None, run_id=None,
                    run_meta=None, test_attr_prefix=None):
    """Insert converted subunit data into the database.

    Allows for run-specific information to be passed in via kwargs,
    checks CONF if no run-specific information is supplied.

    :param results: subunit stream to be inserted
    :param run_at: Optional time at which the run was started.
    :param artifacts: Link to any artifacts from the test run.
    :param run_id: The run id for the new run. Must be unique.
    :param run_meta: Metadata corresponding to the new run.
    :param test_attr_prefix: Optional test attribute prefix.
    :return: the created run DB object.
    """
    # Explicit kwargs win; otherwise fall back to the matching CONF option.
    run_at = _override_conf(run_at, 'run_at')
    artifacts = _override_conf(artifacts, 'artifacts')
    run_id = _override_conf(run_id, 'run_id')
    run_meta = _override_conf(run_meta, 'run_meta')
    test_attr_prefix = _override_conf(test_attr_prefix, 'test_attr_prefix')

    # run_at may arrive as a datetime or a parseable timestamp string.
    if run_at:
        if not isinstance(run_at, datetime.datetime):
            run_at = date_parser.parse(run_at)
    else:
        run_at = None
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, artifacts,
                            id=run_id, run_at=run_at, session=session)
    if run_meta:
        api.add_run_metadata(run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            # First sighting of this test id: seed counters. xfail counts
            # as a success and uxsuccess as a failure; anything else
            # (e.g. a skip) contributes 0/0.
            if results[test]['status'] in ['success', 'xfail']:
                success = 1
                fails = 0
            elif results[test]['status'] in ['fail', 'uxsuccess']:
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time,
                                      session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            if test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value) for meta in
                                 test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        # Optionally strip the configured prefix before
                        # storing the attr as test metadata.
                        if CONF.remove_test_attr_prefix:
                            normalized_attr = attr[len(
                                CONF.test_attr_prefix):]
                        else:
                            normalized_attr = attr
                        # Only add attrs not already recorded for this test.
                        if ('attr', normalized_attr) not in test_metadata:
                            test_meta_dict = {'attr': normalized_attr}
                            api.add_test_metadata(test_meta_dict, db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'], test_run.id,
                                      session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
    return db_run