def convert_test_runs_list_to_time_series_dict(test_runs_list, resample):
    """Convert a list of test_run models into a time-series dict.

    :param test_runs_list: list of test_run model objects to convert
    :param resample: key into base.resample_matrix selecting the resample
                     frequency used for the numeric run_time series
    :return: dict with two keys: 'numeric' (resampled run_time with rolling
             mean/std dev, successful runs only) and 'data' (per-run id and
             status keyed by ISO-formatted start time)
    """
    test_runs = []
    for test_run in test_runs_list:
        tr = test_run.to_dict()
        # Fold the separately-stored microsecond columns back into the
        # datetime values before building the DataFrame.
        start_time = test_run.start_time
        if start_time and test_run.start_time_microsecond:
            start_time = start_time.replace(
                microsecond=test_run.start_time_microsecond)
            tr['start_time'] = start_time
        tr.pop('start_time_microsecond')
        if test_run.stop_time:
            stop_time = test_run.stop_time
            if test_run.stop_time_microsecond:
                stop_time = stop_time.replace(
                    microsecond=test_run.stop_time_microsecond)
            tr['stop_time'] = stop_time
        tr['run_time'] = read_subunit.get_duration(start_time,
                                                   tr.pop('stop_time'))
        tr.pop('stop_time_microsecond')
        tr.pop('id')
        tr.pop('test_id')
        test_runs.append(tr)

    df = pd.DataFrame(test_runs).set_index('start_time')
    df.index = pd.DatetimeIndex(df.index)
    # Add rolling mean and std dev of run_time to dataframe.
    # pd.rolling_mean/pd.rolling_std were removed from pandas; use the
    # Series.rolling() API instead.
    df['avg_run_time'] = df['run_time'].rolling(window=20).mean()
    df['stddev_run_time'] = df['run_time'].rolling(window=20).std()

    # Resample numeric data for the run_time graph from successful runs.
    # resample(..., how='mean') was removed; use .resample().mean().
    numeric_df = df[df['status'] == 'success'].resample(
        base.resample_matrix[resample]).mean()
    # Drop duplicate or invalid columns
    del numeric_df['run_id']
    del df['run_time']
    # Interpolate missing data in the resampled run_time series.
    # (Previously the whole interpolated DataFrame was assigned into the
    # single 'run_time' column; interpolate just that series.)
    numeric_df['run_time'] = numeric_df['run_time'].interpolate(
        method='time', limit=20)
    # Add rolling mean and std dev of run_time to dataframe
    numeric_df['avg_run_time'] = numeric_df['run_time'].rolling(
        window=20).mean()
    numeric_df['stddev_run_time'] = numeric_df['run_time'].rolling(
        window=20).std()

    # Convert the dataframes to a dict
    numeric_dict = dict(
        (date.isoformat(),
            {
            'run_time': run_time,
            'avg_run_time': avg,
            'std_dev_run_time': stddev,
        }) for date, run_time, avg, stddev in zip(
            numeric_df.index, numeric_df.run_time, numeric_df.avg_run_time,
            numeric_df.stddev_run_time))
    temp_dict = dict(
        (date.isoformat(),
            {
            'run_id': run_id,
            'status': status,
            }) for date, run_id, status in zip(df.index, df.run_id, df.status))

    return {'numeric': numeric_dict, 'data': temp_dict}
示例#2
0
def process_results(results):
    """Insert converted subunit results into the database.

    Creates a run row (with totals from the result set), then for each test
    creates or updates the test row, creates its test_run row, and stores
    any metadata and attachments.

    :param results: dict of converted subunit data; must contain a
                    'run_time' key (popped here) plus one entry per test
    """
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    if CONF.run_at:
        run_at = date_parser.parse(CONF.run_at)
    else:
        run_at = None
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, run_at=run_at, session=session)
    if CONF.run_meta:
        api.add_run_metadata(CONF.run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            # Seed the success/fail counters from this first result
            if results[test]['status'] == 'success':
                success = 1
                fails = 0
            elif results[test]['status'] == 'fail':
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time,
                                      session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            if CONF.test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value) for meta in
                                 test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        if ('attr', attr) not in test_metadata:
                            test_meta_dict = {'attr': attr}
                            # Bug fix: keyword was misspelled 'sesion',
                            # which raised TypeError at runtime.
                            api.add_test_metadata(test_meta_dict, db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'], test_run.id,
                                      session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
示例#3
0
 def stopTestRun(self):
     """Finish the subunit run and persist its total duration."""
     self.hook.stopTestRun()
     end_time = datetime.datetime.utcnow()
     # Rewind the captured subunit stream so it can be re-read later.
     self._subunit.seek(0)
     values = {
         'run_time': read_subunit.get_duration(self.start_time, end_time),
     }
     session = self.session_factory()
     db_api.update_run(values, self.run.id, session=session)
     session.close()
示例#4
0
def running_avg(test, values, result):
    """Update values['run_time'] with a smoothed running average.

    :param test: test model object with prior run_time and success count
    :param values: dict of pending updates; 'run_time' is set here
    :param result: dict with 'start_time' and 'end_time' for the latest run
    :return: the updated values dict
    """
    prior_avg = test.run_time
    run_count = test.success
    latest = subunit.get_duration(result['start_time'],
                                  result['end_time'])
    if not isinstance(prior_avg, float):
        # No prior average recorded yet; seed with the latest duration.
        values['run_time'] = latest
    else:
        # Smoothed moving average limits the effect of a single outlier.
        values['run_time'] = ((run_count * prior_avg) + latest) / (run_count + 1)
    return values
def convert_test_runs_list_to_time_series_dict(test_runs_list, resample):
    """Convert a list of test_run models into a time-series dict.

    :param test_runs_list: list of test_run model objects to convert
    :param resample: key into base.resample_matrix selecting the resample
                     frequency for the numeric run_time series
    :return: dict with two keys: 'numeric' (resampled run_time data from
             successful runs, with rolling mean/std dev) and 'data'
             (per-run id and status), as built by format_output_dicts
    """
    test_runs = []
    for test_run in test_runs_list:
        tr = test_run.to_dict()
        # Populate dict: fold the separately-stored microsecond columns
        # back into the datetime values before building the DataFrame.
        start_time = test_run.start_time
        if start_time and test_run.start_time_microsecond:
            start_time = start_time.replace(
                microsecond=test_run.start_time_microsecond)
            tr['start_time'] = start_time
        tr.pop('start_time_microsecond')
        if test_run.stop_time:
            stop_time = test_run.stop_time
            if test_run.stop_time_microsecond:
                stop_time = stop_time.replace(
                    microsecond=test_run.stop_time_microsecond)
            tr['stop_time'] = stop_time
        tr['run_time'] = read_subunit.get_duration(start_time,
                                                   tr.pop('stop_time'))
        # Drop columns not needed in the time-series output
        tr.pop('stop_time_microsecond')
        tr.pop('id')
        tr.pop('test_id')
        test_runs.append(tr)

    df = pd.DataFrame(test_runs).set_index('start_time')
    df.index = pd.DatetimeIndex(df.index)

    # Resample numeric data for the run_time graph from successful runs
    success_df = df.loc[df['status'] == 'success']
    if len(success_df) == 0:
        # No successful runs: skip the numeric series entirely
        numeric_dict, temp_dict = format_output_dicts(df, None)
        return {'numeric': numeric_dict, 'data': temp_dict}
    numeric_df = df[df['status'] == 'success'].resample(
        base.resample_matrix[resample]).mean()
    # Drop duplicate or invalid columns
    del(numeric_df['run_id'])
    del(df['run_time'])
    # Interpolate missing data for a smooth avg and std dev
    temp_numeric_df = numeric_df.interpolate(method='time', limit=20)
    # Add rolling mean and std dev of run_time to dataframe
    numeric_df['avg_run_time'] = temp_numeric_df['run_time'].rolling(
        window=20).mean()
    numeric_df['stddev_run_time'] = temp_numeric_df['run_time'].rolling(
        window=20).std()
    # Drop resample buckets that ended up entirely empty
    numeric_df = numeric_df.dropna(how='all')

    numeric_dict, temp_dict = format_output_dicts(df, numeric_df)
    return {'numeric': numeric_dict, 'data': temp_dict}
示例#6
0
def process_results(results):
    """Insert converted subunit results into the database.

    Creates a run row (with totals from the result set), then for each
    test creates or updates the test row, creates its test_run row, and
    stores any metadata and attachments.

    :param results: dict of converted subunit data; must contain a
                    'run_time' key (popped here) plus one entry per test
    """
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    run_at = date_parser.parse(CONF.run_at) if CONF.run_at else None
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, run_at=run_at, session=session,
                            metadata=CONF.run_meta)
    for test_key in results:
        result = results[test_key]
        db_test = api.get_test_by_test_id(test_key, session)
        if db_test:
            test_values = increment_counts(db_test, result)
            # Skipped tests produce nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        else:
            # Seed the success/fail counters from this first result;
            # any other status (e.g. skip) contributes zero to both.
            success, fails = {'success': (1, 0),
                              'fail': (0, 1)}.get(result['status'], (0, 0))
            run_time = subunit.get_duration(result['start_time'],
                                            result['end_time'])
            db_test = api.create_test(test_key, (success + fails), success,
                                      fails, run_time, None,
                                      session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       result['status'],
                                       result['start_time'],
                                       result['end_time'],
                                       session=session)
        if result['metadata']:
            api.update_test_run(result['metadata'], test_run.id, session)
        if result['attachments']:
            api.add_test_run_attachments(result['attachments'],
                                         test_run.id, session)
    session.close()
示例#7
0
def get_test_run_duration(test_run_id, session=None):
    """Get the run duration for a specific test_run.

    :param str test_run_id: The test_run's uuid (the id column in the test_run
                            table) to get the duration of
    :param session: optional session object if one isn't provided a new session
                    will be acquired for the duration of this operation

    :return: The duration of the test run in secs
    :rtype: float
    """
    session = session or get_session()
    test_run = get_test_run_by_id(test_run_id, session)
    # Fold the separately-stored microsecond columns into the datetimes so
    # the computed duration has sub-second precision.
    start_time = test_run.start_time
    start_time = start_time.replace(
        microsecond=test_run.start_time_microsecond)
    stop_time = test_run.stop_time
    stop_time = stop_time.replace(microsecond=test_run.stop_time_microsecond)
    # Bug fix: the unadjusted test_run.start_time/stop_time were previously
    # passed here, silently discarding the microsecond adjustment above.
    return read_subunit.get_duration(start_time, stop_time)
def upgrade():
    """Backfill Test.run_time from successful test_run durations.

    Finds all successful test_runs for tests that have no recorded
    run_time yet and stores the average duration per test.
    """
    # The join condition is stated explicitly (as in the later revision of
    # this migration) so the query does not rely on a configured ORM
    # relationship between Test and TestRun.
    query = db_utils.model_query(
        models.Test, db_api.get_session()).filter(
            models.Test.success > 0, models.Test.run_time == None).join(
                models.TestRun,
                models.Test.id == models.TestRun.test_id).filter_by(
                    status='success').values(models.Test.id,
                                             models.TestRun.start_time,
                                             models.TestRun.stop_time)

    # Group the run durations by test id
    results = {}
    for test_run in query:
        delta = read_subunit.get_duration(test_run[1], test_run[2])
        if test_run[0] in results:
            results[test_run[0]].append(delta)
        else:
            results[test_run[0]] = [delta]

    # Store the mean duration of the successful runs for each test
    for test in results:
        avg = float(sum(results[test])) / float(len(results[test]))
        db_api.update_test({'run_time': avg}, test)
def upgrade():
    """Backfill Test.run_time from successful test_run durations.

    Finds all successful test_runs for tests that have no recorded
    run_time yet and stores the average duration per test.
    """
    query = db_utils.model_query(
        models.Test, db_api.get_session()).filter(
            models.Test.success > 0, models.Test.run_time == None).join(
                models.TestRun,
                models.Test.id == models.TestRun.test_id).filter_by(
                    status='success').values(models.Test.id,
                                             models.TestRun.start_time,
                                             models.TestRun.stop_time)

    # Group the run durations by test id
    durations = {}
    for test_id, start, stop in query:
        durations.setdefault(test_id, []).append(
            read_subunit.get_duration(start, stop))

    # Write the mean duration back for each test
    for test_id, deltas in durations.items():
        avg = float(sum(deltas)) / float(len(deltas))
        db_api.update_test({'run_time': avg}, test_id)
示例#10
0
 def _update_test(self, test_dict, session, start_time, stop_time):
     """Create or update the Test row for a single subunit result.

     :param test_dict: subunit result dict with 'id' and 'status' keys
     :param session: database session to use
     :param start_time: start time of the test run
     :param stop_time: stop time of the test run
     :return: the created or existing test database object
     """
     test_id = utils.cleanup_test_name(test_dict['id'])
     db_test = db_api.get_test_by_test_id(test_id, session)
     if db_test:
         test_dict['start_time'] = start_time
         test_dict['end_time'] = stop_time
         test_values = shell.increment_counts(db_test, test_dict)
         # Skipped tests produce nothing to update
         if test_values:
             db_api.update_test(test_values, db_test.id, session)
     else:
         # Seed the success/fail counters from this first result; any
         # other status (e.g. skip) contributes zero to both.
         success, fails = {'success': (1, 0),
                           'fail': (0, 1)}.get(test_dict['status'], (0, 0))
         run_time = read_subunit.get_duration(start_time, stop_time)
         db_test = db_api.create_test(test_id, (success + fails), success,
                                      fails, run_time, session)
     return db_test
示例#11
0
 def test_get_duration(self):
     """get_duration returns the elapsed seconds between two datetimes."""
     begin = datetime.datetime(1914, 6, 28, 10, 45, 0)
     end = datetime.datetime(1914, 6, 28, 10, 45, 50)
     self.assertEqual(subunit.get_duration(begin, end), 50.000000)
示例#12
0
def process_results(results, run_at=None, artifacts=None, run_id=None,
                    run_meta=None, test_attr_prefix=None):
    """Insert converted subunit data into the database.

    Allows for run-specific information to be passed in via kwargs,
    checks CONF if no run-specific information is supplied.

    :param results: subunit stream to be inserted
    :param run_at: Optional time at which the run was started.
    :param artifacts: Link to any artifacts from the test run.
    :param run_id: The run id for the new run. Must be unique.
    :param run_meta: Metadata corresponding to the new run.
    :param test_attr_prefix: Optional test attribute prefix.
    :return: the created run database object
    """
    # Explicit kwargs take precedence over the corresponding CONF options
    run_at = _override_conf(run_at, 'run_at')
    artifacts = _override_conf(artifacts, 'artifacts')
    run_id = _override_conf(run_id, 'run_id')
    run_meta = _override_conf(run_meta, 'run_meta')
    test_attr_prefix = _override_conf(test_attr_prefix, 'test_attr_prefix')

    if run_at:
        if not isinstance(run_at, datetime.datetime):
            run_at = date_parser.parse(run_at)
    else:
        run_at = None
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, artifacts,
                            id=run_id, run_at=run_at, session=session)
    if run_meta:
        api.add_run_metadata(run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            # Seed the success/fail counters; xfail counts as a success
            # and uxsuccess as a failure.
            if results[test]['status'] in ['success', 'xfail']:
                success = 1
                fails = 0
            elif results[test]['status'] in ['fail', 'uxsuccess']:
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time,
                                      session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            if test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value) for meta in
                                 test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        if CONF.remove_test_attr_prefix:
                            # Bug fix: strip using the (possibly
                            # overridden) local test_attr_prefix, not
                            # CONF.test_attr_prefix, so the kwarg
                            # override is honored here too.
                            normalized_attr = attr[len(test_attr_prefix):]
                        else:
                            normalized_attr = attr
                        if ('attr', normalized_attr) not in test_metadata:
                            test_meta_dict = {'attr': normalized_attr}
                            api.add_test_metadata(test_meta_dict, db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'], test_run.id,
                                      session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
    return db_run
示例#13
0
 def test_get_duration_no_start(self):
     """A missing start time yields a None duration."""
     end = datetime.datetime(1914, 6, 28, 10, 45, 50)
     self.assertIsNone(subunit.get_duration(None, end))
 def _get_run_time(self, test_run):
     """Return the run duration for successful runs, otherwise 0."""
     if not Status(test_run['status']).is_success:
         return 0
     return read_subunit.get_duration(test_run['start_time'],
                                      test_run['stop_time'])
示例#15
0
 def test_get_duration_no_end(self):
     """A missing stop time yields a None duration."""
     begin = datetime.datetime(1914, 6, 28, 10, 45, 50)
     self.assertIsNone(subunit.get_duration(begin, None))
示例#16
0
 def test_get_duration_no_start(self):
     """When the start time is absent the duration is None."""
     stop = datetime.datetime(1914, 6, 28, 10, 45, 50)
     result = subunit.get_duration(None, stop)
     self.assertIsNone(result)
示例#17
0
 def test_get_duration(self):
     """A 50-second span produces a duration of exactly 50.0 seconds."""
     start = datetime.datetime(1914, 6, 28, 10, 45, 0)
     stop = datetime.datetime(1914, 6, 28, 10, 45, 50)
     result = subunit.get_duration(start, stop)
     self.assertEqual(result, 50.000000)
示例#18
0
 def test_get_duration_no_end(self):
     """When the stop time is absent the duration is None."""
     start = datetime.datetime(1914, 6, 28, 10, 45, 50)
     result = subunit.get_duration(start, None)
     self.assertIsNone(result)