Code example #1
def generate_series():
    session = api.get_session()
    if CONF.start_date:
        start_date = datetime.datetime.strptime(CONF.start_date, '%Y-%m-%d')
    else:
        start_date = None
    if CONF.stop_date:
        stop_date = datetime.datetime.strptime(CONF.stop_date, '%Y-%m-%d')
    else:
        stop_date = None

    run_status = api.get_runs_by_status_grouped_by_run_metadata(
        CONF.command.metadata_key, start_date=start_date,
        stop_date=stop_date, session=session)

    perc_data = {}
    for key in run_status:
        if run_status[key].get('pass'):
            pass_num = float(run_status[key]['pass'])
        else:
            pass_num = 0.0
        if run_status[key].get('fail'):
            fail_num = float(run_status[key]['fail'])
        else:
            fail_num = 0.0
        # Guard against keys with no pass/fail counts to avoid a
        # ZeroDivisionError.
        total_num = pass_num + fail_num
        if not total_num:
            continue
        fail_rate = fail_num / total_num * 100
        if fail_rate > 0.0:
            perc_data[key] = fail_rate

    plt.figure()
    # list() so the dict view is a proper sequence for matplotlib.
    plt.barh(range(len(perc_data)), list(perc_data.values()), align='center')
    locs, labels = plt.yticks(range(len(perc_data)), list(perc_data.keys()))
    plt.xlabel('Failure Percentage')
    plt.tight_layout()
    plt.savefig(CONF.output, dpi=900)
Code example #2
def upgrade():
    migration_file = ('1679b5bc102c_add_subsecond_columns_to_test_runs_table.'
                      'mysql_upgrade.sql')
    migration_dir = os.path.dirname(os.path.realpath(__file__))
    sql_path = os.path.join(migration_dir, migration_file)
    migration_context = context.get_context()
    if migration_context.dialect.name == 'mysql':
        with open(sql_path, 'r') as sql_file:
            op.execute(sql_file.read())
    else:
        op.add_column('test_runs', sa.Column('start_time_microsecond',
                                             sa.Integer(), default=0))
        op.add_column('test_runs', sa.Column('stop_time_microsecond',
                                             sa.Integer(), default=0))
        if not CONF.disable_microsecond_data_migration:
            session = db_api.get_session()
            query = db_utils.model_query(models.TestRun, session).values(
                models.TestRun.id, models.TestRun.start_time,
                models.TestRun.stop_time)
            for test_run in query:
                start_micro = test_run[1].microsecond
                stop_micro = test_run[2].microsecond
                values = {'start_time_microsecond': start_micro,
                          'stop_time_microsecond': stop_micro}
                db_api.update_test_run(values, test_run[0], session)
            session.close()
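Only upgrade() appears in this excerpt; a matching downgrade() for the non-MySQL branch would drop the two columns again (a minimal sketch using the same Alembic op API; the MySQL path would need its own downgrade SQL script):

def downgrade():
    # Reverse of the op.add_column calls in the non-MySQL branch above.
    op.drop_column('test_runs', 'start_time_microsecond')
    op.drop_column('test_runs', 'stop_time_microsecond')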
Code example #3
File: run_time.py Project: austin81/subunit2sql
def generate_series():
    test_id = CONF.command.test_id
    session = api.get_session()
    run_times = api.get_test_run_time_series(test_id, session)
    if not CONF.title:
        test = api.get_test_by_id(test_id, session)
    session.close()
    ts = pd.Series(run_times)
    ts = utils.filter_dates(ts)
    # pd.rolling_mean/pd.rolling_std were removed in pandas 0.18.
    mean = ts.rolling(window=20).mean()
    rolling_std = ts.rolling(window=20).std()
    plt.figure()
    if not CONF.title:
        plt.title(test.test_id)
    else:
        plt.title(CONF.title)
    plt.ylabel("Time (sec.)")
    plt.plot(ts.index, ts, "k", label="Run Time")
    plt.plot(mean.index, mean, "b", label="Avg. Run Time")
    upper_std_dev = mean + 2 * rolling_std
    lower_std_dev = mean - 2 * rolling_std
    # Set negative numbers to 0
    lower_std_dev[lower_std_dev < 0] = 0
    plt.fill_between(rolling_std.index, upper_std_dev, lower_std_dev,
                     color="b", alpha=0.2, label="std dev")
    plt.legend()
    plt.savefig(CONF.output, dpi=900)
    return ts
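The pattern this and several later examples share (rolling mean with a band of plus or minus two standard deviations, floored at zero) can be distilled into a small helper. A sketch, not project code:

import pandas as pd

def rolling_band(ts, window=20):
    # Rolling mean and a +/- 2 standard deviation envelope; the lower edge
    # is clipped at zero because run times cannot be negative.
    roll = ts.rolling(window=window)
    mean, std = roll.mean(), roll.std()
    return mean, mean + 2 * std, (mean - 2 * std).clip(lower=0)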
Code example #4
def get_jobs_per_project(session=None):
    session = session or api.get_session()
    rm_build_change = aliased(models.RunMetadata)
    rm_build_patchset = aliased(models.RunMetadata)
    rm_project = aliased(models.RunMetadata)
    select_query = session.query(
        rm_build_change.value.label('build_change'),
        rm_build_patchset.value.label('build_patchset'),
        rm_project.value.label('project'),
        func.count().label('num_jobs')).select_from(rm_build_change)
    all_run_metadata_query = select_query.filter(
        rm_build_change.run_id == rm_build_patchset.run_id).filter(
            rm_project.run_id == rm_build_patchset.run_id).filter(
                rm_build_change.key == 'build_change').filter(
                    rm_build_patchset.key == 'build_patchset').filter(
                        rm_project.key == 'project').filter(
                            rm_project.value.in_(PROJECTS)).group_by(
                                rm_build_change.value, rm_build_patchset.value,
                                rm_project.value)
    result = dict()
    all_data = all_run_metadata_query.all()
    for change, patchset, project, num_jobs in all_data:
        if project not in result:
            result[project] = dict()
        change_patchset = change + ":" + patchset
        result[project][change_patchset] = int(num_jobs)
    return result
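The three RunMetadata aliases above express a self-join: each run contributes one metadata row per key, so joining the table to itself on run_id lines up the change number, patchset, and project of the same run. Roughly the SQL the query builds (an illustrative approximation, assuming the run_metadata table name from subunit2sql's models; the project itself never materializes this string):

APPROX_SQL = """
SELECT c.value AS build_change, p.value AS build_patchset,
       pr.value AS project, COUNT(*) AS num_jobs
FROM run_metadata c, run_metadata p, run_metadata pr
WHERE c.run_id = p.run_id AND pr.run_id = p.run_id
  AND c.key = 'build_change' AND p.key = 'build_patchset'
  AND pr.key = 'project' AND pr.value IN (...)  -- the PROJECTS list
GROUP BY c.value, p.value, pr.value
"""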
Code example #5
File: run_time_meta.py Project: ader1990/subunit2sql
def generate_series():
    session = api.get_session()
    if CONF.start_date:
        start_date = datetime.datetime.strptime(CONF.start_date, '%Y-%m-%d')
    else:
        start_date = None
    if CONF.stop_date:
        stop_date = datetime.datetime.strptime(CONF.stop_date, '%Y-%m-%d')
    else:
        stop_date = None
    run_times = api.get_run_times_grouped_by_run_metadata_key(
        CONF.command.metadata_key, start_date=start_date,
        stop_date=stop_date, session=session)
    # dict.iteritems() is Python 2 only; items() works everywhere.
    df = pd.DataFrame(dict(
        [(k, pd.Series(v)) for k, v in run_times.items()]))
    if not CONF.title:
        title = "Run aggregate run time grouped by metadata"
    else:
        title = CONF.title
    # NOTE(mtreinish): Decrease label font size for the worst case where we
    # have tons of groups
    matplotlib.rcParams['xtick.labelsize'] = '3'
    plt.figure()
    plt.title(title)
    df.plot(kind='box', rot=90)
    plt.ylabel('Time (sec.)')
    plt.tight_layout()
    plt.savefig(CONF.output, dpi=900)
Code example #6
def generate_series():
    session = api.get_session()
    if CONF.start_date:
        start_date = datetime.datetime.strptime(CONF.start_date, '%Y-%m-%d')
    else:
        start_date = None
    if CONF.stop_date:
        stop_date = datetime.datetime.strptime(CONF.stop_date, '%Y-%m-%d')
    else:
        stop_date = None
    run_times = api.get_run_times_grouped_by_run_metadata_key(
        CONF.command.metadata_key,
        start_date=start_date,
        stop_date=stop_date,
        session=session)
    # dict.iteritems() is Python 2 only; items() works everywhere.
    df = pd.DataFrame(
        dict([(k, pd.Series(v)) for k, v in run_times.items()]))
    if not CONF.title:
        title = "Run aggregate run time grouped by metadata"
    else:
        title = CONF.title
    # NOTE(mtreinish): Decrease label font size for the worst case where we
    # have tons of groups
    matplotlib.rcParams['xtick.labelsize'] = '3'
    plt.figure()
    plt.title(title)
    df.plot(kind='box', rot=90)
    plt.ylabel('Time (sec.)')
    plt.tight_layout()
    plt.savefig(CONF.output, dpi=900)
Code example #7
def generate_series():
    session = api.get_session()
    test_starts = api.get_test_run_series(session)
    session.close()
    # resample(how=...) and pd.rolling_* were removed from pandas long ago.
    ts = pd.Series(test_starts).resample('D').sum()
    daily_count = utils.filter_dates(ts)
    mean = daily_count.rolling(window=10).mean()
    rolling_std = daily_count.rolling(window=10).std()
    plt.figure()
    title = CONF.title or 'Number of tests run'
    plt.title(title)
    plt.ylabel('Number of tests')
    plt.plot(daily_count.index, daily_count, 'k', label='Daily Test Count')
    plt.plot(mean.index, mean, 'b', label='Avg. Daily Test Count')
    upper_std_dev = mean + 2 * rolling_std
    lower_std_dev = mean - 2 * rolling_std
    # Set negative numbers to 0
    lower_std_dev[lower_std_dev < 0] = 0
    plt.fill_between(rolling_std.index,
                     lower_std_dev,
                     upper_std_dev,
                     color='b',
                     alpha=0.2,
                     label='std dev')
    plt.legend()
    plt.savefig(CONF.output)
Code example #8
File: gerrkins.py Project: shadansari/gerrkins
    def __init__(self, config):
        self.log = logging.getLogger("gerrkins")
        self.db_uri = config.get('subunit', 'subunit_uri')

        shell.parse_args([])
        shell.CONF.set_override('connection', self.db_uri, group='database')
        self.session = api.get_session()
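A hypothetical way to construct the class above; the [subunit] section and subunit_uri option come from the config.get() call, while the Gerrkins class name and the connection URI are guesses:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[subunit]
subunit_uri = sqlite:///subunit.db
""")
client = Gerrkins(config)  # 'Gerrkins' is an assumed name for the class above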
Code example #9
def generate_series():
    session = api.get_session()
    start_date = None
    stop_date = None
    if CONF.start_date:
        start_date = date_parser.parse(CONF.start_date)
    if CONF.stop_date:
        stop_date = date_parser.parse(CONF.stop_date)
    ci_time = {}
    ci_time_temp = {}
    project_run_times = api.get_run_times_grouped_by_run_metadata_key(
        CONF.command.key, start_date=start_date, stop_date=stop_date,
        session=session)
    for project in project_run_times:
        ci_time_temp[project] = numpy.sum(project_run_times[project])
    sorted_times = sorted(ci_time_temp.items(), key=operator.itemgetter(1),
                          reverse=True)
    if CONF.command.num:
        sorted_times = sorted_times[:CONF.command.num]
    for project, time in sorted_times:
        ci_time[project] = time

    title = CONF.title or 'Aggregate Run Time grouped by %s' % CONF.command.key
    session.close()
    # list() so the dict views are proper sequences for matplotlib.
    plt.bar(range(len(ci_time)), list(ci_time.values()), align='center',
            width=.1)
    plt.xticks(range(len(ci_time)), list(ci_time.keys()), rotation=90,
               fontsize=8)
    plt.title(title)
    plt.tight_layout()
    plt.savefig(CONF.output, dpi=CONF.dpi)
Code example #10
def upgrade():
    migration_file = ('1679b5bc102c_add_subsecond_columns_to_test_runs_table.'
                      'mysql_upgrade.sql')
    migration_dir = os.path.dirname(os.path.realpath(__file__))
    sql_path = os.path.join(migration_dir, migration_file)
    migration_context = context.get_context()
    if migration_context.dialect.name == 'mysql':
        with open(sql_path, 'r') as sql_file:
            op.execute(sql_file.read())
    else:
        op.add_column(
            'test_runs',
            sa.Column('start_time_microsecond', sa.Integer(), default=0))
        op.add_column(
            'test_runs',
            sa.Column('stop_time_microsecond', sa.Integer(), default=0))
        if not CONF.disable_microsecond_data_migration:
            session = db_api.get_session()
            query = db_utils.model_query(models.TestRun, session).values(
                models.TestRun.id, models.TestRun.start_time,
                models.TestRun.stop_time)
            for test_run in query:
                start_micro = test_run[1].microsecond
                stop_micro = test_run[2].microsecond
                values = {
                    'start_time_microsecond': start_micro,
                    'stop_time_microsecond': stop_micro
                }
                db_api.update_test_run(values, test_run[0], session)
            session.close()
Code example #11
File: shell.py Project: masayukig/subunit2sql
def process_results(results):
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    if CONF.run_at:
        run_at = date_parser.parse(CONF.run_at)
    else:
        run_at = None
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, run_at=run_at, session=session)
    if CONF.run_meta:
        api.add_run_metadata(CONF.run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            if results[test]['status'] == 'success':
                success = 1
                fails = 0
            elif results[test]['status'] == 'fail':
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time,
                                      session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            if CONF.test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value) for meta in
                                 test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        if ('attr', attr) not in test_metadata:
                            test_meta_dict = {'attr': attr}
                            api.add_test_metadata(test_meta_dict, db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'], test_run.id,
                                      session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
Code example #12
File: write_subunit.py Project: austin81/subunit2sql
def sql2subunit(run_id, output=sys.stdout):
    session = api.get_session()
    test_runs = api.get_tests_run_dicts_from_run_id(run_id, session)
    session.close()
    output = subunit.v2.StreamResultToBytes(output)
    output.startTestRun()
    for test_id in test_runs:
        test = test_runs[test_id]
        write_test(output, test["start_time"], test["stop_time"],
                   test["status"], test_id, test["metadata"])
    output.stopTestRun()
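subunit.v2.StreamResultToBytes emits a binary stream, so a file passed as output should be opened in binary mode. A usage sketch (the run id and file name are made up):

with open('my-run.subunit', 'wb') as out:  # 'wb': the v2 stream is bytes
    sql2subunit('3fa85f64-5717-4562-b3fc-2c963f66afa6', output=out)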
Code example #13
def main():
    shell.parse_args([])
    shell.CONF.set_override('connection', DB_URI, group='database')
    session = api.get_session()
    run_ids = get_run_ids(session)
    session.close()
    preseed_path = os.path.join(TEMPEST_PATH, 'preseed-streams')
    os.mkdir(preseed_path)
    for run in run_ids:
        # 'wb': sql2subunit writes a binary subunit v2 stream.
        with open(os.path.join(preseed_path, run + '.subunit'), 'wb') as fd:
            write_subunit.sql2subunit(run, fd)
Code example #14
def main():
    shell.parse_args([])
    shell.CONF.set_override('connection', DB_URI, group='database')
    session = api.get_session()
    run_ids = get_run_ids(session)
    session.close()
    preseed_path = os.path.join(TEMPEST_PATH, 'preseed-streams')
    os.mkdir(preseed_path)
    for run in run_ids:
        # 'wb': sql2subunit writes a binary subunit v2 stream.
        with open(os.path.join(preseed_path, run + '.subunit'), 'wb') as fd:
            write_subunit.sql2subunit(run, fd)
Code example #15
File: write_subunit.py Project: mguiney/subunit2sql
def avg_sql2subunit(output=sys.stdout):
    session = api.get_session()
    tests = api.get_all_tests(session=session)
    session.close()
    output = subunit.v2.StreamResultToBytes(output)
    output.startTestRun()
    for test in tests:
        if not test.run_time:
            continue
        start_time = datetime.datetime.now()
        stop_time = start_time + datetime.timedelta(0, test.run_time)
        write_test(output, start_time, stop_time, 'success', test.test_id, [])
    output.stopTestRun()
Code example #16
File: write_subunit.py Project: ader1990/subunit2sql
def avg_sql2subunit(output=sys.stdout):
    session = api.get_session()
    tests = api.get_all_tests(session=session)
    session.close()
    output = subunit.v2.StreamResultToBytes(output)
    output.startTestRun()
    for test in tests:
        if not test.run_time:
            continue
        start_time = datetime.datetime.now()
        stop_time = start_time + datetime.timedelta(0, test.run_time)
        write_test(output, start_time, stop_time, 'success', test.test_id, [])
    output.stopTestRun()
Code example #17
File: test_api.py Project: girardiv/subunit2sql
 def test_delete_old_runs(self):
     run_a = api.create_run(run_at=datetime.datetime(
         1914, 6, 28, 10, 45, 0))
     run_b = api.create_run()
     api.add_run_metadata({'key': 'value'}, run_b.id)
     api.add_run_metadata({'key': 'not_so_much_a_value'}, run_a.id)
     api.delete_old_runs()
     runs = api.get_all_runs()
     self.assertEqual(1, len(runs))
     self.assertEqual(1, api.get_session().query(
         models.RunMetadata.id).count())
     self.assertEqual(run_b.id, runs[0].id)
     self.assertEqual(1, len(api.get_run_metadata(run_b.uuid)))
     self.assertEqual(0, len(api.get_run_metadata(run_a.uuid)))
Code example #18
File: write_subunit.py Project: ader1990/subunit2sql
def sql2subunit(run_id, output=sys.stdout):
    session = api.get_session()
    test_runs = api.get_tests_run_dicts_from_run_id(run_id, session)
    session.close()
    output = subunit.v2.StreamResultToBytes(output)
    output.startTestRun()
    for test_id in test_runs:
        test = test_runs[test_id]
        # NOTE(mtreinish): test_run_metadata is not guaranteed to be present
        # for the test_run.
        metadata = test.get('metadata', None)
        write_test(output, test['start_time'], test['stop_time'],
                   test['status'], test_id, metadata)
    output.stopTestRun()
Code example #19
File: run_time.py Project: mguiney/subunit2sql
def generate_series():
    session = api.get_session()
    test_id = api.get_id_from_test_id(CONF.command.test_id, session)
    if not test_id:
        print("The test_id %s was not found in the database" %
              CONF.command.test_id)
        exit(2)
    run_times = api.get_test_run_time_series(test_id, session)
    if not run_times:
        print("There was no data found in the database")
        exit(3)
    if not CONF.title:
        test = api.get_test_by_id(test_id, session)
    session.close()
    ts = pd.Series(run_times)
    ts = utils.filter_dates(ts)
    if ts.count() == 0:
        print("No data available. Check your query and try again.")
        exit(-1)
    # pd.rolling_mean/pd.rolling_std were removed in pandas 0.18.
    mean = ts.rolling(window=20).mean()
    rolling_std = ts.rolling(window=20).std()

    # Format the x-axis with dates; create the figure before setting the
    # title and labels so they attach to it instead of a throwaway figure.
    fig, ax = plt.subplots(1)
    fig.autofmt_xdate()
    xfmt = dates.DateFormatter("%b %d %Y")
    ax.xaxis_date()
    ax.xaxis.set_major_formatter(xfmt)
    if not CONF.title:
        plt.title(test.test_id)
    else:
        plt.title(CONF.title)
    plt.ylabel('Time (sec.)')

    plt.plot(ts.index, ts, 'k', label='Run Time')
    plt.plot(mean.index, mean, 'b', label='Avg. Run Time')
    upper_std_dev = mean + 2 * rolling_std
    lower_std_dev = mean - 2 * rolling_std
    # Set negative numbers to 0
    lower_std_dev[lower_std_dev < 0] = 0
    plt.fill_between(rolling_std.index,
                     upper_std_dev,
                     lower_std_dev,
                     color='b',
                     alpha=0.2,
                     label='std dev')
    plt.legend()
    plt.savefig(CONF.output, dpi=900)
    return ts
Code example #20
def generate_series(test_id):
    session = api.get_session()
    run_times = api.get_test_run_time_series(test_id, session)
    session.close()
    ts = pd.Series(run_times)

#    ts = ts.truncate(after='11/26/2014')
#    print len(ts)
#    plot1 = pd.rolling_median(test, 100).plot()
    # pd.rolling_mean was removed in pandas 0.18; use the Rolling API.
    plot = ts.rolling(window=50).mean().plot()
    plot = ts.plot()
    fig = plot.get_figure()
    fig.savefig('/tmp/test.eps')
    return ts
Code example #21
File: write_subunit.py Project: mguiney/subunit2sql
def sql2subunit(run_id, output=sys.stdout):
    session = api.get_session()
    test_runs = api.get_tests_run_dicts_from_run_id(run_id, session)
    session.close()
    output = subunit.v2.StreamResultToBytes(output)
    output.startTestRun()
    for test_id in test_runs:
        test = test_runs[test_id]
        # NOTE(mtreinish): test_run_metadata is not guaranteed to be present
        # for the test_run.
        metadata = test.get('metadata', None)
        write_test(output, test['start_time'], test['stop_time'],
                   test['status'], test_id, metadata)
    output.stopTestRun()
Code example #22
def main():
    shell.parse_args([])
    shell.CONF.set_override('connection', DB_URI, group='database')
    session = api.get_session()
    runs = api.get_recent_successful_runs_by_run_metadata(
        'build_name', 'gate-tempest-dsvm-neutron-full',
        num_runs=10, session=session)
    session.close()
    preseed_path = os.path.join(TEMPEST_PATH, 'preseed-streams')
    if not os.path.isdir(preseed_path):
        os.mkdir(preseed_path)
    for run in runs:
        # 'wb': sql2subunit writes a binary subunit v2 stream.
        with open(os.path.join(preseed_path, run.uuid + '.subunit'),
                  'wb') as fd:
            write_subunit.sql2subunit(run.uuid, fd)
Code example #23
def generate_series():
    if CONF.start_date:
        start_date = datetime.datetime.strptime(CONF.start_date, '%Y-%m-%d')
    else:
        start_date = None
    if CONF.stop_date:
        stop_date = datetime.datetime.strptime(CONF.stop_date, '%Y-%m-%d')
    else:
        stop_date = None
    session = api.get_session()
    test_starts = api.get_test_run_series(start_date=start_date,
                                          stop_date=stop_date,
                                          session=session,
                                          key=CONF.command.dcmd_key,
                                          value=CONF.command.dcmd_value)
    session.close()
    ts = pd.Series(test_starts)
    daily_count = ts.resample('D').sum().fillna(value=0)
    mean = daily_count.rolling(window=10, center=False).mean()
    rolling_std = daily_count.rolling(window=10, center=False).std()
    title = CONF.title or 'Number of Tests run Daily'
    # plt.subplots() opens the figure; the labels below attach to it.
    fig, ax = plt.subplots(1)
    fig.autofmt_xdate()
    plt.title(title)
    plt.ylabel('Number of tests')
    xfmt = dates.DateFormatter("%b %d %Y")
    ax.xaxis_date()
    ax.xaxis.set_major_formatter(xfmt)

    plt.plot(daily_count.index[10:],
             daily_count[10:],
             'k',
             label='Daily Test Count')
    plt.plot(mean.index[10:], mean[10:], 'b', label='Avg. Daily Test Count')
    upper_std_dev = mean + 2 * rolling_std
    lower_std_dev = mean - 2 * rolling_std
    # Set negative numbers to 0
    lower_std_dev[lower_std_dev < 0] = 0
    plt.fill_between(rolling_std.index[10:],
                     lower_std_dev[10:],
                     upper_std_dev[10:],
                     color='b',
                     alpha=0.2,
                     label='Std Dev')
    plt.legend()
    plt.savefig(CONF.output, dpi=900)
Code example #24
 def test_delete_old_runs(self):
     run_a = api.create_run(
         run_at=datetime.datetime(1914, 6, 28, 10, 45, 0))
     run_b = api.create_run()
     api.add_run_metadata({'key': 'value'}, run_b.id)
     api.add_run_metadata({'key': 'not_so_much_a_value'}, run_a.id)
     api.delete_old_runs()
     runs = api.get_all_runs()
     self.assertEqual(1, len(runs))
     self.assertEqual(
         1,
         api.get_session().query(models.RunMetadata.id).count())
     self.assertEqual(run_b.id, runs[0].id)
     self.assertEqual(1, len(api.get_run_metadata(run_b.uuid)))
     self.assertEqual(0, len(api.get_run_metadata(run_a.uuid)))
Code example #25
File: agg_count.py Project: ader1990/subunit2sql
def generate_series():
    session = api.get_session()
    test_dict = {}
    if not CONF.start_date and not CONF.stop_date:
        tests = api.get_all_tests(session)
        for test in tests:
            if CONF.command.test_ids:
                if test.test_id in CONF.command.test_ids:
                    test_dict[test.test_id] = {
                        'success': int(test.success),
                        'failure': int(test.failure),
                    }
            else:
                test_dict[test.test_id] = {
                    'success': int(test.success),
                    'failure': int(test.failure),
                }
    else:
        start_date = None
        stop_date = None
        if CONF.start_date:
            start_date = date_parser.parse(CONF.start_date)
        if CONF.stop_date:
            stop_date = date_parser.parse(CONF.stop_date)
        if CONF.command.test_ids:
            ids = [api.get_id_from_test_id(x) for x in CONF.command.test_ids]
        else:
            ids = api.get_ids_for_all_tests(session)
        for test in ids:
            test_dict[test] = api.get_test_counts_in_date_range(
                test, start_date, stop_date, session)
    if CONF.command.no_success_graph:
        for test in test_dict:
            test_dict[test].pop('success')
    if CONF.command.skip_graph:
        for test in test_dict:
            if not test_dict[test].get('skips'):
                test_id = api.get_id_from_test_id(test)
                test_dict[test]['skips'] = api.get_skip_counts(test_id)
    session.close()
    if not CONF.title:
        title = "Test status counts"
    else:
        title = CONF.title
    df = pd.DataFrame.from_dict(test_dict, orient='index')
    plot = df.plot(kind='barh', stacked=True).set_title(title)
    fig = plot.get_figure()
    fig.savefig(CONF.output)
Code example #26
def generate_series():
    session = api.get_session()
    test_id = api.get_id_from_test_id(CONF.command.test_id, session)
    if not test_id:
        print("The test_id %s was not found in the database" %
              CONF.command.test_id)
        exit(2)
    run_times = api.get_test_run_time_series(test_id, session)
    if not run_times:
        print("There was no data found in the database")
        exit(3)
    if not CONF.title:
        test = api.get_test_by_id(test_id, session)
    session.close()
    ts = pd.Series(run_times)
    ts = utils.filter_dates(ts)
    if ts.count() == 0:
        print("No data available. Check your query and try again.")
        exit(-1)
    roll = ts.rolling(window=20, center=False)
    mean = roll.mean()
    rolling_std = roll.std()
    # Format the x-axis with dates; create the figure before setting the
    # title and labels so they attach to it instead of a throwaway figure.
    fig, ax = plt.subplots(1)
    fig.autofmt_xdate()
    xfmt = dates.DateFormatter("%b %d %Y")
    ax.xaxis_date()
    ax.xaxis.set_major_formatter(xfmt)
    if not CONF.title:
        plt.title(test.test_id)
    else:
        plt.title(CONF.title)
    plt.ylabel('Time (sec.)')

    plt.plot(ts.index, ts, 'ko', label='Run Time', markersize=0.45)
    plt.plot(mean.index, mean, 'b', label='Avg. Run Time', linewidth=0.45)
    upper_std_dev = mean + 2 * rolling_std
    lower_std_dev = mean - 2 * rolling_std
    # Set negative numbers to 0
    lower_std_dev[lower_std_dev < 0] = 0
    plt.fill_between(rolling_std.index, upper_std_dev,
                     lower_std_dev, color='b', alpha=0.2,
                     label='std dev')
    plt.legend()
    plt.savefig(CONF.output, dpi=CONF.dpi)
    return ts
Code example #27
File: agg_count.py Project: mguiney/subunit2sql
def generate_series():
    session = api.get_session()
    test_dict = {}
    if not CONF.start_date and not CONF.stop_date:
        tests = api.get_all_tests(session)
        for test in tests:
            if CONF.command.test_ids:
                if test.test_id in CONF.command.test_ids:
                    test_dict[test.test_id] = {
                        'success': int(test.success),
                        'failure': int(test.failure),
                    }
            else:
                test_dict[test.test_id] = {
                    'success': int(test.success),
                    'failure': int(test.failure),
                }
    else:
        start_date = None
        stop_date = None
        if CONF.start_date:
            start_date = date_parser.parse(CONF.start_date)
        if CONF.stop_date:
            stop_date = date_parser.parse(CONF.stop_date)
        if CONF.command.test_ids:
            ids = [api.get_id_from_test_id(x) for x in CONF.command.test_ids]
        else:
            ids = api.get_ids_for_all_tests(session)
        for test in ids:
            test_dict[test] = api.get_test_counts_in_date_range(
                test, start_date, stop_date, session)
    if CONF.command.no_success_graph:
        for test in test_dict:
            test_dict[test].pop('success')
    if CONF.command.skip_graph:
        for test in test_dict:
            if not test_dict[test].get('skips'):
                test_id = api.get_id_from_test_id(test)
                test_dict[test]['skips'] = api.get_skip_counts(test_id)
    session.close()
    if not CONF.title:
        title = "Test status counts"
    else:
        title = CONF.title
    df = pd.DataFrame.from_dict(test_dict, orient='index')
    plot = df.plot(kind='barh', stacked=True).set_title(title)
    fig = plot.get_figure()
    fig.savefig(CONF.output)
Code example #28
def main():
    shell.parse_args([])
    shell.CONF.set_override('connection', DB_URI, group='database')
    session = api.get_session()
    runs = api.get_recent_successful_runs_by_run_metadata(
        'build_name',
        'gate-tempest-dsvm-neutron-full',
        num_runs=10,
        session=session)
    session.close()
    preseed_path = os.path.join(TEMPEST_PATH, 'preseed-streams')
    if not os.path.isdir(preseed_path):
        os.mkdir(preseed_path)
    for run in runs:
        # 'wb': sql2subunit writes a binary subunit v2 stream.
        with open(os.path.join(preseed_path, run.uuid + '.subunit'),
                  'wb') as fd:
            write_subunit.sql2subunit(run.uuid, fd)
Code example #29
File: shell.py Project: arithx/subunit2sql
def process_results(results):
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    if CONF.run_at:
        run_at = date_parser.parse(CONF.run_at)
    else:
        run_at = None
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, CONF.artifacts,
                            id=CONF.run_id, run_at=run_at, session=session,
                            metadata=CONF.run_meta)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            if results[test]['status'] == 'success':
                success = 1
                fails = 0
            elif results[test]['status'] == 'fail':
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time, None,
                                      session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session=session)
        if results[test]['metadata']:
            api.update_test_run(results[test]['metadata'], test_run.id, session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
Code example #30
def generate_series():
    if CONF.start_date:
        start_date = datetime.datetime.strptime(CONF.start_date, '%Y-%m-%d')
    else:
        start_date = None
    if CONF.stop_date:
        stop_date = datetime.datetime.strptime(CONF.stop_date, '%Y-%m-%d')
    else:
        stop_date = None
    session = api.get_session()
    test_starts = api.get_test_run_series(start_date=start_date,
                                          stop_date=stop_date,
                                          session=session,
                                          key=CONF.command.dcmd_key,
                                          value=CONF.command.dcmd_value)
    session.close()
    ts = pd.Series(test_starts)
    daily_count = ts.resample('D').sum().fillna(value=0)
    mean = daily_count.rolling(window=10, center=False).mean()
    rolling_std = daily_count.rolling(window=10, center=False).std()
    title = CONF.title or 'Number of Tests run Daily'
    # plt.subplots() opens the figure; the labels below attach to it.
    fig, ax = plt.subplots(1)
    fig.autofmt_xdate()
    plt.title(title)
    plt.ylabel('Number of tests')
    xfmt = dates.DateFormatter("%b %d %Y")
    ax.xaxis_date()
    ax.xaxis.set_major_formatter(xfmt)

    plt.plot(daily_count.index[10:], daily_count[10:], 'k',
             label='Daily Test Count')
    plt.plot(mean.index[10:], mean[10:], 'b', label='Avg. Daily Test Count')
    upper_std_dev = mean + 2 * rolling_std
    lower_std_dev = mean - 2 * rolling_std
    # Set negative numbers to 0
    lower_std_dev[lower_std_dev < 0] = 0
    plt.fill_between(rolling_std.index[10:], lower_std_dev[10:],
                     upper_std_dev[10:],
                     color='b', alpha=0.2, label='Std Dev')
    plt.legend()
    plt.savefig(CONF.output, dpi=CONF.dpi)
Code example #31
File: failures.py Project: mguiney/subunit2sql
def generate_series():
    session = api.get_session()
    test_id = api.get_id_from_test_id(CONF.command.test_id, session)
    test_statuses = api.get_test_status_time_series(test_id, session)
    if not CONF.title:
        test = api.get_test_by_id(test_id, session)
    session.close()
    ts = pd.Series(test_statuses)
    ts = utils.filter_dates(ts)
    run_count = len(ts)
    if run_count == 0:
        print("Query returned no data.")
        exit(-1)
    # The subunit status for an unexpected success is spelled 'uxsuccess'.
    failures = ts[ts.isin(['fail', 'uxsuccess'])]
    successes = ts[ts.isin(['success', 'xfail'])]
    skips = ts[ts.isin(['skip'])]
    fail_count = len(failures)
    success_count = len(successes)
    skip_count = len(skips)
    fail_group = failures.groupby(failures.index.date).agg(len)
    success_group = successes.groupby(successes.index.date).agg(len)
    skip_group = skips.groupby(skips.index.date).agg(len)
    if not CONF.title:
        plot = fail_group.plot().set_title(test.test_id)
    else:
        plot = fail_group.plot().set_title(CONF.title)
    if CONF.command.success_graph:
        if success_count:
            success_group.plot()
    if CONF.command.skip_graph:
        if skip_count:
            skip_group.plot()

    def percent(count, total):
        count = float(count)
        total = float(total)
        return (count / total) * 100.0

    print('Fail Percentage: %.4f%%' % percent(fail_count, run_count))
    print('Success Percentage: %.4f%%' % percent(success_count, run_count))
    print('Skip Percentage: %.4f%%' % percent(skip_count, run_count))
    fig = plot.get_figure()
    fig.savefig(CONF.output)
    return ts
Code example #32
def main():
    shell.parse_args(sys.argv)
    session = api.get_session()
    project_data = get_jobs_per_project(session)
    averages = []
    for project in project_data:
        changes = project_data[project]
        values = list(changes.values())
        averages.append((project, np.mean(values), np.amax(values)))

    # sort by the 2nd column (avg)
    averages.sort(key=lambda tup: tup[1])
    labels = [x[0].split("/")[1] for x in averages]
    data = [x[1] for x in averages]
    maxes = [x[2] for x in averages]
    title = 'Average gate jobs per project'
    plot_histogram(labels, data, 'job_per_changes', title)
    title = 'Max gate jobs per project'
    plot_histogram(labels, maxes, 'max_job_per_changes', title)
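plot_histogram() is not part of the excerpt; a minimal sketch of what it plausibly does, assuming matplotlib.pyplot imported as plt (as in the other examples) and the (labels, values, file name, title) signature used above:

def plot_histogram(labels, values, name, title):
    # Horizontal bars keep long project names legible.
    plt.figure()
    plt.barh(range(len(values)), values, align='center')
    plt.yticks(range(len(labels)), labels, fontsize=6)
    plt.title(title)
    plt.tight_layout()
    plt.savefig('%s.png' % name)  # output path is a guess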
Code example #33
def upgrade():
    query = db_utils.model_query(
        models.Test, db_api.get_session()).filter(
            models.Test.success > 0, models.Test.run_time == None).join(
                models.TestRun).filter_by(
                    status='success').values(models.Test.id,
                                             models.TestRun.start_time,
                                             models.TestRun.stop_time)

    results = {}
    for test_run in query:
        delta = read_subunit.get_duration(test_run[1], test_run[2])
        if test_run[0] in results:
            results[test_run[0]].append(delta)
        else:
            results[test_run[0]] = [delta]

    for test in results:
        avg = float(sum(results[test])) / float(len(results[test]))
        db_api.update_test({'run_time': avg}, test)
Code example #34
def main():
    shell.parse_args([])
    shell.CONF.set_override('connection', DB_URI, group='database')

    preseed_path = os.path.join(TEMPEST_PATH, 'preseed-streams')
    os.mkdir(preseed_path)
    try:
        session = api.get_session()
        run_ids = api.get_recent_successful_runs(num_runs=10, session=session)
        session.close()
        for run in run_ids:
            # 'wb': sql2subunit writes a binary subunit v2 stream.
            with open(os.path.join(preseed_path, run + '.subunit'),
                      'wb') as fd:
                write_subunit.sql2subunit(run, fd)
    except Exception:
        # Copy the static preseed files if fetching preseeds from logstash
        # failed. Catching Exception (not bare except:) leaves
        # KeyboardInterrupt and SystemExit alone.
        src_dir = "/opt/nodepool-scripts/"
        for file in os.listdir(src_dir):
            if file.endswith(".subunit"):
                file_path = os.path.join(src_dir, file)
                shutil.copy(file_path, preseed_path)
Code example #35
def upgrade():
    query = db_utils.model_query(
        models.Test, db_api.get_session()).filter(
            models.Test.success > 0, models.Test.run_time == None).join(
                models.TestRun,
                models.Test.id == models.TestRun.test_id).filter_by(
                    status='success').values(models.Test.id,
                                             models.TestRun.start_time,
                                             models.TestRun.stop_time)

    results = {}
    for test_run in query:
        delta = read_subunit.get_duration(test_run[1], test_run[2])
        if test_run[0] in results:
            results[test_run[0]].append(delta)
        else:
            results[test_run[0]] = [delta]

    for test in results:
        avg = float(sum(results[test])) / float(len(results[test]))
        db_api.update_test({'run_time': avg}, test)
Code example #36
File: dailycount.py Project: ader1990/subunit2sql
def generate_series():
    session = api.get_session()
    test_starts = api.get_test_run_series(session)
    session.close()
    # resample(how=...) and pd.rolling_* were removed from pandas long ago.
    ts = pd.Series(test_starts).resample('D').sum()
    daily_count = utils.filter_dates(ts)
    mean = daily_count.rolling(window=10).mean()
    rolling_std = daily_count.rolling(window=10).std()
    plt.figure()
    title = CONF.title or 'Number of tests run'
    plt.title(title)
    plt.ylabel('Number of tests')
    plt.plot(daily_count.index, daily_count, 'k', label='Daily Test Count')
    plt.plot(mean.index, mean, 'b', label='Avg. Daily Test Count')
    upper_std_dev = mean + 2 * rolling_std
    lower_std_dev = mean - 2 * rolling_std
    # Set negative numbers to 0
    lower_std_dev[lower_std_dev < 0] = 0
    plt.fill_between(rolling_std.index, lower_std_dev, upper_std_dev,
                     color='b', alpha=0.2, label='std dev')
    plt.legend()
    plt.savefig(CONF.output)
Code example #37
def get_job_counts_per_change(change, patchset, session=None):
    session = session or api.get_session()
    rm_build_change = aliased(models.RunMetadata)
    rm_build_patchset = aliased(models.RunMetadata)
    rm_build_name = aliased(models.RunMetadata)
    select_query = session.query(
        rm_build_name.value.label('build_name'),
        func.count().label('job_counts')).select_from(rm_build_change)
    all_run_metadata_query = select_query.filter(
        rm_build_change.run_id == rm_build_patchset.run_id).filter(
            rm_build_name.run_id == rm_build_patchset.run_id).filter(
                rm_build_change.key == 'build_change',
                rm_build_change.value == change).filter(
                    rm_build_patchset.key == 'build_patchset',
                    rm_build_patchset.value == patchset).filter(
                        rm_build_name.key == 'build_name').group_by(
                            rm_build_change.value, rm_build_patchset.value,
                            rm_build_name.value)
    result = dict()
    all_data = all_run_metadata_query.all()
    for build_name, job_counts in all_data:
        result[build_name] = int(job_counts)
    return result
Code example #38
File: models.py Project: austin81/subunit2sql
 def save(self, session=None):
     from subunit2sql.db import api as db_api
     super(SubunitBase, self).save(session or db_api.get_session())
Code example #39
File: models.py Project: mguiney/subunit2sql
 def save(self, session=None):
     from subunit2sql.db import api as db_api
     super(SubunitBase, self).save(session or db_api.get_session())
Code example #40
File: shell.py Project: openstack-infra/subunit2sql
def process_results(results, run_at=None, artifacts=None, run_id=None,
                    run_meta=None, test_attr_prefix=None):
    """Insert converted subunit data into the database.

    Allows for run-specific information to be passed in via kwargs,
    checks CONF if no run-specific information is supplied.

    :param results: subunit stream to be inserted
    :param run_at: Optional time at which the run was started.
    :param artifacts: Link to any artifacts from the test run.
    :param run_id: The run id for the new run. Must be unique.
    :param run_meta: Metadata corresponding to the new run.
    :param test_attr_prefix: Optional test attribute prefix.
    """
    run_at = _override_conf(run_at, 'run_at')
    artifacts = _override_conf(artifacts, 'artifacts')
    run_id = _override_conf(run_id, 'run_id')
    run_meta = _override_conf(run_meta, 'run_meta')
    test_attr_prefix = _override_conf(test_attr_prefix, 'test_attr_prefix')

    if run_at:
        if not isinstance(run_at, datetime.datetime):
            run_at = date_parser.parse(run_at)
    else:
        run_at = None
    session = api.get_session()
    run_time = results.pop('run_time')
    totals = get_run_totals(results)
    db_run = api.create_run(totals['skips'], totals['fails'],
                            totals['success'], run_time, artifacts,
                            id=run_id, run_at=run_at, session=session)
    if run_meta:
        api.add_run_metadata(run_meta, db_run.id, session)
    for test in results:
        db_test = api.get_test_by_test_id(test, session)
        if not db_test:
            if results[test]['status'] in ['success', 'xfail']:
                success = 1
                fails = 0
            elif results[test]['status'] in ['fail', 'uxsuccess']:
                fails = 1
                success = 0
            else:
                fails = 0
                success = 0
            run_time = subunit.get_duration(results[test]['start_time'],
                                            results[test]['end_time'])
            db_test = api.create_test(test, (success + fails), success,
                                      fails, run_time,
                                      session)
        else:
            test_values = increment_counts(db_test, results[test])
            # If skipped nothing to update
            if test_values:
                api.update_test(test_values, db_test.id, session)
        test_run = api.create_test_run(db_test.id, db_run.id,
                                       results[test]['status'],
                                       results[test]['start_time'],
                                       results[test]['end_time'],
                                       session)
        if results[test]['metadata']:
            if test_attr_prefix:
                attrs = results[test]['metadata'].get('attrs')
                test_attr_list = _get_test_attrs_list(attrs)
                test_metadata = api.get_test_metadata(db_test.id, session)
                test_metadata = [(meta.key, meta.value) for meta in
                                 test_metadata]
                if test_attr_list:
                    for attr in test_attr_list:
                        if CONF.remove_test_attr_prefix:
                            normalized_attr = attr[len(
                                CONF.test_attr_prefix):]
                        else:
                            normalized_attr = attr
                        if ('attr', normalized_attr) not in test_metadata:
                            test_meta_dict = {'attr': normalized_attr}
                            api.add_test_metadata(test_meta_dict, db_test.id,
                                                  session=session)
            api.add_test_run_metadata(results[test]['metadata'], test_run.id,
                                      session)
        if results[test]['attachments']:
            api.add_test_run_attachments(results[test]['attachments'],
                                         test_run.id, session)
    session.close()
    return db_run
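A usage sketch for process_results() (not from the project docs; the file name and metadata are made up, and it assumes subunit2sql's ReadSubunit reader, whose get_results() builds the dict this function pops 'run_time' from):

from subunit2sql import read_subunit
from subunit2sql import shell

shell.parse_args([])  # load CONF, including the database connection
with open('results.subunit', 'rb') as stream:  # made-up file name
    results = read_subunit.ReadSubunit(stream).get_results()
db_run = shell.process_results(results, run_meta={'build_queue': 'gate'})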
Code example #41
def get_metadata(id):
    session = api.get_session()
    query = db_utils.model_query(models.Test, session=session).filter_by(
        id=id).join(models.TestRun).filter_by(status='success').join(
            models.RunMetadata,
            models.RunMetadata.run_id==models.TestRun.run_id).values(
                models.TestRun.start_time,
                models.TestRun.stop_time,
                models.RunMetadata.key,
                models.RunMetadata.value,
                models.TestRun.status)

    test_times = {}
    valid_keys = ['build_node', 'build_name']
    for run in query:
        if run[4] != 'success':
            continue
        if run[0] not in test_times:
            run_time = (run[1] - run[0]).total_seconds()
            metadata = {run[2]: run[3]}
            test_times[run[0]] = (run_time, metadata)
        else:
            test_times[run[0]][1][run[2]] = run[3]

    metas = {}
    metas_more = {}
    metas_really_slow = {}
    count = 0
    count_more = 0
    count_really_slow = 0
    dates = []
    series = {}
    series_more = {}
    series_really_slow = {}
    for run in test_times:
        if test_times[run][0] < 100:
            if 'build_queue' in test_times[run][1]:
                if test_times[run][1]['build_queue'] != 'gate':
                    continue
            if 'build_branch' in test_times[run][1]:
                if test_times[run][1]['build_branch'] == 'master':
                    continue
            count = count + 1
            for meta in test_times[run][1]:
                if meta in metas:
                    metas[meta].append(test_times[run][1].get(meta))
                else:
                    metas[meta] = [test_times[run][1].get(meta)]
                dates.append(run)
            series[run] = test_times[run][0]
        elif test_times[run][0] >= 100:
            if test_times[run][0] >= 175:
                if 'build_queue' in test_times[run][1]:
                    if test_times[run][1]['build_queue'] != 'gate':
                        continue
                if 'build_branch' in test_times[run][1]:
                    if test_times[run][1]['build_branch'] != 'master':
                        continue
                count_really_slow = count_really_slow + 1
                for meta in test_times[run][1]:
                    if meta in metas_really_slow:
                        metas_really_slow[meta].append(test_times[run][1].get(meta))
                    else:
                        metas_really_slow[meta] = [test_times[run][1].get(meta)]
                series_really_slow[run] = test_times[run][0]
            else:
                if 'build_queue' in test_times[run][1]:
                    if test_times[run][1]['build_queue'] != 'gate':
                        continue
                if 'build_branch' in test_times[run][1]:
                    if test_times[run][1]['build_branch'] != 'master':
                        continue
                count_more = count_more + 1
                for meta in test_times[run][1]:
                    if meta in metas_more:
                        metas_more[meta].append(test_times[run][1].get(meta))
                    else:
                        metas_more[meta] = [test_times[run][1].get(meta)]
                series_more[run] = test_times[run][0]
    vals = {}
    trusty = 0
    precise = 0
    other = 0
    vals_more = {}
    # The slow buckets count cloud providers (hp/rax), not distro releases.
    hp_more = 0
    rax_more = 0
    other_more = 0
    vals_really_slow = {}
    hp_really_slow = 0
    rax_really_slow = 0
    other_really_slow = 0
    for meta in metas:
        if meta == 'build_node':
            for node in metas[meta]:
                if 'trusty' in node:
                    trusty = trusty + 1
                elif 'precise' in node:
                    precise = precise + 1
                else:
                    other = other + 1
        else:
            vals[meta] = dict(collections.Counter(metas[meta]))
    for meta in metas_more:
        if meta == 'build_node':
            for node in metas_more[meta]:
                if 'hp' in node:
                    hp_more = hp_more + 1
                elif 'rax' in node:
                    rax_more = rax_more + 1
                else:
                    other_more = other_more + 1
        else:
            vals_more[meta] = dict(collections.Counter(metas_more[meta]))

    for meta in metas_really_slow:
        if meta == 'build_node':
            for node in metas_really_slow[meta]:
                if 'hp' in node:
                    hp_really_slow = hp_really_slow + 1
                elif 'rax' in node:
                    rax_really_slow = rax_really_slow + 1
                else:
                    other_really_slow = other_really_slow + 1
        else:
            vals_really_slow[meta] = dict(collections.Counter(metas_really_slow[meta]))
    print "Fast Jobs:"
    print 'Build Queues:'
    print vals['build_queue']
#    print 'Build Name'
#    print vals['build_name']
    print 'Build Branch'
    print vals['build_branch']
    print "trusty: %s, precise %s, other: %s" % (trusty, precise, other)
    print max(dates)
    print "Slow Jobs:"
    print 'Build Queues:'
    print vals_more['build_queue']
#    print 'Build Name'
#    print vals_more['build_name']
    print 'Build Branch'
    print vals_more['build_branch']
    print "hp: %s, rax %s, other: %s" % (trusty_more, precise_more, other_more)
    print sorted(vals_more['build_name'].items(), key=operator.itemgetter(1))
    print "Really Slow Jobs:"
    print 'Build Queues:'
    print sorted(vals_really_slow['build_queue'].items(), key=operator.itemgetter(1))
#    print 'Build Name'
#    print vals_more['build_name']
    print 'Build Branch'
    print vals_really_slow['build_branch']
    print "hp: %s, rax %s, other: %s" % (hp_really_slow, rax_really_slow, other_really_slow)
    print sorted(vals_really_slow['build_name'].items(), key=operator.itemgetter(1))

    ts_slow = pd.Series(series_more)
    ts = pd.Series(series)
    ts_really_slow = pd.Series(series_really_slow)
    # pd.rolling_mean was removed in pandas 0.18; use the Rolling API.
    plot = ts_slow.rolling(window=60).mean().plot()
    plot2 = ts.rolling(window=8).mean().plot()
    plot3 = ts_really_slow.rolling(window=10).mean().plot()
    fig = plot.get_figure()
    fig.savefig('/tmp/test2.png')
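The three metadata-accumulation branches in get_metadata() differ only in their time thresholds (and per-bucket build_queue/build_branch filters, omitted here for brevity); a compact sketch of the same bucketing, with invented names, assuming the test_times mapping built above:

def bucket_for(run_time):
    # Thresholds mirror the branches above: <100s fast, >=175s really slow.
    if run_time < 100:
        return 'fast'
    return 'really_slow' if run_time >= 175 else 'slow'

buckets = {'fast': {}, 'slow': {}, 'really_slow': {}}
for run, (run_time, metadata) in test_times.items():
    buckets[bucket_for(run_time)][run] = run_time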