Example #1
def browse_product():
    df = read_sql("SELECT * from product", connect)
    if df.empty:
        print("Products are not available now")
    else:
        print(df)
    db_session.close()
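The snippets on this page assume a SQLAlchemy engine/session and pandas' read_sql have already been set up elsewhere in the project. A minimal sketch of that shared setup (the names connect and db_session are assumptions inferred from the calls above):

from pandas import read_sql
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# hypothetical setup; swap in the project's real connection URL and models
engine = create_engine("sqlite:///shop.db")
connect = engine.connect()                 # the connection passed to read_sql()
db_session = sessionmaker(bind=engine)()   # the session the examples commit/close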
Example #2
def migration(args):
    limit = int(args.limit)
    offset = 0
    url = URL % (limit, offset)
    response = retry(requests.get, args=(url, )).json()
    datasets = response['result']
    while len(datasets) > 0:
        for data in datasets:
            Testjob = Testjobs(data['slave'], data['result'],
                               data['build_system_type'], data['duration'],
                               data['platform'], data['buildtype'],
                               data['testtype'], data['bugid'], data['branch'],
                               data['revision'], data['date'],
                               data['failure_classification'],
                               data['failures'])
            try:
                session.add(Testjob)
                session.commit()

            except Exception as error:
                logging.warning(error)
                session.rollback()

            finally:
                session.close()

        # Move to the next page of results by advancing the offset
        offset += limit
        url = URL % (limit, offset)
        response = retry(requests.get, args=(url, )).json()
        datasets = response['result']
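The retry helper is imported from elsewhere (likely Mozilla's redo package); judging from the call sites retry(requests.get, args=(url,)), it retries a callable a few times before giving up. A minimal sketch under that assumption:

import time

def retry(func, args=(), kwargs=None, attempts=3, sleeptime=10):
    # hypothetical stand-in for the project's retry helper
    kwargs = kwargs or {}
    for attempt in range(attempts):
        try:
            return func(*args, **kwargs)
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(sleeptime)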
Example #3
def delete_product():
    print("Available products are the following: ")
    Product.browse_product()
    p_id = input("\nEnter the product id which you want to delete: ")
    db_session.query(Product).filter(Product.id == p_id).delete()
    db_session.commit()
    db_session.close()
Example #4
def updatedb(date, platform, branch, numpushes, numjobs, sumduration):
    # .delete() must be called on the query itself, not on the list .all() returns
    session.query(Dailyjobs).filter(and_(Dailyjobs.date == date, Dailyjobs.branch == branch,
                                         Dailyjobs.platform == platform)).delete()
    session.commit()

    dailyjob = Dailyjobs(date, platform, branch, numpushes, numjobs, sumduration)
    try:
        session.add(dailyjob)
        session.commit()
    except Exception as e:
        LOG.warning(e)
        session.rollback()

    session.close()
Example #5
def addWriters():
    print(request.form)
    writer = request.form['name']
    email = request.form['email']
    writer = Writers(writer, email, 0)
    try:
        session.add(writer)
        session.commit()
    except Exception as error:
        print(error)
        session.rollback()
    finally:
        session.close()
    return redirect(url_for('writers'))
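This add/commit/rollback/close sequence recurs in nearly every example on this page. SQLAlchemy's documentation captures it once as a session_scope context manager; a sketch, assuming a Session factory like the setup above:

from contextlib import contextmanager

@contextmanager
def session_scope():
    # commit on success, roll back on error, always close
    session = Session()  # assumption: Session = sessionmaker(bind=engine)
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()

# usage:
# with session_scope() as s:
#     s.add(Writers(writer, email, 0))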
Example #6
def insert_in_database(to_insert, date=None):
    if not date:
        date = datetime.datetime.now().strftime('%Y-%m-%d')
    else:
        date = date.strftime('%Y-%m-%d')

    session.query(Seta).filter(Seta.date == date).delete(
        synchronize_session='fetch')
    session.commit()
    for jobtype in to_insert:
        job = Seta(str(jobtype), date)
        session.add(job)
        session.commit()
    session.close()
Example #7
def updatedb(date, platform, branch, numpushes, numjobs, sumduration):
    # .delete() must be called on the query itself, not on the list .all() returns
    session.query(Dailyjobs).filter(
        and_(Dailyjobs.date == date, Dailyjobs.branch == branch,
             Dailyjobs.platform == platform)).delete()
    session.commit()

    dailyjob = Dailyjobs(date, platform, branch, numpushes, numjobs,
                         sumduration)
    try:
        session.add(dailyjob)
        session.commit()
    except Exception as e:
        LOG.warning(e)
        session.rollback()

    session.close()
Example #8
File: server.py Project: jmaher/ouija
def run_results_day_flot_query():
    """
    This function returns the total failures/total jobs data per day for all platforms.
    It is sending the data in the format required by flot.Flot is a jQuery package used
    for 'attractive' plotting
    """
    start_date, end_date = clean_date_params(request.args)

    platforms = ['android4.0',
                 'android2.3',
                 'linux32',
                 'winxp',
                 'win7',
                 'win8',
                 'osx10.6',
                 'osx10.7',
                 'osx10.8']

    data_platforms = {}
    for platform in platforms:
        # COUNT over a boolean expression counts every non-NULL row, so the
        # failures column needs SUM(CASE ...) instead
        # (requires: from sqlalchemy import case)
        query_results = session.query(
            Testjobs.date.label('day'),
            func.sum(case([(Testjobs.result == 'testfailed', 1)],
                          else_=0)).label("failures"),
            func.count().label('totals')).filter(
            and_(Testjobs.platform == platform,
                 Testjobs.date >= start_date, Testjobs.date <= end_date)).group_by('day').all()

        dates = []
        data = {}
        data['failures'] = []
        data['totals'] = []

        for day, fail, total in query_results:
            dates.append(day)
            timestamp = calendar.timegm(day.timetuple()) * 1000
            data['failures'].append((timestamp, int(fail)))
            data['totals'].append((timestamp, int(total)))

        data_platforms[platform] = {'data': data, 'dates': get_date_range(dates)}

    session.close()
    return data_platforms
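The * 1000 in the loop above is there because flot expects x values as milliseconds since the epoch; a quick check of the conversion:

import calendar
from datetime import date

day = date(2014, 1, 1)
timestamp = calendar.timegm(day.timetuple()) * 1000
print(timestamp)  # 1388534400000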
Example #9
def dispatch():
    """
        dispatch project to the writes
    """
    projectName = request.form['name']
    keyWords = request.form['keyWords']
    description = request.form['description']
    uploadPath = request.form['uploadPath']
    project = Projects(name=projectName,
                       uploadPath=uploadPath,
                       keyWords=keyWords,
                       description=description,
                       updatedTime=datetime.now())
    try:
        session.add(project)
        session.commit()
    except Exception as error:
        print(error)
        session.rollback()
    finally:
        session.close()
    return redirect(url_for('index'))
Example #10
File: server.py Project: MikeLing/ouija
def run_seta_details_query():
    buildbot = sanitize_bool(request.args.get("buildbot", 0))
    branch = sanitize_string(request.args.get("branch", 'mozilla-inbound'))
    taskcluster = sanitize_bool(request.args.get("taskcluster", 0))
    priority = int(sanitize_string(request.args.get("priority", '5')))
    jobnames = JOBSDATA.jobnames_query()
    date = str(datetime.now().date())
    retVal = {}
    retVal[date] = []
    jobtype = []

    # we only support the fx-team, autoland, and mozilla-inbound branches in seta
    if str(branch) not in ['fx-team', 'mozilla-inbound', 'autoland'] \
            and str(branch) != '':
        abort(404)

    # For a TaskCluster request, we don't care which priority the user requested.
    # We return jobs according to the strategy of returning high value jobs by
    # default and returning all jobs every 5 pushes or 90 minutes for that branch.
    if request.headers.get('User-Agent', '') == 'TaskCluster':
        # we set taskcluster to 1 if it's a request from taskcluster; it's more
        # reasonable and simplifies the request url.
        taskcluster = 1

        # we will return all jobs every 90 minutes, so the return_all_jobs flag
        # will be set to true once the time limit has been reached.
        return_all_jobs = False

        # We should return full job list as a fallback, if it's a request from
        # taskcluster and without head_rev or pushlog_id in there
        try:
            branch_info = session.query(
                TaskRequests.counter, TaskRequests.datetime,
                TaskRequests.reset_delta).filter(
                    TaskRequests.branch == branch).all()
        except Exception:
            branch_info = []
        time_of_now = datetime.now()

        # If we got nothing related to that branch, we should create it.
        if len(branch_info) == 0:
            # time_of_lastreset is not a good name anyway :(
            # We treat every branch's reset_delta as 90 minutes; we should find
            # a better delta for each of them in the future.
            branch_data = TaskRequests(str(branch), 1, time_of_now,
                                       RESET_DELTA)
            try:
                session.add(branch_data)
                session.commit()
            except Exception as error:
                LOG.debug(error)
                session.rollback()

            finally:
                session.close()
            counter = 1
            time_string = time_of_now
            reset_delta = RESET_DELTA

        # We should update it if that branch has already been stored.
        else:
            counter, time_string, reset_delta = branch_info[0]
            counter += 1
            conn = engine.connect()
            statement = update(TaskRequests).where(
                TaskRequests.branch == branch).values(counter=counter)
            conn.execute(statement)

        delta = (time_of_now - time_string).total_seconds()

        # we should update the time recorder if the elapsed time has reached
        # the time limit for that branch.
        if delta >= reset_delta:
            conn = engine.connect()
            statement = update(TaskRequests).where(
                TaskRequests.branch == branch).values(datetime=time_of_now)
            conn.execute(statement)

            # we need to set the return_all_jobs flag to true.
            return_all_jobs = True

        # we query all jobs rather than only those matching the requested priority,
        # because the job returning strategy depends on each job's priority.
        query = session.query(JobPriorities.platform, JobPriorities.buildtype,
                              JobPriorities.testtype, JobPriorities.priority,
                              JobPriorities.timeout).all()

        for d in query:
            # we only return a job if it hasn't reached the timeout limit; a
            # timeout of zero means the job should always run.
            if delta < d[4] or d[4] == 0:
                # The priority of all high value jobs is 1, and we need to
                # return all jobs every 5 pushes (for now).
                if counter % d[3] != 0:
                    jobtype.append([d[0], d[1], d[2]])

            # we need to return all jobs every 90 minutes, so all jobs will be
            # returned if the delta is larger than 5400 seconds
            elif return_all_jobs:
                jobtype.append([d[0], d[1], d[2]])

    # We don't care about the timeout variable of job if it's not a taskcluster request.
    else:
        query = session.query(
            JobPriorities.platform,
            JobPriorities.buildtype,
            JobPriorities.testtype,
            JobPriorities.priority,
        ).all()

        # priority = 0; run all the jobs
        if priority != 1 and priority != 5:
            priority = 0

        # High value jobs are stored in the seta table by default, so by default
        # we return the low value jobs, i.e. those with no failures associated.
        if priority == 0:
            jobtype = JOBSDATA.jobtype_query(
            )  # All jobs regardless of priority
        # priority = 5: run all low value jobs
        else:
            joblist = [job for job in query if job[3] == priority]
            for j in joblist:
                jobtype.append([j[0], j[1], j[2]])

        # TODO: filter out based on buildsystem from database, either 'buildbot' or '*'
        if buildbot:
            active_jobs = []
            # pick out the buildbot jobs first to speed up the filtering below
            buildbot_jobs = [
                job for job in jobnames if job['buildplatform'] == 'buildbot'
            ]
            # look up the corresponding job details
            for job in jobtype:
                for j in buildbot_jobs:
                    if j['name'] == job[2] and j['platform'] == job[0] and j[
                            'buildtype'] == job[1]:
                        active_jobs.append(
                            j['ref_data_name'] if branch == 'mozilla-inbound'
                            else j['ref_data_name'].
                            replace('mozilla-inbound', branch))

            jobtype = active_jobs

    # TODO: filter out based on buildsystem from database, either 'taskcluster' or '*'
    if taskcluster:
        active_jobs = []
        taskcluster_jobs = [
            job for job in jobnames if job['buildplatform'] == 'taskcluster'
        ]
        for job in jobtype:
            # retranslate the jobtype back into its proper data form
            job[2] = job[2].replace('e10s-browser-chrome',
                                    'browser-chrome-e10s')
            job[2] = job[2].replace('e10s-devtools-chrome',
                                    'devtools-chrome-e10s')
            job[2] = job[2].replace('gl-', 'webgl-')

            for j in taskcluster_jobs:
                if job[2] in j['name'] and j['platform'] == job[0] and j[
                        'buildtype'] == job[1]:
                    active_jobs.append(j['ref_data_name'])
        jobtype = active_jobs

    retVal[date] = jobtype
    return {"jobtypes": retVal}
Example #11
File: server.py Project: MikeLing/ouija
def run_platform_query():
    platform = sanitize_string(request.args.get("platform"))
    build_system_type = sanitize_string(request.args.get("build_system_type"))
    start_date, end_date = clean_date_params(request.args)

    log_message = 'platform: %s startDate: %s endDate: %s' % (
        platform, start_date.strftime('%Y-%m-%d'),
        end_date.strftime('%Y-%m-%d'))
    app.logger.debug(log_message)

    csets = session.query(Testjobs.revision).distinct().\
        filter(and_(Testjobs.platform == platform,
                    Testjobs.branch == 'mozilla-central',
                    Testjobs.date.between(start_date, end_date),
                    Testjobs.build_system_type == build_system_type)).order_by(desc(Testjobs.date))

    cset_summaries = []
    test_summaries = {}
    dates = []

    labels = 'green orange blue red'.split()
    summary = {result: 0 for result in labels}

    for cset in csets:
        cset_id = cset[0]
        cset_summary = CSetSummary(cset_id)

        test_results = session.query(Testjobs.result, Testjobs.testtype, Testjobs.date).\
            filter(and_(Testjobs.platform == platform,
                        Testjobs.buildtype == 'opt',
                        Testjobs.revision == cset_id,
                        Testjobs.build_system_type == build_system_type)).order_by(
            Testjobs.testtype).all()

        for res, testtype, date in test_results:
            test_summary = test_summaries.setdefault(testtype, summary.copy())

            if res == 'success':
                cset_summary.green[testtype] += 1
                test_summary['green'] += 1
            elif res == 'testfailed':
                cset_summary.orange[testtype] += 1
                test_summary['orange'] += 1
            elif res == 'retry':
                cset_summary.blue[testtype] += 1
                test_summary['blue'] += 1
            elif res == 'exception' or res == 'busted':
                cset_summary.red[testtype] += 1
                test_summary['red'] += 1
            elif res == 'usercancel':
                app.logger.debug('usercancel')
            else:
                app.logger.debug('UNRECOGNIZED RESULT: %s' % res)
            dates.append(date)

        cset_summaries.append(cset_summary)

    # sort tests alphabetically and append total & percentage to end of the list
    test_types = sorted(test_summaries.keys())
    test_types += ['total', 'percentage']

    # calculate total stats and percentage
    total = Counter()
    percentage = {}

    for test in test_summaries:
        total.update(test_summaries[test])
    test_count = sum(total.values())

    for key in total:
        percentage[key] = round((100.0 * total[key] / test_count), 2)

    fail_rates = calculate_fail_rate(passes=total['green'],
                                     retries=total['blue'],
                                     totals=test_count)

    test_summaries['total'] = total
    test_summaries['percentage'] = percentage
    session.close()
    return {
        'testTypes': test_types,
        'byRevision': cset_summaries,
        'byTest': test_summaries,
        'failRates': fail_rates,
        'dates': get_date_range(dates)
    }
Example #12
File: server.py Project: MikeLing/ouija
def run_slaves_query():
    start_date, end_date = clean_date_params(request.args)

    days_to_show = (end_date - start_date).days
    if days_to_show <= 8:
        jobs = 5
    else:
        jobs = int(round(days_to_show * 0.4))

    info = 'Only slaves with more than %d jobs are displayed.' % jobs

    query_results = session.query(
        Testjobs.slave, Testjobs.result, Testjobs.date).filter(
            and_(
                Testjobs.result.in_(
                    ["retry", "testfailed", "success", "busted", "exception"]),
                Testjobs.date.between(start_date,
                                      end_date))).order_by(Testjobs.date).all()
    session.close()

    if not query_results:
        return

    data = {}
    labels = 'fail retry infra success total'.split()
    summary = {result: 0 for result in labels}
    summary['jobs_since_last_success'] = 0
    dates = []

    for name, result, date in query_results:
        data.setdefault(name, summary.copy())
        data[name]['jobs_since_last_success'] += 1
        if result == 'testfailed':
            data[name]['fail'] += 1
        elif result == 'retry':
            data[name]['retry'] += 1
        elif result == 'success':
            data[name]['success'] += 1
            data[name]['jobs_since_last_success'] = 0
        elif result == 'busted' or result == 'exception':
            data[name]['infra'] += 1
        data[name]['total'] += 1
        dates.append(date)

    # filter slaves
    slave_list = [slave for slave in data if data[slave]['total'] > jobs]

    # calculate failure rate only for slaves that we're going to display
    for slave in slave_list:
        results = data[slave]
        fail_rates = calculate_fail_rate(results['success'], results['retry'],
                                         results['total'])
        data[slave]['sfr'] = fail_rates

    platforms = {}

    # group slaves by platform and calculate platform failure rate
    slaves = sorted(data.keys())
    for platform, slave_group in groupby(slaves,
                                         lambda x: x.rsplit('-', 1)[0]):
        slaves = list(slave_group)

        # don't calculate failure rate for platform we're not going to show
        if not any(slave in slaves for slave in slave_list):
            continue

        platforms[platform] = {}
        results = {}

        for label in ['success', 'retry', 'total']:
            results[label] = sum(data[slave][label] for slave in slaves)

        fail_rates = calculate_fail_rate(results['success'], results['retry'],
                                         results['total'])
        platforms[platform].update(fail_rates)

    # remove data that we don't need (copy the keys, since we mutate the dict)
    for slave in list(data.keys()):
        if slave not in slave_list:
            del data[slave]

    return {
        'slaves': data,
        'platforms': platforms,
        'dates': get_date_range(dates),
        'disclaimer': info
    }
Example #13
def uploadResults(data, branch, revision, date):
    if "results" not in data:
        return

    results = data["results"]
    count = 0
    for r in results:
        _id, slave, result, duration, platform, buildtype, testtype, bugid = \
            '', '', '', '', '', '', '', ''

        # [1468489471, u'taskcluster', u'i-0ba5dce1fab3f3768', u'?', u'unknown', u'opt', u'',
        # 5945, 107, u'success', 4355877, u'-', 6689, u'gecko-decision',
        # u'12626cb1-b7fc-4d8f-bcee-0ee10af509fe/0', u'Gecko Decision Task',
        # u'6751f6b4d53bef7733d3063aa3f72b0832dbde74', u'gecko-decision', u'completed', 503,
        # 1468489740, u'-', u'*****@*****.**',
        # u'102210fe594ee9b33d82058545b1ed14f4c8206e', 1, u'D', u'scheduled', u'fill me', 1, None,
        # u'-', 1468489475, u'-', u'2016-07-14T09:49:00', u'6751f6b4d53bef7733d3063aa3f72b0832dbde74', 2]

        _id = r["id"]

        # Skip if 'result' is unknown
        result = r["result"]
        if result == u'unknown':
            continue

        duration = '%s' % (int(r["end_timestamp"]) - int(r["start_timestamp"]))

        platform = r["platform"]
        if not platform:
            continue

        buildtype = r["platform_option"]
        build_system_type = r['build_system_type']
        # the testtype of a buildbot job is in 'ref_data_name',
        # like web-platform-tests-4 in "Ubuntu VM 12.04 x64 mozilla-inbound ...",
        # but taskcluster's testtype is part of its 'job_type_name'
        if r['build_system_type'] == 'buildbot':
            testtype = r['ref_data_name'].split(' ')[-1]

        else:
            # The test name on taskcluster is a sort of combination
            # (e.g. desktop-test-linux64/debug-jittests-3) and an asan job can
            # be referenced as an opt job. We use the build type (debug or opt)
            # to split the job_type_name, getting "jittests-3" as the testtype
            # for a job_type_name like desktop-test-linux64/debug-jittests-3
            separator = r['platform_option'] \
                if r['platform_option'] != 'asan' else 'opt'
            testtype = r['job_type_name'].split(
                '{buildtype}-'.format(buildtype=separator))[-1]
        if r["build_system_type"] == "taskcluster":
            # TODO: this is fragile, current platforms as of Jan 26, 2016 we see in taskcluster
            pmap = {
                "linux64": "Linux64",
                "linux32": "Linux32",
                "osx-10-7": "MacOSX64",
                "gecko-decision": "gecko-decision",
                "lint": "lint"
            }
            p = platform
            if platform in pmap:
                p = pmap[platform]
            testtype = r["job_type_name"].split(p)[-1]

        failure_classification = 0
        try:
            # http://treeherder.mozilla.org/api/failureclassification/
            failure_classification = int(r["failure_classification_id"])
        except ValueError:
            failure_classification = 0
        except TypeError:
            logging.warning(
                "Error, failure classification id: expecting an int, "
                "but received %s instead" % r["failure_classification_id"])
            failure_classification = 0

        # Get Notes: https://treeherder.mozilla.org/api/project/mozilla-inbound/note/?job_id=5083103
        if result != u'success':
            url = "https://treeherder.mozilla.org/api/project/%s/note/?job_id=%s" % (
                branch, _id)
            try:
                notes = fetch_json(url)
                if notes:
                    bugid = notes[-1]['note']
            except KeyError:
                pass

        # Get failure snippets: https://treeherder.mozilla.org/api/project/
        # mozilla-inbound/artifact/?job_id=11651377&name=Bug+suggestions&type=json
        failures = set()
        if failure_classification == 2:
            url = "https://treeherder.mozilla.org/api/project/%s/artifact/?job_id=%s" \
                  "&name=Bug+suggestions&type=json" % (branch, _id)
            snippets = fetch_json(url)
            if snippets:
                for item in snippets[0]["blob"]:
                    if not item["search_terms"] and len(
                            item["search_terms"]) < 1:
                        continue
                    filename = item['search_terms'][0]
                    if (filename.endswith('.js') or filename.endswith('.xul')
                            or filename.endswith('.html')):
                        dir = item['search']
                        dir = (dir.split('|')[1]).strip()
                        if dir.endswith(filename):
                            dir = dir.split(filename)[0]
                            failures.add(dir + filename)
            # https://treeherder.mozilla.org/api/project/mozilla-central/jobs/1116367/
            url = "https://treeherder.mozilla.org/api/project/%s/jobs/%s/" % (
                branch, _id)
            data1 = fetch_json(url)

            slave = data1['machine_name']

            # Insert into MySQL Database
            try:
                testjob = Testjobs(str(slave), str(result),
                                   str(build_system_type), str(duration),
                                   str(platform), str(buildtype),
                                   str(testtype), str(bugid), str(branch),
                                   str(revision), str(date),
                                   str(failure_classification),
                                   str(list(failures)[0:10]))

                session.add(testjob)
                count += 1
                session.commit()
            except Exception as error:
                session.rollback()
                logging.warning(error)
            finally:
                session.close()
    logging.info("uploaded %s/(%s) results for rev: %s, branch: %s, date: %s" %
                 (count, len(results), revision, branch, date))
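fetch_json is another helper not shown here; from its call sites it fetches a Treeherder API URL and returns the decoded JSON body. A sketch, assuming plain requests:

import requests

def fetch_json(url):
    # hypothetical stand-in: GET the URL, return parsed JSON, or None on failure
    try:
        response = requests.get(url, headers={'Accept': 'application/json'},
                                timeout=30)
        response.raise_for_status()
        return response.json()
    except requests.RequestException:
        return None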
Example #14
File: updatedb.py Project: jmaher/ouija
def uploadResults(data, branch, revision, date):
    if "results" not in data:
        return

    results = data["results"]
    count = 0
    for r in results:
        _id, slave, result, duration, platform, buildtype, testtype, bugid = \
            '', '', '', '', '', '', '', ''

        # [1468489471, u'taskcluster', u'i-0ba5dce1fab3f3768', u'?', u'unknown', u'opt', u'',
        # 5945, 107, u'success', 4355877, u'-', 6689, u'gecko-decision',
        # u'12626cb1-b7fc-4d8f-bcee-0ee10af509fe/0', u'Gecko Decision Task',
        # u'6751f6b4d53bef7733d3063aa3f72b0832dbde74', u'gecko-decision', u'completed', 503,
        # 1468489740, u'-', u'*****@*****.**',
        # u'102210fe594ee9b33d82058545b1ed14f4c8206e', 1, u'D', u'scheduled', u'fill me', 1, None,
        # u'-', 1468489475, u'-', u'2016-07-14T09:49:00', u'6751f6b4d53bef7733d3063aa3f72b0832dbde74', 2]

        _id = r["id"]

        # Skip if 'result' is unknown
        result = r["result"]
        if result == u'unknown':
            continue

        duration = '%s' % (int(r["end_timestamp"]) - int(r["start_timestamp"]))

        platform = r["platform"]
        if not platform:
            continue

        buildtype = r["platform_option"]
        build_system_type = r['build_system_type']
        # the testtype of a buildbot job is in 'ref_data_name',
        # like web-platform-tests-4 in "Ubuntu VM 12.04 x64 mozilla-inbound ...",
        # but taskcluster's testtype is part of its 'job_type_name'
        if r['build_system_type'] == 'buildbot':
            testtype = r['ref_data_name'].split(' ')[-1]

        else:
            # The test name on taskcluster is a sort of combination
            # (e.g. desktop-test-linux64/debug-jittests-3) and an asan job can
            # be referenced as an opt job. We use the build type (debug or opt)
            # to split the job_type_name, getting "jittests-3" as the testtype
            # for a job_type_name like desktop-test-linux64/debug-jittests-3
            separator = r['platform_option'] \
                if r['platform_option'] != 'asan' else 'opt'
            testtype = r['job_type_name'].split(
                '{buildtype}-'.format(buildtype=separator))[-1]
        if r["build_system_type"] == "taskcluster":
            # TODO: this is fragile, current platforms as of Jan 26, 2016 we see in taskcluster
            pmap = {"linux64": "Linux64",
                    "linux32": "Linux32",
                    "osx-10-7": "MacOSX64",
                    "gecko-decision": "gecko-decision",
                    "lint": "lint"}
            p = platform
            if platform in pmap:
                p = pmap[platform]
            testtype = r["job_type_name"].split(p)[-1]

        failure_classification = 0
        try:
            # http://treeherder.mozilla.org/api/failureclassification/
            failure_classification = int(r["failure_classification_id"])
        except ValueError:
            failure_classification = 0
        except TypeError:
            logging.warning("Error, failure classification id: expecting an int, "
                            "but recieved %s instead" % r["failure_classification_id"])
            failure_classification = 0

        # Get Notes: https://treeherder.mozilla.org/api/project/mozilla-inbound/note/?job_id=5083103
        if result != u'success':
            url = "https://treeherder.mozilla.org/api/project/%s/note/?job_id=%s" % (branch, _id)
            try:
                notes = fetch_json(url)
                if notes:
                    bugid = notes[-1]['text']
            except KeyError:
                if failure_classification == 2:
                    bugid = revision

        # Get failure snippets: https://treeherder.mozilla.org/api/project/
        # mozilla-inbound/artifact/?job_id=11651377&name=Bug+suggestions&type=json
        failures = set()
        if failure_classification == 2:
            url = "https://treeherder.mozilla.org/api/project/%s/artifact/?job_id=%s" \
                  "&name=Bug+suggestions&type=json" % (branch, _id)
            snippets = fetch_json(url)
            if snippets:
                for item in snippets[0]["blob"]:
                    if not item["search_terms"] and len(item["search_terms"]) < 1:
                        continue
                    filename = item['search_terms'][0]
                    if (filename.endswith('.js') or filename.endswith('.xul') or
                            filename.endswith('.html')):
                        dir = item['search']
                        dir = (dir.split('|')[1]).strip()
                        if dir.endswith(filename):
                            dir = dir.split(filename)[0]
                            failures.add(dir + filename)
            # https://treeherder.mozilla.org/api/project/mozilla-central/jobs/1116367/
            url = "https://treeherder.mozilla.org/api/project/%s/jobs/%s/" % (branch, _id)
            data1 = fetch_json(url)

            slave = data1['machine_name']

            # Insert into MySQL Database
            try:
                testjob = Testjobs(str(slave), str(result), str(build_system_type),
                                   str(duration), str(platform), str(buildtype),
                                   str(testtype), str(bugid), str(branch),
                                   str(revision), str(date), str(failure_classification),
                                   str(list(failures)[0:10]))

                session.add(testjob)
                count += 1
                session.commit()
            except Exception as error:
                session.rollback()
                logging.warning(error)
            finally:
                session.close()
    logging.info("uploaded %s/(%s) results for rev: %s, branch: %s, date: %s" %
                 (count, len(results), revision, branch, date))
Example #15
File: server.py Project: jmaher/ouija
def run_platform_query():
    platform = sanitize_string(request.args.get("platform"))
    build_system_type = sanitize_string(request.args.get("build_system_type"))
    start_date, end_date = clean_date_params(request.args)

    log_message = 'platform: %s startDate: %s endDate: %s' % (platform,
                                                              start_date.strftime('%Y-%m-%d'),
                                                              end_date.strftime('%Y-%m-%d'))
    app.logger.debug(log_message)

    csets = session.query(Testjobs.revision).distinct().\
        filter(and_(Testjobs.platform == platform,
                    Testjobs.branch == 'mozilla-central',
                    Testjobs.date.between(start_date, end_date),
                    Testjobs.build_system_type == build_system_type)).order_by(desc(Testjobs.date))

    cset_summaries = []
    test_summaries = {}
    dates = []

    labels = 'green orange blue red'.split()
    summary = {result: 0 for result in labels}

    for cset in csets:
        cset_id = cset[0]
        cset_summary = CSetSummary(cset_id)

        test_results = session.query(Testjobs.result, Testjobs.testtype, Testjobs.date).\
            filter(and_(Testjobs.platform == platform,
                        Testjobs.buildtype == 'opt',
                        Testjobs.revision == cset_id,
                        Testjobs.build_system_type == build_system_type)).order_by(
            Testjobs.testtype).all()

        for res, testtype, date in test_results:
            test_summary = test_summaries.setdefault(testtype, summary.copy())

            if res == 'success':
                cset_summary.green[testtype] += 1
                test_summary['green'] += 1
            elif res == 'testfailed':
                cset_summary.orange[testtype] += 1
                test_summary['orange'] += 1
            elif res == 'retry':
                cset_summary.blue[testtype] += 1
                test_summary['blue'] += 1
            elif res == 'exception' or res == 'busted':
                cset_summary.red[testtype] += 1
                test_summary['red'] += 1
            elif res == 'usercancel':
                app.logger.debug('usercancel')
            else:
                app.logger.debug('UNRECOGNIZED RESULT: %s' % res)
            dates.append(date)

        cset_summaries.append(cset_summary)

    # sort tests alphabetically and append total & percentage to end of the list
    test_types = sorted(test_summaries.keys())
    test_types += ['total', 'percentage']

    # calculate total stats and percentage
    total = Counter()
    percentage = {}

    for test in test_summaries:
        total.update(test_summaries[test])
    test_count = sum(total.values())

    for key in total:
        percentage[key] = round((100.0 * total[key] / test_count), 2)

    fail_rates = calculate_fail_rate(passes=total['green'],
                                     retries=total['blue'],
                                     totals=test_count)

    test_summaries['total'] = total
    test_summaries['percentage'] = percentage
    session.close()
    return {'testTypes': test_types,
            'byRevision': cset_summaries,
            'byTest': test_summaries,
            'failRates': fail_rates,
            'dates': get_date_range(dates)}
Example #16
File: server.py Project: jmaher/ouija
def update_preseed():
    """
    We sync preseed.json to jobpririties in server on startup, since that is
    the only time we expect preseed.json to change.
    """

    # get preseed data first
    preseed_path = os.path.join(os.path.dirname(SCRIPT_DIR), 'src', 'preseed.json')
    preseed = []
    with open(preseed_path, 'r') as fHandle:
        preseed = json.load(fHandle)

    # Preseed data will have fields: buildtype,testtype,platform,priority,timeout,expires
    # The expires field defaults to 2 weeks on a new job in the database
    # Expires field has a date "YYYY-MM-DD", but can have "*" to indicate never
    # Typical priority will be 1, but if we want to force coalescing we can do that
    # One hack is that if we have a * in a buildtype,testtype,platform field, then
    # we assume it is for all flavors of the * field: i.e. linux64,pgo,* - all tests
    # assumption - preseed fields are sanitized already - move parse_testtype to utils.py ?

    for job in preseed:
        data = session.query(JobPriorities.id,
                             JobPriorities.testtype,
                             JobPriorities.buildtype,
                             JobPriorities.platform,
                             JobPriorities.priority,
                             JobPriorities.timeout,
                             JobPriorities.expires,
                             JobPriorities.buildsystem)
        if job['testtype'] != '*':
            data = data.filter(getattr(JobPriorities, 'testtype') == job['testtype'])

        if job['buildtype'] != '*':
            data = data.filter(getattr(JobPriorities, 'buildtype') == job['buildtype'])

        if job['platform'] != '*':
            data = data.filter(getattr(JobPriorities, 'platform') == job['platform'])

        data = data.all()

        _buildsystem = job["build_system_type"]

        # TODO: edge case: we add future jobs with a wildcard, when jobs show up
        #       remove the wildcard, apply priority/timeout/expires to new jobs
        # Deal with the case where we have a new entry in preseed
        if len(data) == 0:
            _expires = job['expires']
            if _expires == '*':
                _expires = str(datetime.now().date() + timedelta(days=365))

            LOG.info("adding a new unknown job to the database: %s" % job)
            newjob = JobPriorities(job['testtype'],
                                   job['buildtype'],
                                   job['platform'],
                                   job['priority'],
                                   job['timeout'],
                                   _expires,
                                   _buildsystem)
            session.add(newjob)
            session.commit()
            session.close()
            continue

        # We can have wildcards, so loop on all returned values in data
        for d in data:
            changed = False
            LOG.info("updating existing job %s/%s/%s" % (d[1], d[2], d[3]))
            _expires = job['expires']
            _priority = job['priority']
            _timeout = job['timeout']

            # we have a taskcluster job in the db, and new job in preseed
            if d[7] != _buildsystem:
                _buildsystem = "*"
                changed = True

            # When we have a defined date to expire a job, parse and use it
            if _expires == '*':
                _expires = str(datetime.now().date() + timedelta(days=365))

            try:
                dv = datetime.strptime(_expires, "%Y-%m-%d").date()
            except ValueError:
                continue

            # When we have expired, use existing priority/timeout, reset expires
            if dv <= datetime.now().date():
                LOG.info("  --  past the expiration date- reset!")
                _expires = ''
                _priority = d[4]
                _timeout = d[5]
                changed = True

            if changed:
                # TODO: do we need to try/except/finally with commit/rollback statements
                conn = engine.connect()
                statement = update(JobPriorities).where(
                    JobPriorities.id == d[0]).values(
                    priority=_priority,
                    timeout=_timeout,
                    expires=_expires,
                    buildsystem=_buildsystem)
                conn.execute(statement)
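For reference, a plausible preseed.json entry matching the fields read above (the field list comes from the comments at the top of the function; the values are illustrative assumptions):

# illustrative preseed.json contents, expressed as the Python it loads into
example_preseed = [
    {"buildtype": "opt", "testtype": "mochitest-1", "platform": "linux64",
     "priority": 1, "timeout": 0, "expires": "*", "build_system_type": "*"},
]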
Example #17
File: server.py Project: jmaher/ouija
def run_slaves_query():
    start_date, end_date = clean_date_params(request.args)

    days_to_show = (end_date - start_date).days
    if days_to_show <= 8:
        jobs = 5
    else:
        jobs = int(round(days_to_show * 0.4))

    info = 'Only slaves with more than %d jobs are displayed.' % jobs

    query_results = session.query(Testjobs.slave, Testjobs.result, Testjobs.date).filter(
        and_(Testjobs.result.in_(["retry", "testfailed", "success", "busted", "exception"]),
             Testjobs.date.between(start_date, end_date))).order_by(Testjobs.date).all()
    session.close()

    if not query_results:
        return

    data = {}
    labels = 'fail retry infra success total'.split()
    summary = {result: 0 for result in labels}
    summary['jobs_since_last_success'] = 0
    dates = []

    for name, result, date in query_results:
        data.setdefault(name, summary.copy())
        data[name]['jobs_since_last_success'] += 1
        if result == 'testfailed':
            data[name]['fail'] += 1
        elif result == 'retry':
            data[name]['retry'] += 1
        elif result == 'success':
            data[name]['success'] += 1
            data[name]['jobs_since_last_success'] = 0
        elif result == 'busted' or result == 'exception':
            data[name]['infra'] += 1
        data[name]['total'] += 1
        dates.append(date)

    # filter slaves
    slave_list = [slave for slave in data if data[slave]['total'] > jobs]

    # calculate failure rate only for slaves that we're going to display
    for slave in slave_list:
        results = data[slave]
        fail_rates = calculate_fail_rate(results['success'],
                                         results['retry'],
                                         results['total'])
        data[slave]['sfr'] = fail_rates

    platforms = {}

    # group slaves by platform and calculate platform failure rate
    slaves = sorted(data.keys())
    for platform, slave_group in groupby(slaves, lambda x: x.rsplit('-', 1)[0]):
        slaves = list(slave_group)

        # don't calculate failure rate for platform we're not going to show
        if not any(slave in slaves for slave in slave_list):
            continue

        platforms[platform] = {}
        results = {}

        for label in ['success', 'retry', 'total']:
            results[label] = sum(data[slave][label] for slave in slaves)

        fail_rates = calculate_fail_rate(results['success'],
                                         results['retry'],
                                         results['total'])
        platforms[platform].update(fail_rates)

    # remove data that we don't need (copy the keys, since we mutate the dict)
    for slave in list(data.keys()):
        if slave not in slave_list:
            del data[slave]

    return {'slaves': data,
            'platforms': platforms,
            'dates': get_date_range(dates),
            'disclaimer': info}
Example #18
    def add_user():
        """ Add new user/customer """
        name = input("Enter your name: ")
        email_id = input("Enter email id: ")
        # Validate Email id
        while True:
            if not validate_email(email_id):
                print(
                    "Entered Email id is invalid. Please enter valid email id")
                email_id = input("Enter email id: ")
            else:
                try:
                    email_id_object = session.query(Customer).filter(
                        Customer.email_id == email_id).one()
                    if email_id_object:
                        print("Email id already exist.")
                        email_id = input("Enter email id: ")

                except NoResultFound:
                    break

        # TODO: Use getpass module to take hidden password
        password = input("Enter password: ")
        confirm_password = input("Enter confirm password: ")
        # Validate that password and confirm password match
        while password != confirm_password:
            print("Password and confirm password are not the same. Please try again")
            password = input("Enter password: ")
            confirm_password = input("Enter confirm password: ")

        mobile_number = input("Enter your mobile number: ")
        # Validate mobile number
        while True:
            if not len(mobile_number) == 10:
                print(
                    "Entered mobile number is invalid. Please enter 10 digits mobile number"
                )
                mobile_number = input("Enter mobile number: ")
            else:
                try:
                    mobile_no_object = session.query(Customer).filter(
                        Customer.mobile_number == mobile_number).one()
                    if mobile_no_object:
                        print("Mobile number already exist")
                        mobile_number = input("Enter your mobile number: ")
                except NoResultFound:
                    break

        c = Customer()
        c.add_customer(name, email_id, password, mobile_number)
        session.add(c)
        session.commit()
        session.close()
Example #19
def add_jobs_to_jobpriority(new_data=None,
                            priority=1,
                            timeout=0,
                            set_expired=False):
    added_jobs = []

    if not new_data:
        return

    # TODO: as a perf improvement we can reduce jobs prior to this expensive for loop
    for job in new_data['results']:

        # TODO: potentially ensure no duplicates in new_data and query once outside the loop
        db_data = []
        db_data = session.query(JobPriorities.id, JobPriorities.testtype,
                                JobPriorities.buildtype,
                                JobPriorities.platform, JobPriorities.priority,
                                JobPriorities.timeout, JobPriorities.expires,
                                JobPriorities.buildsystem).all()

        platform = parse_platform(job['build_platform'])
        if platform is None or platform == "":
            continue

        testtype = parse_testtype(job['build_system_type'],
                                  job['ref_data_name'], job['platform_option'],
                                  job['job_type_name'])
        if testtype is None or testtype == "":
            continue

        _buildsystem = job["build_system_type"]
        found = False
        found_id = None
        for row in db_data:
            if (row[1] == testtype and row[3] == platform
                    and row[2] == job["platform_option"]):
                # TODO: what if we have a race condition with two identical jobs?
                # verify the build system type is the same, or make it *
                found = True
                if row[7] != "*" and _buildsystem != row[7]:
                    _buildsystem = "*"
                    found_id = row[0]

        # We have new jobs from runnablejobs to add to our master list
        if not found:
            _expired = None
            if set_expired:
                # set _expired = today + 14 days
                # TODO: write test for it
                _expired = "%s" % (datetime.datetime.now() +
                                   datetime.timedelta(days=14))

            try:
                jobpriority = JobPriorities(str(testtype),
                                            str(job["platform_option"]),
                                            str(job["build_platform"]),
                                            priority, timeout, _expired,
                                            _buildsystem)

                session.add(jobpriority)
                session.commit()
                added_jobs.append(job)
            except Exception as error:
                session.rollback()
                logging.warning(error)
            finally:
                session.close()
        elif _buildsystem != job['build_system_type']:
            # update table with new buildsystem
            conn = engine.connect()
            statement = update(JobPriorities)\
                          .where(JobPriorities.id == found_id)\
                          .values(buildsystem=_buildsystem)
            conn.execute(statement)

    return added_jobs
Example #20
def _update_job_priority_table(data):
    """Add new jobs to the priority table and update the build system if required."""
    LOG.info('Fetch all rows from the job priority table.')
    # Get all rows of job priorities
    db_data = session.query(JobPriorities.id,
                            JobPriorities.testtype,
                            JobPriorities.buildtype,
                            JobPriorities.platform,
                            JobPriorities.priority,
                            JobPriorities.timeout,
                            JobPriorities.expires,
                            JobPriorities.buildsystem).all()

    # TODO: write test for this
    # When the table is empty it means that we're starting the system for the first time
    # and we're going to use different default values
    map = {}
    if len(db_data) != 0:
        priority = 1
        timeout = 0
        # Using %Y-%m-%d fixes this issue:
        # Warning: Incorrect date value: '2016-10-28 17:36:58.153265' for column 'expires' at row 1
        expiration_date = (datetime.datetime.now() + datetime.timedelta(days=14)).strftime("%Y-%m-%d")
        # Creating this data structure which will reduce how many times we iterate through the DB rows
        for row in db_data:
            key = tuple(row[1:4])
            # This is guaranteed by a unique composite index for these 3 fields in models.py
            assert key not in map,\
                '"{}" should be a unique row and that is unexpected.'.format(key)
            # (testtype, buildtype, platform)
            map[key] = {'pk': row[0], 'build_system_type': row[7]}
    else:
        priority = 5
        timeout = 5400
        expiration_date = None

    total_jobs = len(data)
    new_jobs = 0
    failed_changes = 0
    updated_jobs = 0
    # Loop through sanitized jobs, add new jobs and update the build system if needed
    for job in data:
        _buildsystem = job["build_system_type"]
        key = _unique_key(job)
        if key in map:
            # We already know about this job, we might need to update the build system
            row_build_system_type = map[key]['build_system_type']

            if row_build_system_type == '*' or _buildsystem == '*':
                # We don't need to update anything
                pass
            else:
                # We're seeing the job again but for another build system (e.g. buildbot vs
                # taskcluster). We need to change it to '*'
                if row_build_system_type != _buildsystem:
                    _buildsystem = "*"
                    # Update table with new buildsystem
                    try:
                        conn = engine.connect()
                        statement = update(JobPriorities).where(
                            JobPriorities.id == map[key]['pk']).values(buildsystem=_buildsystem)
                        conn.execute(statement)
                        LOG.info('Updated {}/{} from {} to {}'.format(
                            job['testtype'], job['platform_option'],
                            job['build_system_type'], _buildsystem
                        ))
                        updated_jobs += 1
                    except Exception as e:
                        LOG.info("key = %s, buildsystem = %s" % (key, _buildsystem))
                        LOG.info("exception updating jobPriorities: %s" % e)

        else:
            # We have a new job from runnablejobs to add to our master list
            try:
                jobpriority = JobPriorities(
                    str(job["testtype"]),
                    str(job["platform_option"]),
                    str(job["platform"]),
                    priority,
                    timeout,
                    expiration_date,
                    _buildsystem
                )
                session.add(jobpriority)
                session.commit()
                LOG.info('New job was found ({},{},{},{})'.format(
                    job['testtype'], job['platform_option'], job['platform'], _buildsystem,))
                new_jobs += 1
            except Exception as error:
                session.rollback()
                LOG.warning(error)
                failed_changes += 1
            finally:
                session.close()

    LOG.info('We have {} new jobs and {} updated jobs out of {} total jobs processed.'.format(
        new_jobs, updated_jobs, total_jobs
    ))

    if failed_changes != 0:
        LOG.error('We have failed {} changes out of {} total jobs processed.'.format(
            failed_changes, total_jobs
        ))
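_unique_key is not shown; since the lookup keys built from the DB rows are (testtype, buildtype, platform) tuples, it presumably reduces a sanitized job to the same shape. A sketch (the exact field mapping is an assumption):

def _unique_key(job):
    # hypothetical reconstruction mirroring the keys built from JobPriorities rows
    return (str(job['testtype']),
            str(job['platform_option']),
            str(job['platform']))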
Example #21
def migration(args):
    limit = int(args.limit)
    startDate = args.startDate
    offset = 0
    url = URL % (startDate, limit, offset)
    try:
        response = retry(requests.get, args=(url, )).json()
    except Exception as error:
        # we will return an empty 'result' list if we got an exception here
        logger.debug("the request to %s failed, due to %s" % (url, error))
        response = {'result': []}
    datasets = response['result']

    session.query(Testjobs).filter(
        Testjobs.date >= '%s 00:00:00' % startDate).delete()

    while len(datasets) > 0:
        for data in datasets:
            testtype = data['testtype']

            # spidermonkey builds
            if 'pider' in testtype:
                continue

            # docker generation jobs
            if 'ocker' in testtype:
                continue

            # hazard builds
            if 'az' in testtype:
                continue

            # decision tasks
            if 'ecision' in testtype:
                continue

            # TODO: figure out a better naming strategy here
            # builds, linting, etc.
            if testtype.startswith('[TC]'):
                continue

            # skip talos jobs
            if testtype in [
                    'chromez', 'tp5o', 'g1', 'g2', 'g3', 'g4', 'xperf',
                    'chromez-e10s', 'tp5o-e10s', 'g1-e10s', 'g2-e10s',
                    'g3-e10s', 'g4-e10s', 'xperf-e10s', 'dromaeojs',
                    'dromaeojs-e10s', 'svgr', 'svgr-e10s', 'remote-tsvgx',
                    'remote-tp4m_chrome', 'other', 'other-e10s'
            ]:
                continue

            # linter jobs
            if testtype in [
                    'Static Analysis Opt', 'ESLint', 'Android lint', 'lint'
            ]:
                continue

            # builds
            if testtype in [
                    'nightly', 'Android armv7 API 15+', 'ASan Dbg', 'valgrind',
                    'Android armv7 API 15+ Dbg', 'ASan Debug', 'pgo-build',
                    'ASan Opt', 'build'
            ]:
                continue

            # hidden/lower tier tests, not sure of CI system, old jobs
            if testtype in [
                    'media-youtube-tests', 'external-media-tests',
                    'luciddream', 'media-tests'
            ]:
                continue

            Testjob = Testjobs(data['slave'], data['result'],
                               data['build_system_type'], data['duration'],
                               data['platform'], data['buildtype'], testtype,
                               data['bugid'], data['branch'], data['revision'],
                               data['date'], data['failure_classification'],
                               data['failures'])
            try:
                session.add(Testjob)
                session.commit()

            except Exception as error:
                logging.warning(error)
                session.rollback()

            finally:
                session.close()

        # Move to the next page of results by advancing the offset
        offset += limit
        url = URL % (startDate, limit, offset)
        response = retry(requests.get, args=(url, )).json()
        datasets = response['result']
Example #22
File: server.py Project: MikeLing/ouija
def update_preseed():
    """
    We sync preseed.json to jobpririties in server on startup, since that is
    the only time we expect preseed.json to change.
    """

    # get preseed data first
    preseed_path = os.path.join(os.path.dirname(SCRIPT_DIR), 'src',
                                'preseed.json')
    preseed = []
    with open(preseed_path, 'r') as fHandle:
        preseed = json.load(fHandle)

    # Preseed data will have the fields: buildtype, testtype, platform, priority, timeout, expires
    # The expires field defaults to 2 weeks for a new job in the database
    # The expires field holds a date "YYYY-MM-DD", but can be "*" to indicate never expiring
    # Typical priority will be 1, but if we want to force coalescing we can do that
    # One hack is that if we have a * in a buildtype,testtype,platform field, then
    # we assume it is for all flavors of the * field: i.e. linux64,pgo,* - all tests
    # assumption - preseed fields are sanitized already - move parse_testtype to utils.py?

    for job in preseed:
        data = session.query(JobPriorities.id, JobPriorities.testtype,
                             JobPriorities.buildtype, JobPriorities.platform,
                             JobPriorities.priority, JobPriorities.timeout,
                             JobPriorities.expires, JobPriorities.buildsystem)
        if job['testtype'] != '*':
            data = data.filter(
                getattr(JobPriorities, 'testtype') == job['testtype'])

        if job['buildtype'] != '*':
            data = data.filter(
                getattr(JobPriorities, 'buildtype') == job['buildtype'])

        if job['platform'] != '*':
            data = data.filter(
                getattr(JobPriorities, 'platform') == job['platform'])

        data = data.all()

        _buildsystem = job["build_system_type"]

        # TODO: edge case: we add future jobs with a wildcard, when jobs show up
        #       remove the wildcard, apply priority/timeout/expires to new jobs
        # Deal with the case where we have a new entry in preseed
        if len(data) == 0:
            _expires = job['expires']
            if _expires == '*':
                _expires = str(datetime.now().date() + timedelta(days=365))

            LOG.info("adding a new unknown job to the database: %s" % job)
            newjob = JobPriorities(job['testtype'], job['buildtype'],
                                   job['platform'], job['priority'],
                                   job['timeout'], _expires, _buildsystem)
            session.add(newjob)
            session.commit()
            session.close()
            continue

        # We can have wildcards, so loop on all returned values in data
        for d in data:
            changed = False
            LOG.info("updating existing job %s/%s/%s" % (d[1], d[2], d[3]))
            _expires = job['expires']
            _priority = job['priority']
            _timeout = job['timeout']

            # the db row and the preseed entry disagree on build system, so use '*'
            if d[7] != _buildsystem:
                _buildsystem = "*"
                changed = True

            # When we have a defined date to expire a job, parse and use it
            if _expires == '*':
                _expires = str(datetime.now().date() + timedelta(days=365))

            try:
                dv = datetime.strptime(_expires, "%Y-%m-%d").date()
            except ValueError:
                continue

            # When we have expired, use existing priority/timeout, reset expires
            if dv <= datetime.now().date():
                LOG.info("  --  past the expiration date- reset!")
                _expires = ''
                _priority = d[4]
                _timeout = d[5]
                changed = True

            if changed:
                # TODO: do we need to try/except/finally with commit/rollback statements
                conn = engine.connect()
                statement = update(JobPriorities).where(
                    JobPriorities.id == d[0]).values(priority=_priority,
                                                     timeout=_timeout,
                                                     expires=_expires,
                                                     buildsystem=_buildsystem)
                conn.execute(statement)
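For reference, a minimal sketch of the preseed.json data this function consumes. The field names come from the comment block and key lookups above (note the code also reads build_system_type, which the comment omits); all values are illustrative:

# Illustrative result of json.load(fHandle) for preseed.json; values are
# made up, and '*' acts both as a match-anything wildcard and as a
# never-expiring date.
preseed = [
    {"buildtype": "opt", "testtype": "mochitest-1", "platform": "linux64",
     "priority": 1, "timeout": 0, "expires": "2017-12-31",
     "build_system_type": "taskcluster"},
    {"buildtype": "pgo", "testtype": "*", "platform": "linux64",
     "priority": 5, "timeout": 5400, "expires": "*",
     "build_system_type": "*"},
]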
Example #24
def _update_job_priority_table(data):
    """Add new jobs to the priority table and update the build system if required."""
    LOG.info('Fetch all rows from the job priority table.')
    # Get all rows of job priorities
    db_data = session.query(JobPriorities.id, JobPriorities.testtype,
                            JobPriorities.buildtype, JobPriorities.platform,
                            JobPriorities.priority, JobPriorities.timeout,
                            JobPriorities.expires,
                            JobPriorities.buildsystem).all()

    # TODO: write test for this
    # When the table is empty it means that we're starting the system for the first time
    # and we're going to use different default values
    job_map = {}
    if len(db_data) != 0:
        priority = 1
        timeout = 0
        # Using %Y-%m-%d fixes this issue:
        # Warning: Incorrect date value: '2016-10-28 17:36:58.153265' for column 'expires' at row 1
        expiration_date = (datetime.datetime.now() +
                           datetime.timedelta(days=14)).strftime("%Y-%m-%d")
        # Build this lookup structure so we only iterate through the DB rows once
        for row in db_data:
            key = tuple(row[1:4])
            # Uniqueness is guaranteed by a composite index over these 3 fields in models.py
            assert key not in job_map,\
                '"{}" should be a unique row and that is unexpected.'.format(key)
            # (testtype, buildtype, platform)
            job_map[key] = {'pk': row[0], 'build_system_type': row[7]}
    else:
        priority = 5
        timeout = 5400
        expiration_date = None

    total_jobs = len(data)
    new_jobs = 0
    failed_changes = 0
    updated_jobs = 0
    # Loop through sanitized jobs, add new jobs and update the build system if needed
    for job in data:
        _buildsystem = job["build_system_type"]
        key = _unique_key(job)
        if key in job_map:
            # We already know about this job; we might need to update the build system
            row_build_system_type = job_map[key]['build_system_type']

            if row_build_system_type == '*' or _buildsystem == '*':
                # We don't need to update anything
                pass
            else:
                # We're seeing the job again but for another build system (e.g. buildbot vs
                # taskcluster). We need to change it to '*'
                if row_build_system_type != _buildsystem:
                    _buildsystem = "*"
                    # Update table with new buildsystem
                    try:
                        conn = engine.connect()
                        statement = update(JobPriorities).where(
                            JobPriorities.id == job_map[key]['pk']).values(
                                buildsystem=_buildsystem)
                        conn.execute(statement)
                        LOG.info('Updated {}/{} from {} to {}'.format(
                            job['testtype'], job['platform_option'],
                            job['build_system_type'], _buildsystem))
                        updated_jobs += 1
                    except Exception as e:
                        LOG.info("key = %s, buildsystem = %s" %
                                 (key, _buildsystem))
                        LOG.info("exception updating jobPriorities: %s" % e)

        else:
            # We have a new job from runnablejobs to add to our master list
            try:
                jobpriority = JobPriorities(str(job["testtype"]),
                                            str(job["platform_option"]),
                                            str(job["platform"]), priority,
                                            timeout, expiration_date,
                                            _buildsystem)
                session.add(jobpriority)
                session.commit()
                LOG.info('New job was found ({},{},{},{})'.format(
                    job['testtype'],
                    job['platform_option'],
                    job['platform'],
                    _buildsystem,
                ))
                new_jobs += 1
            except Exception as error:
                session.rollback()
                LOG.warning(error)
                failed_changes += 1
            finally:
                session.close()

    LOG.info(
        'We have {} new jobs and {} updated jobs out of {} total jobs processed.'
        .format(new_jobs, updated_jobs, total_jobs))

    if failed_changes != 0:
        LOG.error(
            'We have failed {} changes out of {} total jobs processed.'.format(
                failed_changes, total_jobs))
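_unique_key is not shown in this excerpt. Given that the lookup keys are built as tuple(row[1:4]), i.e. (testtype, buildtype, platform), and that new rows pass platform_option into the buildtype column, a plausible sketch (an assumption, not the project's actual helper) is:

def _unique_key(job):
    # Mirror the (testtype, buildtype, platform) tuple used for job_map
    # keys above; platform_option is assumed to map to buildtype.
    return (str(job['testtype']),
            str(job['platform_option']),
            str(job['platform']))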
Example #25
 def delete_customer(email_id):
     db_session.query(Customer).filter(
         Customer.email_id == email_id).delete()
     db_session.commit()
     db_session.close()
     print("Customer deleted successfully. :)")
Example #26
File: server.py Project: jmaher/ouija
def run_seta_details_query():
    buildbot = sanitize_bool(request.args.get("buildbot", 0))
    branch = sanitize_string(request.args.get("branch", 'mozilla-inbound'))
    taskcluster = sanitize_bool(request.args.get("taskcluster", 0))
    priority = int(sanitize_string(request.args.get("priority", '5')))
    jobnames = JOBSDATA.jobnames_query()
    date = str(datetime.now().date())
    retVal = {}
    retVal[date] = []
    jobtype = []

    # we only support the fx-team, autoland, and mozilla-inbound branches in seta
    if str(branch) not in ['fx-team', 'mozilla-inbound', 'autoland'] \
            and str(branch) != '':
        abort(404)

    # For a TaskCluster request we don't care which priority the user requested.
    # We return high value jobs by default, and return all jobs once every
    # 5 pushes or every 90 minutes for that branch.
    if request.headers.get('User-Agent', '') == 'TaskCluster':
        # we force taskcluster to 1 for a request from taskcluster; it's more
        # reasonable and simplifies the request url.
        taskcluster = 1

        # we will return all jobs every 90 minutes, so the return_all_jobs flag
        # will be set to true once the time limit has been reached.
        return_all_jobs = False

        # We should return the full job list as a fallback if it's a request
        # from taskcluster without head_rev or pushlog_id in it
        try:
            branch_info = session.query(TaskRequests.counter,
                                        TaskRequests.datetime,
                                        TaskRequests.reset_delta).filter(
                TaskRequests.branch == branch).all()
        except Exception:
            branch_info = []
        time_of_now = datetime.now()

        # If we got nothing related to that branch, we should create it.
        if len(branch_info) == 0:
            # time_of_lastreset is not a good name anyway :(
            # We treat every branch's reset_delta as 90 minutes; we should find
            # a better delta for each branch in the future.
            branch_data = TaskRequests(str(branch), 1, time_of_now, RESET_DELTA)
            try:
                session.add(branch_data)
                session.commit()
            except Exception as error:
                LOG.debug(error)
                session.rollback()

            finally:
                session.close()
            counter = 1
            time_string = time_of_now
            reset_delta = RESET_DELTA

        # We should update it if that branch has already been stored.
        else:
            counter, time_string, reset_delta = branch_info[0]
            counter += 1
            conn = engine.connect()
            statement = update(TaskRequests).where(
                TaskRequests.branch == branch).values(
                counter=counter)
            conn.execute(statement)

        delta = (time_of_now - time_string).total_seconds()

        # we should update the recorded time if the elapsed time has
        # reached the time limit of that branch.
        if delta >= reset_delta:
            conn = engine.connect()
            statement = update(TaskRequests).where(
                TaskRequests.branch == branch).values(
                datetime=time_of_now)
            conn.execute(statement)

            # we need to set the return_all_jobs flag to true.
            return_all_jobs = True

        # we query all jobs here rather than only the jobs matching the requested
        # priority, because the job returning strategy depends on each job's priority.
        query = session.query(JobPriorities.platform,
                              JobPriorities.buildtype,
                              JobPriorities.testtype,
                              JobPriorities.priority,
                              JobPriorities.timeout
                              ).all()

        for d in query:
            # we only return a job if it hasn't reached its timeout limit; a
            # timeout of zero means the job must always run.
            if delta < d[4] or d[4] == 0:
                # All high value jobs have priority 1, and we need to
                # return all jobs once every 5 pushes (for now).
                if counter % d[3] != 0:
                    jobtype.append([d[0], d[1], d[2]])

            # we need to return all jobs every 90 minutes, so all jobs will be
            # returned once the delta is larger than 5400 seconds
            elif return_all_jobs:
                jobtype.append([d[0], d[1], d[2]])

    # We don't care about a job's timeout if it's not a taskcluster request.
    else:
        query = session.query(JobPriorities.platform,
                              JobPriorities.buildtype,
                              JobPriorities.testtype,
                              JobPriorities.priority,
                              ).all()

        # priority = 0; run all the jobs
        if priority != 1 and priority != 5:
            priority = 0

        # Because we store high value jobs in the seta table by default, we
        # return low value jobs (jobs with no related failures) by default
        if priority == 0:
            jobtype = JOBSDATA.jobtype_query()  # All jobs regardless of priority
        # priority = 5: run all low value jobs
        else:
            joblist = [job for job in query if job[3] == priority]
            for j in joblist:
                jobtype.append([j[0], j[1], j[2]])

        # TODO: filter out based on buildsystem from database, either 'buildbot' or '*'
        if buildbot:
            active_jobs = []
            # pick up buildbot jobs from the job list to speed up filtering
            buildbot_jobs = [job for job in jobnames if job['buildplatform'] == 'buildbot']
            # find the corresponding job detail information
            for job in jobtype:
                for j in buildbot_jobs:
                    if j['name'] == job[2] and j['platform'] == job[0] and j['buildtype'] == job[1]:
                        active_jobs.append(j['ref_data_name'] if branch == 'mozilla-inbound'
                                           else j['ref_data_name'].replace(
                                               'mozilla-inbound', branch))

            jobtype = active_jobs

    # TODO: filter out based on buildsystem from database, either 'taskcluster' or '*'
    if taskcluster:
        active_jobs = []
        taskcluster_jobs = [job for job in jobnames if job['buildplatform'] == 'taskcluster']
        for job in jobtype:
            # we need to translate the jobtype back to its proper data form.
            job[2] = job[2].replace('e10s-browser-chrome', 'browser-chrome-e10s')
            job[2] = job[2].replace('e10s-devtools-chrome', 'devtools-chrome-e10s')
            job[2] = job[2].replace('gl-', 'webgl-')

            for j in taskcluster_jobs:
                if job[2] in j['name'] and j['platform'] == job[0] and j['buildtype'] == job[1]:
                    active_jobs.append(j['ref_data_name'])
        jobtype = active_jobs

    retVal[date] = jobtype
    return {"jobtypes": retVal}