Example #1
def delete_product():
    print("Available products are the following: ")
    Product.browse_product()
    p_id = input("\nEnter the product id which you want to delete: ")
    db_session.query(Product).filter(Product.id == p_id).delete()
    db_session.commit()
    db_session.close()
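These snippets assume a SQLAlchemy model and session defined elsewhere. A minimal sketch of the setup Example #1 relies on; the Product columns and the browse_product helper are assumptions for illustration:

from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Product(Base):
    __tablename__ = 'products'
    id = Column(Integer, primary_key=True)
    name = Column(String(100))

    @classmethod
    def browse_product(cls):
        # hypothetical helper: print every product in the table
        for product in db_session.query(cls).all():
            print(product.id, product.name)

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
db_session = sessionmaker(bind=engine)()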
Example #2
def clearResults(branch, startdate):

    date_xx_days_ago = datetime.date.today() - datetime.timedelta(days=180)
    session.query(Testjobs).filter(Testjobs.branch == branch).\
        filter(and_(Testjobs.date >= startdate,
                    Testjobs.date < date_xx_days_ago)).\
        delete(synchronize_session='fetch')

    session.commit()
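Passing several criteria to filter() ANDs them implicitly, so the and_() wrapper is optional; an equivalent spelling of the same bulk delete:

session.query(Testjobs).\
    filter(Testjobs.branch == branch,
           Testjobs.date >= startdate,
           Testjobs.date < date_xx_days_ago).\
    delete(synchronize_session='fetch')
session.commit()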
Example #3
def run_seta_summary_query():
    query = session.query(Seta.date).distinct().all()
    retVal = {}
    dates = []
    for d in query:
        dates.append(d[0].strftime("%Y-%m-%d"))

    for d in dates:
        count = session.query(Seta.id).filter(Seta.date == d).count()
        retVal[d] = str(count)

    return {'dates': retVal}
Example #4
def updatedb(date, platform, branch, numpushes, numjobs, sumduration):
    # bulk-delete any existing rows for this date/branch/platform
    session.query(Dailyjobs).filter(and_(Dailyjobs.date == date, Dailyjobs.branch == branch,
                                         Dailyjobs.platform == platform)).delete()
    session.commit()

    dailyjob = Dailyjobs(date, platform, branch, numpushes, numjobs, sumduration)
    try:
        session.add(dailyjob)
        session.commit()
    except Exception as e:
        LOG.warning(e)
        session.rollback()

    session.close()
Example #5
def insert_in_database(to_insert, date=None):
    if not date:
        date = datetime.datetime.now().strftime('%Y-%m-%d')
    else:
        date = date.strftime('%Y-%m-%d')

    session.query(Seta).filter(Seta.date == date).delete(
        synchronize_session='fetch')
    session.commit()
    for jobtype in to_insert:
        job = Seta(str(jobtype), date)
        session.add(job)
        session.commit()
    session.close()
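Committing inside the loop opens one transaction per row. When per-row isolation is not needed, a single commit after add_all() is usually cheaper; a sketch using the same names:

jobs = [Seta(str(jobtype), date) for jobtype in to_insert]
session.add_all(jobs)
session.commit()
session.close()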
Example #6
def updatedb(date, platform, branch, numpushes, numjobs, sumduration):
    # bulk-delete any existing rows for this date/branch/platform
    session.query(Dailyjobs).filter(
        and_(Dailyjobs.date == date, Dailyjobs.branch == branch,
             Dailyjobs.platform == platform)).delete()
    session.commit()

    dailyjob = Dailyjobs(date, platform, branch, numpushes, numjobs,
                         sumduration)
    try:
        session.add(dailyjob)
        session.commit()
    except Exception as e:
        LOG.warning(e)
        session.rollback()

    session.close()
Example #7
def update_jobpriorities(to_insert, _priority, _timeout):
    # to_insert is currently high priority, pri=1 jobs, all else are pri=5 jobs

    changed_jobs = []
    for item in to_insert:
        # NOTE: we ignore JobPriorities with expires set, as they take precedence
        data = session.query(JobPriorities.id, JobPriorities.priority)\
                      .filter(and_(JobPriorities.testtype == item[2],
                                   JobPriorities.buildtype == item[1],
                                   JobPriorities.platform == item[0],
                                   JobPriorities.expires.is_(None))).all()
        if len(data) != 1:
            # TODO: if 0 items, do we add the job?  if >1 do we alert and cleanup?
            continue

        if data[0][1] != _priority:
            changed_jobs.append(item)

            conn = engine.connect()
            statement = update(JobPriorities)\
                          .where(and_(JobPriorities.testtype == item[2],
                                      JobPriorities.buildtype == item[1],
                                      JobPriorities.platform == item[0]))\
                          .values(priority=_priority,
                                  timeout=_timeout)
            conn.execute(statement)

    return changed_jobs
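The update above goes through a raw Core connection and bypasses the session. The same statement can be issued through the session's query API; a sketch with the same names, leaving commit semantics to the caller:

session.query(JobPriorities)\
       .filter(JobPriorities.testtype == item[2],
               JobPriorities.buildtype == item[1],
               JobPriorities.platform == item[0])\
       .update({'priority': _priority, 'timeout': _timeout},
               synchronize_session=False)
session.commit()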
Example #8
def retrievedb(branch, date):
    data = session.query(Testjobs.revision, func.count(Testjobs.result),
                         func.sum(Testjobs.duration), Testjobs.platform).\
        filter(and_(Testjobs.branch == branch, Testjobs.date.like(date),
                    ~Testjobs.testtype.like('build'), Testjobs.testtype != 'valgrind')).group_by(
        Testjobs.revision, Testjobs.platform).all()

    for rows in data:
        revision = rows[0]
        jobs = int(rows[1])
        duration = int(rows[2])
        platform = rows[3]
        found = False
        for p in platforms:
            if platform.startswith(p):
                platform = p
                found = True
                break

        # we can skip b2g/mulet or other platforms that don't match our core set
        if not found:
            continue

        if revision not in revisions_dict:
            revisions_dict[revision] = {}
            for p in platforms:
                revisions_dict[revision][p] = [date, 0, 0]

        revisions_dict[revision][platform][1] += jobs
        revisions_dict[revision][platform][2] += duration
Example #9
def increase_jobs_priority(high_value_jobs, priority=1, timeout=0):
    """For every high value job, check whether its priority needs to be increased.

    Currently, high value jobs have a priority of 1 and a timeout of 0.

    Return the list of jobs whose priority was changed.
    """
    changed_jobs = []
    for item in high_value_jobs:
        # NOTE: we ignore JobPriorities with expires set, as they take precedence
        data = session.query(JobPriorities.id, JobPriorities.priority)\
                      .filter(and_(JobPriorities.testtype == item[2],
                                   JobPriorities.buildtype == item[1],
                                   JobPriorities.platform == item[0],
                                   JobPriorities.expires.is_(None))).all()
        if len(data) != 1:
            # TODO: if 0 items, do we add the job?  if >1 do we alert and cleanup?
            continue

        if data[0][1] != priority:
            changed_jobs.append(item)

            conn = engine.connect()
            statement = update(JobPriorities)\
                .where(and_(JobPriorities.testtype == item[2],
                            JobPriorities.buildtype == item[1],
                            JobPriorities.platform == item[0]))\
                .values(priority=priority, timeout=timeout)
            conn.execute(statement)

    return changed_jobs
Example #10
def detail():
    """
    Detail page of a project
    """
    project_id = request.args.get("id")
    projectDetail = session.query(Projects).filter(Projects.id == project_id).all()
    return render_template("detail.html", projectDetail=projectDetail)
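When at most one row is expected, Query.first() returns the object or None rather than a list; a sketch of that variant (whether the template expects a list or a single object is an assumption here):

projectDetail = session.query(Projects).filter(Projects.id == project_id).first()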
Example #11
def valve(head_rev, pushlog_id, branch, priority):
    """Determine which kind of job should be returned"""
    BRANCH_COUNTER.increase_the_counter(branch)
    request_list = []
    try:
        request_list = session.query(TaskRequests.head_rev,
                                     TaskRequests.pushlog_id,
                                     TaskRequests.priority).limit(40)
    except Exception:
        session.rollback()

    requests = {}
    # use distinct loop names so the head_rev/pushlog_id arguments survive the loop
    for row_head_rev, row_pushlog_id, row_priority in request_list:
        requests[row_pushlog_id] = {'head_rev': row_head_rev,
                                    'priority': row_priority}

    # If this pushlog_id has been scheduled, we just return
    # the priority returned before.
    if pushlog_id in requests:
        priority = requests[pushlog_id]['priority']
    else:

        # we return all jobs for every 5 pushes.
        if RequestCounter.BRANCH_COUNTER[branch] >= 5:
            RequestCounter.reset(branch)
            priority = None
        task_request = TaskRequests(str(head_rev), str(pushlog_id), priority)
        session.add(task_request)
        session.commit()
    return priority
Example #12
def run_dailyjob_query():
    start_date, end_date = clean_date_params(request.args)
    start_date = start_date - timedelta(days=1)
    end_date = end_date + timedelta(days=1)
    # numpushes must be selected too: the loop below unpacks six columns per row
    data = session.query(Dailyjobs.date, Dailyjobs.platform, Dailyjobs.branch,
                         Dailyjobs.numpushes, Dailyjobs.numjobs,
                         Dailyjobs.sumduration).\
        filter(Dailyjobs.date.between(start_date, end_date)).order_by(case([
            (Dailyjobs.platform == 'linux', 1),
            (Dailyjobs.platform == 'osx', 2),
            (Dailyjobs.platform == 'win', 3),
            (Dailyjobs.platform == 'android', 4)], else_=5)).all()

    output = {}
    for rows in data:
        date = str(rows[0])
        platform = rows[1]
        branch = rows[2]
        numpushes = int(rows[3])
        numjobs = int(rows[4])
        sumduration = int(rows[5])

        if date not in output:
            output[date] = {'mozilla-inbound': [], 'fx-team': [], 'try': [], 'autoland': []}
        if 'mozilla-inbound' in branch:
            output[date]['mozilla-inbound'].append([platform, numpushes, numjobs, sumduration])
        elif 'fx-team' in branch:
            output[date]['fx-team'].append([platform, numpushes, numjobs, sumduration])
        elif 'try' in branch:
            output[date]['try'].append([platform, numpushes, numjobs, sumduration])
        elif 'autoland' in branch:
            output[date]['autoland'].append([platform, numpushes, numjobs, sumduration])
    return {'dailyjobs': output}
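The case([...]) list form is the pre-1.4 SQLAlchemy calling convention; 1.4 and later also accept the when-tuples positionally. A sketch of the same ordering expression in the newer style:

from sqlalchemy import case

order_expr = case(
    (Dailyjobs.platform == 'linux', 1),
    (Dailyjobs.platform == 'osx', 2),
    (Dailyjobs.platform == 'win', 3),
    (Dailyjobs.platform == 'android', 4),
    else_=5,
)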
Example #13
def check_data(query_date):
    ret_val = []
    data = session.query(Seta.jobtype).filter(Seta.date == query_date).all()
    if not data:
        print("The database does not have data for the given date %s." % query_date)
        for date in range(-3, 4):
            current_date = query_date + datetime.timedelta(date)
            # Query objects are always truthy; fetch the rows to test for data
            jobtype = session.query(Seta).filter(Seta.date == current_date).all()
            if jobtype:
                print("The data is available for date=%s" % current_date)
        return ret_val

    for job in data:
        parts = job[0].split("'")
        ret_val.append("%s" % [str(parts[1]), str(parts[3]), str(parts[5])])

    return ret_val
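The split("'") parsing above assumes each Seta.jobtype row stores a stringified three-element list; with a hypothetical value:

>>> parts = "['linux64', 'opt', 'mochitest-1']".split("'")
>>> parts[1], parts[3], parts[5]
('linux64', 'opt', 'mochitest-1')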
Example #14
def run_seta_query():
    start_date, end_date = clean_date_params(request.args, delta=SETA_WINDOW)

    # we enlarge the datetime range to make sure the latest failures are included.
    start_date = start_date - timedelta(days=1)
    end_date = end_date + timedelta(days=1)
    data = session.query(Testjobs.bugid, Testjobs.platform, Testjobs.buildtype, Testjobs.testtype,
                         Testjobs.duration).filter(and_(Testjobs.failure_classification == 2,
                                                        Testjobs.date >= start_date,
                                                        Testjobs.date <= end_date)).all()
    failures = {}
    for d in data:
        failures.setdefault(d[0], []).append(d[1:])

    return {'failures': failures}
Example #15
def run_results_day_flot_query():
    """
    This function returns the total failures/total jobs data per day for all platforms.
    It sends the data in the format required by Flot, a jQuery package used
    for plotting.
    """
    start_date, end_date = clean_date_params(request.args)

    platforms = ['android4.0',
                 'android2.3',
                 'linux32',
                 'winxp',
                 'win7',
                 'win8',
                 'osx10.6',
                 'osx10.7',
                 'osx10.8']

    data_platforms = {}
    for platform in platforms:
        # COUNT() counts non-NULL values, so wrap the failure condition in a
        # case() that yields NULL for non-failures; counting the bare boolean
        # expression would count every row.
        query_results = session.query(Testjobs.date.label('day'),
                                      func.count(case([(Testjobs.result == 'testfailed', 1)])
                                                 ).label("failures"),
                                      func.count(Testjobs.result).label('totals')).filter(
            and_(Testjobs.platform == platform,
                 Testjobs.date >= start_date, Testjobs.date <= end_date)).group_by('day').all()

        dates = []
        data = {}
        data['failures'] = []
        data['totals'] = []

        for day, fail, total in query_results:
            dates.append(day)
            timestamp = calendar.timegm(day.timetuple()) * 1000
            data['failures'].append((timestamp, int(fail)))
            data['totals'].append((timestamp, int(total)))

        data_platforms[platform] = {'data': data, 'dates': get_date_range(dates)}

    session.close()
    return data_platforms
Example #16
    def __init__(self):
        self.email = input("Enter your email id: ")
        self.password = input("Enter your Password: ")
        # the admin email literal was redacted in the source
        if self.email == "*****@*****.**" and self.password == "Mohit#24":
            print("Welcome Admin")
            AdminActions.admin_action()
        else:
            print("Welcome Customer")
            try:
                # Check customer account exist or not
                all_email_password = session.query(Customer).filter(
                    and_(Customer.email_id == self.email,
                         Customer.password == self.password)).one()
                if all_email_password:
                    CustomerActions.customer_action()
            except NoResultFound:
                print(
                    "Account is not available for this email id. Please create your account! :)"
                )
                User.add_user()
                CustomerActions.customer_action()
Example #17
def reset_preseed():
    data = session.query(JobPriorities.expires, JobPriorities.id)\
                  .filter(JobPriorities.expires.isnot(None)).all()

    now = datetime.datetime.now()
    for item in data:
        try:
            # %m parses the month; %M would parse minutes
            dv = datetime.datetime.strptime(item[0], "%Y-%m-%d")
        except ValueError:
            # TODO: consider updating column to have expires=None?
            continue
        except TypeError:
            # item[0] is already a date object; lift it to a datetime
            dv = datetime.datetime.combine(item[0],
                                           datetime.datetime.min.time())

        # reset expire field if date is today or in the past
        if dv.date() <= now.date():
            conn = engine.connect()
            statement = update(JobPriorities)\
                          .where(JobPriorities.id == item[1])\
                          .values(expires=None)
            conn.execute(statement)
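The format-string pitfall fixed above is easy to miss: %M parses minutes while %m parses the month, so "%Y-%M-%d" silently reads the month field as minutes:

>>> import datetime
>>> datetime.datetime.strptime("2016-10-28", "%Y-%M-%d")
datetime.datetime(2016, 1, 28, 0, 10)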
Example #18
def clear_expiration_field_for_expired_jobs():
    data = session.query(JobPriorities.expires, JobPriorities.id).filter(
        JobPriorities.expires.isnot(None)).all()

    now = datetime.datetime.now()
    for item in data:
        try:
            # %m parses the month; %M would parse minutes
            expiration_date = datetime.datetime.strptime(item[0], "%Y-%m-%d")
        except ValueError:
            # TODO: consider updating column to have expires=None?
            LOG.warning('Failed to downcast to datetime for ({},{})'.format(
                item[1], item[0]))
            continue
        except TypeError:
            # item[0] is already a date object; lift it to a datetime
            expiration_date = datetime.datetime.combine(
                item[0], datetime.datetime.min.time())

        # reset expiration field if the date is today or in the past
        if expiration_date.date() <= now.date():
            conn = engine.connect()
            statement = update(JobPriorities)\
                .where(JobPriorities.id == item[1])\
                .values(expires=None)
            conn.execute(statement)
Example #19
def _update_job_priority_table(data):
    """Add new jobs to the priority table and update the build system if required."""
    LOG.info('Fetch all rows from the job priority table.')
    # Get all rows of job priorities
    db_data = session.query(JobPriorities.id, JobPriorities.testtype,
                            JobPriorities.buildtype, JobPriorities.platform,
                            JobPriorities.priority, JobPriorities.timeout,
                            JobPriorities.expires,
                            JobPriorities.buildsystem).all()

    # TODO: write test for this
    # When the table is empty it means that we're starting the system for the first time
    # and we're going to use different default values
    map = {}
    if len(db_data) != 0:
        priority = 1
        timeout = 0
        # Using %Y-%m-%d fixes this issue:
        # Warning: Incorrect date value: '2016-10-28 17:36:58.153265' for column 'expires' at row 1
        expiration_date = (datetime.datetime.now() +
                           datetime.timedelta(days=14)).strftime("%Y-%m-%d")
        # Build this data structure to reduce how many times we iterate through the DB rows
        for row in db_data:
            key = tuple(row[1:4])
            # This is guaranteed by a unique composite index for these 3 fields in models.py
            assert key not in map,\
                '"{}" should be a unique row and that is unexpected.'.format(key)
            # (testtype, buildtype, platform)
            map[key] = {'pk': row[0], 'build_system_type': row[7]}
    else:
        priority = 5
        timeout = 5400
        expiration_date = None

    total_jobs = len(data)
    new_jobs = 0
    failed_changes = 0
    updated_jobs = 0
    # Loop through sanitized jobs, add new jobs and update the build system if needed
    for job in data:
        _buildsystem = job["build_system_type"]
        key = _unique_key(job)
        if key in map:
            # We already know about this job, we might need to update the build system
            row_build_system_type = map[key]['build_system_type']

            if row_build_system_type == '*' or _buildsystem == '*':
                # We don't need to update anything
                pass
            else:
                # We're seeing the job again but for another build system (e.g. buildbot vs
                # taskcluster). We need to change it to '*'
                if row_build_system_type != _buildsystem:
                    _buildsystem = "*"
                    # Update table with new buildsystem
                    try:
                        conn = engine.connect()
                        statement = update(JobPriorities).where(
                            JobPriorities.id == map[key]['pk']).values(
                                buildsystem=_buildsystem)
                        conn.execute(statement)
                        LOG.info('Updated {}/{} from {} to {}'.format(
                            job['testtype'], job['platform_option'],
                            job['build_system_type'], _buildsystem))
                        updated_jobs += 1
                    except Exception as e:
                        LOG.info("key = %s, buildsystem = %s" %
                                 (key, _buildsystem))
                        LOG.info("exception updating jobPriorities: %s" % e)

        else:
            # We have a new job from runnablejobs to add to our master list
            try:
                jobpriority = JobPriorities(str(job["testtype"]),
                                            str(job["platform_option"]),
                                            str(job["platform"]), priority,
                                            timeout, expiration_date,
                                            _buildsystem)
                session.add(jobpriority)
                session.commit()
                LOG.info('New job was found ({},{},{},{})'.format(
                    job['testtype'],
                    job['platform_option'],
                    job['platform'],
                    _buildsystem,
                ))
                new_jobs += 1
            except Exception as error:
                session.rollback()
                LOG.warning(error)
                failed_changes += 1
            finally:
                session.close()

    LOG.info(
        'We have {} new jobs and {} updated jobs out of {} total jobs processed.'
        .format(new_jobs, updated_jobs, total_jobs))

    if failed_changes != 0:
        LOG.error(
            'We have failed {} changes out of {} total jobs processed.'.format(
                failed_changes, total_jobs))
Example #20
def update_preseed():
    """
    We sync preseed.json to jobpriorities on the server at startup, since that
    is the only time we expect preseed.json to change.
    """

    # get preseed data first
    preseed_path = os.path.join(os.path.dirname(SCRIPT_DIR), 'src', 'preseed.json')
    preseed = []
    with open(preseed_path, 'r') as fHandle:
        preseed = json.load(fHandle)

    # Preseed data will have fields: buildtype,testtype,platform,priority,timeout,expires
    # The expires field defaults to 2 weeks on a new job in the database
    # Expires field has a date "YYYY-MM-DD", but can have "*" to indicate never
    # Typical priority will be 1, but if we want to force coalescing we can do that
    # One hack is that if we have a * in a buildtype,testtype,platform field, then
    # we assume it is for all flavors of the * field: i.e. linux64,pgo,* - all tests
    # assumption - preseed fields are sanitized already - move parse_testtype to utils.py ?
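    # A hypothetical preseed.json entry matching the fields described above:
    # [{"buildtype": "opt", "testtype": "mochitest-1", "platform": "linux64",
    #   "build_system_type": "*", "priority": 1, "timeout": 0, "expires": "*"}]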

    for job in preseed:
        data = session.query(JobPriorities.id,
                             JobPriorities.testtype,
                             JobPriorities.buildtype,
                             JobPriorities.platform,
                             JobPriorities.priority,
                             JobPriorities.timeout,
                             JobPriorities.expires,
                             JobPriorities.buildsystem)
        if job['testtype'] != '*':
            data = data.filter(getattr(JobPriorities, 'testtype') == job['testtype'])

        if job['buildtype'] != '*':
            data = data.filter(getattr(JobPriorities, 'buildtype') == job['buildtype'])

        if job['platform'] != '*':
            data = data.filter(getattr(JobPriorities, 'platform') == job['platform'])

        data = data.all()

        _buildsystem = job["build_system_type"]

        # TODO: edge case: we add future jobs with a wildcard, when jobs show up
        #       remove the wildcard, apply priority/timeout/expires to new jobs
        # Deal with the case where we have a new entry in preseed
        if len(data) == 0:
            _expires = job['expires']
            if _expires == '*':
                _expires = str(datetime.now().date() + timedelta(days=365))

            LOG.info("adding a new unknown job to the database: %s" % job)
            newjob = JobPriorities(job['testtype'],
                                   job['buildtype'],
                                   job['platform'],
                                   job['priority'],
                                   job['timeout'],
                                   _expires,
                                   _buildsystem)
            session.add(newjob)
            session.commit()
            session.close()
            continue

        # We can have wildcards, so loop on all returned values in data
        for d in data:
            changed = False
            LOG.info("updating existing job %s/%s/%s" % (d[1], d[2], d[3]))
            _expires = job['expires']
            _priority = job['priority']
            _timeout = job['timeout']

            # we have a taskcluster job in the db, and new job in preseed
            if d[7] != _buildsystem:
                _buildsystem = "*"
                changed = True

            # When we have a defined date to expire a job, parse and use it
            if _expires == '*':
                _expires = str(datetime.now().date() + timedelta(days=365))

            try:
                dv = datetime.strptime(_expires, "%Y-%m-%d").date()
            except ValueError:
                continue

            # When we have expired, use existing priority/timeout, reset expires
            if dv <= datetime.now().date():
                LOG.info("  --  past the expiration date- reset!")
                _expires = ''
                _priority = d[4]
                _timeout = d[5]
                changed = True

            if changed:
                # TODO: do we need to try/except/finally with commit/rollback statements
                conn = engine.connect()
                statement = update(JobPriorities).where(
                    JobPriorities.id == d[0]).values(
                    priority=_priority,
                    timeout=_timeout,
                    expires=_expires,
                    buildsystem=_buildsystem)
                conn.execute(statement)
Example #21
def run_seta_details_query():
    startDate, date = clean_date_params(request.args)
    active = sanitize_bool(request.args.get("active", 0))
    buildbot = sanitize_bool(request.args.get("buildbot", 0))
    branch = sanitize_string(request.args.get("branch", ''))
    taskcluster = sanitize_bool(request.args.get("taskcluster", 0))
    priority = sanitize_string(request.args.get("priority", "low"))
    jobnames = JOBSDATA.jobnames_query()
    if date == "" or date == "latest":
        today = datetime.now()
        date = today.strftime("%Y-%m-%d")
    date = "%s" % date
    query = session.query(Seta.jobtype).filter(Seta.date == date).all()
    retVal = {}
    retVal[date] = []
    jobtype = []

    # we only support the fx-team, mozilla-inbound, and autoland branches in seta
    if str(branch) not in ['fx-team', 'mozilla-inbound', 'autoland'] \
            and str(branch) != '':
        abort(404)
    for d in query:
        parts = d[0].split("'")
        jobtype.append([parts[1], parts[3], parts[5]])

    alljobs = JOBSDATA.jobtype_query()

    # Because we store high value jobs in the seta table by default,
    # we return low value jobs (the default) when the priority is 'low';
    # otherwise we return high value jobs.
    if priority == 'low':
        low_value_jobs = [low_value_job for low_value_job in alljobs if
                          low_value_job not in jobtype]
        jobtype = low_value_jobs

    if active:
        active_jobs = []
        for job in alljobs:
            found = False
            for j in jobtype:
                if j[0] == job[0] and j[1] == job[1] and j[2] == job[2]:
                    found = True
                    break
            if not found:
                active_jobs.append(job)
        jobtype = active_jobs

    if buildbot:
        active_jobs = []
        # pick up buildbot jobs from the job list to speed up the filtering
        buildbot_jobs = [job for job in jobnames if job['buildplatform'] == 'buildbot']
        # find the corresponding job detail information
        for job in jobtype:
            for j in buildbot_jobs:
                if j['name'] == job[2] and j['platform'] == job[0] and j['buildtype'] == job[1]:
                    active_jobs.append(j['ref_data_name'] if branch == 'mozilla-inbound'
                                       else j['ref_data_name'].replace('mozilla-inbound', branch))

        jobtype = active_jobs

    if taskcluster:
        active_jobs = []
        taskcluster_jobs = [job for job in jobnames if job['buildplatform'] == 'taskcluster']
        for job in jobtype:
            for j in taskcluster_jobs:
                if j['name'] == job[2] and j['platform'] == job[0] and j['buildtype'] == job[1]:
                    active_jobs.append(j['ref_data_name'])
        jobtype = active_jobs

    retVal[date] = jobtype
    return {"jobtypes": retVal}
Example #22
    def add_user():
        """ Add new user/customer """
        name = input("Enter your name: ")
        email_id = input("Enter email id: ")
        # Validate Email id
        while True:
            if not validate_email(email_id):
                print(
                    "Entered Email id is invalid. Please enter valid email id")
                email_id = input("Enter email id: ")
            else:
                try:
                    email_id_object = session.query(Customer).filter(
                        Customer.email_id == email_id).one()
                    if email_id_object:
                        print("Email id already exist.")
                        email_id = input("Enter email id: ")

                except NoResultFound:
                    break

        # TODO: Use getpass module to take hidden password
        password = input("Enter password: ")
        confirm_password = input("Enter confirm password: ")
        while password != confirm_password:
            print("Password and confirm password are not the same. Please try again")
            password = input("Enter password: ")
            confirm_password = input("Enter confirm password: ")

        mobile_number = input("Enter your mobile number: ")
        # Validate mobile number
        while True:
            if not len(mobile_number) == 10:
                print(
                    "Entered mobile number is invalid. Please enter 10 digits mobile number"
                )
                mobile_number = input("Enter mobile number: ")
            else:
                try:
                    mobile_no_object = session.query(Customer).filter(
                        Customer.mobile_number == mobile_number).one()
                    if mobile_no_object:
                        print("Mobile number already exist")
                        mobile_number = input("Enter your mobile number: ")
                except NoResultFound:
                    break

        c = Customer()
        c.add_customer(name, email_id, password, mobile_number)
        session.add(c)
        session.commit()
        session.close()
Example #23
def migration(args):
    limit = int(args.limit)
    startDate = args.startDate
    offset = 0
    url = URL % (startDate, limit, offset)
    try:
        response = retry(requests.get, args=(url, )).json()
    except Exception as error:
        # we will return an empty 'result' list if got exception here
        logger.debug("the request to %s failed, due to %s" % (url, error))
        response = {'result': []}
    datasets = response['result']

    session.query(Testjobs).filter(
        Testjobs.date >= '%s 00:00:00' % startDate).delete()

    while len(datasets) > 0:
        for data in datasets:
            testtype = data['testtype']

            # spidermonkey builds
            if 'pider' in testtype:
                continue

            # docker generation jobs
            if 'ocker' in testtype:
                continue

            # hazard builds
            if 'az' in testtype:
                continue

            # decision tasks
            if 'ecision' in testtype:
                continue

            # TODO: figure out a better naming strategy here
            # builds, linting, etc.
            if testtype.startswith('[TC]'):
                continue

            # skip talos jobs
            if testtype in [
                    'chromez', 'tp5o', 'g1', 'g2', 'g3', 'g4', 'xperf',
                    'chromez-e10s', 'tp5o-e10s', 'g1-e10s', 'g2-e10s',
                    'g3-e10s', 'g4-e10s', 'xperf-e10s', 'dromaeojs',
                    'dromaeojs-e10s', 'svgr', 'svgr-e10s', 'remote-tsvgx',
                    'remote-tp4m_chrome', 'other', 'other-e10s'
            ]:
                continue

            # linter jobs
            if testtype in [
                    'Static Analysis Opt', 'ESLint', 'Android lint', 'lint'
            ]:
                continue

            # builds
            if testtype in [
                    'nightly', 'Android armv7 API 15+', 'ASan Dbg', 'valgrind',
                    'Android armv7 API 15+ Dbg', 'ASan Debug', 'pgo-build',
                    'ASan Opt', 'build'
            ]:
                continue

            # hidden/lower tier tests, not sure of CI system, old jobs
            if testtype in [
                    'media-youtube-tests', 'external-media-tests',
                    'luciddream', 'media-tests'
            ]:
                continue

            Testjob = Testjobs(data['slave'], data['result'],
                               data['build_system_type'], data['duration'],
                               data['platform'], data['buildtype'], testtype,
                               data['bugid'], data['branch'], data['revision'],
                               data['date'], data['failure_classification'],
                               data['failures'])
            try:
                session.add(Testjob)
                session.commit()

            except Exception as error:
                logger.warning(error)
                session.rollback()

            finally:
                session.close()

        # The process will move forward by set offset
        offset += limit
        url = URL % (startDate, limit, offset)
        response = retry(requests.get, args=(url, )).json()
        datasets = response['result']
Example #24
def run_platform_query():
    platform = sanitize_string(request.args.get("platform"))
    build_system_type = sanitize_string(request.args.get("build_system_type"))
    start_date, end_date = clean_date_params(request.args)

    log_message = 'platform: %s startDate: %s endDate: %s' % (
        platform, start_date.strftime('%Y-%m-%d'),
        end_date.strftime('%Y-%m-%d'))
    app.logger.debug(log_message)

    csets = session.query(Testjobs.revision).distinct().\
        filter(and_(Testjobs.platform == platform,
                    Testjobs.branch == 'mozilla-central',
                    Testjobs.date.between(start_date, end_date),
                    Testjobs.build_system_type == build_system_type)).order_by(desc(Testjobs.date))

    cset_summaries = []
    test_summaries = {}
    dates = []

    labels = 'green orange blue red'.split()
    summary = {result: 0 for result in labels}

    for cset in csets:
        cset_id = cset[0]
        cset_summary = CSetSummary(cset_id)

        test_results = session.query(Testjobs.result, Testjobs.testtype, Testjobs.date).\
            filter(and_(Testjobs.platform == platform,
                        Testjobs.buildtype == 'opt',
                        Testjobs.revision == cset_id,
                        Testjobs.build_system_type == build_system_type)).\
            order_by(Testjobs.testtype).all()

        for res, testtype, date in test_results:
            test_summary = test_summaries.setdefault(testtype, summary.copy())

            if res == 'success':
                cset_summary.green[testtype] += 1
                test_summary['green'] += 1
            elif res == 'testfailed':
                cset_summary.orange[testtype] += 1
                test_summary['orange'] += 1
            elif res == 'retry':
                cset_summary.blue[testtype] += 1
                test_summary['blue'] += 1
            elif res == 'exception' or res == 'busted':
                cset_summary.red[testtype] += 1
                test_summary['red'] += 1
            elif res == 'usercancel':
                app.logger.debug('usercancel')
            else:
                app.logger.debug('UNRECOGNIZED RESULT: %s' % res)
            dates.append(date)

        cset_summaries.append(cset_summary)

    # sort tests alphabetically and append total & percentage to end of the list
    test_types = sorted(test_summaries.keys())
    test_types += ['total', 'percentage']

    # calculate total stats and percentage
    total = Counter()
    percentage = {}

    for test in test_summaries:
        total.update(test_summaries[test])
    test_count = sum(total.values())

    for key in total:
        percentage[key] = round((100.0 * total[key] / test_count), 2)

    fail_rates = calculate_fail_rate(passes=total['green'],
                                     retries=total['blue'],
                                     totals=test_count)

    test_summaries['total'] = total
    test_summaries['percentage'] = percentage
    session.close()
    return {
        'testTypes': test_types,
        'byRevision': cset_summaries,
        'byTest': test_summaries,
        'failRates': fail_rates,
        'dates': get_date_range(dates)
    }
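The total.update(...) calls above rely on collections.Counter.update adding counts when given a mapping, unlike dict.update, which overwrites:

>>> from collections import Counter
>>> total = Counter(green=1)
>>> total.update({'green': 2, 'red': 1})
>>> total
Counter({'green': 3, 'red': 1})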
Example #25
def run_seta_details_query():
    # TODO: remove inactive when buildbot api queries s/inactive/priority/
    inactive = sanitize_bool(request.args.get("inactive", 0))
    buildbot = sanitize_bool(request.args.get("buildbot", 0))
    branch = sanitize_string(request.args.get("branch", ''))
    taskcluster = sanitize_bool(request.args.get("taskcluster", 0))
    priority = int(sanitize_string(request.args.get("priority", '1')))

    jobnames = JOBSDATA.jobnames_query()
    date = str(datetime.now().date())

    if inactive == 1:
        priority = 5
    else:
        priority = 1

    # TODO: we can make this a variable priority in the future based on input
    query = session.query(
        JobPriorities.platform, JobPriorities.buildtype,
        JobPriorities.testtype).filter(JobPriorities.priority == 1).all()
    retVal = {}
    retVal[date] = []
    jobtype = []

    # we only support the fx-team, autoland, and mozilla-inbound branches in seta
    if str(branch) not in ['fx-team', 'mozilla-inbound', 'autoland'] \
            and str(branch) != '':
        abort(404)
    for d in query:
        jobtype.append([d[0], d[1], d[2]])

    # We call valve to determine what kind of jobs we should return only if
    # this request comes from taskcluster. Otherwise, we just return what
    # people ask for.
    if request.headers.get('User-Agent', '') == 'TaskCluster':
        # head_rev and pushlog_id are assumed to arrive as query parameters;
        # they are not defined anywhere else in this snippet.
        head_rev = request.args.get('head_rev')
        pushlog_id = request.args.get('pushlog_id')

        # We should return the full job list as a fallback, if it's a request
        # from taskcluster without a head_rev or pushlog_id
        if head_rev or pushlog_id:
            priority = valve(head_rev, pushlog_id, branch, priority)
        else:
            priority = 0

    alljobs = JOBSDATA.jobtype_query()

    # Because we store high value jobs in the seta table by default,
    # we return low value jobs (jobs with no related failures) by default

    # priority = 0; run all the jobs
    if priority == 0:
        jobtype = alljobs
    # priority =5 run all low value jobs
    elif priority == 5:
        low_value_jobs = [
            low_value_job for low_value_job in alljobs
            if low_value_job not in jobtype
        ]
        jobtype = low_value_jobs
    # priority =1, run all high value jobs
    elif priority == 1:
        pass  # use jobtype as a high value query

    # TODO: filter out based on buildsystem from database, either 'buildbot' or '*'
    if buildbot:
        active_jobs = []
        # pick up buildbot jobs from the job list to speed up the filtering
        buildbot_jobs = [
            job for job in jobnames if job['buildplatform'] == 'buildbot'
        ]
        # find the corresponding job detail information
        for job in jobtype:
            for j in buildbot_jobs:
                if j['name'] == job[2] and j['platform'] == job[0] and j[
                        'buildtype'] == job[1]:
                    active_jobs.append(
                        j['ref_data_name'] if branch == 'mozilla-inbound' else
                        j['ref_data_name'].replace('mozilla-inbound', branch))

        jobtype = active_jobs

    # TODO: filter out based on buildsystem from database, either 'taskcluster' or '*'
    if taskcluster:
        active_jobs = []
        taskcluster_jobs = [
            job for job in jobnames if job['buildplatform'] == 'taskcluster'
        ]
        for job in jobtype:
            for j in taskcluster_jobs:
                if j['name'] == job[2] and j['platform'] == job[0] and j[
                        'buildtype'] == job[1]:
                    active_jobs.append(j['ref_data_name'])
        jobtype = active_jobs

    retVal[date] = jobtype
    return {"jobtypes": retVal}
Example #26
def update_preseed():
    """
    We sync preseed.json to jobpriorities on the server at startup, since that
    is the only time we expect preseed.json to change.
    """

    # get preseed data first
    preseed_path = os.path.join(os.path.dirname(SCRIPT_DIR), 'src',
                                'preseed.json')
    preseed = []
    with open(preseed_path, 'r') as fHandle:
        preseed = json.load(fHandle)

    # Preseed data will have fields: buildtype,testtype,platform,priority,timeout,expires
    # The expires field defaults to 2 weeks on a new job in the database
    # Expires field has a date "YYYY-MM-DD", but can have "*" to indicate never
    # Typical priority will be 1, but if we want to force coalescing we can do that
    # One hack is that if we have a * in a buildtype,testtype,platform field, then
    # we assume it is for all flavors of the * field: i.e. linux64,pgo,* - all tests
    # assumption - preseed fields are sanitized already - move parse_testtype to utils.py ?

    for job in preseed:
        data = session.query(JobPriorities.id, JobPriorities.testtype,
                             JobPriorities.buildtype, JobPriorities.platform,
                             JobPriorities.priority, JobPriorities.timeout,
                             JobPriorities.expires, JobPriorities.buildsystem)
        if job['testtype'] != '*':
            data = data.filter(
                getattr(JobPriorities, 'testtype') == job['testtype'])

        if job['buildtype'] != '*':
            data = data.filter(
                getattr(JobPriorities, 'buildtype') == job['buildtype'])

        if job['platform'] != '*':
            data = data.filter(
                getattr(JobPriorities, 'platform') == job['platform'])

        data = data.all()

        _buildsystem = job["build_system_type"]

        # TODO: edge case: we add future jobs with a wildcard, when jobs show up
        #       remove the wildcard, apply priority/timeout/expires to new jobs
        # Deal with the case where we have a new entry in preseed
        if len(data) == 0:
            _expires = job['expires']
            if _expires == '*':
                _expires = str(datetime.now().date() + timedelta(days=365))

            LOG.info("adding a new unknown job to the database: %s" % job)
            newjob = JobPriorities(job['testtype'], job['buildtype'],
                                   job['platform'], job['priority'],
                                   job['timeout'], _expires, _buildsystem)
            session.add(newjob)
            session.commit()
            session.close()
            continue

        # We can have wildcards, so loop on all returned values in data
        for d in data:
            changed = False
            LOG.info("updating existing job %s/%s/%s" % (d[1], d[2], d[3]))
            _expires = job['expires']
            _priority = job['priority']
            _timeout = job['timeout']

            # we have a taskcluster job in the db, and new job in preseed
            if d[7] != _buildsystem:
                _buildsystem = "*"
                changed = True

            # When we have a defined date to expire a job, parse and use it
            if _expires == '*':
                _expires = str(datetime.now().date() + timedelta(days=365))

            try:
                dv = datetime.strptime(_expires, "%Y-%m-%d").date()
            except ValueError:
                continue

            # When we have expired, use existing priority/timeout, reset expires
            if dv <= datetime.now().date():
                LOG.info("  --  past the expiration date- reset!")
                _expires = ''
                _priority = d[4]
                _timeout = d[5]
                changed = True

            if changed:
                # TODO: do we need to try/except/finally with commit/rollback statements
                conn = engine.connect()
                statement = update(JobPriorities).where(
                    JobPriorities.id == d[0]).values(priority=_priority,
                                                     timeout=_timeout,
                                                     expires=_expires,
                                                     buildsystem=_buildsystem)
                conn.execute(statement)
Example #27
def run_platform_query():
    platform = sanitize_string(request.args.get("platform"))
    build_system_type = sanitize_string(request.args.get("build_system_type"))
    start_date, end_date = clean_date_params(request.args)

    log_message = 'platform: %s startDate: %s endDate: %s' % (platform,
                                                              start_date.strftime('%Y-%m-%d'),
                                                              end_date.strftime('%Y-%m-%d'))
    app.logger.debug(log_message)

    csets = session.query(Testjobs.revision).distinct().\
        filter(and_(Testjobs.platform == platform,
                    Testjobs.branch == 'mozilla-central',
                    Testjobs.date.between(start_date, end_date),
                    Testjobs.build_system_type == build_system_type)).order_by(desc(Testjobs.date))

    cset_summaries = []
    test_summaries = {}
    dates = []

    labels = 'green orange blue red'.split()
    summary = {result: 0 for result in labels}

    for cset in csets:
        cset_id = cset[0]
        cset_summary = CSetSummary(cset_id)

        test_results = session.query(Testjobs.result, Testjobs.testtype, Testjobs.date).\
            filter(and_(Testjobs.platform == platform,
                        Testjobs.buildtype == 'opt',
                        Testjobs.revision == cset_id,
                        Testjobs.build_system_type == build_system_type)).\
            order_by(Testjobs.testtype).all()

        for res, testtype, date in test_results:
            test_summary = test_summaries.setdefault(testtype, summary.copy())

            if res == 'success':
                cset_summary.green[testtype] += 1
                test_summary['green'] += 1
            elif res == 'testfailed':
                cset_summary.orange[testtype] += 1
                test_summary['orange'] += 1
            elif res == 'retry':
                cset_summary.blue[testtype] += 1
                test_summary['blue'] += 1
            elif res == 'exception' or res == 'busted':
                cset_summary.red[testtype] += 1
                test_summary['red'] += 1
            elif res == 'usercancel':
                app.logger.debug('usercancel')
            else:
                app.logger.debug('UNRECOGNIZED RESULT: %s' % res)
            dates.append(date)

        cset_summaries.append(cset_summary)

    # sort tests alphabetically and append total & percentage to end of the list
    test_types = sorted(test_summaries.keys())
    test_types += ['total', 'percentage']

    # calculate total stats and percentage
    total = Counter()
    percentage = {}

    for test in test_summaries:
        total.update(test_summaries[test])
    test_count = sum(total.values())

    for key in total:
        percentage[key] = round((100.0 * total[key] / test_count), 2)

    fail_rates = calculate_fail_rate(passes=total['green'],
                                     retries=total['blue'],
                                     totals=test_count)

    test_summaries['total'] = total
    test_summaries['percentage'] = percentage
    session.close()
    return {'testTypes': test_types,
            'byRevision': cset_summaries,
            'byTest': test_summaries,
            'failRates': fail_rates,
            'dates': get_date_range(dates)}
Example #28
def run_seta_details_query():
    buildbot = sanitize_bool(request.args.get("buildbot", 0))
    branch = sanitize_string(request.args.get("branch", 'mozilla-inbound'))
    taskcluster = sanitize_bool(request.args.get("taskcluster", 0))
    priority = int(sanitize_string(request.args.get("priority", '5')))
    jobnames = JOBSDATA.jobnames_query()
    date = str(datetime.now().date())
    retVal = {}
    retVal[date] = []
    jobtype = []

    # we only support the fx-team, autoland, and mozilla-inbound branches in seta
    if str(branch) not in ['fx-team', 'mozilla-inbound', 'autoland'] \
            and str(branch) != '':
        abort(404)

    # For a TaskCluster request, we don't care which priority the user requests.
    # Our strategy is to return high value jobs by default and to return all
    # jobs every 5 pushes or 90 minutes for that branch.
    if request.headers.get('User-Agent', '') == 'TaskCluster':
        # we set taskcluster to 1 if it's a request from taskcluster; it's more
        # reasonable and simplifies the request url.
        taskcluster = 1

        # we will return all jobs every 90 minutes, so the return_all_jobs flag will be
        # set to true if the time limit has been reached.
        return_all_jobs = False

        # We should return the full job list as a fallback if it's a request from
        # taskcluster without a head_rev or pushlog_id
        try:
            branch_info = session.query(TaskRequests.counter,
                                        TaskRequests.datetime,
                                        TaskRequests.reset_delta).filter(
                TaskRequests.branch == branch).all()
        except Exception:
            branch_info = []
        time_of_now = datetime.now()

        # If we got nothing related with that branch, we should create it.
        if len(branch_info) == 0:
            # time_of_lastreset is not a good name anyway :(
            # And we treat every branch's reset_delta as 90 minutes; we should find a
            # better delta for them in the future.
            branch_data = TaskRequests(str(branch), 1, time_of_now, RESET_DELTA)
            try:
                session.add(branch_data)
                session.commit()
            except Exception as error:
                LOG.debug(error)
                session.rollback()

            finally:
                session.close()
            counter = 1
            time_string = time_of_now
            reset_delta = RESET_DELTA

        # We should update it if that branch had already been stored.
        else:
            counter, time_string, reset_delta = branch_info[0]
            counter += 1
            conn = engine.connect()
            statement = update(TaskRequests).where(
                TaskRequests.branch == branch).values(
                counter=counter)
            conn.execute(statement)

        delta = (time_of_now - time_string).total_seconds()

        # we should update the time recorder if the elapsed time has
        # reached the time limit of that branch.
        if delta >= reset_delta:
            conn = engine.connect()
            statement = update(TaskRequests).where(
                TaskRequests.branch == branch).values(
                datetime=time_of_now)
            conn.execute(statement)

            # we need to set the return_all_jobs flag to true.
            return_all_jobs = True

        # we query all jobs rather than jobs filter by the requested priority in here,
        # Because we need to set the job returning strategy depend on different job priority.
        query = session.query(JobPriorities.platform,
                              JobPriorities.buildtype,
                              JobPriorities.testtype,
                              JobPriorities.priority,
                              JobPriorities.timeout
                              ).all()

        for d in query:
            # we only return the job if it hasn't reached the timeout limit; a
            # timeout of zero means the job should always run.
            if delta < d[4] or d[4] == 0:
                # All high value jobs have priority 1, and we need to
                # return all jobs every 5 pushes (for now).
                if counter % d[3] != 0:
                    jobtype.append([d[0], d[1], d[2]])

            # we need to return all jobs every 90 minutes, so all jobs will be returned
            # if the delta is larger than 5400
            elif return_all_jobs:
                jobtype.append([d[0], d[1], d[2]])

    # We don't care about a job's timeout if it's not a taskcluster request.
    else:
        query = session.query(JobPriorities.platform,
                              JobPriorities.buildtype,
                              JobPriorities.testtype,
                              JobPriorities.priority,
                              ).all()

        # priority = 0; run all the jobs
        if priority != 1 and priority != 5:
            priority = 0

        # Because we store high value jobs in seta table as default,
        # so we return low value jobs, means no failure related with this job as default
        if priority == 0:
            jobtype = JOBSDATA.jobtype_query()  # All jobs regardless of priority
        # priority = 5: run all low value jobs
        else:
            joblist = [job for job in query if job[3] == priority]
            for j in joblist:
                jobtype.append([j[0], j[1], j[2]])

        # TODO: filter out based on buildsystem from database, either 'buildbot' or '*'
        if buildbot:
            active_jobs = []
            # pick up buildbot jobs from the job list to speed up the filtering process
            buildbot_jobs = [job for job in jobnames if job['buildplatform'] == 'buildbot']
            # find the corresponding job detail information
            for job in jobtype:
                for j in buildbot_jobs:
                    if j['name'] == job[2] and j['platform'] == job[0] and j['buildtype'] == job[1]:
                        active_jobs.append(j['ref_data_name'] if branch == 'mozilla-inbound'
                                           else j['ref_data_name'].replace(
                                               'mozilla-inbound', branch))

            jobtype = active_jobs

    # TODO: filter out based on buildsystem from database, either 'taskcluster' or '*'
    if taskcluster:
        active_jobs = []
        taskcluster_jobs = [job for job in jobnames if job['buildplatform'] == 'taskcluster']
        for job in jobtype:
            # we need to translate the jobtype back to its proper data form.
            job[2] = job[2].replace('e10s-browser-chrome', 'browser-chrome-e10s')
            job[2] = job[2].replace('e10s-devtools-chrome', 'devtools-chrome-e10s')
            job[2] = job[2].replace('gl-', 'webgl-')

            for j in taskcluster_jobs:
                if job[2] in j['name'] and j['platform'] == job[0] and j['buildtype'] == job[1]:
                    active_jobs.append(j['ref_data_name'])
        jobtype = active_jobs

    retVal[date] = jobtype
    return {"jobtypes": retVal}
Beispiel #34
0
def _update_job_priority_table(data):
    """Add new jobs to the priority table and update the build system if required."""
    LOG.info('Fetch all rows from the job priority table.')
    # Get all rows of job priorities
    db_data = session.query(JobPriorities.id,
                            JobPriorities.testtype,
                            JobPriorities.buildtype,
                            JobPriorities.platform,
                            JobPriorities.priority,
                            JobPriorities.timeout,
                            JobPriorities.expires,
                            JobPriorities.buildsystem).all()

    # TODO: write test for this
    # When the table is empty it means that we're starting the system for the first time
    # and we're going to use different default values
    map = {}
    if len(db_data) != 0:
        priority = 1
        timeout = 0
        # Using %Y-%m-%d fixes this issue:
        # Warning: Incorrect date value: '2016-10-28 17:36:58.153265' for column 'expires' at row 1
        expiration_date = (datetime.datetime.now() + datetime.timedelta(days=14)).strftime("%Y-%m-%d")
        # Creating this data structure which will reduce how many times we iterate through the DB rows
        for row in db_data:
            key = tuple(row[1:4])
            # This is guaranteed by a unique composite index for these 3 fields in models.py
            assert key not in map,\
                '"{}" should be a unique row and that is unexpected.'.format(key)
            # (testtype, buildtype, platform)
            map[key] = {'pk': row[0], 'build_system_type': row[7]}
    else:
        priority = 5
        timeout = 5400
        expiration_date = None

    total_jobs = len(data)
    new_jobs = 0
    failed_changes = 0
    updated_jobs = 0
    # Loop through sanitized jobs, add new jobs and update the build system if needed
    for job in data:
        _buildsystem = job["build_system_type"]
        key = _unique_key(job)
        if key in map:
            # We already know about this job, we might need to update the build system
            row_build_system_type = map[key]['build_system_type']

            if row_build_system_type == '*' or _buildsystem == '*':
                # We don't need to update anything
                pass
            else:
                # We're seeing the job again but for another build system (e.g. buildbot vs
                # taskcluster). We need to change it to '*'
                if row_build_system_type != _buildsystem:
                    _buildsystem = "*"
                    # Update table with new buildsystem
                    try:
                        conn = engine.connect()
                        statement = update(JobPriorities).where(
                            JobPriorities.id == map[key]['pk']).values(buildsystem=_buildsystem)
                        conn.execute(statement)
                        LOG.info('Updated {}/{} from {} to {}'.format(
                            job['testtype'], job['platform_option'],
                            job['build_system_type'], _buildsystem
                        ))
                        updated_jobs += 1
                    except Exception as e:
                        LOG.info("key = %s, buildsystem = %s" % (key, _buildsystem))
                        LOG.info("exception updating jobPriorities: %s" % e)

        else:
            # We have a new job from runnablejobs to add to our master list
            try:
                jobpriority = JobPriorities(
                    str(job["testtype"]),
                    str(job["platform_option"]),
                    str(job["platform"]),
                    priority,
                    timeout,
                    expiration_date,
                    _buildsystem
                )
                session.add(jobpriority)
                session.commit()
                LOG.info('New job was found ({},{},{},{})'.format(
                    job['testtype'], job['platform_option'], job['platform'], _buildsystem,))
                new_jobs += 1
            except Exception as error:
                session.rollback()
                LOG.warning(error)
                failed_changes += 1
            finally:
                session.close()

    LOG.info('We have {} new jobs and {} updated jobs out of {} total jobs processed.'.format(
        new_jobs, updated_jobs, total_jobs
    ))

    if failed_changes != 0:
        LOG.error('We have failed {} changes out of {} total jobs processed.'.format(
            failed_changes, total_jobs
        ))
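The lookup above keys jobs with the _unique_key helper, which is not shown. A plausible sketch, assuming it mirrors the tuple(row[1:4]) keys built from the DB rows, followed by a hypothetical invocation with a sanitized job record:

def _unique_key(job):
    """Return the composite lookup key for a sanitized job (assumed shape)."""
    # Must match tuple(row[1:4]) above: (testtype, buildtype, platform).
    return (str(job['testtype']),
            str(job['platform_option']),
            str(job['platform']))


# Hypothetical usage; the field values are illustrative only.
_update_job_priority_table([{
    'testtype': 'mochitest-browser-chrome-1',
    'platform_option': 'opt',
    'platform': 'linux64',
    'build_system_type': 'taskcluster',
}])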
Beispiel #35
0
def prepare_the_database():
    # wipe out the job data older than SETA_WINDOW (90) days
    date = (datetime.datetime.now() -
            datetime.timedelta(days=SETA_WINDOW)).strftime('%Y-%m-%d')
    session.query(Seta).filter(Seta.date <= date).delete(
        synchronize_session='fetch')
    session.commit()
    session.close()
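The function assumes a module-level retention constant; a hypothetical definition and invocation for context (the value 90 comes from the comment above):

SETA_WINDOW = 90  # days of job data to keep

# Typically run before repopulating the table, e.g. from a daily cron task:
prepare_the_database()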
Beispiel #36
0
def run_slaves_query():
    start_date, end_date = clean_date_params(request.args)

    days_to_show = (end_date - start_date).days
    if days_to_show <= 8:
        jobs = 5
    else:
        jobs = int(round(days_to_show * 0.4))

    info = 'Only slaves with more than %d jobs are displayed.' % jobs

    query_results = session.query(
        Testjobs.slave, Testjobs.result, Testjobs.date).filter(
            and_(
                Testjobs.result.in_(
                    ["retry", "testfailed", "success", "busted", "exception"]),
                Testjobs.date.between(start_date,
                                      end_date))).order_by(Testjobs.date).all()
    session.close()

    if not query_results:
        return

    data = {}
    labels = 'fail retry infra success total'.split()
    summary = {result: 0 for result in labels}
    summary['jobs_since_last_success'] = 0
    dates = []

    for name, result, date in query_results:
        data.setdefault(name, summary.copy())
        data[name]['jobs_since_last_success'] += 1
        if result == 'testfailed':
            data[name]['fail'] += 1
        elif result == 'retry':
            data[name]['retry'] += 1
        elif result == 'success':
            data[name]['success'] += 1
            data[name]['jobs_since_last_success'] = 0
        elif result == 'busted' or result == 'exception':
            data[name]['infra'] += 1
        data[name]['total'] += 1
        dates.append(date)

    # filter slaves
    slave_list = [slave for slave in data if data[slave]['total'] > jobs]

    # calculate failure rate only for slaves that we're going to display
    for slave in slave_list:
        results = data[slave]
        fail_rates = calculate_fail_rate(results['success'], results['retry'],
                                         results['total'])
        data[slave]['sfr'] = fail_rates

    platforms = {}

    # group slaves by platform and calculate platform failure rate
    slaves = sorted(data.keys())
    for platform, slave_group in groupby(slaves,
                                         lambda x: x.rsplit('-', 1)[0]):
        slaves = list(slave_group)

        # don't calculate failure rate for platform we're not going to show
        if not any(slave in slaves for slave in slave_list):
            continue

        platforms[platform] = {}
        results = {}

        for label in ['success', 'retry', 'total']:
            results[label] = sum(data[slave][label] for slave in slaves)

        fail_rates = calculate_fail_rate(results['success'], results['retry'],
                                         results['total'])
        platforms[platform].update(fail_rates)

    # remove data that we don't need
    for slave in list(data.keys()):
        if slave not in slave_list:
            del data[slave]

    return {
        'slaves': data,
        'platforms': platforms,
        'dates': get_date_range(dates),
        'disclaimer': info
    }
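calculate_fail_rate is used here but not shown. A plausible sketch, assuming the rate is the percentage of jobs that neither succeeded nor were retried; the exact formula and the returned key names are assumptions:

def calculate_fail_rate(success, retry, total):
    # Hypothetical helper: treat anything that neither succeeded nor was
    # retried as a failure, and report percentages of the total.
    failures = total - success - retry
    fail_rate = round(failures * 100.0 / total, 2) if total else 0.0
    retry_rate = round(retry * 100.0 / total, 2) if total else 0.0
    return {'failRate': fail_rate, 'retryRate': retry_rate}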
Beispiel #37
0
def run_slaves_query():
    start_date, end_date = clean_date_params(request.args)

    days_to_show = (end_date - start_date).days
    if days_to_show <= 8:
        jobs = 5
    else:
        jobs = int(round(days_to_show * 0.4))

    info = 'Only slaves with more than %d jobs are displayed.' % jobs

    query_results = session.query(Testjobs.slave, Testjobs.result, Testjobs.date).filter(
        and_(Testjobs.result.in_(["retry", "testfailed", "success", "busted", "exception"]),
             Testjobs.date.between(start_date, end_date))).order_by(Testjobs.date).all()
    session.close()

    if not query_results:
        return

    data = {}
    labels = 'fail retry infra success total'.split()
    summary = {result: 0 for result in labels}
    summary['jobs_since_last_success'] = 0
    dates = []

    for name, result, date in query_results:
        data.setdefault(name, summary.copy())
        data[name]['jobs_since_last_success'] += 1
        if result == 'testfailed':
            data[name]['fail'] += 1
        elif result == 'retry':
            data[name]['retry'] += 1
        elif result == 'success':
            data[name]['success'] += 1
            data[name]['jobs_since_last_success'] = 0
        elif result == 'busted' or result == 'exception':
            data[name]['infra'] += 1
        data[name]['total'] += 1
        dates.append(date)

    # filter slaves
    slave_list = [slave for slave in data if data[slave]['total'] > jobs]

    # calculate failure rate only for slaves that we're going to display
    for slave in slave_list:
        results = data[slave]
        fail_rates = calculate_fail_rate(results['success'],
                                         results['retry'],
                                         results['total'])
        data[slave]['sfr'] = fail_rates

    platforms = {}

    # group slaves by platform and calculate platform failure rate
    slaves = sorted(data.keys())
    for platform, slave_group in groupby(slaves, lambda x: x.rsplit('-', 1)[0]):
        slaves = list(slave_group)

        # don't calculate failure rate for platform we're not going to show
        if not any(slave in slaves for slave in slave_list):
            continue

        platforms[platform] = {}
        results = {}

        for label in ['success', 'retry', 'total']:
            results[label] = sum(data[slave][label] for slave in slaves)

        fail_rates = calculate_fail_rate(results['success'],
                                         results['retry'],
                                         results['total'])
        platforms[platform].update(fail_rates)

    # remove data that we don't need
    for slave in list(data.keys()):
        if slave not in slave_list:
            del data[slave]

    return {'slaves': data,
            'platforms': platforms,
            'dates': get_date_range(dates),
            'disclaimer': info}
Beispiel #38
0
def run_seta_details_query():
    buildbot = sanitize_bool(request.args.get("buildbot", 0))
    branch = sanitize_string(request.args.get("branch", 'mozilla-inbound'))
    taskcluster = sanitize_bool(request.args.get("taskcluster", 0))
    priority = int(sanitize_string(request.args.get("priority", '5')))
    jobnames = JOBSDATA.jobnames_query()
    date = str(datetime.now().date())
    retVal = {}
    retVal[date] = []
    jobtype = []

    # we only support the fx-team, autoland, and mozilla-inbound branches in seta
    if str(branch) not in ['fx-team', 'mozilla-inbound', 'autoland'] \
            and str(branch) != '':
        abort(404)

    # For a TaskCluster request we don't care which priority the user requested.
    # We return high value jobs by default, and return all jobs every 5 pushes
    # or every 90 minutes for that branch.
    if request.headers.get('User-Agent', '') == 'TaskCluster':
        # we set taskcluster to 1 if the request comes from taskcluster; it is more
        # reasonable and simplifies the request url.
        taskcluster = 1

        # we return all jobs every 90 minutes, so the return_all_jobs flag will be
        # set to True once the time limit has been reached.
        return_all_jobs = False

        # We should return the full job list as a fallback if the request comes
        # from taskcluster without a head_rev or pushlog_id in it.
        try:
            branch_info = session.query(
                TaskRequests.counter, TaskRequests.datetime,
                TaskRequests.reset_delta).filter(
                    TaskRequests.branch == branch).all()
        except Exception:
            branch_info = []
        time_of_now = datetime.now()

        # If we got nothing related to that branch, we should create it.
        if len(branch_info) == 0:
            # time_of_lastreset is not a good name anyway :(
            # We treat every branch's reset_delta as 90 minutes; we should find a
            # better delta for each branch in the future.
            branch_data = TaskRequests(str(branch), 1, time_of_now,
                                       RESET_DELTA)
            try:
                session.add(branch_data)
                session.commit()
            except Exception as error:
                LOG.debug(error)
                session.rollback()

            finally:
                session.close()
            counter = 1
            time_string = time_of_now
            reset_delta = RESET_DELTA

        # We should update it if that branch has already been stored.
        else:
            counter, time_string, reset_delta = branch_info[0]
            counter += 1
            conn = engine.connect()
            statement = update(TaskRequests).where(
                TaskRequests.branch == branch).values(counter=counter)
            conn.execute(statement)

        delta = (time_of_now - time_string).total_seconds()

        # we should update the time recorder if the elapsed time has
        # reached the reset limit of that branch.
        if delta >= reset_delta:
            conn = engine.connect()
            statement = update(TaskRequests).where(
                TaskRequests.branch == branch).values(datetime=time_of_now)
            conn.execute(statement)

            # we need to set the return_all_jobs flag to true.
            return_all_jobs = True

        # we query all jobs here rather than only those matching the requested priority,
        # because the job returning strategy depends on each job's priority.
        query = session.query(JobPriorities.platform, JobPriorities.buildtype,
                              JobPriorities.testtype, JobPriorities.priority,
                              JobPriorities.timeout).all()

        for d in query:
            # we only return a job if it hasn't reached its timeout limit; a
            # timeout of zero means the job should always run.
            if delta < d[4] or d[4] == 0:
                # All high value jobs have priority 1, and we need to
                # return all jobs every 5 pushes (for now).
                if counter % d[3] != 0:
                    jobtype.append([d[0], d[1], d[2]])

            # we need to return all jobs every 90 minutes, so all jobs will be
            # returned if the delta is larger than 5400 seconds.
            elif return_all_jobs:
                jobtype.append([d[0], d[1], d[2]])

    # We don't care about a job's timeout if it's not a taskcluster request.
    else:
        query = session.query(
            JobPriorities.platform,
            JobPriorities.buildtype,
            JobPriorities.testtype,
            JobPriorities.priority,
        ).all()

        # priority = 0; run all the jobs
        if priority != 1 and priority != 5:
            priority = 0

        # Because we store high value jobs in seta table as default,
        # so we return low value jobs, means no failure related with this job as default
        if priority == 0:
            jobtype = JOBSDATA.jobtype_query()  # All jobs regardless of priority
        # priority = 5: run all low value jobs
        else:
            joblist = [job for job in query if job[3] == priority]
            for j in joblist:
                jobtype.append([j[0], j[1], j[2]])

        # TODO: filter out based on buildsystem from database, either 'buildbot' or '*'
        if buildbot:
            active_jobs = []
            # pick up buildbot jobs from the job list to speed up the filtering process
            buildbot_jobs = [
                job for job in jobnames if job['buildplatform'] == 'buildbot'
            ]
            # find the corresponding job detail information
            for job in jobtype:
                for j in buildbot_jobs:
                    if j['name'] == job[2] and j['platform'] == job[0] and j[
                            'buildtype'] == job[1]:
                        active_jobs.append(
                            j['ref_data_name'] if branch == 'mozilla-inbound'
                            else j['ref_data_name'].replace(
                                'mozilla-inbound', branch))

            jobtype = active_jobs

    # TODO: filter out based on buildsystem from database, either 'taskcluster' or '*'
    if taskcluster:
        active_jobs = []
        taskcluster_jobs = [
            job for job in jobnames if job['buildplatform'] == 'taskcluster'
        ]
        for job in jobtype:
            # we need to translate the jobtype back to its proper data form.
            job[2] = job[2].replace('e10s-browser-chrome',
                                    'browser-chrome-e10s')
            job[2] = job[2].replace('e10s-devtools-chrome',
                                    'devtools-chrome-e10s')
            job[2] = job[2].replace('gl-', 'webgl-')

            for j in taskcluster_jobs:
                if job[2] in j['name'] and j['platform'] == job[0] and j[
                        'buildtype'] == job[1]:
                    active_jobs.append(j['ref_data_name'])
        jobtype = active_jobs

    retVal[date] = jobtype
    return {"jobtypes": retVal}
Beispiel #39
0
def add_jobs_to_jobpriority(new_data=None,
                            priority=1,
                            timeout=0,
                            set_expired=False):
    added_jobs = []

    if not new_data:
        return

    # TODO: as a perf improvement we can reduce jobs prior to this expensive for loop
    for job in new_data['results']:

        # TODO: potentially ensure no duplicates in new_data and query once outside the loop
        db_data = []
        db_data = session.query(JobPriorities.id, JobPriorities.testtype,
                                JobPriorities.buildtype,
                                JobPriorities.platform, JobPriorities.priority,
                                JobPriorities.timeout, JobPriorities.expires,
                                JobPriorities.buildsystem).all()

        platform = parse_platform(job['build_platform'])
        if platform is None or platform == "":
            continue

        testtype = parse_testtype(job['build_system_type'],
                                  job['ref_data_name'], job['platform_option'],
                                  job['job_type_name'])
        if testtype is None or testtype == "":
            continue

        _buildsystem = job["build_system_type"]
        found = False
        found_id = None
        for row in db_data:
            if (row[1] == testtype and row[3] == platform
                    and row[2] == job["platform_option"]):
                # TODO: what if we have a race condition with two identical jobs
                # verify the build system type is the same, or make it *
                found = True
                if row[7] != "*" and _buildsystem != row[7]:
                    _buildsystem = "*"
                    found_id = row[0]

        # We have new jobs from runnablejobs to add to our master list
        if not found:
            _expired = None
            if set_expired:
                # set _expired = today + 14 days, using %Y-%m-%d to avoid the
                # incorrect-date-value warning noted in the example above
                # TODO: write test for it
                _expired = (datetime.datetime.now() +
                            datetime.timedelta(days=14)).strftime('%Y-%m-%d')

            try:
                jobpriority = JobPriorities(str(testtype),
                                            str(job["platform_option"]),
                                            str(job["build_platform"]),
                                            priority, timeout, _expired,
                                            _buildsystem)

                session.add(jobpriority)
                session.commit()
                added_jobs.append(job)
            except Exception as error:
                session.rollback()
                logging.warning(error)
            finally:
                session.close()
        elif _buildsystem != job['build_system_type']:
            # update table with new buildsystem
            conn = engine.connect()
            statement = update(JobPriorities)\
                          .where(JobPriorities.id == found_id)\
                          .values(buildsystem=_buildsystem)
            conn.execute(statement)

    return added_jobs
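A hypothetical invocation, using the runnable-jobs payload shape the loop expects (entries under new_data['results'] with build_platform, platform_option, build_system_type, ref_data_name, and job_type_name keys); the values are illustrative:

runnable_jobs = {
    'results': [{
        'build_platform': 'linux64',
        'platform_option': 'opt',
        'build_system_type': 'taskcluster',
        'ref_data_name': 'test-linux64/opt-mochitest-e10s-1',
        'job_type_name': 'test-linux64/opt-mochitest-e10s-1',
    }]
}
added = add_jobs_to_jobpriority(runnable_jobs, priority=1, timeout=0,
                                set_expired=True)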
Beispiel #40
0
def delete_customer(email_id):
    db_session.query(Customer).filter(
        Customer.email_id == email_id).delete()
    db_session.commit()
    db_session.close()
    print("Customer deleted successfully. :)")
Beispiel #41
0
    def get(self):
        writers = session.query(Writers.id, Writers.name, Writers.email).all()

        data = [{'id': i[0], 'name': i[1], 'email': i[2]} for i in writers]
        return data
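This get() reads like a method on a REST resource class that is not shown. A hypothetical registration, assuming Flask-RESTful; the WritersList class name and the /writers route are illustrative:

from flask import Flask
from flask_restful import Api, Resource


class WritersList(Resource):
    # Hypothetical resource wrapping the get() above.
    def get(self):
        writers = session.query(Writers.id, Writers.name, Writers.email).all()
        return [{'id': i[0], 'name': i[1], 'email': i[2]} for i in writers]


app = Flask(__name__)
api = Api(app)
api.add_resource(WritersList, '/writers')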