def post_request(self, project, job_collection, guid=None):
        """Post a job collection to Treeherder, retrying on timeouts.

        :param project: repository/project name to post against.
        :param job_collection: TreeherderJobCollection to submit.
        :param guid: optional job guid; when given, the job url is
            looked up and logged after a successful post.
        """
        name = type(self).__name__
        self.logger.debug(name + '.post_request - '
                          'job_collection =\n%s' %
                          pretty(job_collection.get_collection_data()))

        client = TreeherderClient(protocol=self.protocol,
                                  host=self.server,
                                  client_id=self.credentials['client_id'],
                                  secret=self.credentials['secret'])

        attempt = 0
        while attempt < self.retries:
            attempt += 1
            try:
                client.post_collection(project, job_collection)
                self.logger.info(name + '.post_request - collection posted')
                if guid:
                    job_url = self.request_job_url(project, guid)
                    self.logger.info(name +
                                     '.post_request - url is %s' % job_url)
                return
            except requests.exceptions.Timeout:
                # A timeout is retryable; wait and go around again.
                self.logger.error('Attempt %d to post result to '
                                  'Treeherder timed out.' % attempt)
                time.sleep(self.retry_wait)
            except Exception as e:
                # Any other failure aborts immediately.
                self.logger.exception(
                    'Error submitting request to Treeherder\n\n'
                    'Exception: %s\n'
                    'TreeherderJobCollection %s\n' %
                    (e, pretty(job_collection.get_collection_data())))
                return
        self.logger.error('Error submitting request to Treeherder.')
# --- Example 2 ---
    def submit(self, job):
        """Submit the given job to Treeherder.

        :param job: Treeherder job instance to use for submission.

        """
        job.add_submit_timestamp(int(time.time()))

        # Job info may only be submitted once, so it is attached here,
        # at completion time.
        if self._job_details:
            job.add_artifact('Job Info', 'json',
                             {'job_details': self._job_details})

        collection = TreeherderJobCollection()
        collection.add(job)

        logger.info('Sending results to Treeherder: {}'.format(
            collection.to_json()))

        parsed = urlparse(self.url)
        client = TreeherderClient(protocol=parsed.scheme,
                                  host=parsed.hostname,
                                  client_id=self.client_id,
                                  secret=self.secret)
        client.post_collection(self.repository, collection)

        fragment = JOB_FRAGMENT.format(repository=self.repository,
                                       revision=self.revision)
        logger.info('Results are available to view at: {}'.format(
            urljoin(self.url, fragment)))
    def submit(self,
               revision,
               browser,
               timestamp,
               perf_data,
               version='',
               repo_link='',
               video_links='',
               extra_info_obj=None):
        """Build a job dataset from the results and post it to Treeherder.

        :param revision: revision the results belong to.
        :param browser: browser the results were produced with.
        :param timestamp: timestamp of the job.
        :param perf_data: performance data payload.
        :param version: optional application version.
        :param repo_link: optional link to the repository.
        :param video_links: optional links to recorded videos.
        :param extra_info_obj: optional dict of extra job info.
        :return: result of post_collection, or None on error.
        """
        # Bug fix: a {} default is shared across all calls (mutable
        # default pitfall); use None and create a fresh dict per call.
        if extra_info_obj is None:
            extra_info_obj = {}

        j_dataset = self.create_job_dataset(revision=revision,
                                            browser=browser,
                                            timestamp=timestamp,
                                            perf_data=perf_data,
                                            version=version,
                                            repo_link=repo_link,
                                            video_links=video_links,
                                            extra_info_obj=extra_info_obj)
        tjc = self.create_job_collection(j_dataset)

        if self.server_url:
            client = TreeherderClient(server_url=self.server_url,
                                      client_id=self.client_id,
                                      secret=self.secret)
        else:
            # Fall back to the client library's default server.
            client = TreeherderClient(client_id=self.client_id,
                                      secret=self.secret)

        try:
            return_result = client.post_collection(self.repo, tjc)
        except Exception as e:
            # Bug fix: "print e.message" is Python 2-only syntax and
            # e.message does not exist on Python 3 exceptions; also
            # "print traceback.print_exc()" printed a spurious "None".
            print(e)
            traceback.print_exc()
            return None
        return return_result
 def __init__(self, repo, platform, status_check):
     """Remember query settings and create a Treeherder client.

     :param repo: repository (branch) name to query.
     :param platform: platform to filter jobs on.
     :param status_check: when true, the status check is skipped.
     """
     self.thclient = TreeherderClient()
     self.repo = repo
     self.platform = platform
     # Only 'opt' build types are considered.
     self.platform_option = 'opt'
     self.skip_status_check = status_check
     self.pushes = []
# --- Example 5 ---
    def post_request(self, project, job_collection, guid=None):
        """Post a job collection to Treeherder, retrying on timeout.

        :param project: repository/project name to post against.
        :param job_collection: TreeherderJobCollection to submit.
        :param guid: optional job guid; when given, the job url is
            looked up and logged after a successful post.
        """
        self.logger.debug(type(self).__name__ + '.post_request - '
                          'job_collection =\n%s' %
                          pretty(job_collection.get_collection_data()))

        client = TreeherderClient(protocol=self.protocol,
                                  host=self.server,
                                  client_id=self.credentials['client_id'],
                                  secret=self.credentials['secret'])

        # Only timeouts are retried; any other exception is logged and
        # aborts the loop immediately.
        for attempt in range(1, self.retries + 1):
            try:
                client.post_collection(project, job_collection)
                self.logger.info(type(self).__name__ +
                                 '.post_request - collection posted')
                if guid:
                    job_url = self.request_job_url(project, guid)
                    self.logger.info(type(self).__name__ +
                                     '.post_request - url is %s' % job_url)
                return
            except requests.exceptions.Timeout:
                message = ('Attempt %d to post result to '
                           'Treeherder timed out.' % attempt)
                self.logger.error(message)
                time.sleep(self.retry_wait)
            except Exception as e:
                message = ('Error submitting request to Treeherder\n\n'
                           'Exception: %s\n'
                           'TreeherderJobCollection %s\n' %
                           (e, pretty(job_collection.get_collection_data())))
                self.logger.exception(message)
                return
        # Reached only when every attempt timed out.
        self.logger.error('Error submitting request to Treeherder.')
# --- Example 6 ---
    def post_request(self, machine, project, job_collection, attempts, last_attempt):
        """Post a job collection to Treeherder for the given machine.

        :param machine: phone/machine name, used in the failure email.
        :param project: repository/project to post to.
        :param job_collection: TreeherderJobCollection to submit.
        :param attempts: attempt number, used in log/email text.
        :param last_attempt: whether this is the final attempt.
        :return: True if the collection was posted, False otherwise.
        """
        logger.debug('AutophoneTreeherder.post_request: %s, attempt=%d, last=%s' %
                     (job_collection.__dict__, attempts, last_attempt))
        client = TreeherderClient(protocol=self.protocol,
                                  host=self.server,
                                  client_id=self.client_id,
                                  secret=self.secret)

        try:
            client.post_collection(project, job_collection)
            return True
        except Exception as e:  # fixed py2-only "except Exception, e" syntax
            logger.exception('Error submitting request to Treeherder, attempt=%d, last=%s' %
                             (attempts, last_attempt))
            if self.mailer:
                # Include the server's JSON response in the email when
                # the exception carries one.
                if hasattr(e, 'response') and e.response:
                    response_json = json.dumps(e.response.json(),
                                               indent=2, sort_keys=True)
                else:
                    response_json = None
                self.mailer.send(
                    '%s attempt %d Error submitting request to Treeherder' %
                    (utils.host(), attempts),
                    'Phone: %s\n'
                    'Exception: %s\n'
                    'Last attempt: %s\n'
                    'Response: %s\n' % (
                        machine,
                        e,
                        last_attempt,
                        response_json))
            # Explicit failure value instead of an implicit None (both
            # are falsy, so truthiness-testing callers are unaffected).
            return False
# --- Example 7 ---
    def __init__(self,
                 worker_subprocess,
                 options,
                 jobs,
                 s3_bucket=None,
                 mailer=None,
                 shared_lock=None):
        """Wire up Treeherder submission state for one worker.

        Bails out early, without creating a client, when no treeherder
        url is configured in options.
        """
        assert options, "options is required."
        assert shared_lock, "shared_lock is required."

        self.worker = worker_subprocess
        self.options = options
        self.jobs = jobs
        self.s3_bucket = s3_bucket
        self.mailer = mailer
        self.shared_lock = shared_lock
        self.shutdown_requested = False
        LOGGER.debug('AutophoneTreeherder')

        self.url = self.options.treeherder_url
        if not self.url:
            # Submission disabled: leave the client unset.
            LOGGER.debug('AutophoneTreeherder: no treeherder url')
            return

        self.client_id = self.options.treeherder_client_id
        self.secret = self.options.treeherder_secret
        self.retry_wait = self.options.treeherder_retry_wait
        self.client = TreeherderClient(server_url=self.url,
                                       client_id=self.client_id,
                                       secret=self.secret)
        LOGGER.debug('AutophoneTreeherder: %s', self)
    def submit(self, job, logs=None):
        """Submit *job* (with optional logs) to Treeherder."""
        logs = logs or []

        # Job info can only be submitted once, so attach it now,
        # at completion time.
        if self._job_details:
            job.add_artifact('Job Info', 'json',
                             {'job_details': self._job_details})

        collection = TreeherderJobCollection()
        collection.add(job)

        print('Sending results to Treeherder: {}'.format(
            collection.to_json()))

        parsed = urlparse(self.url)
        client = TreeherderClient(protocol=parsed.scheme,
                                  host=parsed.hostname,
                                  client_id=self.client_id,
                                  secret=self.secret)
        client.post_collection(self.repository, collection)

        fragment = JOB_FRAGMENT.format(repository=self.repository,
                                       revision=self.revision)
        print('Results are available to view at: {}'.format(
            urljoin(self.url, fragment)))
    def __init__(self,
                 worker_subprocess,
                 options,
                 jobs,
                 s3_bucket=None,
                 mailer=None):
        """Set up Treeherder submission state for a single worker.

        Returns early, without creating a client, when no treeherder
        url is configured.
        """
        assert options, "options is required."

        logger = utils.getLogger()

        self.worker = worker_subprocess
        self.options = options
        self.jobs = jobs
        self.s3_bucket = s3_bucket
        self.mailer = mailer
        self.shutdown_requested = False
        logger.debug('AutophoneTreeherder')

        self.url = self.options.treeherder_url
        if not self.url:
            # Submission disabled: no client is created.
            logger.debug('AutophoneTreeherder: no treeherder url')
            return

        self.client_id = self.options.treeherder_client_id
        self.secret = self.options.treeherder_secret
        self.retry_wait = self.options.treeherder_retry_wait
        self.client = TreeherderClient(server_url=self.url,
                                       client_id=self.client_id,
                                       secret=self.secret)
        logger.debug('AutophoneTreeherder: %s', self)
# --- Example 10 ---
    def submit(self, revision, browser, timestamp, perf_data, version='',
               repo_link='', video_links='', extra_info_obj=None):
        """Create a job dataset for the results and post it to Treeherder.

        :param revision: revision the results belong to.
        :param browser: browser the results were produced with.
        :param timestamp: timestamp of the job.
        :param perf_data: performance data payload.
        :param version: optional application version.
        :param repo_link: optional link to the repository.
        :param video_links: optional links to recorded videos.
        :param extra_info_obj: optional dict of extra job info.
        :return: result of post_collection, or None on error.
        """
        # Bug fix: the {} default was shared across calls (mutable
        # default pitfall); use None and build a fresh dict per call.
        if extra_info_obj is None:
            extra_info_obj = {}

        j_dataset = self.create_job_dataset(revision=revision,
                                            browser=browser,
                                            timestamp=timestamp,
                                            perf_data=perf_data,
                                            version=version,
                                            repo_link=repo_link,
                                            video_links=video_links,
                                            extra_info_obj=extra_info_obj)
        tjc = self.create_job_collection(j_dataset)

        if self.server_url:
            client = TreeherderClient(server_url=self.server_url,
                                      client_id=self.client_id,
                                      secret=self.secret)
        else:
            # Fall back to the client library's default server.
            client = TreeherderClient(client_id=self.client_id,
                                      secret=self.secret)

        try:
            return_result = client.post_collection(self.repo, tjc)
        except Exception as e:
            # Bug fix: "print e.message" is Python 2-only and e.message
            # is gone in Python 3; "print traceback.print_exc()" also
            # emitted a spurious "None" line.
            print(e)
            traceback.print_exc()
            return None
        return return_result
def on_event(data, message, dry_run, treeherder_server_url, acknowledge, **kwargs):
    """Handle a resultset action event coming from pulse.

    :param data: pulse message payload.
    :param message: pulse message object; acked when *acknowledge*.
    :param dry_run: when True no trigger requests are sent.
    :param treeherder_server_url: Treeherder instance to query.
    :param acknowledge: when True, ack the message once handled.
    :return: 0 on success.
    :raises Exception: when the requested action is unknown.
    """
    if ignored(data):
        if acknowledge:
            message.ack()
        return 0  # SUCCESS

    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}
    repo_name = data["project"]
    action = data["action"]
    times = data["times"]
    # Pulse gives us resultset_id, we need to get revision from it.
    resultset_id = data["resultset_id"]

    treeherder_client = TreeherderClient(server_url=treeherder_server_url)

    LOG.info("%s action requested by %s on repo_name %s with resultset_id: %s" % (
        action,
        data["requester"],
        repo_name,
        resultset_id)
    )
    revision = treeherder_client.get_resultsets(repo_name, id=resultset_id)[0]["revision"]
    status = None

    if action == "trigger_missing_jobs":
        mgr = BuildAPIManager()
        mgr.trigger_missing_jobs_for_revision(repo_name, revision, dry_run=dry_run)
        if acknowledge:
            status = 'trigger_missing_jobs request sent'
        else:
            status = 'Dry-mode, no request sent'

    elif action == "trigger_all_talos_jobs":
        trigger_all_talos_jobs(
            repo_name=repo_name,
            revision=revision,
            times=times,
            priority=-1,
            dry_run=dry_run
        )
        if acknowledge:
            # Bug fix: the two adjacent literals used to concatenate
            # into "...with prioritylower then normal".
            status = ('trigger_all_talos_jobs: {0} times request sent with '
                      'priority lower than normal'.format(times))
        else:
            status = 'Dry-mode, no request sent'
    else:
        raise Exception(
            'We were not aware of the "{}" action. Please address the code.'.format(action)
        )

    LOG.debug(status)

    if acknowledge:
        # We need to ack the message to remove it from our queue
        message.ack()

    return 0  # SUCCESS
# --- Example 12 ---
def on_buildbot_event(data, message, dry_run, stage=False):
    """Act upon buildbot events.

    :param data: pulse payload (action, requester, project, job_id).
    :param message: pulse message; acked unless in dry-run mode.
    :param dry_run: when True no backfill requests are sent.
    :param stage: when True, talk to the staging Treeherder instance.
    """
    # Pulse gives us a job_id and a job_guid, we need request_id.
    LOG.info(
        "%s action requested by %s on repo_name %s with job_id: %s"
        % (data["action"], data["requester"], data["project"], data["job_id"])
    )
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    if stage:
        treeherder_client = TreeherderClient(host="treeherder.allizom.org")
    else:
        treeherder_client = TreeherderClient()
    repo_name = data["project"]
    job_id = data["job_id"]
    result = treeherder_client.get_jobs(repo_name, id=job_id)
    # If result not found, ignore
    if not result:
        LOG.info("We could not find any result for repo_name: %s and " "job_id: %s" % (repo_name, job_id))
        message.ack()
        return

    result = result[0]
    buildername = result["ref_data_name"]
    resultset_id = result["result_set_id"]
    result_sets = treeherder_client.get_resultsets(repo_name, id=resultset_id)
    revision = result_sets[0]["revision"]
    action = data["action"]
    status = None

    # Keep the unfiltered name: filter_invalid_builders() returns None
    # for invalid names, and we still want to report which one was bad.
    requested_buildername = buildername
    buildername = filter_invalid_builders(buildername)

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    if buildername is None:
        # Bug fix: this used to index None (buildername[0]) and raise
        # TypeError instead of producing the status message.
        status = "Builder %s was invalid." % requested_buildername

    # Backfill action
    elif action == "backfill":
        manual_backfill(revision, buildername, max_revisions=get_maxRevisions(buildername), dry_run=dry_run)
        if not dry_run:
            status = "Backfill request sent"
        else:
            status = "Dry-run mode, nothing was backfilled"

    # Send a pulse message showing what we did
    message_sender = MessageHandler()
    pulse_message = {"job_id": job_id, "action": action, "requester": data["requester"], "status": status}
    routing_key = "{}.{}".format(repo_name, action)
    try:
        message_sender.publish_message(pulse_message, routing_key)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt still propagate.
        LOG.warning("Failed to publish message over pulse stream.")

    if not dry_run:
        # We need to ack the message to remove it from our queue
        message.ack()
# --- Example 13 ---
 def __init__(self, input_env_data):
     """Normalize env data to upper-case keys and dispatch it."""
     self.platform_option = 'opt'
     self.thclient = TreeherderClient()
     self.resultsets = []
     # Upper-case every key so later lookups are case-insensitive.
     self.env_data = {}
     for key, value in input_env_data.items():
         self.env_data[key.upper()] = value
     self.dispatch_variables(self.env_data)
# --- Example 14 ---
 def __init__(self,
              server_url='https://treeherder.mozilla.org',
              treeherder_host=None):
     """Create the Treeherder API wrapper.

     :param server_url: Treeherder instance to talk to.
     :param treeherder_host: deprecated; mapped onto server_url.
     """
     url = server_url
     if treeherder_host:
         # Deprecated alias: rewrite the bare host into a server url.
         LOG.warning(
             "The `TreeherderApi()` parameter `treeherder_host` is deprecated. "
             "Use `server_url` instead, or omit entirely to use the default of "
             "production Treeherder.")
         url = 'https://%s' % treeherder_host
     self.treeherder_client = TreeherderClient(server_url=url)
# --- Example 15 ---
 def __init__(self, ldap_auth, is_triggerbot_user=lambda _: True):
     """Set up tree-watching state.

     :param ldap_auth: auth used for trigger requests.
     :param is_triggerbot_user: predicate deciding whether a user's
         pushes are handled (defaults to accepting everyone).
     """
     self.log = logging.getLogger('trigger-bot')
     self.auth = ldap_auth
     self.is_triggerbot_user = is_triggerbot_user
     self.treeherder_client = TreeherderClient()
     # Per-revision bookkeeping and trigger budgets.
     self.revmap = defaultdict(dict)
     self.revmap_threshold = TreeWatcher.revmap_threshold
     self.lower_trigger_limit = TreeWatcher.default_retry * TreeWatcher.per_push_failures
     self.global_trigger_count = 0
     self.hidden_builders = set()
     self.refresh_builder_counter = 0
# --- Example 16 ---
def get_revision_hash(server_url, project, revision):
    """Look up the Treeherder revision hash of *revision*.

    :param server_url: URL of the Treeherder instance.
    :param project: The project (branch) to use.
    :param revision: The revision to get the hash for.
    """
    client = TreeherderClient(server_url=server_url)
    return client.get_resultsets(project, revision=revision)[0]['revision_hash']
# --- Example 17 ---
def get_revision_hash(host, project, revision):
    """Return the Treeherder revision hash for *revision* on *project*.

    :param host: URL of the Treeherder instance.
    :param project: The project (branch) to use.
    :param revision: The revision to get the hash for.
    """
    resultsets = TreeherderClient(host=host, protocol='https').get_resultsets(
        project, revision=revision)
    return resultsets[0]['revision_hash']
# --- Example 18 ---
def get_revision_hash(server_url, project, revision):
    """Fetch the revision hash Treeherder stores for a revision.

    :param server_url: URL of the Treeherder instance.
    :param project: The project (branch) to use.
    :param revision: The revision to get the hash for.
    """
    client = TreeherderClient(server_url=server_url)
    matches = client.get_resultsets(project, revision=revision)
    first = matches[0]
    return first['revision_hash']
def get_job_log(repo_name, job_id):
    '''For a given job id return the URL to the log associated to it.

    Implicitly returns None when no artifact exists or the connection
    fails.
    '''
    th_client = TreeherderClient()
    query_params = {'job_id': job_id, 'name': 'text_log_summary'}
    try:
        return str(th_client.get_artifacts(repo_name, **query_params)[0]['blob']['logurl'])
    except IndexError:
        # Bug fix: Python 2-only "print x" statements replaced with the
        # function form, valid on both Python 2 and 3 for one argument.
        print('No artifacts for {}'.format(job_id))
    except requests.exceptions.ConnectionError:
        print('Connection failed for {}'.format(job_id))
        traceback.print_exc()
# --- Example 20 ---
def get_job_url(task_id, run_id, **params):
    """Build a Treeherder job url for a given Taskcluster task"""
    client = TreeherderClient()
    uuid = slugid.decode(task_id)

    # Resolve the Treeherder job id for this task run, if any, so the
    # url pre-selects the job in the UI.
    details = client.get_job_details(job_guid=f"{uuid}/{run_id}")
    if details:
        params["selectedJob"] = details[0]["job_id"]

    return f"https://treeherder.mozilla.org/#/jobs?{urlencode(params)}"
    def __init__(self, application, branch, platform, server_url=TREEHERDER_URL):
        """Create a new instance of the Treeherder class.

        :param application: The name of the application to download.
        :param branch: Name of the branch.
        :param platform: Platform of the application.
        :param server_url: The URL of the Treeherder instance to access.
        """
        self.logger = logging.getLogger(__name__)
        self.client = TreeherderClient(server_url=server_url)

        self.application = application
        self.branch = branch
        self.platform = platform
# --- Example 22 ---
 def __init__(self, repo, platform, status_check):
     """Remember query settings and create a Treeherder client."""
     self.thclient = TreeherderClient()
     self.resultsets = []
     self.repo = repo
     self.platform = platform
     # Only 'opt' build types are considered.
     self.platform_option = 'opt'
     self.skip_status_check = status_check
# --- Example 23 ---
    def __init__(self, worker_subprocess, options, jobs, s3_bucket=None,
                 mailer=None):
        """Set up Treeherder submission for one worker subprocess.

        Returns early, without creating a client, when no treeherder
        url is configured.
        """
        assert options, "options is required."

        logger = utils.getLogger()

        self.worker = worker_subprocess
        self.options = options
        self.jobs = jobs
        self.s3_bucket = s3_bucket
        self.mailer = mailer
        self.shutdown_requested = False
        logger.debug('AutophoneTreeherder')

        self.url = self.options.treeherder_url
        if not self.url:
            # Submission disabled: no client is created.
            logger.debug('AutophoneTreeherder: no treeherder url')
            return

        self.client_id = self.options.treeherder_client_id
        self.secret = self.options.treeherder_secret
        self.retry_wait = self.options.treeherder_retry_wait
        self.client = TreeherderClient(server_url=self.url,
                                       client_id=self.client_id,
                                       secret=self.secret)
        logger.debug('AutophoneTreeherder: %s', self)
def get_all_jobs(repo_name, revision):
    '''Return dictionary of all jobs for a given revision

    Return: {'<revision>': {'<job_id_1>': <job_id_1_metadata>}}
    '''
    # Bug fix: Python 2-only "print x" statement replaced with the
    # function form, valid on both Python 2 and 3 for one argument.
    print("Fetching Treeherder jobs for {}/{}".format(repo_name, revision))
    th_client = TreeherderClient()
    results = th_client.get_resultsets(repo_name, revision=revision)
    all_jobs = {}
    if results:
        revision_id = results[0]["id"]
        # Index every job for the resultset by its id.
        for job in th_client.get_jobs(repo_name, count=6000,
                                      result_set_id=revision_id):
            all_jobs[job['id']] = job

    return {revision: all_jobs}
# --- Example 25 ---
 def __init__(self, server_url='https://treeherder.mozilla.org', treeherder_host=None):
     """Create the Treeherder API wrapper.

     :param server_url: Treeherder instance to use.
     :param treeherder_host: deprecated; mapped onto server_url.
     """
     if treeherder_host:
         LOG.warning("The `TreeherderApi()` parameter `treeherder_host` is deprecated. "
                     "Use `server_url` instead, or omit entirely to use the default of "
                     "production Treeherder.")
         url = 'https://%s' % treeherder_host
     else:
         url = server_url
     self.treeherder_client = TreeherderClient(server_url=url)
# --- Example 26 ---
def query_repositories(clobber=False):
    """
    Return dictionary with information about the various repositories.

    The data about a repository looks like this:

    .. code-block:: python

        "ash": {
            "repo": "https://hg.mozilla.org/projects/ash",
            "graph_branches": ["Ash"],
            "repo_type": "hg"
        }

    :param clobber: when True, drop both the in-memory and on-disk
        caches and re-fetch from Treeherder.
    """
    global REPOSITORIES

    if clobber:
        REPOSITORIES = {}
        if os.path.exists(REPOSITORIES_FILE):
            os.remove(REPOSITORIES_FILE)

    if REPOSITORIES:
        return REPOSITORIES

    if os.path.exists(REPOSITORIES_FILE):
        LOG.debug("Loading %s" % REPOSITORIES_FILE)
        # Bug fix: the file handle used to be left open; a context
        # manager guarantees it is closed.
        with open(REPOSITORIES_FILE) as fd:
            REPOSITORIES = json.load(fd)
    else:
        th_client = TreeherderClient(protocol="https", host=TREEHERDER_URL)
        treeherderRepos = th_client.get_repositories()
        REPOSITORIES = {}
        for th_repo in treeherderRepos:
            if th_repo["active_status"] == "active":
                REPOSITORIES[th_repo["name"]] = {
                    "repo": th_repo["url"],
                    "repo_type": th_repo["dvcs_type"],
                    "graph_branches": [th_repo["name"].capitalize()],
                }

        # Bug fix: text mode instead of "wb" — json.dump writes str,
        # which fails on a binary handle under Python 3.
        with open(REPOSITORIES_FILE, "w") as fd:
            json.dump(REPOSITORIES, fd)

    return REPOSITORIES
# --- Example 27 ---
def make_task_graph(public_key, signing_pvt_key, product,
                    root_template="release_graph.yml.tmpl",
                    template_dir=DEFAULT_TEMPLATE_DIR,
                    **template_kwargs):
    """Render the release task-graph template for *product* and parse it.

    :param public_key: path to the key used to encrypt env vars.
    :param signing_pvt_key: path to the signing private key (read here,
        never placed into the graph itself).
    :param product: product name; selects the template subdirectory.
    :param root_template: name of the root template to render.
    :param template_dir: base directory holding per-product templates.
    :param template_kwargs: extra template variables; must include at
        least "branch" and "revision" (used for the hash lookup below).
    :return: the rendered graph parsed via yaml.safe_load.
    """
    # TODO: some validation of template_kwargs + defaults
    env = Environment(
        loader=FileSystemLoader(path.join(template_dir, product)),
        undefined=StrictUndefined,
        extensions=['jinja2.ext.do'])
    th = TreeherderClient()

    now = arrow.now()
    # NOTE(review): `now.timestamp` as an attribute is the old arrow
    # API (it is a method in newer releases) — confirm pinned version.
    now_ms = now.timestamp * 1000

    # Don't let the signing pvt key leak into the task graph.
    with open(signing_pvt_key) as f:
        pvt_key = f.read()

    template = env.get_template(root_template)
    template_vars = {
        "product": product,
        "stableSlugId": stableSlugId(),
        "chunkify": chunkify,
        "sorted": sorted,
        "now": now,
        "now_ms": now_ms,
        # This is used in defining expirations in tasks. There's no way to
        # actually tell Taskcluster never to expire them, but 1,000 years
        # is as good as never....
        "never": arrow.now().replace(years=1000),
        # Treeherder expects 12 symbols in revision
        "revision_hash": th.get_resultsets(
            template_kwargs["branch"],
            revision=template_kwargs["revision"][:12])[0]["revision_hash"],
        "get_treeherder_platform": treeherder_platform,
        "encrypt_env_var": lambda *args: encryptEnvVar(*args,
                                                       keyFile=public_key),
        "buildbot2ftp": buildbot2ftp,
        "buildbot2bouncer": buildbot2bouncer,
        "sign_task": partial(sign_task, pvt_key=pvt_key),
    }
    # Caller-supplied kwargs intentionally override the defaults above.
    template_vars.update(template_kwargs)

    return yaml.safe_load(template.render(**template_vars))
# --- Example 28 ---
    def post_request(self, machine, project, job_collection):
        """Post *job_collection* to Treeherder under the shared lock.

        Retries up to self.retries times with self.retry_wait between
        attempts; failures are reported by email via self.mailer.

        :param machine: phone name used in failure emails.
        :param project: repository/project to post to.
        :param job_collection: TreeherderJobCollection to submit.
        """
        logger.debug('AutophoneTreeherder.post_request: %s' % job_collection.__dict__)
        logger.debug('AutophoneTreeherder shared_lock.acquire')
        self.shared_lock.acquire()
        # NOTE(review): no matching shared_lock.release()/finally is
        # visible in this excerpt; the `try:` below appears truncated —
        # confirm the full source releases the lock.
        try:
            auth = TreeherderAuth(self.credentials[project]['consumer_key'],
                                  self.credentials[project]['consumer_secret'],
                                  project)
            client = TreeherderClient(protocol=self.protocol, host=self.server, auth=auth)

            for attempt in range(1, self.retries+1):
                try:
                    client.post_collection(project, job_collection)
                    return
                # NOTE(review): Python 2-only "except Exception, e" syntax.
                except Exception, e:
                    logger.exception('Error submitting request to Treeherder')
                    if self.mailer:
                        # Include the server's JSON response if present.
                        if e.response:
                            response_json = json.dumps(e.response.json(),
                                                       indent=2, sort_keys=True)
                        else:
                            response_json = None
                        self.mailer.send(
                            'Attempt %d Error submitting request to Treeherder' %
                            attempt,
                            'Phone: %s\n'
                            'TreeherderClientError: %s\n'
                            'Response: %s\n' % (
                                machine,
                                e,
                                response_json))
                time.sleep(self.retry_wait)
            # Retries exhausted.
            logger.error('Error submitting request to Treeherder')
            if self.mailer:
                # NOTE(review): 'e' and 'response_json' are only bound
                # if an exception occurred above — verify against the
                # full source.
                self.mailer.send('Error submitting request to Treeherder',
                                 'Phone: %s\n'
                                 'TreeherderClientError: %s\n'
                                 'Response: %s\n'
                                 'TreeherderJobCollection %s\n' % (
                                     machine,
                                     e,
                                     response_json,
                                     job_collection.to_json()))
# --- Example 29 ---
    def post_request(self, machine, project, job_collection):
        """Post *job_collection* to Treeherder for *machine*.

        Acquires self.shared_lock, then retries the post up to
        self.retries times, emailing failures via self.mailer.

        :param machine: phone name used in failure emails.
        :param project: repository/project to post to.
        :param job_collection: TreeherderJobCollection to submit.
        """
        logger.debug('AutophoneTreeherder.post_request: %s' % job_collection.__dict__)
        logger.debug('AutophoneTreeherder shared_lock.acquire')
        self.shared_lock.acquire()
        # NOTE(review): no release()/finally for shared_lock is visible
        # in this excerpt; the `try:` below appears truncated — confirm
        # the full source releases the lock.
        try:
            client = TreeherderClient(protocol=self.protocol, host=self.server)

            for attempt in range(1, self.retries+1):
                try:
                    client.post_collection(
                        project,
                        self.credentials[project]['consumer_key'],
                        self.credentials[project]['consumer_secret'],
                        job_collection)
                    return
                except requests.exceptions.Timeout:
                    msg = ('Attempt %d to post result to '
                           'Treeherder timed out.\n\n\n' % attempt)
                    logger.error(msg)
                    if self.mailer:
                        self.mailer.send('Attempt %d for Phone %s failed to post to Treeherder' %
                                         (attempt, machine), msg)
                    time.sleep(self.retry_wait)
                # NOTE(review): Python 2-only "except Exception, e" syntax.
                except Exception, e:
                    logger.exception('Error submitting request to Treeherder')
                    if self.mailer:
                        self.mailer.send('Error submitting request to Treeherder',
                                         'Phone: %s\n'
                                         'TreeherderClientError: %s\n'
                                         'TreeherderJobCollection %s\n' % (
                                             machine,
                                             e,
                                             job_collection.to_json()))
                    return
            # Reached only when every attempt timed out.
            logger.error('Error submitting request to Treeherder')
            if self.mailer:
                # NOTE(review): 'e' is only bound if the generic except
                # ran (which returns), so on this timeout-exhausted path
                # the reference looks unbound — verify upstream.
                self.mailer.send('Error submitting request to Treeherder',
                                 'Phone: %s\n'
                                 'TreeherderClientError: %s\n'
                                 'TreeherderJobCollection %s\n' % (
                                     machine,
                                     e,
                                     job_collection.to_json()))
def on_resultset_action_event(data, message, dry_run):
    """Handle a resultset action pulse event and publish the outcome."""
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    repo_name = data["project"]
    action = data["action"]
    times = data["times"]
    # Pulse gives us resultset_id, we need to get revision from it.
    resultset_id = data["resultset_id"]
    treeherder_client = TreeherderClient()

    # We do not handle 'cancel_all' action right now, so skip it.
    if action == "cancel_all":
        message.ack()
        return

    LOG.info("%s action requested by %s on repo_name %s with resultset_id: %s" %
                (action, data["requester"], repo_name, resultset_id))
    revision = treeherder_client.get_resultsets(repo_name, id=resultset_id)[0]["revision"]

    status = None
    if action == "trigger_missing_jobs":
        trigger_missing_jobs_for_revision(repo_name, revision, dry_run=dry_run)
        status = ('trigger_missing_jobs request sent'
                  if not dry_run else 'Dry-mode, no request sent')
    elif action == "trigger_all_talos_jobs":
        trigger_all_talos_jobs(repo_name, revision, times, dry_run=dry_run)
        status = ('trigger_all_talos_jobs %s times request sent' % times
                  if not dry_run else 'Dry-mode, no request sent')

    # Send a pulse message showing what we did
    publisher = MessageHandler()
    pulse_message = {
        'resultset_id': resultset_id,
        'action': action,
        'requester': data['requester'],
        'status': status}
    publisher.publish_message(pulse_message, '{}.{}'.format(repo_name, action))
    # We need to ack the message to remove it from our queue
    message.ack()
# --- Example 31 ---
def make_task_graph(public_key, signing_pvt_key,
                    root_template="release_graph.yml.tmpl",
                    template_dir=DEFAULT_TEMPLATE_DIR,
                    **template_kwargs):
    """Render the release task-graph template and parse the result as YAML.

    :param public_key: Path to the key used for env-var encryption.
    :param signing_pvt_key: Path to the signing private key file.
    :param root_template: Name of the top-level Jinja template.
    :param template_dir: Directory the templates are loaded from.
    :param template_kwargs: Extra variables handed to the template
        (must include at least ``branch`` and ``revision``).
    """
    # TODO: some validation of template_kwargs + defaults
    jinja_env = Environment(loader=FileSystemLoader(template_dir),
                            undefined=StrictUndefined,
                            extensions=['jinja2.ext.do'])
    th_client = TreeherderClient()

    now = arrow.now()

    # Keep the signing private key itself out of the rendered graph.
    with open(signing_pvt_key) as key_fh:
        pvt_key = key_fh.read()

    # Treeherder expects 12 symbols in revision
    revision_hash = th_client.get_resultsets(
        template_kwargs["branch"],
        revision=template_kwargs["revision"][:12])[0]["revision_hash"]

    template_vars = dict(
        stableSlugId=stableSlugId(),
        chunkify=chunkify,
        sorted=sorted,
        now=now,
        now_ms=now.timestamp * 1000,
        # Taskcluster has no way to say "never expire"; 1,000 years
        # is as good as never.
        never=arrow.now().replace(years=1000),
        revision_hash=revision_hash,
        get_treeherder_platform=treeherder_platform,
        encrypt_env_var=lambda *args: encryptEnvVar(*args,
                                                    keyFile=public_key),
        buildbot2ftp=buildbot2ftp,
        buildbot2bouncer=buildbot2bouncer,
        sign_task=partial(sign_task, pvt_key=pvt_key),
    )
    template_vars.update(template_kwargs)

    rendered = jinja_env.get_template(root_template).render(**template_vars)
    return yaml.safe_load(rendered)
示例#32
0
    def __init__(self,
                 application,
                 branch,
                 platform,
                 host=TREEHERDER_HOST,
                 protocol='https'):
        """Set up a Treeherder helper bound to one application/branch/platform.

        :param application: The name of the application to download.
        :param branch: Name of the branch.
        :param platform: Platform of the application.
        :param host: The Treeherder host to make use of.
        :param protocol: The protocol for the Treeherder host.
        """
        self.application = application
        self.branch = branch
        self.platform = platform

        # Module-level logger plus a client pointed at the requested host.
        self.logger = logging.getLogger(__name__)
        self.client = TreeherderClient(host=host, protocol=protocol)
示例#33
0
 def __init__(self, ldap_auth, is_triggerbot_user=lambda _: True):
     """Initialize the tree watcher state and its Treeherder client."""
     self.auth = ldap_auth
     self.is_triggerbot_user = is_triggerbot_user
     self.log = logging.getLogger('trigger-bot')

     # Per-revision bookkeeping and the class-level trigger budgets.
     self.revmap = defaultdict(dict)
     self.revmap_threshold = TreeWatcher.revmap_threshold
     self.lower_trigger_limit = (TreeWatcher.default_retry *
                                 TreeWatcher.per_push_failures)
     self.global_trigger_count = 0

     self.treeherder_client = TreeherderClient()
     self.hidden_builders = set()
     self.refresh_builder_counter = 0
    def submit(self, job, logs=None):
        """Post a single job to Treeherder and print the review URL."""
        logs = logs or []

        # Job info can only be submitted once, so it is attached here,
        # at completion time.
        if self._job_details:
            job.add_artifact('Job Info', 'json', {'job_details': self._job_details})

        collection = TreeherderJobCollection()
        collection.add(job)

        print('Sending results to Treeherder: {}'.format(collection.to_json()))

        parsed = urlparse(self.url)
        client = TreeherderClient(protocol=parsed.scheme, host=parsed.hostname,
                                  client_id=self.client_id, secret=self.secret)
        client.post_collection(self.repository, collection)

        print('Results are available to view at: {}'.format(
            urljoin(self.url,
                    JOB_FRAGMENT.format(repository=self.repository, revision=self.revision))))
示例#35
0
    def submit_results(self, job):
        """Finalize a job, post it to Treeherder, and print the results URL."""
        job.add_project(self.project)
        job.add_revision_hash(self.retrieve_revision_hash())
        job.add_submit_timestamp(int(time.time()))

        collection = TreeherderJobCollection()
        collection.add(job)

        # self.logger.info
        print('Sending results to Treeherder: %s' % collection.to_json())

        parsed = urlparse(self.url)
        client = TreeherderClient(protocol=parsed.scheme, host=parsed.hostname,
                                  client_id=self.client_id, secret=self.secret)
        client.post_collection(self.project, collection)

        # self.logger.info
        print('Results are available to view at: %s' % (
            urljoin(self.url, REVISON_FRAGMENT % (self.project, self.revision))))
示例#36
0
def get_job_url(repository, revision, task_id=None, run_id=None, **params):
    """Build a Treeherder job url for a given Taskcluster task"""
    assert isinstance(repository, str) and repository, "Missing repository"
    assert isinstance(revision, str) and revision, "Missing revision"
    assert "repo" not in params, "repo cannot be set in params"
    assert "revision" not in params, "revision cannot be set in params"

    params["repo"] = repository
    params["revision"] = revision

    # When a specific task run is identified, deep-link straight to its job.
    if task_id is not None and run_id is not None:
        uuid = slugid.decode(task_id)

        # Resolve the Taskcluster guid to a Treeherder job id.
        details = TreeherderClient().get_job_details(
            job_guid=f"{uuid}/{run_id}")
        if details:
            params["selectedJob"] = details[0]["job_id"]

    return f"{JOBS_URL}?{urlencode(params)}"
示例#37
0
    def __init__(self, repository, revision, settings, treeherder_url,
                 treeherder_client_id, treeherder_secret):
        """Create a submission helper for one build.

        :param repository: Name of the repository the build has been built from.
        :param revision: Changeset of the repository the build has been built from.
        :param settings: Settings for the Treeherder job as retrieved from the config file.
        :param treeherder_url: URL of the Treeherder instance.
        :param treeherder_client_id: The client ID necessary for the Hawk authentication.
        :param treeherder_secret: The secret key necessary for the Hawk authentication.
        """
        # Hawk-authenticated client used for all Treeherder requests.
        self.client = TreeherderClient(server_url=treeherder_url,
                                       client_id=treeherder_client_id,
                                       secret=treeherder_secret)

        self.repository = repository
        self.revision = revision
        self.settings = settings

        # Job details accumulate here; they can only be submitted once.
        self._job_details = []
示例#38
0
def on_event(data, message, dry_run, treeherder_server_url, **kwargs):
    """Dispatch a pulse action event to the matching mozci trigger call."""
    if ignored(data):
        return 0  # SUCCESS

    # Flush mozci caches so we operate on fresh data.
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    repo_name = data["project"]
    action = data["action"]
    times = data["times"]
    # Pulse only provides a resultset_id; it is resolved to a revision below.
    resultset_id = data["resultset_id"]

    th_client = TreeherderClient(server_url=treeherder_server_url)

    LOG.info(
        "%s action requested by %s on repo_name %s with resultset_id: %s" %
        (data['action'], data["requester"], data["project"],
         data["resultset_id"]))
    revision = th_client.get_resultsets(
        repo_name, id=resultset_id)[0]["revision"]

    if action == "trigger_missing_jobs":
        BuildAPIManager().trigger_missing_jobs_for_revision(repo_name,
                                                            revision,
                                                            dry_run=dry_run)
    elif action == "trigger_all_talos_jobs":
        trigger_all_talos_jobs(repo_name=repo_name,
                               revision=revision,
                               times=times,
                               priority=-1,
                               dry_run=dry_run)
    else:
        # Unknown actions are a programming error, not a user error.
        raise Exception(
            'We were not aware of the "{}" action. Please address the code.'.
            format(action))

    return 0  # SUCCESS
    def submit(self,
               revision,
               browser,
               timestamp,
               perf_data,
               link='',
               version='',
               repo_link='',
               video_links='',
               extra_info_obj=None):
        """Build a job dataset from the given results and post it to Treeherder.

        :param revision: Changeset the results belong to.
        :param browser: Browser the results were collected with.
        :param timestamp: Epoch timestamp of the run.
        :param perf_data: Performance data payload for the job.
        :param link: Optional link associated with the job.
        :param version: Optional product version string.
        :param repo_link: Optional link to the repository.
        :param video_links: Optional links to captured videos.
        :param extra_info_obj: Optional dict with extra info (defaults to empty).
        """
        # Fix: the former default of {} was a shared mutable default argument,
        # reused across calls; use None and create a fresh dict per call.
        if extra_info_obj is None:
            extra_info_obj = {}

        j_dataset = self.create_job_dataset(revision=revision,
                                            browser=browser,
                                            timestamp=timestamp,
                                            perf_data=perf_data,
                                            link=link,
                                            version=version,
                                            repo_link=repo_link,
                                            video_links=video_links,
                                            extra_info_obj=extra_info_obj)
        tjc = self.create_job_collection(j_dataset)

        # Only pass server_url when one is configured; otherwise rely on
        # the client's built-in default.
        if self.server_url:
            client = TreeherderClient(server_url=self.server_url,
                                      client_id=self.client_id,
                                      secret=self.secret)
        else:
            client = TreeherderClient(client_id=self.client_id,
                                      secret=self.secret)

        client.post_collection(self.repo, tjc)
示例#40
0
def main(args):
    """Compare recent pushes of one Treeherder host against production.

    For each requested project, fetch the 50 most recent pushes from both
    hosts, strip volatile fields, and log a JSON diff (plus jobs URLs) for
    every push that differs.

    :param args: Parsed CLI args with ``host`` and comma-separated ``projects``.
    """
    compare_to_client = TreeherderClient(server_url=HOSTS[args.host])
    production_client = TreeherderClient(server_url=HOSTS["production"])

    # Support comma separated projects
    for _project in args.projects.split(','):
        logger.info("Comparing {} against production.".format(_project))

        pushes = _normalized_pushes(compare_to_client, _project)
        production_pushes = _normalized_pushes(production_client, _project)

        for push, production_push in zip(pushes, production_pushes):
            # Pushes are expected to line up one-to-one by revision.
            assert push["revision"] == production_push["revision"]
            difference = DeepDiff(push, production_push)
            if difference:
                logger.info(difference.to_json())
                # Log a jobs URL for the differing push on both hosts.
                for client in (compare_to_client, production_client):
                    logger.info("{}/#/jobs?repo={}&revision={}".format(
                                client.server_url,
                                _project,
                                push["revision"]))


def _normalized_pushes(client, project, count=50):
    """Fetch *count* pushes and strip fields irrelevant for comparison.

    Removes the host-specific ``id`` / ``result_set_id`` fields in place and
    returns the pushes in their original (fetch) order.
    """
    pushes = client.get_pushes(project, count=count)
    for _push in pushes:
        del _push["id"]
        for _rev in _push["revisions"]:
            del _rev["result_set_id"]
    return pushes
示例#41
0
    def submit(self, revision, browser, timestamp, perf_data, link='', version='', repo_link='', video_links='', extra_info_obj=None):
        """Build a job dataset from the given results and post it to Treeherder.

        :param revision: Changeset the results belong to.
        :param browser: Browser the results were collected with.
        :param timestamp: Epoch timestamp of the run.
        :param perf_data: Performance data payload for the job.
        :param link: Optional link associated with the job.
        :param version: Optional product version string.
        :param repo_link: Optional link to the repository.
        :param video_links: Optional links to captured videos.
        :param extra_info_obj: Optional dict with extra info (defaults to empty).
        """
        # Fix: the former default of {} was a shared mutable default argument,
        # reused across calls; use None and create a fresh dict per call.
        if extra_info_obj is None:
            extra_info_obj = {}

        j_dataset = self.create_job_dataset(revision=revision,
                                            browser=browser,
                                            timestamp=timestamp,
                                            perf_data=perf_data,
                                            link=link,
                                            version=version,
                                            repo_link=repo_link,
                                            video_links=video_links,
                                            extra_info_obj=extra_info_obj)
        tjc = self.create_job_collection(j_dataset)

        # Only pass server_url when one is configured; otherwise rely on
        # the client's built-in default.
        if self.server_url:
            client = TreeherderClient(server_url=self.server_url,
                                      client_id=self.client_id,
                                      secret=self.secret)
        else:
            client = TreeherderClient(client_id=self.client_id,
                                      secret=self.secret)

        client.post_collection(self.repo, tjc)
示例#42
0
    def submit(self, revision, browser, timestamp, perf_data, link='', version='', repo_link='', video_links='', extra_info_obj=None):
        """Build a job dataset from the given results and post it to Treeherder.

        :param revision: Changeset the results belong to.
        :param browser: Browser the results were collected with.
        :param timestamp: Epoch timestamp of the run.
        :param perf_data: Performance data payload for the job.
        :param link: Optional link associated with the job.
        :param version: Optional product version string.
        :param repo_link: Optional link to the repository.
        :param video_links: Optional links to captured videos.
        :param extra_info_obj: Optional dict with extra info (defaults to empty).
        """
        # Fix: the former default of {} was a shared mutable default argument,
        # reused across calls; use None and create a fresh dict per call.
        if extra_info_obj is None:
            extra_info_obj = {}

        j_dataset = self.create_job_dataset(revision=revision,
                                            browser=browser,
                                            timestamp=timestamp,
                                            perf_data=perf_data,
                                            link=link,
                                            version=version,
                                            repo_link=repo_link,
                                            video_links=video_links,
                                            extra_info_obj=extra_info_obj)
        tjc = self.create_job_collection(j_dataset)

        # NOTE(review): the attribute really is spelled 'potocol' here —
        # presumably it is assigned with the same spelling in __init__;
        # confirm before renaming either side.
        client = TreeherderClient(protocol=self.potocol,
                                  host=self.host,
                                  client_id=self.client_id,
                                  secret=self.secret)
        # Deliberately do NOT post a resultset collection: doing so overwrites
        # existing data. See: https://bugzilla.mozilla.org/show_bug.cgi?id=1320694
        client.post_collection(self.repo, tjc)
示例#43
0
    def __init__(self, application, branch, platform, host=TREEHERDER_HOST, protocol="https"):
        """Bind this Treeherder helper to one application/branch/platform.

        :param application: The name of the application to download.
        :param branch: Name of the branch.
        :param platform: Platform of the application.
        :param host: The Treeherder host to make use of.
        :param protocol: The protocol for the Treeherder host.
        """
        self.application = application
        self.branch = branch
        self.platform = platform

        # Module-level logger plus a client pointed at the requested host.
        self.logger = logging.getLogger(__name__)
        self.client = TreeherderClient(host=host, protocol=protocol)
示例#44
0
def query_repositories(clear_cache=False):
    """
    Return dictionary with information about the various repositories.

    The data about a repository looks like this:

    .. code-block:: python

        "ash": {
            "repo": "https://hg.mozilla.org/projects/ash",
            "graph_branches": ["Ash"],
            "repo_type": "hg"
        }

    """
    LOG.debug("Query repositories")
    global REPOSITORIES

    if clear_cache:
        REPOSITORIES = {}

    # Serve from the module-level cache when it is already populated.
    if REPOSITORIES:
        return REPOSITORIES

    # Rebuild the cache from Treeherder, keeping only active repositories.
    REPOSITORIES = {
        th_repo['name']: {
            'repo': th_repo['url'],
            'repo_type': th_repo['dvcs_type'],
            'graph_branches': [th_repo['name'].capitalize()],
        }
        for th_repo in TreeherderClient().get_repositories()
        if th_repo['active_status'] == "active"
    }

    return REPOSITORIES
示例#45
0
    def __init__(self, repository, revision, settings,
                 treeherder_url, treeherder_client_id, treeherder_secret):
        """Create a submission helper bound to one build.

        :param repository: Name of the repository the build has been built from.
        :param revision: Changeset of the repository the build has been built from.
        :param settings: Settings for the Treeherder job as retrieved from the config file.
        :param treeherder_url: URL of the Treeherder instance.
        :param treeherder_client_id: The client ID necessary for the Hawk authentication.
        :param treeherder_secret: The secret key necessary for the Hawk authentication.
        """
        self.repository = repository
        self.revision = revision
        self.settings = settings

        # Job details accumulate here; they can only be submitted once.
        self._job_details = []

        # Hawk-authenticated client used for all Treeherder requests.
        self.client = TreeherderClient(server_url=treeherder_url,
                                       client_id=treeherder_client_id,
                                       secret=treeherder_secret)
示例#46
0
class TriggerBuild(object):
    """Download Firefox builds (try or nightly) and hand them to a Hasal agent.

    Driven entirely by environment-style key/value input: picks the repo
    (try vs nightly) and platform, downloads the build plus its JSON
    metadata, writes a per-run hasal JSON config, and drops it into the
    agent's config directory.

    NOTE(review): this is Python 2 code (print statements, urllib2).
    """
    # Base URL for archive.mozilla.org downloads.
    ARCHIVE_URL = "https://archive.mozilla.org"
    NIGHTLY_LATEST_URL_FOLDER = "/pub/firefox/nightly/latest-mozilla-central/"
    # Per-platform download metadata: archive filename key/extension, the
    # try-download platform tag, and keywords used to match Treeherder jobs.
    PLATFORM_FN_MAPPING = {
        'linux32': {
            'key': 'linux-i686',
            'ext': 'tar.bz2',
            'trydl': 'linux',
            'job': ['linux32']
        },
        'linux64': {
            'key': 'linux-x86_64',
            'ext': 'tar.bz2',
            'trydl': 'linux64',
            'job': ['linux64']
        },
        'mac': {
            'key': 'mac',
            'ext': 'dmg',
            'trydl': 'macosx64',
            'job': ['osx']
        },
        'win32': {
            'key': 'win32',
            'ext': 'zip',
            'trydl': 'win32',
            'job': ['windows', '32']
        },
        'win64': {
            'key': 'win64',
            'ext': 'zip',
            'trydl': 'win64',
            'job': ['windows', '64']
        }
    }
    # Environment-variable keys read from the input data.
    ENV_KEY_TRY_REPO_USER_EMAIL = "EMAIL"
    ENV_KEY_ENABLE_WIN32 = "WIN32_FLAG"
    ENV_KEY_SKIP_STATUS_CHECK = "SKIP_STATUS_CHECK"
    ENV_KEY_OUTPUT_DP = "OUTPUT_DP"
    ENV_KEY_BUILD_HASH = "BUILD_HASH"
    ENV_KEY_BUILD_NO = "BUILD_NUMBER"
    REPO_NAME = {'TRY': "try", "NIGHTLY": "nightly"}
    # Agent config directories per host OS.
    DEFAULT_AGENT_CONF_DIR_LINUX = "/home/hasal/Hasal/agent"
    DEFAULT_AGENT_CONF_DIR_MAC = "/Users/hasal/Hasal/agent"
    DEFAULT_AGENT_CONF_DIR_WIN = "C:\\Users\\user\\Hasal\\agent"
    DEFAULT_AGENT_STATUS_DIR = "agent_status"
    DEFAULT_AGENT_JOB_STATUS = {
        'BEGIN': 'begin',
        'FINISH': 'finish',
        'EXCEPTION': 'exception'
    }
    # Number of 10-second polls while waiting for the agent (sic: "WACTH").
    DEFAULT_AGENT_JOB_WACTH_TIMEOUT = 180

    def __init__(self, input_env_data):
        """Normalize the env input to upper-case keys and derive settings."""
        self.platform_option = 'opt'
        self.thclient = TreeherderClient()
        self.resultsets = []
        self.env_data = {
            key.upper(): value
            for key, value in input_env_data.items()
        }
        self.dispatch_variables(self.env_data)

    def dispatch_variables(self, input_env_data):
        """Derive repo, platform, flags, and paths from the env data."""
        # if user email not in environment data, repo will be the nightly
        if self.ENV_KEY_TRY_REPO_USER_EMAIL in input_env_data.keys():
            self.user_email = input_env_data[self.ENV_KEY_TRY_REPO_USER_EMAIL]
            self.repo = self.REPO_NAME['TRY']
        else:
            self.repo = self.REPO_NAME['NIGHTLY']

        # check current platform, widnows will double check the --win32 flag enabled or not
        if sys.platform == "linux2":
            self.platform = "linux64"
        elif sys.platform == "darwin":
            self.platform = "mac"
        else:
            if self.ENV_KEY_ENABLE_WIN32 in input_env_data.keys(
            ) and input_env_data[self.ENV_KEY_ENABLE_WIN32] == 'true':
                self.platform = "win32"
            else:
                self.platform = "win64"

        # assign skip status check to variable
        if self.ENV_KEY_SKIP_STATUS_CHECK in input_env_data.keys(
        ) and input_env_data[self.ENV_KEY_SKIP_STATUS_CHECK] == 'true':
            self.skip_status_check = True
        else:
            self.skip_status_check = False

        # assign build hash to variable
        if self.ENV_KEY_BUILD_HASH in input_env_data.keys():
            self.build_hash = input_env_data[self.ENV_KEY_BUILD_HASH]
        else:
            self.build_hash = None

        # assign output dp to variable
        if self.ENV_KEY_OUTPUT_DP in input_env_data.keys():
            self.output_dp = input_env_data[self.ENV_KEY_OUTPUT_DP]
        else:
            self.output_dp = os.getcwd()

        # assign build number to variable
        if self.ENV_KEY_BUILD_NO in input_env_data.keys():
            self.jenkins_build_no = input_env_data[self.ENV_KEY_BUILD_NO]
        else:
            self.jenkins_build_no = 0
        self.HASAL_JSON_FN = str(self.jenkins_build_no) + ".json"

    def check_agent_status(self):
        """Poll the agent status dir until the last job finishes.

        Returns True when the agent is idle (no status files, or the latest
        job reported 'finish'); False after the poll timeout expires.
        """
        for i in range(0, self.DEFAULT_AGENT_JOB_WACTH_TIMEOUT):
            # extract job id from agent_status dir
            agent_status_dir_path = os.path.join(os.getcwd(),
                                                 self.DEFAULT_AGENT_STATUS_DIR)
            print "INFO: housekeeping the agent status folder [%s]" % agent_status_dir_path
            if not os.path.exists(agent_status_dir_path):
                os.mkdir(agent_status_dir_path)
            agent_status_file_list = os.listdir(agent_status_dir_path)
            print "DEBUG: current agent status file list [%s]" % agent_status_file_list

            # get latest agent id
            # status files are named "<job_id>.<status>", so the id is the
            # part before the first dot
            job_id_list = [
                int(id.split(".")[0]) for id in agent_status_file_list
            ]
            job_id_list.sort()
            if len(job_id_list) > 0:
                current_id = job_id_list[-1]
            else:
                current_id = 0

            # get latest agent status
            # agent status will sort by alphabetical, so the last one will be the latest status
            job_status_list = [
                status.split(".")[1] for status in agent_status_file_list
                if status.split(".")[0] == str(current_id)
            ]
            job_status_list.sort()
            if len(job_status_list) > 0:
                current_job_status = job_status_list[-1]
            else:
                return True

            if current_job_status == self.DEFAULT_AGENT_JOB_STATUS['FINISH']:
                # clean up all status files once the job has finished
                for target_name in agent_status_file_list:
                    check_target = os.path.join(agent_status_dir_path,
                                                target_name)
                    os.remove(check_target)
                return True
            else:
                time.sleep(10)
        return False

    def trigger(self):
        """Main entry point: wait for the agent, download the build, and
        write the hasal JSON config into the agent's config directory.

        Exits the process with 1 on failure, 0 on success.
        """
        # check agent status folder
        if self.check_agent_status() is False:
            sys.exit(1)

        # download build
        if self.repo == self.REPO_NAME['TRY']:
            download_fx_fp, download_json_fp = self.get_try_build(
                self.user_email, self.build_hash, self.output_dp)
        else:
            download_fx_fp, download_json_fp = self.get_nightly_build(
                self.output_dp)

        if download_fx_fp is None or download_json_fp is None:
            print "ERROR: something wrong with your build download process, please check the setting and job status."
            sys.exit(1)
        else:
            # generate hasal.json data
            with open(download_json_fp) as dl_json_fh:
                dl_json_data = json.load(dl_json_fh)
                perfherder_revision = dl_json_data['moz_source_stamp']
                build_pkg_platform = dl_json_data['moz_pkg_platform']
                # mapping the perfherder pkg platform to nomenclature of builddot
                builddot_mapping_platform = {
                    "linux-i686": "linux32",
                    "linux-x86_64": "linux64",
                    "mac": "osx-10-10",
                    "win32": "windows7-32",
                    "win64": "windows8-64"
                }
                with open(self.HASAL_JSON_FN, "w") as write_fh:
                    write_data = copy.deepcopy(self.env_data)
                    write_data['FX-DL-PACKAGE-PATH'] = download_fx_fp
                    write_data['FX-DL-JSON-PATH'] = download_json_fp
                    write_data['--PERFHERDER-REVISION'] = perfherder_revision
                    write_data[
                        '--PERFHERDER-PKG-PLATFORM'] = builddot_mapping_platform[
                            build_pkg_platform]
                    json.dump(write_data, write_fh)

            if os.path.exists(os.path.join(os.getcwd(), self.HASAL_JSON_FN)):
                print "INFO: current json file created at [%s]" % os.path.join(
                    os.getcwd(), self.HASAL_JSON_FN)
            else:
                print "ERROR: json file not exist in expected path [%s]" % os.path.join(
                    os.getcwd(), self.HASAL_JSON_FN)

            # create agent status folder
            if os.path.exists(
                    os.path.join(os.getcwd(),
                                 self.DEFAULT_AGENT_STATUS_DIR)) is False:
                os.mkdir(
                    os.path.join(os.getcwd(), self.DEFAULT_AGENT_STATUS_DIR))

            # move to agent config folder
            if sys.platform == "linux2":
                new_hasal_json_fp = os.path.join(
                    self.DEFAULT_AGENT_CONF_DIR_LINUX, self.HASAL_JSON_FN)
            elif sys.platform == "darwin":
                new_hasal_json_fp = os.path.join(
                    self.DEFAULT_AGENT_CONF_DIR_MAC, self.HASAL_JSON_FN)
            else:
                new_hasal_json_fp = os.path.join(
                    self.DEFAULT_AGENT_CONF_DIR_WIN, self.HASAL_JSON_FN)
            os.rename(self.HASAL_JSON_FN, new_hasal_json_fp)

            if os.path.exists(new_hasal_json_fp):
                print "INFO: hasal json file move to new location [%s]" % new_hasal_json_fp
            else:
                print "ERROR: hasal json file in not in new location [%s]" % new_hasal_json_fp
            sys.exit(0)

    def fetch_resultset(self, user_email, build_hash, default_count=500):
        """Find the author's resultset (optionally matching build_hash).

        Returns the first matching resultset dict, or None when nothing in
        the most recent `default_count` resultsets matches.
        """
        tmp_resultsets = self.thclient.get_resultsets(self.repo,
                                                      count=default_count)
        for resultset in tmp_resultsets:
            if resultset['author'].lower() == user_email.lower():
                self.resultsets.append(resultset)
                if build_hash is None:
                    return resultset
                elif resultset['revision'] == build_hash:
                    return resultset
        print "Can't find the specify build hash [%s] in resultsets!!" % build_hash
        return None

    def get_job(self, resultset, platform_keyword_list):
        """Return the job of this resultset matching platform keywords + option."""
        jobs = self.thclient.get_jobs(self.repo, result_set_id=resultset['id'])
        for job in jobs:
            cnt = 0
            # a job matches only when every keyword appears in its platform
            for platform_keyword in platform_keyword_list:
                if platform_keyword in job['platform']:
                    cnt += 1
            if job['platform_option'] == self.platform_option and cnt == len(
                    platform_keyword_list):
                return job
        print "Can't find the specify platform [%s] and platform_options [%s] in jobs!!!" % (
            self.platform, self.platform_option)
        return None

    def get_files_from_remote_url_folder(self, remote_url_str):
        """Scrape an archive directory listing into {file_name: href_link}.

        Returns an empty dict on any HTTP or network failure.
        """
        return_dict = {}
        try:
            response_obj = urllib2.urlopen(remote_url_str)
            if response_obj.getcode() == 200:
                for line in response_obj.readlines():
                    # pull the href target out of each anchor tag
                    match = re.search(r'(?<=href=").*?(?=")', line)
                    if match:
                        href_link = match.group(0)
                        f_name = href_link.split("/")[-1]
                        return_dict[f_name] = href_link
            else:
                print "ERROR: fetch remote file list error with code [%s]" % str(
                    response_obj.getcode())
        except Exception as e:
            print "ERROR: [%s]" % e.message
        return return_dict

    def download_file(self, output_dp, download_link):
        """Stream-download a file into output_dp with a progress bar.

        Returns the local file path, or None on failure.
        """
        print "Prepare to download the build from link [%s]" % download_link
        response = requests.get(download_link, verify=False, stream=True)
        download_fn = download_link.split("/")[-1]
        if os.path.exists(output_dp) is False:
            os.makedirs(output_dp)
        download_fp = os.path.join(output_dp, download_fn)
        try:
            try:
                total_len = int(response.headers['content-length'])
            except:
                # content-length may be missing; tqdm then shows no total
                total_len = None
            with open(download_fp, 'wb') as fh:
                for data in tqdm(response.iter_content(chunk_size=512 * 1024),
                                 total=total_len / (512 * 1024)):
                    fh.write(data)
            return download_fp
        except Exception as e:
            print "ERROR: [%s]" % e.message
            return None

    def download_from_remote_url_folder(self, remote_url_str, output_dp):
        """Download the platform's build + JSON metadata from a listing URL.

        Returns (build_path, json_path) on success, False/None on failure.
        NOTE(review): failure returns are inconsistent (False vs None vs
        (None, None) in callers) — confirm before relying on the type.
        """
        # get latest nightly build list from remote url folder
        remote_file_dict = self.get_files_from_remote_url_folder(
            remote_url_str)

        # filter with platform, and return file name with extension
        if len(remote_file_dict.keys()) == 0:
            print "ERROR: can't get remote file list, could be the network error, or url path[%s] wrong!!" % remote_url_str
            return False
        else:
            if self.platform not in self.PLATFORM_FN_MAPPING:
                print "ERROR: we are currently not support the platform[%s] you specified!" % self.platform
                print "We are currently support the platform tag: [%s]" % self.PLATFORM_FN_MAPPING.keys(
                )
                return False
            else:
                # e.g. "linux-x86_64.tar.bz2"; skip signature (.asc) files
                matched_keyword = self.PLATFORM_FN_MAPPING[
                    self.platform]['key'] + "." + self.PLATFORM_FN_MAPPING[
                        self.platform]['ext']
                matched_file_list = [
                    fn for fn in remote_file_dict.keys()
                    if ((matched_keyword in fn) and ('firefox' in fn) and (
                        not fn.endswith('.asc')))
                ]
                if len(matched_file_list) != 1:
                    print "WARN: the possible match file list is not equal 1, list as below: [%s]" % matched_file_list
                    if len(matched_file_list) < 1:
                        return False
                    # more than one candidate: take the alphabetically last
                    matched_file_list = sorted(matched_file_list)[-1:]
                    print "WARN: select following file [%s]" % matched_file_list

        # combine file name with json
        matched_file_name = matched_file_list[0]
        json_file_name = matched_file_name.replace(
            self.PLATFORM_FN_MAPPING[self.platform]['key'] + "." +
            self.PLATFORM_FN_MAPPING[self.platform]['ext'],
            self.PLATFORM_FN_MAPPING[self.platform]['key'] + ".json")
        if json_file_name not in remote_file_dict:
            print "ERROR: can't find the json file[%s] in remote file list[%s]!" % (
                json_file_name, remote_file_dict)
            return False
        else:
            print "DEBUG: matched file name: [%s], json_file_name: [%s]" % (
                matched_file_name, json_file_name)

        # download files
        download_fx_url = self.ARCHIVE_URL + remote_file_dict[matched_file_name]
        download_fx_fp = self.download_file(output_dp, download_fx_url)
        download_json_url = self.ARCHIVE_URL + remote_file_dict[json_file_name]
        download_json_fp = self.download_file(output_dp, download_json_url)

        # check download status
        if download_fx_fp and download_json_fp:
            print "SUCCESS: build files download in [%s], [%s] " % (
                download_fx_fp, download_json_fp)
            return (download_fx_fp, download_json_fp)
        else:
            print "ERROR: build files download in [%s,%s] " % (
                download_fx_fp, download_json_fp)
            return None

    def get_try_build(self, user_email, build_hash, output_dp):
        """Locate and download a try build for the given author/hash.

        Returns (build_path, json_path), or (None, None) when the resultset
        or job cannot be found or the job has not succeeded yet.
        """
        resultset = self.fetch_resultset(user_email, build_hash)

        # check result set
        if resultset:
            # if build hash is not porvided, use the latest revision as build hash value
            if build_hash is None:
                build_hash = resultset['revision']
            print "Resultset is found, and build hash is [%s]" % build_hash

            # compose remote folder url
            build_folder_url_template = "%s/pub/firefox/%s-builds/%s-%s/%s-%s/"
            build_folder_url = build_folder_url_template % (
                self.ARCHIVE_URL, self.repo, user_email, build_hash, self.repo,
                self.PLATFORM_FN_MAPPING[self.platform]['trydl'])

            # skip status check will retrieve the files list from remote folder url
            if self.skip_status_check:
                return self.download_from_remote_url_folder(
                    build_folder_url, output_dp)
            else:
                job = self.get_job(
                    resultset, self.PLATFORM_FN_MAPPING[self.platform]['job'])
                if job:
                    if job['result'].lower() == "success":
                        return self.download_from_remote_url_folder(
                            build_folder_url, output_dp)
                    else:
                        print "WARNING: Current job status is [%s] !! Your build will download when job status is success" % job[
                            'result'].lower()
                        return (None, None)
                else:
                    print "ERROR: can't find the job!"
                    return (None, None)
        else:
            print "ERROR: can't get result set! skip download build from try server, [%s, %s]" % (
                user_email, build_hash)
            return (None, None)

    def get_nightly_build(self, output_dp):
        """Download the latest mozilla-central nightly build into output_dp."""
        remote_url_str = self.ARCHIVE_URL + self.NIGHTLY_LATEST_URL_FOLDER
        return self.download_from_remote_url_folder(remote_url_str, output_dp)
class AutophoneTreeherder(object):
    """Submit Autophone job collections to a Treeherder instance.

    If no Treeherder URL is configured, ``__init__`` returns early and no
    client is created; callers are expected not to post in that case.
    """

    def __init__(self,
                 worker_subprocess,
                 options,
                 jobs,
                 s3_bucket=None,
                 mailer=None):
        """Initialize the submitter.

        :param worker_subprocess: worker that owns this submitter.
        :param options: required configuration object providing
            treeherder_url, treeherder_client_id, treeherder_secret and
            treeherder_retry_wait attributes.
        :param jobs: job tracking object.
        :param s3_bucket: optional S3 bucket for log uploads.
        :param mailer: optional mailer used to report repeated failures.
        """
        assert options, "options is required."

        logger = utils.getLogger()

        self.options = options
        self.jobs = jobs
        self.s3_bucket = s3_bucket
        self.mailer = mailer
        self.worker = worker_subprocess
        self.shutdown_requested = False
        logger.debug('AutophoneTreeherder')

        self.url = self.options.treeherder_url
        if not self.url:
            # Nothing to submit to; leave the client unset.
            logger.debug('AutophoneTreeherder: no treeherder url')
            return

        self.client_id = self.options.treeherder_client_id
        self.secret = self.options.treeherder_secret
        self.retry_wait = self.options.treeherder_retry_wait

        self.client = TreeherderClient(server_url=self.url,
                                       client_id=self.client_id,
                                       secret=self.secret)

        logger.debug('AutophoneTreeherder: %s', self)

    def __str__(self):
        # Do not publish sensitive information (client id / secret).
        whitelist = ('url', 'retry_wait')
        d = {}
        for attr in whitelist:
            d[attr] = getattr(self, attr)
        return '%s' % d

    def post_request(self, machine, project, job_collection, attempts,
                     last_attempt):
        """Post ``job_collection`` to Treeherder for ``project``.

        Returns True on success, False on failure. After the first failed
        attempt, also emails a failure report via the configured mailer
        (if any).
        """
        logger = utils.getLogger()
        logger.debug(
            'AutophoneTreeherder.post_request: %s, attempt=%d, last=%s',
            job_collection.__dict__, attempts, last_attempt)

        try:
            self.client.post_collection(project, job_collection)
            return True
        # 'except ... as' replaces the Python 2-only 'except Exception, e'
        # syntax; it is valid on Python 2.6+ and Python 3.
        except Exception as e:
            logger.exception(
                'Error submitting request to Treeherder, attempt=%d, last=%s',
                attempts, last_attempt)
            if attempts > 1 and self.mailer:
                # Include the server response, when the exception carries one,
                # to make the failure report actionable.
                if hasattr(e, 'response') and e.response:
                    response_json = json.dumps(e.response.json(),
                                               indent=2,
                                               sort_keys=True)
                else:
                    response_json = None
                request_len = len(job_collection.to_json())
                self.mailer.send(
                    '%s attempt %d Error submitting request to Treeherder' %
                    (utils.host(), attempts), 'Phone: %s\n'
                    'Exception: %s\n'
                    'Last attempt: %s\n'
                    'Request length: %d\n'
                    'Response: %s\n' %
                    (machine, e, last_attempt, request_len, response_json))
        return False
示例#48
0
    # NOTE(review): this is a fragment of a CLI entry point; the enclosing
    # function definition is missing from this file, so the block cannot
    # run as-is. Documented in place without altering the code.
    parser = argparse.ArgumentParser(
        "Compare a push from a Treeherder instance to the production instance."
    )
    parser.add_argument(
        "--host", default="localhost", help="Host to compare. It defaults to localhost"
    )
    parser.add_argument("--revision", required=True, help="Revision to compare")
    parser.add_argument(
        "--project",
        default="mozilla-central",
        help="Project to compare. It defaults to mozilla-central",
    )

    args = parser.parse_args()

    # Resolve the push id for the requested revision on the instance under
    # test, then fetch all of its jobs (count=None means "no limit").
    th_instance = TreeherderClient(server_url=HOSTS[args.host])
    th_instance_pushid = th_instance.get_pushes(args.project, revision=args.revision)[0]["id"]
    th_instance_jobs = (
        th_instance.get_jobs(args.project, push_id=th_instance_pushid, count=None) or []
    )

    # Same lookup against production, which serves as the reference.
    production = TreeherderClient(server_url=HOSTS["production"])
    production_pushid = production.get_pushes(args.project, revision=args.revision)[0]["id"]
    production_jobs = production.get_jobs(args.project, push_id=production_pushid, count=None)

    # Index production jobs by guid for the comparison below (the
    # comparison loop itself is cut off in this fragment).
    production_dict = {}
    for job in production_jobs:
        production_dict[job["job_guid"]] = job

    th_instance_dict = {}
    th_instance_not_found = []
示例#49
0
class AutophoneTreeherder(object):
    """Submit Autophone job collections to a Treeherder instance.

    If no Treeherder URL is configured, ``__init__`` returns early and no
    client is created; callers are expected not to post in that case.
    """

    def __init__(self, worker_subprocess, options, jobs, s3_bucket=None,
                 mailer=None):
        """Initialize the submitter.

        :param worker_subprocess: worker that owns this submitter.
        :param options: required configuration object providing
            treeherder_url, treeherder_client_id, treeherder_secret and
            treeherder_retry_wait attributes.
        :param jobs: job tracking object.
        :param s3_bucket: optional S3 bucket for log uploads.
        :param mailer: optional mailer used to report repeated failures.
        """
        assert options, "options is required."

        logger = utils.getLogger()

        self.options = options
        self.jobs = jobs
        self.s3_bucket = s3_bucket
        self.mailer = mailer
        self.worker = worker_subprocess
        self.shutdown_requested = False
        logger.debug('AutophoneTreeherder')

        self.url = self.options.treeherder_url
        if not self.url:
            # Nothing to submit to; leave the client unset.
            logger.debug('AutophoneTreeherder: no treeherder url')
            return

        self.client_id = self.options.treeherder_client_id
        self.secret = self.options.treeherder_secret
        self.retry_wait = self.options.treeherder_retry_wait

        self.client = TreeherderClient(server_url=self.url,
                                       client_id=self.client_id,
                                       secret=self.secret)

        logger.debug('AutophoneTreeherder: %s', self)

    def __str__(self):
        # Do not publish sensitive information (client id / secret).
        whitelist = ('url',
                     'retry_wait')
        d = {}
        for attr in whitelist:
            d[attr] = getattr(self, attr)
        return '%s' % d

    def post_request(self, machine, project, job_collection, attempts, last_attempt):
        """Post ``job_collection`` to Treeherder for ``project``.

        Returns True on success, False on failure. After the first failed
        attempt, also emails a failure report via the configured mailer
        (if any).
        """
        logger = utils.getLogger()
        logger.debug('AutophoneTreeherder.post_request: %s, attempt=%d, last=%s',
                     job_collection.__dict__, attempts, last_attempt)

        try:
            self.client.post_collection(project, job_collection)
            return True
        # 'except ... as' replaces the Python 2-only 'except Exception, e'
        # syntax; it is valid on Python 2.6+ and Python 3.
        except Exception as e:
            logger.exception('Error submitting request to Treeherder, attempt=%d, last=%s',
                             attempts, last_attempt)
            if attempts > 1 and self.mailer:
                # Include the server response, when the exception carries one,
                # to make the failure report actionable.
                if hasattr(e, 'response') and e.response:
                    response_json = json.dumps(e.response.json(),
                                               indent=2, sort_keys=True)
                else:
                    response_json = None
                request_len = len(job_collection.to_json())
                self.mailer.send(
                    '%s attempt %d Error submitting request to Treeherder' %
                    (utils.host(), attempts),
                    'Phone: %s\n'
                    'Exception: %s\n'
                    'Last attempt: %s\n'
                    'Request length: %d\n'
                    'Response: %s\n' % (
                        machine,
                        e,
                        last_attempt,
                        request_len,
                        response_json))
        return False
示例#50
0
def submit(perf_data, failures, revision, summary, engine):
    """Submit a Servo page-load-time run to Treeherder/Perfherder.

    Builds and posts a resultset collection and a job collection (with the
    performance data attached as a ``performance_data`` artifact) to the
    ``servo`` project on treeherder.mozilla.org.

    :param perf_data: structured performance results blob.
    :param failures: list of failing testcase dicts; currently only logged.
    :param revision: dict describing the commit being tested; must provide
        'commit', 'subject' and 'author' (name/email/timestamp).
    :param summary: raw-html result summary shown in the job details.
    :param engine: "gecko" selects the Gecko-comparison job/group labels;
        any other value uses the plain Servo labels.

    Raises TreeherderClientError if collection validation fails, and
    KeyError if the TREEHERDER_CLIENT_ID / TREEHERDER_CLIENT_SECRET
    environment variables are missing.
    """
    print("[DEBUG] failures:")
    print(list(map(lambda x: x['testcase'], failures)))

    author = "{} <{}>".format(revision['author']['name'],
                              revision['author']['email'])

    dataset = [{
        # The top-most revision in the list of commits for a push.
        'revision': revision['commit'],
        'author': author,
        'push_timestamp': int(revision['author']['timestamp']),
        'type': 'push',
        # a list of revisions associated with the resultset. There should
        # be at least one.
        'revisions': [{
            'comment': revision['subject'],
            'revision': revision['commit'],
            'repository': 'servo',
            'author': author
        }]
    }]

    trsc = create_resultset_collection(dataset)

    result = "success"
    # TODO: verify a failed test won't affect Perfherder visualization
    # if len(failures) > 0:
    #     result = "testfailed"

    # Random job guid with the same length as the revision hash.
    hashlen = len(revision['commit'])
    job_guid = ''.join(
        random.choice(string.ascii_letters + string.digits)
        for i in range(hashlen))

    # Both engines submit under the "servo" project; only the job/group
    # labels shown on Treeherder differ.
    project = "servo"
    if engine == "gecko":
        job_symbol = 'PLG'
        group_symbol = 'SPG'
        group_name = 'Servo Perf on Gecko'
    else:
        job_symbol = 'PL'
        group_symbol = 'SP'
        group_name = 'Servo Perf'

    dataset = [{
        'project': project,
        'revision': revision['commit'],
        'job': {
            'job_guid': job_guid,
            'product_name': project,
            'reason': 'scheduler',
            # TODO: What is `who` for?
            'who': 'Servo',
            'desc': 'Servo Page Load Time Tests',
            'name': 'Servo Page Load Time',
            # The symbol representing the job displayed in
            # treeherder.allizom.org
            'job_symbol': job_symbol,

            # The symbol representing the job group in
            # treeherder.allizom.org
            'group_symbol': group_symbol,
            'group_name': group_name,

            # TODO: get the real timing from the test runner
            'submit_timestamp': str(int(time.time())),
            'start_timestamp': str(int(time.time())),
            'end_timestamp': str(int(time.time())),
            'state': 'completed',
            'result': result,  # "success" or "testfailed"
            'machine': 'local-machine',
            # TODO: read platform from test result
            'build_platform': {
                'platform': 'linux64',
                'os_name': 'linux',
                'architecture': 'x86_64'
            },
            'machine_platform': {
                'platform': 'linux64',
                'os_name': 'linux',
                'architecture': 'x86_64'
            },
            'option_collection': {
                'opt': True
            },

            # jobs can belong to different tiers
            # setting the tier here will determine which tier the job
            # belongs to.  However, if a job is set as Tier of 1, but
            # belongs to the Tier 2 profile on the server, it will still
            # be saved as Tier 2.
            'tier': 1,

            # the ``name`` of the log can be the default of "buildbot_text"
            # however, you can use a custom name.  See below.
            # TODO: point this to the log when we have them uploaded to S3
            'log_references': [{
                'url': 'TBD',
                'name': 'test log'
            }],
            # The artifact can contain any kind of structured data
            # associated with a test.
            'artifacts': [
                {
                    'type': 'json',
                    'name': 'performance_data',
                    # TODO: include the job_guid when the runner actually
                    # generates one
                    # 'job_guid': job_guid,
                    'blob': perf_data
                },
                {
                    'type': 'json',
                    'name': 'Job Info',
                    # 'job_guid': job_guid,
                    "blob": {
                        "job_details": [{
                            "content_type": "link",
                            "url": "https://www.github.com/servo/servo",
                            "value": "GitHub",
                            "title": "Source code"
                        }, {
                            "content_type": "raw_html",
                            "title": "Result Summary",
                            "value": summary
                        }]
                    }
                }
            ],
            # List of job guids that were coalesced to this job
            'coalesced': []
        }
    }]

    tjc = create_job_collection(dataset)

    # TODO: extract this read credential code out of this function.
    cred = {
        'client_id': os.environ['TREEHERDER_CLIENT_ID'],
        'secret': os.environ['TREEHERDER_CLIENT_SECRET']
    }

    client = TreeherderClient(server_url='https://treeherder.mozilla.org',
                              client_id=cred['client_id'],
                              secret=cred['secret'])

    # data structure validation is automatically performed here, if validation
    # fails a TreeherderClientError is raised
    client.post_collection('servo', trsc)
    client.post_collection('servo', tjc)
示例#51
0
class TreeherderApi(QueryApi):
    """Query job and resultset information from a Treeherder server.

    Wraps a TreeherderClient and exposes helpers for looking up jobs by
    revision, mapping job results to scheduling statuses, and resolving
    buildbot request ids.
    """

    def __init__(self,
                 server_url='https://treeherder.mozilla.org',
                 treeherder_host=None):
        # `treeherder_host` is retained only for backward compatibility;
        # prefer passing a full `server_url`.
        if treeherder_host:
            LOG.warning(
                "The `TreeherderApi()` parameter `treeherder_host` is deprecated. "
                "Use `server_url` instead, or omit entirely to use the default of "
                "production Treeherder.")
            server_url = 'https://%s' % treeherder_host
        self.treeherder_client = TreeherderClient(server_url=server_url)

    def get_all_jobs(self, repo_name, revision, **params):
        """
        Return all jobs for a given revision.
        If we can't query about this revision in treeherder api, we return an empty list.
        """
        # We query treeherder for its internal revision_id, and then get the jobs from them.
        # We cannot get jobs directly from revision and repo_name in TH api.
        # See: https://bugzilla.mozilla.org/show_bug.cgi?id=1165401
        results = self.treeherder_client.get_resultsets(repo_name,
                                                        revision=revision,
                                                        **params)
        all_jobs = []
        if results:
            revision_id = results[0]["id"]
            # count=2000 caps the number of jobs fetched for one push.
            all_jobs = self.treeherder_client.get_jobs(
                repo_name, count=2000, result_set_id=revision_id, **params)
        return all_jobs

    def get_buildapi_request_id(self, repo_name, job):
        """ Method to return buildapi's request_id.

        Raises ValueError when the job has no 'buildbot_request_id' detail.
        """
        job_details = self.treeherder_client.get_job_details(
            job_id=job["id"],
            title='buildbot_request_id',
            repository=repo_name)
        if not job_details:
            raise ValueError(
                "No buildbot request id for job ({}, {}, {})".format(
                    job["id"], 'buildbot_request_id', repo_name))

        return int(job_details[0]["value"])

    def get_hidden_jobs(self, repo_name, revision):
        """ Return all hidden jobs on Treeherder """
        # visibility='excluded' asks the API for jobs hidden by exclusion
        # profiles.
        return self.get_all_jobs(repo_name,
                                 revision=revision,
                                 visibility='excluded')

    def get_matching_jobs(self, repo_name, revision, buildername):
        """
        Return all jobs that matched the criteria.
        """
        LOG.debug("Find jobs matching '%s'" % buildername)
        all_jobs = self.get_all_jobs(repo_name, revision)
        matching_jobs = []
        for j in all_jobs:
            # ref_data_name holds the buildbot buildername for buildbot jobs.
            if j["ref_data_name"] == buildername:
                matching_jobs.append(j)

        LOG.debug("We have found %d job(s) of '%s'." %
                  (len(matching_jobs), buildername))
        return matching_jobs

    def get_job_status(self, job):
        """
        Helper to determine the scheduling status of a job from treeherder.

        Raises a TreeherderError if the job doesn't complete.
        """
        # A coalesced job was superseded by a newer one and never ran.
        if job["job_coalesced_to_guid"] is not None:
            return COALESCED

        if job["result"] == "unknown":
            if job["state"] == "pending":
                return PENDING
            elif job["state"] == "running":
                return RUNNING
            else:
                return UNKNOWN

        # If the job 'state' is completed, we can have the following possible statuses:
        # https://github.com/mozilla/treeherder/blob/master/treeherder/etl/buildbot.py#L7
        status_dict = {
            "success": SUCCESS,
            "busted": FAILURE,
            "testfailed": FAILURE,
            "skipped": SKIPPED,
            "exception": EXCEPTION,
            "retry": RETRY,
            "usercancel": CANCELLED
        }

        # NOTE(review): an unmapped completed result would raise KeyError
        # here rather than TreeherderError — confirm all results are covered.
        if job["state"] == "completed":
            return status_dict[job["result"]]

        LOG.debug(job)
        raise TreeherderError("Unexpected status")

    def find_all_jobs_by_status(self, repo_name, revision, status):
        """Return names of all jobs on the revision with the given status."""
        builder_names = []
        jobs = self.get_all_jobs(repo_name, revision)
        # filter out those jobs without builder name
        jobs = [job for job in jobs if job['machine_name'] != 'unknown']
        for job in jobs:
            try:
                job_status = self.get_job_status(job)
            except TreeherderError:
                # Skip jobs whose status cannot be determined.
                continue
            if job_status == status:
                # Taskcluster jobs carry their name in job_type_name;
                # buildbot jobs in ref_data_name.
                if job['build_system_type'] == 'taskcluster':
                    job_name = job['job_type_name']
                else:
                    job_name = job['ref_data_name']
                builder_names.append(job_name)
        return builder_names

    def query_revision_for_job(self, repo_name, job_id):
        '''Return revision for a known Treeherder job id.'''
        job_info = self.treeherder_client.get_jobs(repo_name, id=job_id)[0]
        result_sets = self.treeherder_client.get_resultsets(
            repo_name, id=job_info["result_set_id"])
        revision = result_sets[0]["revision"]

        return revision

    def query_revision_for_resultset(self, repo_name, resultset_id):
        '''Return revision for a known Treeherder resultset id.'''
        return self.treeherder_client.get_resultsets(
            repo_name, id=resultset_id)[0]["revision"]
示例#52
0
 def __init__(self):
     self.treeherder_client = TreeherderClient()
def on_event(data, message, dry_run, treeherder_server_url, acknowledge, **kwargs):
    """Act upon Treeherder job events.

    Return if the outcome was successful or not

    :param data: pulse event payload; expected keys include 'action',
        'job_id', 'project' and 'requester'.
    :param message: pulse message; acked when `acknowledge` is True so it
        is removed from the queue.
    :param dry_run: when True, backfills are simulated, not requested.
    :param treeherder_server_url: Treeherder instance to query.
    :param acknowledge: whether to ack `message` after processing.
    :returns: 0 on success, -1 on failure.
    """
    exit_code = 0  # SUCCESS

    if ignored(data):
        if acknowledge:
            # We need to ack the message to remove it from our queue
            message.ack()
        return exit_code

    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    treeherder_client = TreeherderClient(server_url=treeherder_server_url)

    action = data["action"].capitalize()
    job_id = data["job_id"]
    repo_name = data["project"]
    status = None

    # We want to know the status of the job we're processing
    try:
        job_info = treeherder_client.get_jobs(repo_name, id=job_id)[0]
    except IndexError:
        # get_jobs returned an empty list: unknown job id for this repo.
        LOG.info("We could not find any job_info for repo_name: %s and " "job_id: %s" % (repo_name, job_id))
        return exit_code

    buildername = job_info["ref_data_name"]

    # We want to know the revision associated for this job
    result_sets = treeherder_client.get_resultsets(repo_name, id=job_info["result_set_id"])
    revision = result_sets[0]["revision"]

    link_to_job = "{}/#/jobs?repo={}&revision={}&selectedJob={}".format(
        treeherder_server_url, repo_name, revision, job_id
    )

    LOG.info("{} action requested by {} for '{}'".format(action, data["requester"], buildername))
    LOG.info("Request for {}".format(link_to_job))

    # Returns None if the builder name is known-invalid.
    buildername = filter_invalid_builders(buildername)

    if buildername is None:
        LOG.info("Treeherder can send us invalid builder names.")
        LOG.info("See https://bugzilla.mozilla.org/show_bug.cgi?id=1242038.")
        LOG.warning('Requested job name "%s" is invalid.' % job_info["ref_data_name"])
        exit_code = -1  # FAILURE

    # There are various actions that can be taken on a job, however, we currently
    # only process the backfill one
    elif action == "Backfill":
        exit_code = manual_backfill(revision=revision, buildername=buildername, dry_run=dry_run)
        if not dry_run:
            status = "Backfill request sent"
        else:
            status = "Dry-run mode, nothing was backfilled."
        LOG.debug(status)

    else:
        LOG.error('We were not aware of the "{}" action. Please file an issue'.format(action))
        exit_code = -1  # FAILURE

    if acknowledge:
        # We need to ack the message to remove it from our queue
        message.ack()

    return exit_code
示例#54
0
class TreeherderApi(QueryApi):
    """Query job and resultset information from the default Treeherder server.

    Older variant that resolves buildapi request ids via the artifacts API
    rather than job details.
    """

    def __init__(self):
        self.treeherder_client = TreeherderClient()

    def _get_all_jobs(self, repo_name, revision, **params):
        """
        Return all jobs for a given revision.
        If we can't query about this revision in treeherder api, we return an empty list.
        """
        # We query treeherder for its internal revision_id, and then get the jobs from them.
        # We cannot get jobs directly from revision and repo_name in TH api.
        # See: https://bugzilla.mozilla.org/show_bug.cgi?id=1165401
        results = self.treeherder_client.get_resultsets(repo_name,
                                                        revision=revision,
                                                        **params)
        all_jobs = []
        if results:
            revision_id = results[0]["id"]
            # count=2000 caps the number of jobs fetched for one push.
            all_jobs = self.treeherder_client.get_jobs(
                repo_name, count=2000, result_set_id=revision_id, **params)
        return all_jobs

    def get_buildapi_request_id(self, repo_name, job):
        """ Method to return buildapi's request_id.

        Reads the request id from the job's 'buildapi' artifact blob.
        """
        job_id = job["id"]
        query_params = {'job_id': job_id, 'name': 'buildapi'}
        LOG.debug("We are fetching request_id from treeherder artifacts api")
        artifact_content = self.treeherder_client.get_artifacts(
            repo_name, **query_params)
        # NOTE(review): raises IndexError if no artifact matched — confirm
        # callers handle or expect that.
        return artifact_content[0]["blob"]["request_id"]

    def get_hidden_jobs(self, repo_name, revision):
        """ Return all hidden jobs on Treeherder """
        # visibility='excluded' asks the API for jobs hidden by exclusion
        # profiles.
        return self._get_all_jobs(repo_name,
                                  revision=revision,
                                  visibility='excluded')

    def get_matching_jobs(self, repo_name, revision, buildername):
        """
        Return all jobs that matched the criteria.
        """
        LOG.debug("Find jobs matching '%s'" % buildername)
        all_jobs = self._get_all_jobs(repo_name, revision)
        matching_jobs = []
        for j in all_jobs:
            # ref_data_name holds the buildbot buildername for buildbot jobs.
            if j["ref_data_name"] == buildername:
                matching_jobs.append(j)

        LOG.debug("We have found %d job(s) of '%s'." %
                  (len(matching_jobs), buildername))
        return matching_jobs

    def get_job_status(self, job):
        """
        Helper to determine the scheduling status of a job from treeherder.

        Raises a TreeherderError if the job doesn't complete.
        """
        # A coalesced job was superseded by a newer one and never ran.
        if job["job_coalesced_to_guid"] is not None:
            return COALESCED

        if job["result"] == "unknown":
            if job["state"] == "pending":
                return PENDING
            elif job["state"] == "running":
                return RUNNING
            else:
                return UNKNOWN

        # If the job 'state' is completed, we can have the following possible statuses:
        # https://github.com/mozilla/treeherder/blob/master/treeherder/etl/buildbot.py#L7
        status_dict = {
            "success": SUCCESS,
            "busted": FAILURE,
            "testfailed": FAILURE,
            "skipped": SKIPPED,
            "exception": EXCEPTION,
            "retry": RETRY,
            "usercancel": CANCELLED
        }

        # NOTE(review): an unmapped completed result would raise KeyError
        # here rather than TreeherderError — confirm all results are covered.
        if job["state"] == "completed":
            return status_dict[job["result"]]

        LOG.debug(job)
        raise TreeherderError("Unexpected status")
示例#55
0
    def get_test_packages_url(self, properties):
        """Return the URL of the test packages JSON file.

        In case of localized daily builds we can query the en-US build to get
        the URL, but for candidate builds we need the tinderbox build
        of the first parent changeset which was not checked-in by the release
        automation process (necessary until bug 1242035 is not fixed).
        """
        if properties.get('test_packages_url'):
            # Caller already knows the URL; use it verbatim.
            url = properties['test_packages_url']
        else:
            overrides = {
                'locale': 'en-US',
                'extension': 'test_packages.json',
            }

            # Use Treeherder to query for the next revision which has Tinderbox builds
            # available. We can use this revision to retrieve the test-packages URL.
            if properties['tree'].startswith('release-'):
                # Maps our platform labels to Treeherder job-filter params.
                platform_map = {
                    'linux': {
                        'build_platform': 'linux32'
                    },
                    'linux64': {
                        'build_platform': 'linux64'
                    },
                    'macosx': {
                        'build_os': 'mac',
                        'build_architecture': 'x86_64'
                    },
                    'macosx64': {
                        'build_os': 'mac',
                        'build_architecture': 'x86_64'
                    },
                    'win32': {
                        'build_os': 'win',
                        'build_architecture': 'x86'
                    },
                    'win64': {
                        'build_os': 'win',
                        'build_architecture': 'x86_64'
                    },
                }

                self.logger.info(
                    'Querying tinderbox revision for {} build...'.format(
                        properties['tree']))
                # Treeherder works with 12-char short revisions.
                revision = properties['revision'][:12]

                client = TreeherderClient(
                    server_url='https://treeherder.mozilla.org')
                # Walk back up to 50 resultsets ending at our revision.
                resultsets = client.get_resultsets(properties['branch'],
                                                   tochange=revision,
                                                   count=50)

                # Retrieve the option hashes to filter for opt builds
                # NOTE: iteritems() is Python 2 only.
                option_hash = None
                for key, values in client.get_option_collection_hash(
                ).iteritems():
                    for value in values:
                        if value['name'] == 'opt':
                            option_hash = key
                            break
                    if option_hash:
                        break

                # Set filters to speed-up querying jobs
                kwargs = {
                    'job_type_name': 'Build',
                    'exclusion_profile': False,
                    'option_collection_hash': option_hash,
                    'result': 'success',
                }
                kwargs.update(platform_map[properties['platform']])

                # Use the newest resultset that actually has a matching
                # successful build job.
                for resultset in resultsets:
                    kwargs.update({'result_set_id': resultset['id']})
                    jobs = client.get_jobs(properties['branch'], **kwargs)
                    if len(jobs):
                        revision = resultset['revision']
                        break

                self.logger.info(
                    'Found revision for tinderbox build: {}'.format(revision))

                overrides['build_type'] = 'tinderbox'
                overrides['revision'] = revision

            # For update tests we need the test package of the target build. That allows
            # us to add fallback code in case major parts of the ui are changing in Firefox.
            if properties.get('target_buildid'):
                overrides['build_id'] = properties['target_buildid']

            # The test package json file has a prefix with bug 1239808 fixed. Older builds need
            # a fallback to a prefix-less filename.
            try:
                self.logger.info('Querying test packages URL...')
                url = self.query_file_url(properties,
                                          property_overrides=overrides)
            except download_errors.NotFoundError:
                self.logger.info(
                    'URL not found. Querying not-prefixed test packages URL...'
                )
                # Fall back to <build dir>/test_packages.json next to the
                # build, and verify it actually exists.
                extension = overrides.pop('extension')
                build_url = self.query_file_url(properties,
                                                property_overrides=overrides)
                url = '{}/{}'.format(build_url[:build_url.rfind('/')],
                                     extension)
                r = requests.head(url)
                if r.status_code != 200:
                    url = None

            self.logger.info('Found test package URL at: {}'.format(url))

        return url
示例#56
0
 def __init__(self):
     self.treeherder_client = TreeherderClient()
示例#57
0
class TreeWatcher(object):
    """Class to keep track of test jobs starting and finishing, known
    revisions and builders, and re-trigger jobs in either when a job
    fails or a when requested by a user.

    Redundant triggers are prevented by keeping track of each buildername,
    tree, revision we've already triggered. The invariant is that for
    any (buildername, tree, revision) combination, we will only issue triggers
    once. Old revisions are purged after a certain interval, so care must
    be taken that enough revisions are stored at a time to prevent issuing
    redundant triggers.
    """
    # Allow at least this many failures for a revision.
    # If we re-trigger for each orange and per-push orange
    # factor is approximately fixed, we shouldn't need to trigger
    # much more than that for any push that would be suitable to land.
    default_retry = 1
    per_push_failures = 4
    # We may trigger more than this as long as the total is below this
    # proportion of all builds for a push (~3% of jobs for now).
    failure_tolerance_factor = 33

    # See the comment below about pruning old revisions.
    revmap_threshold = 2000
    # If someone asks for more than 20 rebuilds on a push, only give them 20.
    requested_limit = 20

    def __init__(self, ldap_auth, is_triggerbot_user=lambda _: True):
        self.revmap = defaultdict(dict)
        self.revmap_threshold = TreeWatcher.revmap_threshold
        self.auth = ldap_auth
        self.lower_trigger_limit = TreeWatcher.default_retry * TreeWatcher.per_push_failures
        self.log = logging.getLogger('trigger-bot')
        self.is_triggerbot_user = is_triggerbot_user
        self.global_trigger_count = 0
        self.treeherder_client = TreeherderClient()
        self.hidden_builders = set()
        self.refresh_builder_counter = 0

    def _prune_revmap(self):
        # After a certain point we'll need to prune our revmap so it doesn't grow
        # infinitely.
        # We only need to keep an entry around from when we last see it
        # as an incoming revision and the next time it's finished and potentially
        # failed, but it could be pending for a while so we don't know how long that
        # will be.
        target_count = int(TreeWatcher.revmap_threshold * 2 / 3)
        prune_count = len(self.revmap.keys()) - target_count
        self.log.info('Pruning %d entries from the revmap' % prune_count)

        # Could/should use an LRU cache here, but assuming any job will go
        # from pending to complete in 24 hrs and we have up to 528 pushes a
        # day (like we had last April fool's day), that's still just 528
        # entries to sort.
        for rev, data in sorted(self.revmap.items(),
                                key=lambda (k, v): v['time_seen']):
            if not prune_count:
                self.log.info('Finished pruning, oldest rev is now: %s' % rev)
                return

            del self.revmap[rev]
            prune_count -= 1

    def known_rev(self, branch, rev):
        return rev in self.revmap

    def _get_jobs(self, branch, rev, hidden):
        results = self.treeherder_client.get_resultsets(branch, revision=rev)
        jobs = []
        if results:
            result_set_id = results[0]['id']
            kwargs = {
                'count': 2000,
                'result_set_id': result_set_id,
            }
            if hidden:
                kwargs['visibility'] = 'excluded'
            jobs = self.treeherder_client.get_jobs(branch, **kwargs)
        return [
            job['ref_data_name'] for job in jobs
            if not re.match('[a-z0-9]{12}', job['ref_data_name'])
        ]

    def get_hidden_jobs(self, branch, rev):
        return self._get_jobs(branch, rev, True)

    def get_visible_jobs(self, branch, rev):
        return self._get_jobs(branch, rev, False)

    def update_hidden_builders(self, branch, rev):
        hidden_builders = set(self.get_hidden_jobs(branch, rev))
        visible_builders = set(self.get_visible_jobs(branch, rev))
        self.hidden_builders -= visible_builders
        self.hidden_builders |= hidden_builders
        self.log.info('Updating hidden builders')
        self.log.info('There are %d hidden builders on try' %
                      len(self.hidden_builders))

    def failure_trigger(self, branch, rev, builder):

        if rev in self.revmap:

            if 'fail_retrigger' not in self.revmap[rev]:
                self.log.info('Found no request to retrigger %s on failure' %
                              rev)
                return

            seen_builders = self.revmap[rev]['seen_builders']

            if builder in seen_builders:
                self.log.info('We\'ve already seen "%s" at %s and don\'t'
                              ' need to trigger it' % (builder, rev))
                return

            if builder in self.hidden_builders:
                self.log.info(
                    'Would have triggered "%s" at %s due to failures,'
                    ' but that builder is hidden.' % (builder, rev))
                return

            seen_builders.add(builder)

            count = self.revmap[rev]['fail_retrigger']
            seen = self.revmap[rev]['rev_trigger_count']

            triggered = self.attempt_triggers(branch, rev, builder, count,
                                              seen)
            if triggered:
                self.revmap[rev]['rev_trigger_count'] += triggered
                self.log.info('Triggered %d of "%s" at %s' %
                              (triggered, builder, rev))

    def requested_trigger(self, branch, rev, builder):
        if rev in self.revmap and 'requested_trigger' in self.revmap[rev]:

            self.log.info('Found a request to trigger %s and may retrigger' %
                          rev)
            seen_builders = self.revmap[rev]['seen_builders']

            if builder in seen_builders:
                self.log.info('We already triggered "%s" at %s don\'t need'
                              ' to do it again' % (builder, rev))
                return

            seen_builders.add(builder)
            count, talos_count = self.revmap[rev]['requested_trigger']
            if talos_count and 'talos' in builder:
                count = talos_count

            self.log.info('May trigger %d requested jobs for "%s" at %s' %
                          (count, builder, rev))
            self.attempt_triggers(branch, rev, builder, count)

    def add_rev(self, branch, rev, comments, user):

        req_count, req_talos_count, should_retry = self.triggers_from_msg(
            comments)

        # Only trigger based on a request or a failure, not both.
        if req_count or req_talos_count:
            self.log.info('Added %d triggers for %s' % (req_count, rev))
            self.revmap[rev]['requested_trigger'] = (req_count,
                                                     req_talos_count)

        if should_retry and not req_count:
            # self.log.info('Adding default failure retries for %s' % rev)
            self.revmap[rev]['fail_retrigger'] = TreeWatcher.default_retry

        self.revmap[rev]['rev_trigger_count'] = 0

        # When we need to purge old revisions, we need to purge the
        # oldest first.
        self.revmap[rev]['time_seen'] = time.time()

        # Prevent an infinite retrigger loop - if we take a trigger action,
        # ensure we only take it once for a builder on a particular revision.
        self.revmap[rev]['seen_builders'] = set()

        # Filter triggering activity based on users.
        self.revmap[rev]['user'] = user

        if len(self.revmap.keys()) > self.revmap_threshold:
            self._prune_revmap()

    def triggers_from_msg(self, msg):

        try_message = None
        all_try_args = None

        for line in msg.splitlines():
            if 'try: ' in line:
                # Autoland adds quotes to try strings that will confuse our
                # args later on.
                if line.startswith('"') and line.endswith('"'):
                    line = line[1:-1]
                # Allow spaces inside of [filter expressions]
                try_message = line.strip().split('try: ', 1)
                all_try_args = re.findall(r'(?:\[.*?\]|\S)+', try_message[1])
                break

        if not try_message:
            return 0

        parser = argparse.ArgumentParser()
        parser.add_argument('--rebuild', type=int, default=0)
        parser.add_argument('--rebuild-talos',
                            type=int,
                            dest='rebuild_talos',
                            default=0)
        parser.add_argument('--no-retry',
                            action='store_false',
                            dest='retry',
                            default=True)
        (args, _) = parser.parse_known_args(all_try_args)

        limit = TreeWatcher.requested_limit
        rebuilds = args.rebuild if args.rebuild < limit else limit
        rebuild_talos = args.rebuild_talos if args.rebuild_talos < limit else limit
        return rebuilds, rebuild_talos, args.retry

    def handle_message(self, key, branch, rev, builder, status, comments,
                       user):
        if not self.known_rev(branch, rev) and comments:
            # First time we've seen this revision? Add it to known
            # revs and mark required triggers,
            self.add_rev(branch, rev, comments, user)

        if key.endswith('started'):
            # If the job is starting and a user requested unconditional
            # retriggers, process them right away.
            self.requested_trigger(branch, rev, builder)

        if status in (1, 2):
            # A failing job is a candidate to retrigger.
            self.failure_trigger(branch, rev, builder)

        if self.refresh_builder_counter == 0:
            self.update_hidden_builders(branch, rev)
            self.refresh_builder_counter = 300
        else:
            self.refresh_builder_counter -= 1

    def attempt_triggers(self, branch, rev, builder, count, seen=0, attempt=0):
        if not re.match('[a-z0-9]{12}', rev):
            self.log.error(
                '%s doesn\'t look like a valid revision, can\'t trigger it' %
                rev)
            return

        build_data = self._get_ids_for_rev(branch, rev, builder)

        if build_data is None:
            return

        found_buildid, found_requestid, builder_total, rev_total = build_data

        if builder_total > count:
            self.log.warning(
                'Would have triggered %d of "%s" at %s, but we\'ve already'
                ' found more requests than that for this builder/rev.' %
                (count, builder, rev))
            return

        self.log.info("Found %s jobs total for %s" % (rev_total, rev))
        if (seen * self.failure_tolerance_factor > rev_total
                and seen > self.lower_trigger_limit):
            self.log.warning(
                'Would have triggered "%s" at %s but there are already '
                'too many failures.' % (builder, rev))
            return

        self.global_trigger_count += count
        self.log.warning(
            'Up to %d total triggers have been performed by this service.' %
            self.global_trigger_count)

        if not self.is_triggerbot_user(self.revmap[rev]['user']):
            self.log.warning('Would have triggered "%s" at %s %d times.' %
                             (builder, rev, count))
            self.log.warning('But %s is not a triggerbot user.' %
                             self.revmap[rev]['user'])
            # Pretend we did these triggers, just for accounting purposes.
            return count

        self.log.info('attempt_triggers, attempt %d' % attempt)

        root_url = 'https://secure.pub.build.mozilla.org/buildapi/self-serve'
        payload = {
            'count': count,
        }

        if found_buildid:
            build_url = '%s/%s/build' % (root_url, branch)
            payload['build_id'] = found_buildid
        elif found_requestid:
            build_url = '%s/%s/request' % (root_url, branch)
            payload['request_id'] = found_requestid
        else:
            # For a short time after a job starts it seems there might not be
            # any info associated with this job/builder in.
            self.log.warning(
                'Could not trigger "%s" at %s because there were '
                'no builds found with that buildername to rebuild.' %
                (builder, rev))

            if attempt > 4:
                self.log.warning('Already tried to find something to rebuild '
                                 'for "%s" at %s, giving up' % (builder, rev))
                return

            self.log.warning('Will re-attempt')
            tm = Timer(90,
                       self.attempt_triggers,
                       args=[branch, rev, builder, count, seen, attempt + 1])
            tm.start()
            # Assume some subsequent attempt will be succesful for accounting
            # purposes.
            return count

        self._rebuild(build_url, payload)
        return count

    def _get_ids_for_rev(self, branch, rev, builder):
        # Get the request or build id associated with the given branch/rev/builder,
        # if any.
        root_url = 'https://secure.pub.build.mozilla.org/buildapi/self-serve'

        # First find the build_id for the job to rebuild
        build_info_url = '%s/%s/rev/%s?format=json' % (root_url, branch, rev)
        info_req = requests.get(build_info_url,
                                headers={'Accept': 'application/json'},
                                auth=self.auth)
        found_buildid = None
        found_requestid = None
        builder_total, rev_total = 0, 0

        try:
            results = info_req.json()
        except ValueError:
            self.log.error('Received an unexpected ValueError when retrieving '
                           'information about %s from buildapi.' % rev)
            self.log.error('Request status: %d' % info_req.status_code)
            return None

        for res in results:
            rev_total += 1
            if res['buildername'] == builder:
                builder_total += 1
                if 'build_id' in res and not found_buildid:
                    found_buildid = res['build_id']
                if 'request_id' in res and not found_requestid:
                    found_requestid = res['request_id']

        return found_buildid, found_requestid, builder_total, rev_total

    def _rebuild(self, build_url, payload):
        # Actually do the triggering for a url and payload and keep track of the result.
        self.log.info('Triggering url: %s' % build_url)
        self.log.debug('Triggering payload:\n\t%s' % payload)
        req = requests.post(build_url,
                            headers={'Accept': 'application/json'},
                            data=payload,
                            auth=self.auth)
        self.log.info('Requested job, return: %s' % req.status_code)