示例#1
0
 def __init__(self, amqp_prefix, openqa_url):
     """Initialise the listener: base-class setup plus an openQA client."""
     super(Listener, self).__init__(amqp_prefix,
                                    logging.getLogger(__name__))
     # Remember the connection parameters; no projects are registered yet.
     self.amqp_prefix = amqp_prefix
     self.openqa_url = openqa_url
     self.projects = []
     self.openqa = OpenQA_Client(server=openqa_url)
示例#2
0
 def __init__(self, apiurl, opts):
     """Store OBS connection settings and pick the matching openQA server.

     Internal SUSE API endpoints (ending in 'suse.de') are paired with the
     internal openQA instance; everything else uses the public one.
     """
     self.projects = []
     self.opts = opts
     self.apiurl = apiurl
     if apiurl.endswith('suse.de'):
         openqa_url = 'https://openqa.suse.de'
     else:
         openqa_url = 'https://openqa.opensuse.org'
     # Pass the URL as a keyword for consistency with every other
     # OpenQA_Client call site in this code base.
     self.openqa = OpenQA_Client(server=openqa_url)
 def __init__(self, project, dryrun=False):
     """Set up the staging API, openQA client and the ignore list.

     The ignore list is read from '<project>_<ISSUE_FILE>' when that file
     exists; otherwise it starts empty.
     """
     self.project = project
     self.dryrun = dryrun
     self.api = StagingAPI(osc.conf.config['apiurl'],
                           project='openSUSE:%s' % project)
     self.openqa = OpenQA_Client(server='https://openqa.opensuse.org')
     self.issues_to_ignore = []
     self.issuefile = "{}_{}".format(self.project, ISSUE_FILE)
     if os.path.isfile(self.issuefile):
         with open(self.issuefile, 'r') as f:
             # Iterate the file object directly instead of readlines():
             # same lines, without building a full in-memory copy first.
             self.issues_to_ignore = [line.strip() for line in f]
示例#4
0
 def get_latest_job(self, filter):
     """Return the most recently finished passed/softfailed job matching
     *filter*.

     Jobs are sorted by their 't_finished' string; the newest job whose
     result is 'passed' or 'softfailed' wins.

     Raises LatestJobNotFound when no such job exists or any error occurs
     while querying openQA (the broad except is deliberate: callers only
     handle LatestJobNotFound).

     NOTE: the parameter name 'filter' shadows the builtin but is kept to
     preserve keyword-call compatibility.
     """
     try:
         client = OpenQA_Client(server=self._instance)
         result = client.openqa_request('GET', 'jobs', filter)
         jobs = sorted(result['jobs'], key=lambda x: str(x['t_finished']))
         # The original wrapped each job in a single-element list only to
         # unwrap it again ([[job] ...][-1][0]); filter directly instead.
         good = [job for job in jobs
                 if job['result'] in ('passed', 'softfailed')]
         if good:
             return good[-1]
         raise LatestJobNotFound("no such job found")
     except Exception:
         raise LatestJobNotFound("no such job found")
示例#5
0
def _get_conf(jobid, srv):
    """Fetch the test-suite variables for openQA job *jobid* on *srv*.

    Looks up the job's TEST_SUITE_NAME setting, then merges in the settings
    of the test suite with exactly that name (first match wins).  Returns a
    dict that always contains at least 'name'.
    """
    client = OpenQA_Client(server='http://%s' % srv)
    conf = client.openqa_request('GET', 'jobs/%s' % jobid)

    testname = conf['job']['settings']['TEST_SUITE_NAME']
    jobvars = {'name': testname}
    suitevars = client.openqa_request('GET', 'test_suites')

    # Iterate the suite list directly; the original wrapped it in a
    # generator expression that added nothing.
    for suite in suitevars['TestSuites']:
        if testname == suite.get('name'):
            jobvars.update(suite)
            break
    return jobvars
示例#6
0
 def __init__(self, project, dryrun=False, norelease=False, api_url=None, openqa_server='https://openqa.opensuse.org', test_subproject=None):
     """Initialise the manager for *project*.

     A falsy api_url falls back to the configured OBS API URL; a falsy
     test_subproject defaults to 'ToTest'.
     """
     self.project = project
     self.dryrun = dryrun
     self.norelease = norelease
     # Fall back to the osc configuration when no API URL was supplied.
     self.api = StagingAPI(api_url or osc.conf.config['apiurl'],
                           project=project)
     self.openqa_server = openqa_server
     self.test_project = '%s:%s' % (project, test_subproject or 'ToTest')
     self.openqa = OpenQA_Client(server=openqa_server)
     self.load_issues_to_ignore()
     self.project_base = project.split(':')[0]
     self.update_pinned_descr = False
     self.amqp_url = osc.conf.config.get('ttm_amqp_url')
def _get_conf(jobid, srv):
    """Fetch the test-suite variables for openQA job *jobid* on *srv*.

    Matches test suites by name *prefix* rather than equality, because some
    jobs run under a name derived from a suite by appending a suffix in the
    job group (e.g. installer_extended_textmode).  Returns a dict that
    always contains at least 'name'.
    """
    client = OpenQA_Client(server='http://%s' % srv)
    conf = client.openqa_request('GET', 'jobs/%s' % jobid)

    testname = conf['job']['test']
    jobvars = {'name': testname}
    suitevars = client.openqa_request('GET', 'test_suites')

    # Iterate the list directly; the original wrapped it in a no-op
    # generator expression.  Deliberately no break: when several suite
    # names are a prefix of the job name, later entries overwrite earlier
    # ones, exactly as before.
    for suite in suitevars['TestSuites']:
        if testname.startswith(suite['name']):
            jobvars.update(suite)
    return jobvars
 def __init__(self, project, dryrun=False, api_url=None, openqa_server='https://openqa.opensuse.org', test_subproject=None):
     """Initialise state for *project*.

     A falsy api_url falls back to the configured OBS API URL; a falsy
     test_subproject defaults to 'ToTest'.  The ignore list is read from
     '<project>_<ISSUE_FILE>' when that file exists.
     """
     self.project = project
     self.dryrun = dryrun
     if not api_url:
         api_url = osc.conf.config['apiurl']
     self.api = StagingAPI(api_url, project=project)
     self.openqa_server = openqa_server
     if not test_subproject:
         test_subproject = 'ToTest'
     self.test_project = '%s:%s' % (self.project, test_subproject)
     self.openqa = OpenQA_Client(server=openqa_server)
     self.issues_to_ignore = []
     self.issuefile = "{}_{}".format(self.project, ISSUE_FILE)
     if os.path.isfile(self.issuefile):
         with open(self.issuefile, 'r') as f:
             # Iterate the file object directly instead of readlines():
             # same lines, without a full in-memory copy.
             for line in f:
                 self.issues_to_ignore.append(line.strip())
     self.project_base = project.split(':')[0]
     self.update_pinned_descr = False
示例#9
0
    def __init__(self, *args, **kwargs):
        """Pop bot-specific keyword options before delegating to ReviewBot.

        Recognised options (all removed from kwargs):
            force       -- only the exact value True enables forcing
            openqa      -- openQA server URL used to construct the client
            do_comments -- None keeps the default (True)
        """
        # kwargs.pop() replaces the original check/del pairs; these keys
        # must not reach ReviewBot.__init__.
        self.force = kwargs.pop('force', None) is True
        if 'openqa' in kwargs:
            self.openqa = OpenQA_Client(server=kwargs.pop('openqa'))
        else:
            self.openqa = None
        do_comments = kwargs.pop('do_comments', None)
        self.do_comments = True if do_comments is None else do_comments

        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.logger.debug(self.do_comments)

        self.commentapi = CommentAPI(self.apiurl)
        self.update_test_builds = {}
    def setup_checker(self):
        """Create the review bot and copy the CLI options onto it."""
        checker = ReviewBot.CommandLineInterface.setup_checker(self)

        if self.options.force:
            checker.force = True
        checker.do_comments = self.options.comment
        # A target openQA instance is required to do anything useful.
        if not self.options.openqa:
            raise osc.oscerr.WrongArgs("missing openqa url")
        checker.openqa = OpenQA_Client(server=self.options.openqa)

        # Publish the CLI logger under the module-level name used by
        # helper functions in this module.
        global logger
        logger = self.logger

        return checker
示例#11
0
    def setup_checker(self):
        """Create the review bot, apply CLI options and attach metadata."""
        checker = ReviewBot.CommandLineInterface.setup_checker(self)

        if self.options.force:
            checker.force = True

        checker.do_comments = self.options.comment

        # A target openQA instance is mandatory for this checker.
        if not self.options.openqa:
            raise osc.oscerr.WrongArgs("missing openqa url")

        checker.openqa = OpenQA_Client(server=self.options.openqa)

        # Hand the loaded project metadata over to the bot.
        project_settings, target_repo, api_map = self._load_metadata()
        checker.api_map = api_map
        checker.tgt_repo = target_repo
        checker.project_settings = project_settings

        return checker
    def __init__(self, *args, **kwargs):
        """Pop bot-specific keyword options before delegating to ReviewBot.

        Recognised options (all removed from kwargs):
            force       -- only the exact value True enables forcing
            openqa      -- openQA server URL used to construct the client
            do_comments -- None keeps the default (True)
        """
        # kwargs.pop() replaces the original check/del pairs; these keys
        # must not reach ReviewBot.__init__.
        self.force = kwargs.pop('force', None) is True
        if 'openqa' in kwargs:
            self.openqa = OpenQA_Client(server=kwargs.pop('openqa'))
        else:
            self.openqa = None
        do_comments = kwargs.pop('do_comments', None)
        self.do_comments = True if do_comments is None else do_comments

        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.logger.debug(self.do_comments)

        self.commentapi = CommentAPI(self.apiurl)
    def trigger_tests(
        self,
        client: OpenQA_Client,
        casedir: str,
        build: str,
        dry_run: bool = False,
        openqa_host_os: OpenqaHostOsT = "opensuse",
    ) -> List[JobScheduledReply]:
        """Schedule an openQA run for every package, optionally adding a
        UEFI variant, and return the scheduler replies.

        In dry-run mode the would-be POST requests are printed instead of
        sent and an empty list is returned.
        """
        param_sets = []
        for pkg in self.packages:
            param_sets.append(
                dict(self._params_from_pkg(pkg, casedir, build,
                                           efi_mode=False)))

            # UEFI variant: same parameters plus the firmware images.
            if self.with_uefi and pkg.supports_uefi:
                uefi_params = self._params_from_pkg(pkg,
                                                    casedir,
                                                    build,
                                                    efi_mode=True)
                pflash = get_uefi_pflash(openqa_host_os)
                uefi_params["UEFI_PFLASH_CODE"] = pflash.code
                uefi_params["UEFI_PFLASH_VARS"] = pflash.vars
                param_sets.append(dict(uefi_params))

        scheduled = []
        for params in param_sets:
            if dry_run:
                print("POST", "isos", params)
            else:
                scheduled.append(
                    client.openqa_request("POST",
                                          "isos",
                                          params,
                                          retries=0))

        return scheduled
示例#14
0
class OpenQABot(ReviewBot.ReviewBot):
    """Track openQA test runs for maintenance requests and trigger new ones.

    (The previous docstring, "check ABI of library packages", was a
    copy-paste leftover and did not describe this class.)
    """
    def __init__(self, *args, **kwargs):
        """Pop bot-specific keyword options before delegating to ReviewBot.

        Recognised options (all removed from kwargs):
            force       -- only the exact value True enables forcing
            openqa      -- openQA server URL used to construct the client
            do_comments -- None keeps the default (True)
        """
        # kwargs.pop() replaces the original check/del pairs; these keys
        # must not reach ReviewBot.__init__.
        self.force = kwargs.pop('force', None) is True
        if 'openqa' in kwargs:
            self.openqa = OpenQA_Client(server=kwargs.pop('openqa'))
        else:
            self.openqa = None
        do_comments = kwargs.pop('do_comments', None)
        self.do_comments = True if do_comments is None else do_comments

        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.logger.debug(self.do_comments)

        self.commentapi = CommentAPI(self.apiurl)
        self.update_test_builds = dict()

    def gather_test_builds(self):
        """Record, per target project, the BUILD of the last current job.

        0 is stored when no job was found for a project.
        """
        for prj, u in TARGET_REPO_SETTINGS[self.openqa.baseurl].items():
            buildnr = 0
            # The BUILD of the last job returned wins.
            for j in self.jobs_for_target(u):
                buildnr = j['settings']['BUILD']
            self.update_test_builds[prj] = buildnr

    # reimplementation from baseclass
    def check_requests(self):
        """Review requests with started openQA runs; once everything is
        done, retrigger builds for the current repos and re-check."""

        # first calculate the latest build number for current jobs
        self.gather_test_builds()

        started = []
        all_done = True
        # then check progress on running incidents
        for req in self.requests:
            # just patch apiurl in to avoid having to pass it around
            req.apiurl = self.apiurl
            jobs = self.request_get_openqa_jobs(req,
                                                incident=True,
                                                test_repo=True)
            ret = self.calculate_qa_status(jobs)
            if ret != QA_UNKNOWN:
                started.append(req)
                if ret == QA_INPROGRESS:
                    all_done = False

        all_requests = self.requests
        self.requests = started
        ReviewBot.ReviewBot.check_requests(self)

        if not all_done:
            # NOTE(review): self.requests is left pointing at `started`
            # here (not restored); behavior kept as-is.
            return

        self.requests = all_requests

        # now make sure the jobs are for current repo
        for prj, u in TARGET_REPO_SETTINGS[self.openqa.baseurl].items():
            self.trigger_build_for_target(prj, u)

        ReviewBot.ReviewBot.check_requests(self)

    def check_action_maintenance_release(self, req, a):
        """Collect the patchinfo binaries of a maintenance release request
        and POST matching openQA test runs.

        Returns None for actions this bot does not handle; raises when the
        patchinfo contains no recognisable packages.
        """
        # we only look at the binaries of the patchinfo
        if a.src_package != 'patchinfo':
            return None

        if a.tgt_project not in PROJECT_OPENQA_SETTINGS:
            # warning() instead of the deprecated warn() alias
            self.logger.warning("not handling %s" % a.tgt_project)
            return None

        packages = []
        patch_id = None
        # patchinfo collects the binaries and is build for an
        # unpredictable architecture so we need iterate over all
        url = osc.core.makeurl(
            self.apiurl,
            ('build', a.src_project, a.tgt_project.replace(':', '_')))
        root = ET.parse(osc.core.http_GET(url)).getroot()
        for arch in [n.attrib['name'] for n in root.findall('entry')]:
            query = {'nosource': 1}
            url = osc.core.makeurl(
                self.apiurl,
                ('build', a.src_project, a.tgt_project.replace(
                    ':', '_'), arch, a.src_package),
                query=query)

            root = ET.parse(osc.core.http_GET(url)).getroot()

            for binary in root.findall('binary'):
                m = pkgname_re.match(binary.attrib['filename'])
                if m:
                    # can't use arch here as the patchinfo mixes all
                    # archs
                    packages.append(
                        Package(m.group('name'), m.group('version'),
                                m.group('release')))
                elif binary.attrib['filename'] == 'updateinfo.xml':
                    url = osc.core.makeurl(
                        self.apiurl,
                        ('build', a.src_project, a.tgt_project.replace(
                            ':', '_'), arch, a.src_package, 'updateinfo.xml'))
                    ui = ET.parse(osc.core.http_GET(url)).getroot()
                    patch_id = ui.find('.//id').text

        if not packages:
            raise Exception("no packages found")

        self.logger.debug('found packages %s and patch id %s',
                          ' '.join(set([p.name for p in packages])), patch_id)

        for update in PROJECT_OPENQA_SETTINGS[a.tgt_project]:
            settings = update.settings(a.src_project, a.tgt_project, packages,
                                       req)
            # Guard before touching settings: the original assigned
            # INCIDENT_PATCH first, which would crash on a None result.
            if settings is not None:
                settings['INCIDENT_PATCH'] = patch_id
                update.calculate_lastest_good_updates(self.openqa, settings)

                self.logger.info("posting %s %s %s", settings['VERSION'],
                                 settings['ARCH'], settings['BUILD'])
                self.logger.debug('\n'.join(
                    ["  %s=%s" % i for i in settings.items()]))
                if not self.dryrun:
                    try:
                        ret = self.openqa.openqa_request('POST',
                                                         'isos',
                                                         data=settings,
                                                         retries=1)
                        self.logger.info(pformat(ret))
                    # 'except X as e' replaces the Python-2-only
                    # 'except X, e' syntax (works on 2.6+ and 3.x).
                    except JSONDecodeError as e:
                        self.logger.error(e)
                        # TODO: record error
                    except openqa_exceptions.RequestError as e:
                        self.logger.error(e)
示例#15
0
 def setup(self, project):
     """Prepare the publisher for *project*.

     Runs the base-class setup, then creates an openQA client pointed at
     the project's configured server and reloads the ignore list.
     """
     super(ToTestPublisher, self).setup(project)
     self.openqa = OpenQA_Client(server=self.project.openqa_server)
     # Reset the flag before reloading the ignore list; statement order
     # kept as-is.
     self.update_pinned_descr = False
     self.load_issues_to_ignore()
示例#16
0
class ToTestBase(object):

    """Base class to store the basic interface"""

    product_repo = 'images'
    product_arch = 'local'
    livecd_repo = 'images'
    livecd_archs = ['i586', 'x86_64']

    def __init__(self, project, dryrun=False, api_url=None, openqa_server='https://openqa.opensuse.org', test_subproject=None):
        """Initialise state for *project*.

        A falsy api_url falls back to the configured OBS API URL; a falsy
        test_subproject defaults to 'ToTest'.  The ignore list is read
        from '<project>_<ISSUE_FILE>' when that file exists.
        """
        self.project = project
        self.dryrun = dryrun
        if not api_url:
            api_url = osc.conf.config['apiurl']
        self.api = StagingAPI(api_url, project=project)
        self.openqa_server = openqa_server
        if not test_subproject:
            test_subproject = 'ToTest'
        self.test_project = '%s:%s' % (self.project, test_subproject)
        self.openqa = OpenQA_Client(server=openqa_server)
        self.issues_to_ignore = []
        self.issuefile = "{}_{}".format(self.project, ISSUE_FILE)
        if os.path.isfile(self.issuefile):
            with open(self.issuefile, 'r') as f:
                # Iterate the file object directly instead of readlines():
                # same lines, without a full in-memory copy.
                for line in f:
                    self.issues_to_ignore.append(line.strip())
        self.project_base = project.split(':')[0]
        self.update_pinned_descr = False

    def openqa_group(self):
        """openQA job group to query; defaults to the project name."""
        return self.project

    def iso_prefix(self):
        """Prefix used in ISO file names; defaults to the project name."""
        return self.project

    def jobs_num(self):
        """Minimum number of openQA jobs expected for a complete run."""
        return 70

    def current_version(self):
        """Version currently being built (the release package version)."""
        return self.release_version()

    def binaries_of_product(self, project, product):
        """Return the binary file names of *product* in *project*.

        Returns an empty list when the build result cannot be fetched.
        """
        url = self.api.makeurl(['build', project, self.product_repo, self.product_arch, product])
        try:
            f = self.api.retried_GET(url)
        except urllib2.HTTPError:
            return []

        ret = []
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            ret.append(binary.get('filename'))

        return ret

    def get_current_snapshot(self):
        """Return the current snapshot in the test project"""

        for binary in self.binaries_of_product(self.test_project, '_product:%s-cd-mini-%s' % (self.project_base, self.arch())):
            result = re.match(r'%s-%s-NET-.*-Snapshot(.*)-Media.iso' % (self.project_base, self.iso_prefix()),
                              binary)
            if result:
                return result.group(1)

        return None

    def ftp_build_version(self, project, tree, base=None):
        """Extract the build number from the FTP tree's media report file.

        Raises NotFoundException when no report file matches.
        """
        if not base:
            base = self.project_base
        for binary in self.binaries_of_product(project, tree):
            result = re.match(r'%s.*Build(.*)-Media1.report' % base, binary)
            if result:
                return result.group(1)
        raise NotFoundException("can't find %s ftp version" % project)

    def iso_build_version(self, project, tree, base=None):
        """Extract the build number from an ISO file name in *tree*.

        Raises NotFoundException when no ISO matches.
        """
        if not base:
            base = self.project_base
        for binary in self.binaries_of_product(project, tree):
            result = re.match(r'%s.*Build(.*)-Media(.*).iso' % base, binary)
            if result:
                return result.group(1)
        raise NotFoundException("can't find %s iso version" % project)

    def release_version(self):
        """Version taken from the release package's source RPM name.

        Raises NotFoundException when no matching binary is found.
        """
        url = self.api.makeurl(['build', self.project, 'standard', self.arch(),
                                '_product:%s-release' % self.project_base])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            binary = binary.get('filename', '')
            result = re.match(r'.*-([^-]*)-[^-]*.src.rpm', binary)
            if result:
                return result.group(1)

        raise NotFoundException("can't find %s version" % self.project)

    def current_qa_version(self):
        """Snapshot version currently under QA (from the dashboard)."""
        return self.api.dashboard_content_load('version_totest')

    def find_openqa_results(self, snapshot):
        """Return the openqa jobs of a given snapshot and filter out the
        cloned jobs

        """

        url = makeurl(self.openqa_server,
                      ['api', 'v1', 'jobs'], {'group': self.openqa_group(), 'build': snapshot, 'latest': 1})
        f = self.api.retried_GET(url)
        jobs = []
        for job in json.load(f)['jobs']:
            if job['clone_id'] or job['result'] == 'obsoleted':
                continue
            job['name'] = job['name'].replace(snapshot, '')
            jobs.append(job)
        return jobs

    def _result2str(self, result):
        """Human-readable name for a QA_* result constant."""
        if result == QA_INPROGRESS:
            return 'inprogress'
        elif result == QA_FAILED:
            return 'failed'
        else:
            return 'passed'

    def find_failed_module(self, testmodules):
        """Return the name of the first failed module flagged fatal or
        important; log other failed modules and return None."""
        # print json.dumps(testmodules, sort_keys=True, indent=4)
        for module in testmodules:
            if module['result'] != 'failed':
                continue
            flags = module['flags']
            if 'fatal' in flags or 'important' in flags:
                # The `break` that used to follow this return was
                # unreachable and has been removed.
                return module['name']
            logger.info('%s %s %s' %
                        (module['name'], module['result'], module['flags']))

    def update_openqa_status_message(self):
        """Create or update the pinned 'Ignored issues' comment in the
        project's openQA job group."""
        url = makeurl(self.openqa_server,
                      ['api', 'v1', 'job_groups'])
        f = self.api.retried_GET(url)
        job_groups = json.load(f)
        group_id = 0
        for jg in job_groups:
            if jg['name'] == self.openqa_group():
                group_id = jg['id']
                break

        if not group_id:
            logger.debug('No openQA group id found for status comment update, ignoring')
            return

        pinned_ignored_issue = 0
        issues = ' , '.join(self.issues_to_ignore)
        status_flag = 'publishing' if self.status_for_openqa['is_publishing'] else \
            'preparing' if self.status_for_openqa['can_release'] else \
                'testing' if self.status_for_openqa['snapshotable'] else \
                'building'
        # openQA build tag; status_flag is used for both the flag and the
        # description field.
        status_msg = "tag:{}:{}:{}".format(self.status_for_openqa['new_snapshot'], status_flag, status_flag)
        msg = "pinned-description: Ignored issues\r\n\r\n{}\r\n\r\n{}".format(issues, status_msg)
        data = {'text': msg}

        url = makeurl(self.openqa_server,
                      ['api', 'v1', 'groups', str(group_id), 'comments'])
        f = self.api.retried_GET(url)
        comments = json.load(f)
        # Reuse the existing pinned comment from the 'ttm' user, if any.
        for comment in comments:
            if comment['userName'] == 'ttm' and \
                    comment['text'].startswith('pinned-description: Ignored issues'):
                pinned_ignored_issue = comment['id']

        logger.debug('Writing openQA status message: {}'.format(data))
        if not self.dryrun:
            if pinned_ignored_issue:
                self.openqa.openqa_request(
                    'PUT', 'groups/%s/comments/%d' % (group_id, pinned_ignored_issue), data=data)
            else:
                self.openqa.openqa_request(
                    'POST', 'groups/%s/comments' % group_id, data=data)

    def overall_result(self, snapshot):
        """Analyze the openQA jobs of a given snapshot Returns a QAResult"""

        if snapshot is None:
            return QA_FAILED

        jobs = self.find_openqa_results(snapshot)

        if len(jobs) < self.jobs_num():  # not yet scheduled
            logger.warning('we have only %s jobs' % len(jobs))
            return QA_INPROGRESS

        number_of_fails = 0
        in_progress = False
        for job in jobs:
            # print json.dumps(job, sort_keys=True, indent=4)
            if job['result'] in ('failed', 'incomplete', 'skipped', 'user_cancelled', 'obsoleted', 'parallel_failed'):
                jobname = job['name']
                # print json.dumps(job, sort_keys=True, indent=4), jobname
                url = makeurl(self.openqa_server,
                              ['api', 'v1', 'jobs', str(job['id']), 'comments'])
                f = self.api.retried_GET(url)
                comments = json.load(f)
                refs = set()
                labeled = 0
                to_ignore = False
                for comment in comments:
                    for ref in comment['bugrefs']:
                        refs.add(str(ref))
                    if comment['userName'] == 'ttm' and comment['text'] == 'label:unknown_failure':
                        labeled = comment['id']
                    if re.search(r'@ttm:? ignore', comment['text']):
                        to_ignore = True
                # A failure is ignored only when every bug reference is on
                # the ignore list (or explicitly marked '@ttm ignore').
                ignored = len(refs) > 0
                for ref in refs:
                    if ref not in self.issues_to_ignore:
                        if to_ignore:
                            self.issues_to_ignore.append(ref)
                            self.update_pinned_descr = True
                            with open(self.issuefile, 'a') as f:
                                f.write("%s\n" % ref)
                        else:
                            ignored = False

                if not ignored:
                    number_of_fails += 1
                    if not labeled and len(refs) > 0 and not self.dryrun:
                        data = {'text': 'label:unknown_failure'}
                        self.openqa.openqa_request(
                            'POST', 'jobs/%s/comments' % job['id'], data=data)
                elif labeled:
                    # remove flag - unfortunately can't delete comment unless admin
                    data = {'text': 'Ignored issue'}
                    self.openqa.openqa_request(
                        'PUT', 'jobs/%s/comments/%d' % (job['id'], labeled), data=data)

                if ignored:
                    logger.info("job %s failed, but was ignored", jobname)
                else:
                    joburl = '%s/tests/%s' % (self.openqa_server, job['id'])
                    logger.info("job %s failed, see %s", jobname, joburl)

            elif job['result'] == 'passed' or job['result'] == 'softfailed':
                continue
            elif job['result'] == 'none':
                if job['state'] != 'cancelled':
                    in_progress = True
            else:
                raise Exception(job['result'])

        if number_of_fails > 0:
            return QA_FAILED

        if in_progress:
            return QA_INPROGRESS

        return QA_PASSED

    def all_repos_done(self, project, codes=None):
        """Check the build result of the project and only return True if all
        repos of that project are either published or unpublished

        """

        # coolo's experience says that 'finished' won't be
        # sufficient here, so don't try to add it :-)
        codes = ['published', 'unpublished'] if not codes else codes

        url = self.api.makeurl(
            ['build', project, '_result'], {'code': 'failed'})
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        ready = True
        for repo in root.findall('result'):
            # ignore ports. 'factory' is used by arm for repos that are not
            # meant to use the totest manager.
            if repo.get('repository') in ('ports', 'factory', 'images_staging'):
                continue
            # ignore 32bit for now. We're only interested in aarch64 here
            if repo.get('arch') in ('armv6l', 'armv7l'):
                continue
            if repo.get('dirty', '') == 'true':
                logger.info('%s %s %s -> %s' % (repo.get('project'),
                                                repo.get('repository'), repo.get('arch'), 'dirty'))
                ready = False
            if repo.get('code') not in codes:
                logger.info('%s %s %s -> %s' % (repo.get('project'),
                                                repo.get('repository'), repo.get('arch'), repo.get('code')))
                ready = False
        return ready

    def maxsize_for_package(self, package):
        """Maximum allowed ISO size in bytes for *package*, or None when
        the package has no size limit.

        Raises Exception for unknown package kinds.
        """
        if re.match(r'.*-mini-.*', package):
            return 737280000  # a CD needs to match

        if re.match(r'.*-dvd5-.*', package):
            return 4700372992  # a DVD needs to match

        if re.match(r'livecd-x11', package):
            return 681574400  # not a full CD

        if re.match(r'livecd-.*', package):
            return 999999999  # a GB stick

        if re.match(r'.*-(dvd9-dvd|cd-DVD)-.*', package):
            return 8539996159

        if re.match(r'.*-ftp-(ftp|POOL)-', package):
            return None

        # NOTE(review): self.base is not assigned in this class -
        # presumably provided by a subclass; verify.
        if ':%s-Addon-NonOss-ftp-ftp' % self.base in package:
            return None

        raise Exception('No maxsize for {}'.format(package))

    def package_ok(self, project, package, repository, arch):
        """Checks one package in a project and returns True if it's succeeded

        """

        query = {'package': package, 'repository': repository, 'arch': arch}

        url = self.api.makeurl(['build', project, '_result'], query)
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for repo in root.findall('result'):
            status = repo.find('status')
            if status.get('code') != 'succeeded':
                logger.info(
                    '%s %s %s %s -> %s' % (project, package, repository, arch, status.get('code')))
                return False

        maxsize = self.maxsize_for_package(package)
        if not maxsize:
            return True

        url = self.api.makeurl(['build', project, repository, arch, package])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            if not binary.get('filename', '').endswith('.iso'):
                continue
            isosize = int(binary.get('size', 0))
            if isosize > maxsize:
                logger.error('%s %s %s %s: %s' % (
                    project, package, repository, arch, 'too large by %s bytes' % (isosize - maxsize)))
                return False

        return True

    def is_snapshottable(self):
        """Check various conditions required for factory to be snapshotable

        """

        if not self.all_repos_done(self.project):
            return False

        for product in self.ftp_products + self.main_products:
            if not self.package_ok(self.project, product, self.product_repo, self.product_arch):
                return False

            # NOTE(review): this livecd block sits inside the product loop
            # and therefore repeats the same checks once per product; the
            # result is identical each iteration, so behavior is kept
            # as-is.
            if len(self.livecd_products):

                if not self.all_repos_done('%s:Live' % self.project):
                    return False

                for arch in self.livecd_archs:
                    for product in self.livecd_products:
                        if not self.package_ok('%s:Live' % self.project, product, self.livecd_repo, arch):
                            return False

        return True

    def _release_package(self, project, package, set_release=None):
        """Issue an OBS 'release' command for one package (logged only in
        dry-run mode)."""
        query = {'cmd': 'release'}

        if set_release:
            query['setrelease'] = set_release

        # FIXME: make configurable. openSUSE:Factory:ARM currently has multiple
        # repos with release targets, so obs needs to know which one to release
        if project == 'openSUSE:Factory:ARM':
            query['repository'] = 'images'

        baseurl = ['source', project, package]

        url = self.api.makeurl(baseurl, query=query)
        if self.dryrun:
            logger.info("release %s/%s (%s)" % (project, package, set_release))
        else:
            self.api.retried_POST(url)

    def _release(self, set_release=None):
        """Release the FTP, live-CD and main products to their targets."""
        for product in self.ftp_products:
            self._release_package(self.project, product)

        for cd in self.livecd_products:
            self._release_package('%s:Live' %
                                  self.project, cd, set_release=set_release)

        for cd in self.main_products:
            self._release_package(self.project, cd, set_release=set_release)

    def update_totest(self, snapshot=None):
        """Disable publishing on the test project and release *snapshot*
        into it."""
        release = 'Snapshot%s' % snapshot if snapshot else None
        logger.info('Updating snapshot %s' % snapshot)
        if not self.dryrun:
            self.api.switch_flag_in_prj(self.test_project, flag='publish', state='disable')

        self._release(set_release=release)

    def publish_factory_totest(self):
        """Enable publishing on the test project."""
        logger.info('Publish test project content')
        if not self.dryrun:
            self.api.switch_flag_in_prj(
                self.test_project, flag='publish', state='enable')

    def totest_is_publishing(self):
        """Find out if the publishing flag is set in totest's _meta"""

        url = self.api.makeurl(
            ['source', self.test_project, '_meta'])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        publish = root.find('publish')
        # Explicit emptiness test: relying on the boolean value of an
        # Element (False when it has no children) is deprecated in
        # ElementTree.  A missing or empty <publish/> means default: true.
        if publish is None or len(publish) == 0:
            return True

        for flag in publish:
            if flag.get('repository', None) or flag.get('arch', None):
                continue
            if flag.tag == 'enable':
                return True
        return False

    def totest(self):
        """Run one manager iteration: evaluate QA results, then publish
        and/or release a snapshot as appropriate."""
        try:
            current_snapshot = self.get_current_snapshot()
        except NotFoundException as e:
            # nothing in test project (yet)
            # warning() instead of the deprecated warn() alias
            logger.warning(e)
            current_snapshot = None
        new_snapshot = self.current_version()
        self.update_pinned_descr = False
        current_result = self.overall_result(current_snapshot)
        current_qa_version = self.current_qa_version()

        logger.info('current_snapshot %s: %s' %
                    (current_snapshot, self._result2str(current_result)))
        logger.debug('new_snapshot %s', new_snapshot)
        logger.debug('current_qa_version %s', current_qa_version)

        snapshotable = self.is_snapshottable()
        logger.debug("snapshotable: %s", snapshotable)
        can_release = ((current_snapshot is None or current_result != QA_INPROGRESS) and snapshotable)

        # not overwriting
        if new_snapshot == current_snapshot:
            logger.debug("no change in snapshot version")
            can_release = False
        elif not self.all_repos_done(self.test_project):
            logger.debug("not all repos done, can't release")
            # the repos have to be done, otherwise we better not touch them
            # with a new release
            can_release = False

        can_publish = (current_result == QA_PASSED)

        # already published
        totest_is_publishing = self.totest_is_publishing()
        if totest_is_publishing:
            logger.debug("totest already publishing")
            can_publish = False

        if self.update_pinned_descr:
            self.status_for_openqa = {
                'current_snapshot': current_snapshot,
                'new_snapshot': new_snapshot,
                'snapshotable': snapshotable,
                'can_release': can_release,
                'is_publishing': totest_is_publishing,
            }
            self.update_openqa_status_message()

        if can_publish:
            if current_qa_version == current_snapshot:
                self.publish_factory_totest()
                self.write_version_to_dashboard("snapshot", current_snapshot)
                can_release = False  # we have to wait
            else:
                # We reached a very bad status: openQA testing is 'done', but not of the same version
                # currently in test project. This can happen when 'releasing' the
                # product failed
                raise Exception("Publishing stopped: tested version (%s) does not match version in test project (%s)"
                                % (current_qa_version, current_snapshot))

        if can_release:
            self.update_totest(new_snapshot)
            self.write_version_to_dashboard("totest", new_snapshot)

    def release(self):
        """Unconditionally release the current version to totest."""
        new_snapshot = self.current_version()
        self.update_totest(new_snapshot)

    def write_version_to_dashboard(self, target, version):
        """Persist *version* under 'version_<target>' on the dashboard
        (skipped in dry-run mode)."""
        if not self.dryrun:
            self.api.dashboard_content_ensure('version_%s' % target, version, comment='Update version')
示例#17
0
#!/usr/bin/python3

import sys

from openqa_client.client import OpenQA_Client

# Schedule a qubesos install-iso test run on the Qubes openQA instance.
# Usage: <script> VERSION BUILD ISO_FILENAME
version, build, iso_name = sys.argv[1:4]

# Builds named "kernel-latest" are tested against the latest kernel,
# everything else against the stable one.
if "kernel-latest" in build:
    kernel = "latest"
else:
    kernel = "stable"

job_params = {
    'DISTRI': 'qubesos',
    'VERSION': version,
    'FLAVOR': 'install-iso',
    'ARCH': 'x86_64',
    'BUILD': build,
    'ISO_URL': f'https://qubes.notset.fr/iso/{iso_name}',
    'KERNEL_VERSION': kernel,
}

client = OpenQA_Client(server='openqa.qubes-os.org')
print(client.openqa_request('POST', 'isos', job_params))
示例#18
0
class ToTestPublisher(ToTestManager):
    """Publish a tested snapshot.

    Evaluates the openQA results of the current snapshot and, when all
    relevant jobs passed, publishes the test project and keeps the openQA
    job-group metadata (ignored-issue list, 'published' tag) in sync.
    """

    def __init__(self, tool):
        ToTestManager.__init__(self, tool)

    def setup(self, project):
        """Bind to *project* and prepare the openQA client and the list of
        issues that may be ignored when judging failed jobs."""
        super(ToTestPublisher, self).setup(project)
        self.openqa = OpenQA_Client(server=self.project.openqa_server)
        # Set to True when a new ignored issue is learned; triggers a
        # job-group description update in wait_for_published().
        self.update_pinned_descr = False
        self.load_issues_to_ignore()

    def overall_result(self, snapshot):
        """Analyze the openQA jobs of a given snapshot. Returns a QAResult.

        Side effects: fills self.failed_relevant_jobs and
        self.failed_ignored_jobs, labels jobs via openQA comments and
        persists the ignored-issue list.
        """

        if snapshot is None:
            return QAResult.failed

        jobs = self.find_openqa_results(snapshot)

        self.failed_relevant_jobs = []
        self.failed_ignored_jobs = []

        if len(jobs) < self.project.jobs_num:  # not yet scheduled
            self.logger.warning('we have only %s jobs' % len(jobs))
            return QAResult.inprogress

        in_progress = False
        for job in jobs:
            # print json.dumps(job, sort_keys=True, indent=4)
            if job['result'] in ('failed', 'incomplete', 'skipped',
                                 'user_cancelled', 'obsoleted',
                                 'parallel_failed'):
                # Inspect the job's comments for bug references and for
                # '@ttm ignore' directives before deciding relevance.
                # print json.dumps(job, sort_keys=True, indent=4), jobname
                url = makeurl(
                    self.project.openqa_server,
                    ['api', 'v1', 'jobs',
                     str(job['id']), 'comments'])
                f = self.api.retried_GET(url)
                comments = json.load(f)
                refs = set()
                labeled = 0
                to_ignore = False
                for comment in comments:
                    for ref in comment['bugrefs']:
                        refs.add(str(ref))
                    if comment['userName'] == 'ttm' and comment[
                            'text'] == 'label:unknown_failure':
                        labeled = comment['id']
                    if re.search(r'@ttm:? ignore', comment['text']):
                        to_ignore = True
                # to_ignore can happen with or without refs
                ignored = True if to_ignore else len(refs) > 0
                build_nr = str(job['settings']['BUILD'])
                for ref in refs:
                    if ref not in self.issues_to_ignore:
                        if to_ignore:
                            self.issues_to_ignore[ref] = build_nr
                            self.update_pinned_descr = True
                        else:
                            ignored = False
                    else:
                        # update reference
                        self.issues_to_ignore[ref] = build_nr

                if ignored:
                    self.failed_ignored_jobs.append(job['id'])
                    if labeled:
                        text = 'Ignored issue' if len(
                            refs) > 0 else 'Ignored failure'
                        # remove flag - unfortunately can't delete comment unless admin
                        data = {'text': text}
                        if self.dryrun:
                            self.logger.info('Would label {} with: {}'.format(
                                job['id'], text))
                        else:
                            self.openqa.openqa_request('PUT',
                                                       'jobs/%s/comments/%d' %
                                                       (job['id'], labeled),
                                                       data=data)

                    self.logger.info('job %s failed, but was ignored',
                                     job['name'])
                else:
                    self.failed_relevant_jobs.append(job['id'])
                    if not labeled and len(refs) > 0:
                        data = {'text': 'label:unknown_failure'}
                        if self.dryrun:
                            self.logger.info(
                                'Would label {} as unknown'.format(job['id']))
                        else:
                            self.openqa.openqa_request('POST',
                                                       'jobs/%s/comments' %
                                                       job['id'],
                                                       data=data)

                    joburl = '%s/tests/%s' % (self.project.openqa_server,
                                              job['id'])
                    self.logger.info('job %s failed, see %s', job['name'],
                                     joburl)

            elif job['result'] == 'passed' or job['result'] == 'softfailed':
                continue
            elif job['result'] == 'none':
                if job['state'] != 'cancelled':
                    in_progress = True
            else:
                # Unexpected openQA result value - fail loudly so new
                # states get handled explicitly.
                raise Exception(job['result'])

        self.save_issues_to_ignore()

        if len(self.failed_relevant_jobs) > 0:
            return QAResult.failed

        if in_progress:
            return QAResult.inprogress

        return QAResult.passed

    def send_amqp_event(self, current_snapshot, current_result):
        """Emit a pubsub message about the snapshot's build result.

        Silently skipped when no ttm_amqp_url is configured in oscrc.
        """
        amqp_url = osc.conf.config.get('ttm_amqp_url')
        if not amqp_url:
            self.logger.debug(
                'No ttm_amqp_url configured in oscrc - skipping amqp event emission'
            )
            return

        self.logger.debug('Sending AMQP message')
        # 'passed'/'failed' -> 'pass'/'fail' for the routing key suffix
        inf = re.sub(r'ed$', '', str(current_result))
        msg_topic = '%s.ttm.build.%s' % (self.project.base.lower(), inf)
        msg_body = json.dumps({
            'build': current_snapshot,
            'project': self.project.name,
            'failed_jobs': {
                'relevant': self.failed_relevant_jobs,
                'ignored': self.failed_ignored_jobs,
            }
        })

        # send amqp event
        tries = 7  # arbitrary
        for t in range(tries):
            try:
                notify_connection = pika.BlockingConnection(
                    pika.URLParameters(amqp_url))
                notify_channel = notify_connection.channel()
                notify_channel.exchange_declare(exchange='pubsub',
                                                exchange_type='topic',
                                                passive=True,
                                                durable=True)
                notify_channel.basic_publish(exchange='pubsub',
                                             routing_key=msg_topic,
                                             body=msg_body)
                notify_connection.close()
                break
            except pika.exceptions.ConnectionClosed as e:
                # Logger.warn is a deprecated alias of Logger.warning
                self.logger.warning(
                    'Sending AMQP event did not work: %s. Retrying try %s out of %s'
                    % (e, t, tries))
        else:
            self.logger.error(
                'Could not send out AMQP event for %s tries, aborting.' %
                tries)

    def publish(self, project, force=False):
        """Evaluate openQA results for *project* and publish if passed.

        Returns a QAResult (or None when there is nothing to do).
        Raises when the tested version no longer matches the test project.
        """
        self.setup(project)

        if not self.get_status('testing'):
            # migrating to the attribute status

            try:
                self.update_status('testing',
                                   self.version_from_totest_project())
            except NotFoundException:
                self.logger.error(
                    'Nothing in totest - release something first')
                return None

            self.update_status(
                'publishing',
                self.api.pseudometa_file_load(self.version_file('snapshot')))

        current_snapshot = self.get_status('testing')

        # NOTE(review): return value unused here; kept for the lookup's
        # debug logging - confirm whether this call can be dropped.
        group_id = self.openqa_group_id()

        if self.get_status('publishing') == current_snapshot:
            self.logger.info(
                '{} is already publishing'.format(current_snapshot))
            # migrating - if there is no published entry, the last publish call
            # didn't wait for publish - and as such didn't set published state
            if self.get_status('published') != current_snapshot:
                return QAResult.passed
            return None

        current_result = self.overall_result(current_snapshot)
        current_qa_version = self.current_qa_version()

        self.logger.info('current_snapshot {}: {}'.format(
            current_snapshot, str(current_result)))
        self.logger.debug('current_qa_version {}'.format(current_qa_version))

        self.send_amqp_event(current_snapshot, current_result)

        if current_result == QAResult.failed:
            self.update_status('failed', current_snapshot)
            return QAResult.failed
        else:
            self.update_status('failed', '')

        if current_result != QAResult.passed:
            return QAResult.inprogress

        if current_qa_version != current_snapshot:
            # We reached a very bad status: openQA testing is 'done', but not of the same version
            # currently in test project. This can happen when 'releasing' the
            # product failed
            raise Exception(
                'Publishing stopped: tested version (%s) does not match version in test project (%s)'
                % (current_qa_version, current_snapshot))

        self.publish_factory_totest()
        self.write_version_to_dashboard('snapshot', current_snapshot)
        self.update_status('publishing', current_snapshot)
        return QAResult.passed

    def wait_for_published(self, project, force=False):
        """Block until the test project's repos are published, then record
        the 'published' state and tag the openQA job group."""
        self.setup(project)

        if not force:
            wait_time = 20
            while not self.all_repos_done(self.project.test_project):
                if self.dryrun:
                    self.logger.info(
                        '{} is still not published, do not wait as dryrun.'.
                        format(self.project.test_project))
                    return
                self.logger.info(
                    '{} is still not published, waiting {} seconds'.format(
                        self.project.test_project, wait_time))
                time.sleep(wait_time)

        current_snapshot = self.get_status('publishing')
        if self.dryrun:
            self.logger.info(
                'Publisher finished, updating published snapshot to {}'.format(
                    current_snapshot))
            return

        self.update_status('published', current_snapshot)
        group_id = self.openqa_group_id()
        if not group_id:
            return

        self.add_published_tag(group_id, current_snapshot)
        if self.update_pinned_descr:
            self.update_openqa_status_message(group_id)

    def find_openqa_results(self, snapshot):
        """Return the openqa jobs of a given snapshot and filter out the
        cloned jobs

        """

        url = makeurl(self.project.openqa_server, ['api', 'v1', 'jobs'], {
            'group': self.project.openqa_group,
            'build': snapshot,
            'latest': 1
        })
        f = self.api.retried_GET(url)
        jobs = []
        for job in json.load(f)['jobs']:
            if job['clone_id'] or job['result'] == 'obsoleted':
                continue
            # strip the build number so job names are comparable across
            # snapshots
            job['name'] = job['name'].replace(snapshot, '')
            jobs.append(job)
        return jobs

    def add_published_tag(self, group_id, snapshot):
        """Post a 'published' tag comment on the openQA job group."""
        if self.dryrun:
            return

        url = makeurl(self.project.openqa_server,
                      ['api', 'v1', 'groups',
                       str(group_id), 'comments'])

        status_flag = 'published'
        data = {
            'text': 'tag:{}:{}:{}'.format(snapshot, status_flag, status_flag)
        }
        self.openqa.openqa_request('POST',
                                   'groups/%s/comments' % group_id,
                                   data=data)

    def openqa_group_id(self):
        """Return the id of the configured openQA job group, or None."""
        url = makeurl(self.project.openqa_server, ['api', 'v1', 'job_groups'])
        f = self.api.retried_GET(url)
        job_groups = json.load(f)
        for jg in job_groups:
            if jg['name'] == self.project.openqa_group:
                return jg['id']

        self.logger.debug(
            'No openQA group id found for status comment update, ignoring')

    def update_openqa_status_message(self, group_id):
        """Create or update the pinned ignored-issues comment on the group."""
        pinned_ignored_issue = 0
        issues = ' , '.join(self.issues_to_ignore.keys())
        msg = 'pinned-description: Ignored issues\r\n\r\n{}'.format(issues)
        data = {'text': msg}

        url = makeurl(self.project.openqa_server,
                      ['api', 'v1', 'groups',
                       str(group_id), 'comments'])
        f = self.api.retried_GET(url)
        comments = json.load(f)
        for comment in comments:
            if comment['userName'] == 'ttm' and \
                    comment['text'].startswith('pinned-description: Ignored issues'):
                pinned_ignored_issue = comment['id']

        self.logger.debug('Writing openQA status message: {}'.format(data))
        if not self.dryrun:
            if pinned_ignored_issue:
                self.openqa.openqa_request('PUT',
                                           'groups/%s/comments/%d' %
                                           (group_id, pinned_ignored_issue),
                                           data=data)
            else:
                self.openqa.openqa_request('POST',
                                           'groups/%s/comments' % group_id,
                                           data=data)

    def load_issues_to_ignore(self):
        """Populate self.issues_to_ignore from the IgnoredIssues attribute."""
        text = self.api.attribute_value_load('IgnoredIssues')
        if text:
            root = yaml.safe_load(text)
            # Default to an empty dict so a document without 'last_seen'
            # does not leave issues_to_ignore as None.
            self.issues_to_ignore = root.get('last_seen', dict())
        else:
            self.issues_to_ignore = dict()

    def save_issues_to_ignore(self):
        """Persist self.issues_to_ignore to the IgnoredIssues attribute."""
        if self.dryrun:
            return
        text = yaml.dump({'last_seen': self.issues_to_ignore},
                         default_flow_style=False)
        self.api.attribute_value_save('IgnoredIssues', text)

    def publish_factory_totest(self):
        """Enable publishing on the test project (and its image/container
        repos where configured)."""
        self.logger.info('Publish test project content')
        if self.dryrun or self.project.do_not_release:
            return
        if self.project.container_products:
            self.logger.info('Releasing container products from ToTest')
            for container in self.project.container_products:
                self.release_package(
                    self.project.test_project,
                    container.package,
                    repository=self.project.totest_container_repo)
        self.api.switch_flag_in_prj(self.project.test_project,
                                    flag='publish',
                                    state='enable',
                                    repository=self.project.product_repo)

        if self.project.totest_images_repo != self.project.product_repo:
            self.logger.info('Publish test project content (image_products)')
            self.api.switch_flag_in_prj(
                self.project.test_project,
                flag='publish',
                state='enable',
                repository=self.project.totest_images_repo)
示例#19
0
class Listener(PubSubConsumer):
    """AMQP consumer reacting to OBS repo-publish and openQA job events
    for a set of registered projects."""

    def __init__(self, amqp_prefix, openqa_url):
        super(Listener, self).__init__(amqp_prefix,
                                       logging.getLogger(__name__))
        self.projects = []
        self.amqp_prefix = amqp_prefix
        self.openqa_url = openqa_url
        self.openqa = OpenQA_Client(server=openqa_url)
        # (project, staging) pairs still needing a status refresh
        self.projects_to_check = set()

    def routing_keys(self):
        """Return the AMQP routing keys this listener subscribes to."""
        ret = []
        for suffix in [
                '.obs.repo.published', '.openqa.job.done',
                '.openqa.job.create', '.openqa.job.restart'
        ]:
            ret.append(self.amqp_prefix + suffix)
        return ret

    def add(self, project):
        """Register *project* and give it a back-reference to this listener."""
        project.listener = self
        self.projects.append(project)

    def start_consuming(self):
        # now we are (re-)connected to the bus and need to fetch the
        # initial state
        self.projects_to_check = set()
        for project in self.projects:
            self.logger.info('Fetching ISOs of %s', project.name)
            for sproj in project.init():
                self.projects_to_check.add((project, sproj))
        self.logger.info('Finished fetching initial ISOs, listening')
        super(Listener, self).start_consuming()

    def interval(self):
        # poll faster while there is a backlog of stagings to check
        if len(self.projects_to_check):
            return 5
        return super(Listener, self).interval()

    def check_some_projects(self):
        """Process up to 5 queued staging checks per invocation."""
        count = 0
        limit = 5
        while len(self.projects_to_check):
            project, staging = self.projects_to_check.pop()
            project.update_staging_status(staging)
            count += 1
            if count >= limit:
                return

    def still_alive(self):
        self.check_some_projects()
        super(Listener, self).still_alive()

    def jobs_for_iso(self, iso):
        """Return the latest current openQA jobs for *iso*."""
        values = {
            'iso': iso,
            'scope': 'current',
            'latest': '1',
        }
        jobs = self.openqa.openqa_request('GET', 'jobs', values)['jobs']
        # Ignore PR verification runs (and jobs without 'BUILD')
        return [
            job for job in jobs
            if '/' not in job['settings'].get('BUILD', '/')
        ]

    def get_step_url(self, testurl, modulename):
        """Return a deep link to the first failed step of *modulename*."""
        failurl = testurl + '/modules/{!s}/fails'.format(
            quote_plus(modulename))
        fails = requests.get(failurl).json()
        failed_step = fails.get('first_failed_step', 1)
        return "{!s}#step/{!s}/{:d}".format(testurl, modulename, failed_step)

    def test_url(self, job):
        """Return the test URL for *job*, deep-linked to the first failed
        module when the job failed."""
        url = self.openqa_url + ("/tests/%d" % job['id'])
        if job['result'] == 'failed':
            for module in job['modules']:
                if module['result'] == 'failed':
                    return self.get_step_url(url, module['name'])
        return url

    def on_published_repo(self, payload):
        for p in self.projects:
            p.check_published_repo(str(payload['project']),
                                   str(payload['repo']),
                                   str(payload['buildid']))

    def on_openqa_job(self, iso):
        self.logger.debug('openqa_job_change %s', iso)
        for p in self.projects:
            p.openqa_job_change(iso)

    def on_message(self, unused_channel, method, properties, body):
        """Dispatch an incoming AMQP message to the matching handler."""
        self.acknowledge_message(method.delivery_tag)
        # Use the instance's prefix; the bare module-level name is not
        # guaranteed to exist here.
        if method.routing_key == '{}.obs.repo.published'.format(self.amqp_prefix):
            self.on_published_repo(json.loads(body))
        elif re.search(r'.openqa.', method.routing_key):
            data = json.loads(body)
            # default to '' so a missing BUILD does not raise TypeError
            if '/' in data.get('BUILD', ''):
                return  # Ignore PR verification runs
            self.on_openqa_job(data.get('ISO'))
        else:
            self.logger.warning("unknown rabbitmq message {}".format(
                method.routing_key))
示例#20
0
 def __init__(self, amqp_prefix, openqa_url):
     # Bind this consumer to one AMQP prefix / openQA server pair and
     # open the API client used for later job queries.
     super(Listener, self).__init__(amqp_prefix, logging.getLogger(__name__))
     # Projects registered later - presumably via an add() call outside
     # this fragment; TODO confirm.
     self.projects = []
     self.amqp_prefix = amqp_prefix
     self.openqa_url = openqa_url
     self.openqa = OpenQA_Client(server=openqa_url)
示例#21
0
class Fetcher(object):
    """Collect build and openQA status for a list of OBS projects.

    The openQA instance is derived from the apiurl: internal (suse.de)
    maps to openqa.suse.de, everything else to openqa.opensuse.org.
    """

    def __init__(self, apiurl, opts):
        self.projects = []
        self.opts = opts
        self.apiurl = apiurl
        if apiurl.endswith('suse.de'):
            openqa_url = 'https://openqa.suse.de'
        else:
            openqa_url = 'https://openqa.opensuse.org'
        self.openqa = OpenQA_Client(openqa_url)

    def openqa_results(self, openqa_group, snapshot):
        """Return {state/result: [job names]} for the group's snapshot jobs,
        skipping cloned and obsoleted jobs."""
        jobs = {}
        if not openqa_group or not snapshot:
            return jobs
        result = self.openqa.openqa_request('GET', 'jobs', {'groupid': openqa_group, 'build': snapshot, 'latest': 1})
        for job in result['jobs']:
            if job['clone_id'] or job['result'] == 'obsoleted':
                continue
            key = job['result']
            if job['state'] != 'done':
                key = job['state']
                # collapse transient scheduler states into 'running'
                if key == 'uploading' or key == 'assigned':
                    key = 'running'
            jobs.setdefault(key, []).append(job['name'])
        return jobs

    def add(self, name, **kwargs):
        # cyclic dependency!
        self.projects.append(Project(self, name, kwargs))

    def build_summary(self, project, repository):
        """Summarize build results of *repository* in *project*.

        Returns a dict with a 0-1000 'building' progress score plus
        'failed'/'unresolvable' counts; {'building': -1} when the state
        cannot be determined.
        """
        url = makeurl(self.apiurl, ['build', project, '_result'], { 'repository': repository, 'view': 'summary' })
        try:
            f = http_GET(url)
        except HTTPError:
            return { 'building': -1 }
        root = ET.parse(f).getroot()
        failed = 0
        unresolvable = 0
        building = 0
        succeeded = 0
        for result in root.findall('.//statuscount'):
            code = result.get('code')
            count = int(result.get('count'))
            if code == 'excluded' or code == 'disabled':
                continue # ignore
            if code == 'succeeded':
                succeeded += count
                continue
            if code == "failed":
                failed += count
                continue
            if code == "unresolvable":
                unresolvable += count
                continue
            # every other code counts as still building
            building += count
        # let's count them as building
        if building > 0:
            building += unresolvable
            unresolvable = 0
        if building + failed + succeeded == 0:
            return {'building': -1}
        return { 'building': 1000 - int(building * 1000 / (building + failed + succeeded)),
                 'failed': failed,
                 'unresolvable': unresolvable }

    def generate_all_archs(self, project):
        """Return a query-string fragment enabling every arch of *project*."""
        meta = ET.fromstringlist(show_project_meta(self.apiurl, project))
        archs = set()
        for arch in meta.findall('.//arch'):
            archs.add(arch.text)
        result = []
        for arch in archs:
            result.append(f"arch_{arch}=1")
        return '&'.join(result)

    def fetch_ttm_status(self, project):
        """Return the ToTestManagerStatus attribute as a dict (empty if unset)."""
        text = attribute_value_load(self.apiurl, project, 'ToTestManagerStatus')
        if text:
            return yaml.safe_load(text)
        return dict()

    def fetch_product_version(self, project):
        """Return the ProductVersion attribute value of *project*."""
        return attribute_value_load(self.apiurl, project, 'ProductVersion')
示例#22
0
def ensure_kiwi_settings(client: OpenQA_Client) -> None:
    """Idempotently create/update the kiwi openQA configuration.

    Ensures the test suites from KIWI_TEST_SUITES, the products from
    KIWI_PRODUCTS, the 64bit machine and the kiwi job group (with its job
    template) exist on the openQA instance behind *client*.

    Raises ValueError when duplicates of any of these objects are found.
    """

    test_suites = client.openqa_request("GET", "test_suites")["TestSuites"]

    for suite_name in KIWI_TEST_SUITES:
        # comprehension instead of list(filter(lambda ...)) for clarity
        matching_suites = [s for s in test_suites if s["name"] == suite_name]
        params = {**KIWI_TEST_SUITES[suite_name], "name": suite_name}
        if len(matching_suites) > 1:
            raise ValueError(
                f"Found {len(matching_suites)} with the name {suite_name}")
        elif len(matching_suites) == 1:
            client.openqa_request(
                "POST",
                f"test_suites/{matching_suites[0]['id']}",
                params=params,
            )
        else:
            client.openqa_request("POST", "test_suites", params=params)

    products = client.openqa_request("GET", "products")["Products"]
    for kiwi_product in KIWI_PRODUCTS:
        matching_products = [
            p for p in products if kiwi_product.equal_in_db(p)
        ]
        if len(matching_products) > 1:
            raise ValueError(f"Got {len(matching_products)} products matching"
                             f" {kiwi_product=}")
        elif len(matching_products) == 1:
            client.openqa_request(
                "PUT",
                f"products/{matching_products[0]['id']}",
                params=kiwi_product.__dict__,
            )
        else:
            client.openqa_request("POST",
                                  "products",
                                  params=kiwi_product.__dict__)

    machines = client.openqa_request("GET", "machines")["Machines"]
    sixty_four_bit_machine = [
        machine for machine in machines if machine["name"] == "64bit"
    ]
    if len(sixty_four_bit_machine) > 1:
        raise ValueError(
            f"Got {len(sixty_four_bit_machine)} machines with the name '64bit'"
        )
    elif len(sixty_four_bit_machine) == 0:
        client.openqa_request("POST",
                              "machines",
                              params=SIXTY_FOUR_BIT_MACHINE_SETTINGS)

    job_groups = client.openqa_request("GET", "job_groups")
    matching_job_groups = [
        g for g in job_groups if g["name"] == KIWI_JOB_GROUP_NAME
    ]
    if len(matching_job_groups) > 1:
        raise ValueError(
            f"Got {len(matching_job_groups)} job groups with the name " +
            KIWI_JOB_GROUP_NAME)
    elif len(matching_job_groups) == 1:
        grp_id = matching_job_groups[0]["id"]
        client.openqa_request(
            "PUT",
            f"job_groups/{grp_id}",
            data={
                "name": KIWI_JOB_GROUP_NAME,
            },
        )
    else:
        grp_id = client.openqa_request(
            "POST",
            "job_groups",
            data={
                "name": KIWI_JOB_GROUP_NAME,
                "template": KIWI_JOB_TEMPLATE,
            },
        )["id"]

    # (re-)apply the job template schedule to the group
    client.openqa_request(
        "POST",
        f"job_templates_scheduling/{grp_id}",
        data={
            "name": KIWI_JOB_GROUP_NAME,
            "template": KIWI_JOB_TEMPLATE,
            "schema": "JobTemplates-01.yaml",
        },
    )
class OpenQABot(ReviewBot.ReviewBot):
    """Review bot that schedules openQA jobs for maintenance releases and
    tracks their progress.
    """

    def __init__(self, *args, **kwargs):
        self.force = False
        self.openqa = None
        self.do_comments = True
        # pop bot-specific kwargs before handing the rest to ReviewBot
        if 'force' in kwargs:
            if kwargs['force'] is True:
                self.force = True
            del kwargs['force']
        if 'openqa' in kwargs:
            self.openqa = OpenQA_Client(server=kwargs['openqa'])
            del kwargs['openqa']
        if 'do_comments' in kwargs:
            if kwargs['do_comments'] is not None:
                self.do_comments = kwargs['do_comments']
            del kwargs['do_comments']

        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.logger.debug(self.do_comments)

        self.commentapi = CommentAPI(self.apiurl)
        # project name -> latest BUILD seen in its openQA jobs
        self.update_test_builds = dict()

    def gather_test_builds(self):
        """Record the latest BUILD number of the current jobs per target."""
        for prj, u in TARGET_REPO_SETTINGS[self.openqa.baseurl].items():
            buildnr = 0
            for j in self.jobs_for_target(u):
                buildnr = j['settings']['BUILD']
            self.update_test_builds[prj] = buildnr

    # reimplemention from baseclass
    def check_requests(self):

        # first calculate the latest build number for current jobs
        self.gather_test_builds()

        started = []
        all_done = True
        # then check progress on running incidents
        for req in self.requests:
            # just patch apiurl in to avoid having to pass it around
            req.apiurl = self.apiurl
            jobs = self.request_get_openqa_jobs(req, incident=True, test_repo=True)
            ret = self.calculate_qa_status(jobs)
            if ret != QA_UNKNOWN:
                started.append(req)
                if ret == QA_INPROGRESS:
                    all_done = False

        all_requests = self.requests
        self.requests = started
        ReviewBot.ReviewBot.check_requests(self)

        if not all_done:
            return

        self.requests = all_requests

        # now make sure the jobs are for current repo
        for prj, u in TARGET_REPO_SETTINGS[self.openqa.baseurl].items():
            self.trigger_build_for_target(prj, u)

        ReviewBot.ReviewBot.check_requests(self)

    def check_action_maintenance_release(self, req, a):
        """Collect patchinfo binaries of the release action and trigger the
        matching openQA updates. Returns None when not applicable."""
        # we only look at the binaries of the patchinfo
        if a.src_package != 'patchinfo':
            return None

        if a.tgt_project not in PROJECT_OPENQA_SETTINGS:
            # Logger.warn is a deprecated alias of Logger.warning
            self.logger.warning("not handling %s" % a.tgt_project)
            return None

        packages = []
        patch_id = None
        # patchinfo collects the binaries and is build for an
        # unpredictable architecture so we need iterate over all
        url = osc.core.makeurl(
            self.apiurl,
            ('build', a.src_project, a.tgt_project.replace(':', '_')))
        root = ET.parse(osc.core.http_GET(url)).getroot()
        for arch in [n.attrib['name'] for n in root.findall('entry')]:
            query = {'nosource': 1}
            url = osc.core.makeurl(
                self.apiurl,
                ('build', a.src_project, a.tgt_project.replace(':', '_'), arch, a.src_package),
                query=query)

            root = ET.parse(osc.core.http_GET(url)).getroot()

            for binary in root.findall('binary'):
                m = pkgname_re.match(binary.attrib['filename'])
                if m:
                    # can't use arch here as the patchinfo mixes all
                    # archs
                    packages.append(Package(m.group('name'), m.group('version'), m.group('release')))
                elif binary.attrib['filename'] == 'updateinfo.xml':
                    url = osc.core.makeurl(
                        self.apiurl,
                        ('build', a.src_project, a.tgt_project.replace(':', '_'), arch, a.src_package, 'updateinfo.xml'))
                    ui = ET.parse(osc.core.http_GET(url)).getroot()
                    patch_id = ui.find('.//id').text

        if not packages:
            raise Exception("no packages found")

        self.logger.debug('found packages %s and patch id %s', ' '.join(set([p.name for p in packages])), patch_id)

        for update in PROJECT_OPENQA_SETTINGS[a.tgt_project]:
            settings = update.settings(a.src_project, a.tgt_project, packages, req)
            # guard BEFORE touching settings - the original assigned
            # INCIDENT_PATCH first, which would crash on None
            if settings is not None:
                settings['INCIDENT_PATCH'] = patch_id
                update.calculate_lastest_good_updates(self.openqa, settings)

                self.logger.info("posting %s %s %s", settings['VERSION'], settings['ARCH'], settings['BUILD'])
                self.logger.debug('\n'.join(["  %s=%s" % i for i in settings.items()]))
                if not self.dryrun:
                    try:
                        ret = self.openqa.openqa_request('POST', 'isos', data=settings, retries=1)
                        self.logger.info(pformat(ret))
                    except JSONDecodeError as e:
                        self.logger.error(e)
                        # TODO: record error
                    except openqa_exceptions.RequestError as e:
                        self.logger.error(e)
class OpenQABot(ReviewBot.ReviewBot):
    """Trigger openQA incident tests for maintenance release requests.

    Note: the previous docstring ("check ABI of library packages") was
    copy-pasted from another bot and did not describe this class.
    """

    def __init__(self, *args, **kwargs):
        # Consume our own keyword arguments before delegating to
        # ReviewBot, which does not know about them.
        self.force = False
        self.openqa = None
        self.do_comments = True
        if 'force' in kwargs:
            if kwargs['force'] is True:
                self.force = True
            del kwargs['force']
        if 'openqa' in kwargs:
            self.openqa = OpenQA_Client(server=kwargs['openqa'])
            del kwargs['openqa']
        if 'do_comments' in kwargs:
            if kwargs['do_comments'] is not None:
                self.do_comments = kwargs['do_comments']
            del kwargs['do_comments']

        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.logger.debug(self.do_comments)

        self.commentapi = CommentAPI(self.apiurl)
        self.update_test_builds = dict()

    def prepare_review(self):
        # Kick off builds for all configured target repositories before
        # the individual requests are reviewed.
        for prj, u in TARGET_REPO_SETTINGS.items():
            self.trigger_build_for_target(prj, u)

    def check_action_maintenance_release(self, req, a):
        """Schedule openQA jobs for a maintenance release request.

        Returns None when the action is not applicable (not a patchinfo
        package, or no openQA settings exist for the target project).
        Raises when the patchinfo contains no binaries at all.
        """
        # we only look at the binaries of the patchinfo
        if a.src_package != 'patchinfo':
            return None

        if a.tgt_project not in PROJECT_OPENQA_SETTINGS:
            # warning() instead of the deprecated warn() alias
            self.logger.warning("not handling %s" % a.tgt_project)
            return None

        packages = []
        # patchinfo collects the binaries and is build for an
        # unpredictable architecture so we need iterate over all
        url = osc.core.makeurl(
            self.apiurl,
            ('build', a.src_project, a.tgt_project.replace(':', '_')))
        root = ET.parse(osc.core.http_GET(url)).getroot()
        for arch in [n.attrib['name'] for n in root.findall('entry')]:
            query = {'nosource': 1}
            url = osc.core.makeurl(
                self.apiurl,
                ('build', a.src_project, a.tgt_project.replace(':', '_'), arch, a.src_package),
                query=query)

            root = ET.parse(osc.core.http_GET(url)).getroot()

            for binary in root.findall('binary'):
                m = pkgname_re.match(binary.attrib['filename'])
                if m:
                    # can't use arch here as the patchinfo mixes all
                    # archs
                    packages.append(Package(m.group('name'), m.group('version'), m.group('release')))

        if not packages:
            raise Exception("no packages found")

        self.logger.debug('found packages %s', ' '.join(set([p.name for p in packages])))

        for update in PROJECT_OPENQA_SETTINGS[a.tgt_project]:
            settings = update.settings(a.src_project, a.tgt_project, packages, req)
            if settings is not None:
                self.logger.info("posting %s %s %s", settings['VERSION'], settings['ARCH'], settings['BUILD'])
                self.logger.debug('\n'.join(["  %s=%s" % i for i in settings.items()]))
                if not self.dryrun:
                    try:
                        ret = self.openqa.openqa_request('POST', 'isos', data=settings, retries=1)
                        self.logger.info(pformat(ret))
                    # 'except X as e' (PEP 3110): the original Python-2-only
                    # 'except X, e' form is a SyntaxError on Python 3
                    except JSONDecodeError as e:
                        self.logger.error(e)
                        # TODO: record error
                    except openqa_exceptions.RequestError as e:
                        self.logger.error(e)
示例#25
0
def start_amqp():
    """Connect to the AMQP bus and consume OBS/openQA events forever.

    Passively declares the 'pubsub' topic exchange, binds an exclusive
    server-named queue to the OBS and openQA routing keys taken from the
    global config, and then blocks in the consume loop.
    """
    # (the previous 'pending_jobs = 0' local was never used and is gone)
    connection = pika.BlockingConnection(
        pika.URLParameters(config["rabbit_server"]))
    channel = connection.channel()

    # passive=True: only verify the exchange exists, never create it
    channel.exchange_declare(exchange='pubsub',
                             exchange_type='topic',
                             passive=True,
                             durable=True)

    # exclusive, server-named queue - it disappears with the connection
    result = channel.queue_declare(exclusive=True)
    queue_name = result.method.queue

    channel.queue_bind(exchange='pubsub',
                       queue=queue_name,
                       routing_key=config["amqp_obskey"])
    channel.queue_bind(exchange='pubsub',
                       queue=queue_name,
                       routing_key=config["amqp_openqakey"])

    # no_ack=True: fire-and-forget, messages are never acknowledged
    channel.basic_consume(callback, queue=queue_name, no_ack=True)

    channel.start_consuming()


# Module-level entry point: create the openQA client, then block in the
# AMQP consume loop until interrupted (CTRL+C).
client = OpenQA_Client(server=config["openqa_url"])
print(' [*] Waiting for SLE12-SP4 kernel builds. Abort with CTRL+C')
start_amqp()
示例#26
0
    # command line interface: openQA instance plus source/target directories
    parser.add_argument('--openqa', type=str, required=True, help='OpenQA URL')
    parser.add_argument('--repos',
                        type=str,
                        required=True,
                        help='Directory to read from')
    parser.add_argument('--to',
                        type=str,
                        required=True,
                        help='Directory to commit into')

    global args
    args = parser.parse_args()
    global logger
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)

    openqa = OpenQA_Client(server=args.openqa)

    # build the set of project/repository pairs openQA wants to be
    # notified about
    interesting_repos = dict()
    # NOTE(review): 'list' shadows the builtin of the same name
    list = openqa.openqa_request('GET', 'obs_rsync')
    for repopair in list:
        project, repository = repopair
        interesting_repos[f'{project}_-_{repository}'] = 1

    # NOTE(review): a client for the same server was already created
    # above; this second instantiation looks redundant - confirm before
    # removing
    openqa = OpenQA_Client(server=args.openqa)
    # notify openQA only for repos it declared interest in
    for state in glob.glob('{}/*.yaml'.format(args.repos)):
        state = basename(state).replace('.yaml', '')
        if not state in interesting_repos:
            continue
        notify_project(openqa, state)
示例#27
0
class ToTestPublisher(ToTestManager):
    """Publishes a tested ToTest snapshot once its openQA results pass."""

    def __init__(self, tool):
        ToTestManager.__init__(self, tool)

    def setup(self, project):
        """Prepare publisher state and the openQA client for *project*."""
        super(ToTestPublisher, self).setup(project)
        self.openqa = OpenQA_Client(server=self.project.openqa_server)
        self.update_pinned_descr = False
        self.load_issues_to_ignore()

    def overall_result(self, snapshot):
        """Analyze the openQA jobs of a given snapshot Returns a QAResult"""

        if snapshot is None:
            return QAResult.failed

        jobs = self.find_openqa_results(snapshot)

        # collected here; consumed later by send_amqp_event()
        self.failed_relevant_jobs = []
        self.failed_ignored_jobs = []

        if len(jobs) < self.project.jobs_num:  # not yet scheduled
            self.logger.warning('we have only %s jobs' % len(jobs))
            return QAResult.inprogress

        in_progress = False
        for job in jobs:
            # print json.dumps(job, sort_keys=True, indent=4)
            if job['result'] in ('failed', 'incomplete', 'skipped', 'user_cancelled', 'obsoleted', 'parallel_failed'):
                # inspect the job's comments for bug references and for
                # ttm's own label / '@ttm ignore' markers
                # print json.dumps(job, sort_keys=True, indent=4), jobname
                url = makeurl(self.project.openqa_server,
                              ['api', 'v1', 'jobs', str(job['id']), 'comments'])
                f = self.api.retried_GET(url)
                comments = json.load(f)
                refs = set()
                labeled = 0
                to_ignore = False
                for comment in comments:
                    for ref in comment['bugrefs']:
                        refs.add(str(ref))
                    if comment['userName'] == 'ttm' and comment['text'] == 'label:unknown_failure':
                        labeled = comment['id']
                    if re.search(r'@ttm:? ignore', comment['text']):
                        to_ignore = True
                # to_ignore can happen with or without refs
                ignored = True if to_ignore else len(refs) > 0
                build_nr = str(job['settings']['BUILD'])
                for ref in refs:
                    if ref not in self.issues_to_ignore:
                        if to_ignore:
                            self.issues_to_ignore[ref] = build_nr
                            self.update_pinned_descr = True
                        else:
                            # a known bugref that nobody asked to ignore
                            # makes the failure relevant
                            ignored = False
                    else:
                        # update reference
                        self.issues_to_ignore[ref] = build_nr

                if ignored:
                    self.failed_ignored_jobs.append(job['id'])
                    if labeled:
                        text = 'Ignored issue' if len(refs) > 0 else 'Ignored failure'
                        # remove flag - unfortunately can't delete comment unless admin
                        data = {'text': text}
                        if self.dryrun:
                            self.logger.info('Would label {} with: {}'.format(job['id'], text))
                        else:
                            self.openqa.openqa_request(
                                'PUT', 'jobs/%s/comments/%d' % (job['id'], labeled), data=data)

                    self.logger.info('job %s failed, but was ignored', job['name'])
                else:
                    self.failed_relevant_jobs.append(job['id'])
                    if not labeled and len(refs) > 0:
                        data = {'text': 'label:unknown_failure'}
                        if self.dryrun:
                            self.logger.info('Would label {} as unknown'.format(job['id']))
                        else:
                            self.openqa.openqa_request(
                                'POST', 'jobs/%s/comments' % job['id'], data=data)

                    joburl = '%s/tests/%s' % (self.project.openqa_server, job['id'])
                    self.logger.info('job %s failed, see %s', job['name'], joburl)

            elif job['result'] == 'passed' or job['result'] == 'softfailed':
                continue
            elif job['result'] == 'none':
                # still running unless it was cancelled
                if job['state'] != 'cancelled':
                    in_progress = True
            else:
                raise Exception(job['result'])

        self.save_issues_to_ignore()

        if len(self.failed_relevant_jobs) > 0:
            return QAResult.failed

        if in_progress:
            return QAResult.inprogress

        return QAResult.passed

    def send_amqp_event(self, current_snapshot, current_result):
        """Publish the snapshot result on the AMQP bus (best effort)."""
        amqp_url = osc.conf.config.get('ttm_amqp_url')
        if not amqp_url:
            self.logger.debug('No ttm_amqp_url configured in oscrc - skipping amqp event emission')
            return

        self.logger.debug('Sending AMQP message')
        # strip the trailing 'ed' of the result name for the topic,
        # e.g. 'failed' -> 'fail'
        inf = re.sub(r'ed$', '', str(current_result))
        msg_topic = '%s.ttm.build.%s' % (self.project.base.lower(), inf)
        msg_body = json.dumps({
            'build': current_snapshot,
            'project': self.project.name,
            'failed_jobs': {
                'relevant': self.failed_relevant_jobs,
                'ignored': self.failed_ignored_jobs,
            }
        })

        # send amqp event
        tries = 7  # arbitrary
        for t in range(tries):
            try:
                notify_connection = pika.BlockingConnection(pika.URLParameters(amqp_url))
                notify_channel = notify_connection.channel()
                notify_channel.exchange_declare(exchange='pubsub', exchange_type='topic', passive=True, durable=True)
                notify_channel.basic_publish(exchange='pubsub', routing_key=msg_topic, body=msg_body)
                notify_connection.close()
                break
            except pika.exceptions.ConnectionClosed as e:
                self.logger.warn('Sending AMQP event did not work: %s. Retrying try %s out of %s' % (e, t, tries))
        else:
            # for/else: only reached when every try failed
            self.logger.error('Could not send out AMQP event for %s tries, aborting.' % tries)

    def publish(self, project, force=False):
        """Evaluate openQA results and publish the snapshot when it passed.

        Returns a QAResult, or None when there is nothing to do.
        """
        self.setup(project)

        if not self.get_status('testing'):
            # migrating to the attribute status

            try:
                self.update_status('testing', self.version_from_totest_project())
            except NotFoundException:
                self.logger.error('Nothing in totest - release something first')
                return None

            self.update_status('publishing', self.api.pseudometa_file_load(self.version_file('snapshot')))

        current_snapshot = self.get_status('testing')

        # NOTE(review): group_id is unused in this method - confirm
        # before removing the call
        group_id = self.openqa_group_id()

        if self.get_status('publishing') == current_snapshot:
            self.logger.info('{} is already publishing'.format(current_snapshot))
            # migrating - if there is no published entry, the last publish call
            # didn't wait for publish - and as such didn't set published state
            if self.get_status('published') != current_snapshot:
                return QAResult.passed
            return None

        current_result = self.overall_result(current_snapshot)
        current_qa_version = self.current_qa_version()

        self.logger.info('current_snapshot {}: {}'.format(current_snapshot, str(current_result)))
        self.logger.debug('current_qa_version {}'.format(current_qa_version))

        self.send_amqp_event(current_snapshot, current_result)

        if current_result == QAResult.failed:
            self.update_status('failed', current_snapshot)
            return QAResult.failed
        else:
            self.update_status('failed', '')

        if current_result != QAResult.passed:
            return QAResult.inprogress

        if current_qa_version != current_snapshot:
            # We reached a very bad status: openQA testing is 'done', but not of the same version
            # currently in test project. This can happen when 'releasing' the
            # product failed
            raise Exception('Publishing stopped: tested version (%s) does not match version in test project (%s)'
                            % (current_qa_version, current_snapshot))

        self.publish_factory_totest()
        self.write_version_to_dashboard('snapshot', current_snapshot)
        self.update_status('publishing', current_snapshot)
        return QAResult.passed

    def wait_for_published(self, project, force=False):
        """Wait until the test project finished publishing, then record it."""
        self.setup(project)

        if not force:
            wait_time = 20
            while not self.all_repos_done(self.project.test_project):
                if self.dryrun:
                    self.logger.info('{} is still not published, do not wait as dryrun.'.format(self.project.test_project))
                    return
                self.logger.info('{} is still not published, waiting {} seconds'.format(self.project.test_project, wait_time))
                time.sleep(wait_time)

        current_snapshot = self.get_status('publishing')
        if self.dryrun:
            self.logger.info('Publisher finished, updating published snapshot to {}'.format(current_snapshot))
            return

        self.update_status('published', current_snapshot)
        group_id = self.openqa_group_id()
        if not group_id:
            return

        self.add_published_tag(group_id, current_snapshot)
        if self.update_pinned_descr:
            self.update_openqa_status_message(group_id)

    def find_openqa_results(self, snapshot):
        """Return the openqa jobs of a given snapshot and filter out the
        cloned jobs

        """

        url = makeurl(self.project.openqa_server,
                      ['api', 'v1', 'jobs'], {'group': self.project.openqa_group, 'build': snapshot, 'latest': 1})
        f = self.api.retried_GET(url)
        jobs = []
        for job in json.load(f)['jobs']:
            if job['clone_id'] or job['result'] == 'obsoleted':
                continue
            # strip the snapshot id so names are comparable across builds
            job['name'] = job['name'].replace(snapshot, '')
            jobs.append(job)
        return jobs

    def add_published_tag(self, group_id, snapshot):
        """Add a 'published' tag comment to the openQA job group."""
        if self.dryrun:
            return

        # NOTE(review): url is computed but never used - the request
        # below goes through openqa_request; confirm before removing
        url = makeurl(self.project.openqa_server,
                      ['api', 'v1', 'groups', str(group_id), 'comments'])

        status_flag = 'published'
        data = {'text': 'tag:{}:{}:{}'.format(snapshot, status_flag, status_flag) }
        self.openqa.openqa_request('POST', 'groups/%s/comments' % group_id, data=data)

    def openqa_group_id(self):
        """Return the id of the configured openQA job group, or None."""
        url = makeurl(self.project.openqa_server,
                      ['api', 'v1', 'job_groups'])
        f = self.api.retried_GET(url)
        job_groups = json.load(f)
        for jg in job_groups:
            if jg['name'] == self.project.openqa_group:
                return jg['id']

        self.logger.debug('No openQA group id found for status comment update, ignoring')

    def update_openqa_status_message(self, group_id):
        """Create or update the pinned group comment listing ignored issues."""
        pinned_ignored_issue = 0
        issues = ' , '.join(self.issues_to_ignore.keys())
        msg = 'pinned-description: Ignored issues\r\n\r\n{}'.format(issues)
        data = {'text': msg}

        # look for an existing pinned comment by 'ttm' so it is updated
        # in place instead of duplicated
        url = makeurl(self.project.openqa_server,
                      ['api', 'v1', 'groups', str(group_id), 'comments'])
        f = self.api.retried_GET(url)
        comments = json.load(f)
        for comment in comments:
            if comment['userName'] == 'ttm' and \
                    comment['text'].startswith('pinned-description: Ignored issues'):
                pinned_ignored_issue = comment['id']

        self.logger.debug('Writing openQA status message: {}'.format(data))
        if not self.dryrun:
            if pinned_ignored_issue:
                self.openqa.openqa_request(
                    'PUT', 'groups/%s/comments/%d' % (group_id, pinned_ignored_issue), data=data)
            else:
                self.openqa.openqa_request(
                    'POST', 'groups/%s/comments' % group_id, data=data)

    def load_issues_to_ignore(self):
        """Load the issue->build map from the 'IgnoredIssues' attribute."""
        text = self.api.attribute_value_load('IgnoredIssues')
        if text:
            root = yaml.safe_load(text)
            self.issues_to_ignore = root.get('last_seen')
        else:
            self.issues_to_ignore = dict()

    def save_issues_to_ignore(self):
        """Persist the issue->build map, unless this is a dry run."""
        if self.dryrun:
            return
        text = yaml.dump({'last_seen': self.issues_to_ignore}, default_flow_style=False)
        self.api.attribute_value_save('IgnoredIssues', text)

    def publish_factory_totest(self):
        """Enable publishing on the test project (and release containers)."""
        self.logger.info('Publish test project content')
        if self.dryrun or self.project.do_not_release:
            return
        if self.project.container_products:
            self.logger.info('Releasing container products from ToTest')
            for container in self.project.container_products:
                self.release_package(self.project.test_project, container.package,
                                      repository=self.project.totest_container_repo)
        self.api.switch_flag_in_prj(
            self.project.test_project, flag='publish', state='enable',
            repository=self.project.product_repo)

        if self.project.totest_images_repo != self.project.product_repo:
            self.logger.info('Publish test project content (image_products)')
            self.api.switch_flag_in_prj(self.project.test_project, flag='publish', state='enable',
            repository=self.project.totest_images_repo)
示例#28
0
class Listener(PubSubConsumer):
    """AMQP listener dispatching OBS/openQA events to registered projects."""

    def __init__(self, amqp_prefix, openqa_url):
        super(Listener, self).__init__(amqp_prefix, logging.getLogger(__name__))
        self.projects = []
        self.amqp_prefix = amqp_prefix
        self.openqa_url = openqa_url
        self.openqa = OpenQA_Client(server=openqa_url)

    def routing_keys(self):
        """Return the routing keys this listener subscribes to."""
        ret = []
        for suffix in ['.obs.repo.published', '.openqa.job.done',
                       '.openqa.job.create', '.openqa.job.restart']:
            ret.append(self.amqp_prefix + suffix)
        return ret

    def add(self, project):
        """Register a project and hand it a back-reference to us."""
        project.listener = self
        self.projects.append(project)

    def start_consuming(self):
        # now we are (re-)connected to the bus and need to fetch the
        # initial state
        for project in self.projects:
            self.logger.info('Fetching ISOs of %s', project.name)
            project.init()
        self.logger.info('Finished fetching initial ISOs, listening')
        super(Listener, self).start_consuming()

    def jobs_for_iso(self, iso):
        """Return the current, latest openQA jobs scheduled for *iso*."""
        values = {
            'iso': iso,
            'scope': 'current',
            'latest': '1',
        }
        return self.openqa.openqa_request('GET', 'jobs', values)['jobs']

    def get_step_url(self, testurl, modulename):
        """Return a deep link to the first failed step of a test module."""
        failurl = testurl + '/modules/{!s}/fails'.format(quote_plus(modulename))
        fails = requests.get(failurl).json()
        failed_step = fails.get('first_failed_step', 1)
        return "{!s}#step/{!s}/{:d}".format(testurl, modulename, failed_step)

    def test_url(self, job):
        """URL for a job's test page, linking to the failed module if any."""
        url = self.openqa_url + ("/tests/%d" % job['id'])
        if job['result'] == 'failed':
            for module in job['modules']:
                if module['result'] == 'failed':
                    return self.get_step_url(url, module['name'])
        return url

    def on_published_repo(self, payload):
        for p in self.projects:
            p.check_published_repo(str(payload['project']), str(payload['repo']), str(payload['buildid']))

    def on_openqa_job(self, iso):
        self.logger.debug('openqa_job_change %s', iso)
        for p in self.projects:
            p.openqa_job_change(iso)

    def on_message(self, unused_channel, method, properties, body):
        # Fix: use the instance attribute instead of the module-global
        # 'amqp_prefix' - the global broke listeners constructed with a
        # different prefix.
        if method.routing_key == '{}.obs.repo.published'.format(self.amqp_prefix):
            self.on_published_repo(json.loads(body))
        # NOTE(review): the dots are unescaped, so this matches any
        # character; harmless for the expected keys but worth confirming
        elif re.search(r'.openqa.', method.routing_key):
            self.on_openqa_job(json.loads(body).get('ISO'))
        else:
            self.logger.warning("unknown rabbitmq message {}".format(method.routing_key))
示例#29
0
 def setup(self, project):
     """Prepare publisher state for *project* and create its openQA client."""
     super(ToTestPublisher, self).setup(project)
     # openQA server location comes from the project configuration
     self.openqa = OpenQA_Client(server=self.project.openqa_server)
     self.update_pinned_descr = False
     self.load_issues_to_ignore()
示例#30
0
class OpenQABot(ReviewBot.ReviewBot):
    """Trigger openQA incident tests for maintenance release requests.

    Note: the previous docstring ("check ABI of library packages") was
    copy-pasted from another bot and did not describe this class.
    """
    def __init__(self, *args, **kwargs):
        # Consume our own keyword arguments before delegating to
        # ReviewBot, which does not know about them.
        self.force = False
        self.openqa = None
        self.do_comments = True
        if 'force' in kwargs:
            if kwargs['force'] is True:
                self.force = True
            del kwargs['force']
        if 'openqa' in kwargs:
            self.openqa = OpenQA_Client(server=kwargs['openqa'])
            del kwargs['openqa']
        if 'do_comments' in kwargs:
            if kwargs['do_comments'] is not None:
                self.do_comments = kwargs['do_comments']
            del kwargs['do_comments']

        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)

        self.logger.debug(self.do_comments)

        self.commentapi = CommentAPI(self.apiurl)
        self.update_test_builds = dict()

    def prepare_review(self):
        # Kick off builds for all configured target repositories before
        # the individual requests are reviewed.
        for prj, u in TARGET_REPO_SETTINGS.items():
            self.trigger_build_for_target(prj, u)

    def check_action_maintenance_release(self, req, a):
        """Schedule openQA jobs for a maintenance release request.

        Returns None when the action is not applicable; raises when the
        patchinfo contains no binaries at all.
        """
        # we only look at the binaries of the patchinfo
        if a.src_package != 'patchinfo':
            return None

        if a.tgt_project not in PROJECT_OPENQA_SETTINGS:
            # warning() instead of the deprecated warn() alias
            self.logger.warning("not handling %s" % a.tgt_project)
            return None

        packages = []
        # patchinfo collects the binaries and is build for an
        # unpredictable architecture so we need iterate over all
        url = osc.core.makeurl(
            self.apiurl,
            ('build', a.src_project, a.tgt_project.replace(':', '_')))
        root = ET.parse(osc.core.http_GET(url)).getroot()
        for arch in [n.attrib['name'] for n in root.findall('entry')]:
            query = {'nosource': 1}
            url = osc.core.makeurl(
                self.apiurl,
                ('build', a.src_project, a.tgt_project.replace(
                    ':', '_'), arch, a.src_package),
                query=query)

            root = ET.parse(osc.core.http_GET(url)).getroot()

            for binary in root.findall('binary'):
                m = pkgname_re.match(binary.attrib['filename'])
                if m:
                    # can't use arch here as the patchinfo mixes all
                    # archs
                    packages.append(
                        Package(m.group('name'), m.group('version'),
                                m.group('release')))

        if not packages:
            raise Exception("no packages found")

        self.logger.debug('found packages %s',
                          ' '.join(set([p.name for p in packages])))

        for update in PROJECT_OPENQA_SETTINGS[a.tgt_project]:
            settings = update.settings(a.src_project, a.tgt_project, packages,
                                       req)
            if settings is not None:
                self.logger.info("posting %s %s %s", settings['VERSION'],
                                 settings['ARCH'], settings['BUILD'])
                self.logger.debug('\n'.join(
                    ["  %s=%s" % i for i in settings.items()]))
                if not self.dryrun:
                    try:
                        ret = self.openqa.openqa_request('POST',
                                                         'isos',
                                                         data=settings,
                                                         retries=1)
                        self.logger.info(pformat(ret))
                    # 'except X as e' (PEP 3110): the original Python-2-only
                    # 'except X, e' form is a SyntaxError on Python 3
                    except JSONDecodeError as e:
                        self.logger.error(e)
                        # TODO: record error
                    except openqa_exceptions.RequestError as e:
                        self.logger.error(e)
class ToTestBase(object):
    """Base class to store the basic interface"""

    # default OBS repository/arch the product images are built in
    product_repo = 'images'
    product_arch = 'local'
    livecd_repo = 'images'
    totest_container_repo = 'containers'

    # product package lists; subclasses override with their real products
    main_products = []
    ftp_products = []
    container_products = []
    livecd_products = []
    image_products = []

    def __init__(self,
                 project,
                 dryrun=False,
                 norelease=False,
                 api_url=None,
                 openqa_server='https://openqa.opensuse.org',
                 test_subproject=None):
        self.project = project
        self.dryrun = dryrun
        self.norelease = norelease
        # fall back to the configured osc API URL when none is given
        if not api_url:
            api_url = osc.conf.config['apiurl']
        self.api = StagingAPI(api_url, project=project)
        self.openqa_server = openqa_server
        if not test_subproject:
            test_subproject = 'ToTest'
        # e.g. 'openSUSE:Factory:ToTest'
        self.test_project = '%s:%s' % (self.project, test_subproject)
        self.openqa = OpenQA_Client(server=openqa_server)
        # needs self.api, so must run after StagingAPI is constructed
        self.load_issues_to_ignore()
        # e.g. 'openSUSE' for 'openSUSE:Factory'
        self.project_base = project.split(':')[0]
        self.update_pinned_descr = False
        self.amqp_url = osc.conf.config.get('ttm_amqp_url')

    def load_issues_to_ignore(self):
        """Load the ignored-issue map from the 'IgnoredIssues' attribute.

        Populates self.issues_to_ignore with a dict mapping issue
        reference -> build it was last seen in (empty when unset).
        """
        text = self.api.attribute_value_load('IgnoredIssues')
        if text:
            # safe_load: the attribute holds plain data, and yaml.load
            # without an explicit Loader is unsafe/deprecated; this also
            # matches the safe_load used elsewhere in this file
            root = yaml.safe_load(text)
            self.issues_to_ignore = root.get('last_seen')
        else:
            self.issues_to_ignore = dict()

    def save_issues_to_ignore(self):
        """Persist the ignored-issue map, unless this is a dry run."""
        if self.dryrun:
            return
        payload = yaml.dump({'last_seen': self.issues_to_ignore},
                            default_flow_style=False)
        self.api.attribute_value_save('IgnoredIssues', payload)

    def openqa_group(self):
        """Name of the openQA job group; defaults to the project name."""
        return self.project

    def iso_prefix(self):
        """Prefix used in ISO file names; defaults to the project name."""
        return self.project

    def jobs_num(self):
        """Job count expected before a snapshot counts as fully scheduled."""
        return 70

    def current_version(self):
        """Current version; delegates to the release package version."""
        return self.release_version()

    def binaries_of_product(self, project, product, repo=None, arch=None):
        """List the binary filenames built for *product* in *project*.

        Falls back to the class defaults for repo/arch and returns an
        empty list when the OBS request fails with an HTTP error.
        """
        repo = self.product_repo if repo is None else repo
        arch = self.product_arch if arch is None else arch

        url = self.api.makeurl(['build', project, repo, arch, product])
        try:
            response = self.api.retried_GET(url)
        except HTTPError:
            return []

        tree = ET.parse(response).getroot()
        return [node.get('filename') for node in tree.findall('binary')]

    def get_current_snapshot(self):
        """Return the current snapshot in the test project"""

        # e.g. 'openSUSE-Tumbleweed-NET-...-Snapshot<id>-Media.iso'
        pattern = r'%s-%s-NET-.*-Snapshot(.*)-Media.iso' % (self.project_base,
                                                           self.iso_prefix())
        product = '000product:%s-cd-mini-%s' % (self.project_base, self.arch())
        for binary in self.binaries_of_product(self.test_project, product):
            match = re.match(pattern, binary)
            if match:
                return match.group(1)

        return None

    def ftp_build_version(self, project, tree, base=None):
        """Extract the FTP tree build number from the product report file."""
        base = base or self.project_base
        for name in self.binaries_of_product(project, tree):
            match = re.match(r'%s.*Build(.*)-Media1.report' % base, name)
            if match:
                return match.group(1)
        raise NotFoundException("can't find %s ftp version" % project)

    def iso_build_version(self, project, tree, base=None, repo=None, arch=None):
        """Extract the build/snapshot number from an ISO or docker image.

        Raises NotFoundException when no binary name matches.
        """
        base = base or self.project_base
        pattern = re.compile(
            r'.*-(?:Build|Snapshot)([0-9.]+)(?:-Media.*\.iso|\.docker\.tar\.xz)')
        for name in self.binaries_of_product(project, tree, repo=repo,
                                             arch=arch):
            match = pattern.match(name)
            if match:
                return match.group(1)
        raise NotFoundException("can't find %s iso version" % project)

    def release_version(self):
        """Read the version from the release package's source rpm name."""
        url = self.api.makeurl([
            'build', self.project, 'standard', self.arch(),
            '000release-packages:%s-release' % self.project_base
        ])
        tree = ET.parse(self.api.retried_GET(url)).getroot()
        for node in tree.findall('binary'):
            filename = node.get('filename', '')
            match = re.match(r'.*-([^-]*)-[^-]*.src.rpm', filename)
            if match:
                return match.group(1)

        raise NotFoundException("can't find %s version" % self.project)

    def current_qa_version(self):
        """Version currently under QA, read from the dashboard file."""
        return self.api.pseudometa_file_load('version_totest')

    def find_openqa_results(self, snapshot):
        """Fetch the latest openQA jobs for *snapshot*, dropping cloned and
        obsoleted ones; the snapshot id is stripped from each job name."""

        query = {
            'group': self.openqa_group(),
            'build': snapshot,
            'latest': 1
        }
        url = makeurl(self.openqa_server, ['api', 'v1', 'jobs'], query)
        response = self.api.retried_GET(url)
        jobs = []
        for job in json.load(response)['jobs']:
            if job['clone_id'] or job['result'] == 'obsoleted':
                continue
            job['name'] = job['name'].replace(snapshot, '')
            jobs.append(job)
        return jobs

    def _result2str(self, result):
        """Human-readable name for a QA_* result constant."""
        if result == QA_INPROGRESS:
            return 'inprogress'
        if result == QA_FAILED:
            return 'failed'
        return 'passed'

    def find_failed_module(self, testmodules):
        """Return the name of the first fatal/important failed module.

        Non-blocking failures are only logged; returns None when no
        blocking failure is present.
        """
        # print json.dumps(testmodules, sort_keys=True, indent=4)
        for module in testmodules:
            if module['result'] != 'failed':
                continue
            flags = module['flags']
            if 'fatal' in flags or 'important' in flags:
                # the 'break' that followed this return was unreachable
                # dead code and has been removed
                return module['name']
            logger.info('%s %s %s' %
                        (module['name'], module['result'], module['flags']))

    def update_openqa_status_message(self):
        """Create or update the pinned openQA group comment carrying the
        ignored-issue list and the current snapshot status tag."""
        url = makeurl(self.openqa_server, ['api', 'v1', 'job_groups'])
        f = self.api.retried_GET(url)
        job_groups = json.load(f)
        group_id = 0
        for jg in job_groups:
            if jg['name'] == self.openqa_group():
                group_id = jg['id']
                break

        if not group_id:
            logger.debug(
                'No openQA group id found for status comment update, ignoring')
            return

        pinned_ignored_issue = 0
        issues = ' , '.join(self.issues_to_ignore.keys())
        # map the overall pipeline state to a single status flag
        status_flag = 'publishing' if self.status_for_openqa['is_publishing'] else \
            'preparing' if self.status_for_openqa['can_release'] else \
            'testing' if self.status_for_openqa['snapshotable'] else \
            'building'
        status_msg = "tag:{}:{}:{}".format(
            self.status_for_openqa['new_snapshot'], status_flag, status_flag)
        msg = "pinned-description: Ignored issues\r\n\r\n{}\r\n\r\n{}".format(
            issues, status_msg)
        data = {'text': msg}

        # look for an existing pinned comment by 'ttm' so we update it
        # in place instead of adding a duplicate
        url = makeurl(self.openqa_server,
                      ['api', 'v1', 'groups',
                       str(group_id), 'comments'])
        f = self.api.retried_GET(url)
        comments = json.load(f)
        for comment in comments:
            if comment['userName'] == 'ttm' and \
                    comment['text'].startswith('pinned-description: Ignored issues'):
                pinned_ignored_issue = comment['id']

        logger.debug('Writing openQA status message: {}'.format(data))
        if not self.dryrun:
            if pinned_ignored_issue:
                self.openqa.openqa_request('PUT',
                                           'groups/%s/comments/%d' %
                                           (group_id, pinned_ignored_issue),
                                           data=data)
            else:
                self.openqa.openqa_request('POST',
                                           'groups/%s/comments' % group_id,
                                           data=data)

    def overall_result(self, snapshot):
        """Analyze the openQA jobs of a given snapshot Returns a QAResult

        Side effects: fills self.failed_relevant_jobs and
        self.failed_ignored_jobs, may label openQA jobs via comments
        (unless dryrun), may update self.issues_to_ignore /
        self.update_pinned_descr, and persists the ignore list via
        save_issues_to_ignore().
        """

        if snapshot is None:
            return QA_FAILED

        jobs = self.find_openqa_results(snapshot)

        self.failed_relevant_jobs = []
        self.failed_ignored_jobs = []

        if len(jobs) < self.jobs_num():  # not yet scheduled
            logger.warning('we have only %s jobs' % len(jobs))
            return QA_INPROGRESS

        in_progress = False
        for job in jobs:
            # print json.dumps(job, sort_keys=True, indent=4)
            if job['result'] in ('failed', 'incomplete', 'skipped',
                                 'user_cancelled', 'obsoleted',
                                 'parallel_failed'):
                # print json.dumps(job, sort_keys=True, indent=4), jobname
                # inspect the job's comments: bug references and explicit
                # "@ttm ignore" requests decide whether the failure counts
                url = makeurl(
                    self.openqa_server,
                    ['api', 'v1', 'jobs',
                     str(job['id']), 'comments'])
                f = self.api.retried_GET(url)
                comments = json.load(f)
                refs = set()
                labeled = 0
                to_ignore = False
                for comment in comments:
                    for ref in comment['bugrefs']:
                        refs.add(str(ref))
                    if comment['userName'] == 'ttm' and comment[
                            'text'] == 'label:unknown_failure':
                        labeled = comment['id']
                    if re.search(r'@ttm:? ignore', comment['text']):
                        to_ignore = True
                # to_ignore can happen with or without refs
                ignored = True if to_ignore else len(refs) > 0
                build_nr = str(job['settings']['BUILD'])
                for ref in refs:
                    if ref not in self.issues_to_ignore:
                        if to_ignore:
                            # a new issue explicitly marked to ignore:
                            # remember it and refresh the pinned description
                            self.issues_to_ignore[ref] = build_nr
                            self.update_pinned_descr = True
                        else:
                            ignored = False
                    else:
                        # update reference
                        self.issues_to_ignore[ref] = build_nr

                if ignored:
                    self.failed_ignored_jobs.append(job['id'])
                    if labeled:
                        text = 'Ignored issue' if len(
                            refs) > 0 else 'Ignored failure'
                        # remove flag - unfortunately can't delete comment unless admin
                        data = {'text': text}
                        if self.dryrun:
                            logger.info("Would label {} with: {}".format(
                                job['id'], text))
                        else:
                            self.openqa.openqa_request('PUT',
                                                       'jobs/%s/comments/%d' %
                                                       (job['id'], labeled),
                                                       data=data)

                    logger.info("job %s failed, but was ignored", job['name'])
                else:
                    self.failed_relevant_jobs.append(job['id'])
                    if not labeled and len(refs) > 0:
                        # mark the job so a human can triage it
                        data = {'text': 'label:unknown_failure'}
                        if self.dryrun:
                            logger.info("Would label {} as unknown".format(
                                job['id']))
                        else:
                            self.openqa.openqa_request('POST',
                                                       'jobs/%s/comments' %
                                                       job['id'],
                                                       data=data)

                    joburl = '%s/tests/%s' % (self.openqa_server, job['id'])
                    logger.info("job %s failed, see %s", job['name'], joburl)

            elif job['result'] == 'passed' or job['result'] == 'softfailed':
                continue
            elif job['result'] == 'none':
                # 'none' means the job did not produce a result yet; a
                # cancelled job is simply skipped
                if job['state'] != 'cancelled':
                    in_progress = True
            else:
                raise Exception(job['result'])

        self.save_issues_to_ignore()

        if len(self.failed_relevant_jobs) > 0:
            return QA_FAILED

        if in_progress:
            return QA_INPROGRESS

        return QA_PASSED

    def all_repos_done(self, project, codes=None):
        """Check the build result of the project and only return True if all
        repos of that project are either published or unpublished

        """

        # coolo's experience says that 'finished' won't be
        # sufficient here, so don't try to add it :-)
        if not codes:
            codes = ['published', 'unpublished']

        url = self.api.makeurl(['build', project, '_result'],
                               {'code': 'failed'})
        response = self.api.retried_GET(url)
        root = ET.parse(response).getroot()
        ready = True
        for result in root.findall('result'):
            # ignore ports. 'factory' is used by arm for repos that are not
            # meant to use the totest manager.
            if result.get('repository') in ('ports', 'factory',
                                            'images_staging'):
                continue
            if result.get('dirty', '') == 'true':
                logger.info('%s %s %s -> %s' %
                            (result.get('project'), result.get('repository'),
                             result.get('arch'), 'dirty'))
                ready = False
            if result.get('code') not in codes:
                logger.info('%s %s %s -> %s' %
                            (result.get('project'), result.get('repository'),
                             result.get('arch'), result.get('code')))
                ready = False
        return ready

    def maxsize_for_package(self, package):
        """Return the maximum allowed ISO size in bytes for *package*.

        Returns None when no size limit applies (ftp trees, container
        images, non-oss addon).  Raises Exception for unknown packages.
        The patterns are checked in order; the first match wins.
        """
        size_limits = (
            (r'.*-mini-.*', 737280000),       # a CD needs to match
            (r'.*-dvd5-.*', 4700372992),      # a DVD needs to match
            (r'livecd-x11', 681574400),       # not a full CD
            (r'livecd-.*', 999999999),        # a GB stick
            (r'.*-(dvd9-dvd|cd-DVD)-.*', 8539996159),
            (r'.*-ftp-(ftp|POOL)-', None),
            # docker container has no size limit
            (r'opensuse-.*-image.*', None),
        )
        for pattern, limit in size_limits:
            if re.match(pattern, package):
                return limit

        if '-Addon-NonOss-ftp-ftp' in package:
            return None

        if 'JeOS' in package:
            return 4700372992

        raise Exception('No maxsize for {}'.format(package))

    def package_ok(self, project, package, repository, arch):
        """Checks one package in a project and returns True if it's succeeded

        Also enforces the per-package ISO size limit from
        maxsize_for_package() on any produced .iso binaries.
        """

        query = {'package': package, 'repository': repository, 'arch': arch}

        url = self.api.makeurl(['build', project, '_result'], query)
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        # [@code!='succeeded'] is not supported by ET
        failed = [
            status for status in root.findall("result/status")
            if status.get('code') != 'succeeded'
        ]

        # Use an explicit emptiness check here: the previous `any(failed)`
        # tested the truthiness of each Element, which is False for an
        # element without children, so a childless failed <status/> could
        # slip through unnoticed.
        if failed:
            logger.info(
                '%s %s %s %s -> %s' %
                (project, package, repository, arch, failed[0].get('code')))
            return False

        if not len(root.findall('result/status[@code="succeeded"]')):
            logger.info('No "succeeded" for %s %s %s %s' %
                        (project, package, repository, arch))
            return False

        maxsize = self.maxsize_for_package(package)
        if not maxsize:
            # no size limit applies to this package
            return True

        url = self.api.makeurl(['build', project, repository, arch, package])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            if not binary.get('filename', '').endswith('.iso'):
                continue
            isosize = int(binary.get('size', 0))
            if isosize > maxsize:
                logger.error('%s %s %s %s: %s' %
                             (project, package, repository, arch,
                              'too large by %s bytes' % (isosize - maxsize)))
                return False

        return True

    def is_snapshottable(self):
        """Check various conditions required for factory to be snapshotable

        All repos must be done and every product package (ftp, main,
        image, container and live) must have built successfully.
        """
        if not self.all_repos_done(self.project):
            return False

        for product in self.ftp_products + self.main_products:
            if not self.package_ok(self.project, product, self.product_repo,
                                   self.product_arch):
                return False

        for product in self.image_products + self.container_products:
            for arch in product.archs:
                if not self.package_ok(self.project, product.package,
                                       self.product_repo, arch):
                    return False

        if self.livecd_products:
            live_project = '%s:Live' % self.project
            if not self.all_repos_done(live_project):
                return False

            for product in self.livecd_products:
                for arch in product.archs:
                    if not self.package_ok(live_project, product.package,
                                           self.product_repo, arch):
                        return False

        return True

    def _release_package(self,
                         project,
                         package,
                         set_release=None,
                         repository=None,
                         target_project=None,
                         target_repository=None):
        """POST an OBS 'release' command for one package.

        Optional release name, source repository and release target are
        added to the query as given.  In dryrun/norelease mode the command
        is only logged.
        """
        query = {'cmd': 'release'}
        if set_release:
            query['setrelease'] = set_release
        if repository is not None:
            query['repository'] = repository
        if target_project is not None:
            # Both need to be set
            query['target_project'] = target_project
            query['target_repository'] = target_repository

        url = self.api.makeurl(['source', project, package], query=query)
        if self.dryrun or self.norelease:
            logger.info("release %s/%s (%s)" % (project, package, query))
        else:
            self.api.retried_POST(url)

    def _release(self, set_release=None):
        """Release every product category into :ToTest, in order:
        ftp trees, live CDs, image products, main products, containers.
        """
        for ftp_product in self.ftp_products:
            self._release_package(self.project,
                                  ftp_product,
                                  repository=self.product_repo)

        live_project = '%s:Live' % self.project
        for live_product in self.livecd_products:
            self._release_package(live_project,
                                  live_product.package,
                                  set_release=set_release,
                                  repository=self.livecd_repo)

        for image_product in self.image_products:
            self._release_package(self.project,
                                  image_product.package,
                                  set_release=set_release,
                                  repository=self.product_repo)

        for main_product in self.main_products:
            self._release_package(self.project,
                                  main_product,
                                  set_release=set_release,
                                  repository=self.product_repo)

        for container_product in self.container_products:
            # Containers are built in the same repo as other image products,
            # but released into a different repo in :ToTest
            self._release_package(self.project,
                                  container_product.package,
                                  repository=self.product_repo,
                                  target_project=self.test_project,
                                  target_repository=self.totest_container_repo)

    def update_totest(self, snapshot=None):
        """Disable publishing on the test project and release *snapshot*
        into it (as 'Snapshot<version>' when a snapshot is given)."""
        logger.info('Updating snapshot %s' % snapshot)
        release = None
        if snapshot:
            release = 'Snapshot%s' % snapshot
        if not (self.dryrun or self.norelease):
            self.api.switch_flag_in_prj(self.test_project,
                                        flag='publish',
                                        state='disable',
                                        repository=self.product_repo)

        self._release(set_release=release)

    def publish_factory_totest(self):
        """Enable publishing on the test project and re-release the
        container products out of :ToTest."""
        logger.info('Publish test project content')
        if not (self.dryrun or self.norelease):
            self.api.switch_flag_in_prj(self.test_project,
                                        flag='publish',
                                        state='enable',
                                        repository=self.product_repo)
        if not self.container_products:
            return
        logger.info('Releasing container products from ToTest')
        for container in self.container_products:
            self._release_package(self.test_project,
                                  container.package,
                                  repository=self.totest_container_repo)

    def totest_is_publishing(self):
        """Find out if the publishing flag is set in totest's _meta

        Returns True when an <enable> flag applies to the product repo
        (or to all repos), or when no <publish> flags exist at all
        (OBS defaults to enabled).
        """

        url = self.api.makeurl(['source', self.test_project, '_meta'])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        publish = root.find('publish')
        # A missing or empty <publish> element means the default (true).
        # Checked explicitly: relying on Element truthiness (as
        # `not root.find('publish')` did) is deprecated in ElementTree.
        if publish is None or len(publish) == 0:
            return True

        for flag in publish:
            # skip flags scoped to some other repository
            if flag.get('repository', None) not in [None, self.product_repo]:
                continue
            # arch-specific flags don't decide the overall state
            if flag.get('arch', None):
                continue
            if flag.tag == 'enable':
                return True
        return False

    def totest(self):
        """Main decision loop: evaluate QA results and release/publish.

        Determines the current snapshot's openQA result, decides whether a
        new snapshot can be released into :ToTest and whether the tested
        one can be published, then performs those actions (honouring
        dryrun/norelease elsewhere).
        """
        try:
            current_snapshot = self.get_current_snapshot()
        except NotFoundException as e:
            # nothing in test project (yet)
            logger.warn(e)
            current_snapshot = None
        new_snapshot = self.current_version()
        self.update_pinned_descr = False
        current_result = self.overall_result(current_snapshot)
        current_qa_version = self.current_qa_version()

        logger.info('current_snapshot %s: %s' %
                    (current_snapshot, self._result2str(current_result)))
        logger.debug('new_snapshot %s', new_snapshot)
        logger.debug('current_qa_version %s', current_qa_version)

        snapshotable = self.is_snapshottable()
        logger.debug("snapshotable: %s", snapshotable)
        can_release = ((current_snapshot is None
                        or current_result != QA_INPROGRESS) and snapshotable)

        # not overwriting
        if new_snapshot == current_qa_version:
            logger.debug("no change in snapshot version")
            can_release = False
        elif not self.all_repos_done(self.test_project):
            logger.debug("not all repos done, can't release")
            # the repos have to be done, otherwise we better not touch them
            # with a new release
            can_release = False

        self.send_amqp_event(current_snapshot, current_result)

        can_publish = (current_result == QA_PASSED)

        # already published
        totest_is_publishing = self.totest_is_publishing()
        if totest_is_publishing:
            logger.debug("totest already publishing")
            can_publish = False

        if self.update_pinned_descr:
            # overall_result() discovered new ignored issues; refresh the
            # pinned comment in the openQA job group
            self.status_for_openqa = {
                'current_snapshot': current_snapshot,
                'new_snapshot': new_snapshot,
                'snapshotable': snapshotable,
                'can_release': can_release,
                'is_publishing': totest_is_publishing,
            }
            self.update_openqa_status_message()

        if can_publish:
            if current_qa_version == current_snapshot:
                self.publish_factory_totest()
                self.write_version_to_dashboard("snapshot", current_snapshot)
                can_release = False  # we have to wait
            else:
                # We reached a very bad status: openQA testing is 'done', but not of the same version
                # currently in test project. This can happen when 'releasing' the
                # product failed
                raise Exception(
                    "Publishing stopped: tested version (%s) does not match version in test project (%s)"
                    % (current_qa_version, current_snapshot))

        if can_release:
            self.update_totest(new_snapshot)
            self.write_version_to_dashboard("totest", new_snapshot)

    def send_amqp_event(self, current_snapshot, current_result):
        """Publish a ttm build-result event on the AMQP 'pubsub' exchange.

        Topic is '<project_base>.ttm.build.<result>' (result with the
        trailing 'ed' stripped: pass/fail/inprogress); the body carries
        the build number and the failed-job lists.  No-op when
        ttm_amqp_url is not configured; retries on closed connections.
        """
        if not self.amqp_url:
            logger.debug(
                'No ttm_amqp_url configured in oscrc - skipping amqp event emission'
            )
            return

        logger.debug('Sending AMQP message')
        # 'passed' -> 'pass', 'failed' -> 'fail'; 'inprogress' is unchanged
        inf = re.sub(r"ed$", '', self._result2str(current_result))
        msg_topic = '%s.ttm.build.%s' % (self.project_base.lower(), inf)
        msg_body = json.dumps({
            'build': current_snapshot,
            'project': self.project,
            'failed_jobs': {
                'relevant': self.failed_relevant_jobs,
                'ignored': self.failed_ignored_jobs,
            }
        })

        # send amqp event
        tries = 7  # arbitrary
        for t in range(tries):
            try:
                notify_connection = pika.BlockingConnection(
                    pika.URLParameters(self.amqp_url))
                notify_channel = notify_connection.channel()
                notify_channel.exchange_declare(exchange='pubsub',
                                                exchange_type='topic',
                                                passive=True,
                                                durable=True)
                notify_channel.basic_publish(exchange='pubsub',
                                             routing_key=msg_topic,
                                             body=msg_body)
                notify_connection.close()
                break
            except pika.exceptions.ConnectionClosed as e:
                # logger.warn is a deprecated alias of warning(); also
                # report a 1-based attempt number (t counts from 0)
                logger.warning(
                    'Sending AMQP event did not work: %s. Retrying try %s out of %s'
                    % (e, t + 1, tries))
        else:
            # for/else: reached only when the loop never hit `break`,
            # i.e. all attempts failed
            logger.error(
                'Could not send out AMQP event for %s tries, aborting.' %
                tries)

    def release(self):
        """Release the current version into :ToTest without QA gating."""
        self.update_totest(self.current_version())

    def write_version_to_dashboard(self, target, version):
        """Persist *version* into the 'version_<target>' pseudometa file
        (skipped in dryrun/norelease mode)."""
        if self.dryrun or self.norelease:
            return
        self.api.pseudometa_file_ensure('version_%s' % target,
                                        version,
                                        comment='Update version')