Example #1
0
 def setUp(self):
     """
     Initialize the configuration
     """
     self.obs = OBS()
     Config('openSUSE:Factory')
     self.api = StagingAPI(APIURL, 'openSUSE:Factory')
Example #2
0
 def __init__(self, name):
     self.name = name
     Config(apiurl, name)
     self.api = StagingAPI(apiurl, name)
     self.staging_projects = dict()
     self.listener = None
     self.replace_string = self.api.attribute_value_load('OpenQAMapping')
Example #3
0
class CheckRepo(object):
    def __init__(self,
                 apiurl,
                 project,
                 readonly=False,
                 force_clean=False,
                 debug=False):
        """CheckRepo constructor."""
        self.apiurl = apiurl
        self.project = project
        self.staging = StagingAPI(apiurl, self.project)

        self.pkgcache = PkgCache(BINCACHE, force_clean=force_clean)

        # grouped = { id: staging, }
        self.grouped = {}
        # groups = { staging: [ids,], }
        self.groups = {}
        self._staging()
        self.readonly = readonly
        self.debug_enable = debug

    def debug(self, *args):
        if not self.debug_enable:
            return
        print ' '.join(
            [i if isinstance(i, basestring) else pformat(i) for i in args])

    def _staging(self):
        """Preload the groups of related request associated by the same
        staging project.

        """
        for project in self.staging.get_staging_projects():
            # Get all the request identifiers for the project
            requests = self.staging.get_prj_pseudometa(project)['requests']
            requests = [req['id'] for req in requests]

            # Note: Originally we also recovered the requests returned by
            # list_requests_in_prj().  I guess that if the staging
            # project is working properly, that method does not add any
            # new request to the list.
            if requests:
                self.groups[project] = requests
                self.grouped.update({req: project for req in requests})

    def get_request_state(self, request_id):
        """Return the current state of the request."""
        state = None
        url = makeurl(self.apiurl, ('request', str(request_id)))
        try:
            root = ET.parse(http_GET(url)).getroot()
            state = root.find('state').get('name')
        except urllib2.HTTPError, e:
            print('ERROR in URL %s [%s]' % (url, e))
        return state
Example #4
0
 def __init__(self, project, dryrun=False):
     self.project = project
     self.dryrun = dryrun
     self.api = StagingAPI(osc.conf.config['apiurl'],
                           project='openSUSE:%s' % project)
     self.openqa = OpenQA_Client(server='https://openqa.opensuse.org')
     self.issues_to_ignore = []
     self.issuefile = "{}_{}".format(self.project, ISSUE_FILE)
     if os.path.isfile(self.issuefile):
         with open(self.issuefile, 'r') as f:
             for line in f.readlines():
                 self.issues_to_ignore.append(line.strip())
Example #5
0
class CheckRepo(object):

    def __init__(self, apiurl, project, readonly=False, force_clean=False, debug=False):
        """CheckRepo constructor."""
        self.apiurl = apiurl
        self.project = project
        self.staging = StagingAPI(apiurl, self.project)

        self.pkgcache = PkgCache(BINCACHE, force_clean=force_clean)

        # grouped = { id: staging, }
        self.grouped = {}
        # groups = { staging: [ids,], }
        self.groups = {}
        self._staging()
        self.readonly = readonly
        self.debug_enable = debug

    def debug(self, *args):
        if not self.debug_enable:
            return
        print ' '.join([i if isinstance(i, basestring) else pformat(i) for i in args])

    def _staging(self):
        """Preload the groups of related request associated by the same
        staging project.

        """
        for project in self.staging.get_staging_projects():
            # Get all the request identifiers for the project
            requests = self.staging.get_prj_pseudometa(project)['requests']
            requests = [req['id'] for req in requests]

            # Note: Originally we also recovered the requests returned by
            # list_requests_in_prj().  I guess that if the staging
            # project is working properly, that method does not add any
            # new request to the list.
            if requests:
                self.groups[project] = requests
                self.grouped.update({req: project for req in requests})

    def get_request_state(self, request_id):
        """Return the current state of the request."""
        state = None
        url = makeurl(self.apiurl, ('request', str(request_id)))
        try:
            root = ET.parse(http_GET(url)).getroot()
            state = root.find('state').get('name')
        except urllib2.HTTPError, e:
            print('ERROR in URL %s [%s]' % (url, e))
        return state
Example #6
0
class TestConfig(unittest.TestCase):
    def setUp(self):
        self.obs = OBS()
        self.config = Config(PROJECT)
        self.api = StagingAPI(APIURL, PROJECT)

    def test_basic(self):
        self.assertEqual('openSUSE', conf.config[PROJECT]['lock-ns'])

    def test_remote(self):
        self.assertEqual('local', conf.config[PROJECT]['overridden-by-local'])
        self.assertIsNone(conf.config[PROJECT].get('remote-only'))

        self.config.apply_remote(self.api)

        self.assertEqual('local', conf.config[PROJECT]['overridden-by-local'])
        self.assertEqual('remote-indeed', conf.config[PROJECT]['remote-only'])

        self.api.attribute_value_save('Config', 'remote-only = nope')
        self.config.apply_remote(self.api)

        self.assertEqual('local', conf.config[PROJECT]['overridden-by-local'])
        self.assertEqual('nope', conf.config[PROJECT]['remote-only'])

    def test_remote_none(self):
        self.api.attribute_value_save('Config', '')
        # don't crash
        self.config.apply_remote(self.api)

    def test_pattern_order(self):
        # Add pattern to defaults in order to identify which was matched.
        for pattern in DEFAULT:
            DEFAULT[pattern]['pattern'] = pattern

        # A list of projects that should match each of the DEFAULT patterns.
        projects = (
            'openSUSE:Factory',
            'openSUSE:Leap:15.0',
            'openSUSE:Backports:SLE-15',
            'SUSE:SLE-15:GA',
            'SUSE:SLE-12:GA',
            'GNOME:Factory',
        )

        # Ensure each pattern is matched instead of the catch-all pattern.
        patterns = set()
        for project in projects:
            config = Config(project)
            patterns.add(conf.config[project]['pattern'])

        self.assertEqual(len(patterns), len(DEFAULT))
Example #7
0
    def __init__(self, apiurl, project, readonly=False, force_clean=False, debug=False):
        """CheckRepo constructor."""
        self.apiurl = apiurl
        self.project = project
        self.staging = StagingAPI(apiurl, self.project)

        self.pkgcache = PkgCache(BINCACHE, force_clean=force_clean)

        # grouped = { id: staging, }
        self.grouped = {}
        # groups = { staging: [ids,], }
        self.groups = {}
        self._staging()
        self.readonly = readonly
        self.debug_enable = debug
Example #8
0
 def __init__(self, project, dryrun=False, norelease=False, api_url=None, openqa_server='https://openqa.opensuse.org', test_subproject=None):
     self.project = project
     self.dryrun = dryrun
     self.norelease = norelease
     if not api_url:
         api_url = osc.conf.config['apiurl']
     self.api = StagingAPI(api_url, project=project)
     self.openqa_server = openqa_server
     if not test_subproject:
         test_subproject = 'ToTest'
     self.test_project = '%s:%s' % (self.project, test_subproject)
     self.openqa = OpenQA_Client(server=openqa_server)
     self.load_issues_to_ignore()
     self.project_base = project.split(':')[0]
     self.update_pinned_descr = False
     self.amqp_url = osc.conf.config.get('ttm_amqp_url')
Example #9
0
    def __init__(self, project=PROJECT):
        """
        Initialize the configuration
        """
        THIS_DIR = os.path.dirname(os.path.abspath(__file__))
        oscrc = os.path.join(THIS_DIR, 'test.oscrc')

        self.apiurl = APIURL
        logging.basicConfig()

        # clear cache from other tests - otherwise the VCR is replayed depending
        # on test order, which can be harmful
        memoize_session_reset()

        osc.core.conf.get_config(override_conffile=oscrc,
                                 override_no_keyring=True,
                                 override_no_gnome_keyring=True)
        if os.environ.get('OSC_DEBUG'):
            osc.core.conf.config['debug'] = 1
        self.project = project
        self.projects = {}
        self.requests = []
        self.groups = []
        self.users = []
        CacheManager.test = True
        # disable caching, the TTLs break any reproducibility
        Cache.CACHE_DIR = None
        Cache.PATTERNS = {}
        Cache.init()
        self.setup_remote_config()
        self.load_config()
        self.api = StagingAPI(APIURL, project)
Example #10
0
    def setUp(self):
        """
        Initialize the configuration
        """

        self.obs = OBS()
        self.api = StagingAPI(APIURL)
Example #11
0
def main(args):
    global client
    client = InfluxDBClient(args.host, args.port, args.user, args.password,
                            args.project)

    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug

    # Use separate cache since it is persistent.
    Cache.CACHE_DIR = os.path.expanduser('~/.cache/osc-plugin-factory-metrics')
    if args.wipe_cache:
        Cache.delete_all()
    Cache.PATTERNS['/search/request'] = sys.maxint
    Cache.init()

    Config(args.project)
    api = StagingAPI(osc.conf.config['apiurl'], args.project)

    global who_workaround_swap, who_workaround_miss
    who_workaround_swap = who_workaround_miss = 0

    points_requests = ingest_requests(api, args.project)
    points_schedule = ingest_release_schedule(args.project)

    print('who_workaround_swap', who_workaround_swap)
    print('who_workaround_miss', who_workaround_miss)

    print('wrote {:,} points and {:,} annotation points to db'.format(
        points_requests, points_schedule))
Example #12
0
    def setUp(self):
        """Initialize the configuration."""

        self.obs = OBS()
        Config(APIURL, 'openSUSE:Factory')
        self.stagingapi = StagingAPI(APIURL, 'openSUSE:Factory')
        self.checkcommand = CheckCommand(self.stagingapi)
Example #13
0
 def setUp(self):
     """
     Initialize the configuration
     """
     self.obs = obs.OBS()
     Config(obs.APIURL, 'openSUSE:Factory')
     self.api = StagingAPI(obs.APIURL, 'openSUSE:Factory')
Example #14
0
def main(args):
    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug

    devel_projects = devel_projects_get(osc.conf.config['apiurl'],
                                        args.project)
    if len(devel_projects) == 0:
        print('no devel projects found')
    else:
        out = '\n'.join(devel_projects)
        print(out)

        if args.write:
            Config(args.project)
            api = StagingAPI(osc.conf.config['apiurl'], args.project)
            api.save_file_content('%s:Staging' % api.project, 'dashboard',
                                  'devel_projects', out)
Example #15
0
    def setUp(self):
        """
        Initialize the configuration
        """

        self.obs = OBS()
        Config("openSUSE:Factory")
        self.api = StagingAPI(APIURL, "openSUSE:Factory")
Example #16
0
 def __init__(self, name):
     self.name = name
     Config(apiurl, name)
     self.api = StagingAPI(apiurl, name)
     self.staging_projects = dict()
     self.listener = None
     self.logger = logging.getLogger(__name__)
     self.replace_string = self.api.attribute_value_load('OpenQAMapping')
Example #17
0
    def staging_api(self, project):
        if project not in self.staging_apis:
            config = Config(project)
            self.staging_apis[project] = StagingAPI(self.apiurl, project)

            config.apply_remote(self.staging_apis[project])
            self.staging_config[project] = conf.config[project].copy()

        return self.staging_apis[project]
Example #18
0
    def __init__(self, project=PROJECT):
        """Initializes the configuration

        Note this constructor calls :func:`create_target`, which implies several projects and users
        are created right away.

        :param project: default target project
        :type project: str
        """
        THIS_DIR = os.path.dirname(os.path.abspath(__file__))
        oscrc = os.path.join(THIS_DIR, 'test.oscrc')

        # set to None so the destructor can return early in case of exceptions
        self.api = None
        self.apiurl = APIURL
        self.project = project
        self.projects = {}
        self.requests = []
        self.groups = []
        self.users = []
        self.attr_types = {}
        logging.basicConfig()

        # clear cache from other tests - otherwise the VCR is replayed depending
        # on test order, which can be harmful
        memoize_session_reset()

        osc.core.conf.get_config(override_conffile=oscrc,
                                 override_no_keyring=True,
                                 override_no_gnome_keyring=True)
        os.environ['OSC_CONFIG'] = oscrc

        if os.environ.get('OSC_DEBUG'):
            osc.core.conf.config['debug'] = 1

        CacheManager.test = True
        # disable caching, the TTLs break any reproducibility
        Cache.CACHE_DIR = None
        Cache.PATTERNS = {}
        Cache.init()
        # Note this implicitly calls create_target()
        self.setup_remote_config()
        self.load_config()
        self.api = StagingAPI(APIURL, project)
Example #19
0
 def __init__(self, project, dryrun=False, api_url=None, openqa_server='https://openqa.opensuse.org', test_subproject=None):
     self.project = project
     self.dryrun = dryrun
     if not api_url:
         api_url = osc.conf.config['apiurl']
     self.api = StagingAPI(api_url, project=project)
     self.openqa_server = openqa_server
     if not test_subproject:
         test_subproject = 'ToTest'
     self.test_project = '%s:%s' % (self.project, test_subproject)
     self.openqa = OpenQA_Client(server=openqa_server)
     self.issues_to_ignore = []
     self.issuefile = "{}_{}".format(self.project, ISSUE_FILE)
     if os.path.isfile(self.issuefile):
         with open(self.issuefile, 'r') as f:
             for line in f.readlines():
                 self.issues_to_ignore.append(line.strip())
     self.project_base = project.split(':')[0]
     self.update_pinned_descr = False
Example #20
0
class TestUnselect(unittest.TestCase):
    def setUp(self):
        self.obs = obs.OBS()
        Config(obs.APIURL, obs.PROJECT)
        self.api = StagingAPI(obs.APIURL, obs.PROJECT)

    def test_cleanup_filter(self):
        UnselectCommand.config_init(self.api)
        UnselectCommand.cleanup_days = 1
        obsolete = self.api.project_status_requests('obsolete', UnselectCommand.filter_obsolete)
        self.assertSequenceEqual(['627445', '642126', '646560', '645723', '646823'], obsolete)
Example #21
0
    def staging_api(self, project):
        # Allow the Staging subproject to be passed directly from config; it
        # should be stripped before initializing StagingAPI. This allows the
        # NonFree subproject to utilize StagingAPI for the main project.
        if project.endswith(':Staging'):
            project = project[:-8]

        if project not in self.staging_apis:
            Config.get(self.apiurl, project)
            self.staging_apis[project] = StagingAPI(self.apiurl, project)

        return self.staging_apis[project]
Example #22
0
class TestSelect(unittest.TestCase):

    def setUp(self):
        """
        Initialize the configuration
        """
        self.obs = obs.OBS()
        Config(obs.APIURL, 'openSUSE:Factory')
        self.api = StagingAPI(obs.APIURL, 'openSUSE:Factory')

    def test_old_frozen(self):
        self.assertEqual(self.api.prj_frozen_enough('openSUSE:Factory:Staging:A'), False)
        # check it won't allow selecting
        self.assertEqual(False, SelectCommand(self.api, 'openSUSE:Factory:Staging:A').perform(['gcc']))

    def test_select_comments(self):
        c_api = CommentAPI(self.api.apiurl)
        staging_b = 'openSUSE:Factory:Staging:B'
        comments = c_api.get_comments(project_name=staging_b)

        # First select
        self.assertEqual(True, SelectCommand(self.api, staging_b).perform(['gcc', 'wine']))
        first_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(first_select_comments.keys())[-1]
        first_select_comment = first_select_comments[last_id]
        # Only one comment is added
        self.assertEqual(len(first_select_comments), len(comments) + 1)
        # With the right content
        self.assertTrue('request#123 for package gcc submitted by Admin' in first_select_comment['comment'])

        # Second select
        self.assertEqual(True, SelectCommand(self.api, staging_b).perform(['puppet']))
        second_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(second_select_comments.keys())[-1]
        second_select_comment = second_select_comments[last_id]
        # The number of comments increased by one
        self.assertEqual(len(second_select_comments) - 1, len(first_select_comments))
        self.assertNotEqual(second_select_comment['comment'], first_select_comment['comment'])
        # The new comment contains the new request, but not the old one
        self.assertFalse('request#123 for package gcc submitted by Admin' in second_select_comment['comment'])
        self.assertTrue('added request#321 for package puppet submitted by Admin' in second_select_comment['comment'])

    def test_no_matches(self):
        # search for requests
        with self.assertRaises(oscerr.WrongArgs) as cm:
            SelectCommand(self.api, 'openSUSE:Factory:Staging:B').perform(['bash'])
        self.assertEqual(str(cm.exception), "No SR# found for: bash")

    def test_selected(self):
        # make sure the project is frozen recently for other tests

        ret = SelectCommand(self.api, 'openSUSE:Factory:Staging:B').perform(['wine'])
        self.assertEqual(True, ret)
Example #23
0
    def __init__(self, from_prj, to_prj):
        self.from_prj = from_prj
        self.to_prj = to_prj
        self.apiurl = osc.conf.config['apiurl']
        self.debug = osc.conf.config['debug']
        self.filter_lookup = set()
        self.caching = False
        self.dryrun = False
        self.skipped = {}
        self.submit_new = {}
        self.api = StagingAPI(osc.conf.config['apiurl'], project=to_prj)

        self.parse_lookup()
Example #24
0
class TestUnselect(unittest.TestCase):
    def setUp(self):
        self.obs = OBS()
        Config(APIURL, PROJECT)
        self.api = StagingAPI(APIURL, PROJECT)

    def test_cleanup_filter(self):
        UnselectCommand.config_init(self.api)
        UnselectCommand.cleanup_days = 1
        obsolete = self.api.project_status_requests(
            'obsolete', UnselectCommand.filter_obsolete)
        self.assertSequenceEqual(
            ['627445', '642126', '646560', '645723', '646823'], obsolete)
Example #25
0
class TestUnselect(unittest.TestCase):
    def setUp(self):
        self.obs = OBS()
        Config(APIURL, PROJECT)
        self.api = StagingAPI(APIURL, PROJECT)

    def test_cleanup_filter(self):
        UnselectCommand.config_init(self.api)
        obsolete = self.api.project_status_requests('obsolete', UnselectCommand.filter_obsolete)
        self.assertTrue('492438' in obsolete, 'revoked')
        self.assertTrue('592437' in obsolete, 'superseded but over threshold')
        self.assertTrue('492439' in obsolete, 'declined by leaper')
        self.assertTrue('492441' in obsolete, 'declined but over threshold')
        self.assertEqual(len(obsolete), 4)
Example #26
0
    def __init__(self, apiurl, project, readonly=False, force_clean=False, debug=False):
        """CheckRepo constructor."""
        self.apiurl = apiurl
        self.project = project
        self.staging = StagingAPI(apiurl, self.project)

        self.pkgcache = PkgCache(BINCACHE, force_clean=force_clean)

        # grouped = { id: staging, }
        self.grouped = {}
        # groups = { staging: [ids,], }
        self.groups = {}
        self._staging()
        self.readonly = readonly
        self.debug_enable = debug
Example #27
0
def main(args):
    global client
    client = InfluxDBClient(args.host, args.port, args.user, args.password,
                            args.project)

    osc.conf.get_config(override_apiurl=args.apiurl)
    apiurl = osc.conf.config['apiurl']
    osc.conf.config['debug'] = args.debug

    # Ensure database exists.
    client.create_database(client._database)

    metrics_release.ingest(client)
    if args.release_only:
        return

    # Use separate cache since it is persistent.
    _, package = project_pseudometa_package(apiurl, args.project)
    if args.wipe_cache:
        Cache.delete_all()
    if args.heavy_cache:
        Cache.PATTERNS[r'/search/request'] = sys.maxsize
        Cache.PATTERNS[r'/source/[^/]+/{}/_history'.format(
            package)] = sys.maxsize
    Cache.PATTERNS[r'/source/[^/]+/{}/[^/]+\?rev=.*'.format(
        package)] = sys.maxsize
    Cache.init('metrics')

    Config(apiurl, args.project)
    api = StagingAPI(apiurl, args.project)

    print('dashboard: wrote {:,} points'.format(ingest_dashboard(api)))

    global who_workaround_swap, who_workaround_miss
    who_workaround_swap = who_workaround_miss = 0

    points_requests = ingest_requests(api, args.project)
    points_schedule = ingest_release_schedule(args.project)

    print('who_workaround_swap', who_workaround_swap)
    print('who_workaround_miss', who_workaround_miss)

    print('wrote {:,} points and {:,} annotation points to db'.format(
        points_requests, points_schedule))
Example #28
0
def main(args):
    global client
    client = InfluxDBClient(args.host, args.port, args.user, args.password,
                            args.project)

    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug

    # Ensure database exists.
    client.create_database(client._database)

    metrics_release.ingest(client)
    if args.release_only:
        return

    # Use separate cache since it is persistent.
    Cache.CACHE_DIR = Cache.CACHE_DIR + '-metrics'
    if args.wipe_cache:
        Cache.delete_all()
    Cache.PATTERNS['/search/request'] = sys.maxint
    Cache.init()

    Config(args.project)
    api = StagingAPI(osc.conf.config['apiurl'], args.project)

    global who_workaround_swap, who_workaround_miss
    who_workaround_swap = who_workaround_miss = 0

    points_requests = ingest_requests(api, args.project)
    points_schedule = ingest_release_schedule(args.project)

    print('who_workaround_swap', who_workaround_swap)
    print('who_workaround_miss', who_workaround_miss)

    print('wrote {:,} points and {:,} annotation points to db'.format(
        points_requests, points_schedule))
Example #29
0
class StagingHelper(object):
    def __init__(self, project):
        self.project = project
        self.apiurl = osc.conf.config['apiurl']
        Config(self.apiurl, self.project)
        self.api = StagingAPI(self.apiurl, self.project)

    def get_support_package_list(self, project, repository):
        f = osc.core.get_buildconfig(self.apiurl, project, repository).splitlines()
        pkg_list = []
        for line in f:
            if re.match('Preinstall', line) or re.match('VM[Ii]nstall', line) or re.match('Support', line):
                content = line.split(':')
                variables = [x.strip() for x in content[1].split(' ')]
                for var in variables:
                    if var != '' and var not in pkg_list:
                        if var.startswith('!') and var[1:] in pkg_list:
                            pkg_list.remove(var[1:])
                        else:
                            pkg_list.append(var)
        return pkg_list

    def get_project_binarylist(self, project, repository, arch):
        query = {'view': 'binaryversions'}
        root = ET.parse(http_GET(makeurl(self.apiurl, ['build', project, repository, arch],
            query=query))).getroot()
        return root

    def process_project_binarylist(self, project, repository, arch):
        prj_binarylist = self.get_project_binarylist(project, repository, arch)
        files = {}
        for package in prj_binarylist.findall('./binaryversionlist'):
            for binary in package.findall('binary'):
                result = re.match(r'(.*)-([^-]*)-([^-]*)\.([^-\.]+)\.rpm', binary.attrib['name'])
                if not result:
                    continue
                bname = result.group(1)
                if bname.endswith('-debuginfo') or bname.endswith('-debuginfo-32bit'):
                    continue
                if bname.endswith('-debugsource'):
                    continue
                if bname.startswith('::import::'):
                    continue
                if result.group(4) == 'src':
                    continue
                files[bname] = package.attrib['package'].split(':', 1)[0]

        return files

    def check_multiple_specs(self, project, packages):
        expanded_packages = []

        for pkg in packages:
            query = {'expand': 1}
            url = makeurl(self.apiurl, ['source', project, pkg], query=query)
            try:
                root = ET.parse(http_GET(url)).getroot()
            except HTTPError as e:
                if e.code == 404:
                    continue
                raise
            for en in root.findall('entry'):
                if en.attrib['name'].endswith('.spec'):
                    expanded_packages.append(en.attrib['name'][:-5])

        return expanded_packages

    def crawl(self):
        """Main method"""
        rebuild_data = self.api.pseudometa_file_load('support_pkg_rebuild')
        if rebuild_data is None:
            print("There is no support_pkg_rebuild file!")
            return

        logging.info('Gathering support package list from %s' % self.project)
        support_pkgs = self.get_support_package_list(self.project, 'standard')
        files = self.process_project_binarylist(self.project, 'standard', 'x86_64')
        staging_projects = ["%s:%s"%(self.api.cstaging, p) for p in self.api.get_staging_projects_short()]
        cand_sources = defaultdict(list)
        for stg in staging_projects:
            prj_meta = self.api.get_prj_pseudometa(stg)
            prj_staged_packages = [req['package'] for req in prj_meta['requests']]
            prj_expanded_packages = self.check_multiple_specs(self.project, prj_staged_packages)
            for pkg in support_pkgs:
                if files.get(pkg) and files.get(pkg) in prj_expanded_packages:
                    if files.get(pkg) not in cand_sources[stg]:
                        cand_sources[stg].append(files.get(pkg))

        root = ET.fromstring(rebuild_data)

        logging.info('Checking rebuild data...')

        for stg in root.findall('staging'):
            rebuild = stg.find('rebuild').text
            suppkg_list = stg.find('supportpkg').text
            need_rebuild = False
            suppkgs = []
            if suppkg_list:
                suppkgs = suppkg_list.split(',')

            stgname = stg.get('name')
            if len(cand_sources[stgname]) and rebuild == 'unknown':
                need_rebuild = True
                stg.find('rebuild').text = 'needed'
                new_suppkg_list = ','.join(cand_sources[stgname])
                stg.find('supportpkg').text = new_suppkg_list
            elif len(cand_sources[stgname]) and rebuild != 'unknown':
                for cand in cand_sources[stgname]:
                    if cand not in suppkgs:
                        need_rebuild = True
                        stg.find('rebuild').text = 'needed'
                        break
                new_suppkg_list = ','.join(cand_sources[stgname])
                stg.find('supportpkg').text = new_suppkg_list
            elif not len(cand_sources[stgname]):
                stg.find('rebuild').text = 'unneeded'
                stg.find('supportpkg').text = ''

            if stg.find('rebuild').text == 'needed':
                need_rebuild = True

            if need_rebuild and not self.api.is_repo_dirty(stgname, 'standard'):
                logging.info('Rebuild %s' % stgname)
                osc.core.rebuild(self.apiurl, stgname, None, None, None)
                stg.find('rebuild').text = 'unneeded'

        logging.info('Updating support pkg list...')
        rebuild_data_updated = ET.tostring(root)
        logging.debug(rebuild_data_updated)
        if rebuild_data_updated != rebuild_data:
            self.api.pseudometa_file_save(
                'support_pkg_rebuild', rebuild_data_updated, 'support package rebuild')
Example #30
0
 def __init__(self, project):
     self.project = project
     self.apiurl = osc.conf.config['apiurl']
     Config(self.apiurl, self.project)
     self.api = StagingAPI(self.apiurl, self.project)
Example #31
0
 def __init__(self, project):
     self.project = project
     self.apiurl = osc.conf.config['apiurl']
     Config(self.project)
     self.api = StagingAPI(self.apiurl, self.project)
Example #32
0
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Command to publish openQA status in Staging projects')
    parser.add_argument('-s', '--staging', type=str, default=None,
                        help='staging project letter')
    parser.add_argument('-f', '--force', action='store_true', default=False,
                        help='force the write of the comment')
    parser.add_argument('-p', '--project', type=str, default='Factory',
                        help='openSUSE version to make the check (Factory, 13.2)')
    parser.add_argument('-d', '--debug', action='store_true', default=False,
                        help='enable debug information')

    args = parser.parse_args()

    osc.conf.get_config()
    osc.conf.config['debug'] = args.debug

    if args.force:
        MARGIN_HOURS = 0

    Config('openSUSE:%s' % args.project)
    api = StagingAPI(osc.conf.config['apiurl'], 'openSUSE:%s' % args.project)
    openQA = OpenQAReport(api)

    if args.staging:
        openQA.report(api.prj_from_letter(args.staging))
    else:
        for staging in api.get_staging_projects(include_dvd=False):
            openQA.report(staging)
Example #33
0
class ToTestBase(object):

    """Base class to store the basic interface"""

    product_repo = 'images'
    product_arch = 'local'
    livecd_repo = 'images'
    livecd_archs = ['i586', 'x86_64']

    def __init__(self, project, dryrun=False, api_url=None, openqa_server='https://openqa.opensuse.org', test_subproject=None):
        self.project = project
        self.dryrun = dryrun
        if not api_url:
            api_url = osc.conf.config['apiurl']
        self.api = StagingAPI(api_url, project=project)
        self.openqa_server = openqa_server
        if not test_subproject:
            test_subproject = 'ToTest'
        self.test_project = '%s:%s' % (self.project, test_subproject)
        self.openqa = OpenQA_Client(server=openqa_server)
        self.issues_to_ignore = []
        self.issuefile = "{}_{}".format(self.project, ISSUE_FILE)
        if os.path.isfile(self.issuefile):
            with open(self.issuefile, 'r') as f:
                for line in f.readlines():
                    self.issues_to_ignore.append(line.strip())
        self.project_base = project.split(':')[0]
        self.update_pinned_descr = False

    def openqa_group(self):
        return self.project

    def iso_prefix(self):
        return self.project

    def jobs_num(self):
        return 70

    def current_version(self):
        return self.release_version()

    def binaries_of_product(self, project, product):
        url = self.api.makeurl(['build', project, self.product_repo, self.product_arch, product])
        try:
            f = self.api.retried_GET(url)
        except urllib2.HTTPError:
            return []

        ret = []
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            ret.append(binary.get('filename'))

        return ret

    def get_current_snapshot(self):
        """Return the current snapshot in the test project"""

        for binary in self.binaries_of_product(self.test_project, '_product:%s-cd-mini-%s' % (self.project_base, self.arch())):
            result = re.match(r'%s-%s-NET-.*-Snapshot(.*)-Media.iso' % (self.project_base, self.iso_prefix()),
                              binary)
            if result:
                return result.group(1)

        return None

    def ftp_build_version(self, project, tree, base=None):
        if not base:
            base = self.project_base
        for binary in self.binaries_of_product(project, tree):
            result = re.match(r'%s.*Build(.*)-Media1.report' % base, binary)
            if result:
                return result.group(1)
        raise NotFoundException("can't find %s ftp version" % project)

    def iso_build_version(self, project, tree, base=None):
        if not base:
            base = self.project_base
        for binary in self.binaries_of_product(project, tree):
            result = re.match(r'%s.*Build(.*)-Media(.*).iso' % base, binary)
            if result:
                return result.group(1)
        raise NotFoundException("can't find %s iso version" % project)

    def release_version(self):
        url = self.api.makeurl(['build', self.project, 'standard', self.arch(),
                                '_product:%s-release' % self.project_base])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            binary = binary.get('filename', '')
            result = re.match(r'.*-([^-]*)-[^-]*.src.rpm', binary)
            if result:
                return result.group(1)

        raise NotFoundException("can't find %s version" % self.project)

    def current_qa_version(self):
        return self.api.dashboard_content_load('version_totest')

    def find_openqa_results(self, snapshot):
        """Return the openqa jobs of a given snapshot and filter out the
        cloned jobs

        """

        url = makeurl(self.openqa_server,
                      ['api', 'v1', 'jobs'], {'group': self.openqa_group(), 'build': snapshot, 'latest': 1})
        f = self.api.retried_GET(url)
        jobs = []
        for job in json.load(f)['jobs']:
            if job['clone_id'] or job['result'] == 'obsoleted':
                continue
            job['name'] = job['name'].replace(snapshot, '')
            jobs.append(job)
        return jobs

    def _result2str(self, result):
        if result == QA_INPROGRESS:
            return 'inprogress'
        elif result == QA_FAILED:
            return 'failed'
        else:
            return 'passed'

    def find_failed_module(self, testmodules):
        # print json.dumps(testmodules, sort_keys=True, indent=4)
        for module in testmodules:
            if module['result'] != 'failed':
                continue
            flags = module['flags']
            if 'fatal' in flags or 'important' in flags:
                return module['name']
            logger.info('%s %s %s' %
                        (module['name'], module['result'], module['flags']))

    def update_openqa_status_message(self):
        url = makeurl(self.openqa_server,
                      ['api', 'v1', 'job_groups'])
        f = self.api.retried_GET(url)
        job_groups = json.load(f)
        group_id = 0
        for jg in job_groups:
            if jg['name'] == self.openqa_group():
                group_id = jg['id']
                break

        if not group_id:
            logger.debug('No openQA group id found for status comment update, ignoring')
            return

        pinned_ignored_issue = 0
        issues = ' , '.join(self.issues_to_ignore)
        status_flag = 'publishing' if self.status_for_openqa['is_publishing'] else \
            'preparing' if self.status_for_openqa['can_release'] else \
                'testing' if self.status_for_openqa['snapshotable'] else \
                'building'
        status_msg = "tag:{}:{}:{}".format(self.status_for_openqa['new_snapshot'], status_flag, status_flag)
        msg = "pinned-description: Ignored issues\r\n\r\n{}\r\n\r\n{}".format(issues, status_msg)
        data = {'text': msg}

        url = makeurl(self.openqa_server,
                      ['api', 'v1', 'groups', str(group_id), 'comments'])
        f = self.api.retried_GET(url)
        comments = json.load(f)
        for comment in comments:
            if comment['userName'] == 'ttm' and \
                    comment['text'].startswith('pinned-description: Ignored issues'):
                pinned_ignored_issue = comment['id']

        logger.debug('Writing openQA status message: {}'.format(data))
        if not self.dryrun:
            if pinned_ignored_issue:
                self.openqa.openqa_request(
                    'PUT', 'groups/%s/comments/%d' % (group_id, pinned_ignored_issue), data=data)
            else:
                self.openqa.openqa_request(
                    'POST', 'groups/%s/comments' % group_id, data=data)

    def overall_result(self, snapshot):
        """Analyze the openQA jobs of a given snapshot Returns a QAResult"""

        if snapshot is None:
            return QA_FAILED

        jobs = self.find_openqa_results(snapshot)

        if len(jobs) < self.jobs_num():  # not yet scheduled
            logger.warning('we have only %s jobs' % len(jobs))
            return QA_INPROGRESS

        number_of_fails = 0
        in_progress = False
        for job in jobs:
            # print json.dumps(job, sort_keys=True, indent=4)
            if job['result'] in ('failed', 'incomplete', 'skipped', 'user_cancelled', 'obsoleted', 'parallel_failed'):
                jobname = job['name']
                # print json.dumps(job, sort_keys=True, indent=4), jobname
                url = makeurl(self.openqa_server,
                              ['api', 'v1', 'jobs', str(job['id']), 'comments'])
                f = self.api.retried_GET(url)
                comments = json.load(f)
                refs = set()
                labeled = 0
                to_ignore = False
                for comment in comments:
                    for ref in comment['bugrefs']:
                        refs.add(str(ref))
                    if comment['userName'] == 'ttm' and comment['text'] == 'label:unknown_failure':
                        labeled = comment['id']
                    if re.search(r'@ttm:? ignore', comment['text']):
                        to_ignore = True
                ignored = len(refs) > 0
                for ref in refs:
                    if ref not in self.issues_to_ignore:
                        if to_ignore:
                            self.issues_to_ignore.append(ref)
                            self.update_pinned_descr = True
                            with open(self.issuefile, 'a') as f:
                                f.write("%s\n" % ref)
                        else:
                            ignored = False

                if not ignored:
                    number_of_fails += 1
                    if not labeled and len(refs) > 0 and not self.dryrun:
                        data = {'text': 'label:unknown_failure'}
                        self.openqa.openqa_request(
                            'POST', 'jobs/%s/comments' % job['id'], data=data)
                elif labeled:
                    # remove flag - unfortunately can't delete comment unless admin
                    data = {'text': 'Ignored issue'}
                    self.openqa.openqa_request(
                        'PUT', 'jobs/%s/comments/%d' % (job['id'], labeled), data=data)

                if ignored:
                    logger.info("job %s failed, but was ignored", jobname)
                else:
                    joburl = '%s/tests/%s' % (self.openqa_server, job['id'])
                    logger.info("job %s failed, see %s", jobname, joburl)

            elif job['result'] == 'passed' or job['result'] == 'softfailed':
                continue
            elif job['result'] == 'none':
                if job['state'] != 'cancelled':
                    in_progress = True
            else:
                raise Exception(job['result'])

        if number_of_fails > 0:
            return QA_FAILED

        if in_progress:
            return QA_INPROGRESS

        return QA_PASSED

    def all_repos_done(self, project, codes=None):
        """Check the build result of the project and only return True if all
        repos of that project are either published or unpublished

        """

        # coolo's experience says that 'finished' won't be
        # sufficient here, so don't try to add it :-)
        codes = ['published', 'unpublished'] if not codes else codes

        url = self.api.makeurl(
            ['build', project, '_result'], {'code': 'failed'})
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        ready = True
        for repo in root.findall('result'):
            # ignore ports. 'factory' is used by arm for repos that are not
            # meant to use the totest manager.
            if repo.get('repository') in ('ports', 'factory', 'images_staging'):
                continue
            # ignore 32bit for now. We're only interested in aarch64 here
            if repo.get('arch') in ('armv6l', 'armv7l'):
                continue
            if repo.get('dirty', '') == 'true':
                logger.info('%s %s %s -> %s' % (repo.get('project'),
                                                repo.get('repository'), repo.get('arch'), 'dirty'))
                ready = False
            if repo.get('code') not in codes:
                logger.info('%s %s %s -> %s' % (repo.get('project'),
                                                repo.get('repository'), repo.get('arch'), repo.get('code')))
                ready = False
        return ready

    def maxsize_for_package(self, package):
        if re.match(r'.*-mini-.*', package):
            return 737280000  # a CD needs to match

        if re.match(r'.*-dvd5-.*', package):
            return 4700372992  # a DVD needs to match

        if re.match(r'livecd-x11', package):
            return 681574400  # not a full CD

        if re.match(r'livecd-.*', package):
            return 999999999  # a GB stick

        if re.match(r'.*-(dvd9-dvd|cd-DVD)-.*', package):
            return 8539996159

        if re.match(r'.*-ftp-(ftp|POOL)-', package):
            return None

        if ':%s-Addon-NonOss-ftp-ftp' % self.base in package:
            return None

        raise Exception('No maxsize for {}'.format(package))

    def package_ok(self, project, package, repository, arch):
        """Checks one package in a project and returns True if it's succeeded

        """

        query = {'package': package, 'repository': repository, 'arch': arch}

        url = self.api.makeurl(['build', project, '_result'], query)
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for repo in root.findall('result'):
            status = repo.find('status')
            if status.get('code') != 'succeeded':
                logger.info(
                    '%s %s %s %s -> %s' % (project, package, repository, arch, status.get('code')))
                return False

        maxsize = self.maxsize_for_package(package)
        if not maxsize:
            return True

        url = self.api.makeurl(['build', project, repository, arch, package])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            if not binary.get('filename', '').endswith('.iso'):
                continue
            isosize = int(binary.get('size', 0))
            if isosize > maxsize:
                logger.error('%s %s %s %s: %s' % (
                    project, package, repository, arch, 'too large by %s bytes' % (isosize - maxsize)))
                return False

        return True

    def is_snapshottable(self):
        """Check various conditions required for factory to be snapshotable

        """

        if not self.all_repos_done(self.project):
            return False

        for product in self.ftp_products + self.main_products:
            if not self.package_ok(self.project, product, self.product_repo, self.product_arch):
                return False

            if len(self.livecd_products):

                if not self.all_repos_done('%s:Live' % self.project):
                    return False

                for arch in self.livecd_archs:
                    for product in self.livecd_products:
                        if not self.package_ok('%s:Live' % self.project, product, self.livecd_repo, arch):
                            return False

        return True

    def _release_package(self, project, package, set_release=None):
        query = {'cmd': 'release'}

        if set_release:
            query['setrelease'] = set_release

        # FIXME: make configurable. openSUSE:Factory:ARM currently has multiple
        # repos with release targets, so obs needs to know which one to release
        if project == 'openSUSE:Factory:ARM':
            query['repository'] = 'images'

        baseurl = ['source', project, package]

        url = self.api.makeurl(baseurl, query=query)
        if self.dryrun:
            logger.info("release %s/%s (%s)" % (project, package, set_release))
        else:
            self.api.retried_POST(url)

    def _release(self, set_release=None):
        for product in self.ftp_products:
            self._release_package(self.project, product)

        for cd in self.livecd_products:
            self._release_package('%s:Live' %
                                  self.project, cd, set_release=set_release)

        for cd in self.main_products:
            self._release_package(self.project, cd, set_release=set_release)

    def update_totest(self, snapshot=None):
        release = 'Snapshot%s' % snapshot if snapshot else None
        logger.info('Updating snapshot %s' % snapshot)
        if not self.dryrun:
            self.api.switch_flag_in_prj(self.test_project, flag='publish', state='disable')

        self._release(set_release=release)

    def publish_factory_totest(self):
        logger.info('Publish test project content')
        if not self.dryrun:
            self.api.switch_flag_in_prj(
                self.test_project, flag='publish', state='enable')

    def totest_is_publishing(self):
        """Find out if the publishing flag is set in totest's _meta"""

        url = self.api.makeurl(
            ['source', self.test_project, '_meta'])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        if not root.find('publish'):  # default true
            return True

        for flag in root.find('publish'):
            if flag.get('repository', None) or flag.get('arch', None):
                continue
            if flag.tag == 'enable':
                return True
        return False

    def totest(self):
        try:
            current_snapshot = self.get_current_snapshot()
        except NotFoundException as e:
            # nothing in test project (yet)
            logger.warn(e)
            current_snapshot = None
        new_snapshot = self.current_version()
        self.update_pinned_descr = False
        current_result = self.overall_result(current_snapshot)
        current_qa_version = self.current_qa_version()

        logger.info('current_snapshot %s: %s' %
                    (current_snapshot, self._result2str(current_result)))
        logger.debug('new_snapshot %s', new_snapshot)
        logger.debug('current_qa_version %s', current_qa_version)

        snapshotable = self.is_snapshottable()
        logger.debug("snapshotable: %s", snapshotable)
        can_release = ((current_snapshot is None or current_result != QA_INPROGRESS) and snapshotable)

        # not overwriting
        if new_snapshot == current_snapshot:
            logger.debug("no change in snapshot version")
            can_release = False
        elif not self.all_repos_done(self.test_project):
            logger.debug("not all repos done, can't release")
            # the repos have to be done, otherwise we better not touch them
            # with a new release
            can_release = False

        can_publish = (current_result == QA_PASSED)

        # already published
        totest_is_publishing = self.totest_is_publishing()
        if totest_is_publishing:
            logger.debug("totest already publishing")
            can_publish = False

        if self.update_pinned_descr:
            self.status_for_openqa = {
                'current_snapshot': current_snapshot,
                'new_snapshot': new_snapshot,
                'snapshotable': snapshotable,
                'can_release': can_release,
                'is_publishing': totest_is_publishing,
            }
            self.update_openqa_status_message()

        if can_publish:
            if current_qa_version == current_snapshot:
                self.publish_factory_totest()
                self.write_version_to_dashboard("snapshot", current_snapshot)
                can_release = False  # we have to wait
            else:
                # We reached a very bad status: openQA testing is 'done', but not of the same version
                # currently in test project. This can happen when 'releasing' the
                # product failed
                raise Exception("Publishing stopped: tested version (%s) does not match version in test project (%s)"
                                % (current_qa_version, current_snapshot))

        if can_release:
            self.update_totest(new_snapshot)
            self.write_version_to_dashboard("totest", new_snapshot)

    def release(self):
        new_snapshot = self.current_version()
        self.update_totest(new_snapshot)

    def write_version_to_dashboard(self, target, version):
        if not self.dryrun:
            self.api.dashboard_content_ensure('version_%s' % target, version, comment='Update version')
Example #34
0
 def setUp(self):
     self.obs = obs.OBS()
     Config(obs.APIURL, obs.PROJECT)
     self.api = StagingAPI(obs.APIURL, obs.PROJECT)
Example #35
0
def do_staging(self, subcmd, opts, *args):
    """${cmd_name}: Commands to work with staging projects

    ${cmd_option_list}

    "accept" will accept all requests in
        $PROJECT:Staging:<LETTER> into $PROJECT
        For an openSUSE:* project, requests marked ready from adi stagings
        will also be accepted.

    "acheck" will check if it is safe to accept new staging projects
        As $PROJECT is syncing the right package versions between
        /standard, /totest and /snapshot, it is important that the projects
        are clean prior to a checkin round.

    "adi" will list already staged requests, stage new requests, and supersede
        requests where applicable. New adi stagings will be created for new
        packages based on the grouping options used. The default grouping is by
        source project. When adi stagings are ready the request will be marked
        ready, unstaged, and the adi staging deleted.

    "check" will check if all packages are links without changes

    "check_duplicate_binaries" list binaries provided by multiple packages

    "config" will modify or view staging specific configuration

        Target project OSRT:Config attribute configuration applies to all
        stagings. Both configuration locations follow the .oscrc format (space
        separated list).

        config
            Print all staging configuration.
        config key
            Print the value of key for stagings.
        config key value...
            Set the value of key for stagings.
        config --clear
            Clear all staging configuration.
        config --clear key
            Clear (unset) a single key from staging configuration.
        config --append key value...
            Append value to existing value or set if no existing value.

        All of the above may be restricted to a set of stagings.

        The staging configuration is automatically cleared anytime staging
        pseudometa is cleared (accept, or unstage all requests).

        The keys that may be set in staging configuration are:

        - repo_checker-binary-whitelist[-arch]: appended to target project list
        - todo: text to be printed after staging is accepted
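
        For example (the package names here are only illustrative), a per-arch
        whitelist entry following the [-arch] pattern above could be appended
        with:

        config --append repo_checker-binary-whitelist-x86_64 libfoo1 libbar2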

    "cleanup_rings" will try to cleanup rings content and print
        out problems

    "freeze" will freeze the sources of the project's links while not
        affecting the source packages

    "frozenage" will show when the respective staging project was last frozen

    "ignore" will ignore a request from "list" and "adi" commands until unignored

    "unignore" will remove from requests from ignore list
        If the --cleanup flag is included then all ignored requests that were
        changed from state new or review more than 3 days ago will be removed.

    "list" will list/supersede requests for ring packages or all if no rings.

    "lock" acquire a hold on the project in order to execute multiple commands
        and prevent others from interrupting. An example:

        lock -m "checkin round"

        list --supersede
        adi
        accept A B C D E

        unlock

        Each command will update the lock to keep it up-to-date.

    "repair" will attempt to repair the state of a request that has been
        corrupted.

        Use the --cleanup flag to include all untracked requests.

    "select" will add requests to the project
        Stagings are expected to be either in short-hand or the full project
        name. For example letter or named stagings can be specified simply as
        A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2,
        etc. Currently, adi stagings are not supported in proposal mode.

        Requests may either be the target package or the request ID.

        When using --filter-by or --group-by the xpath will be applied to the
        request node as returned by OBS. Use the following on a current request
        to see the XML structure.

        osc api /request/1337

        A number of additional values will supplement the normal request node.

        - ./action/target/@devel_project: the devel project for the package
        - ./action/target/@devel_project_super: super devel project if relevant
        - ./action/target/@ring: the ring to which the package belongs
        - ./@aged: either True or False based on splitter-request-age-threshold
        - ./@nonfree: set to nonfree if targeting nonfree subproject
        - ./@ignored: either False or the provided message

        Some useful examples:

        --filter-by './action/target[starts-with(@package, "yast-")]'
        --filter-by './action/target/[@devel_project="YaST:Head"]'
        --filter-by './action/target[starts-with(@ring, "1")]'
        --filter-by '@id!="1234567"'
        --filter-by 'contains(description, "#Portus")'

        --group-by='./action/target/@devel_project'
        --group-by='./action/target/@ring'

        Multiple filter-by or group-by options may be used at the same time.

        Note that when using proposal mode, multiple stagings to consider may be
        provided in addition to a list of requests by which to filter. A more
        complex example:

        select --group-by='./action/target/@devel_project' A B C 123 456 789

        This will separate the requests 123, 456, 789 by devel project and only
        consider stagings A, B, or C, if available, for placement.

        No arguments is also a valid choice and will propose all non-ignored
        requests into the first available staging. Note that bootstrapped
        stagings are only used when either required or no other stagings are
        available.

        Another useful example is placing all open requests into a specific
        letter staging with:

        select A

        Built-in strategies may be specified as well. For example:

        select --strategy devel
        select --strategy quick
        select --strategy special
        select --strategy super

        The default is none; custom is used when any filter-by or group-by
        arguments are provided.

        To merge applicable requests into an existing staging:

        select --merge A

        To automatically try all available strategies:

        select --try-strategies

        These concepts can be combined and interactive mode allows the proposal
        to be modified before it is executed.

        Moving requests can be accomplished using the --move flag. For example,
        to move already staged pac1 and pac2 to staging B use the following.

        select --move B pac1 pac2

        The staging in which the requests are staged will automatically be
        determined and the requests will be removed from that staging and placed
        in the specified staging.

        Related to this, the --filter-from option may be used in conjunction
        with --move to only move requests already staged in a specific staging.
        This can be useful if a staging master is responsible for a specific set
        of packages and wants to move them into a different staging when they
        were already placed in a mixed staging. For example, if one had a file
        with a list of packages the following would move any of them found in
        staging A to staging B.

        select --move --filter-from A B $(< package.list)

    "unselect" will remove from the project - pushing them back to the backlog
        If a message is included the requests will be ignored first.

        Use the --cleanup flag to include all obsolete requests.

    "unlock" will remove the staging lock in case it gets stuck or a manual hold
        If a command lock gets stuck while a hold is placed on a project the
        unlock command will need to be run twice since there are two layers of
        locks.

    "rebuild" will rebuild broken packages in the given stagings or all
        The rebuild command will only trigger builds for packages with less than
        3 failures since the last success or if the build log indicates a stall.

        If the force option is included the rebuild checks will be ignored and
        all packages failing to build will be triggered.

    "setprio" will set priority of requests withing stagings
        If no stagings are specified all stagings will be used.
        The default priority is important, but the possible values are:
          "critical", "important", "moderate" or "low".

    "supersede" will supersede requests were applicable.
        A request list can be used to limit what is superseded.

    Usage:
        osc staging accept [--force] [--no-cleanup] [LETTER...]
        osc staging acheck
        osc staging adi [--move] [--by-develproject] [--split] [REQUEST...]
        osc staging check [--old] [STAGING...]
        osc staging check_duplicate_binaries
        osc staging config [--append] [--clear] [STAGING...] [key] [value]
        osc staging cleanup_rings
        osc staging freeze [--no-bootstrap] STAGING...
        osc staging frozenage [STAGING...]
        osc staging ignore [-m MESSAGE] REQUEST...
        osc staging unignore [--cleanup] [REQUEST...|all]
        osc staging list [--supersede]
        osc staging lock [-m MESSAGE]
        osc staging select [--no-freeze] [--move [--filter-from STAGING]]
            [--add PACKAGE]
            STAGING REQUEST...
        osc staging select [--no-freeze] [--interactive|--non-interactive]
            [--filter-by...] [--group-by...]
            [--merge] [--try-strategies] [--strategy]
            [STAGING...] [REQUEST...]
        osc staging unselect [--cleanup] [-m MESSAGE] [REQUEST...]
        osc staging unlock
        osc staging rebuild [--force] [STAGING...]
        osc staging repair [--cleanup] [REQUEST...]
        osc staging setprio [STAGING...] [priority]
        osc staging supersede [REQUEST...]
    """
    if opts.version:
        self._print_version()

    # verify the argument counts match the commands
    if len(args) == 0:
        raise oscerr.WrongArgs('No command given, see "osc help staging"!')
    cmd = args[0]
    if cmd in (
        'accept',
        'adi',
        'check',
        'config',
        'frozenage',
        'unignore',
        'select',
        'unselect',
        'rebuild',
        'repair',
        'setprio',
        'supersede',
    ):
        min_args, max_args = 0, None
    elif cmd in (
        'freeze',
        'ignore',
    ):
        min_args, max_args = 1, None
    elif cmd in (
        'acheck',
        'check_duplicate_binaries',
        'cleanup_rings',
        'list',
        'lock',
        'unlock',
    ):
        min_args, max_args = 0, 0
    else:
        raise oscerr.WrongArgs('Unknown command: %s' % cmd)
    args = clean_args(args)
    if len(args) - 1 < min_args:
        raise oscerr.WrongArgs('Too few arguments.')
    if max_args is not None and len(args) - 1 > max_args:
        raise oscerr.WrongArgs('Too many arguments.')

    # Allow for determining project from osc store.
    if not opts.project:
        if core.is_project_dir('.'):
            opts.project = core.store_read_project('.')
        else:
            opts.project = 'Factory'

    # Cache the remote config fetch.
    Cache.init()

    # Init the OBS access and configuration
    opts.project = self._full_project_name(opts.project)
    opts.apiurl = self.get_api_url()
    opts.verbose = False
    Config(opts.apiurl, opts.project)

    colorama.init(autoreset=True,
        strip=(opts.no_color or not bool(int(conf.config.get('staging.color', True)))))
    # Allow colors to be changed.
    for name in dir(Fore):
        if not name.startswith('_'):
            # .oscrc requires keys to be lower-case.
            value = conf.config.get('staging.color.' + name.lower())
            if value:
                setattr(Fore, name, ansi.code_to_chars(value))

    if opts.wipe_cache:
        Cache.delete_all()

    api = StagingAPI(opts.apiurl, opts.project)
    needed = lock_needed(cmd, opts)
    with OBSLock(opts.apiurl, opts.project, reason=cmd, needed=needed) as lock:

        # call the respective command and parse args by need
        if cmd == 'check':
            if len(args) == 1:
                CheckCommand(api).perform(None, opts.old)
            else:
                for prj in args[1:]:
                    CheckCommand(api).perform(prj, opts.old)
                    print()
        elif cmd == 'check_duplicate_binaries':
            CheckDuplicateBinariesCommand(api).perform(opts.save)
        elif cmd == 'config':
            projects = set()
            key = value = None
            stagings = api.get_staging_projects_short(None) + \
                       api.get_staging_projects()
            for arg in args[1:]:
                if arg in stagings:
                    projects.add(api.prj_from_short(arg))
                elif key is None:
                    key = arg
                elif value is None:
                    value = arg
                else:
                    value += ' ' + arg

            if not len(projects):
                projects = api.get_staging_projects()

            ConfigCommand(api).perform(projects, key, value, opts.append, opts.clear)
        elif cmd == 'freeze':
            for prj in args[1:]:
                prj = api.prj_from_short(prj)
                print(Fore.YELLOW + prj)
                FreezeCommand(api).perform(prj, copy_bootstrap=opts.bootstrap)
        elif cmd == 'frozenage':
            projects = api.get_staging_projects_short() if len(args) == 1 else args[1:]
            for prj in projects:
                prj = api.prj_from_letter(prj)
                print('{} last frozen {}{:.1f} days ago'.format(
                    Fore.YELLOW + prj + Fore.RESET,
                    Fore.GREEN if api.prj_frozen_enough(prj) else Fore.RED,
                    api.days_since_last_freeze(prj)))
        elif cmd == 'acheck':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64")
            if version_totest:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty = api.is_repo_dirty(api.project, 'totest')
                print("version_openqa: %s / version_totest: %s / totest_dirty: %s\n" % (version_openqa, version_totest, totest_dirty))
            else:
                print("acheck is unavailable in %s!\n" % (api.project))
        elif cmd == 'accept':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64")

            if version_totest is None or opts.force:
                # SLE does not have a totest_version or openqa_version - ignore it
                version_openqa = version_totest
                totest_dirty   = False
            else:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty   = api.is_repo_dirty(api.project, 'totest')

            if version_openqa == version_totest and not totest_dirty:
                cmd = AcceptCommand(api)
                for prj in args[1:]:
                    if cmd.perform(api.prj_from_letter(prj), opts.force):
                        cmd.reset_rebuild_data(prj)
                    else:
                        return
                    if not opts.no_cleanup:
                        if api.item_exists(api.prj_from_letter(prj)):
                            cmd.cleanup(api.prj_from_letter(prj))
                cmd.accept_other_new()
                if opts.project.startswith('openSUSE:'):
                    cmd.update_factory_version()
                    if api.item_exists(api.crebuild):
                        cmd.sync_buildfailures()
            else:
                print("Not safe to accept: /totest is not yet synced")
        elif cmd == 'unselect':
            if opts.message:
                print('Ignoring requests first')
                IgnoreCommand(api).perform(args[1:], opts.message)
            UnselectCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'select':
            # Include list of all stagings in short-hand and by full name.
            existing_stagings = api.get_staging_projects_short(None)
            existing_stagings += api.get_staging_projects()
            stagings = []
            requests = []
            for arg in args[1:]:
                # Since requests may be given by either request ID or package
                # name and stagings may include multi-letter special stagings
                # there is no easy way to distinguish between stagings and
                # requests in arguments. Therefore, check if argument is in the
                # list of short-hand and full project name stagings, otherwise
                # consider it a request. This also allows for special stagings
                # with the same name as package, but the staging will be assumed
                # first time around. The current practice seems to be to start a
                # special staging with a capital letter which makes them unique.
                # Lastly, adi stagings are consistently prefixed with adi: which
                # also makes it consistent to distinguish them from request IDs.
                if arg in existing_stagings and arg not in stagings:
                    stagings.append(api.extract_staging_short(arg))
                elif arg not in requests:
                    requests.append(arg)

            if len(stagings) != 1 or len(requests) == 0 or opts.filter_by or opts.group_by:
                if opts.move or opts.filter_from:
                    print('--move and --filter-from must be used with explicit staging and request list')
                    return

                open_requests = api.get_open_requests({'withhistory': 1}, include_nonfree=False)
                if len(open_requests) == 0:
                    print('No open requests to consider')
                    return

                splitter = RequestSplitter(api, open_requests, in_ring=True)

                considerable = splitter.stagings_load(stagings)
                if considerable == 0:
                    print('No considerable stagings on which to act')
                    return

                if opts.merge:
                    splitter.merge()
                if opts.try_strategies:
                    splitter.strategies_try()
                if len(requests) > 0:
                    splitter.strategy_do('requests', requests=requests)
                if opts.strategy:
                    splitter.strategy_do(opts.strategy)
                elif opts.filter_by or opts.group_by:
                    kwargs = {}
                    if opts.filter_by:
                        kwargs['filters'] = opts.filter_by
                    if opts.group_by:
                        kwargs['groups'] = opts.group_by
                    splitter.strategy_do('custom', **kwargs)
                else:
                    if opts.merge:
                        # Merge any none strategies before final none strategy.
                        splitter.merge(strategy_none=True)
                    splitter.strategy_do('none')
                    splitter.strategy_do_non_bootstrapped('none')

                proposal = splitter.proposal
                if len(proposal) == 0:
                    print('Empty proposal')
                    return

                if opts.interactive:
                    with tempfile.NamedTemporaryFile(suffix='.yml') as temp:
                        temp.write(yaml.safe_dump(splitter.proposal, default_flow_style=False) + '\n\n')

                        if len(splitter.requests):
                            temp.write('# remaining requests:\n')
                            for request in splitter.requests:
                                temp.write('#    {}: {}\n'.format(
                                    request.get('id'), request.find('action/target').get('package')))
                            temp.write('\n')

                        temp.write('# move requests between stagings or comment/remove them\n')
                        temp.write('# change the target staging for a group\n')
                        temp.write('# remove the group, requests, staging, or strategy to skip\n')
                        temp.write('# stagings\n')
                        if opts.merge:
                            temp.write('# - mergeable: {}\n'
                                       .format(', '.join(sorted(splitter.stagings_mergeable +
                                                                splitter.stagings_mergeable_none))))
                        temp.write('# - considered: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_considerable))))
                        temp.write('# - remaining: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_available))))
                        temp.flush()

                        editor = os.getenv('EDITOR')
                        if not editor:
                            editor = 'xdg-open'
                        return_code = subprocess.call(editor.split(' ') + [temp.name])

                        proposal = yaml.safe_load(open(temp.name).read())

                        # Filter invalidated groups from proposal.
                        keys = ['group', 'requests', 'staging', 'strategy']
                        for group, info in sorted(proposal.items()):
                            for key in keys:
                                if not info.get(key):
                                    del proposal[group]
                                    break

                print(yaml.safe_dump(proposal, default_flow_style=False))

                print('Accept proposal? [y/n] (y): ', end='')
                if opts.non_interactive:
                    print('y')
                else:
                    response = input().lower()
                    if response != '' and response != 'y':
                        print('Quit')
                        return

                for group, info in sorted(proposal.items()):
                    print('Staging {} in {}'.format(group, info['staging']))

                    # SelectCommand expects strings.
                    request_ids = map(str, info['requests'].keys())
                    target_project = api.prj_from_short(info['staging'])

                    if 'merge' not in info:
                        # Assume that the original splitter_info is desirable
                        # and that this staging is simply manual followup.
                        api.set_splitter_info_in_prj_pseudometa(target_project, info['group'], info['strategy'])

                    SelectCommand(api, target_project) \
                        .perform(request_ids, no_freeze=opts.no_freeze)
            else:
                target_project = api.prj_from_short(stagings[0])
                if opts.add:
                    api.mark_additional_packages(target_project, [opts.add])
                else:
                    filter_from = api.prj_from_short(opts.filter_from) if opts.filter_from else None
                    SelectCommand(api, target_project) \
                        .perform(requests, opts.move,
                                 filter_from, opts.no_freeze)
        elif cmd == 'cleanup_rings':
            CleanupRings(api).perform()
        elif cmd == 'ignore':
            IgnoreCommand(api).perform(args[1:], opts.message)
        elif cmd == 'unignore':
            UnignoreCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'list':
            ListCommand(api).perform(supersede=opts.supersede)
        elif cmd == 'lock':
            lock.hold(opts.message)
        elif cmd == 'adi':
            AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split)
        elif cmd == 'rebuild':
            RebuildCommand(api).perform(args[1:], opts.force)
        elif cmd == 'repair':
            RepairCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'setprio':
            stagings = []
            priority = None

            priorities = ['critical', 'important', 'moderate', 'low']
            for arg in args[1:]:
                if arg in priorities:
                    priority = arg
                else:
                    stagings.append(arg)

            PrioCommand(api).perform(stagings, priority)
        elif cmd == 'supersede':
            SupersedeCommand(api).perform(args[1:])
        elif cmd == 'unlock':
            lock.release(force=True)
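
The --filter-by expressions described in the docstring above are XPath queries evaluated against each request node (as returned by osc api /request/<id> and supplemented with the extra attributes). A minimal standalone sketch of applying such a filter with the standard library follows; the XML snippet and the matches() helper are illustrative only, and the richer expressions using starts-with() or contains() would need a full XPath engine such as lxml:

import xml.etree.ElementTree as ET

# Hypothetical request document, shaped like `osc api /request/<id>` output
# after the plugin adds devel_project/ring attributes to the target node.
REQUEST_XML = """
<request id="1337">
  <action type="submit">
    <target project="openSUSE:Factory" package="yast2-core"
            devel_project="YaST:Head" ring="1-MinimalX"/>
  </action>
  <description>Update to 4.2.0</description>
</request>
"""

def matches(request_xml, xpath):
    """Return True if the XPath filter selects at least one node on the request."""
    root = ET.fromstring(request_xml)
    return len(root.findall(xpath)) > 0

# Attribute-equality filters, as in the docstring examples, work with ElementTree.
print(matches(REQUEST_XML, "./action/target[@devel_project='YaST:Head']"))  # True
print(matches(REQUEST_XML, "./action/target[@ring='1-MinimalX']"))          # True
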
Example #36
0
def do_staging(self, subcmd, opts, *args):
    """${cmd_name}: Commands to work with staging projects

    ${cmd_option_list}

    "accept" will accept all requests in
        openSUSE:Factory:Staging:<LETTER> (into Factory)

    "acheck" will check if it's safe to accept new staging projects
        As openSUSE:Factory is syncing the right package versions between
        /standard, /totest and /snapshot, it's important that the projects
        are clean prior to a checkin round.

    "check" will check if all packages are links without changes

    "cleanup_rings" will try to cleanup rings content and print
        out problems

    "freeze" will freeze the sources of the project's links (not
        affecting the packages actually in it)

    "frozenage" will show when the respective staging project was last frozen

    "list" will pick the requests not in rings

    "select" will add requests to the project

    "unselect" will remove from the project - pushing them back to the backlog

    Usage:
        osc staging accept [--force] [LETTER...]
        osc staging check [--old] REPO
        osc staging cleanup_rings
        osc staging freeze [--no-bootstrap] PROJECT...
        osc staging frozenage PROJECT...
        osc staging list [--supersede]
        osc staging select [--no-freeze] [--move [--from PROJECT]] LETTER REQUEST...
        osc staging unselect REQUEST...
        osc staging repair REQUEST...
    """
    if opts.version:
        self._print_version()

    # verify the argument counts match the commands
    if len(args) == 0:
        raise oscerr.WrongArgs('No command given, see "osc help staging"!')
    cmd = args[0]
    if cmd in ("freeze", "frozenage", "repair"):
        min_args, max_args = 1, None
    elif cmd == "check":
        min_args, max_args = 0, 2
    elif cmd == "select":
        min_args, max_args = 1, None
        if not opts.add:
            min_args = 2
    elif cmd == "unselect":
        min_args, max_args = 1, None
    elif cmd == "adi":
        min_args, max_args = None, None
    elif cmd in ("list", "accept"):
        min_args, max_args = 0, None
    elif cmd in ("cleanup_rings", "acheck"):
        min_args, max_args = 0, 0
    else:
        raise oscerr.WrongArgs("Unknown command: %s" % cmd)
    if len(args) - 1 < min_args:
        raise oscerr.WrongArgs("Too few arguments.")
    if max_args is not None and len(args) - 1 > max_args:
        raise oscerr.WrongArgs("Too many arguments.")

    # Init the OBS access and configuration
    opts.project = self._full_project_name(opts.project)
    opts.apiurl = self.get_api_url()
    opts.verbose = False
    Config(opts.project)

    with OBSLock(opts.apiurl, opts.project):
        api = StagingAPI(opts.apiurl, opts.project)

        # call the respective command and parse args by need
        if cmd == "check":
            prj = args[1] if len(args) > 1 else None
            CheckCommand(api).perform(prj, opts.old)
        elif cmd == "freeze":
            for prj in args[1:]:
                FreezeCommand(api).perform(api.prj_from_letter(prj), copy_bootstrap=opts.bootstrap)
        elif cmd == "frozenage":
            for prj in args[1:]:
                print "%s last frozen %0.1f days ago" % (
                    api.prj_from_letter(prj),
                    api.days_since_last_freeze(api.prj_from_letter(prj)),
                )
        elif cmd == "acheck":
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(
                api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64"
            )
            if version_totest:
                version_openqa = api.load_file_content("%s:Staging" % api.project, "dashboard", "version_totest")
                totest_dirty = api.is_repo_dirty(api.project, "totest")
                print "version_openqa: %s / version_totest: %s / totest_dirty: %s\n" % (
                    version_openqa,
                    version_totest,
                    totest_dirty,
                )
            else:
                print "acheck is unavailable in %s!\n" % (api.project)
        elif cmd == "accept":
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(
                api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64"
            )

            if version_totest is None or opts.force:
                # SLE does not have a totest_version or openqa_version - ignore it
                version_openqa = version_totest
                totest_dirty = False
            else:
                version_openqa = api.load_file_content("%s:Staging" % api.project, "dashboard", "version_totest")
                totest_dirty = api.is_repo_dirty(api.project, "totest")

            if version_openqa == version_totest and not totest_dirty:
                cmd = AcceptCommand(api)
                for prj in args[1:]:
                    if not cmd.perform(api.prj_from_letter(prj), opts.force):
                        return
                    if not opts.no_cleanup:
                        if api.item_exists(api.prj_from_letter(prj)):
                            cmd.cleanup(api.prj_from_letter(prj))
                        if api.item_exists("%s:DVD" % api.prj_from_letter(prj)):
                            cmd.cleanup("%s:DVD" % api.prj_from_letter(prj))
                if opts.project.startswith("openSUSE:"):
                    cmd.accept_other_new()
                    cmd.update_factory_version()
                    if api.item_exists(api.crebuild):
                        cmd.sync_buildfailures()
            else:
                print "Not safe to accept: /totest is not yet synced"
        elif cmd == "unselect":
            UnselectCommand(api).perform(args[1:])
        elif cmd == "select":
            tprj = api.prj_from_letter(args[1])
            if opts.add:
                api.mark_additional_packages(tprj, [opts.add])
            else:
                SelectCommand(api, tprj).perform(args[2:], opts.move, opts.from_, opts.no_freeze)
        elif cmd == "cleanup_rings":
            CleanupRings(api).perform()
        elif cmd == "list":
            ListCommand(api).perform(args[1:], supersede=opts.supersede)
        elif cmd == "adi":
            AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split)
        elif cmd == "repair":
            RepairCommand(api).perform(args[1:])
Example #37
0
    parser.add_argument('-s', '--staging', type=str, default=None,
                        help='staging project')
    parser.add_argument('-p', '--project', type=str, default='openSUSE:Factory',
                        help='project to check (ex. openSUSE:Factory, openSUSE:Leap:15.1)')
    parser.add_argument('-d', '--debug', action='store_true', default=False,
                        help='enable debug information')
    parser.add_argument('-A', '--apiurl', metavar='URL', help='API URL')

    args = parser.parse_args()

    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug

    apiurl = osc.conf.config['apiurl']
    config = Config(apiurl, args.project)
    api = StagingAPI(apiurl, args.project)
    staging_report = InstallChecker(api, config)

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    result = True
    if args.staging:
        result = staging_report.staging(api.prj_from_short(args.staging), force=True)
    else:
        for staging in api.get_staging_projects():
            if api.is_adi_project(staging):
                result = staging_report.staging(staging) and result
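
A detail worth keeping from the loop above: writing result = staging_report.staging(staging) and result keeps the overall result False once any staging fails while still checking the remaining stagings. A tiny standalone sketch of the same accumulation pattern, with a stand-in check function:

def check(name):
    # Stand-in for staging_report.staging(); pretend only 'B' fails.
    return name != 'B'

result = True
for staging in ['A', 'B', 'C']:
    # Calling check() before the 'and' ensures every staging is inspected,
    # even after an earlier failure has already made the overall result False.
    result = check(staging) and result

print(result)  # False: 'B' failed, but 'C' was still checked
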
Example #38
0
class TestApiCalls(unittest.TestCase):
    """
    Tests for various api calls to ensure we return expected content
    """

    def setUp(self):
        """
        Initialize the configuration
        """

        self.obs = OBS()
        Config('openSUSE:Factory')
        self.api = StagingAPI(APIURL, 'openSUSE:Factory')

    def tearDown(self):
        """Clean internal cache"""
        if hasattr(self.api, '_invalidate_all'):
            self.api._invalidate_all()

    def test_ring_packages(self):
        """
        Validate the creation of the rings.
        """
        # our content in the XML files
        # test content for listonly, i.e. the list command
        ring_packages = {
            'elem-ring-0': 'openSUSE:Factory:Rings:0-Bootstrap',
            'elem-ring-1': 'openSUSE:Factory:Rings:0-Bootstrap',
            'elem-ring-mini': 'openSUSE:Factory:Rings:0-Bootstrap',
            'elem-ring-2': 'openSUSE:Factory:Rings:2-TestDVD',
            'git': 'openSUSE:Factory:Rings:2-TestDVD',
            'wine': 'openSUSE:Factory:Rings:1-MinimalX',
        }
        self.assertEqual(ring_packages, self.api.ring_packages_for_links)

        # test content for real usage
        ring_packages = {
            'elem-ring-0': 'openSUSE:Factory:Rings:0-Bootstrap',
            'elem-ring-1': 'openSUSE:Factory:Rings:1-MinimalX',
            'elem-ring-mini': 'openSUSE:Factory:Rings:0-Bootstrap',
            'elem-ring-2': 'openSUSE:Factory:Rings:2-TestDVD',
            'git': 'openSUSE:Factory:Rings:2-TestDVD',
            'wine': 'openSUSE:Factory:Rings:1-MinimalX',
        }
        self.assertEqual(ring_packages, self.api.ring_packages)

    @unittest.skip("no longer approving non-ring packages")
    def test_dispatch_open_requests(self):
        """
        Test dispatching and closure of non-ring packages
        """

        # Get rid of open requests
        self.api.dispatch_open_requests()
        # Check that we tried to close it
        self.assertEqual(httpretty.last_request().method, 'POST')
        self.assertEqual(httpretty.last_request().querystring[u'cmd'], [u'changereviewstate'])
        # Try it again
        self.api.dispatch_open_requests()
        # This time there should be nothing to close
        self.assertEqual(httpretty.last_request().method, 'GET')

    def test_pseudometa_get_prj(self):
        """
        Test getting project metadata from YAML in project description
        """

        # Try to get data from project that has no metadata
        data = self.api.get_prj_pseudometa('openSUSE:Factory:Staging:A')
        # Should be empty, but contain structure to work with
        self.assertEqual(data, {'requests': []})
        # Add some sample data
        rq = {'id': '123', 'package': 'test-package'}
        data['requests'].append(rq)
        # Save them and read them back
        self.api.set_prj_pseudometa('openSUSE:Factory:Staging:A', data)
        test_data = self.api.get_prj_pseudometa('openSUSE:Factory:Staging:A')
        # Verify that we got back the same data
        self.assertEqual(data, test_data)

    def test_list_projects(self):
        """
        List projects and their content
        """

        # Prepare expected results
        data = []
        for prj in self.obs.staging_project:
            data.append('openSUSE:Factory:Staging:' + prj)

        # Compare the results
        self.assertEqual(data, self.api.get_staging_projects())

    def test_open_requests(self):
        """
        Test searching for open requests
        """

        requests = []

        # get the open requests
        requests = self.api.get_open_requests()

        # Compare the results; we only care that we got exactly one request, not its content
        self.assertEqual(1, len(requests))

    def test_get_package_information(self):
        """
        Test if we get proper project, name and revision from the staging information
        """

        package_info = {
            'dir_srcmd5': '751efeae52d6c99de48164088a33d855',
            'project': 'home:Admin',
            'rev': '7b98ac01b8071d63a402fa99dc79331c',
            'srcmd5': '7b98ac01b8071d63a402fa99dc79331c',
            'package': 'wine'
        }

        # Compare the result against the expected package information
        self.assertEqual(
            package_info,
            self.api.get_package_information('openSUSE:Factory:Staging:B', 'wine'))

    def test_request_id_package_mapping(self):
        """
        Test whether we can get correct id for sr in staging project
        """

        prj = 'openSUSE:Factory:Staging:B'
        # Get rq
        num = self.api.get_request_id_for_package(prj, 'wine')
        self.assertEqual(333, num)
        # Get package name
        self.assertEqual('wine', self.api.get_package_for_request_id(prj, num))

    def test_rm_from_prj(self):
        prj = 'openSUSE:Factory:Staging:B'
        pkg = 'wine'

        full_name = prj + '/' + pkg

        # Verify package is there
        self.assertTrue(full_name in self.obs.links)

        # Get rq number
        num = self.api.get_request_id_for_package(prj, pkg)

        # Delete the package
        self.api.rm_from_prj(prj, package='wine')

        # Verify package is not there
        self.assertTrue(full_name not in self.obs.links)

        # RQ is gone
        self.assertEqual(None, self.api.get_request_id_for_package(prj, pkg))
        self.assertEqual(None, self.api.get_package_for_request_id(prj, num))

        # Verify that review is closed
        self.assertEqual('accepted', self.obs.requests[str(num)]['review'])
        self.assertEqual('new', self.obs.requests[str(num)]['request'])

    def test_rm_from_prj_2(self):
        # Try the same with request number
        prj = 'openSUSE:Factory:Staging:B'
        pkg = 'wine'

        full_name = prj + '/' + pkg

        # Get rq number
        num = self.api.get_request_id_for_package(prj, pkg)

        # Delete the package
        self.api.rm_from_prj(prj, request_id=num)

        # Verify package is not there
        self.assertTrue(full_name not in self.obs.links)

        # RQ is gone
        self.assertEqual(None, self.api.get_request_id_for_package(prj, pkg))
        self.assertEqual(None, self.api.get_package_for_request_id(prj, num))

        # Verify that review is closed
        self.assertEqual('accepted', self.obs.requests[str(num)]['review'])
        self.assertEqual('new', self.obs.requests[str(num)]['request'])

    def test_add_sr(self):
        prj = 'openSUSE:Factory:Staging:A'
        rq = '123'

        # Running it twice shouldn't change anything
        for i in range(2):
            # Add rq to the project
            self.api.rq_to_prj(rq, prj)
            # Verify that review is there
            self.assertEqual('new', self.obs.requests[rq]['review'])
            self.assertEqual('review', self.obs.requests[rq]['request'])
            self.assertEqual(self.api.get_prj_pseudometa('openSUSE:Factory:Staging:A'),
                             {'requests': [{'id': 123, 'package': 'gcc', 'author': 'Admin'}]})

    def test_create_package_container(self):
        """Test if the uploaded _meta is correct."""

        self.api.create_package_container('openSUSE:Factory:Staging:B', 'wine')
        self.assertEqual(httpretty.last_request().method, 'PUT')
        self.assertEqual(httpretty.last_request().body, '<package name="wine"><title/><description/></package>')
        self.assertEqual(httpretty.last_request().path, '/source/openSUSE:Factory:Staging:B/wine/_meta')

        self.api.create_package_container('openSUSE:Factory:Staging:B', 'wine', disable_build=True)
        self.assertEqual(httpretty.last_request().method, 'PUT')
        self.assertEqual(httpretty.last_request().body, '<package name="wine"><title /><description /><build><disable /></build></package>')
        self.assertEqual(httpretty.last_request().path, '/source/openSUSE:Factory:Staging:B/wine/_meta')

    def test_review_handling(self):
        """Test whether accepting/creating reviews behaves correctly."""

        # Add review
        self.api.add_review('123', by_project='openSUSE:Factory:Staging:A')
        self.assertEqual(httpretty.last_request().method, 'POST')
        self.assertEqual(httpretty.last_request().querystring[u'cmd'], [u'addreview'])
        # Try to re-add it, which shouldn't do anything
        self.api.add_review('123', by_project='openSUSE:Factory:Staging:A')
        self.assertEqual(httpretty.last_request().method, 'GET')
        # Accept review
        self.api.set_review('123', 'openSUSE:Factory:Staging:A')
        self.assertEqual(httpretty.last_request().method, 'POST')
        self.assertEqual(httpretty.last_request().querystring[u'cmd'], [u'changereviewstate'])
        # Trying to accept it again shouldn't do anything
        self.api.set_review('123', 'openSUSE:Factory:Staging:A')
        self.assertEqual(httpretty.last_request().method, 'GET')
        # But we should be able to reopen it
        self.api.add_review('123', by_project='openSUSE:Factory:Staging:A')
        self.assertEqual(httpretty.last_request().method, 'POST')
        self.assertEqual(httpretty.last_request().querystring[u'cmd'], [u'addreview'])

    def test_prj_from_letter(self):

        # Verify it works
        self.assertEqual(self.api.prj_from_letter('openSUSE:Factory'), 'openSUSE:Factory')
        self.assertEqual(self.api.prj_from_letter('A'), 'openSUSE:Factory:Staging:A')

    def test_frozen_mtime(self):
        """Test frozen mtime."""

        # Testing frozen mtime
        self.assertTrue(self.api.days_since_last_freeze('openSUSE:Factory:Staging:A') > 8)
        self.assertTrue(self.api.days_since_last_freeze('openSUSE:Factory:Staging:B') < 1)

        # U == unfrozen
        self.assertTrue(self.api.days_since_last_freeze('openSUSE:Factory:Staging:U') > 1000)

    def test_frozen_enough(self):
        """Test frozen enough."""

        # Testing frozen mtime
        self.assertEqual(self.api.prj_frozen_enough('openSUSE:Factory:Staging:B'), True)
        self.assertEqual(self.api.prj_frozen_enough('openSUSE:Factory:Staging:A'), False)

        # U == unfrozen
        self.assertEqual(self.api.prj_frozen_enough('openSUSE:Factory:Staging:U'), False)

    def test_move(self):
        """Test package movement."""

        init_data = self.api.get_package_information('openSUSE:Factory:Staging:B', 'wine')
        self.api.move_between_project('openSUSE:Factory:Staging:B', 333, 'openSUSE:Factory:Staging:A')
        test_data = self.api.get_package_information('openSUSE:Factory:Staging:A', 'wine')
        self.assertEqual(init_data, test_data)
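
The assertions in these tests inspect httpretty.last_request(), which works because the OBS() fixture created in setUp() registers mocked OBS endpoints before any API call is made. A standalone sketch of that general pattern, using a made-up endpoint rather than the fixture's real routes:

import httpretty
import urllib.request

@httpretty.activate
def demo():
    # Hypothetical endpoint; the real OBS() fixture registers many OBS routes.
    httpretty.register_uri(
        httpretty.GET, 'https://api.example.test/request/123',
        body='<request id="123"/>', content_type='application/xml')

    urllib.request.urlopen('https://api.example.test/request/123').read()

    # The same style of assertion used throughout TestApiCalls.
    assert httpretty.last_request().method == 'GET'
    assert httpretty.last_request().path == '/request/123'

if __name__ == '__main__':
    demo()
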
Example #39
0
                        help='project to check (ex. openSUSE:Factory, openSUSE:Leap:15.1)')
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        default=False,
                        help='enable debug information')
    parser.add_argument('-A', '--apiurl', metavar='URL', help='API URL')

    args = parser.parse_args()

    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug

    apiurl = osc.conf.config['apiurl']
    config = Config(apiurl, args.project)
    api = StagingAPI(apiurl, args.project)
    staging_report = InstallChecker(api, config)

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    result = True
    if args.staging:
        result = staging_report.staging(api.prj_from_short(args.staging),
                                        force=True)
    else:
        for staging in api.get_staging_projects():
            if api.is_adi_project(staging):
                result = staging_report.staging(staging) and result
Example #40
0
class ToTestBase(object):
    """Base class to store the basic interface"""

    def __init__(self, project, dryrun = False):
        self.project = project
        self.dryrun = dryrun
        self.api = StagingAPI(osc.conf.config['apiurl'], project='openSUSE:%s' % project)
        self.known_failures = self.known_failures_from_dashboard(project)

    def openqa_group(self):
        return self.project

    def iso_prefix(self):
        return self.project

    def jobs_num(self):
        return 70

    def current_version(self):
        return self.release_version()

    def binaries_of_product(self, project, product):
        url = self.api.makeurl(['build', project, 'images', 'local', product])
        try:
            f = self.api.retried_GET(url)
        except urllib2.HTTPError:
            return []

        ret = []
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            ret.append(binary.get('filename'))

        return ret

    def get_current_snapshot(self):
        """Return the current snapshot in :ToTest"""

        # for now we hardcode all kinds of things
        for binary in self.binaries_of_product('openSUSE:%s:ToTest' % self.project, '_product:openSUSE-cd-mini-%s' % self.arch()):
            result = re.match(r'openSUSE-%s-NET-.*-Snapshot(.*)-Media.iso' % self.iso_prefix(),
                              binary)
            if result:
                return result.group(1)

        return None


    def ftp_build_version(self, tree):
        for binary in self.binaries_of_product('openSUSE:%s' % self.project, tree):
            result = re.match(r'openSUSE.*Build(.*)-Media1.report', binary)
            if result:
                return result.group(1)
        raise Exception("can't find %s version" % self.project)

    def release_version(self):
        url = self.api.makeurl(['build', 'openSUSE:%s' % self.project, 'standard', self.arch(),
                                '_product:openSUSE-release'])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            binary = binary.get('filename', '')
            result = re.match(r'.*-([^-]*)-[^-]*.src.rpm', binary)
            if result:
                return result.group(1)

        raise Exception("can't find %s version" % self.project)

    def find_openqa_results(self, snapshot):
        """Return the openqa jobs of a given snapshot and filter out the
        cloned jobs

        """

        url = makeurl('https://openqa.opensuse.org', ['api', 'v1', 'jobs'], { 'group': self.openqa_group(), 'build': snapshot } )
        f = self.api.retried_GET(url)
        jobs = []
        for job in json.load(f)['jobs']:
            if job['clone_id'] or job['result'] == 'obsoleted':
                continue
            job['name'] = job['name'].replace(snapshot, '')
            jobs.append(job)
        return jobs

    def _result2str(self, result):
        if result == QA_INPROGRESS:
            return 'inprogress'
        elif result == QA_FAILED:
            return 'failed'
        else:
            return 'passed'

    def find_failed_module(self, testmodules):
        # print json.dumps(testmodules, sort_keys=True, indent=4)
        for module in testmodules:
            if module['result'] != 'failed':
                continue
            flags = module['flags']
            if 'fatal' in flags or 'important' in flags:
                return module['name']
            logger.info('%s %s %s' % (module['name'], module['result'], module['flags']))

    def overall_result(self, snapshot):
        """Analyze the openQA jobs of a given snapshot Returns a QAResult"""

        if snapshot is None:
            return QA_FAILED

        jobs = self.find_openqa_results(snapshot)

        if len(jobs) < self.jobs_num():  # not yet scheduled
            logger.warning('we have only %s jobs' % len(jobs))
            return QA_INPROGRESS

        number_of_fails = 0
        in_progress = False
        machines = []
        for job in jobs:
            machines.append(job['settings']['MACHINE'])
            # print json.dumps(job, sort_keys=True, indent=4)
            if job['result'] in ('failed', 'incomplete', 'skipped', 'user_cancelled', 'obsoleted'):
                jobname = job['name'] + '@' + job['settings']['MACHINE']
                # Record machines we have tests for
                if jobname in self.known_failures:
                    self.known_failures.remove(jobname)
                    continue
                number_of_fails += 1
                # print json.dumps(job, sort_keys=True, indent=4), jobname
                failedmodule = self.find_failed_module(job['modules'])
                url = 'https://openqa.opensuse.org/tests/%s' % job['id']
                print (jobname, url, failedmodule, job['retry_avbl'])
                # if number_of_fails < 3: continue
            elif job['result'] == 'passed' or job['result'] == 'softfailed':
                continue
            elif job['result'] == 'none':
                if job['state'] != 'cancelled':
                    in_progress = True
            else:
                raise Exception(job['result'])

        if number_of_fails > 0:
            return QA_FAILED

        if in_progress:
            return QA_INPROGRESS

        machines = list(set(machines))
        for item in machines:
            for item2 in self.known_failures:
                if item2.split('@')[1] == item:
                    logger.info('now passing %s'%item2)
        return QA_PASSED

    def all_repos_done(self, project, codes=None):
        """Check the build result of the project and only return True if all
        repos of that project are either published or unpublished

        """

        # coolo's experience says that 'finished' won't be
        # sufficient here, so don't try to add it :-)
        codes = ['published', 'unpublished'] if not codes else codes

        url = self.api.makeurl(['build', project, '_result'], {'code': 'failed'})
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        ready = True
        for repo in root.findall('result'):
            # ignore ports. 'factory' is used by arm for repos that are not
            # meant to use the totest manager.
            if repo.get('repository') in ('ports', 'factory', 'images_staging'):
                continue
            # ignore 32bit for now. We're only interested in aarch64 here
            if repo.get('arch') in ('armv6l', 'armv7l'):
                continue
            if repo.get('dirty', '') == 'true':
                logger.info('%s %s %s -> %s'%(repo.get('project'), repo.get('repository'), repo.get('arch'), 'dirty'))
                ready = False
            if repo.get('code') not in codes:
                logger.info('%s %s %s -> %s'%(repo.get('project'), repo.get('repository'), repo.get('arch'), repo.get('code')))
                ready = False
        return ready

    def maxsize_for_package(self, package):
        if re.match(r'.*-mini-.*', package):
            return 737280000  # a CD needs to match

        if re.match(r'.*-dvd5-.*', package):
            return 4700372992  # a DVD needs to match

        if re.match(r'.*-image-livecd-x11.*', package):
            return 681574400  # not a full CD

        if re.match(r'.*-image-livecd.*', package):
            return 999999999  # a GB stick

        if re.match(r'.*-dvd9-dvd-.*', package):
            return 8539996159

        if package.startswith('_product:openSUSE-ftp-ftp-'):
            return None

        if package == '_product:openSUSE-Addon-NonOss-ftp-ftp-i586_x86_64':
            return None

        raise Exception('No maxsize for {}'.format(package))

    def package_ok(self, project, package, repository, arch):
        """Checks one package in a project and returns True if it's succeeded

        """

        query = {'package': package, 'repository': repository, 'arch': arch}

        url = self.api.makeurl(['build', project, '_result'], query)
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for repo in root.findall('result'):
            status = repo.find('status')
            if status.get('code') != 'succeeded':
                logger.info('%s %s %s %s -> %s'%(project, package, repository, arch, status.get('code')))
                return False

        maxsize = self.maxsize_for_package(package)
        if not maxsize:
            return True

        url = self.api.makeurl(['build', project, repository, arch, package])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            if not binary.get('filename', '').endswith('.iso'):
                continue
            isosize = int(binary.get('size', 0))
            if isosize > maxsize:
                logger.error('%s %s %s %s: %s'%(project, package, repository, arch, 'too large by %s bytes' % (isosize-maxsize)))
                return False

        return True

    def factory_snapshottable(self):
        """Check various conditions required for factory to be snapshotable

        """

        if not self.all_repos_done('openSUSE:%s' % self.project):
            return False

        for product in self.ftp_products + self.main_products:
            if not self.package_ok('openSUSE:%s' % self.project, product, 'images', 'local'):
                return False

            if len(self.livecd_products):

                if not self.all_repos_done('openSUSE:%s:Live' % self.project):
                    return False

                for arch in ['i586', 'x86_64' ]:
                    for product in self.livecd_products:
                        if not self.package_ok('openSUSE:%s:Live' % self.project, product, 'standard', arch):
                            return False

        return True

    def release_package(self, project, package, set_release=None):
        query = {'cmd': 'release'}

        if set_release:
            query['setrelease'] = set_release

        # FIXME: make configurable. openSUSE:Factory:ARM currently has multiple
        # repos with release targets, so obs needs to know which one to release
        if project == 'openSUSE:Factory:ARM':
            query['repository'] = 'images'

        baseurl = ['source', project, package]

        url = self.api.makeurl(baseurl, query=query)
        if self.dryrun:
            logger.info("release %s/%s (%s)"%(project, package, set_release))
        else:
            self.api.retried_POST(url)

    def update_totest(self, snapshot):
        logger.info('Updating snapshot %s' % snapshot)
        if not self.dryrun:
            self.api.switch_flag_in_prj('openSUSE:%s:ToTest' % self.project, flag='publish', state='disable')

        for product in self.ftp_products:
            self.release_package('openSUSE:%s' % self.project, product)

        for cd in self.livecd_products:
            self.release_package('openSUSE:%s:Live' % self.project, cd, set_release='Snapshot%s' % snapshot)

        for cd in self.main_products:
            self.release_package('openSUSE:%s' % self.project, cd, set_release='Snapshot%s' % snapshot)

    def publish_factory_totest(self):
        logger.info('Publish ToTest')
        if not self.dryrun:
            self.api.switch_flag_in_prj('openSUSE:%s:ToTest' % self.project, flag='publish', state='enable')

    def totest_is_publishing(self):
        """Find out if the publishing flag is set in totest's _meta"""

        url = self.api.makeurl(['source', 'openSUSE:%s:ToTest' % self.project, '_meta'])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        if not root.find('publish'):  # default true
            return True

        for flag in root.find('publish'):
            if flag.get('repository', None) or flag.get('arch', None):
                continue
            if flag.tag == 'enable':
                return True
        return False

    def totest(self):
        current_snapshot = self.get_current_snapshot()
        new_snapshot = self.current_version()

        current_result = self.overall_result(current_snapshot)
        current_qa_version = self.api.load_file_content("%s:Staging" % self.api.project, "dashboard", "version_totest")

        logger.info('current_snapshot %s: %s'%(current_snapshot, self._result2str(current_result)))
        logger.debug('new_snapshot %s', new_snapshot)
        logger.debug('current_qa_version %s', current_qa_version)

        snapshotable = self.factory_snapshottable()
        logger.debug("snapshotable: %s", snapshotable)
        can_release = (current_result != QA_INPROGRESS and snapshotable)

        # not overwriting
        if new_snapshot == current_snapshot:
            logger.debug("no change in snapshot version")
            can_release = False
        elif not self.all_repos_done('openSUSE:%s:ToTest' % self.project):
            logger.debug("not all repos done, can't release")
            # the repos have to be done, otherwise we better not touch them with a new release
            can_release = False

        can_publish = (current_result == QA_PASSED)

        # already published
        if self.totest_is_publishing():
            logger.debug("totest already publishing")
            can_publish = False

        if can_publish:
            if current_qa_version == current_snapshot:
                self.publish_factory_totest()
                self.write_version_to_dashboard("snapshot", current_snapshot)
                can_release = False  # we have to wait
            else:
                # We reached a very bad status: openQA testing is 'done', but not of the same version
                # currently in :ToTest. This can happen when 'releasing' the product failed
                raise Exception("Publishing stopped: tested version (%s) does not match :ToTest version (%s)" 
                    % (current_qa_version, current_snapshot))

        if can_release:
            self.update_totest(new_snapshot)
            self.write_version_to_dashboard("totest", new_snapshot)

    def release(self):
        new_snapshot = self.current_version()
        self.update_totest(new_snapshot)

    def known_failures_from_dashboard(self, project):
        known_failures = []
        if self.project == "Factory:PowerPC":
            project = "Factory"
        else:
            project = self.project

        url = self.api.makeurl(['source', 'openSUSE:%s:Staging' % project, 'dashboard', 'known_failures'])
        f = self.api.retried_GET(url)
        for line in f:
            if not line[0] == '#':
                known_failures.append(line.strip())
        return known_failures

    def write_version_to_dashboard(self, target, version):
        if not self.dryrun:
            url = self.api.makeurl(['source', 'openSUSE:%s:Staging' % self.project, 'dashboard', 'version_%s' % target])
            osc.core.http_PUT(url + '?comment=Update+version', data=version)
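
ToTestBase leaves arch() and the product lists (ftp_products, main_products, livecd_products) to concrete subclasses. A hedged sketch of what such a subclass could look like, reusing the ToTestBase definition above; the class name and product names are placeholders, not taken from the real totest manager:

class ToTestExample(ToTestBase):
    """Illustrative subclass; the values below are placeholders."""

    # Product containers that the base class iterates over in
    # factory_snapshottable() and update_totest().
    ftp_products = ['_product:openSUSE-ftp-ftp-i586_x86_64']
    main_products = ['_product:openSUSE-dvd5-dvd-i586_x86_64']
    livecd_products = []

    def arch(self):
        # Used by the base class when composing build and release URLs.
        return 'x86_64'

Assuming osc.conf has been initialized, ToTestExample('Factory').totest() would then run one check/publish cycle using the logic defined in the base class.
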
Example #41
0
 def setup(self, project):
     self.project = ToTest(project, self.apiurl)
     self.api = StagingAPI(self.apiurl, project=project)
Example #42
0
def do_staging(self, subcmd, opts, *args):
    """${cmd_name}: Commands to work with staging projects

    "accept" will accept all requests in
        openSUSE:Factory:Staging:<LETTER> (into Factory)

    "check" will check if all packages are links without changes

    "cleanup_rings" will try to cleanup rings content and print
        out problems

    "freeze" will freeze the sources of the project's links (not
        affecting the packages actually in it)

    "list" will pick the requests not in rings

    "select" will add requests to the project

    "unselect" will remove from the project - pushing them back to the backlog

    Usage:
        osc staging accept LETTER
        osc staging check [--old] REPO
        osc staging cleanup_rings
        osc staging freeze PROJECT...
        osc staging list
        osc staging select [--no-freeze] [--move [--from PROJECT]] LETTER REQUEST...
        osc staging unselect REQUEST...
    """
    if opts.version:
        self._print_version()

    # verify the argument counts match the commands
    if len(args) == 0:
        raise oscerr.WrongArgs('No command given, see "osc help staging"!')
    cmd = args[0]
    if cmd in ('accept', 'freeze'):
        min_args, max_args = 1, None
    elif cmd == 'check':
        min_args, max_args = 0, 2
    elif cmd == 'select':
        min_args, max_args = 1, None
        if not opts.add:
            min_args = 2
    elif cmd == 'unselect':
        min_args, max_args = 1, None
    elif cmd == 'adi':
        min_args, max_args = None, None
    elif cmd in ('list', 'cleanup_rings'):
        min_args, max_args = 0, 0
    else:
        raise oscerr.WrongArgs('Unknown command: %s' % cmd)
    if len(args) - 1 < min_args:
        raise oscerr.WrongArgs('Too few arguments.')
    if max_args is not None and len(args) - 1 > max_args:
        raise oscerr.WrongArgs('Too many arguments.')

    # Init the OBS access and configuration
    opts.project = self._full_project_name(opts.project)
    opts.apiurl = self.get_api_url()
    opts.verbose = False
    Config(opts.project)

    with OBSLock(opts.apiurl, opts.project):
        api = StagingAPI(opts.apiurl, opts.project)

        # call the respective command and parse args by need
        if cmd == 'check':
            prj = args[1] if len(args) > 1 else None
            CheckCommand(api).perform(prj, opts.old)
        elif cmd == 'freeze':
            for prj in args[1:]:
                FreezeCommand(api).perform(api.prj_from_letter(prj))
        elif cmd == 'accept':
            cmd = AcceptCommand(api)
            for prj in args[1:]:
                if not cmd.perform(api.prj_from_letter(prj)):
                    return
            cmd.accept_other_new()
            cmd.update_factory_version()
            if api.item_exists(api.crebuild):
                cmd.sync_buildfailures()
        elif cmd == 'unselect':
            UnselectCommand(api).perform(args[1:])
        elif cmd == 'select':
            tprj = api.prj_from_letter(args[1])
            if opts.add:
                api.mark_additional_packages(tprj, [opts.add])
            else:
                SelectCommand(api, tprj).perform(args[2:], opts.move,
                                           opts.from_, opts.no_freeze)
        elif cmd == 'cleanup_rings':
            CleanupRings(api).perform()
        elif cmd == 'list':
            ListCommand(api).perform()
        elif cmd == 'adi':
            AdiCommand(api).perform(args[1:])
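The snippet above validates per-command argument counts before dispatching. Below is a small standalone sketch of that check; it is not part of the original plugin, the limits mirror the snippet, and the 'select' --add and 'adi' special cases are simplified.

from osc import oscerr

# (min, max) positional-argument limits per subcommand; None means "no limit".
ARG_LIMITS = {
    'accept': (1, None),
    'freeze': (1, None),
    'check': (0, 2),
    'select': (2, None),
    'unselect': (1, None),
    'adi': (0, None),
    'list': (0, 0),
    'cleanup_rings': (0, 0),
}

def validate_staging_args(args):
    if len(args) == 0:
        raise oscerr.WrongArgs('No command given, see "osc help staging"!')
    cmd = args[0]
    if cmd not in ARG_LIMITS:
        raise oscerr.WrongArgs('Unknown command: %s' % cmd)
    min_args, max_args = ARG_LIMITS[cmd]
    if len(args) - 1 < min_args:
        raise oscerr.WrongArgs('Too few arguments.')
    if max_args is not None and len(args) - 1 > max_args:
        raise oscerr.WrongArgs('Too many arguments.')
    return cmd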
Example #43
 def __init__(self, project, dryrun=False):
     self.project = project
     self.dryrun = dryrun
     self.api = StagingAPI(osc.conf.config['apiurl'], project='openSUSE:%s' % project)
     self.known_failures = self.known_failures_from_dashboard(project)
Example #44
    def do_update_and_solve(self, subcmd, opts):
        """${cmd_name}: update and solve for given scope

        ${cmd_usage}
        ${cmd_option_list}
        """

        if opts.staging:
            match = re.match('(.*):Staging:(.*)', opts.staging)
            opts.scope = ['staging:' + match.group(2)]
            if opts.project:
                raise ValueError('--staging and --project conflict')
            opts.project = match.group(1)
        elif not opts.project:
            raise ValueError('project is required')
        elif not opts.scope:
            opts.scope = ['all']

        apiurl = conf.config['apiurl']
        Config(apiurl, opts.project)
        target_config = conf.config[opts.project]

        # Store target project as opts.project will contain subprojects.
        target_project = opts.project

        api = StagingAPI(apiurl, target_project)

        main_repo = target_config['main-repo']

        # used by product converter
        # these need to be kept in sync with OBS config
        if apiurl.find('suse.de') > 0:
            os.environ['OBS_NAME'] = 'build.suse.de'
        if apiurl.find('opensuse.org') > 0:
            os.environ['OBS_NAME'] = 'build.opensuse.org'

        # special case for all
        if opts.scope == ['all']:
            opts.scope = target_config.get('pkglistgen-scopes', 'target').split(' ')

        self.error_occured = False

        def solve_project(project, scope):
            try:
                self.tool.reset()
                self.tool.dry_run = self.options.dry
                if self.tool.update_and_solve_target(api, target_project, target_config, main_repo,
                                project=project, scope=scope, force=opts.force,
                                no_checkout=opts.no_checkout,
                                only_release_packages=opts.only_release_packages,
                                stop_after_solve=opts.stop_after_solve):
                    self.error_occured = True
            except Exception:
                # Print the exception, but continue, to prevent problems affecting one
                # project from killing the whole process. The downside is that a common
                # error will be duplicated for each project. Common exceptions could
                # be excluded if a set list is determined, but that is likely not
                # practical.
                traceback.print_exc()
                self.error_occured = True

        for scope in opts.scope:
            if scope.startswith('staging:'):
                letter = re.match('staging:(.*)', scope).group(1)
                solve_project(api.prj_from_short(letter), 'staging')
            elif scope == 'target':
                solve_project(target_project, scope)
            elif scope == 'rings':
                solve_project(api.rings[1], scope)
            elif scope == 'staging':
                letters = api.get_staging_projects_short()
                for letter in letters:
                    solve_project(api.prj_from_short(letter), scope)
            else:
                raise ValueError('scope "{}" must be one of: {}'.format(scope, ', '.join(self.SCOPES)))

        return self.error_occured
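The --staging handling above splits a full staging project name into the target project plus a staging:<letter> scope, which the scope loop at the end resolves back through api.prj_from_short(). A small illustrative helper, assuming the same PROJECT:Staging:<letter> naming convention:

import re

def staging_to_scope(staging_project):
    """Split 'PROJECT:Staging:X' into (target_project, scope), as done above."""
    match = re.match('(.*):Staging:(.*)', staging_project)
    if not match:
        raise ValueError('not a staging project: %s' % staging_project)
    return match.group(1), 'staging:' + match.group(2)

# e.g. ('openSUSE:Factory', 'staging:A')
print(staging_to_scope('openSUSE:Factory:Staging:A'))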
Example #45
class StagingWorkflow(object):
    def __init__(self, project=PROJECT):
        """
        Initialize the configuration
        """
        THIS_DIR = os.path.dirname(os.path.abspath(__file__))
        oscrc = os.path.join(THIS_DIR, 'test.oscrc')

        # set to None so the destructor returns early in case of exceptions
        self.api = None
        self.apiurl = APIURL
        self.project = project
        self.projects = {}
        self.requests = []
        self.groups = []
        self.users = []
        logging.basicConfig()

        # clear cache from other tests - otherwise the VCR is replayed depending
        # on test order, which can be harmful
        memoize_session_reset()

        osc.core.conf.get_config(override_conffile=oscrc,
                                 override_no_keyring=True,
                                 override_no_gnome_keyring=True)
        os.environ['OSC_CONFIG'] = oscrc

        if os.environ.get('OSC_DEBUG'):
            osc.core.conf.config['debug'] = 1

        CacheManager.test = True
        # disable caching, the TTLs break any reproducibility
        Cache.CACHE_DIR = None
        Cache.PATTERNS = {}
        Cache.init()
        self.setup_remote_config()
        self.load_config()
        self.api = StagingAPI(APIURL, project)

    def load_config(self, project=None):
        if project is None:
            project = self.project
        self.config = Config(APIURL, project)

    def create_attribute_type(self, namespace, name, values=None):
        meta = """
        <namespace name='{}'>
            <modifiable_by user='******'/>
        </namespace>""".format(namespace)
        url = osc.core.makeurl(APIURL, ['attribute', namespace, '_meta'])
        osc.core.http_PUT(url, data=meta)

        meta = "<definition name='{}' namespace='{}'><description/>".format(
            name, namespace)
        if values:
            meta += "<count>{}</count>".format(values)
        meta += "<modifiable_by role='maintainer'/></definition>"
        url = osc.core.makeurl(APIURL, ['attribute', namespace, name, '_meta'])
        osc.core.http_PUT(url, data=meta)

    def setup_remote_config(self):
        self.create_target()
        self.create_attribute_type('OSRT', 'Config', 1)

        config = {
            'overridden-by-local': 'remote-nope',
            'staging-group': 'factory-staging',
            'remote-only': 'remote-indeed',
        }
        self.remote_config_set(config, replace_all=True)

    def remote_config_set(self, config, replace_all=False):
        if not replace_all:
            config_existing = Config.get(self.apiurl, self.project)
            config_existing.update(config)
            config = config_existing

        config_lines = []
        for key, value in config.items():
            config_lines.append(f'{key} = {value}')

        attribute_value_save(APIURL, self.project, 'Config',
                             '\n'.join(config_lines))

    def create_group(self, name, users=[]):

        meta = """
        <group>
          <title>{}</title>
        </group>
        """.format(name)

        if len(users):
            root = ET.fromstring(meta)
            persons = ET.SubElement(root, 'person')
            for user in users:
                ET.SubElement(persons, 'person', {'userid': user})
            meta = ET.tostring(root)

        if name not in self.groups:
            self.groups.append(name)
        url = osc.core.makeurl(APIURL, ['group', name])
        osc.core.http_PUT(url, data=meta)

    def create_user(self, name):
        if name in self.users: return
        meta = """
        <person>
          <login>{}</login>
          <email>{}@example.com</email>
          <state>confirmed</state>
        </person>
        """.format(name, name)
        self.users.append(name)
        url = osc.core.makeurl(APIURL, ['person', name])
        osc.core.http_PUT(url, data=meta)
        url = osc.core.makeurl(APIURL, ['person', name],
                               {'cmd': 'change_password'})
        osc.core.http_POST(url, data='opensuse')
        home_project = 'home:' + name
        self.projects[home_project] = Project(home_project, create=False)

    def create_target(self):
        if self.projects.get('target'): return
        self.create_user('staging-bot')
        self.create_group('factory-staging', users=['staging-bot'])
        p = Project(name=self.project,
                    reviewer={'groups': ['factory-staging']})
        self.projects['target'] = p
        self.projects[self.project] = p

        url = osc.core.makeurl(APIURL, ['staging', self.project, 'workflow'])
        data = "<workflow managers='factory-staging'/>"
        osc.core.http_POST(url, data=data)
        # creates A and B as well
        self.projects['staging:A'] = Project(self.project + ':Staging:A',
                                             create=False)
        self.projects['staging:B'] = Project(self.project + ':Staging:B',
                                             create=False)

    def setup_rings(self):
        self.create_target()
        self.projects['ring0'] = Project(name=self.project +
                                         ':Rings:0-Bootstrap')
        self.projects['ring1'] = Project(name=self.project +
                                         ':Rings:1-MinimalX')
        target_wine = Package(name='wine', project=self.projects['target'])
        target_wine.create_commit()
        self.create_link(target_wine, self.projects['ring1'])

    def create_package(self, project, package):
        project = self.create_project(project)
        return Package(name=package, project=project)

    def create_link(self, source_package, target_project, target_package=None):
        if not target_package:
            target_package = source_package.name
        target_package = Package(name=target_package, project=target_project)
        url = self.api.makeurl(
            ['source', target_project.name, target_package.name, '_link'])
        osc.core.http_PUT(url,
                          data='<link project="{}" package="{}"/>'.format(
                              source_package.project.name,
                              source_package.name))
        return target_package

    def create_project(self,
                       name,
                       reviewer={},
                       maintainer={},
                       project_links=[]):
        if isinstance(name, Project):
            return name
        if name in self.projects:
            return self.projects[name]
        self.projects[name] = Project(name,
                                      reviewer=reviewer,
                                      maintainer=maintainer,
                                      project_links=project_links)
        return self.projects[name]

    def submit_package(self, package=None, project=None):
        if not project:
            project = self.project
        request = Request(source_package=package, target_project=project)
        self.requests.append(request)
        return request

    def request_package_delete(self, package, project=None):
        if not project:
            project = package.project
        request = Request(target_package=package,
                          target_project=project,
                          type='delete')
        self.requests.append(request)
        return request

    def create_submit_request(self, project, package, text=None):
        project = self.create_project(project)
        package = Package(name=package, project=project)
        package.create_commit(text=text)
        return self.submit_package(package)

    def create_staging(self,
                       suffix,
                       freeze=False,
                       rings=None,
                       with_repo=False):
        staging_key = 'staging:{}'.format(suffix)
        # do not reattach if already present
        if staging_key not in self.projects:
            staging_name = self.project + ':Staging:' + suffix
            staging = Project(staging_name, create=False, with_repo=with_repo)
            url = osc.core.makeurl(
                APIURL, ['staging', self.project, 'staging_projects'])
            data = '<workflow><staging_project>{}</staging_project></workflow>'
            osc.core.http_POST(url, data=data.format(staging_name))
            self.projects[staging_key] = staging
        else:
            staging = self.projects[staging_key]

        project_links = []
        if rings == 0:
            project_links.append(self.project + ":Rings:0-Bootstrap")
        if rings == 1 or rings == 0:
            project_links.append(self.project + ":Rings:1-MinimalX")
        staging.update_meta(project_links=project_links,
                            maintainer={'groups': ['factory-staging']},
                            with_repo=with_repo)

        if freeze:
            FreezeCommand(self.api).perform(staging.name)

        return staging

    def __del__(self):
        if not self.api:
            return
        try:
            self.remove()
        except:
            # normally exceptions in destructors are ignored but an info
            # message is displayed. Make this a little more useful by
            # printing it into the capture log
            traceback.print_exc(None, sys.stdout)

    def remove(self):
        print('deleting staging workflow')
        for project in self.projects.values():
            project.remove()
        for request in self.requests:
            request.revoke()
        for group in self.groups:
            url = osc.core.makeurl(APIURL, ['group', group])
            try:
                osc.core.http_DELETE(url)
            except HTTPError:
                pass
        print('done')
        if hasattr(self.api, '_invalidate_all'):
            self.api._invalidate_all()
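One way a test might drive this fixture, using only methods defined above. This is a rough sketch, not taken from the original test suite; 'devel:wine' is an illustrative source project, and a local OBS test instance reachable at APIURL is assumed.

wf = StagingWorkflow()
wf.setup_rings()                                 # target project, rings and a 'wine' link
staging_a = wf.create_staging('A', freeze=True)  # register and freeze Staging:A
request = wf.create_submit_request('devel:wine', 'wine')
print(request)                                   # tracked, so remove() can revoke it later
del wf                                           # __del__ -> remove() tears everything down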
Example #46
def staging_api(args):
    apiurl = osc.conf.config['apiurl']
    Config(apiurl, args.project)
    return StagingAPI(apiurl, args.project)
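staging_api() expects osc's configuration to be loaded already and an args object carrying a project name. A hypothetical command-line wiring (the option names are illustrative) might look like this:

import argparse
import osc.conf

parser = argparse.ArgumentParser()
parser.add_argument('-p', '--project', default='openSUSE:Factory')
args = parser.parse_args()

osc.conf.get_config()              # populates osc.conf.config['apiurl']
api = staging_api(args)
for staging in api.get_staging_projects():
    print(staging)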
Example #47
class StagingHelper(object):
    def __init__(self, project):
        self.project = project
        self.apiurl = osc.conf.config['apiurl']
        Config(self.project)
        self.api = StagingAPI(self.apiurl, self.project)

    def get_support_package_list(self, project, repository):
        f = osc.core.get_buildconfig(self.apiurl, project, repository).splitlines()
        pkg_list = []
        for line in f:
            if re.match('Preinstall', line) or re.match('VM[Ii]nstall', line) or re.match('Support', line):
                content = line.split(':')
                variables = [x.strip() for x in content[1].split(' ')]
                for var in variables:
                    if var != '' and var not in pkg_list:
                        if var.startswith('!') and var[1:] in pkg_list:
                            pkg_list.remove(var[1:])
                        else:
                            pkg_list.append(var)
        return pkg_list

    def get_project_binarylist(self, project, repository, arch):
        query = {'view': 'binarylist', 'repository': repository, 'arch': arch}
        root = ET.parse(http_GET(makeurl(self.apiurl, ['build', project, '_result'],
            query=query))).getroot()
        return root

    def process_project_binarylist(self, project, repository, arch):
        prj_binarylist = self.get_project_binarylist(project, repository, arch)
        files = {}
        for package in prj_binarylist.findall('./result/binarylist'):
            for binary in package.findall('binary'):
                result = re.match(r'(.*)-([^-]*)-([^-]*)\.([^-\.]+)\.rpm', binary.attrib['filename'])
                if not result:
                    continue
                bname = result.group(1)
                if bname.endswith('-debuginfo') or bname.endswith('-debuginfo-32bit'):
                    continue
                if bname.endswith('-debugsource'):
                    continue
                if bname.startswith('::import::'):
                    continue
                if result.group(4) == 'src':
                    continue
                files[bname] = package.attrib['package']

        return files

    def check_multiple_specs(self, project, packages):
        expanded_packages = []

        for pkg in packages:
            query = {'expand': 1}
            url = makeurl(self.apiurl, ['source', project, pkg], query=query)
            try:
                root = ET.parse(http_GET(url)).getroot()
            except urllib2.HTTPError as e:
                if e.code == 404:
                    continue
                raise
            for en in root.findall('entry'):
                if en.attrib['name'].endswith('.spec'):
                    expanded_packages.append(en.attrib['name'][:-5])

        return expanded_packages

    def crawl(self):
        """Main method"""
        rebuild_data = self.api.dashboard_content_load('support_pkg_rebuild')
        if rebuild_data is None:
            print "There is no support_pkg_rebuild file!"
            return

        logging.info('Gathering support package list from %s' % self.project)
        support_pkgs = self.get_support_package_list(self.project, 'standard')
        files = self.process_project_binarylist(self.project, 'standard', 'x86_64')
        staging_projects = ["%s:%s"%(self.api.cstaging, p) for p in self.api.get_staging_projects_short()]
        cand_sources = defaultdict(list)
        for stg in staging_projects:
            prj_meta = self.api.get_prj_pseudometa(stg)
            prj_staged_packages = [req['package'] for req in prj_meta['requests']]
            prj_expanded_packages = self.check_multiple_specs(self.project, prj_staged_packages)
            for pkg in support_pkgs:
                if files.get(pkg) and files.get(pkg) in prj_expanded_packages:
                    if files.get(pkg) not in cand_sources[stg]:
                        cand_sources[stg].append(files.get(pkg))

        root = ET.fromstring(rebuild_data)

        logging.info('Checking rebuild data...')

        for stg in root.findall('staging'):
            rebuild = stg.find('rebuild').text
            suppkg_list = stg.find('supportpkg').text
            need_rebuild = False
            suppkgs = []
            if suppkg_list:
                suppkgs = suppkg_list.split(',')

            stgname = stg.get('name')
            if len(cand_sources[stgname]) and rebuild == 'unknown':
                need_rebuild = True
                stg.find('rebuild').text = 'needed'
                new_suppkg_list = ','.join(cand_sources[stgname])
                stg.find('supportpkg').text = new_suppkg_list
            elif len(cand_sources[stgname]) and rebuild != 'unknown':
                for cand in cand_sources[stgname]:
                    if cand not in suppkgs:
                        need_rebuild = True
                        stg.find('rebuild').text = 'needed'
                        break
                new_suppkg_list = ','.join(cand_sources[stgname])
                stg.find('supportpkg').text = new_suppkg_list
            elif not len(cand_sources[stgname]):
                stg.find('rebuild').text = 'unneeded'
                stg.find('supportpkg').text = ''

            if stg.find('rebuild').text == 'needed':
                need_rebuild = True

            if need_rebuild and not self.api.is_repo_dirty(stgname, 'standard'):
                logging.info('Rebuild %s' % stgname)
                osc.core.rebuild(self.apiurl, stgname, None, None, None)
                stg.find('rebuild').text = 'unneeded'

        logging.info('Updating support pkg list...')
        rebuild_data_updated = ET.tostring(root)
        logging.debug(rebuild_data_updated)
        if rebuild_data_updated != rebuild_data:
            self.api.dashboard_content_save(
                'support_pkg_rebuild', rebuild_data_updated, 'support package rebuild')
Example #48
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Command to publish openQA status in Staging projects")
    parser.add_argument("-s", "--staging", type=str, default=None, help="staging project letter")
    parser.add_argument("-f", "--force", action="store_true", default=False, help="force the write of the comment")
    parser.add_argument(
        "-p", "--project", type=str, default="Factory", help="openSUSE version to make the check (Factory, 13.2)"
    )
    parser.add_argument("-d", "--debug", action="store_true", default=False, help="enable debug information")

    args = parser.parse_args()

    osc.conf.get_config()
    osc.conf.config["debug"] = args.debug

    if args.force:
        MARGIN_HOURS = 0

    Config("openSUSE:%s" % args.project)
    api = StagingAPI(osc.conf.config["apiurl"], "openSUSE:%s" % args.project)
    openQA = OpenQAReport(api)

    if args.staging:
        openQA.report(api.prj_from_letter(args.staging))
    else:
        for staging in api.get_staging_projects():
            if not staging.endswith(":DVD"):
                openQA.report(staging)
Example #49
class TestSelect(unittest.TestCase):
    def setUp(self):
        """
        Initialize the configuration
        """
        self.obs = OBS()
        Config('openSUSE:Factory')
        self.api = StagingAPI(APIURL, 'openSUSE:Factory')

    def test_old_frozen(self):
        self.assertEqual(
            self.api.prj_frozen_enough('openSUSE:Factory:Staging:A'), False)
        # check it won't allow selecting
        self.assertEqual(
            False,
            SelectCommand(self.api,
                          'openSUSE:Factory:Staging:A').perform(['gcc']))

    def test_select_comments(self):
        c_api = CommentAPI(self.api.apiurl)
        staging_b = 'openSUSE:Factory:Staging:B'
        comments = c_api.get_comments(project_name=staging_b)

        # First select
        self.assertEqual(
            True,
            SelectCommand(self.api, staging_b).perform(['gcc', 'wine']))
        first_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(first_select_comments.keys())[-1]
        first_select_comment = first_select_comments[last_id]
        # Only one comment is added
        self.assertEqual(len(first_select_comments), len(comments) + 1)
        # With the right content
        self.assertTrue('Request#123 for package gcc submitted by @Admin' in
                        first_select_comment['comment'])

        # Second select
        self.assertEqual(
            True,
            SelectCommand(self.api, staging_b).perform(['puppet']))
        second_select_comments = c_api.get_comments(project_name=staging_b)
        last_id = sorted(second_select_comments.keys())[-1]
        second_select_comment = second_select_comments[last_id]
        # The number of comments remains, but they are different
        self.assertEqual(len(second_select_comments),
                         len(first_select_comments))
        self.assertNotEqual(second_select_comment['comment'],
                            first_select_comment['comment'])
        # The new comments contents both old and new information
        self.assertTrue('Request#123 for package gcc submitted by @Admin' in
                        second_select_comment['comment'])
        self.assertTrue('Request#321 for package puppet submitted by @Admin' in
                        second_select_comment['comment'])

    def test_no_matches(self):
        # search for requests
        with self.assertRaises(oscerr.WrongArgs) as cm:
            SelectCommand(self.api,
                          'openSUSE:Factory:Staging:B').perform(['bash'])
        self.assertEqual(str(cm.exception), "No SR# found for: bash")

    def test_selected(self):
        # make sure the project is frozen recently for other tests

        ret = SelectCommand(self.api,
                            'openSUSE:Factory:Staging:B').perform(['wine'])
        self.assertEqual(True, ret)
Example #50
 def setUp(self):
     self.obs = OBS()
     self.load_config()
     self.api = StagingAPI(APIURL, PROJECT)
Example #51
class Project(object):
    def __init__(self, name):
        self.name = name
        Config(apiurl, name)
        self.api = StagingAPI(apiurl, name)
        self.staging_projects = dict()
        self.listener = None
        self.logger = logging.getLogger(__name__)
        self.replace_string = self.api.attribute_value_load('OpenQAMapping')

    def init(self):
        for p in self.api.get_staging_projects():
            if self.api.is_adi_project(p):
                continue
            self.staging_projects[p] = self.initial_staging_state(p)
            self.update_staging_status(p)

    def staging_letter(self, name):
        return name.split(':')[-1]

    def map_iso(self, staging_project, iso):
        parts = self.replace_string.split('/')
        if parts[0] != 's':
            raise Exception("{}'s iso_replace_string does not start with s/".format(self.name))
        old = parts[1]
        new = parts[2]
        new = new.replace('$LETTER', self.staging_letter(staging_project))
        return re.compile(old).sub(new, iso)

    def gather_isos(self, name, repository):
        url = self.api.makeurl(['published', name, repository, 'iso'])
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        ret = []
        for entry in root.findall('entry'):
            if entry.get('name').endswith('iso'):
                ret.append(self.map_iso(name, entry.get('name')))
        return ret

    def gather_buildid(self, name, repository):
        url = self.api.makeurl(['published', name, repository], {'view': 'status'})
        f = self.api.retried_GET(url)
        id = ET.parse(f).getroot().find('buildid')
        if id is not None:
            return id.text

    def initial_staging_state(self, name):
        return {'isos': self.gather_isos(name, 'images'),
                'id': self.gather_buildid(name, 'images')}

    def fetch_openqa_jobs(self, staging, iso):
        buildid = self.staging_projects[staging].get('id')
        if not buildid:
            self.logger.info("I don't know the build id of " + staging)
            return
        # all openQA jobs are created at the same URL
        url = self.api.makeurl(['status_reports', 'published', staging, 'images', 'reports', buildid])
        openqa = self.listener.jobs_for_iso(iso)
        # collect job infos to pick names
        openqa_infos = dict()
        for job in openqa:
            print(staging, iso, job['id'], job['state'], job['result'],
                  job['settings']['MACHINE'], job['settings']['TEST'])
            openqa_infos[job['id']] = {'url': self.listener.test_url(job)}
            openqa_infos[job['id']]['state'] = self.map_openqa_result(job)
            openqa_infos[job['id']]['name'] = job['settings']['TEST']
            openqa_infos[job['id']]['machine'] = job['settings']['MACHINE']

        # make sure the names are unique
        taken_names = dict()
        for id in openqa_infos:
            name = openqa_infos[id]['name']
            if name in taken_names:
                openqa_infos[id]['name'] = openqa_infos[id]['name'] + "@" + openqa_infos[id]['machine']
                # the other id
                id = taken_names[name]
                openqa_infos[id]['name'] = openqa_infos[id]['name'] + "@" + openqa_infos[id]['machine']
            taken_names[name] = id

        for info in openqa_infos.values():
            xml = self.openqa_check_xml(info['url'], info['state'], 'openqa:' + info['name'])
            try:
                http_POST(url, data=xml)
            except HTTPError:
                self.logger.error('failed to post status to ' + url)

    def update_staging_status(self, project):
        for iso in self.staging_projects[project]['isos']:
            self.fetch_openqa_jobs(project, iso)

    def update_staging_buildid(self, project, repository, buildid):
        self.staging_projects[project]['id'] = buildid
        self.staging_projects[project]['isos'] = self.gather_isos(project, repository)
        self.update_staging_status(project)

    def check_published_repo(self, project, repository, buildid):
        if repository != 'images':
            return
        for p in self.staging_projects:
            if project == p:
                self.update_staging_buildid(project, repository, buildid)

    def matching_project(self, iso):
        for p in self.staging_projects:
            if iso in self.staging_projects[p]['isos']:
                return p

    def map_openqa_result(self, job):
        if job['result'] in ['passed', 'softfailed']:
            return 'success'
        if job['result'] == 'none':
            return 'pending'
        return 'failure'

    def openqa_job_change(self, iso):
        staging = self.matching_project(iso)
        if not staging:
            return
        # we fetch all openqa jobs so we can avoid long job names
        self.fetch_openqa_jobs(staging, iso)

    def openqa_check_xml(self, url, state, name):
        check = ET.Element('check')
        se = ET.SubElement(check, 'url')
        se.text = url
        se = ET.SubElement(check, 'state')
        se.text = state
        se = ET.SubElement(check, 'name')
        se.text = name
        return ET.tostring(check)
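The class depends on an external listener for openQA lookups; the two calls it makes (jobs_for_iso and test_url) are inferred from fetch_openqa_jobs above. A stubbed wiring sketch, assuming the module-level apiurl used by the constructor is configured:

class DummyListener(object):
    """Stand-in listener returning no jobs and an illustrative test URL."""

    def jobs_for_iso(self, iso):
        return []

    def test_url(self, job):
        return 'https://openqa.opensuse.org/tests/%s' % job['id']

project = Project('openSUSE:Factory')
project.listener = DummyListener()
project.init()    # gather ISOs and build ids for every non-adi staging
project.check_published_repo('openSUSE:Factory:Staging:A', 'images', '42.1')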
Example #52
class ToTestManager(ToolBase.ToolBase):

    def __init__(self, tool):
        ToolBase.ToolBase.__init__(self)
        # copy attributes
        self.logger = logging.getLogger(__name__)
        self.apiurl = tool.apiurl
        self.debug = tool.debug
        self.caching = tool.caching
        self.dryrun = tool.dryrun

    def setup(self, project):
        self.project = ToTest(project, self.apiurl)
        self.api = StagingAPI(self.apiurl, project=project)

    def version_file(self, target):
        return 'version_%s' % target

    def write_version_to_dashboard(self, target, version):
        if self.dryrun or self.project.do_not_release:
            return
        self.api.pseudometa_file_ensure(self.version_file(target), version,
                                        comment='Update version')

    def current_qa_version(self):
        return self.api.pseudometa_file_load(self.version_file('totest'))

    def iso_build_version(self, project, tree, repo=None, arch=None):
        for binary in self.binaries_of_product(project, tree, repo=repo, arch=arch):
            result = re.match(r'.*-(?:Build|Snapshot)([0-9.]+)(?:-Media.*\.iso|\.docker\.tar\.xz|\.raw\.xz)', binary)
            if result:
                return result.group(1)
        raise NotFoundException("can't find %s iso version" % project)

    def version_from_totest_project(self):
        if len(self.project.main_products):
            return self.iso_build_version(self.project.test_project, self.project.main_products[0])

        return self.iso_build_version(self.project.test_project, self.project.image_products[0].package,
                                      arch=self.project.image_products[0].archs[0])

    def binaries_of_product(self, project, product, repo=None, arch=None):
        if repo is None:
            repo = self.project.product_repo
        if arch is None:
            arch = self.project.product_arch

        url = self.api.makeurl(['build', project, repo, arch, product])
        try:
            f = self.api.retried_GET(url)
        except HTTPError:
            return []

        ret = []
        root = ET.parse(f).getroot()
        for binary in root.findall('binary'):
            ret.append(binary.get('filename'))

        return ret

    def ftp_build_version(self, project, tree):
        for binary in self.binaries_of_product(project, tree):
            result = re.match(r'.*-Build(.*)-Media1.report', binary)
            if result:
                return result.group(1)
        raise NotFoundException("can't find %s ftp version" % project)

    # make sure to update the attribute as atomically as possible - as such
    # only update the snapshot and don't erase anything else. The snapshots
    # have very different update times within the pipeline, so there is
    # normally no chance that the releaser and the publisher overwrite each
    # other's state
    def update_status(self, status, snapshot):
        status_dict = self.get_status_dict()
        if self.dryrun:
            self.logger.info('setting {} snapshot to {}'.format(status, snapshot))
            return
        if status_dict.get(status) != snapshot:
            status_dict[status] = snapshot
            text = yaml.safe_dump(status_dict)
            self.api.attribute_value_save('ToTestManagerStatus', text)

    def get_status_dict(self):
        text = self.api.attribute_value_load('ToTestManagerStatus')
        if text:
            return yaml.safe_load(text)
        return dict()

    def get_status(self, status):
        return self.get_status_dict().get(status)

    def release_package(self, project, package, set_release=None, repository=None,
                         target_project=None, target_repository=None):
        query = {'cmd': 'release'}

        if set_release:
            query['setrelease'] = set_release

        if repository is not None:
            query['repository'] = repository

        if target_project is not None:
            # Both need to be set
            query['target_project'] = target_project
            query['target_repository'] = target_repository

        baseurl = ['source', project, package]

        url = self.api.makeurl(baseurl, query=query)
        if self.dryrun or self.project.do_not_release:
            self.logger.info('release %s/%s (%s)' % (project, package, query))
        else:
            self.api.retried_POST(url)

    def all_repos_done(self, project, codes=None):
        """Check the build result of the project and only return True if all
        repos of that project are either published or unpublished

        """

        # coolo's experience says that 'finished' won't be
        # sufficient here, so don't try to add it :-)
        codes = ['published', 'unpublished'] if not codes else codes

        url = self.api.makeurl(
            ['build', project, '_result'], {'code': 'failed'})
        f = self.api.retried_GET(url)
        root = ET.parse(f).getroot()
        ready = True
        for repo in root.findall('result'):
            # ignore ports. 'factory' is used by arm for repos that are not
            # meant to use the totest manager.
            if repo.get('repository') in ('ports', 'factory', 'images_staging'):
                continue
            if repo.get('dirty') == 'true':
                self.logger.info('%s %s %s -> %s' % (repo.get('project'),
                                                repo.get('repository'), repo.get('arch'), 'dirty'))
                ready = False
            if repo.get('code') not in codes:
                self.logger.info('%s %s %s -> %s' % (repo.get('project'),
                                                repo.get('repository'), repo.get('arch'), repo.get('code')))
                ready = False
        return ready
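ToTestManager only copies a handful of attributes from the ToolBase tool it is given, so a dry run can be sketched with a stand-in object. This is an assumption-laden sketch: the project name is only an example and the calls still need a reachable OBS instance.

class FakeTool(object):
    """Stand-in for the ToolBase tool whose attributes __init__ copies."""
    apiurl = 'https://api.opensuse.org'
    debug = False
    caching = False
    dryrun = True

manager = ToTestManager(FakeTool())
manager.setup('openSUSE:Factory')
if manager.all_repos_done(manager.project.test_project):
    snapshot = manager.version_from_totest_project()
    manager.update_status('testing', snapshot)   # only logged, since dryrun is set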
Example #53
def do_staging(self, subcmd, opts, *args):
    """${cmd_name}: Commands to work with staging projects

    ${cmd_option_list}

    "accept" will accept all requests in
        $PROJECT:Staging:<LETTER> into $PROJECT
        If openSUSE:* project, requests marked ready from adi stagings will also
        be accepted.

    "acheck" will check if it is safe to accept new staging projects
        As $PROJECT is syncing the right package versions between
        /standard, /totest and /snapshot, it is important that the projects
        are clean prior to a checkin round.

    "adi" will list already staged requests, stage new requests, and supersede
        requests where applicable. New adi stagings will be created for new
        packages based on the grouping options used. The default grouping is by
        source project. When adi stagings are ready the request will be marked
        ready, unstaged, and the adi staging deleted.

    "check" will check if all packages are links without changes

    "check_duplicate_binaries" list binaries provided by multiple packages

    "config" will modify or view staging specific configuration

        Target project OSRT:Config attribute configuration applies to all
        stagings. Both configuration locations follow the .oscrc format (space
        separated list).

        config
            Print all staging configuration.
        config key
            Print the value of key for stagings.
        config key value...
            Set the value of key for stagings.
        config --clear
            Clear all staging configuration.
        config --clear key
            Clear (unset) a single key from staging configuration
        config --append key value...
            Append value to existing value or set if no existing value.

        All of the above may be restricted to a set of stagings.

        The staging configuration is automatically cleared anytime staging
        pseudometa is cleared (accept, or unstage all requests).

        The keys that may be set in staging configuration are:

        - repo_checker-binary-whitelist[-arch]: appended to target project list
        - todo: text to be printed after staging is accepted

    "cleanup_rings" will try to cleanup rings content and print
        out problems

    "freeze" will freeze the sources of the project's links while not
        affecting the source packages

    "frozenage" will show when the respective staging project was last frozen

    "ignore" will ignore a request from "list" and "adi" commands until unignored

    "unignore" will remove from requests from ignore list
        If the --cleanup flag is included then all ignored requests that were
        changed from state new or review more than 3 days ago will be removed.

    "list" will list/supersede requests for ring packages or all if no rings.

    "lock" acquire a hold on the project in order to execute multiple commands
        and prevent others from interrupting. An example:

        lock -m "checkin round"

        list --supersede
        adi
        accept A B C D E

        unlock

        Each command will update the lock to keep it up-to-date.

    "repair" will attempt to repair the state of a request that has been
        corrupted.

        Use the --cleanup flag to include all untracked requests.

    "select" will add requests to the project
        Stagings are expected to be either in short-hand or the full project
        name. For example, letter or named stagings can be specified simply as
        A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2,
        etc. Currently, adi stagings are not supported in proposal mode.

        Requests may either be the target package or the request ID.

        When using --filter-by or --group-by the xpath will be applied to the
        request node as returned by OBS. Use the following on a current request
        to see the XML structure.

        osc api /request/1337

        A number of additional values will supplement the normal request node.

        - ./action/target/@devel_project: the devel project for the package
        - ./action/target/@devel_project_super: super devel project if relevant
        - ./action/target/@ring: the ring to which the package belongs
        - ./@aged: either True or False based on splitter-request-age-threshold
        - ./@nonfree: set to nonfree if targeting a nonfree sub project
        - ./@ignored: either False or the provided message

        Some useful examples:

        --filter-by './action/target[starts-with(@package, "yast-")]'
        --filter-by './action/target/[@devel_project="YaST:Head"]'
        --filter-by './action/target[starts-with(@ring, "1")]'
        --filter-by '@id!="1234567"'
        --filter-by 'contains(description, "#Portus")'

        --group-by='./action/target/@devel_project'
        --group-by='./action/target/@ring'

        Multiple filter-by or group-by options may be used at the same time.

        Note that when using proposal mode, multiple stagings to consider may be
        provided in addition to a list of requests by which to filter. A more
        complex example:

        select --group-by='./action/target/@devel_project' A B C 123 456 789

        This will separate the requests 123, 456, 789 by devel project and only
        consider stagings A, B, or C, if available, for placement.

        Providing no arguments is also a valid choice and will propose all non-ignored
        requests into the first available staging. Note that bootstrapped
        stagings are only used when either required or no other stagings are
        available.

        Another useful example is placing all open requests into a specific
        letter staging with:

        select A

        Built in strategies may be specified as well. For example:

        select --strategy devel
        select --strategy quick
        select --strategy special
        select --strategy super

        The default is none; custom is used when any filter-by or group-by
        arguments are provided.

        To merge applicable requests into an existing staging.

        select --merge A

        To automatically try all available strategies.

        select --try-strategies

        These concepts can be combined and interactive mode allows the proposal
        to be modified before it is executed.

        Moving requests can be accomplished using the --move flag. For example,
        to move already staged pac1 and pac2 to staging B use the following.

        select --move B pac1 pac2

        The staging in which the requests are staged will automatically be
        determined and the requests will be removed from that staging and placed
        in the specified staging.

        Related to this, the --filter-from option may be used in conjunction
        with --move to only move requests already staged in a specific staging.
        This can be useful if a staging master is responsible for a specific set
        of packages and wants to move them into a different staging when they
        were already placed in a mixed staging. For example, if one had a file
        with a list of packages the following would move any of them found in
        staging A to staging B.

        select --move --filter-from A B $(< package.list)

    "unselect" will remove from the project - pushing them back to the backlog
        If a message is included the requests will be ignored first.

        Use the --cleanup flag to include all obsolete requests.

    "unlock" will remove the staging lock in case it gets stuck or a manual hold
        If a command lock gets stuck while a hold is placed on a project the
        unlock command will need to be run twice since there are two layers of
        locks.

    "rebuild" will rebuild broken packages in the given stagings or all
        The rebuild command will only trigger builds for packages with less than
        3 failures since the last success or if the build log indicates a stall.

        If the force option is included the rebuild checks will be ignored and
        all packages failing to build will be triggered.

    "setprio" will set priority of requests withing stagings
        If no stagings are specified all stagings will be used.
        The default priority is important, but the possible values are:
          "critical", "important", "moderate" or "low".

    "supersede" will supersede requests were applicable.
        A request list can be used to limit what is superseded.

    Usage:
        osc staging accept [--force] [--no-cleanup] [LETTER...]
        osc staging acheck
        osc staging adi [--move] [--by-develproject] [--split] [REQUEST...]
        osc staging check [--old] [STAGING...]
        osc staging check_duplicate_binaries
        osc staging config [--append] [--clear] [STAGING...] [key] [value]
        osc staging cleanup_rings
        osc staging freeze [--no-bootstrap] STAGING...
        osc staging frozenage [STAGING...]
        osc staging ignore [-m MESSAGE] REQUEST...
        osc staging unignore [--cleanup] [REQUEST...|all]
        osc staging list [--supersede]
        osc staging lock [-m MESSAGE]
        osc staging select [--no-freeze] [--move [--filter-from STAGING]]
            [--add PACKAGE]
            STAGING REQUEST...
        osc staging select [--no-freeze] [--interactive|--non-interactive]
            [--filter-by...] [--group-by...]
            [--merge] [--try-strategies] [--strategy]
            [STAGING...] [REQUEST...]
        osc staging unselect [--cleanup] [-m MESSAGE] [REQUEST...]
        osc staging unlock
        osc staging rebuild [--force] [STAGING...]
        osc staging repair [--cleanup] [REQUEST...]
        osc staging setprio [STAGING...] [priority]
        osc staging supersede [REQUEST...]
    """
    if opts.version:
        self._print_version()

    # verify the argument counts match the commands
    if len(args) == 0:
        raise oscerr.WrongArgs('No command given, see "osc help staging"!')
    cmd = args[0]
    if cmd in (
        'accept',
        'adi',
        'check',
        'config',
        'frozenage',
        'unignore',
        'select',
        'unselect',
        'rebuild',
        'repair',
        'setprio',
        'supersede',
    ):
        min_args, max_args = 0, None
    elif cmd in (
        'freeze',
        'ignore',
    ):
        min_args, max_args = 1, None
    elif cmd in (
        'acheck',
        'check_duplicate_binaries',
        'cleanup_rings',
        'list',
        'lock',
        'unlock',
    ):
        min_args, max_args = 0, 0
    else:
        raise oscerr.WrongArgs('Unknown command: %s' % cmd)
    args = clean_args(args)
    if len(args) - 1 < min_args:
        raise oscerr.WrongArgs('Too few arguments.')
    if max_args is not None and len(args) - 1 > max_args:
        raise oscerr.WrongArgs('Too many arguments.')

    # Allow for determining project from osc store.
    if not opts.project:
        if core.is_project_dir('.'):
            opts.project = core.store_read_project('.')
        else:
            opts.project = 'Factory'

    # Cache the remote config fetch.
    Cache.init()

    # Init the OBS access and configuration
    opts.project = self._full_project_name(opts.project)
    opts.apiurl = self.get_api_url()
    opts.verbose = False
    Config(opts.apiurl, opts.project)

    colorama.init(autoreset=True,
        strip=(opts.no_color or not bool(int(conf.config.get('staging.color', True)))))
    # Allow colors to be changed.
    for name in dir(Fore):
        if not name.startswith('_'):
            # .oscrc requires keys to be lower-case.
            value = conf.config.get('staging.color.' + name.lower())
            if value:
                setattr(Fore, name, ansi.code_to_chars(value))

    if opts.wipe_cache:
        Cache.delete_all()

    api = StagingAPI(opts.apiurl, opts.project)
    needed = lock_needed(cmd, opts)
    with OBSLock(opts.apiurl, opts.project, reason=cmd, needed=needed) as lock:

        # call the respective command and parse args by need
        if cmd == 'check':
            if len(args) == 1:
                CheckCommand(api).perform(None, opts.old)
            else:
                for prj in args[1:]:
                    CheckCommand(api).perform(prj, opts.old)
                    print()
        elif cmd == 'check_duplicate_binaries':
            CheckDuplicateBinariesCommand(api).perform(opts.save)
        elif cmd == 'config':
            projects = set()
            key = value = None
            stagings = api.get_staging_projects_short(None) + \
                       api.get_staging_projects()
            for arg in args[1:]:
                if arg in stagings:
                    projects.add(api.prj_from_short(arg))
                elif key is None:
                    key = arg
                elif value is None:
                    value = arg
                else:
                    value += ' ' + arg

            if not len(projects):
                projects = api.get_staging_projects()

            ConfigCommand(api).perform(projects, key, value, opts.append, opts.clear)
        elif cmd == 'freeze':
            for prj in args[1:]:
                prj = api.prj_from_short(prj)
                print(Fore.YELLOW + prj)
                FreezeCommand(api).perform(prj, copy_bootstrap=opts.bootstrap)
        elif cmd == 'frozenage':
            projects = api.get_staging_projects_short() if len(args) == 1 else args[1:]
            for prj in projects:
                prj = api.prj_from_letter(prj)
                print('{} last frozen {}{:.1f} days ago'.format(
                    Fore.YELLOW + prj + Fore.RESET,
                    Fore.GREEN if api.prj_frozen_enough(prj) else Fore.RED,
                    api.days_since_last_freeze(prj)))
        elif cmd == 'acheck':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64")
            if version_totest:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty = api.is_repo_dirty(api.project, 'totest')
                print("version_openqa: %s / version_totest: %s / totest_dirty: %s\n" % (version_openqa, version_totest, totest_dirty))
            else:
                print("acheck is unavailable in %s!\n" % (api.project))
        elif cmd == 'accept':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64")

            if version_totest is None or opts.force:
                # SLE does not have a totest_version or openqa_version - ignore it
                version_openqa = version_totest
                totest_dirty   = False
            else:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty   = api.is_repo_dirty(api.project, 'totest')

            if version_openqa == version_totest and not totest_dirty:
                cmd = AcceptCommand(api)
                for prj in args[1:]:
                    if cmd.perform(api.prj_from_letter(prj), opts.force):
                        cmd.reset_rebuild_data(prj)
                    else:
                        return
                    if not opts.no_cleanup:
                        if api.item_exists(api.prj_from_letter(prj)):
                            cmd.cleanup(api.prj_from_letter(prj))
                cmd.accept_other_new()
                if opts.project.startswith('openSUSE:'):
                    cmd.update_factory_version()
                    if api.item_exists(api.crebuild):
                        cmd.sync_buildfailures()
            else:
                print("Not safe to accept: /totest is not yet synced")
        elif cmd == 'unselect':
            if opts.message:
                print('Ignoring requests first')
                IgnoreCommand(api).perform(args[1:], opts.message)
            UnselectCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'select':
            # Include list of all stagings in short-hand and by full name.
            existing_stagings = api.get_staging_projects_short(None)
            existing_stagings += api.get_staging_projects()
            stagings = []
            requests = []
            for arg in args[1:]:
                # Since requests may be given by either request ID or package
                # name and stagings may include multi-letter special stagings
                # there is no easy way to distinguish between stagings and
                # requests in arguments. Therefore, check if the argument is in the
                # list of short-hand and full project name stagings, otherwise
                # consider it a request. This also allows for special stagings
                # with the same name as a package, but the staging will be assumed
                # the first time around. The current practice seems to be to start a
                # special staging with a capital letter which makes them unique.
                # Lastly, adi stagings are consistently prefixed with adi:, which
                # also makes it easy to distinguish them from request IDs.
                if arg in existing_stagings and arg not in stagings:
                    stagings.append(api.extract_staging_short(arg))
                elif arg not in requests:
                    requests.append(arg)

            if len(stagings) != 1 or len(requests) == 0 or opts.filter_by or opts.group_by:
                if opts.move or opts.filter_from:
                    print('--move and --filter-from must be used with explicit staging and request list')
                    return

                open_requests = api.get_open_requests({'withhistory': 1})
                if len(open_requests) == 0:
                    print('No open requests to consider')
                    return

                splitter = RequestSplitter(api, open_requests, in_ring=True)

                considerable = splitter.stagings_load(stagings)
                if considerable == 0:
                    print('No considerable stagings on which to act')
                    return

                if opts.merge:
                    splitter.merge()
                if opts.try_strategies:
                    splitter.strategies_try()
                if len(requests) > 0:
                    splitter.strategy_do('requests', requests=requests)
                if opts.strategy:
                    splitter.strategy_do(opts.strategy)
                elif opts.filter_by or opts.group_by:
                    kwargs = {}
                    if opts.filter_by:
                        kwargs['filters'] = opts.filter_by
                    if opts.group_by:
                        kwargs['groups'] = opts.group_by
                    splitter.strategy_do('custom', **kwargs)
                else:
                    if opts.merge:
                        # Merge any none strategies before final none strategy.
                        splitter.merge(strategy_none=True)
                    splitter.strategy_do('none')
                    splitter.strategy_do_non_bootstrapped('none')

                proposal = splitter.proposal
                if len(proposal) == 0:
                    print('Empty proposal')
                    return

                if opts.interactive:
                    with tempfile.NamedTemporaryFile(suffix='.yml') as temp:
                        temp.write(yaml.safe_dump(splitter.proposal, default_flow_style=False) + '\n\n')

                        if len(splitter.requests):
                            temp.write('# remaining requests:\n')
                            for request in splitter.requests:
                                temp.write('#    {}: {}\n'.format(
                                    request.get('id'), request.find('action/target').get('package')))
                            temp.write('\n')

                        temp.write('# move requests between stagings or comment/remove them\n')
                        temp.write('# change the target staging for a group\n')
                        temp.write('# remove the group, requests, staging, or strategy to skip\n')
                        temp.write('# stagings\n')
                        if opts.merge:
                            temp.write('# - mergeable: {}\n'
                                       .format(', '.join(sorted(splitter.stagings_mergeable +
                                                                splitter.stagings_mergeable_none))))
                        temp.write('# - considered: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_considerable))))
                        temp.write('# - remaining: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_available))))
                        temp.flush()

                        editor = os.getenv('EDITOR')
                        if not editor:
                            editor = 'xdg-open'
                        return_code = subprocess.call(editor.split(' ') + [temp.name])

                        proposal = yaml.safe_load(open(temp.name).read())

                        # Filter invalidated groups from proposal.
                        keys = ['group', 'requests', 'staging', 'strategy']
                        for group, info in sorted(proposal.items()):
                            for key in keys:
                                if not info.get(key):
                                    del proposal[group]
                                    break

                print(yaml.safe_dump(proposal, default_flow_style=False))

                print('Accept proposal? [y/n] (y): ', end='')
                if opts.non_interactive:
                    print('y')
                else:
                    response = raw_input().lower()
                    if response != '' and response != 'y':
                        print('Quit')
                        return

                for group, info in sorted(proposal.items()):
                    print('Staging {} in {}'.format(group, info['staging']))

                    # SelectCommand expects strings.
                    request_ids = map(str, info['requests'].keys())
                    target_project = api.prj_from_short(info['staging'])

                    if 'merge' not in info:
                        # Assume that the original splitter_info is desirable
                        # and that this staging is simply a manual follow-up.
                        api.set_splitter_info_in_prj_pseudometa(target_project, info['group'], info['strategy'])

                    SelectCommand(api, target_project) \
                        .perform(request_ids, no_freeze=opts.no_freeze)
            else:
                target_project = api.prj_from_short(stagings[0])
                if opts.add:
                    api.mark_additional_packages(target_project, [opts.add])
                else:
                    SelectCommand(api, target_project) \
                        .perform(requests, opts.move,
                                 api.prj_from_short(opts.filter_from), opts.no_freeze)
        elif cmd == 'cleanup_rings':
            CleanupRings(api).perform()
        elif cmd == 'ignore':
            IgnoreCommand(api).perform(args[1:], opts.message)
        elif cmd == 'unignore':
            UnignoreCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'list':
            ListCommand(api).perform(supersede=opts.supersede)
        elif cmd == 'lock':
            lock.hold(opts.message)
        elif cmd == 'adi':
            AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split)
        elif cmd == 'rebuild':
            RebuildCommand(api).perform(args[1:], opts.force)
        elif cmd == 'repair':
            RepairCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'setprio':
            stagings = []
            priority = None

            priorities = ['critical', 'important', 'moderate', 'low']
            for arg in args[1:]:
                if arg in priorities:
                    priority = arg
                else:
                    stagings.append(arg)

            PrioCommand(api).perform(stagings, priority)
        elif cmd == 'supersede':
            SupersedeCommand(api).perform(args[1:])
        elif cmd == 'unlock':
            lock.release(force=True)
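
The interactive path earlier in this example dumps the proposal to a temporary YAML file, opens it in $EDITOR (falling back to xdg-open), and reads the edited result back. Below is a minimal, standalone sketch of that round trip; the edit_yaml() helper is hypothetical and not part of the staging tooling itself.

# Minimal sketch of the edit-YAML-in-$EDITOR round trip shown above.
# edit_yaml() is a hypothetical helper, not part of osc-staging itself.
import os
import subprocess
import tempfile

import yaml


def edit_yaml(data):
    """Dump data to a temp file, open it in $EDITOR and load the edited result."""
    with tempfile.NamedTemporaryFile(suffix='.yml', mode='w+') as temp:
        temp.write(yaml.safe_dump(data, default_flow_style=False))
        temp.flush()

        editor = os.getenv('EDITOR', 'xdg-open')
        subprocess.call(editor.split(' ') + [temp.name])

        # Re-read by name in case the editor rewrote the file in place.
        with open(temp.name) as edited:
            return yaml.safe_load(edited.read())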
Example #54
class TestApiCalls(unittest.TestCase):
    """
    Tests for various api calls to ensure we return expected content
    """

    def setUp(self):
        """
        Initialize the configuration
        """

        self.obs = OBS()
        Config("openSUSE:Factory")
        self.api = StagingAPI(APIURL, "openSUSE:Factory")

    def tearDown(self):
        """Clean internal cache"""
        self.api._invalidate_all()

    def test_ring_packages(self):
        """
        Validate the creation of the rings.
        """
        # our content in the XML files
        ring_packages = {
            "elem-ring-0": "openSUSE:Factory:Rings:0-Bootstrap",
            "elem-ring-1": "openSUSE:Factory:Rings:1-MinimalX",
            "elem-ring-2": "openSUSE:Factory:Rings:2-TestDVD",
            "git": "openSUSE:Factory:Rings:2-TestDVD",
            "wine": "openSUSE:Factory:Rings:1-MinimalX",
        }
        self.assertEqual(ring_packages, self.api.ring_packages)

    @unittest.skip("no longer approving non-ring packages")
    def test_dispatch_open_requests(self):
        """
        Test dispatching and closure of non-ring packages
        """

        # Get rid of open requests
        self.api.dispatch_open_requests()
        # Check that we tried to close it
        self.assertEqual(httpretty.last_request().method, "POST")
        self.assertEqual(httpretty.last_request().querystring[u"cmd"], [u"changereviewstate"])
        # Try it again
        self.api.dispatch_open_requests()
        # This time there should be nothing to close
        self.assertEqual(httpretty.last_request().method, "GET")

    def test_pseudometa_get_prj(self):
        """
        Test getting project metadata from YAML in project description
        """

        # Try to get data from project that has no metadata
        data = self.api.get_prj_pseudometa("openSUSE:Factory:Staging:A")
        # Should be empty, but contain structure to work with
        self.assertEqual(data, {"requests": []})
        # Add some sample data
        rq = {"id": "123", "package": "test-package"}
        data["requests"].append(rq)
        # Save them and read them back
        self.api.set_prj_pseudometa("openSUSE:Factory:Staging:A", data)
        test_data = self.api.get_prj_pseudometa("openSUSE:Factory:Staging:A")
        # Verify that we got back the same data
        self.assertEqual(data, test_data)

    def test_list_projects(self):
        """
        List projects and their content
        """

        # Prepare expected results
        data = []
        for prj in self.obs.staging_project:
            data.append("openSUSE:Factory:Staging:" + prj)

        # Compare the results
        self.assertEqual(data, self.api.get_staging_projects())

    def test_open_requests(self):
        """
        Test searching for open requests
        """

        requests = []

        # get the open requests
        requests = self.api.get_open_requests()

        # Compare the results; we only care that exactly one request came back, not about its content
        self.assertEqual(1, len(requests))

    def test_get_package_information(self):
        """
        Test if we get the proper project, name and revision from the staging information
        """

        package_info = {
            "dir_srcmd5": "751efeae52d6c99de48164088a33d855",
            "project": "home:Admin",
            "rev": "7b98ac01b8071d63a402fa99dc79331c",
            "srcmd5": "7b98ac01b8071d63a402fa99dc79331c",
            "package": "wine",
        }

        # Compare the full package information dict returned for the staged package
        self.assertEqual(package_info, self.api.get_package_information("openSUSE:Factory:Staging:B", "wine"))

    def test_request_id_package_mapping(self):
        """
        Test whether we can get correct id for sr in staging project
        """

        prj = "openSUSE:Factory:Staging:B"
        # Get rq
        num = self.api.get_request_id_for_package(prj, "wine")
        self.assertEqual(333, num)
        # Get package name
        self.assertEqual("wine", self.api.get_package_for_request_id(prj, num))

    def test_rm_from_prj(self):
        prj = "openSUSE:Factory:Staging:B"
        pkg = "wine"

        full_name = prj + "/" + pkg

        # Verify package is there
        self.assertTrue(full_name in self.obs.links)

        # Get rq number
        num = self.api.get_request_id_for_package(prj, pkg)

        # Delete the package
        self.api.rm_from_prj(prj, package="wine")

        # Verify package is not there
        self.assertTrue(full_name not in self.obs.links)

        # RQ is gone
        self.assertEqual(None, self.api.get_request_id_for_package(prj, pkg))
        self.assertEqual(None, self.api.get_package_for_request_id(prj, num))

        # Verify that review is closed
        self.assertEqual("accepted", self.obs.requests[str(num)]["review"])
        self.assertEqual("new", self.obs.requests[str(num)]["request"])

    def test_rm_from_prj_2(self):
        # Try the same with request number
        prj = "openSUSE:Factory:Staging:B"
        pkg = "wine"

        full_name = prj + "/" + pkg

        # Get rq number
        num = self.api.get_request_id_for_package(prj, pkg)

        # Delete the package
        self.api.rm_from_prj(prj, request_id=num)

        # Verify package is not there
        self.assertTrue(full_name not in self.obs.links)

        # RQ is gone
        self.assertEqual(None, self.api.get_request_id_for_package(prj, pkg))
        self.assertEqual(None, self.api.get_package_for_request_id(prj, num))

        # Verify that review is closed
        self.assertEqual("accepted", self.obs.requests[str(num)]["review"])
        self.assertEqual("new", self.obs.requests[str(num)]["request"])

    def test_add_sr(self):
        prj = "openSUSE:Factory:Staging:A"
        rq = "123"

        # Running it twice shouldn't change anything
        for i in range(2):
            # Add rq to the project
            self.api.rq_to_prj(rq, prj)
            # Verify that review is there
            self.assertEqual("new", self.obs.requests[rq]["review"])
            self.assertEqual("review", self.obs.requests[rq]["request"])
            self.assertEqual(
                self.api.get_prj_pseudometa("openSUSE:Factory:Staging:A"),
                {"requests": [{"id": 123, "package": "gcc", "author": "Admin"}]},
            )

    def test_create_package_container(self):
        """Test if the uploaded _meta is correct."""

        self.api.create_package_container("openSUSE:Factory:Staging:B", "wine")
        self.assertEqual(httpretty.last_request().method, "PUT")
        self.assertEqual(httpretty.last_request().body, '<package name="wine"><title/><description/></package>')
        self.assertEqual(httpretty.last_request().path, "/source/openSUSE:Factory:Staging:B/wine/_meta")

        self.api.create_package_container("openSUSE:Factory:Staging:B", "wine", disable_build=True)
        self.assertEqual(httpretty.last_request().method, "PUT")
        self.assertEqual(
            httpretty.last_request().body,
            '<package name="wine"><title /><description /><build><disable /></build></package>',
        )
        self.assertEqual(httpretty.last_request().path, "/source/openSUSE:Factory:Staging:B/wine/_meta")

    def test_review_handling(self):
        """Test whether accepting/creating reviews behaves correctly."""

        # Add review
        self.api.add_review("123", by_project="openSUSE:Factory:Staging:A")
        self.assertEqual(httpretty.last_request().method, "POST")
        self.assertEqual(httpretty.last_request().querystring[u"cmd"], [u"addreview"])
        # Try to re-add it; this shouldn't do anything
        self.api.add_review("123", by_project="openSUSE:Factory:Staging:A")
        self.assertEqual(httpretty.last_request().method, "GET")
        # Accept review
        self.api.set_review("123", "openSUSE:Factory:Staging:A")
        self.assertEqual(httpretty.last_request().method, "POST")
        self.assertEqual(httpretty.last_request().querystring[u"cmd"], [u"changereviewstate"])
        # Trying to accept it again shouldn't do anything
        self.api.set_review("123", "openSUSE:Factory:Staging:A")
        self.assertEqual(httpretty.last_request().method, "GET")
        # But we should be able to reopen it
        self.api.add_review("123", by_project="openSUSE:Factory:Staging:A")
        self.assertEqual(httpretty.last_request().method, "POST")
        self.assertEqual(httpretty.last_request().querystring[u"cmd"], [u"addreview"])

    def test_prj_from_letter(self):

        # Verify it works
        self.assertEqual(self.api.prj_from_letter("openSUSE:Factory"), "openSUSE:Factory")
        self.assertEqual(self.api.prj_from_letter("A"), "openSUSE:Factory:Staging:A")

    def test_frozen_mtime(self):
        """Test frozen mtime."""

        # Testing frozen mtime
        self.assertTrue(self.api.days_since_last_freeze("openSUSE:Factory:Staging:A") > 8)
        self.assertTrue(self.api.days_since_last_freeze("openSUSE:Factory:Staging:B") < 1)

        # U == unfrozen
        self.assertTrue(self.api.days_since_last_freeze("openSUSE:Factory:Staging:U") > 1000)

    def test_frozen_enough(self):
        """Test frozen enough."""

        # Testing frozen enough
        self.assertEqual(self.api.prj_frozen_enough("openSUSE:Factory:Staging:B"), True)
        self.assertEqual(self.api.prj_frozen_enough("openSUSE:Factory:Staging:A"), False)

        # U == unfrozen
        self.assertEqual(self.api.prj_frozen_enough("openSUSE:Factory:Staging:U"), False)

    def test_move(self):
        """Test package movement."""

        init_data = self.api.get_package_information("openSUSE:Factory:Staging:B", "wine")
        self.api.move_between_project("openSUSE:Factory:Staging:B", 333, "openSUSE:Factory:Staging:A")
        test_data = self.api.get_package_information("openSUSE:Factory:Staging:A", "wine")
        self.assertEqual(init_data, test_data)
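
Several of the tests above assert on httpretty.last_request() immediately after the API call under test. Here is a minimal, self-contained sketch of that pattern; the endpoint and payload are illustrative only and do not mirror the real OBS API.

# Minimal sketch of the httpretty last_request() assertion pattern used above.
# The URL and body are made up for illustration.
import unittest

import httpretty
import requests


class LastRequestPattern(unittest.TestCase):
    @httpretty.activate
    def test_put_is_recorded(self):
        httpretty.register_uri(httpretty.PUT, 'http://obs.example/source/prj/pkg/_meta', body='ok')

        requests.put('http://obs.example/source/prj/pkg/_meta', data='<package name="pkg"/>')

        # httpretty records the last intercepted request for inspection.
        self.assertEqual(httpretty.last_request().method, 'PUT')
        self.assertEqual(httpretty.last_request().path, '/source/prj/pkg/_meta')


if __name__ == '__main__':
    unittest.main()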
Example #55
class StagingWorkflow(object):
    def __init__(self, project=PROJECT):
        """
        Initialize the configuration
        """
        THIS_DIR = os.path.dirname(os.path.abspath(__file__))
        oscrc = os.path.join(THIS_DIR, 'test.oscrc')

        self.apiurl = APIURL
        logging.basicConfig()

        # clear cache from other tests - otherwise the VCR is replayed depending
        # on test order, which can be harmful
        memoize_session_reset()

        osc.core.conf.get_config(override_conffile=oscrc,
                                 override_no_keyring=True,
                                 override_no_gnome_keyring=True)
        if os.environ.get('OSC_DEBUG'):
            osc.core.conf.config['debug'] = 1
        self.project = project
        self.projects = {}
        self.requests = []
        self.groups = []
        self.users = []
        CacheManager.test = True
        # disable caching, the TTLs break any reproducibility
        Cache.CACHE_DIR = None
        Cache.PATTERNS = {}
        Cache.init()
        self.setup_remote_config()
        self.load_config()
        self.api = StagingAPI(APIURL, project)

    def load_config(self, project=None):
        if project is None:
            project = self.project
        self.config = Config(APIURL, project)

    def create_attribute_type(self, namespace, name, values=None):
        meta="""
        <namespace name='{}'>
            <modifiable_by user='******'/>
        </namespace>""".format(namespace)
        url = osc.core.makeurl(APIURL, ['attribute', namespace, '_meta'])
        osc.core.http_PUT(url, data=meta)

        meta="<definition name='{}' namespace='{}'><description/>".format(name, namespace)
        if values:
            meta += "<count>{}</count>".format(values)
        meta += "<modifiable_by role='maintainer'/></definition>"
        url = osc.core.makeurl(APIURL, ['attribute', namespace, name, '_meta'])
        osc.core.http_PUT(url, data=meta)

    def setup_remote_config(self):
        self.create_target()
        self.create_attribute_type('OSRT', 'Config', 1)
        attribute_value_save(APIURL, self.project, 'Config', 'overridden-by-local = remote-nope\n'
                                                        'remote-only = remote-indeed\n')

    def create_group(self, name, users=[]):

        meta = """
        <group>
          <title>{}</title>
        </group>
        """.format(name)

        if len(users):
            root = ET.fromstring(meta)
            persons = ET.SubElement(root, 'person')
            for user in users:
                ET.SubElement(persons, 'person', { 'userid': user } )
            meta = ET.tostring(root)

        if name not in self.groups:
            self.groups.append(name)
        url = osc.core.makeurl(APIURL, ['group', name])
        osc.core.http_PUT(url, data=meta)

    def create_user(self, name):
        if name in self.users: return
        meta = """
        <person>
          <login>{}</login>
          <email>{}@example.com</email>
          <state>confirmed</state>
        </person>
        """.format(name, name)
        self.users.append(name)
        url = osc.core.makeurl(APIURL, ['person', name])
        osc.core.http_PUT(url, data=meta)
        url = osc.core.makeurl(APIURL, ['person', name], {'cmd': 'change_password'})
        osc.core.http_POST(url, data='opensuse')
        home_project = 'home:' + name
        self.projects[home_project] = Project(home_project, create=False)

    def create_target(self):
        if self.projects.get('target'): return
        self.create_group('factory-staging')
        self.projects['target'] = Project(name=self.project, reviewer={'groups': ['factory-staging']})

    def setup_rings(self):
        self.create_target()
        self.projects['ring0'] = Project(name=self.project + ':Rings:0-Bootstrap')
        self.projects['ring1'] = Project(name=self.project + ':Rings:1-MinimalX')
        target_wine = Package(name='wine', project=self.projects['target'])
        target_wine.create_commit()
        self.create_link(target_wine, self.projects['ring1'])

    def create_package(self, project, package):
        project = self.create_project(project)
        return Package(name=package, project=project)

    def create_link(self, source_package, target_project, target_package=None):
        if not target_package:
            target_package = source_package.name
        target_package = Package(name=target_package, project=target_project)
        url = self.api.makeurl(['source', target_project.name, target_package.name, '_link'])
        osc.core.http_PUT(url, data='<link project="{}" package="{}"/>'.format(source_package.project.name,
                                                                               source_package.name))
        return target_package

    def create_project(self, name, reviewer={}, maintainer={}, project_links=[]):
        if isinstance(name, Project):
            return name
        if name in self.projects:
            return self.projects[name]
        self.projects[name] = Project(name, reviewer=reviewer,
                                      maintainer=maintainer,
                                      project_links=project_links)
        return self.projects[name]

    def submit_package(self, package=None):
        request = Request(source_package=package, target_project=self.project)
        self.requests.append(request)
        return request

    def create_submit_request(self, project, package, text=None):
        project = self.create_project(project)
        package = Package(name=package, project=project)
        package.create_commit(text=text)
        return self.submit_package(package)

    def create_staging(self, suffix, freeze=False, rings=None):
        project_links = []
        if rings == 0:
            project_links.append(self.project + ":Rings:0-Bootstrap")
        if rings == 1 or rings == 0:
            project_links.append(self.project + ":Rings:1-MinimalX")
        staging = Project(self.project + ':Staging:' + suffix, project_links=project_links)
        if freeze:
            FreezeCommand(self.api).perform(staging.name)
        self.projects['staging:{}'.format(suffix)] = staging
        return staging

    def __del__(self):
        try:
            self.remove()
        except:
            # Exceptions in destructors are normally ignored and only an info
            # message is displayed. Make this a little more useful by
            # printing the traceback into the capture log.
            traceback.print_exc(None, sys.stdout)

    def remove(self):
        print('deleting staging workflow')
        for project in self.projects.values():
            project.remove()
        for request in self.requests:
            request.revoke()
        for group in self.groups:
            url = osc.core.makeurl(APIURL, ['group', group])
            try:
                osc.core.http_DELETE(url)
            except HTTPError:
                pass
        print('done')
        if hasattr(self.api, '_invalidate_all'):
            self.api._invalidate_all()
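
A minimal usage sketch for the StagingWorkflow fixture above, assuming the StagingWorkflow class is in scope and the same local OBS test instance behind APIURL as in the rest of this suite; the test name is illustrative.

# Minimal usage sketch for the StagingWorkflow fixture defined above.
# Assumes the local OBS test instance behind APIURL, as in the other tests.
import unittest


class ExampleWorkflowUsage(unittest.TestCase):
    def setUp(self):
        self.wf = StagingWorkflow()
        self.wf.setup_rings()
        self.staging_a = self.wf.create_staging('A', freeze=True)

    def tearDown(self):
        # Tear down the projects, requests and groups created by the fixture.
        self.wf.remove()

    def test_staging_listed(self):
        # The freshly created staging project should be reported by the API.
        self.assertIn(self.staging_a.name, self.wf.api.get_staging_projects())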


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Publish report on staging status as comment on staging project')
    parser.add_argument('-s', '--staging', type=str, default=None,
                        help='staging project')
    parser.add_argument('-f', '--force', action='store_true', default=False,
                        help='force a comment to be written')
    parser.add_argument('-p', '--project', type=str, default='openSUSE:Factory',
                        help='project to check (ex. openSUSE:Factory, openSUSE:Leap:15.1)')
    parser.add_argument('-d', '--debug', action='store_true', default=False,
                        help='enable debug information')

    args = parser.parse_args()

    osc.conf.get_config()
    osc.conf.config['debug'] = args.debug

    apiurl = osc.conf.config['apiurl']
    Config(apiurl, args.project)
    api = StagingAPI(apiurl, args.project)
    staging_report = StagingReport(api)

    if args.staging:
        staging_report.report(api.prj_from_letter(args.staging), False, args.force)
    else:
        for staging in api.get_staging_projects():
            staging_report.report(staging, True, args.force)