    def __init__(self, project=PROJECT):
        """
        Initialize the configuration
        """
        THIS_DIR = os.path.dirname(os.path.abspath(__file__))
        oscrc = os.path.join(THIS_DIR, 'test.oscrc')

        self.apiurl = APIURL
        logging.basicConfig()

        # clear cache from other tests - otherwise the VCR is replayed depending
        # on test order, which can be harmful
        memoize_session_reset()

        osc.core.conf.get_config(override_conffile=oscrc,
                                 override_no_keyring=True,
                                 override_no_gnome_keyring=True)
        if os.environ.get('OSC_DEBUG'):
            osc.core.conf.config['debug'] = 1
        self.project = project
        self.projects = {}
        self.requests = []
        self.groups = []
        self.users = []
        CacheManager.test = True
        # disable caching, the TTLs break any reproducibility
        Cache.CACHE_DIR = None
        Cache.PATTERNS = {}
        Cache.init()
        self.setup_remote_config()
        self.load_config()
        self.api = StagingAPI(APIURL, project)
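A minimal sketch of how a test might consume a fixture built this way, assuming the class holding this __init__ is importable (the class and test names below are hypothetical, not from the original code):

    import unittest

    class ExampleStagingTest(unittest.TestCase):
        def setUp(self):
            # OBSLocalFixture is a placeholder name for the class whose
            # __init__ is shown above; it wires oscrc, cache and StagingAPI.
            self.fixture = OBSLocalFixture()

        def test_api_bound_to_project(self):
            self.assertEqual(self.fixture.project, PROJECT)
            self.assertIsNotNone(self.fixture.api)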
Example #2
def main(args):
    global client
    client = InfluxDBClient(args.host, args.port, args.user, args.password,
                            args.project)

    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug

    # Use separate cache since it is persistent.
    Cache.CACHE_DIR = os.path.expanduser('~/.cache/osc-plugin-factory-metrics')
    if args.wipe_cache:
        Cache.delete_all()
    Cache.PATTERNS['/search/request'] = sys.maxsize
    Cache.init()

    Config(args.project)
    api = StagingAPI(osc.conf.config['apiurl'], args.project)

    global who_workaround_swap, who_workaround_miss
    who_workaround_swap = who_workaround_miss = 0

    points_requests = ingest_requests(api, args.project)
    points_schedule = ingest_release_schedule(args.project)

    print('who_workaround_swap', who_workaround_swap)
    print('who_workaround_miss', who_workaround_miss)

    print('wrote {:,} points and {:,} annotation points to db'.format(
        points_requests, points_schedule))
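The ingest_* helpers are not part of this snippet; a rough sketch of how points typically reach InfluxDB through this client, assuming the standard write_points() API of influxdb-python (measurement, tags and fields below are invented for illustration):

    point = {
        'measurement': 'release_schedule',        # hypothetical measurement
        'tags': {'project': 'openSUSE:Factory'},
        'fields': {'value': 1},
        'time': '2018-01-01T00:00:00Z',
    }
    client.write_points([point])                  # client created in main()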
def do_origin(self, subcmd, opts, *args):
    """${cmd_name}: tools for working with origin information

    ${cmd_option_list}

    config: print expanded OSRT:OriginConfig
    cron: update the lookup for all projects with an OSRT:OriginConfig attribute
    history: list requests containing an origin annotation
    list: print all packages and their origin
    package: print the origin of package
    potentials: list potential origins of a package
    projects: list all projects with an OSRT:OriginConfig attribute
    report: print origin summary report
    update: handle package source changes as either delete or submit requests

    Usage:
        osc origin config [--origins-only]
        osc origin cron
        osc origin history [--format json|yaml] PACKAGE
        osc origin list [--force-refresh] [--format json|yaml]
        osc origin package [--debug] PACKAGE
        osc origin potentials [--format json|yaml] PACKAGE
        osc origin projects [--format json|yaml]
        osc origin report [--diff] [--force-refresh] [--mail]
        osc origin update [--listen] [--listen-seconds] [PACKAGE...]
    """

    if len(args) == 0:
        raise oscerr.WrongArgs('A command must be indicated.')
    command = args[0]
    if command not in [
            'config', 'cron', 'history', 'list', 'package', 'potentials',
            'projects', 'report', 'update'
    ]:
        raise oscerr.WrongArgs('Unknown command: {}'.format(command))
    if command == 'package' and len(args) < 2:
        raise oscerr.WrongArgs('A package must be indicated.')

    level = logging.DEBUG if opts.debug else None
    logging.basicConfig(level=level, format='[%(levelname).1s] %(message)s')

    # Allow for determining project from osc store.
    if not opts.project and core.is_project_dir('.'):
        opts.project = core.store_read_project('.')

    Cache.init()
    apiurl = self.get_api_url()
    if command not in ['cron', 'projects', 'update']:
        if not opts.project:
            raise oscerr.WrongArgs('A project must be indicated.')
        config = config_load(apiurl, opts.project)
        if not config:
            raise oscerr.WrongArgs(
                'OSRT:OriginConfig attribute missing from {}'.format(
                    opts.project))

    function = 'osrt_origin_{}'.format(command)
    globals()[function](apiurl, opts, *args[1:])
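The final two lines resolve a module-level handler by name; a stripped-down sketch of that dispatch pattern with a hypothetical handler (not taken from the plugin):

    def osrt_origin_example(apiurl, opts, *args):
        # hypothetical handler; real handlers follow the same signature
        print('handling example for', apiurl)

    command = 'example'
    handler = globals()['osrt_origin_{}'.format(command)]
    handler('https://api.example.org', None)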
def do_origin(self, subcmd, opts, *args):
    """${cmd_name}: tools for working with origin information

    ${cmd_option_list}

    config: print expanded OSRT:OriginConfig
    cron: update the lookup for all projects with an OSRT:OriginConfig attribute
    history: list requests containing an origin annotation
    list: print all packages and their origin
    package: print the origin of package
    potentials: list potential origins of a package
    projects: list all projects with an OSRT:OriginConfig attribute
    report: print origin summary report

    Usage:
        osc origin config [--origins-only]
        osc origin cron
        osc origin history [--format json|yaml] PACKAGE
        osc origin list [--force-refresh] [--format json|yaml]
        osc origin package [--debug] PACKAGE
        osc origin potentials [--format json|yaml] PACKAGE
        osc origin projects [--format json|yaml]
        osc origin report [--diff] [--force-refresh] [--mail]
    """

    if len(args) == 0:
        raise oscerr.WrongArgs('A command must be indicated.')
    command = args[0]
    if command not in ['config', 'cron', 'history', 'list', 'package', 'potentials', 'projects', 'report']:
        raise oscerr.WrongArgs('Unknown command: {}'.format(command))
    if command == 'package' and len(args) < 2:
        raise oscerr.WrongArgs('A package must be indicated.')

    level = logging.DEBUG if opts.debug else None
    logging.basicConfig(level=level, format='[%(levelname).1s] %(message)s')

    # Allow for determining project from osc store.
    if not opts.project and core.is_project_dir('.'):
        opts.project = core.store_read_project('.')

    Cache.init()
    apiurl = self.get_api_url()
    if command not in ['cron', 'projects']:
        if not opts.project:
            raise oscerr.WrongArgs('A project must be indicated.')
        config = config_load(apiurl, opts.project)
        if not config:
            raise oscerr.WrongArgs('OSRT:OriginConfig attribute missing from {}'.format(opts.project))

    function = 'osrt_origin_{}'.format(command)
    globals()[function](apiurl, opts, *args[1:])
Example #5
    def __init__(self, project=PROJECT):
        """Initializes the configuration

        Note this constructor calls :func:`create_target`, which implies several projects and users
        are created right away.

        :param project: default target project
        :type project: str
        """
        THIS_DIR = os.path.dirname(os.path.abspath(__file__))
        oscrc = os.path.join(THIS_DIR, 'test.oscrc')

        # set to None so the destructor can return early in case of exceptions
        self.api = None
        self.apiurl = APIURL
        self.project = project
        self.projects = {}
        self.requests = []
        self.groups = []
        self.users = []
        self.attr_types = {}
        logging.basicConfig()

        # clear cache from other tests - otherwise the VCR is replayed depending
        # on test order, which can be harmful
        memoize_session_reset()

        osc.core.conf.get_config(override_conffile=oscrc,
                                 override_no_keyring=True,
                                 override_no_gnome_keyring=True)
        os.environ['OSC_CONFIG'] = oscrc

        if os.environ.get('OSC_DEBUG'):
            osc.core.conf.config['debug'] = 1

        CacheManager.test = True
        # disable caching, the TTLs break any reproducibility
        Cache.CACHE_DIR = None
        Cache.PATTERNS = {}
        Cache.init()
        # Note this implicitly calls create_target()
        self.setup_remote_config()
        self.load_config()
        self.api = StagingAPI(APIURL, project)
Example #6
def main(args):
    global client
    client = InfluxDBClient(args.host, args.port, args.user, args.password,
                            args.project)

    osc.conf.get_config(override_apiurl=args.apiurl)
    apiurl = osc.conf.config['apiurl']
    osc.conf.config['debug'] = args.debug

    # Ensure database exists.
    client.create_database(client._database)

    metrics_release.ingest(client)
    if args.release_only:
        return

    # Use separate cache since it is persistent.
    _, package = project_pseudometa_package(apiurl, args.project)
    if args.wipe_cache:
        Cache.delete_all()
    if args.heavy_cache:
        Cache.PATTERNS[r'/search/request'] = sys.maxsize
        Cache.PATTERNS[r'/source/[^/]+/{}/_history'.format(
            package)] = sys.maxsize
    Cache.PATTERNS[r'/source/[^/]+/{}/[^/]+\?rev=.*'.format(
        package)] = sys.maxsize
    Cache.init('metrics')

    Config(apiurl, args.project)
    api = StagingAPI(apiurl, args.project)

    print('dashboard: wrote {:,} points'.format(ingest_dashboard(api)))

    global who_workaround_swap, who_workaround_miss
    who_workaround_swap = who_workaround_miss = 0

    points_requests = ingest_requests(api, args.project)
    points_schedule = ingest_release_schedule(args.project)

    print('who_workaround_swap', who_workaround_swap)
    print('who_workaround_miss', who_workaround_miss)

    print('wrote {:,} points and {:,} annotation points to db'.format(
        points_requests, points_schedule))
def main(args):
    global client
    client = InfluxDBClient(args.host, args.port, args.user, args.password, args.project)

    osc.conf.get_config(override_apiurl=args.apiurl)
    apiurl = osc.conf.config['apiurl']
    osc.conf.config['debug'] = args.debug

    # Ensure database exists.
    client.create_database(client._database)

    metrics_release.ingest(client)
    if args.release_only:
        return

    # Use separate cache since it is persistent.
    _, package = project_pseudometa_package(apiurl, args.project)
    if args.wipe_cache:
        Cache.delete_all()
    if args.heavy_cache:
        Cache.PATTERNS[r'/search/request'] = sys.maxsize
        Cache.PATTERNS[r'/source/[^/]+/{}/_history'.format(package)] = sys.maxsize
    Cache.PATTERNS[r'/source/[^/]+/{}/[^/]+\?rev=.*'.format(package)] = sys.maxsize
    Cache.init('metrics')

    Config(apiurl, args.project)
    api = StagingAPI(apiurl, args.project)

    print('dashboard: wrote {:,} points'.format(ingest_dashboard(api)))

    global who_workaround_swap, who_workaround_miss
    who_workaround_swap = who_workaround_miss = 0

    points_requests = ingest_requests(api, args.project)
    points_schedule = ingest_release_schedule(args.project)

    print('who_workaround_swap', who_workaround_swap)
    print('who_workaround_miss', who_workaround_miss)

    print('wrote {:,} points and {:,} annotation points to db'.format(
        points_requests, points_schedule))
Example #8
    def __new__(cls, *args, **kwargs):
        """Class constructor."""
        if not OBS._self:
            OBS._self = super(OBS, cls).__new__(cls, *args, **kwargs)

        Cache.delete_all()
        httpretty.reset()
        httpretty.enable()

        httpretty.register_uri(httpretty.GET,
                               re.compile(r'.*'),
                               body=router_handler_GET)
        httpretty.register_uri(httpretty.POST,
                               re.compile(r'.*'),
                               body=router_handler_POST)
        httpretty.register_uri(httpretty.PUT,
                               re.compile(r'.*'),
                               body=router_handler_PUT)
        httpretty.register_uri(httpretty.DELETE,
                               re.compile(r'.*'),
                               body=router_handler_DELETE)

        return OBS._self
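A brief illustration of what the __new__ override above guarantees, assuming OBS is importable: every instantiation returns the same object, with httpretty re-armed so HTTP traffic is routed to the mock handlers.

    obs_a = OBS()
    obs_b = OBS()
    assert obs_a is obs_b   # singleton: both names refer to the same instance
    # from here on, GET/POST/PUT/DELETE requests hit the router_handler_* mocks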
Example #9
def main(args):
    global client
    client = InfluxDBClient(args.host, args.port, args.user, args.password,
                            args.project)

    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug

    # Ensure database exists.
    client.create_database(client._database)

    metrics_release.ingest(client)
    if args.release_only:
        return

    # Use separate cache since it is persistent.
    Cache.CACHE_DIR = Cache.CACHE_DIR + '-metrics'
    if args.wipe_cache:
        Cache.delete_all()
    Cache.PATTERNS['/search/request'] = sys.maxsize
    Cache.init()

    Config(args.project)
    api = StagingAPI(osc.conf.config['apiurl'], args.project)

    global who_workaround_swap, who_workaround_miss
    who_workaround_swap = who_workaround_miss = 0

    points_requests = ingest_requests(api, args.project)
    points_schedule = ingest_release_schedule(args.project)

    print('who_workaround_swap', who_workaround_swap)
    print('who_workaround_miss', who_workaround_miss)

    print('wrote {:,} points and {:,} annotation points to db'.format(
        points_requests, points_schedule))
def do_staging(self, subcmd, opts, *args):
    """${cmd_name}: Commands to work with staging projects

    ${cmd_option_list}

    "accept" will accept all requests in
        $PROJECT:Staging:<LETTER> into $PROJECT
        If openSUSE:* project, requests marked ready from adi stagings will also
        be accepted.

    "acheck" will check if it is safe to accept new staging projects
        As $PROJECT is syncing the right package versions between
        /standard, /totest and /snapshot, it is important that the projects
        are clean prior to a checkin round.

    "adi" will list already staged requests, stage new requests, and supersede
        requests where applicable. New adi stagings will be created for new
        packages based on the grouping options used. The default grouping is by
        source project. When adi stagings are ready the request will be marked
        ready, unstaged, and the adi staging deleted.

    "check" will check if all packages are links without changes

    "check_duplicate_binaries" list binaries provided by multiple packages

    "config" will modify or view staging specific configuration

        Target project OSRT:Config attribute configuration applies to all
        stagings. Both configuration locations follow the .oscrc format (space
        separated list).

        config
            Print all staging configuration.
        config key
            Print the value of key for stagings.
        config key value...
            Set the value of key for stagings.
        config --clear
            Clear all staging configuration.
        config --clear key
            Clear (unset) a single key from staging configuration
        config --append key value...
            Append value to existing value or set if no existing value.

        All of the above may be restricted to a set of stagings.

        The staging configuration is automatically cleared anytime staging
        pseudometa is cleared (accept, or unstage all requests).

        The keys that may be set in staging configuration are:

        - repo_checker-binary-whitelist[-arch]: appended to target project list
        - todo: text to be printed after staging is accepted

    "cleanup_rings" will try to cleanup rings content and print
        out problems

    "freeze" will freeze the sources of the project's links while not
        affecting the source packages

    "frozenage" will show when the respective staging project was last frozen

    "ignore" will ignore a request from "list" and "adi" commands until unignored

    "unignore" will remove from requests from ignore list
        If the --cleanup flag is included then all ignored requests that were
        changed from state new or review more than 3 days ago will be removed.

    "list" will list/supersede requests for ring packages or all if no rings.

    "lock" acquire a hold on the project in order to execute multiple commands
        and prevent others from interrupting. An example:

        lock -m "checkin round"

        list --supersede
        adi
        accept A B C D E

        unlock

        Each command will update the lock to keep it up-to-date.

    "repair" will attempt to repair the state of a request that has been
        corrupted.

        Use the --cleanup flag to include all untracked requests.

    "select" will add requests to the project
        Stagings are expected to be either in short-hand or the full project
        name. For example letter or named stagings can be specified simply as
        A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2,
        etc. Currently, adi stagings are not supported in proposal mode.

        Requests may either be the target package or the request ID.

        When using --filter-by or --group-by the xpath will be applied to the
        request node as returned by OBS. Use the following on a current request
        to see the XML structure.

        osc api /request/1337

        A number of additional values will supplement the normal request node.

        - ./action/target/@devel_project: the devel project for the package
        - ./action/target/@devel_project_super: super devel project if relevant
        - ./action/target/@ring: the ring to which the package belongs
        - ./@aged: either True or False based on splitter-request-age-threshold
        - ./@nonfree: set to nonfree if targeting a nonfree subproject
        - ./@ignored: either False or the provided message

        Some useful examples:

        --filter-by './action/target[starts-with(@package, "yast-")]'
        --filter-by './action/target/[@devel_project="YaST:Head"]'
        --filter-by './action/target[starts-with(@ring, "1")]'
        --filter-by '@id!="1234567"'
        --filter-by 'contains(description, "#Portus")'

        --group-by='./action/target/@devel_project'
        --group-by='./action/target/@ring'

        Multiple filter-by or group-by options may be used at the same time.

        Note that when using proposal mode, multiple stagings to consider may be
        provided in addition to a list of requests by which to filter. A more
        complex example:

        select --group-by='./action/target/@devel_project' A B C 123 456 789

        This will separate the requests 123, 456, 789 by devel project and only
        consider stagings A, B, or C, if available, for placement.

        No arguments is also a valid choice and will propose all non-ignored
        requests into the first available staging. Note that bootstrapped
        stagings are only used when either required or no other stagings are
        available.

        Another useful example is placing all open requests into a specific
        letter staging with:

        select A

        Built-in strategies may be specified as well. For example:

        select --strategy devel
        select --strategy quick
        select --strategy special
        select --strategy super

        The default is none and custom is used when any filter-by or group-by
        arguments are provided.

        To merge applicable requests into an existing staging.

        select --merge A

        To automatically try all available strategies.

        select --try-strategies

        These concepts can be combined and interactive mode allows the proposal
        to be modified before it is executed.

        Moving requests can be accomplished using the --move flag. For example,
        to move already staged pac1 and pac2 to staging B use the following.

        select --move B pac1 pac2

        The staging in which the requests are staged will automatically be
        determined and the requests will be removed from that staging and placed
        in the specified staging.

        Related to this, the --filter-from option may be used in conjunction
        with --move to only move requests already staged in a specific staging.
        This can be useful if a staging master is responsible for a specific set
        of packages and wants to move them into a different staging when they
        were already placed in a mixed staging. For example, if one had a file
        with a list of packages the following would move any of them found in
        staging A to staging B.

        select --move --filter-from A B $(< package.list)

    "unselect" will remove from the project - pushing them back to the backlog
        If a message is included the requests will be ignored first.

        Use the --cleanup flag to include all obsolete requests.

    "unlock" will remove the staging lock in case it gets stuck or a manual hold
        If a command lock gets stuck while a hold is placed on a project the
        unlock command will need to be run twice since there are two layers of
        locks.

    "rebuild" will rebuild broken packages in the given stagings or all
        The rebuild command will only trigger builds for packages with less than
        3 failures since the last success or if the build log indicates a stall.

        If the force option is included the rebuild checks will be ignored and
        all packages failing to build will be triggered.

    "setprio" will set priority of requests withing stagings
        If no stagings are specified all stagings will be used.
        The default priority is important, but the possible values are:
          "critical", "important", "moderate" or "low".

    "supersede" will supersede requests were applicable.
        A request list can be used to limit what is superseded.

    Usage:
        osc staging accept [--force] [--no-cleanup] [LETTER...]
        osc staging acheck
        osc staging adi [--move] [--by-develproject] [--split] [REQUEST...]
        osc staging check [--old] [STAGING...]
        osc staging check_duplicate_binaries
        osc staging config [--append] [--clear] [STAGING...] [key] [value]
        osc staging cleanup_rings
        osc staging freeze [--no-bootstrap] STAGING...
        osc staging frozenage [STAGING...]
        osc staging ignore [-m MESSAGE] REQUEST...
        osc staging unignore [--cleanup] [REQUEST...|all]
        osc staging list [--supersede]
        osc staging lock [-m MESSAGE]
        osc staging select [--no-freeze] [--move [--filter-from STAGING]]
            [--add PACKAGE]
            STAGING REQUEST...
        osc staging select [--no-freeze] [--interactive|--non-interactive]
            [--filter-by...] [--group-by...]
            [--merge] [--try-strategies] [--strategy]
            [STAGING...] [REQUEST...]
        osc staging unselect [--cleanup] [-m MESSAGE] [REQUEST...]
        osc staging unlock
        osc staging rebuild [--force] [STAGING...]
        osc staging repair [--cleanup] [REQUEST...]
        osc staging setprio [STAGING...] [priority]
        osc staging supersede [REQUEST...]
    """
    if opts.version:
        self._print_version()

    # verify the argument counts match the commands
    if len(args) == 0:
        raise oscerr.WrongArgs('No command given, see "osc help staging"!')
    cmd = args[0]
    if cmd in (
        'accept',
        'adi',
        'check',
        'config',
        'frozenage',
        'unignore',
        'select',
        'unselect',
        'rebuild',
        'repair',
        'setprio',
        'supersede',
    ):
        min_args, max_args = 0, None
    elif cmd in (
        'freeze',
        'ignore',
    ):
        min_args, max_args = 1, None
    elif cmd in (
        'acheck',
        'check_duplicate_binaries',
        'cleanup_rings',
        'list',
        'lock',
        'unlock',
    ):
        min_args, max_args = 0, 0
    else:
        raise oscerr.WrongArgs('Unknown command: %s' % cmd)
    args = clean_args(args)
    if len(args) - 1 < min_args:
        raise oscerr.WrongArgs('Too few arguments.')
    if max_args is not None and len(args) - 1 > max_args:
        raise oscerr.WrongArgs('Too many arguments.')

    # Allow for determining project from osc store.
    if not opts.project:
        if core.is_project_dir('.'):
            opts.project = core.store_read_project('.')
        else:
            opts.project = 'Factory'

    # Cache the remote config fetch.
    Cache.init()

    # Init the OBS access and configuration
    opts.project = self._full_project_name(opts.project)
    opts.apiurl = self.get_api_url()
    opts.verbose = False
    Config(opts.apiurl, opts.project)

    colorama.init(autoreset=True,
        strip=(opts.no_color or not bool(int(conf.config.get('staging.color', True)))))
    # Allow colors to be changed.
    for name in dir(Fore):
        if not name.startswith('_'):
            # .oscrc requires keys to be lower-case.
            value = conf.config.get('staging.color.' + name.lower())
            if value:
                setattr(Fore, name, ansi.code_to_chars(value))

    if opts.wipe_cache:
        Cache.delete_all()

    api = StagingAPI(opts.apiurl, opts.project)
    needed = lock_needed(cmd, opts)
    with OBSLock(opts.apiurl, opts.project, reason=cmd, needed=needed) as lock:

        # call the respective command and parse args by need
        if cmd == 'check':
            if len(args) == 1:
                CheckCommand(api).perform(None, opts.old)
            else:
                for prj in args[1:]:
                    CheckCommand(api).perform(prj, opts.old)
                    print()
        elif cmd == 'check_duplicate_binaries':
            CheckDuplicateBinariesCommand(api).perform(opts.save)
        elif cmd == 'config':
            projects = set()
            key = value = None
            stagings = api.get_staging_projects_short(None) + \
                       api.get_staging_projects()
            for arg in args[1:]:
                if arg in stagings:
                    projects.add(api.prj_from_short(arg))
                elif key is None:
                    key = arg
                elif value is None:
                    value = arg
                else:
                    value += ' ' + arg

            if not len(projects):
                projects = api.get_staging_projects()

            ConfigCommand(api).perform(projects, key, value, opts.append, opts.clear)
        elif cmd == 'freeze':
            for prj in args[1:]:
                prj = api.prj_from_short(prj)
                print(Fore.YELLOW + prj)
                FreezeCommand(api).perform(prj, copy_bootstrap=opts.bootstrap)
        elif cmd == 'frozenage':
            projects = api.get_staging_projects_short() if len(args) == 1 else args[1:]
            for prj in projects:
                prj = api.prj_from_letter(prj)
                print('{} last frozen {}{:.1f} days ago'.format(
                    Fore.YELLOW + prj + Fore.RESET,
                    Fore.GREEN if api.prj_frozen_enough(prj) else Fore.RED,
                    api.days_since_last_freeze(prj)))
        elif cmd == 'acheck':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64")
            if version_totest:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty = api.is_repo_dirty(api.project, 'totest')
                print("version_openqa: %s / version_totest: %s / totest_dirty: %s\n" % (version_openqa, version_totest, totest_dirty))
            else:
                print("acheck is unavailable in %s!\n" % (api.project))
        elif cmd == 'accept':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64")

            if version_totest is None or opts.force:
                # SLE does not have a totest_version or openqa_version - ignore it
                version_openqa = version_totest
                totest_dirty   = False
            else:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty   = api.is_repo_dirty(api.project, 'totest')

            if version_openqa == version_totest and not totest_dirty:
                cmd = AcceptCommand(api)
                for prj in args[1:]:
                    if cmd.perform(api.prj_from_letter(prj), opts.force):
                        cmd.reset_rebuild_data(prj)
                    else:
                        return
                    if not opts.no_cleanup:
                        if api.item_exists(api.prj_from_letter(prj)):
                            cmd.cleanup(api.prj_from_letter(prj))
                cmd.accept_other_new()
                if opts.project.startswith('openSUSE:'):
                    cmd.update_factory_version()
                    if api.item_exists(api.crebuild):
                        cmd.sync_buildfailures()
            else:
                print("Not safe to accept: /totest is not yet synced")
        elif cmd == 'unselect':
            if opts.message:
                print('Ignoring requests first')
                IgnoreCommand(api).perform(args[1:], opts.message)
            UnselectCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'select':
            # Include list of all stagings in short-hand and by full name.
            existing_stagings = api.get_staging_projects_short(None)
            existing_stagings += api.get_staging_projects()
            stagings = []
            requests = []
            for arg in args[1:]:
                # Since requests may be given by either request ID or package
                # name and stagings may include multi-letter special stagings
                # there is no easy way to distinguish between stagings and
                # requests in arguments. Therefore, check if argument is in the
                # list of short-hand and full project name stagings, otherwise
                # consider it a request. This also allows for special stagings
                # with the same name as package, but the staging will be assumed
                # first time around. The current practice seems to be to start a
                # special staging with a capital letter which makes them unique.
                # Lastly, adi stagings are consistently prefixed with adi:, which
                # also makes it easy to distinguish them from request IDs.
                if arg in existing_stagings and arg not in stagings:
                    stagings.append(api.extract_staging_short(arg))
                elif arg not in requests:
                    requests.append(arg)

            if len(stagings) != 1 or len(requests) == 0 or opts.filter_by or opts.group_by:
                if opts.move or opts.filter_from:
                    print('--move and --filter-from must be used with explicit staging and request list')
                    return

                open_requests = api.get_open_requests({'withhistory': 1}, include_nonfree=False)
                if len(open_requests) == 0:
                    print('No open requests to consider')
                    return

                splitter = RequestSplitter(api, open_requests, in_ring=True)

                considerable = splitter.stagings_load(stagings)
                if considerable == 0:
                    print('No considerable stagings on which to act')
                    return

                if opts.merge:
                    splitter.merge()
                if opts.try_strategies:
                    splitter.strategies_try()
                if len(requests) > 0:
                    splitter.strategy_do('requests', requests=requests)
                if opts.strategy:
                    splitter.strategy_do(opts.strategy)
                elif opts.filter_by or opts.group_by:
                    kwargs = {}
                    if opts.filter_by:
                        kwargs['filters'] = opts.filter_by
                    if opts.group_by:
                        kwargs['groups'] = opts.group_by
                    splitter.strategy_do('custom', **kwargs)
                else:
                    if opts.merge:
                        # Merge any none strategies before final none strategy.
                        splitter.merge(strategy_none=True)
                    splitter.strategy_do('none')
                    splitter.strategy_do_non_bootstrapped('none')

                proposal = splitter.proposal
                if len(proposal) == 0:
                    print('Empty proposal')
                    return

                if opts.interactive:
                    with tempfile.NamedTemporaryFile(mode='w', suffix='.yml') as temp:
                        temp.write(yaml.safe_dump(splitter.proposal, default_flow_style=False) + '\n\n')

                        if len(splitter.requests):
                            temp.write('# remaining requests:\n')
                            for request in splitter.requests:
                                temp.write('#    {}: {}\n'.format(
                                    request.get('id'), request.find('action/target').get('package')))
                            temp.write('\n')

                        temp.write('# move requests between stagings or comment/remove them\n')
                        temp.write('# change the target staging for a group\n')
                        temp.write('# remove the group, requests, staging, or strategy to skip\n')
                        temp.write('# stagings\n')
                        if opts.merge:
                            temp.write('# - mergeable: {}\n'
                                       .format(', '.join(sorted(splitter.stagings_mergeable +
                                                                splitter.stagings_mergeable_none))))
                        temp.write('# - considered: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_considerable))))
                        temp.write('# - remaining: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_available))))
                        temp.flush()

                        editor = os.getenv('EDITOR')
                        if not editor:
                            editor = 'xdg-open'
                        return_code = subprocess.call(editor.split(' ') + [temp.name])

                        proposal = yaml.safe_load(open(temp.name).read())

                        # Filter invalidated groups from proposal.
                        keys = ['group', 'requests', 'staging', 'strategy']
                        for group, info in sorted(proposal.items()):
                            for key in keys:
                                if not info.get(key):
                                    del proposal[group]
                                    break

                print(yaml.safe_dump(proposal, default_flow_style=False))

                print('Accept proposal? [y/n] (y): ', end='')
                if opts.non_interactive:
                    print('y')
                else:
                    response = input().lower()
                    if response != '' and response != 'y':
                        print('Quit')
                        return

                for group, info in sorted(proposal.items()):
                    print('Staging {} in {}'.format(group, info['staging']))

                    # SelectCommand expects strings.
                    request_ids = map(str, info['requests'].keys())
                    target_project = api.prj_from_short(info['staging'])

                    if 'merge' not in info:
                        # Assume that the original splitter_info is desirable
                        # and that this staging is simply a manual follow-up.
                        api.set_splitter_info_in_prj_pseudometa(target_project, info['group'], info['strategy'])

                    SelectCommand(api, target_project) \
                        .perform(request_ids, no_freeze=opts.no_freeze)
            else:
                target_project = api.prj_from_short(stagings[0])
                if opts.add:
                    api.mark_additional_packages(target_project, [opts.add])
                else:
                    filter_from = api.prj_from_short(opts.filter_from) if opts.filter_from else None
                    SelectCommand(api, target_project) \
                        .perform(requests, opts.move,
                                 filter_from, opts.no_freeze)
        elif cmd == 'cleanup_rings':
            CleanupRings(api).perform()
        elif cmd == 'ignore':
            IgnoreCommand(api).perform(args[1:], opts.message)
        elif cmd == 'unignore':
            UnignoreCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'list':
            ListCommand(api).perform(supersede=opts.supersede)
        elif cmd == 'lock':
            lock.hold(opts.message)
        elif cmd == 'adi':
            AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split)
        elif cmd == 'rebuild':
            RebuildCommand(api).perform(args[1:], opts.force)
        elif cmd == 'repair':
            RepairCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'setprio':
            stagings = []
            priority = None

            priorities = ['critical', 'important', 'moderate', 'low']
            for arg in args[1:]:
                if arg in priorities:
                    priority = arg
                else:
                    stagings.append(arg)

            PrioCommand(api).perform(stagings, priority)
        elif cmd == 'supersede':
            SupersedeCommand(api).perform(args[1:])
        elif cmd == 'unlock':
            lock.release(force=True)
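Every branch above runs inside the OBSLock context manager created earlier; a condensed sketch of that usage pattern, using only the calls visible in this snippet (needed=True stands in for the lock_needed(cmd, opts) result):

    with OBSLock(opts.apiurl, opts.project, reason=cmd, needed=True) as lock:
        ...                          # run the chosen staging command
        # lock.hold(message)         # "lock": keep the hold after the command
        # lock.release(force=True)   # "unlock": drop a stuck or manual hold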
Example #11
    def __init__(self, fixtures=FIXTURES):
        """Instance constructor."""
        self.fixtures = fixtures

        CacheManager.test = True
        Cache.init()
        Cache.delete_all()
        httpretty.enable()

        oscrc = os.path.join(fixtures, 'oscrc')
        osc.core.conf.get_config(override_conffile=oscrc,
                                 override_no_keyring=True,
                                 override_no_gnome_keyring=True)

        # Internal status of OBS.  The mockup will use this data to
        # build the responses.  We will try to put responses as XML
        # templates in the fixture directory.
        self.dashboard = {}

        self.requests = {
            '123': {
                'request': 'new',
                'review': 'accepted',
                'who': 'Admin',
                'by': 'group',
                'id': '123',
                'by_who': 'opensuse-review-team',
                'package': 'gcc',
            },
            '321': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'group',
                'id': '321',
                'by_who': 'factory-staging',
                'package': 'puppet',
            },
            '333': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'project',
                'id': '333',
                'by_who': 'openSUSE:Factory:Staging:B',
                'package': 'wine',
            },
            '501': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'project',
                'id': '501',
                'by_who': 'openSUSE:Factory:Staging:C',
                'package': 'apparmor',
            },
            '502': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'project',
                'id': '502',
                'by_who': 'openSUSE:Factory:Staging:C',
                'package': 'mariadb',
            },
            '1000': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'user',
                'id': '1000',
                'by_who': 'factory-repo-checker',
                'package': 'emacs',
            },
            '1001': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'user',
                'id': '1001',
                'by_who': 'factory-repo-checker',
                'package': 'python',
            },
        }

        self.staging_project = {
            'A': {
                'project': 'openSUSE:Factory:Staging:A',
                'title': '',
                'description': '',
            },
            'U': {
                'project': 'openSUSE:Factory:Staging:U',
                'title': 'Unfrozen',
                'description': '',
            },
            'B': {
                'project': 'openSUSE:Factory:Staging:B',
                'title': 'wine',
                'description': 'requests:\n- {id: 333, package: wine}',
            },
            'C': {
                'project':
                'openSUSE:Factory:Staging:C',
                'title':
                'A project ready to be accepted',
                'description':
                ('requests:\n- {id: 501, package: apparmor, author: Admin, type: submit}\n'
                 '- {id: 502, package: mariadb, author: Admin, type: submit}'),
            },
            'J': {
                'project':
                'openSUSE:Factory:Staging:J',
                'title':
                'A project to be checked',
                'description':
                ('requests:\n- {id: 1000, package: emacs, author: Admin}\n'
                 '- {id: 1001, package: python, author: Admin}'),
            },
        }

        self.links = {
            'openSUSE:Factory:Staging:B/wine': {
                'prj': 'openSUSE:Factory:Staging:B',
                'pkg': 'wine',
                'devprj': 'home:Admin',
            },
            'openSUSE:Factory:Staging:C/apparmor': {
                'prj': 'openSUSE:Factory:Staging:C',
                'pkg': 'apparmor',
                'devprj': 'home:Admin',
            },
            'openSUSE:Factory:Staging:C/mariadb': {
                'prj': 'openSUSE:Factory:Staging:C',
                'pkg': 'mariadb',
                'devprj': 'home:Admin',
            },
            'openSUSE:Factory:Staging:J/emacs': {
                'prj': 'openSUSE:Factory:Staging:J',
                'pkg': 'emacs',
                'devprj': 'home:Admin',
            },
            'openSUSE:Factory:Staging:J/python': {
                'prj': 'openSUSE:Factory:Staging:J',
                'pkg': 'python',
                'devprj': 'home:Admin',
            },
        }

        self.attributes = {
            'openSUSE:Factory': {
                'OSRT': {
                    'Config':
                    'overridden-by-local = remote-nope\n'
                    'remote-only = remote-indeed\n'
                }
            }
        }

        self.meta = {}

        self.package = {
            'home:Admin/gcc': {
                'rev': '1',
                'vrev': '1',
                'name': 'gcc',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/wine': {
                'rev': '1',
                'vrev': '1',
                'name': 'wine',
                'srcmd5': 'de9a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/puppet': {
                'rev': '1',
                'vrev': '1',
                'name': 'puppet',
                'srcmd5': 'de8a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/gcc': {
                'rev': '1',
                'vrev': '1',
                'name': 'gcc',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/wine': {
                'rev': '1',
                'vrev': '1',
                'name': 'wine',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory:Rings:0-Bootstrap/elem-ring0': {
                'rev': '1',
                'vrev': '1',
                'name': 'elem-ring0',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/binutils': {
                'rev': '1',
                'vrev': '1',
                'name': 'wine',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/apparmor': {
                'rev': '1',
                'vrev': '1',
                'name': 'apparmor',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/apparmor': {
                'rev': '1',
                'vrev': '1',
                'name': 'apparmor',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/mariadb': {
                'rev': '1',
                'vrev': '1',
                'name': 'mariadb',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/mariadb': {
                'rev': '1',
                'vrev': '1',
                'name': 'mariadb',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/emacs': {
                'rev': '1',
                'vrev': '1',
                'name': 'emacs',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
                'lsrcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
                'verifymd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/python': {
                'rev': '1',
                'vrev': '1',
                'name': 'python',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
                'lsrcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
                'verifymd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
        }

        self.comments = {
            'openSUSE:Factory:Staging:A': [{
                'who': 'Admin',
                'when': '2014-06-01 17:56:28 UTC',
                'id': '1',
                'body': 'Just a comment',
            }],
            'openSUSE:Factory:Staging:U': [],
            'openSUSE:Factory:Staging:B': [],
            'openSUSE:Factory:Staging:C': [{
                'who':
                'Admin',
                'when':
                '2014-06-01 17:56:28 UTC',
                'id':
                '2',
                'body':
                ("The list of requests tracked in openSUSE:Factory:Staging:C has changed:\n\n"
                 " * Request#501 for package apparmor submitted by Admin\n"
                 " * Request#502 for package mariadb submitted by Admin\n")
            }],
            'openSUSE:Factory:Staging:J': [],
        }

        # To track comments created during test execution, even if
        # they have been deleted afterward
        self.comment_bodies = []

        # Different spec files stored in some openSUSE:Factory
        # projects
        self.spec_list = {
            'openSUSE:Factory/apparmor': [{
                'spec': 'apparmor.spec',
            }],
        }
Example #12
def main(args):
    # Store the default apiurl in addition to the overridden url if the
    # option was set and thus overrides the default config value.
    # Using the OBS link does not work for ?view=issues.
    if args.apiurl is not None:
        osc.conf.get_config()
        apiurl_default = osc.conf.config['apiurl']
    else:
        apiurl_default = None

    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug
    apiurl = osc.conf.config['apiurl']

    Cache.init()

    git_repo_url = 'git@github.com:jberry-suse/openSUSE-release-tools-issue-db.git'
    git_message = 'Sync issue-diff.py changes.'
    db_dir = sync(args.cache_dir, git_repo_url, git_message)
    db_file = os.path.join(db_dir, '{}.yml'.format(args.project))

    if os.path.exists(db_file):
        db = yaml.safe_load(open(db_file).read())
        if db is None:
            db = {}
        else:
            print('Loaded db file: {}'.format(db_file))
    else:
        db = {}

    if args.print_stats:
        print_stats(db)
        return

    print('Comparing {} against {}'.format(args.project, args.factory))

    bugzilla_api = bugzilla_init(args.bugzilla_apiurl)
    bugzilla_defaults = (args.bugzilla_product, args.bugzilla_component, args.bugzilla_version)

    trackers = issue_trackers(apiurl)
    packages_project = package_list(apiurl, args.project)
    packages_factory = package_list(apiurl_default, args.factory)
    packages = set(packages_project).intersection(set(packages_factory))
    new = 0
    packages = list(packages)
    shuffle(packages)
    for index, package in enumerate(packages, start=1):
        if index % 50 == 0:
            print('Checked {} of {}'.format(index, len(packages)))
        if package in db and db[package] == 'whitelist':
            print('Skipping package {}'.format(package))
            continue

        issues_project = issues_get(apiurl, args.project, package, trackers, db)
        issues_factory = issues_get(apiurl_default, args.factory, package, trackers, db)

        missing_from_factory = set(issues_project.keys()) - set(issues_factory.keys())

        # Filtering by age must be done after set diff in order to allow for
        # matches with issues newer than --newest.
        for label in set(missing_from_factory):
            if issues_project[label]['age'] < args.newest:
                missing_from_factory.remove(label)

        if len(missing_from_factory) == 0:
            continue

        print('{}: {} missing'.format(package, len(missing_from_factory)))

        # Generate summaries for issues missing from factory.
        changes = {}
        for issue in missing_from_factory:
            info = issues_project[issue]
            summary = ISSUE_SUMMARY if info['owner'] else ISSUE_SUMMARY_PLAIN
            changes[issue] = summary.format(
                label=issue, url=info['url'], owner=info['owner'], summary=info['summary'])

        # Prompt user to decide which issues to whitelist.
        changes_after = prompt_interactive(changes, args.project, package)

        # Determine if any real changes (vs typos) and create text issue list.
        issues = []
        cc = []
        if len(changes_after) > 0:
            for issue, summary in changes.items():
                if issue in changes_after:
                    info = issues_project[issue]
                    if issue.startswith('bsc'):
                        # Reformat for bugzilla markdown.
                        summary = ISSUE_SUMMARY_BUGZILLA if info['owner'] else ISSUE_SUMMARY_PLAIN_BUGZILLA
                        issue = issue.replace('bsc', 'bug')
                        summary = summary.format(
                            label=issue, url=info['url'], owner=info['owner'], summary=info['summary'])
                    issues.append('- ' + summary)
                    if info['owner'] is not None:
                        cc.append(info['owner'])

        # Prompt user about how to continue.
        response = prompt_continue(len(issues))
        if response == 'n':
            break
        if response == 's':
            continue

        # File a bug if not all issues whitelisted.
        if len(issues) > 0:
            summary = BUG_SUMMARY.format(project=args.project, factory=args.factory, package=package)
            message = BUG_TEMPLATE.format(
                message_start=MESSAGE_START.format(
                    project=args.project, factory=args.factory, package=package, newest=args.newest),
                issues='\n'.join(issues))
            if len(message) > 65535:
                # Truncate messages longer than bugzilla limit.
                message = message[:65535 - 3] + '...'

            # Determine bugzilla meta information to use when creating bug.
            meta = bug_meta(bugzilla_api, bugzilla_defaults, trackers, changes.keys())
            owner = bug_owner(apiurl, package)
            if args.bugzilla_cc:
                cc.append(args.bugzilla_cc)

            # Try to create bug, but allow for handling faults.
            tries = 0
            while tries < 10:
                try:
                    bug_id = bug_create(bugzilla_api, meta, owner, cc, summary, message)
                    break
                except Fault as e:
                    if 'There is no component named' in e.faultString:
                        print('Invalid component {}, fallback to default'.format(meta[1]))
                        meta = (meta[0], bugzilla_defaults[1], meta[2])
                    elif 'is not a valid username' in e.faultString:
                        username = e.faultString.split(' ', 3)[2]
                        cc.remove(username)
                        print('Removed invalid username {}'.format(username))
                    else:
                        raise e
                tries += 1

        # Mark changes in db.
        notified, whitelisted = 0, 0
        for issue in changes:
            if package not in db:
                db[package] = {}

            if issue in changes_after:
                db[package][issue] = str(bug_id)
                notified += 1
            else:
                db[package][issue] = 'whitelist'
                whitelisted += 1

        # Write out changes after each package to avoid loss.
        with open(db_file, 'w') as outfile:
            yaml.safe_dump(db, outfile, default_flow_style=False, default_style="'")

        if notified > 0:
            print('{}: {} notified in bug {}, {} whitelisted'.format(package, notified, bug_id, whitelisted))
        else:
            print('{}: {} whitelisted'.format(package, whitelisted))

        if response == 'b':
            break

        new += 1
        if new == args.limit:
            print('stopped at limit')
            break

    sync(args.cache_dir, git_repo_url, git_message)
Example #13
    parser.add_argument('-S', '--apiurl-source', metavar='URL', help='source API URL')
    parser.add_argument('-T', '--apiurl-target', metavar='URL', help='target API URL')
    parser.add_argument('-c', '--cache', action='store_true', help='cache source queries for 24 hours')
    parser.add_argument('-d', '--debug', action='store_true', help='print info useful for debugging')
    parser.add_argument('-p', '--project', default='openSUSE:Factory', help='project from which to clone')

    args = parser.parse_args()

    osc.conf.get_config(override_apiurl=args.apiurl_target)
    apiurl_target = osc.conf.config['apiurl']
    osc.conf.get_config(override_apiurl=args.apiurl_source)
    apiurl_source = osc.conf.config['apiurl']

    if apiurl_target == apiurl_source:
        print('target APIURL must not be the same as source APIURL')
        sys.exit(1)

    if args.cache:
        from osclib.cache import Cache
        Cache.CACHE_DIR = Cache.CACHE_DIR + '-clone'
        Cache.PATTERNS = {}
        # Prevent caching source information from local clone.
        Cache.PATTERNS['/source/[^/]+/[^/]+/[^/]+?rev'] = 0
        Cache.PATTERNS['.*'] = Cache.TTL_LONG * 2
        Cache.init()

    osc.conf.config['debug'] = args.debug
    project_fence.project = args.project
    sys.exit(args.func(apiurl_source, apiurl_target, args.project))
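Cache.PATTERNS appears to map URL regexes to cache lifetimes (0 meaning never cache, sys.maxsize meaning effectively forever); a short hypothetical configuration illustrating the idea, with made-up values:

    Cache.PATTERNS = {}
    Cache.PATTERNS[r'/source/[^/]+/[^/]+/[^/]+\?rev'] = 0   # never cache source revisions
    Cache.PATTERNS[r'/search/request'] = 60 * 60            # keep request searches for an hour
    Cache.PATTERNS[r'.*'] = Cache.TTL_LONG                  # default lifetime for the rest
    Cache.init()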
Example #14
    def __init__(self, *args, **kwargs):
        cmdln.Cmdln.__init__(self, *args, **kwargs)
        Cache.init()
        self.clazz = ReviewBot
Example #15
def do_staging(self, subcmd, opts, *args):
    """${cmd_name}: Commands to work with staging projects

    ${cmd_option_list}

    "accept" will accept all requests in
        $PROJECT:Staging:<LETTER> into $PROJECT
        If openSUSE:* project, requests marked ready from adi stagings will also
        be accepted.

    "acheck" will check if it is safe to accept new staging projects
        As $PROJECT is syncing the right package versions between
        /standard, /totest and /snapshot, it is important that the projects
        are clean prior to a checkin round.

    "adi" will list already staged requests, stage new requests, and supersede
        requests where applicable. New adi stagings will be created for new
        packages based on the grouping options used. The default grouping is by
        source project. When adi stagings are ready the request will be marked
        ready, unstaged, and the adi staging deleted.

    "check" will check if all packages are links without changes

    "cleanup_rings" will try to cleanup rings content and print
        out problems

    "freeze" will freeze the sources of the project's links while not
        affecting the source packages

    "frozenage" will show when the respective staging project was last frozen

    "ignore" will ignore a request from "list" and "adi" commands until unignored

    "unignore" will remove requests from the ignore list
        If the --cleanup flag is included then all ignored requests that were
        changed from state new or review more than 3 days ago will be removed.

    "list" will list/supersede requests for ring packages or all if no rings.
        The package list is used to limit what requests are superseded when
        called with the --supersede option.

    "repair" will attempt to repair the state of a request that has been
        corrupted.

        Use the --cleanup flag to include all untracked requests.

    "select" will add requests to the project
        Stagings are expected to be either in short-hand or the full project
        name. For example letter or named stagings can be specified simply as
        A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2,
        etc. Currently, adi stagings are not supported in proposal mode.

        Requests may either be the target package or the request ID.

        When using --filter-by or --group-by the xpath will be applied to the
        request node as returned by OBS. Several values will supplement the
        normal request node.

        - ./action/target/@devel_project: the devel project for the package
        - ./action/target/@ring: the ring to which the package belongs
        - ./@ignored: either false or the provided message

        Some useful examples:

        --filter-by './action/target[starts-with(@package, "yast-")]'
        --filter-by './action/target[@devel_project="YaST:Head"]'
        --filter-by './action/target[starts-with(@ring, "1")]'
        --filter-by '@id!="1234567"'

        --group-by='./action/target/@devel_project'
        --group-by='./action/target/@ring'

        Multiple filter-by or group-by options may be used at the same time.

        Note that when using proposal mode, multiple stagings to consider may be
        provided in addition to a list of requests by which to filter. A more
        complex example:

        select --group-by='./action/target/@devel_project' A B C 123 456 789

        This will separate the requests 123, 456, 789 by devel project and only
        consider stagings A, B, or C, if available, for placement.

        No arguments is also a valid choice and will propose all non-ignored
        requests into the first available staging. Note that bootstrapped
        stagings are only used when either required or no other stagings are
        available.

        Another useful example is placing all open requests into a specific
        letter staging with:

        select A

        Interactive mode allows the proposal to be modified before application.

    "unselect" will remove from the project - pushing them back to the backlog
        If a message is included the requests will be ignored first.

        Use the --cleanup flag to include all obsolete requests.

    "unlock" will remove the staging lock in case it gets stuck

    "rebuild" will rebuild broken packages in the given stagings or all
        The rebuild command will only trigger builds for packages with less than
        3 failures since the last success or if the build log indicates a stall.

        If the force option is included the rebuild checks will be ignored and
        all packages failing to build will be triggered.

    Usage:
        osc staging accept [--force] [--no-cleanup] [LETTER...]
        osc staging acheck
        osc staging adi [--move] [--by-develproject] [--split] [REQUEST...]
        osc staging check [--old] STAGING
        osc staging cleanup_rings
        osc staging freeze [--no-bootstrap] STAGING...
        osc staging frozenage [STAGING...]
        osc staging ignore [-m MESSAGE] REQUEST...
        osc staging unignore [--cleanup] [REQUEST...|all]
        osc staging list [--supersede] [PACKAGE...]
        osc staging select [--no-freeze] [--move [--from STAGING]]
            [--add PACKAGE]
            STAGING REQUEST...
        osc staging select [--no-freeze] [--interactive|--non-interactive]
            [--filter-by...] [--group-by...]
            [--merge] [--try-strategies] [--strategy]
            [STAGING...] [REQUEST...]
        osc staging unselect [--cleanup] [-m MESSAGE] [REQUEST...]
        osc staging unlock
        osc staging rebuild [--force] [STAGING...]
        osc staging repair [--cleanup] [REQUEST...]
        osc staging setprio [STAGING...]
    """
    if opts.version:
        self._print_version()

    # verify the argument counts match the commands
    if len(args) == 0:
        raise oscerr.WrongArgs('No command given, see "osc help staging"!')
    cmd = args[0]
    if cmd == 'freeze':
        min_args, max_args = 1, None
    elif cmd == 'repair':
        min_args, max_args = 0, None
    elif cmd == 'frozenage':
        min_args, max_args = 0, None
    elif cmd == 'setprio':
        min_args, max_args = 0, None
    elif cmd == 'check':
        min_args, max_args = 0, 1
    elif cmd == 'select':
        min_args, max_args = 0, None
    elif cmd == 'unselect':
        min_args, max_args = 0, None
    elif cmd == 'adi':
        min_args, max_args = 0, None
    elif cmd == 'ignore':
        min_args, max_args = 1, None
    elif cmd == 'unignore':
        min_args, max_args = 0, None
    elif cmd in ('list', 'accept'):
        min_args, max_args = 0, None
    elif cmd in ('cleanup_rings', 'acheck'):
        min_args, max_args = 0, 0
    elif cmd == 'unlock':
        min_args, max_args = 0, 0
    elif cmd == 'rebuild':
        min_args, max_args = 0, None
    else:
        raise oscerr.WrongArgs('Unknown command: %s' % cmd)
    if len(args) - 1 < min_args:
        raise oscerr.WrongArgs('Too few arguments.')
    if max_args is not None and len(args) - 1 > max_args:
        raise oscerr.WrongArgs('Too many arguments.')

    # Init the OBS access and configuration
    opts.project = self._full_project_name(opts.project)
    opts.apiurl = self.get_api_url()
    opts.verbose = False
    Config(opts.project)

    if opts.wipe_cache:
        Cache.delete_all()

    lock = OBSLock(opts.apiurl, opts.project)
    if cmd == 'unlock':
        lock.release()
        return

    with lock:
        api = StagingAPI(opts.apiurl, opts.project)

        # call the respective command and parse args by need
        if cmd == 'check':
            prj = args[1] if len(args) > 1 else None
            CheckCommand(api).perform(prj, opts.old)
        elif cmd == 'freeze':
            for prj in args[1:]:
                FreezeCommand(api).perform(api.prj_from_letter(prj),
                                           copy_bootstrap=opts.bootstrap)
        elif cmd == 'frozenage':
            projects = api.get_staging_projects_short() if len(args) == 1 else args[1:]
            for prj in projects:
                print("%s last frozen %0.1f days ago" %
                      (api.prj_from_letter(prj),
                       api.days_since_last_freeze(api.prj_from_letter(prj))))
        elif cmd == 'acheck':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project,
                                                    "openSUSE-release.rpm",
                                                    repository="totest",
                                                    arch="x86_64")
            if version_totest:
                version_openqa = api.load_file_content(
                    "%s:Staging" % api.project, "dashboard", "version_totest")
                totest_dirty = api.is_repo_dirty(api.project, 'totest')
                print(
                    "version_openqa: %s / version_totest: %s / totest_dirty: %s\n"
                    % (version_openqa, version_totest, totest_dirty))
            else:
                print("acheck is unavailable in %s!\n" % (api.project))
        elif cmd == 'accept':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project,
                                                    "openSUSE-release.rpm",
                                                    repository="totest",
                                                    arch="x86_64")

            if version_totest is None or opts.force:
                # SLE does not have a totest_version or openqa_version - ignore it
                version_openqa = version_totest
                totest_dirty = False
            else:
                version_openqa = api.load_file_content(
                    "%s:Staging" % api.project, "dashboard", "version_totest")
                totest_dirty = api.is_repo_dirty(api.project, 'totest')

            if version_openqa == version_totest and not totest_dirty:
                cmd = AcceptCommand(api)
                for prj in args[1:]:
                    if cmd.perform(api.prj_from_letter(prj), opts.force):
                        cmd.reset_rebuild_data(prj)
                    else:
                        return
                    if not opts.no_cleanup:
                        if api.item_exists(api.prj_from_letter(prj)):
                            cmd.cleanup(api.prj_from_letter(prj))
                        if api.item_exists("%s:DVD" %
                                           api.prj_from_letter(prj)):
                            cmd.cleanup("%s:DVD" % api.prj_from_letter(prj))
                if opts.project.startswith('openSUSE:'):
                    cmd.accept_other_new()
                    cmd.update_factory_version()
                    if api.item_exists(api.crebuild):
                        cmd.sync_buildfailures()
            else:
                print("Not safe to accept: /totest is not yet synced")
        elif cmd == 'unselect':
            if opts.message:
                print('Ignoring requests first')
                IgnoreCommand(api).perform(args[1:], opts.message)
            UnselectCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'select':
            # Include list of all stagings in short-hand and by full name.
            existing_stagings = api.get_staging_projects_short(None)
            existing_stagings += [
                p for p in api.get_staging_projects() if not p.endswith(':DVD')
            ]
            stagings = []
            requests = []
            for arg in args[1:]:
                # Since requests may be given by either request ID or package
                # name and stagings may include multi-letter special stagings
                # there is no easy way to distinguish between stagings and
                # requests in arguments. Therefore, check if argument is in the
                # list of short-hand and full project name stagings, otherwise
                # consider it a request. This also allows for special stagings
                # with the same name as package, but the staging will be assumed
                # first time around. The current practice seems to be to start a
                # special staging with a capital letter which makes them unique.
                # Lastly, adi stagings are consistently prefixed with adi:,
                # which also makes it easy to distinguish them from request IDs.
                if arg in existing_stagings and arg not in stagings:
                    stagings.append(api.extract_staging_short(arg))
                elif arg not in requests:
                    requests.append(arg)

            if len(stagings) != 1 or len(requests) == 0 or opts.filter_by or opts.group_by:
                if opts.move or opts.from_:
                    print(
                        '--move and --from must be used with explicit staging and request list'
                    )
                    return

                open_requests = api.get_open_requests()
                if len(open_requests) == 0:
                    print('No open requests to consider')
                    return

                splitter = RequestSplitter(api, open_requests, in_ring=True)

                considerable = splitter.stagings_load(stagings)
                if considerable == 0:
                    print('No considerable stagings on which to act')
                    return

                if opts.merge:
                    splitter.merge()
                if opts.try_strategies:
                    splitter.strategies_try()
                if len(requests) > 0:
                    splitter.strategy_do('requests', requests=requests)
                if opts.strategy:
                    splitter.strategy_do(opts.strategy)
                elif opts.filter_by or opts.group_by:
                    kwargs = {}
                    if opts.filter_by:
                        kwargs['filters'] = opts.filter_by
                    if opts.group_by:
                        kwargs['groups'] = opts.group_by
                    splitter.strategy_do('custom', **kwargs)
                else:
                    if opts.merge:
                        # Merge any none strategies before final none strategy.
                        splitter.merge(strategy_none=True)
                    splitter.strategy_do('none')
                    splitter.strategy_do_non_bootstrapped('none')

                proposal = splitter.proposal
                if len(proposal) == 0:
                    print('Empty proposal')
                    return

                if opts.interactive:
                    with tempfile.NamedTemporaryFile(suffix='.yml') as temp:
                        temp.write(
                            yaml.safe_dump(splitter.proposal,
                                           default_flow_style=False) + '\n\n')
                        temp.write(
                            '# move requests between stagings or comment/remove them\n'
                        )
                        temp.write('# change the target staging for a group\n')
                        temp.write('# stagings\n')
                        if opts.merge:
                            temp.write('# - merged: {}\n'.format(', '.join(
                                sorted(splitter.stagings_mergeable +
                                       splitter.stagings_mergeable_none))))
                        temp.write('# - considered: {}\n'.format(', '.join(
                            sorted(splitter.stagings_considerable))))
                        temp.write('# - remaining: {}\n'.format(', '.join(
                            sorted(splitter.stagings_available))))
                        temp.flush()

                        editor = os.getenv('EDITOR')
                        if not editor:
                            editor = 'xdg-open'
                        return_code = subprocess.call([editor, temp.name])

                        proposal = yaml.safe_load(open(temp.name).read())

                        # Filter invalidated groups from proposal.
                        keys = ['group', 'requests', 'staging', 'strategy']
                        for group, info in sorted(proposal.items()):
                            for key in keys:
                                if not info.get(key):
                                    del proposal[group]
                                    break

                print(yaml.safe_dump(proposal, default_flow_style=False))

                print('Accept proposal? [y/n] (y): ', end='')
                if opts.non_interactive:
                    print('y')
                else:
                    response = raw_input().lower()
                    if response != '' and response != 'y':
                        print('Quit')
                        return

                for group, info in sorted(proposal.items()):
                    print('Staging {} in {}'.format(group, info['staging']))

                    # SelectCommand expects strings.
                    request_ids = map(str, info['requests'].keys())
                    target_project = api.prj_from_short(info['staging'])

                    if 'merge' not in info:
                        # Assume that the original splitter_info is desirable
                        # and that this staging is simply a manual follow-up.
                        api.set_splitter_info_in_prj_pseudometa(
                            target_project, info['group'], info['strategy'])

                    SelectCommand(api, target_project) \
                        .perform(request_ids, no_freeze=opts.no_freeze)
            else:
                target_project = api.prj_from_short(stagings[0])
                if opts.add:
                    api.mark_additional_packages(target_project, [opts.add])
                else:
                    SelectCommand(api, target_project) \
                        .perform(requests, opts.move, opts.from_, opts.no_freeze)
        elif cmd == 'cleanup_rings':
            CleanupRings(api).perform()
        elif cmd == 'ignore':
            IgnoreCommand(api).perform(args[1:], opts.message)
        elif cmd == 'unignore':
            UnignoreCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'list':
            ListCommand(api).perform(args[1:], supersede=opts.supersede)
        elif cmd == 'adi':
            AdiCommand(api).perform(args[1:],
                                    move=opts.move,
                                    by_dp=opts.by_develproject,
                                    split=opts.split)
        elif cmd == 'rebuild':
            RebuildCommand(api).perform(args[1:], opts.force)
        elif cmd == 'repair':
            RepairCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'setprio':
            PrioCommand(api).perform(args[1:])
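# Note: --filter-by/--group-by apply an XPath expression to each request node,
# as described in the help text above. Below is a self-contained illustration
# using lxml; the request XML and its attribute values are invented, including
# the supplemental @devel_project, @ring and @ignored attributes.
from lxml import etree

request = etree.fromstring(
    '<request id="1234567" ignored="false">'
    '<action type="submit">'
    '<target project="openSUSE:Factory" package="yast2-core"'
    ' devel_project="YaST:Head" ring="1-MinimalX"/>'
    '</action>'
    '</request>')

print(bool(request.xpath('./action/target[starts-with(@package, "yast-")]')))  # False
print(bool(request.xpath('./action/target[@devel_project="YaST:Head"]')))      # True
print(bool(request.xpath('./action/target[starts-with(@ring, "1")]')))         # True
print(request.xpath('@id!="1234567"'))                                         # False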
Exemple #16
def main(args):
    # Store the default apiurl in addition to the overridden url if the
    # option was set and thus overrides the default config value.
    # Using the OBS link does not work for ?view=issues.
    if args.apiurl is not None:
        osc.conf.get_config()
        apiurl_default = osc.conf.config['apiurl']
    else:
        apiurl_default = None

    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug
    apiurl = osc.conf.config['apiurl']

    Cache.init()

    db_dir = os.path.join(args.config_dir, 'issue-db')
    db_file = os.path.join(db_dir, '{}.yml'.format(args.project))
    sync(args.config_dir, db_dir)

    if os.path.exists(db_file):
        db = yaml.safe_load(open(db_file).read())
        if db is None:
            db = {}
        else:
            print('Loaded db file: {}'.format(db_file))
    else:
        db = {}

    print('Comparing {} against {}'.format(args.project, args.factory))

    bugzilla_api = bugzilla_init(args.bugzilla_apiurl)
    bugzilla_defaults = (args.bugzilla_product, args.bugzilla_component,
                         args.bugzilla_version)

    trackers = issue_trackers(apiurl)
    packages_project = package_list(apiurl, args.project)
    packages_factory = package_list(apiurl_default, args.factory)
    packages = set(packages_project).intersection(set(packages_factory))
    new = 0
    for package in sorted(packages):
        issues_project = issues_get(apiurl, args.project, package, trackers,
                                    db)
        issues_factory = issues_get(apiurl_default, args.factory, package,
                                    trackers, db)

        missing_from_factory = set(issues_project.keys()) - set(
            issues_factory.keys())

        # Filtering by age must be done after set diff in order to allow for
        # matches with issues newer than --newest.
        for label in set(missing_from_factory):
            if issues_project[label]['age'] < args.newest:
                missing_from_factory.remove(label)

        if len(missing_from_factory) == 0:
            continue

        print('{}: {} missing'.format(package, len(missing_from_factory)))

        # Generate summaries for issues missing from factory.
        changes = {}
        for issue in missing_from_factory:
            info = issues_project[issue]
            summary = ISSUE_SUMMARY if info['owner'] is not None else ISSUE_SUMMARY_PLAIN
            changes[issue] = summary.format(label=issue,
                                            url=info['url'],
                                            owner=info['owner'],
                                            summary=info['summary'])

        # Prompt user to decide which issues to whitelist.
        changes_after = prompt_interactive(changes, args.project, package)

        # Determine if any real changes (vs typos) and create text issue list.
        issues = []
        cc = []
        if len(changes_after) > 0:
            for issue, summary in changes.items():
                if issue in changes_after:
                    issues.append('- ' + summary)
                    owner = issues_project[issue]['owner']
                    if owner is not None:
                        cc.append(owner)

        # Prompt user about how to continue.
        response = prompt_continue(len(issues))
        if response == 'n':
            break
        if response == 's':
            continue

        # File a bug if not all issues whitelisted.
        if len(issues) > 0:
            summary = BUG_SUMMARY.format(project=args.project,
                                         factory=args.factory,
                                         package=package)
            message = BUG_TEMPLATE.format(message_start=MESSAGE_START.format(
                project=args.project,
                factory=args.factory,
                package=package,
                newest=args.newest),
                                          issues='\n'.join(issues))

            # Determine bugzilla meta information to use when creating bug.
            meta = bug_meta(bugzilla_api, bugzilla_defaults, trackers,
                            changes.keys())
            owner = bug_owner(apiurl, package)
            if args.bugzilla_cc:
                cc.append(args.bugzilla_cc)
            bug_id = bug_create(bugzilla_api, meta, owner, cc, summary,
                                message)

        # Mark changes in db.
        notified, whitelisted = 0, 0
        for issue in changes:
            if package not in db:
                db[package] = {}

            if issue in changes_after:
                db[package][issue] = str(bug_id)
                notified += 1
            else:
                db[package][issue] = 'whitelist'
                whitelisted += 1

        # Write out changes after each package to avoid loss.
        with open(db_file, 'w') as outfile:
            yaml.safe_dump(db,
                           outfile,
                           default_flow_style=False,
                           default_style="'")

        if notified > 0:
            print('{}: {} notified in bug #{}, {} whitelisted'.format(
                package, notified, bug_id, whitelisted))
        else:
            print('{}: {} whitelisted'.format(package, whitelisted))

        if response == 'b':
            break

        new += 1
        if new == args.limit:
            print('stopped at limit')
            break

    sync(args.config_dir, db_dir)
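# Note: prompt_continue() is not part of this snippet. Based on how its return
# value is handled above ('n' stops, 's' skips the package, 'b' finishes this
# package and then stops, anything else continues), a minimal sketch could be:
def prompt_continue_sketch(issue_count):
    while True:
        # input() here; the Python 2 code above would use raw_input().
        response = input('{} issue(s) remain, continue? [y/s/b/n] (y): '.format(issue_count))
        response = response.strip().lower() or 'y'
        if response in ('y', 's', 'b', 'n'):
            return response
        print('invalid response: {}'.format(response))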
Exemple #17
    parser.set_defaults(func=clone_do)

    parser.add_argument('-S', '--apiurl-source', metavar='URL', help='source API URL')
    parser.add_argument('-T', '--apiurl-target', metavar='URL', help='target API URL')
    parser.add_argument('-c', '--cache', action='store_true', help='cache source queries for 24 hours')
    parser.add_argument('-d', '--debug', action='store_true', help='print info useful for debugging')
    parser.add_argument('-p', '--project', default='openSUSE:Factory', help='project from which to clone')

    args = parser.parse_args()

    osc.conf.get_config(override_apiurl=args.apiurl_target)
    apiurl_target = osc.conf.config['apiurl']
    osc.conf.get_config(override_apiurl=args.apiurl_source)
    apiurl_source = osc.conf.config['apiurl']

    if apiurl_target == apiurl_source:
        print('target APIURL must not be the same as source APIURL')
        sys.exit(1)

    if args.cache:
        from osclib.cache import Cache
        Cache.PATTERNS = {}
        # Prevent caching source information from local clone.
        Cache.PATTERNS['/source/[^/]+/[^/]+/[^/]+?rev'] = 0
        Cache.PATTERNS['.*'] = Cache.TTL_LONG * 2
        Cache.init('clone')

    osc.conf.config['debug'] = args.debug
    project_fence.project = args.project
    sys.exit(args.func(apiurl_source, apiurl_target, args.project))
Exemple #18
    def __init__(self, fixtures=FIXTURES):
        """Instance constructor."""
        self.fixtures = fixtures

        CacheManager.test = True
        Cache.init()
        Cache.delete_all()
        httpretty.enable()

        oscrc = os.path.join(fixtures, 'oscrc')
        osc.core.conf.get_config(override_conffile=oscrc,
                                 override_no_keyring=True,
                                 override_no_gnome_keyring=True)

        # Internal status of OBS.  The mockup will use this data to
        # build the responses.  We will try to put responses as XML
        # templates in the fixture directory.
        self.dashboard = {}

        self.requests = {
            '123': {
                'request': 'new',
                'review': 'accepted',
                'who': 'Admin',
                'by': 'group',
                'id': '123',
                'by_who': 'opensuse-review-team',
                'package': 'gcc',
            },
            '321': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'group',
                'id': '321',
                'by_who': 'factory-staging',
                'package': 'puppet',
            },
            '333': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'project',
                'id': '333',
                'by_who': 'openSUSE:Factory:Staging:B',
                'package': 'wine',
            },
            '501': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'project',
                'id': '501',
                'by_who': 'openSUSE:Factory:Staging:C',
                'package': 'apparmor',
            },
            '502': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'project',
                'id': '502',
                'by_who': 'openSUSE:Factory:Staging:C',
                'package': 'mariadb',
            },
            '1000': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'user',
                'id': '1000',
                'by_who': 'factory-repo-checker',
                'package': 'emacs',
            },
            '1001': {
                'request': 'review',
                'review': 'new',
                'who': 'Admin',
                'by': 'user',
                'id': '1001',
                'by_who': 'factory-repo-checker',
                'package': 'python',
            },
        }

        self.staging_project = {
            'A': {
                'project': 'openSUSE:Factory:Staging:A',
                'title': '',
                'description': '',
            },
            'U': {
                'project': 'openSUSE:Factory:Staging:U',
                'title': 'Unfrozen',
                'description': '',
            },
            'B': {
                'project': 'openSUSE:Factory:Staging:B',
                'title': 'wine',
                'description': 'requests:\n- {id: 333, package: wine}',
            },
            'C': {
                'project': 'openSUSE:Factory:Staging:C',
                'title': 'A project ready to be accepted',
                'description': ('requests:\n- {id: 501, package: apparmor, author: Admin, type: submit}\n'
                                '- {id: 502, package: mariadb, author: Admin, type: submit}'),
            },
            'J': {
                'project': 'openSUSE:Factory:Staging:J',
                'title': 'A project to be checked',
                'description': ('requests:\n- {id: 1000, package: emacs, author: Admin}\n'
                                '- {id: 1001, package: python, author: Admin}'),
            },
        }

        self.links = {
            'openSUSE:Factory:Staging:B/wine': {
                'prj': 'openSUSE:Factory:Staging:B',
                'pkg': 'wine',
                'devprj': 'home:Admin',
            },
            'openSUSE:Factory:Staging:C/apparmor': {
                'prj': 'openSUSE:Factory:Staging:C',
                'pkg': 'apparmor',
                'devprj': 'home:Admin',
            },
            'openSUSE:Factory:Staging:C/mariadb': {
                'prj': 'openSUSE:Factory:Staging:C',
                'pkg': 'mariadb',
                'devprj': 'home:Admin',
            },
            'openSUSE:Factory:Staging:J/emacs': {
                'prj': 'openSUSE:Factory:Staging:J',
                'pkg': 'emacs',
                'devprj': 'home:Admin',
            },
            'openSUSE:Factory:Staging:J/python': {
                'prj': 'openSUSE:Factory:Staging:J',
                'pkg': 'python',
                'devprj': 'home:Admin',
            },
        }

        self.attributes = {
            'openSUSE:Factory': {
                'OSRT': {
                    'Config': 'overridden-by-local = remote-nope\n'
                              'remote-only = remote-indeed\n'
                }
            }
        }

        self.meta = {}

        self.package = {
            'home:Admin/gcc': {
                'rev': '1',
                'vrev': '1',
                'name': 'gcc',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/wine': {
                'rev': '1',
                'vrev': '1',
                'name': 'wine',
                'srcmd5': 'de9a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/puppet': {
                'rev': '1',
                'vrev': '1',
                'name': 'puppet',
                'srcmd5': 'de8a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/gcc': {
                'rev': '1',
                'vrev': '1',
                'name': 'gcc',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/wine': {
                'rev': '1',
                'vrev': '1',
                'name': 'wine',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory:Rings:0-Bootstrap/elem-ring0': {
                'rev': '1',
                'vrev': '1',
                'name': 'elem-ring0',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/binutils': {
                'rev': '1',
                'vrev': '1',
                'name': 'wine',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/apparmor': {
                'rev': '1',
                'vrev': '1',
                'name': 'apparmor',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/apparmor': {
                'rev': '1',
                'vrev': '1',
                'name': 'apparmor',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/mariadb': {
                'rev': '1',
                'vrev': '1',
                'name': 'mariadb',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'openSUSE:Factory/mariadb': {
                'rev': '1',
                'vrev': '1',
                'name': 'mariadb',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/emacs': {
                'rev': '1',
                'vrev': '1',
                'name': 'emacs',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
                'lsrcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
                'verifymd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
            'home:Admin/python': {
                'rev': '1',
                'vrev': '1',
                'name': 'python',
                'srcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
                'lsrcmd5': 'de7a9f5e3bedb01980465f3be3d236cb',
                'verifymd5': 'de7a9f5e3bedb01980465f3be3d236cb',
            },
        }

        self.comments = {
            'openSUSE:Factory:Staging:A': [
                {
                    'who': 'Admin',
                    'when': '2014-06-01 17:56:28 UTC',
                    'id': '1',
                    'body': 'Just a comment',
                }
            ],
            'openSUSE:Factory:Staging:U': [],
            'openSUSE:Factory:Staging:B': [],
            'openSUSE:Factory:Staging:C': [
                {
                    'who': 'Admin',
                    'when': '2014-06-01 17:56:28 UTC',
                    'id': '2',
                    'body': ("The list of requests tracked in openSUSE:Factory:Staging:C has changed:\n\n"
                             " * Request#501 for package apparmor submitted by Admin\n"
                             " * Request#502 for package mariadb submitted by Admin\n")
                }
            ],
            'openSUSE:Factory:Staging:J': [],
        }

        # To track comments created during test execution, even if
        # they have been deleted afterward
        self.comment_bodies = []

        # Different spec files stored in some openSUSE:Factory
        # projects
        self.spec_list = {
            'openSUSE:Factory/apparmor': [
                {
                    'spec': 'apparmor.spec',
                }
            ],
        }
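# Note: these fixture dictionaries back an httpretty-based OBS mockup. Below is
# a minimal sketch of serving one request fixture as canned XML; the URL and the
# XML layout are simplified for illustration and are not the project's actual
# fixture templates.
import httpretty

fixture = {'id': '123', 'package': 'gcc', 'request': 'new'}

httpretty.enable()
httpretty.register_uri(
    httpretty.GET,
    'https://localhost/request/{}'.format(fixture['id']),
    body='<request id="{id}"><state name="{request}"/>'
         '<action type="submit"><target package="{package}"/></action>'
         '</request>'.format(**fixture),
    content_type='application/xml')
# Any HTTP GET to that URL from the code under test now receives the canned XML.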
Exemple #19
def __init__(self, *args, **kwargs):
    cmdln.Cmdln.__init__(self, *args, **kwargs)
    Cache.init()
    self.clazz = ReviewBot
Exemple #20
def main(args):
    # Store the default apiurl in addition to the overridden url if the
    # option was set and thus overrides the default config value.
    # Using the OBS link does not work for ?view=issues.
    if args.apiurl is not None:
        osc.conf.get_config()
        apiurl_default = osc.conf.config['apiurl']
    else:
        apiurl_default = None

    osc.conf.get_config(override_apiurl=args.apiurl)
    osc.conf.config['debug'] = args.debug
    apiurl = osc.conf.config['apiurl']

    Cache.init()

    git_repo_url = '[email protected]:jberry-suse/openSUSE-release-tools-issue-db.git'
    git_message = 'Sync issue-diff.py changes.'
    db_dir = sync(args.cache_dir, git_repo_url, git_message)
    db_file = os.path.join(db_dir, '{}.yml'.format(args.project))

    if os.path.exists(db_file):
        db = yaml.safe_load(open(db_file).read())
        if db is None:
            db = {}
        else:
            print('Loaded db file: {}'.format(db_file))
    else:
        db = {}

    if args.print_stats:
        print_stats(db)
        return

    print('Comparing {} against {}'.format(args.project, args.factory))

    bugzilla_api = bugzilla_init(args.bugzilla_apiurl)
    bugzilla_defaults = (args.bugzilla_product, args.bugzilla_component,
                         args.bugzilla_version)

    trackers = issue_trackers(apiurl)
    packages_project = package_list(apiurl, args.project)
    packages_factory = package_list(apiurl_default, args.factory)
    packages = set(packages_project).intersection(set(packages_factory))
    new = 0
    packages = list(packages)
    shuffle(packages)
    for index, package in enumerate(packages, start=1):
        if index % 50 == 0:
            print('Checked {} of {}'.format(index, len(packages)))
        if package in db and db[package] == 'whitelist':
            print('Skipping package {}'.format(package))
            continue

        issues_project = issues_get(apiurl, args.project, package, trackers,
                                    db)
        issues_factory = issues_get(apiurl_default, args.factory, package,
                                    trackers, db)

        missing_from_factory = set(issues_project.keys()) - set(
            issues_factory.keys())

        # Filtering by age must be done after set diff in order to allow for
        # matches with issues newer than --newest.
        for label in set(missing_from_factory):
            if issues_project[label]['age'] < args.newest:
                missing_from_factory.remove(label)

        if len(missing_from_factory) == 0:
            continue

        print('{}: {} missing'.format(package, len(missing_from_factory)))

        # Generate summaries for issues missing from factory.
        changes = {}
        for issue in missing_from_factory:
            info = issues_project[issue]
            summary = ISSUE_SUMMARY if info['owner'] else ISSUE_SUMMARY_PLAIN
            changes[issue] = summary.format(label=issue,
                                            url=info['url'],
                                            owner=info['owner'],
                                            summary=info['summary'])

        # Prompt user to decide which issues to whitelist.
        changes_after = prompt_interactive(changes, args.project, package)

        # Determine if any real changes (vs typos) and create text issue list.
        issues = []
        cc = []
        if len(changes_after) > 0:
            for issue, summary in changes.items():
                if issue in changes_after:
                    info = issues_project[issue]
                    if issue.startswith('bsc'):
                        # Reformat for bugzilla markdown.
                        summary = (ISSUE_SUMMARY_BUGZILLA if info['owner']
                                   else ISSUE_SUMMARY_PLAIN_BUGZILLA)
                        issue = issue.replace('bsc', 'bug')
                        summary = summary.format(label=issue,
                                                 url=info['url'],
                                                 owner=info['owner'],
                                                 summary=info['summary'])
                    issues.append('- ' + summary)
                    if info['owner'] is not None:
                        cc.append(info['owner'])

        # Prompt user about how to continue.
        response = prompt_continue(len(issues))
        if response == 'n':
            break
        if response == 's':
            continue

        # File a bug if not all issues whitelisted.
        if len(issues) > 0:
            summary = BUG_SUMMARY.format(project=args.project,
                                         factory=args.factory,
                                         package=package)
            message = BUG_TEMPLATE.format(message_start=MESSAGE_START.format(
                project=args.project,
                factory=args.factory,
                package=package,
                newest=args.newest),
                                          issues='\n'.join(issues))
            if len(message) > 65535:
                # Truncate messages longer than bugzilla limit.
                message = message[:65535 - 3] + '...'

            # Determine bugzilla meta information to use when creating bug.
            meta = bug_meta(bugzilla_api, bugzilla_defaults, trackers,
                            changes.keys())
            owner = bug_owner(apiurl, package)
            if args.bugzilla_cc:
                cc.append(args.bugzilla_cc)

            # Try to create bug, but allow for handling faults.
            tries = 0
            while tries < 10:
                try:
                    bug_id = bug_create(bugzilla_api, meta, owner, cc, summary,
                                        message)
                    break
                except Fault as e:
                    if 'There is no component named' in e.faultString:
                        print(
                            'Invalid component {}, fallback to default'.format(
                                meta[1]))
                        meta = (meta[0], bugzilla_defaults[1], meta[2])
                    elif 'is not a valid username' in e.faultString:
                        username = e.faultString.split(' ', 3)[2]
                        cc.remove(username)
                        print('Removed invalid username {}'.format(username))
                    else:
                        raise e
                tries += 1

        # Mark changes in db.
        notified, whitelisted = 0, 0
        for issue in changes:
            if package not in db:
                db[package] = {}

            if issue in changes_after:
                db[package][issue] = str(bug_id)
                notified += 1
            else:
                db[package][issue] = 'whitelist'
                whitelisted += 1

        # Write out changes after each package to avoid loss.
        with open(db_file, 'w') as outfile:
            yaml.safe_dump(db,
                           outfile,
                           default_flow_style=False,
                           default_style="'")

        if notified > 0:
            print('{}: {} notified in bug {}, {} whitelisted'.format(
                package, notified, bug_id, whitelisted))
        else:
            print('{}: {} whitelisted'.format(package, whitelisted))

        if response == 'b':
            break

        new += 1
        if new == args.limit:
            print('stopped at limit')
            break

    sync(args.cache_dir, git_repo_url, git_message)
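# Note: the db file written above maps package -> issue label -> either the bug
# number used for notification or the literal 'whitelist'. A small invented
# example of the resulting YAML, dumped with the same options as above:
import yaml

db_example = {
    'apparmor': {'bsc#1100000': '1109999', 'boo#1050000': 'whitelist'},
    'mariadb': {'CVE-2018-0000': 'whitelist'},
}
print(yaml.safe_dump(db_example, default_flow_style=False, default_style="'"))
# default_style="'" single-quotes every scalar, so bug numbers stay strings when
# the file is loaded again.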
Exemple #21
def do_staging(self, subcmd, opts, *args):
    """${cmd_name}: Commands to work with staging projects

    ${cmd_option_list}

    "accept" will accept all requests in
        $PROJECT:Staging:<LETTER> into $PROJECT
        For openSUSE:* projects, requests marked ready in adi stagings will also
        be accepted.

    "acheck" will check if it is safe to accept new staging projects
        As $PROJECT is syncing the right package versions between
        /standard, /totest and /snapshot, it is important that the projects
        are clean prior to a checkin round.

    "adi" will list already staged requests, stage new requests, and supersede
        requests where applicable. New adi stagings will be created for new
        packages based on the grouping options used. The default grouping is by
        source project. When adi stagings are ready the request will be marked
        ready, unstaged, and the adi staging deleted.

    "check" will check if all packages are links without changes

    "check_duplicate_binaries" will list binaries provided by multiple packages

    "config" will modify or view staging specific configuration

        Target project OSRT:Config attribute configuration applies to all
        stagings. Both configuration locations follow the .oscrc format (space
        separated list).

        config
            Print all staging configuration.
        config key
            Print the value of key for stagings.
        config key value...
            Set the value of key for stagings.
        config --clear
            Clear all staging configuration.
        config --clear key
            Clear (unset) a single key from staging configuration
        config --append key value...
            Append value to existing value or set if no existing value.

        All of the above may be restricted to a set of stagings.

        The staging configuration is automatically cleared anytime staging
        pseudometa is cleared (accept, or unstage all requests).

        The keys that may be set in staging configuration are:

        - repo_checker-binary-whitelist[-arch]: appended to target project list
        - todo: text to be printed after staging is accepted

    "cleanup_rings" will try to cleanup rings content and print
        out problems

    "freeze" will freeze the sources of the project's links while not
        affecting the source packages

    "frozenage" will show when the respective staging project was last frozen

    "ignore" will ignore a request from "list" and "adi" commands until unignored

    "unignore" will remove requests from the ignore list
        If the --cleanup flag is included then all ignored requests that were
        changed from state new or review more than 3 days ago will be removed.

    "list" will list/supersede requests for ring packages or all if no rings.

    "lock" will acquire a hold on the project in order to execute multiple commands
        and prevent others from interrupting. An example:

        lock -m "checkin round"

        list --supersede
        adi
        accept A B C D E

        unlock

        Each command will update the lock to keep it up-to-date.

    "repair" will attempt to repair the state of a request that has been
        corrupted.

        Use the --cleanup flag to include all untracked requests.

    "select" will add requests to the project
        Stagings are expected to be either in short-hand or the full project
        name. For example letter or named stagings can be specified simply as
        A, B, Gcc6, etc, while adi stagings can be specified as adi:1, adi:2,
        etc. Currently, adi stagings are not supported in proposal mode.

        Requests may either be the target package or the request ID.

        When using --filter-by or --group-by the xpath will be applied to the
        request node as returned by OBS. Use the following on a current request
        to see the XML structure.

        osc api /request/1337

        A number of additional values will supplement the normal request node.

        - ./action/target/@devel_project: the devel project for the package
        - ./action/target/@devel_project_super: super devel project if relevant
        - ./action/target/@ring: the ring to which the package belongs
        - ./@aged: either True or False based on splitter-request-age-threshold
        - ./@nonfree: set to nonfree if targeting a nonfree sub-project
        - ./@ignored: either False or the provided message

        Some useful examples:

        --filter-by './action/target[starts-with(@package, "yast-")]'
        --filter-by './action/target[@devel_project="YaST:Head"]'
        --filter-by './action/target[starts-with(@ring, "1")]'
        --filter-by '@id!="1234567"'
        --filter-by 'contains(description, "#Portus")'

        --group-by='./action/target/@devel_project'
        --group-by='./action/target/@ring'

        Multiple filter-by or group-by options may be used at the same time.

        Note that when using proposal mode, multiple stagings to consider may be
        provided in addition to a list of requests by which to filter. A more
        complex example:

        select --group-by='./action/target/@devel_project' A B C 123 456 789

        This will separate the requests 123, 456, 789 by devel project and only
        consider stagings A, B, or C, if available, for placement.

        No arguments is also a valid choice and will propose all non-ignored
        requests into the first available staging. Note that bootstrapped
        stagings are only used when either required or no other stagings are
        available.

        Another useful example is placing all open requests into a specific
        letter staging with:

        select A

        Built in strategies may be specified as well. For example:

        select --strategy devel
        select --strategy quick
        select --strategy special
        select --strategy super

        The default is none; custom is used when any filter-by or group-by
        arguments are provided.

        To merge applicable requests into an existing staging.

        select --merge A

        To automatically try all available strategies.

        select --try-strategies

        These concepts can be combined and interactive mode allows the proposal
        to be modified before it is executed.

        Moving requests can be accomplished using the --move flag. For example,
        to move already staged pac1 and pac2 to staging B use the following.

        select --move B pac1 pac2

        The staging in which the requests are staged will automatically be
        determined and the requests will be removed from that staging and placed
        in the specified staging.

        Related to this, the --filter-from option may be used in conjunction
        with --move to only move requests already staged in a specific staging.
        This can be useful if a staging master is responsible for a specific set
        of packages and wants to move them into a different staging when they
        were already placed in a mixed staging. For example, if one had a file
        with a list of packages the following would move any of them found in
        staging A to staging B.

        select --move --filter-from A B $(< package.list)

    "unselect" will remove from the project - pushing them back to the backlog
        If a message is included the requests will be ignored first.

        Use the --cleanup flag to include all obsolete requests.

    "unlock" will remove the staging lock in case it gets stuck or a manual hold
        If a command lock gets stuck while a hold is placed on a project the
        unlock command will need to be run twice since there are two layers of
        locks.

    "rebuild" will rebuild broken packages in the given stagings or all
        The rebuild command will only trigger builds for packages with less than
        3 failures since the last success or if the build log indicates a stall.

        If the force option is included the rebuild checks will be ignored and
        all packages failing to build will be triggered.

    "setprio" will set the priority of requests within stagings
        If no stagings are specified all stagings will be used.
        The default priority is "important", but the possible values are:
          "critical", "important", "moderate" or "low".

    "supersede" will supersede requests where applicable.
        A request list can be used to limit what is superseded.

    Usage:
        osc staging accept [--force] [--no-cleanup] [LETTER...]
        osc staging acheck
        osc staging adi [--move] [--by-develproject] [--split] [REQUEST...]
        osc staging check [--old] [STAGING...]
        osc staging check_duplicate_binaries
        osc staging config [--append] [--clear] [STAGING...] [key] [value]
        osc staging cleanup_rings
        osc staging freeze [--no-bootstrap] STAGING...
        osc staging frozenage [STAGING...]
        osc staging ignore [-m MESSAGE] REQUEST...
        osc staging unignore [--cleanup] [REQUEST...|all]
        osc staging list [--supersede]
        osc staging lock [-m MESSAGE]
        osc staging select [--no-freeze] [--move [--filter-from STAGING]]
            [--add PACKAGE]
            STAGING REQUEST...
        osc staging select [--no-freeze] [--interactive|--non-interactive]
            [--filter-by...] [--group-by...]
            [--merge] [--try-strategies] [--strategy]
            [STAGING...] [REQUEST...]
        osc staging unselect [--cleanup] [-m MESSAGE] [REQUEST...]
        osc staging unlock
        osc staging rebuild [--force] [STAGING...]
        osc staging repair [--cleanup] [REQUEST...]
        osc staging setprio [STAGING...] [priority]
        osc staging supersede [REQUEST...]
    """
    if opts.version:
        self._print_version()

    # verify the argument counts match the commands
    if len(args) == 0:
        raise oscerr.WrongArgs('No command given, see "osc help staging"!')
    cmd = args[0]
    if cmd in (
        'accept',
        'adi',
        'check',
        'config',
        'frozenage',
        'unignore',
        'select',
        'unselect',
        'rebuild',
        'repair',
        'setprio',
        'supersede',
    ):
        min_args, max_args = 0, None
    elif cmd in (
        'freeze',
        'ignore',
    ):
        min_args, max_args = 1, None
    elif cmd in (
        'acheck',
        'check_duplicate_binaries',
        'cleanup_rings',
        'list',
        'lock',
        'unlock',
    ):
        min_args, max_args = 0, 0
    else:
        raise oscerr.WrongArgs('Unknown command: %s' % cmd)
    args = clean_args(args)
    if len(args) - 1 < min_args:
        raise oscerr.WrongArgs('Too few arguments.')
    if max_args is not None and len(args) - 1 > max_args:
        raise oscerr.WrongArgs('Too many arguments.')

    # Allow for determining project from osc store.
    if not opts.project:
        if core.is_project_dir('.'):
            opts.project = core.store_read_project('.')
        else:
            opts.project = 'Factory'

    # Cache the remote config fetch.
    Cache.init()

    # Init the OBS access and configuration
    opts.project = self._full_project_name(opts.project)
    opts.apiurl = self.get_api_url()
    opts.verbose = False
    Config(opts.apiurl, opts.project)

    colorama.init(autoreset=True,
        strip=(opts.no_color or not bool(int(conf.config.get('staging.color', True)))))
    # Allow colors to be changed.
    for name in dir(Fore):
        if not name.startswith('_'):
            # .oscrc requires keys to be lower-case.
            value = conf.config.get('staging.color.' + name.lower())
            if value:
                setattr(Fore, name, ansi.code_to_chars(value))

    if opts.wipe_cache:
        Cache.delete_all()

    api = StagingAPI(opts.apiurl, opts.project)
    needed = lock_needed(cmd, opts)
    with OBSLock(opts.apiurl, opts.project, reason=cmd, needed=needed) as lock:

        # call the respective command and parse args by need
        if cmd == 'check':
            if len(args) == 1:
                CheckCommand(api).perform(None, opts.old)
            else:
                for prj in args[1:]:
                    CheckCommand(api).perform(prj, opts.old)
                    print()
        elif cmd == 'check_duplicate_binaries':
            CheckDuplicateBinariesCommand(api).perform(opts.save)
        elif cmd == 'config':
            projects = set()
            key = value = None
            stagings = api.get_staging_projects_short(None) + \
                       api.get_staging_projects()
            for arg in args[1:]:
                if arg in stagings:
                    projects.add(api.prj_from_short(arg))
                elif key is None:
                    key = arg
                elif value is None:
                    value = arg
                else:
                    value += ' ' + arg

            if not len(projects):
                projects = api.get_staging_projects()

            ConfigCommand(api).perform(projects, key, value, opts.append, opts.clear)
        elif cmd == 'freeze':
            for prj in args[1:]:
                prj = api.prj_from_short(prj)
                print(Fore.YELLOW + prj)
                FreezeCommand(api).perform(prj, copy_bootstrap=opts.bootstrap)
        elif cmd == 'frozenage':
            projects = api.get_staging_projects_short() if len(args) == 1 else args[1:]
            for prj in projects:
                prj = api.prj_from_letter(prj)
                print('{} last frozen {}{:.1f} days ago'.format(
                    Fore.YELLOW + prj + Fore.RESET,
                    Fore.GREEN if api.prj_frozen_enough(prj) else Fore.RED,
                    api.days_since_last_freeze(prj)))
        elif cmd == 'acheck':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64")
            if version_totest:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty = api.is_repo_dirty(api.project, 'totest')
                print("version_openqa: %s / version_totest: %s / totest_dirty: %s\n" % (version_openqa, version_totest, totest_dirty))
            else:
                print("acheck is unavailable in %s!\n" % (api.project))
        elif cmd == 'accept':
            # Is it safe to accept? Meaning: /totest contains what it should and is not dirty
            version_totest = api.get_binary_version(api.project, "openSUSE-release.rpm", repository="totest", arch="x86_64")

            if version_totest is None or opts.force:
                # SLE does not have a totest_version or openqa_version - ignore it
                version_openqa = version_totest
                totest_dirty   = False
            else:
                version_openqa = api.pseudometa_file_load('version_totest')
                totest_dirty   = api.is_repo_dirty(api.project, 'totest')

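            # Proceed only when both versions agree and the totest repository
            # is not dirty, i.e. /totest contains what it should.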
            if version_openqa == version_totest and not totest_dirty:
                cmd = AcceptCommand(api)
                for prj in args[1:]:
                    if cmd.perform(api.prj_from_letter(prj), opts.force):
                        cmd.reset_rebuild_data(prj)
                    else:
                        return
                    if not opts.no_cleanup:
                        if api.item_exists(api.prj_from_letter(prj)):
                            cmd.cleanup(api.prj_from_letter(prj))
                cmd.accept_other_new()
                if opts.project.startswith('openSUSE:'):
                    cmd.update_factory_version()
                    if api.item_exists(api.crebuild):
                        cmd.sync_buildfailures()
            else:
                print("Not safe to accept: /totest is not yet synced")
        elif cmd == 'unselect':
            if opts.message:
                print('Ignoring requests first')
                IgnoreCommand(api).perform(args[1:], opts.message)
            UnselectCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'select':
            # Include list of all stagings in short-hand and by full name.
            existing_stagings = api.get_staging_projects_short(None)
            existing_stagings += api.get_staging_projects()
            stagings = []
            requests = []
            for arg in args[1:]:
                # Since requests may be given by either request ID or package
                # name, and stagings may include multi-letter special stagings,
                # there is no easy way to distinguish between stagings and
                # requests in the arguments. Therefore, check whether an
                # argument is in the list of short-hand and full project name
                # stagings and otherwise consider it a request. This also
                # allows for special stagings with the same name as a package,
                # but the staging will be assumed the first time around. The
                # current practice is to start a special staging with a capital
                # letter, which makes it unique. Lastly, adi stagings are
                # consistently prefixed with "adi:", which also distinguishes
                # them from request IDs.
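                # For example (hypothetical arguments):
                #   osc staging select A adi:7 123456 some-package
                # would treat 'A' and 'adi:7' as stagings and '123456' and
                # 'some-package' as requests.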
                if arg in existing_stagings and arg not in stagings:
                    stagings.append(api.extract_staging_short(arg))
                elif arg not in requests:
                    requests.append(arg)

            if len(stagings) != 1 or len(requests) == 0 or opts.filter_by or opts.group_by:
                if opts.move or opts.filter_from:
                    print('--move and --filter-from must be used with explicit staging and request list')
                    return

                open_requests = api.get_open_requests({'withhistory': 1})
                if len(open_requests) == 0:
                    print('No open requests to consider')
                    return

                splitter = RequestSplitter(api, open_requests, in_ring=True)
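                # in_ring=True is assumed to restrict the splitter to requests
                # whose target packages belong to the rings.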

                considerable = splitter.stagings_load(stagings)
                if considerable == 0:
                    print('No considerable stagings on which to act')
                    return

                if opts.merge:
                    splitter.merge()
                if opts.try_strategies:
                    splitter.strategies_try()
                if len(requests) > 0:
                    splitter.strategy_do('requests', requests=requests)
                if opts.strategy:
                    splitter.strategy_do(opts.strategy)
                elif opts.filter_by or opts.group_by:
                    kwargs = {}
                    if opts.filter_by:
                        kwargs['filters'] = opts.filter_by
                    if opts.group_by:
                        kwargs['groups'] = opts.group_by
                    splitter.strategy_do('custom', **kwargs)
                else:
                    if opts.merge:
                        # Merge any 'none' strategies before the final 'none' strategy.
                        splitter.merge(strategy_none=True)
                    splitter.strategy_do('none')
                    splitter.strategy_do_non_bootstrapped('none')

                proposal = splitter.proposal
                if len(proposal) == 0:
                    print('Empty proposal')
                    return

                if opts.interactive:
                    with tempfile.NamedTemporaryFile(suffix='.yml') as temp:
                        temp.write(yaml.safe_dump(splitter.proposal, default_flow_style=False) + '\n\n')

                        if len(splitter.requests):
                            temp.write('# remaining requests:\n')
                            for request in splitter.requests:
                                temp.write('#    {}: {}\n'.format(
                                    request.get('id'), request.find('action/target').get('package')))
                            temp.write('\n')

                        temp.write('# move requests between stagings or comment/remove them\n')
                        temp.write('# change the target staging for a group\n')
                        temp.write('# remove the group, requests, staging, or strategy to skip\n')
                        temp.write('# stagings\n')
                        if opts.merge:
                            temp.write('# - mergeable: {}\n'
                                       .format(', '.join(sorted(splitter.stagings_mergeable +
                                                                splitter.stagings_mergeable_none))))
                        temp.write('# - considered: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_considerable))))
                        temp.write('# - remaining: {}\n'
                                   .format(', '.join(sorted(splitter.stagings_available))))
                        temp.flush()

                        editor = os.getenv('EDITOR')
                        if not editor:
                            editor = 'xdg-open'
                        return_code = subprocess.call(editor.split(' ') + [temp.name])

                        proposal = yaml.safe_load(open(temp.name).read())

                        # Filter invalidated groups from proposal.
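                        # Each surviving entry is expected to carry at least
                        # the keys checked below; roughly (illustrative):
                        #   some-group:
                        #     group: some-group
                        #     staging: A
                        #     strategy: ...
                        #     requests: {123456: some-package}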
                        keys = ['group', 'requests', 'staging', 'strategy']
                        for group, info in sorted(proposal.items()):
                            for key in keys:
                                if not info.get(key):
                                    del proposal[group]
                                    break

                print(yaml.safe_dump(proposal, default_flow_style=False))

                print('Accept proposal? [y/n] (y): ', end='')
                if opts.non_interactive:
                    print('y')
                else:
                    response = raw_input().lower()
                    if response != '' and response != 'y':
                        print('Quit')
                        return

                for group, info in sorted(proposal.items()):
                    print('Staging {} in {}'.format(group, info['staging']))

                    # SelectCommand expects strings.
                    request_ids = map(str, info['requests'].keys())
                    target_project = api.prj_from_short(info['staging'])

                    if 'merge' not in info:
                        # Assume that the original splitter_info is desirable
                        # and that this staging is simply a manual follow-up.
                        api.set_splitter_info_in_prj_pseudometa(target_project, info['group'], info['strategy'])

                    SelectCommand(api, target_project) \
                        .perform(request_ids, no_freeze=opts.no_freeze)
            else:
                target_project = api.prj_from_short(stagings[0])
                if opts.add:
                    api.mark_additional_packages(target_project, [opts.add])
                else:
                    SelectCommand(api, target_project) \
                        .perform(requests, opts.move,
                                 api.prj_from_short(opts.filter_from), opts.no_freeze)
        elif cmd == 'cleanup_rings':
            CleanupRings(api).perform()
        elif cmd == 'ignore':
            IgnoreCommand(api).perform(args[1:], opts.message)
        elif cmd == 'unignore':
            UnignoreCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'list':
            ListCommand(api).perform(supersede=opts.supersede)
        elif cmd == 'lock':
            lock.hold(opts.message)
        elif cmd == 'adi':
            AdiCommand(api).perform(args[1:], move=opts.move, by_dp=opts.by_develproject, split=opts.split)
        elif cmd == 'rebuild':
            RebuildCommand(api).perform(args[1:], opts.force)
        elif cmd == 'repair':
            RepairCommand(api).perform(args[1:], opts.cleanup)
        elif cmd == 'setprio':
            stagings = []
            priority = None

            priorities = ['critical', 'important', 'moderate', 'low']
            for arg in args[1:]:
                if arg in priorities:
                    priority = arg
                else:
                    stagings.append(arg)
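            # e.g. the arguments `A B important` (hypothetical) would yield
            # stagings = ['A', 'B'] and priority = 'important'.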

            PrioCommand(api).perform(stagings, priority)
        elif cmd == 'supersede':
            SupersedeCommand(api).perform(args[1:])
        elif cmd == 'unlock':
            lock.release(force=True)
    parser.add_argument('--debug',
                        action='store_true',
                        help='print info useful for debugging')
    parser.add_argument('-p',
                        '--project',
                        default='openSUSE:Factory',
                        help='project from which to clone')

    args = parser.parse_args()

    osc.conf.get_config(override_apiurl=args.apiurl_target)
    apiurl_target = osc.conf.config['apiurl']
    osc.conf.get_config(override_apiurl=args.apiurl_source)
    apiurl_source = osc.conf.config['apiurl']

    if apiurl_target == apiurl_source:
        print('target APIURL must not be the same as source APIURL')
        sys.exit(1)

    if args.cache:
        from osclib.cache import Cache
        Cache.PATTERNS = {}
        # Prevent caching source information from local clone.
        Cache.PATTERNS['/source/[^/]+/[^/]+/[^/]+?rev'] = 0
        Cache.PATTERNS['.*'] = Cache.TTL_LONG * 2
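        # Cache.PATTERNS is assumed to map URL regexes to TTLs in seconds, so a
        # TTL of 0 effectively disables caching for matching requests while
        # everything else is kept for twice the long TTL.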
        Cache.init('clone')

    osc.conf.config['debug'] = args.debug
    project_fence.project = args.project
    sys.exit(args.func(apiurl_source, apiurl_target, args.project))