Exemple #1
0
def download_file(url, relpath, quiet=False, noprogress=False, size=None, num=None):
    """Download files from remote

    :param url: remote URL to fetch
    :param relpath: local destination path; parent dirs are created as needed
    :param quiet: suppress all console output
    :param noprogress: suppress only the progress indicator
    :param size: total number of files in the batch (for "[n/m]" display)
    :param num: position of this file in the batch (for "[n/m]" display)
    :raises requests.HTTPError: if the server returns an error status
    """

    if '/' in relpath:
        koji.ensuredir(os.path.dirname(relpath))
    if not quiet:
        if size and num:
            print(_("Downloading [%d/%d]: %s") % (num, size, relpath))
        else:
            print(_("Downloading: %s") % relpath)

    # closing() keeps compatibility with older requests versions where
    # Response is not a context manager
    with closing(requests.get(url, stream=True)) as response:
        # raise error if occured
        response.raise_for_status()
        length = response.headers.get('content-length')
        # context manager guarantees the file is closed even on error;
        # the original leaked the handle when content-length was missing
        with open(relpath, 'wb') as f:
            if length is None:
                # no content-length: cannot stream with progress, so write
                # the whole body at once and report 100% afterwards
                f.write(response.content)
                length = len(response.content)
                if not (quiet or noprogress):
                    _download_progress(length, length)
            else:
                l = 0
                length = int(length)
                for chunk in response.iter_content(chunk_size=65536):
                    l += len(chunk)
                    f.write(chunk)
                    if not (quiet or noprogress):
                        _download_progress(length, l)

    if not (quiet or noprogress):
        print('')
Exemple #2
0
    def fetchDockerfile(self, src, build_tag):
        """
        Gets Dockerfile. Roughly corresponds to getSRPM method of build task

        :param src: SCM URL of the sources to check out
        :param build_tag: build tag info, passed through to the SCM callbacks
        :returns: path to the checked-out Dockerfile
        :raises koji.BuildError: if the checkout contains no Dockerfile
        """
        # validate the SCM URL against policy before doing any checkout
        scm = SCM(src)
        scm.assert_allowed(self.options.allowed_scms)
        scmdir = os.path.join(self.workdir, 'sources')

        koji.ensuredir(scmdir)

        logfile = os.path.join(self.workdir, 'checkout-for-labels.log')
        uploadpath = self.getUploadDir()

        koji.ensuredir(uploadpath)

        # notify plugins before the checkout happens
        self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(), build_tag=build_tag,
                           scratch=self.opts.get('scratch', False))

        # Check out sources from the SCM
        sourcedir = scm.checkout(scmdir, self.session, uploadpath, logfile)

        # notify plugins after the checkout, including where the sources landed
        self.run_callbacks("postSCMCheckout", scminfo=scm.get_info(), build_tag=build_tag,
                           scratch=self.opts.get('scratch', False), srcdir=sourcedir)

        fn = os.path.join(sourcedir, 'Dockerfile')
        if not os.path.exists(fn):
            raise koji.BuildError("Dockerfile file missing: %s" % fn)
        return fn
Exemple #3
0
def maven_import(cbtype, *args, **kws):
    """Koji plugin callback: extract and import Maven artifacts from an rpm.

    Only acts when the hub has Maven support enabled and the imported
    object is an rpm whose name matches one of the configured patterns.
    """
    global config
    if not context.opts.get("EnableMaven", False):
        return
    if kws.get("type") != "rpm":
        return
    buildinfo = kws["build"]
    rpminfo = kws["rpm"]
    filepath = kws["filepath"]

    # lazily load the plugin configuration on first invocation
    # NOTE(review): SafeConfigParser is the Python 2 spelling; confirm the
    # module's ConfigParser import before running under Python 3
    if not config:
        config = ConfigParser.SafeConfigParser()
        config.read(CONFIG_FILE)
    name_patterns = config.get("patterns", "rpm_names").split()
    for pattern in name_patterns:
        if fnmatch.fnmatch(rpminfo["name"], pattern):
            break
    else:
        # no pattern matched -- nothing to import
        return

    # scratch directory used to expand the rpm contents
    tmpdir = os.path.join(koji.pathinfo.work(), "rpm2maven", koji.buildLabel(buildinfo))
    try:
        if os.path.exists(tmpdir):
            shutil.rmtree(tmpdir)
        koji.ensuredir(tmpdir)
        expand_rpm(filepath, tmpdir)
        scan_and_import(buildinfo, rpminfo, tmpdir)
    finally:
        # always clean up the scratch directory, even on failure
        if os.path.exists(tmpdir):
            shutil.rmtree(tmpdir)
Exemple #4
0
def download_file(url, relpath, quiet=False, noprogress=False, size=None, num=None):
    """Download files from remote"""

    if '/' in relpath:
        koji.ensuredir(os.path.dirname(relpath))
    if not quiet:
        if size and num:
            print(_("Downloading [%d/%d]: %s") % (num, size, relpath))
        else:
            print(_("Downloading: %s") % relpath)

    show_progress = not (quiet or noprogress)
    # closing needs to be used for requests < 2.18.0
    with closing(requests.get(url, stream=True)) as response:
        # raise error if occured
        response.raise_for_status()
        # missing/empty content-length header is treated as an unknown (0) total
        expected = int(response.headers.get('content-length') or 0)
        received = 0
        with open(relpath, 'wb') as outfile:
            for chunk in response.iter_content(chunk_size=65536):
                received += len(chunk)
                outfile.write(chunk)
                if show_progress:
                    _download_progress(expected, received)
            # with an unknown total, report 100% once the download completes
            if show_progress and not expected:
                _download_progress(received, received)

    if show_progress:
        print('')
    def fetchDockerfile(self, src, build_tag):
        """
        Gets Dockerfile. Roughly corresponds to getSRPM method of build task

        :param src: SCM URL of the sources to check out
        :param build_tag: build tag info, passed through to the SCM callbacks
        :returns: path to the checked-out Dockerfile
        :raises koji.BuildError: if the checkout contains no Dockerfile
        """
        # validate the SCM URL against policy before doing any checkout
        scm = SCM(src)
        scm.assert_allowed(self.options.allowed_scms)
        scmdir = os.path.join(self.workdir, 'sources')

        koji.ensuredir(scmdir)

        logfile = os.path.join(self.workdir, 'checkout-for-labels.log')
        uploadpath = self.getUploadDir()

        koji.ensuredir(uploadpath)

        # notify plugins before the checkout happens
        self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(), build_tag=build_tag,
                           scratch=self.opts.get('scratch', False))

        # Check out sources from the SCM
        sourcedir = scm.checkout(scmdir, self.session, uploadpath, logfile)

        # notify plugins after the checkout, including where the sources landed
        self.run_callbacks("postSCMCheckout", scminfo=scm.get_info(), build_tag=build_tag,
                           scratch=self.opts.get('scratch', False), srcdir=sourcedir)

        fn = os.path.join(sourcedir, 'Dockerfile')
        if not os.path.exists(fn):
            raise koji.BuildError("Dockerfile file missing: %s" % fn)
        return fn
Exemple #6
0
def maven_import(cbtype, *args, **kws):
    """Koji plugin callback: extract and import Maven artifacts from an rpm.

    Only acts when the hub has Maven support enabled and the imported
    object is an rpm whose name matches one of the configured patterns.
    """
    global config
    if not context.opts.get('EnableMaven', False):
        return
    if kws.get('type') != 'rpm':
        return
    buildinfo = kws['build']
    rpminfo = kws['rpm']
    filepath = kws['filepath']

    # lazily load the plugin configuration on first invocation
    # NOTE(review): SafeConfigParser is the Python 2 spelling; confirm the
    # module's ConfigParser import before running under Python 3
    if not config:
        config = ConfigParser.SafeConfigParser()
        config.read(CONFIG_FILE)
    name_patterns = config.get('patterns', 'rpm_names').split()
    for pattern in name_patterns:
        if fnmatch.fnmatch(rpminfo['name'], pattern):
            break
    else:
        # no pattern matched -- nothing to import
        return

    # scratch directory used to expand the rpm contents
    tmpdir = os.path.join(koji.pathinfo.work(), 'rpm2maven',
                          koji.buildLabel(buildinfo))
    try:
        if os.path.exists(tmpdir):
            shutil.rmtree(tmpdir)
        koji.ensuredir(tmpdir)
        expand_rpm(filepath, tmpdir)
        scan_and_import(buildinfo, rpminfo, tmpdir)
    finally:
        # always clean up the scratch directory, even on failure
        if os.path.exists(tmpdir):
            shutil.rmtree(tmpdir)
Exemple #7
0
def maven_import(cbtype, *args, **kws):
    """Koji plugin callback: import Maven artifacts from a matching rpm."""
    global config
    if not context.opts.get('EnableMaven', False):
        return
    if kws.get('type') != 'rpm':
        return
    buildinfo = kws['build']
    rpminfo = kws['rpm']
    filepath = kws['filepath']

    # load the plugin configuration lazily, on first use
    if not config:
        config = ConfigParser.SafeConfigParser()
        config.read(CONFIG_FILE)
    name_patterns = config.get('patterns', 'rpm_names').split()
    # only rpms whose name matches a configured pattern are handled
    if not any(fnmatch.fnmatch(rpminfo['name'], pat) for pat in name_patterns):
        return

    tmpdir = os.path.join(koji.pathinfo.work(), 'rpm2maven', koji.buildLabel(buildinfo))
    try:
        # start from a clean scratch directory
        if os.path.exists(tmpdir):
            shutil.rmtree(tmpdir)
        koji.ensuredir(tmpdir)
        expand_rpm(filepath, tmpdir)
        scan_and_import(buildinfo, rpminfo, tmpdir)
    finally:
        # always remove the scratch directory, even on failure
        if os.path.exists(tmpdir):
            shutil.rmtree(tmpdir)
Exemple #8
0
def move_and_symlink(src, dst, relative=True, create_dir=False):
    """Move src to dest and create symlink instead of original file"""
    if create_dir:
        koji.ensuredir(os.path.dirname(dst))
    safer_move(src, dst)
    # relative links survive moving the whole tree to a different prefix
    link_target = os.path.relpath(dst, os.path.dirname(src)) if relative else dst
    os.symlink(link_target, src)
Exemple #9
0
def move_and_symlink(src, dst, relative=True, create_dir=False):
    """Move src to dest and create symlink instead of original file"""
    src_dir = os.path.dirname(src)
    if create_dir:
        koji.ensuredir(os.path.dirname(dst))
    safer_move(src, dst)
    target = dst
    if relative:
        # point the link at dst relative to the symlink's own directory
        target = os.path.relpath(dst, src_dir)
    os.symlink(target, src)
Exemple #10
0
 def do_mounts(self, rootdir, mounts):
     """Mount the configured filesystems inside the runroot.

     :param rootdir: chroot directory the mount points are created under
     :param mounts: sequence of (dev, path, type, opts) tuples
     :raises koji.GenericError: on bad configuration or a failed mount
     """
     if not mounts:
         return
     self.logger.info('New runroot')
     self.logger.info("Runroot mounts: %s" % mounts)
     # record each successful mount so undo_mounts can unwind them later
     fn = '%s/tmp/runroot_mounts' % rootdir
     fslog = open(fn, 'a')
     logfile = "%s/do_mounts.log" % self.workdir
     uploadpath = self.getUploadDir()
     error = None
     for dev, path, type, opts in mounts:
         if not path.startswith('/'):
             raise koji.GenericError("invalid mount point: %s" % path)
         mpoint = "%s%s" % (rootdir, path)
         if opts is None:
             opts = []
         else:
             opts = opts.split(',')
         if 'bind' in opts:
             #make sure dir exists
             if not os.path.isdir(dev):
                 error = koji.GenericError(
                     "No such directory or mount: %s" % dev)
                 break
             # bind mounts have no real fs type
             type = 'none'
         if 'bg' in opts:
             # a background mount could outlive the task; refuse it
             error = koji.GenericError(
                 "bad config: background mount not allowed")
             break
         opts = ','.join(opts)
         cmd = ['mount', '-t', type, '-o', opts, dev, mpoint]
         self.logger.info("Mount command: %r" % cmd)
         koji.ensuredir(mpoint)
         # compat_mode selects the older log_output signature (no session arg)
         if compat_mode:
             status = log_output(cmd[0],
                                 cmd,
                                 logfile,
                                 uploadpath,
                                 logerror=True,
                                 append=True)
         else:
             status = log_output(self.session,
                                 cmd[0],
                                 cmd,
                                 logfile,
                                 uploadpath,
                                 logerror=True,
                                 append=True)
         if not _isSuccess(status):
             error = koji.GenericError("Unable to mount %s: %s" \
                     % (mpoint, _parseStatus(status, cmd)))
             break
         # log the mount point only after a successful mount
         fslog.write("%s\n" % mpoint)
         fslog.flush()
     fslog.close()
     if error is not None:
         # unwind whatever did get mounted before raising
         self.undo_mounts(rootdir, fatal=False)
         raise error
Exemple #11
0
 def test_ensuredir(self, mock_isdir, mock_exists, mock_mkdir):
     """ensuredir creates each missing path component, parent first."""
     # three exists() checks are consumed; only the last returns True
     mock_exists.side_effect = [False, False, True]
     mock_isdir.return_value = True
     # trailing slash should be tolerated
     ensuredir('/path/foo/bar/')
     self.assertEqual(mock_exists.call_count, 3)
     self.assertEqual(mock_isdir.call_count, 1)
     # both missing components get created, parent before child
     mock_mkdir.assert_has_calls(
         [mock.call('/path/foo'),
          mock.call('/path/foo/bar')])
 def make_tree(self, data):
     """Create directories and stub log files under self.tempdir.

     Entries ending with '/' become bare directories; all other entries
     become files (parents created as needed) with placeholder contents.
     """
     for relname in data:
         fullpath = "%s/%s" % (self.tempdir, relname)
         if fullpath.endswith('/'):
             # just make a directory
             koji.ensuredir(fullpath)
             continue
         koji.ensuredir(os.path.dirname(fullpath))
         with open(fullpath, 'w') as fo:
             fo.write('TEST LOG FILE CONTENTS\n')
Exemple #13
0
 def make_tree(self, data):
     """Create directories and stub log files under self.tempdir.

     :param data: iterable of paths relative to self.tempdir; entries
                  ending with '/' create a bare directory, others create
                  a file (plus parent directories) with placeholder text
     """
     for filepath in data:
         path = "%s/%s" % (self.tempdir, filepath)
         if path.endswith('/'):
             # just make a directory
             dirpath = path
             path = None
         else:
             dirpath = os.path.dirname(path)
         koji.ensuredir(dirpath)
         if path:
             # open() instead of the Python 2-only file() builtin,
             # matching the other copy of this helper in the codebase
             with open(path, 'w') as fo:
                 fo.write('TEST LOG FILE CONTENTS\n')
Exemple #14
0
    def test_ensuredir_errors(self, mock_isdir, mock_exists, mock_mkdir):
        """ensuredir error paths: bad root, non-directory, and mkdir races."""
        # a missing root directory is fatal and nothing gets created
        mock_exists.return_value = False
        with self.assertRaises(OSError) as cm:
            ensuredir('/')
        self.assertEqual(cm.exception.args[0], 'root directory missing? /')
        mock_mkdir.assert_not_called()

        # an existing path that is not a directory is fatal
        mock_exists.return_value = True
        mock_isdir.return_value = False
        with self.assertRaises(OSError) as cm:
            ensuredir('/path/foo/bar')
        self.assertEqual(cm.exception.args[0],
                         'Not a directory: /path/foo/bar')
        mock_mkdir.assert_not_called()

        # EEXIST from mkdir is re-raised when the path is not a directory
        mock_exists.return_value = False
        mock_isdir.return_value = False
        mock_mkdir.side_effect = OSError(errno.EEXIST, 'error msg')
        with self.assertRaises(OSError) as cm:
            ensuredir('path')
        self.assertEqual(cm.exception.args[0], errno.EEXIST)
        mock_mkdir.assert_called_once_with('path')

        # EEXIST is tolerated when the path turns out to be a directory
        # (i.e. we lost a creation race) -- no exception expected
        mock_mkdir.reset_mock()
        mock_mkdir.side_effect = OSError(errno.EEXIST, 'error msg')
        mock_isdir.return_value = True
        ensuredir('path')
        mock_mkdir.assert_called_once_with('path')
Exemple #15
0
 def do_mounts(self, rootdir, mounts):
     """Mount the configured filesystems inside the runroot.

     :param rootdir: chroot directory the mount points are created under
     :param mounts: sequence of (dev, path, type, opts) tuples
     :raises koji.GenericError: on bad configuration or a failed mount
     """
     if not mounts:
         return
     self.logger.info('New runroot')
     self.logger.info("Runroot mounts: %s" % mounts)
     # record each successful mount so undo_mounts can unwind them later
     fn = '%s/tmp/runroot_mounts' % rootdir
     # NOTE(review): file() is the Python 2-only builtin; the other copy of
     # this method uses open() -- confirm the target interpreter
     fslog = file(fn, 'a')
     logfile = "%s/do_mounts.log" % self.workdir
     uploadpath = self.getUploadDir()
     error = None
     for dev,path,type,opts in mounts:
         if not path.startswith('/'):
             raise koji.GenericError("invalid mount point: %s" % path)
         mpoint = "%s%s" % (rootdir,path)
         if opts is None:
             opts = []
         else:
             opts = opts.split(',')
         if 'bind' in opts:
             #make sure dir exists
             if not os.path.isdir(dev):
                 error = koji.GenericError("No such directory or mount: %s" % dev)
                 break
             # bind mounts have no real fs type
             type = 'none'
             # NOTE(review): this branch looks unreachable -- a None path
             # would already have raised AttributeError at startswith() above
             if path is None:
                 #shorthand for "same path"
                 path = dev
         if 'bg' in opts:
             # a background mount could outlive the task; refuse it
             error = koji.GenericError("bad config: background mount not allowed")
             break
         opts = ','.join(opts)
         cmd = ['mount', '-t', type, '-o', opts, dev, mpoint]
         self.logger.info("Mount command: %r" % cmd)
         koji.ensuredir(mpoint)
         # compat_mode selects the older log_output signature (no session arg)
         if compat_mode:
             status = log_output(cmd[0], cmd, logfile, uploadpath, logerror=True, append=True)
         else:
             status = log_output(self.session, cmd[0], cmd, logfile, uploadpath, logerror=True, append=True)
         if not _isSuccess(status):
             error = koji.GenericError("Unable to mount %s: %s" \
                     % (mpoint, _parseStatus(status, cmd)))
             break
         # log the mount point only after a successful mount
         fslog.write("%s\n" % mpoint)
         fslog.flush()
     fslog.close()
     if error is not None:
         # unwind whatever did get mounted before raising
         self.undo_mounts(rootdir, fatal=False)
         raise error
Exemple #16
0
def linked_upload(localfile, path, name=None):
    """Link a file into the (locally writable) workdir, bypassing upload"""
    if name is None:
        name = os.path.basename(localfile)
    prev_umask = os.umask(0o02)
    try:
        workdir = koji.pathinfo.work()
        dest_dir = os.path.join(workdir, path)
        dst = os.path.join(dest_dir, name)
        koji.ensuredir(dest_dir)
        # fix uid/gid to keep httpd happy
        work_stat = os.stat(workdir)
        os.chown(dest_dir, work_stat.st_uid, work_stat.st_gid)
        print("Linking rpm to: %s" % dst)
        os.link(localfile, dst)
    finally:
        # restore the caller's umask no matter what
        os.umask(prev_umask)
Exemple #17
0
def linked_upload(localfile, path, name=None):
    """Link a file into the (locally writable) workdir, bypassing upload"""
    old_umask = os.umask(0o02)
    try:
        basename = name if name is not None else os.path.basename(localfile)
        target_dir = os.path.join(koji.pathinfo.work(), path)
        destination = os.path.join(target_dir, basename)
        koji.ensuredir(target_dir)
        # fix uid/gid to keep httpd happy
        st = os.stat(koji.pathinfo.work())
        os.chown(target_dir, st.st_uid, st.st_gid)
        print("Linking rpm to: %s" % destination)
        os.link(localfile, destination)
    finally:
        # always restore the caller's umask
        os.umask(old_umask)
Exemple #18
0
def _importURL(url, fn):
    """Import an rpm directly from a url"""
    serverdir = _unique_path('build-recent')
    # TODO - would be possible, using uploadFile directly, to upload without
    # writing locally; for now, though, just use uploadWrapper
    koji.ensuredir(workpath)
    dst = "%s/%s" % (workpath, fn)
    logging.info("Downloading %s to %s..." % (url, dst))
    _downloadURL(url, dst)
    logging.info("Uploading %s..." % dst)
    localkojisession.uploadWrapper(dst, serverdir, blocksize=65536)
    localkojisession.importRPM(serverdir, fn)
Exemple #19
0
def _importURL(url, fn):
    """Import an rpm directly from a url

    :param url: remote location of the rpm
    :param fn: rpm file name, used for the local copy and the hub import
    """
    serverdir = _unique_path('build-recent')
    #TODO - would be possible, using uploadFile directly, to upload without writing locally.
    #for now, though, just use uploadWrapper
    koji.ensuredir(workpath)
    dst = "%s/%s" % (workpath, fn)
    # print() calls for Python 3 compatibility (were py2 print statements)
    print("Downloading %s to %s..." % (url, dst))
    _downloadURL(url, dst)
    print("Uploading %s..." % dst)
    localkojisession.uploadWrapper(dst, serverdir, blocksize=65536)
    localkojisession.importRPM(serverdir, fn)
    def fetchDockerfile(self, src):
        """
        Gets Dockerfile. Roughly corresponds to getSRPM method of build task

        :param src: SCM URL of the sources to check out
        :returns: path to the checked-out Dockerfile
        :raises koji.BuildError: if the checkout contains no Dockerfile
        """
        # validate the SCM URL against policy before doing any checkout
        scm = SCM(src)
        scm.assert_allowed(self.options.allowed_scms)
        scmdir = os.path.join(self.workdir, 'sources')

        koji.ensuredir(scmdir)

        logfile = os.path.join(self.workdir, 'checkout-for-labels.log')
        uploadpath = self.getUploadDir()

        koji.ensuredir(uploadpath)

        # Check out sources from the SCM
        sourcedir = scm.checkout(scmdir, self.session, uploadpath, logfile)

        fn = os.path.join(sourcedir, 'Dockerfile')
        if not os.path.exists(fn):
            # Python 3-compatible raise form (was py2 comma syntax)
            raise koji.BuildError("Dockerfile file missing: %s" % fn)
        return fn
    def fetchDockerfile(self, src):
        """
        Gets Dockerfile. Roughly corresponds to getSRPM method of build task

        :param src: SCM URL of the sources to check out
        :returns: path to the checked-out Dockerfile
        :raises koji.BuildError: if the checkout contains no Dockerfile
        """
        # validate the SCM URL against policy before doing any checkout
        scm = SCM(src)
        scm.assert_allowed(self.options.allowed_scms)
        scmdir = os.path.join(self.workdir, 'sources')

        koji.ensuredir(scmdir)

        logfile = os.path.join(self.workdir, 'checkout-for-labels.log')
        uploadpath = self.getUploadDir()

        koji.ensuredir(uploadpath)

        # Check out sources from the SCM
        sourcedir = scm.checkout(scmdir, self.session, uploadpath, logfile)

        fn = os.path.join(sourcedir, 'Dockerfile')
        if not os.path.exists(fn):
            # Python 3-compatible raise form (was py2 comma syntax)
            raise koji.BuildError("Dockerfile file missing: %s" % fn)
        return fn
Exemple #22
0
def download_file(url,
                  relpath,
                  quiet=False,
                  noprogress=False,
                  size=None,
                  num=None):
    """Download files from remote

    :param url: remote URL to fetch
    :param relpath: local destination path; parent dirs are created as needed
    :param quiet: suppress all console output
    :param noprogress: suppress only the progress indicator
    :param size: total number of files in the batch (for "[n/m]" display)
    :param num: position of this file in the batch (for "[n/m]" display)
    :raises requests.HTTPError: if the server returns an error status
    """

    if '/' in relpath:
        koji.ensuredir(os.path.dirname(relpath))
    if not quiet:
        if size and num:
            print(_("Downloading [%d/%d]: %s") % (num, size, relpath))
        else:
            print(_("Downloading: %s") % relpath)

    with closing(requests.get(url, stream=True)) as response:
        # fail on HTTP errors instead of writing the error page to disk
        # (consistent with the other download_file variants in this file)
        response.raise_for_status()
        length = response.headers.get('content-length')
        # context manager guarantees the file is closed even on error;
        # the original leaked the handle when content-length was missing
        with open(relpath, 'wb') as f:
            if length is None:
                # no content-length: write the whole body at once
                f.write(response.content)
                length = len(response.content)
                if not (quiet or noprogress):
                    _download_progress(length, length)
            else:
                l = 0
                length = int(length)
                for chunk in response.iter_content(chunk_size=65536):
                    l += len(chunk)
                    f.write(chunk)
                    if not (quiet or noprogress):
                        _download_progress(length, l)

    if not (quiet or noprogress):
        print('')
Exemple #23
0
def download_file(url,
                  relpath,
                  quiet=False,
                  noprogress=False,
                  size=None,
                  num=None):
    """Download files from remote

    :param url: remote URL to fetch
    :param relpath: local destination path; parent dirs are created as needed
    :param quiet: suppress all console output
    :param noprogress: suppress only the progress indicator
    :param size: total number of files in the batch (for "[n/m]" display)
    :param num: position of this file in the batch (for "[n/m]" display)
    """

    if '/' in relpath:
        koji.ensuredir(os.path.dirname(relpath))
    if not quiet:
        if size and num:
            print(_("Downloading [%d/%d]: %s") % (num, size, relpath))
        else:
            print(_("Downloading: %s") % relpath)
    c = pycurl.Curl()
    try:
        c.setopt(c.URL, url)
        # allow 301/302 redirect
        c.setopt(pycurl.FOLLOWLOCATION, 1)
        # open the target with a context manager so the handle is flushed
        # and closed even on failure -- the original passed an anonymous
        # open() handle to WRITEDATA that was never closed
        with open(relpath, 'wb') as f:
            c.setopt(c.WRITEDATA, f)
            if not (quiet or noprogress):
                # prefer the modern progress callback; fall back to the
                # deprecated one on older pycurl
                proc_func_param = getattr(c, 'XFERINFOFUNCTION', None)
                if proc_func_param is None:
                    proc_func_param = getattr(c, 'PROGRESSFUNCTION', None)
                if proc_func_param is not None:
                    c.setopt(c.NOPROGRESS, False)
                    c.setopt(proc_func_param, _download_progress)
                else:
                    error(_('Error: XFERINFOFUNCTION and PROGRESSFUNCTION are not supported by pyCurl. Quit download progress'))
            c.perform()
    finally:
        # always release the curl handle
        c.close()
    if not (quiet or noprogress):
        print('')
Exemple #24
0
    def createContainer(self,
                        src=None,
                        target_info=None,
                        arches=None,
                        scratch=None,
                        isolated=None,
                        yum_repourls=[],
                        branch=None,
                        push_url=None,
                        koji_parent_build=None,
                        release=None,
                        flatpak=False,
                        module=None,
                        signing_intent=None,
                        compose_ids=None):
        if not yum_repourls:
            yum_repourls = []

        this_task = self.session.getTaskInfo(self.id)
        self.logger.debug("This task: %r", this_task)
        owner_info = self.session.getUser(this_task['owner'])
        self.logger.debug("Started by %s", owner_info['name'])

        scm = My_SCM(src)
        scm.assert_allowed(self.options.allowed_scms)
        git_uri = scm.get_git_uri()
        component = scm.get_component()
        arch = None

        if not arches:
            raise koji.BuildError("arches aren't specified")

        if signing_intent and compose_ids:
            raise koji.BuildError("signing_intent used with compose_ids")

        if compose_ids and yum_repourls:
            raise koji.BuildError("compose_ids used with repo_url")

        create_build_args = {
            'git_uri': git_uri,
            'git_ref': scm.revision,
            'user': owner_info['name'],
            'component': component,
            'target': target_info['name'],
            'yum_repourls': yum_repourls,
            'scratch': scratch,
            'koji_task_id': self.id,
            'architecture': arch,
        }
        if branch:
            create_build_args['git_branch'] = branch
        if push_url:
            create_build_args['git_push_url'] = push_url
        if flatpak:
            create_build_args['flatpak'] = True
        if module:
            create_build_args['module'] = module

        try:
            orchestrator_create_build_args = create_build_args.copy()
            orchestrator_create_build_args['platforms'] = arches
            if signing_intent:
                orchestrator_create_build_args[
                    'signing_intent'] = signing_intent
            if compose_ids:
                orchestrator_create_build_args['compose_ids'] = compose_ids
            if koji_parent_build:
                orchestrator_create_build_args[
                    'koji_parent_build'] = koji_parent_build
            if isolated:
                orchestrator_create_build_args['isolated'] = isolated
            if release:
                orchestrator_create_build_args['release'] = release

            create_method = self.osbs().create_orchestrator_build
            self.logger.debug("Starting %s with params: '%s", create_method,
                              orchestrator_create_build_args)
            build_response = create_method(**orchestrator_create_build_args)
        except (AttributeError, OsbsOrchestratorNotEnabled):
            # Older osbs-client, or else orchestration not enabled
            create_build_args['architecture'] = arch = arches[0]
            create_method = self.osbs().create_build
            self.logger.debug("Starting %s with params: '%s'", create_method,
                              create_build_args)
            build_response = create_method(**create_build_args)

        build_id = build_response.get_build_name()
        self.logger.debug("OSBS build id: %r", build_id)

        # When builds are cancelled the builder plugin process gets SIGINT and SIGKILL
        # If osbs has started a build it should get cancelled
        def sigint_handler(*args, **kwargs):
            if not build_id:
                return

            self.logger.warn("Cannot read logs, cancelling build %s", build_id)
            self.osbs().cancel_build(build_id)

        signal.signal(signal.SIGINT, sigint_handler)

        self.logger.debug("Waiting for osbs build_id: %s to be scheduled.",
                          build_id)
        # we need to wait for kubelet to schedule the build, otherwise it's 500
        self.osbs().wait_for_build_to_get_scheduled(build_id)
        self.logger.debug("Build was scheduled")

        osbs_logs_dir = self.resultdir()
        koji.ensuredir(osbs_logs_dir)
        pid = os.fork()
        if pid:
            try:
                self._incremental_upload_logs(pid)
            except koji.ActionNotAllowed:
                pass
        else:
            self._osbs = None

            # Following retry code is here mainly to workaround bug which causes
            # connection drop while reading logs after about 5 minutes.
            # OpenShift bug with description:
            # https://github.com/openshift/origin/issues/2348
            # and upstream bug in Kubernetes:
            # https://github.com/GoogleCloudPlatform/kubernetes/issues/9013
            retry = 0
            max_retries = 30
            while retry < max_retries:
                try:
                    self._write_incremental_logs(build_id, osbs_logs_dir)
                except Exception, error:
                    self.logger.info(
                        "Error while saving incremental logs "
                        "(retry #%d): %s", retry, error)
                    retry += 1
                    time.sleep(10)
                    continue
                break
            else:
Exemple #25
0
    def createContainer(self, src=None, target_info=None, arches=None,
                        scratch=None, isolated=None, yum_repourls=None,
                        branch=None, push_url=None, koji_parent_build=None,
                        release=None, flatpak=False, signing_intent=None,
                        compose_ids=None, skip_build=False, triggered_after_koji_task=None,
                        dependency_replacements=None, operator_csv_modifications_url=None):
        """Create an OSBS container build for the given source and target.

        Assembles the osbs-client argument dict, preferring the orchestrator
        build API and falling back to the plain create_build API for older
        clients. Returns the result of handle_build_response(), or None when
        OSBS skipped the build.

        :raises koji.BuildError: on missing or conflicting arguments
        :raises ContainerError: when OSBS rejects the build parameters
        """
        if not yum_repourls:
            yum_repourls = []

        this_task = self.session.getTaskInfo(self.id)
        self.logger.debug("This task: %r", this_task)
        owner_info = self.session.getUser(this_task['owner'])
        self.logger.debug("Started by %s", owner_info['name'])

        # validate the SCM URL against policy before using it
        scm = My_SCM(src)
        scm.assert_allowed(self.options.allowed_scms)
        git_uri = scm.get_git_uri()
        component = scm.get_component()
        arch = None

        if not arches:
            raise koji.BuildError("arches aren't specified")

        # these two options are mutually exclusive
        if signing_intent and compose_ids:
            raise koji.BuildError("signing_intent used with compose_ids")

        # arguments common to both the orchestrator and fallback APIs
        create_build_args = {
            'git_uri': git_uri,
            'git_ref': scm.revision,
            'user': owner_info['name'],
            'component': component,
            'target': target_info['name'],
            'dependency_replacements': dependency_replacements,
            'yum_repourls': yum_repourls,
            'scratch': scratch,
            'koji_task_id': self.id,
            'architecture': arch,
        }
        if branch:
            create_build_args['git_branch'] = branch
        if push_url:
            create_build_args['git_push_url'] = push_url
        if flatpak:
            create_build_args['flatpak'] = True
        if skip_build:
            create_build_args['skip_build'] = True
        if triggered_after_koji_task is not None:
            create_build_args['triggered_after_koji_task'] = triggered_after_koji_task
        if operator_csv_modifications_url:
            create_build_args['operator_csv_modifications_url'] = operator_csv_modifications_url

        # extra arguments only the orchestrator API understands
        orchestrator_create_build_args = create_build_args.copy()
        orchestrator_create_build_args['platforms'] = arches
        if signing_intent:
            orchestrator_create_build_args['signing_intent'] = signing_intent
        if compose_ids:
            orchestrator_create_build_args['compose_ids'] = compose_ids
        if koji_parent_build:
            orchestrator_create_build_args['koji_parent_build'] = koji_parent_build
        if isolated:
            orchestrator_create_build_args['isolated'] = isolated
        if release:
            orchestrator_create_build_args['release'] = release

        try:
            create_method = self.osbs().create_orchestrator_build
            self.logger.debug("Starting %s with params: '%s",
                              create_method, orchestrator_create_build_args)
            build_response = create_method(**orchestrator_create_build_args)
        except (AttributeError, OsbsOrchestratorNotEnabled):
            # Older osbs-client, or else orchestration not enabled
            create_build_args['architecture'] = arch = arches[0]
            # the fallback API does not understand skip_build
            create_build_args.pop('skip_build', None)
            create_method = self.osbs().create_build
            self.logger.debug("Starting %s with params: '%s'",
                              create_method, create_build_args)
            build_response = create_method(**create_build_args)
        except OsbsValidationException as exc:
            raise ContainerError('OSBS validation exception: {0}'.format(exc))
        if build_response is None:
            # OSBS decided there was nothing to build; still upload the logs
            self.logger.debug("Build was skipped")

            osbs_logs_dir = self.resultdir()
            koji.ensuredir(osbs_logs_dir)
            try:
                self._incremental_upload_logs()
            except koji.ActionNotAllowed:
                pass

            return

        return self.handle_build_response(build_response, arch=arch)
Exemple #26
0
    def handle_build_response(self, build_response, arch=None):
        """Monitor an OSBS build to completion and collect its results.

        Streams incremental logs from a forked child process, waits for the
        build to finish, and translates the OSBS outcome into either a
        container-data dict or a ContainerError/ContainerCancelled.

        :param build_response: osbs-client build response object
        :param arch: architecture for single-arch (non-orchestrator) builds
        :returns: dict with task_id, osbs_build_id, repositories, etc.
        :raises ContainerCancelled: if OSBS cancelled the image build
        :raises ContainerError: if the image build failed
        """
        build_id = build_response.get_build_name()
        self.logger.debug("OSBS build id: %r", build_id)

        # When builds are cancelled the builder plugin process gets SIGINT and SIGKILL
        # If osbs has started a build it should get cancelled
        def sigint_handler(*args, **kwargs):
            if not build_id:
                return

            self.logger.warning("Cannot read logs, cancelling build %s", build_id)
            self.osbs().cancel_build(build_id)

        signal.signal(signal.SIGINT, sigint_handler)

        self.logger.debug("Waiting for osbs build_id: %s to be scheduled.",
                          build_id)
        # we need to wait for kubelet to schedule the build, otherwise it's 500
        self.osbs().wait_for_build_to_get_scheduled(build_id)
        self.logger.debug("Build was scheduled")

        osbs_logs_dir = self.resultdir()
        koji.ensuredir(osbs_logs_dir)
        # fork: the parent uploads log files incrementally while the child
        # streams them from OSBS into osbs_logs_dir
        pid = os.fork()
        if pid:
            try:
                self._incremental_upload_logs(pid)
            except koji.ActionNotAllowed:
                pass
        else:
            # child process: drop the inherited OSBS client handle
            self._osbs = None

            try:
                self._write_incremental_logs(build_id, osbs_logs_dir)
            except Exception as error:
                self.logger.info("Error while saving incremental logs: %s", error)
                # exit the child without running parent cleanup handlers
                os._exit(1)
            os._exit(0)

        response = self.osbs().wait_for_build_to_finish(build_id)
        if response.is_succeeded():
            self.upload_build_annotations(response)

        self.logger.debug("OSBS build finished with status: %s. Build "
                          "response: %s.", response.status,
                          response.json)

        self.logger.info("Response status: %r", response.is_succeeded())

        if response.is_cancelled():
            self.session.cancelTask(self.id)
            raise ContainerCancelled('Image build was cancelled by OSBS.')

        elif response.is_failed():
            # include the OSBS error message when one is available
            error_message = self._get_error_message(response)
            if error_message:
                raise ContainerError('Image build failed. %s. OSBS build id: %s' %
                                     (error_message, build_id))
            else:
                raise ContainerError('Image build failed. OSBS build id: %s' %
                                     build_id)

        repositories = []
        if response.is_succeeded():
            repositories = self._get_repositories(response)

        self.logger.info("Image available in the following repositories: %r",
                         repositories)

        koji_build_id = None
        if response.is_succeeded():
            koji_build_id = self._get_koji_build_id(response)

        self.logger.info("Koji content generator build ID: %s", koji_build_id)

        containerdata = {
            'task_id': self.id,
            'osbs_build_id': build_id,
            'files': [],
            'repositories': repositories,
            'koji_build_id': koji_build_id,
        }
        if arch:
            containerdata['arch'] = arch

        return containerdata
Exemple #27
0
    def setUp(self):
        """Build a throw-away koji topdir with a fake dist-repo layout.

        Creates the repo/arch/upload directories, writes dummy repo files
        and signed rpms, generates a pkglist plus an rpm_id->key sigmap,
        and patches the hub lookup calls to serve the generated fixtures.
        """
        self.topdir = tempfile.mkdtemp()
        # repo info in the shape kojihub.repo_info returns it
        self.rinfo = {
            'create_event': 2915,
            'create_ts': 1487256924.72718,
            'creation_time': '2017-02-16 14:55:24.727181',
            'id': 47,
            'state': 1,
            'tag_id': 2,
            'tag_name': 'my-tag'
        }
        self.arch = 'x86_64'

        # set up a fake koji topdir
        # koji.pathinfo._topdir = self.topdir
        mock.patch('koji.pathinfo._topdir', new=self.topdir).start()
        repodir = koji.pathinfo.distrepo(self.rinfo['id'],
                                         self.rinfo['tag_name'])
        archdir = "%s/%s" % (repodir, koji.canonArch(self.arch))
        os.makedirs(archdir)
        self.uploadpath = 'UNITTEST'
        workdir = koji.pathinfo.work()
        uploaddir = "%s/%s" % (workdir, self.uploadpath)
        os.makedirs(uploaddir)

        # place some test files (content is just the filename; tests only
        # care about the resulting layout, not the bytes)
        self.files = ['foo.drpm', 'repomd.xml']
        self.expected = ['x86_64/drpms/foo.drpm', 'x86_64/repodata/repomd.xml']
        for fn in self.files:
            path = os.path.join(uploaddir, fn)
            koji.ensuredir(os.path.dirname(path))
            with open(path, 'w') as fo:
                fo.write('%s' % fn)

        # generate pkglist file and sigmap
        self.files.append('pkglist')
        plist = os.path.join(uploaddir, 'pkglist')
        nvrs = ['aaa-1.0-2', 'bbb-3.0-5', 'ccc-8.0-13', 'ddd-21.0-34']
        self.sigmap = []
        self.rpms = {}
        self.builds = {}
        self.key = '4c8da725'
        with open(plist, 'w') as f_pkglist:
            for nvr in nvrs:
                binfo = koji.parse_NVR(nvr)
                rpminfo = binfo.copy()
                rpminfo['arch'] = 'x86_64'
                builddir = koji.pathinfo.build(binfo)
                relpath = koji.pathinfo.signed(rpminfo, self.key)
                path = os.path.join(builddir, relpath)
                koji.ensuredir(os.path.dirname(path))
                basename = os.path.basename(path)
                with open(path, 'w') as fo:
                    fo.write('%s' % basename)
                f_pkglist.write(path)
                f_pkglist.write('\n')
                # expected repo layout: <arch>/<first char of name>/<basename>
                self.expected.append('x86_64/%s/%s' % (basename[0], basename))
                # synthetic, deterministic ids: builds from 10000, rpms from 20000
                build_id = len(self.builds) + 10000
                rpm_id = len(self.rpms) + 20000
                binfo['id'] = build_id
                rpminfo['build_id'] = build_id
                rpminfo['id'] = rpm_id
                self.builds[build_id] = binfo
                self.rpms[rpm_id] = rpminfo
                self.sigmap.append([rpm_id, self.key])

        # mocks: serve the fixtures above instead of hitting the hub db
        self.repo_info = mock.patch('kojihub.repo_info').start()
        self.repo_info.return_value = self.rinfo.copy()
        self.get_rpm = mock.patch('kojihub.get_rpm').start()
        self.get_build = mock.patch('kojihub.get_build').start()
        self.get_rpm.side_effect = self.our_get_rpm
        self.get_build.side_effect = self.our_get_build
    def setUp(self):
        """Build a throw-away koji topdir with a fake dist-repo in INIT state.

        Creates the repo/arch/upload directories, writes dummy repo files
        and signed rpms, generates a pkglist, a kojipkgs json mapping and a
        repo_manifest, and patches the hub lookup calls to serve them.
        """
        self.topdir = tempfile.mkdtemp()
        # repo info in the shape kojihub.repo_info returns it
        self.rinfo = {
            'create_event': 2915,
            'create_ts': 1487256924.72718,
            'creation_time': '2017-02-16 14:55:24.727181',
            'id': 47,
            'state': 0,  # INIT
            'tag_id': 2,
            'tag_name': 'my-tag'}
        self.arch = 'x86_64'

        # set up a fake koji topdir
        # koji.pathinfo._topdir = self.topdir
        mock.patch('koji.pathinfo._topdir', new=self.topdir).start()
        repodir = koji.pathinfo.distrepo(self.rinfo['id'], self.rinfo['tag_name'])
        archdir = "%s/%s" % (repodir, koji.canonArch(self.arch))
        os.makedirs(archdir)
        self.uploadpath = 'UNITTEST'
        workdir = koji.pathinfo.work()
        uploaddir = "%s/%s" % (workdir, self.uploadpath)
        os.makedirs(uploaddir)

        # place some test files (content is just the basename; tests only
        # care about the resulting layout, not the bytes)
        self.files = ['drpms/foo.drpm', 'repodata/repomd.xml']
        self.expected = ['x86_64/drpms/foo.drpm', 'x86_64/repodata/repomd.xml']
        for fn in self.files:
            path = os.path.join(uploaddir, fn)
            koji.ensuredir(os.path.dirname(path))
            with open(path, 'w') as fo:
                fo.write('%s' % os.path.basename(fn))

        # generate pkglist file
        self.files.append('pkglist')
        plist = os.path.join(uploaddir, 'pkglist')
        nvrs = ['aaa-1.0-2', 'bbb-3.0-5', 'ccc-8.0-13','ddd-21.0-34']
        self.rpms = {}
        self.builds ={}
        self.key = '4c8da725'
        with open(plist, 'w') as f_pkglist:
            for nvr in nvrs:
                binfo = koji.parse_NVR(nvr)
                rpminfo = binfo.copy()
                rpminfo['arch'] = 'x86_64'
                builddir = koji.pathinfo.build(binfo)
                relpath = koji.pathinfo.signed(rpminfo, self.key)
                path = os.path.join(builddir, relpath)
                koji.ensuredir(os.path.dirname(path))
                basename = os.path.basename(path)
                with open(path, 'w') as fo:
                    fo.write('%s' % basename)
                f_pkglist.write(path)
                f_pkglist.write('\n')
                # expected layout: <arch>/Packages/<first char>/<basename>
                self.expected.append('x86_64/Packages/%s/%s' % (basename[0], basename))
                # synthetic, deterministic ids: builds from 10000, rpms from 20000
                build_id = len(self.builds) + 10000
                rpm_id = len(self.rpms) + 20000
                binfo['id'] = build_id
                rpminfo['build_id'] = build_id
                rpminfo['id'] = rpm_id
                rpminfo['sigkey'] = self.key
                rpminfo['size'] = 1024
                rpminfo['payloadhash'] = 'helloworld'
                self.builds[build_id] = binfo
                self.rpms[rpm_id] = rpminfo

        # write kojipkgs: rpm basename -> rpminfo, serialized as json
        kojipkgs = {}
        for rpminfo in self.rpms.values():
            bnp = '%(name)s-%(version)s-%(release)s.%(arch)s.rpm' % rpminfo
            kojipkgs[bnp] = rpminfo
        with open("%s/kojipkgs" % uploaddir, "w") as fp:
            json.dump(kojipkgs, fp, indent=4)
        self.files.append('kojipkgs')

        # write manifest listing every uploaded file
        with open("%s/repo_manifest" % uploaddir, "w") as fp:
            json.dump(self.files, fp, indent=4)

        # mocks: serve the fixtures above instead of hitting the hub db
        self.repo_info = mock.patch('kojihub.repo_info').start()
        self.repo_info.return_value = self.rinfo.copy()
        self.get_rpm = mock.patch('kojihub.get_rpm').start()
        self.get_build = mock.patch('kojihub.get_build').start()
        self.get_rpm.side_effect = self.our_get_rpm
        self.get_build.side_effect = self.our_get_build
Exemple #29
0
def download_file(url,
                  relpath,
                  quiet=False,
                  noprogress=False,
                  size=None,
                  num=None,
                  filesize=None):
    """Download files from remote

    :param str url: URL to be downloaded
    :param str relpath: where to save it
    :param bool quiet: no/verbose
    :param bool noprogress: print progress bar
    :param int size: total number of files being downloaded (printed in verbose
                     mode)
    :param int num: download index (printed in verbose mode)
    :param int filesize: expected file size, used for appending to file, no
                         other checks are performed, caller is responsible for
                         checking, that resulting file is valid."""

    if '/' in relpath:
        koji.ensuredir(os.path.dirname(relpath))
    if not quiet:
        if size and num:
            print("Downloading [%d/%d]: %s" % (num, size, relpath))
        else:
            print("Downloading: %s" % relpath)

    pos = 0
    headers = {}
    if filesize:
        # append to any partial download already on disk
        f = open(relpath, 'ab')
        pos = f.tell()
        if pos:
            if filesize == pos:
                # nothing left to fetch
                if not quiet:
                    print("File %s already downloaded, skipping" % relpath)
                f.close()  # BUGFIX: don't leak the handle on early return
                return
            if not quiet:
                print("Appending to existing file %s" % relpath)
            headers['Range'] = ('bytes=%d-' % pos)
    else:
        # rewrite
        f = open(relpath, 'wb')

    try:
        # closing needs to be used for requests < 2.18.0
        with closing(requests.get(url, headers=headers,
                                  stream=True)) as response:
            if response.status_code in (
                    200, 416):  # full content provided or reaching behind EOF
                # server ignored our Range (200) or the range was
                # unsatisfiable (416) - restart the file from scratch
                f.close()
                f = open(relpath, 'wb')
                pos = 0  # BUGFIX: reset position after truncating, otherwise
                         # progress totals and the empty-file cleanup are wrong
            response.raise_for_status()
            length = filesize or int(
                response.headers.get('content-length') or 0)
            for chunk in response.iter_content(chunk_size=1024**2):
                pos += len(chunk)
                f.write(chunk)
                if not (quiet or noprogress):
                    _download_progress(length, pos, filesize)
            if not length and not (quiet or noprogress):
                _download_progress(pos, pos, filesize)
    finally:
        f.close()
        if pos == 0:
            # nothing was downloaded, e.g file not found
            os.unlink(relpath)

    if not (quiet or noprogress):
        print('')
    def createContainer(self, src=None, target_info=None, arches=None,
                        scratch=None, yum_repourls=[], branch=None, push_url=None,
                        flatpak=False, module=None):
        if not yum_repourls:
            yum_repourls = []

        this_task = self.session.getTaskInfo(self.id)
        self.logger.debug("This task: %r", this_task)
        owner_info = self.session.getUser(this_task['owner'])
        self.logger.debug("Started by %s", owner_info['name'])

        scm = My_SCM(src)
        scm.assert_allowed(self.options.allowed_scms)
        git_uri = scm.get_git_uri()
        component = scm.get_component()
        arch = None

        if not arches:
            raise ContainerError("arches aren't specified")

        create_build_args = {
            'git_uri': git_uri,
            'git_ref': scm.revision,
            'user': owner_info['name'],
            'component': component,
            'target': target_info['name'],
            'yum_repourls': yum_repourls,
            'scratch': scratch,
            'koji_task_id': self.id,
            'architecture': arch
        }
        if branch:
            create_build_args['git_branch'] = branch
        if push_url:
            create_build_args['git_push_url'] = push_url
        if flatpak:
            create_build_args['flatpak'] = True
        if module:
            create_build_args['module'] = module

        try:
            create_method = self.osbs().create_orchestrator_build
            self.logger.debug("Starting %s with params: '%s, platforms:%s'",
                              create_method, create_build_args, arches)
            build_response = create_method(platforms=arches, **create_build_args)
        except (AttributeError, OsbsValidationException):
            # Older osbs-client, or else orchestration not enabled
            create_build_args['architecture'] = arch = arches[0]
            create_method = self.osbs().create_build
            self.logger.debug("Starting %s with params: '%s'",
                              create_method, create_build_args)
            build_response = create_method(**create_build_args)

        build_id = build_response.get_build_name()
        self.logger.debug("OSBS build id: %r", build_id)

        # When builds are cancelled the builder plugin process gets SIGINT and SIGKILL
        # If osbs has started a build it should get cancelled
        def sigint_handler(*args, **kwargs):
            if not build_id:
                return

            self.logger.warn("Cannot read logs, cancelling build %s", build_id)
            self.osbs().cancel_build(build_id)

        signal.signal(signal.SIGINT, sigint_handler)

        self.logger.debug("Waiting for osbs build_id: %s to be scheduled.",
                          build_id)
        # we need to wait for kubelet to schedule the build, otherwise it's 500
        self.osbs().wait_for_build_to_get_scheduled(build_id)
        self.logger.debug("Build was scheduled")

        osbs_logs_dir = self.resultdir()
        koji.ensuredir(osbs_logs_dir)
        pid = os.fork()
        if pid:
            try:
                self._incremental_upload_logs(pid)
            except koji.ActionNotAllowed:
                pass
        else:
            full_output_name = os.path.join(osbs_logs_dir,
                                            'openshift-incremental.log')

            # Make sure curl is initialized again otherwise connections via SSL
            # fails with NSS error -8023 and curl_multi.info_read()
            # returns error code 35 (SSL CONNECT failed).
            # See http://permalink.gmane.org/gmane.comp.web.curl.library/38759
            self._osbs = None
            self.logger.debug("Running pycurl global cleanup")
            pycurl.global_cleanup()

            # Following retry code is here mainly to workaround bug which causes
            # connection drop while reading logs after about 5 minutes.
            # OpenShift bug with description:
            # https://github.com/openshift/origin/issues/2348
            # and upstream bug in Kubernetes:
            # https://github.com/GoogleCloudPlatform/kubernetes/issues/9013
            retry = 0
            max_retries = 30
            while retry < max_retries:
                try:
                    self._write_incremental_logs(build_id,
                                                 full_output_name)
                except Exception, error:
                    self.logger.info("Error while saving incremental logs "
                                     "(retry #%d): %s", retry, error)
                    retry += 1
                    time.sleep(10)
                    continue
                break
            else:
    def createContainer(self, src=None, target_info=None, arches=None,
                        scratch=None, isolated=None, yum_repourls=None,
                        branch=None, push_url=None, koji_parent_build=None,
                        release=None, flatpak=False, signing_intent=None,
                        compose_ids=None):
        """Create a container build in OSBS and wait for it to finish.

        :param str src: SCM URL of the container sources
        :param dict target_info: build target info; 'name' is passed to OSBS
        :param list arches: architectures to build for (required)
        :param bool scratch: whether this is a scratch build
        :param bool isolated: whether this is an isolated build
        :param list yum_repourls: extra yum repo urls passed to OSBS
                                  (mutually exclusive with compose_ids)
        :param str branch: git branch to build from
        :param str push_url: git push url for the sources
        :param str koji_parent_build: parent build passed to the orchestrator
        :param str release: explicit release value for the orchestrator
        :param bool flatpak: build a flatpak instead of a plain container
        :param str signing_intent: ODCS signing intent (mutually exclusive
                                   with compose_ids)
        :param list compose_ids: ODCS compose ids
        :returns: dict with arch, task_id, osbs_build_id, files,
                  repositories and koji_build_id
        :raises koji.BuildError: on missing arches or invalid option combos
        :raises ContainerCancelled: when the build is cancelled by OSBS
        :raises ContainerError: when the build fails
        """
        # BUGFIX: default was a shared mutable list ([]); None is normalized
        # here, so behavior for every caller is unchanged
        if not yum_repourls:
            yum_repourls = []

        this_task = self.session.getTaskInfo(self.id)
        self.logger.debug("This task: %r", this_task)
        owner_info = self.session.getUser(this_task['owner'])
        self.logger.debug("Started by %s", owner_info['name'])

        scm = My_SCM(src)
        scm.assert_allowed(self.options.allowed_scms)
        git_uri = scm.get_git_uri()
        component = scm.get_component()
        # arch stays None on the orchestrator path; the fallback path below
        # sets it to the first requested arch
        arch = None

        if not arches:
            raise koji.BuildError("arches aren't specified")

        if signing_intent and compose_ids:
            raise koji.BuildError("signing_intent used with compose_ids")

        if compose_ids and yum_repourls:
            raise koji.BuildError("compose_ids used with repo_url")

        create_build_args = {
            'git_uri': git_uri,
            'git_ref': scm.revision,
            'user': owner_info['name'],
            'component': component,
            'target': target_info['name'],
            'yum_repourls': yum_repourls,
            'scratch': scratch,
            'koji_task_id': self.id,
            'architecture': arch,
        }
        if branch:
            create_build_args['git_branch'] = branch
        if push_url:
            create_build_args['git_push_url'] = push_url
        if flatpak:
            create_build_args['flatpak'] = True

        try:
            # prefer the orchestrator API; extra options are only understood
            # by that code path
            orchestrator_create_build_args = create_build_args.copy()
            orchestrator_create_build_args['platforms'] = arches
            if signing_intent:
                orchestrator_create_build_args['signing_intent'] = signing_intent
            if compose_ids:
                orchestrator_create_build_args['compose_ids'] = compose_ids
            if koji_parent_build:
                orchestrator_create_build_args['koji_parent_build'] = koji_parent_build
            if isolated:
                orchestrator_create_build_args['isolated'] = isolated
            if release:
                orchestrator_create_build_args['release'] = release

            create_method = self.osbs().create_orchestrator_build
            self.logger.debug("Starting %s with params: '%s",
                              create_method, orchestrator_create_build_args)
            build_response = create_method(**orchestrator_create_build_args)
        except (AttributeError, OsbsOrchestratorNotEnabled):
            # Older osbs-client, or else orchestration not enabled
            create_build_args['architecture'] = arch = arches[0]
            create_method = self.osbs().create_build
            self.logger.debug("Starting %s with params: '%s'",
                              create_method, create_build_args)
            build_response = create_method(**create_build_args)

        build_id = build_response.get_build_name()
        self.logger.debug("OSBS build id: %r", build_id)

        # When builds are cancelled the builder plugin process gets SIGINT and SIGKILL
        # If osbs has started a build it should get cancelled
        def sigint_handler(*args, **kwargs):
            if not build_id:
                return

            # logger.warn is a deprecated alias; use warning (consistent with
            # handle_build_response)
            self.logger.warning("Cannot read logs, cancelling build %s", build_id)
            self.osbs().cancel_build(build_id)

        signal.signal(signal.SIGINT, sigint_handler)

        self.logger.debug("Waiting for osbs build_id: %s to be scheduled.",
                          build_id)
        # we need to wait for kubelet to schedule the build, otherwise it's 500
        self.osbs().wait_for_build_to_get_scheduled(build_id)
        self.logger.debug("Build was scheduled")

        osbs_logs_dir = self.resultdir()
        koji.ensuredir(osbs_logs_dir)
        # Fork: the child streams incremental logs to disk while the parent
        # uploads them and waits for the build to finish.
        pid = os.fork()
        if pid:
            try:
                self._incremental_upload_logs(pid)
            except koji.ActionNotAllowed:
                pass
        else:
            # child: drop the cached OSBS client so a fresh one is created
            self._osbs = None

            try:
                self._write_incremental_logs(build_id, osbs_logs_dir)
            except Exception as error:
                self.logger.info("Error while saving incremental logs: %s", error)
                os._exit(1)
            # child never returns to the caller
            os._exit(0)

        response = self.osbs().wait_for_build_to_finish(build_id)

        self.logger.debug("OSBS build finished with status: %s. Build "
                          "response: %s.", response.status,
                          response.json)

        self.logger.info("Response status: %r", response.is_succeeded())

        if response.is_cancelled():
            self.session.cancelTask(self.id)
            raise ContainerCancelled(
                'Image build was cancelled by OSBS, maybe by automated rebuild.'
            )

        elif response.is_failed():
            error_message = self._get_error_message(response)
            if error_message:
                raise ContainerError('Image build failed. %s. OSBS build id: %s' %
                                     (error_message, build_id))
            else:
                raise ContainerError('Image build failed. OSBS build id: %s' %
                                     build_id)

        repositories = []
        if response.is_succeeded():
            repositories = self._get_repositories(response)

        self.logger.info("Image available in the following repositories: %r",
                         repositories)

        koji_build_id = None
        if response.is_succeeded():
            koji_build_id = self._get_koji_build_id(response)

        self.logger.info("Koji content generator build ID: %s", koji_build_id)

        containerdata = {
            'arch': arch,
            'task_id': self.id,
            'osbs_build_id': build_id,
            'files': [],
            'repositories': repositories,
            'koji_build_id': koji_build_id,
        }

        return containerdata
    def createContainer(self, src=None, target_info=None, arches=None,
                        scratch=None, isolated=None, yum_repourls=[],
                        branch=None, push_url=None, koji_parent_build=None,
                        release=None, flatpak=False, module=None, signing_intent=None,
                        compose_ids=None):
        if not yum_repourls:
            yum_repourls = []

        this_task = self.session.getTaskInfo(self.id)
        self.logger.debug("This task: %r", this_task)
        owner_info = self.session.getUser(this_task['owner'])
        self.logger.debug("Started by %s", owner_info['name'])

        scm = My_SCM(src)
        scm.assert_allowed(self.options.allowed_scms)
        git_uri = scm.get_git_uri()
        component = scm.get_component()
        arch = None

        if not arches:
            raise koji.BuildError("arches aren't specified")

        if signing_intent and compose_ids:
            raise koji.BuildError("signing_intent used with compose_ids")

        if compose_ids and yum_repourls:
            raise koji.BuildError("compose_ids used with repo_url")

        create_build_args = {
            'git_uri': git_uri,
            'git_ref': scm.revision,
            'user': owner_info['name'],
            'component': component,
            'target': target_info['name'],
            'yum_repourls': yum_repourls,
            'scratch': scratch,
            'koji_task_id': self.id,
            'architecture': arch,
        }
        if branch:
            create_build_args['git_branch'] = branch
        if push_url:
            create_build_args['git_push_url'] = push_url
        if flatpak:
            create_build_args['flatpak'] = True
        if module:
            create_build_args['module'] = module

        try:
            orchestrator_create_build_args = create_build_args.copy()
            orchestrator_create_build_args['platforms'] = arches
            if signing_intent:
                orchestrator_create_build_args['signing_intent'] = signing_intent
            if compose_ids:
                orchestrator_create_build_args['compose_ids'] = compose_ids
            if koji_parent_build:
                orchestrator_create_build_args['koji_parent_build'] = koji_parent_build
            if isolated:
                orchestrator_create_build_args['isolated'] = isolated
            if release:
                orchestrator_create_build_args['release'] = release

            create_method = self.osbs().create_orchestrator_build
            self.logger.debug("Starting %s with params: '%s",
                              create_method, orchestrator_create_build_args)
            build_response = create_method(**orchestrator_create_build_args)
        except (AttributeError, OsbsOrchestratorNotEnabled):
            # Older osbs-client, or else orchestration not enabled
            create_build_args['architecture'] = arch = arches[0]
            create_method = self.osbs().create_build
            self.logger.debug("Starting %s with params: '%s'",
                              create_method, create_build_args)
            build_response = create_method(**create_build_args)

        build_id = build_response.get_build_name()
        self.logger.debug("OSBS build id: %r", build_id)

        # When builds are cancelled the builder plugin process gets SIGINT and SIGKILL
        # If osbs has started a build it should get cancelled
        def sigint_handler(*args, **kwargs):
            if not build_id:
                return

            self.logger.warn("Cannot read logs, cancelling build %s", build_id)
            self.osbs().cancel_build(build_id)

        signal.signal(signal.SIGINT, sigint_handler)

        self.logger.debug("Waiting for osbs build_id: %s to be scheduled.",
                          build_id)
        # we need to wait for kubelet to schedule the build, otherwise it's 500
        self.osbs().wait_for_build_to_get_scheduled(build_id)
        self.logger.debug("Build was scheduled")

        osbs_logs_dir = self.resultdir()
        koji.ensuredir(osbs_logs_dir)
        pid = os.fork()
        if pid:
            try:
                self._incremental_upload_logs(pid)
            except koji.ActionNotAllowed:
                pass
        else:
            self._osbs = None

            # Following retry code is here mainly to workaround bug which causes
            # connection drop while reading logs after about 5 minutes.
            # OpenShift bug with description:
            # https://github.com/openshift/origin/issues/2348
            # and upstream bug in Kubernetes:
            # https://github.com/GoogleCloudPlatform/kubernetes/issues/9013
            retry = 0
            max_retries = 30
            while retry < max_retries:
                try:
                    self._write_incremental_logs(build_id, osbs_logs_dir)
                except Exception, error:
                    self.logger.info("Error while saving incremental logs "
                                     "(retry #%d): %s", retry, error)
                    retry += 1
                    time.sleep(10)
                    continue
                break
            else:
    def setUp(self):
        """Prepare a scratch koji topdir holding a fake dist-repo fixture.

        Lays out the repo/arch/upload directories, writes dummy repo files
        and signed rpms, produces the pkglist, kojipkgs and repo_manifest
        files, and patches hub lookups to answer from these fixtures.
        """
        self.topdir = tempfile.mkdtemp()
        self.rinfo = {
            'create_event': 2915,
            'create_ts': 1487256924.72718,
            'creation_time': '2017-02-16 14:55:24.727181',
            'id': 47,
            'state': 0,  # INIT
            'tag_id': 2,
            'tag_name': 'my-tag',
        }
        self.arch = 'x86_64'

        # point koji.pathinfo at the scratch topdir
        mock.patch('koji.pathinfo._topdir', new=self.topdir).start()
        repodir = koji.pathinfo.distrepo(self.rinfo['id'],
                                         self.rinfo['tag_name'])
        os.makedirs(os.path.join(repodir, koji.canonArch(self.arch)))
        self.uploadpath = 'UNITTEST'
        uploaddir = os.path.join(koji.pathinfo.work(), self.uploadpath)
        os.makedirs(uploaddir)

        # repo metadata files; content is just the basename
        self.files = ['drpms/foo.drpm', 'repodata/repomd.xml']
        self.expected = ['x86_64/drpms/foo.drpm', 'x86_64/repodata/repomd.xml']
        for relname in self.files:
            fpath = os.path.join(uploaddir, relname)
            koji.ensuredir(os.path.dirname(fpath))
            with open(fpath, 'w') as fobj:
                fobj.write('%s' % os.path.basename(relname))

        # generate the pkglist plus per-rpm bookkeeping
        self.files.append('pkglist')
        plist = os.path.join(uploaddir, 'pkglist')
        self.rpms = {}
        self.builds = {}
        self.key = '4c8da725'
        nvr_list = ['aaa-1.0-2', 'bbb-3.0-5', 'ccc-8.0-13', 'ddd-21.0-34']
        with open(plist, 'w') as f_pkglist:
            for idx, nvr in enumerate(nvr_list):
                binfo = koji.parse_NVR(nvr)
                rpminfo = dict(binfo, arch='x86_64')
                signed_path = os.path.join(
                    koji.pathinfo.build(binfo),
                    koji.pathinfo.signed(rpminfo, self.key))
                koji.ensuredir(os.path.dirname(signed_path))
                basename = os.path.basename(signed_path)
                with open(signed_path, 'w') as fobj:
                    fobj.write('%s' % basename)
                f_pkglist.write(signed_path + '\n')
                self.expected.append('x86_64/Packages/%s/%s' %
                                     (basename[0], basename))
                # deterministic ids: builds from 10000, rpms from 20000
                build_id = 10000 + idx
                rpm_id = 20000 + idx
                binfo['id'] = build_id
                rpminfo.update(build_id=build_id, id=rpm_id,
                               sigkey=self.key, size=1024,
                               payloadhash='helloworld')
                self.builds[build_id] = binfo
                self.rpms[rpm_id] = rpminfo

        # kojipkgs: rpm basename -> rpminfo, serialized as json
        kojipkgs = {}
        for rpminfo in self.rpms.values():
            bnp = '%(name)s-%(version)s-%(release)s.%(arch)s.rpm' % rpminfo
            kojipkgs[bnp] = rpminfo
        with open(os.path.join(uploaddir, 'kojipkgs'), 'w') as fp:
            json.dump(kojipkgs, fp, indent=4)
        self.files.append('kojipkgs')

        # manifest listing every uploaded file
        with open(os.path.join(uploaddir, 'repo_manifest'), 'w') as fp:
            json.dump(self.files, fp, indent=4)

        # hub call mocks answering from the fixtures above
        self.repo_info = mock.patch('kojihub.repo_info').start()
        self.repo_info.return_value = self.rinfo.copy()
        self.get_rpm = mock.patch('kojihub.get_rpm').start()
        self.get_build = mock.patch('kojihub.get_build').start()
        self.get_rpm.side_effect = self.our_get_rpm
        self.get_build.side_effect = self.our_get_build