Example #1
def get_build_option(optname, cli_input=None, family=None, default=None):
    """Determine build option value.

    Input from the command line takes precedence. Otherwise an existing
    family-specific option value from the configuration is used, falling
    back to the non-family-specific option. If none of these is
    available, ``default`` is returned.

    Parameters
    ----------
    optname : str
      Option name
    cli_input
      Potential input from a corresponding command line option
    family : str
      Optional build family identifier
    default :
      Value to return if no information is available
    """
    from bigmess import cfg
    if cli_input is not None:
        # got something meaningful as a commandline arg -- go with it
        return cli_input
    if family is not None and cfg.has_option('build', '%s %s' % (family, optname)):
        return cfg.get('build', '%s %s' % (family, optname))
    if cfg.has_option('build', optname):
        return cfg.get('build', optname)
    return default
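
# Usage sketch for get_build_option -- the config below is hypothetical,
# assuming ``cfg`` is the ConfigParser-style configuration with a [build]
# section such as:
#
#   [build]
#   architectures = amd64
#   debian architectures = amd64 i386
#
# get_build_option('architectures', family='debian')
#   -> 'amd64 i386'   # family-specific option wins over the generic one
# get_build_option('architectures', cli_input='sparc', family='debian')
#   -> 'sparc'        # command line input takes precedence
# get_build_option('release', family='debian', default='stable')
#   -> 'stable'       # nothing configured at all, the default is returned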
Example #2
def run(args):
    if args.chroot_basedir is None:
        args.chroot_basedir = cfg.get('build', 'chroot basedir',
                                      default=opj(xdg.BaseDirectory.xdg_data_home,
                                                  'bigmess', 'chroots'))
        lgr.debug("using chroot base directory at '%s'" % args.chroot_basedir)
    if args.builder is None:
        args.builder = cfg.get('build', 'builder', default='pbuilder')
        lgr.debug("using '%s' for updating" % args.builder)

    family, codename = args.env
    cmd_opts = []

    if args.mount is not None:
        cmd_opts += ['--bindmounts'] + args.mount
    chroot_target = opj(args.chroot_basedir,
                        '%s-%s-%s' % (family, codename, args.arch))
    if args.builder == 'pbuilder':
        cmd_opts += ['--basetgz', '%s.tar.gz' % chroot_target]
    elif args.builder == 'cowbuilder':
        cmd_opts += ['--basepath', chroot_target]
    else:
        raise ValueError("unknown builder '%s'" % args.builder)

    if args.script:
        cmd_opts = ['--execute'] + cmd_opts + ['--'] + args.script
    else:
        cmd_opts = ['--login'] + cmd_opts

    ret = subprocess.call(['sudo', args.builder] + cmd_opts)
    if ret:
        raise RuntimeError("running failed (cmd: '%s'; exit code: %s)"
                           % ('%s %s' % (args.builder, ' '.join(cmd_opts)),
                              ret))
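
# For illustration: with args.env ('debian', 'wheezy'), args.arch 'amd64',
# no script and the default pbuilder builder, the call above boils down to
# (chroot base directory assumed at the XDG default):
#
#   sudo pbuilder --login \
#       --basetgz ~/.local/share/bigmess/chroots/debian-wheezy-amd64.tar.gz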
Example #3
def run(args):
    from jinja2 import Environment, PackageLoader, FileSystemLoader

    mirror2name = {}
    mirror2url = {}
    code2relname = dict([(r, cfg.get('release names', r))
                         for r in cfg.options('release names')
                         if r != 'data'])
    if cfg.has_section('mirror names'):
        mirror2name = dict([(m, codecs.decode(cfg.get('mirror names', m), 'utf-8'))
                            for m in cfg.options('mirror names')])
    if cfg.has_section('mirrors'):
        mirror2url = dict([(m, cfg.get('mirrors', m))
                           for m in cfg.options('mirrors')])
    if args.template is not None:
        templ_dir = os.path.dirname(args.template)
        templ_basename = os.path.basename(args.template)
        jinja_env = Environment(loader=FileSystemLoader(templ_dir))
        srclist_template = jinja_env.get_template(templ_basename)
    else:
        jinja_env = Environment(loader=PackageLoader('bigmess'))
        srclist_template = jinja_env.get_template('sources_lists.rst')
    print codecs.encode(
        srclist_template.render(code2name=code2relname,
                                mirror2name=mirror2name,
                                mirror2url=mirror2url),
        'utf-8')
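
# The template only needs the three mappings passed to render(); a
# hypothetical minimal sources_lists.rst could look like:
#
#   {% for code, name in code2name.items() %}
#   {{ name }}
#   {% for mirror, url in mirror2url.items() %}
#   - deb {{ url }} {{ code }} main
#   {% endfor %}
#   {% endfor %}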
Example #4
def _proc_env(family, codename, args):
    builder = args.builder
    if builder is None:
        builder = cfg.get('build', 'builder', default='pbuilder')
    lgr.debug("using '%s' for updating" % builder)
    chroot_basedir = get_dir_cfg('chroot basedir', args.chroot_basedir,
                                 family,
                                 default=opj(xdg.BaseDirectory.xdg_data_home,
                                             'bigmess', 'chroots'))
    lgr.debug("using chroot base directory at '%s'" % chroot_basedir)
    aptcache = args.aptcache
    if aptcache is None:
        aptcache = cfg.get('build', '%s aptcache' % family, default='')
    if aptcache:
        lgr.debug("using local apt cache at '%s'" % aptcache)
    else:
        lgr.debug("no local apt cache in use")

    cmd_opts = [
        '--update',
        '--aptcache', aptcache,
    ]

    keyring = get_path_cfg('keyring', None, family)
    if keyring is not None:
        cmd_opts += ['--keyring', keyring]
    mirror = get_path_cfg('mirror', None, family)
    if mirror is not None:
        cmd_opts += ['--mirror', mirror]
    othermirror = get_path_cfg('othermirror', None, family)
    if othermirror is not None:
        cmd_opts += ['--othermirror', othermirror % dict(release=codename)]

    archs = args.arch
    if archs is None:
        if not cfg.has_option('build', '%s architectures' % family):
            raise ValueError("no architectures specified, use --arch or add to configuration file")
        archs = cfg.get('build', '%s architectures' % family).split()

    for arch in archs:
        lgr.debug("started updating architecture '%s'" % arch)
        chroot_target = opj(chroot_basedir,
                            '%s-%s-%s' % (family, codename, arch))
        # assemble per-architecture options on a copy, so they do not
        # accumulate across loop iterations
        if builder == 'pbuilder':
            arch_opts = cmd_opts + ['--basetgz', '%s.tar.gz' % chroot_target]
        elif builder == 'cowbuilder':
            arch_opts = cmd_opts + ['--basepath', chroot_target]
        else:
            raise ValueError("unknown builder '%s'" % builder)
        ret = subprocess.call(['sudo', builder] + arch_opts)
        if ret:
            raise RuntimeError("updating failed (cmd: '%s'; exit code: %s)"
                               % ('%s %s' % (builder, ' '.join(arch_opts)),
                                  ret))
        lgr.debug("finished updating architecture '%s'" % arch)
Example #5
def run(args):
    query = args.query
    if len(query) < 1:
        # print the whole thing
        cfg.write(sys.stdout)
    elif len(query) < 2:
        # print an entire section
        for item in cfg.items(query[0]):
            print '%s = %s' % item
    else:
        # print just one item
        print cfg.get(query[0], query[1])
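
# Hypothetical invocations of this subcommand (executable and subcommand
# names assumed):
#
#   $ bigmess cfg                   # no query: dump the entire config
#   $ bigmess cfg build             # one term: print the [build] section
#   $ bigmess cfg build builder     # two terms: print a single value
#   pbuilder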
Example #6
def _find_release_origin_archive(cfg, release):
    # collect all origin archives that provide this release
    origins = []
    for origin in cfg.options('release bases'):
        archive = cfg.get('release bases', origin)
        if not archive:
            continue
        url = '%s/dists/%s/Release' % (archive, release)
        try:
            # probe for the Release file -- its content is irrelevant here
            urllib2.urlopen(url)
            origins.append(archive)
        except urllib2.HTTPError:
            lgr.debug("No '%s'" % url)
        except urllib2.URLError:
            lgr.debug("Can't connect to '%s'" % url)
    if len(origins) == 0:
        lgr.info("Found no origin for %r. Assuming it originates here."
                 % release)
        return None
    elif len(origins) > 1:
        lgr.warning("More than a single origin archive was found for %r: %s. "
                    "TODO: disambiguate" % (release, origins))
        return None
    return origins[0]
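
# Illustration of the probe above, assuming a configuration like
#
#   [release bases]
#   debian = http://ftp.debian.org/debian
#
# release 'wheezy' is checked at
# http://ftp.debian.org/debian/dists/wheezy/Release and the archive URL is
# returned if that file is reachable.
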
def run(args):
    if args.env is None:
        args.env = [env.split('-') for env in cfg.get('build', 'environments', default='').split()]
    lgr.debug("attempting to bootstrap %i environments: %s" % (len(args.env), args.env))
    if args.chroot_basedir is None:
        args.chroot_basedir = cfg.get('build', 'chroot basedir',
                                      default=opj(xdg.BaseDirectory.xdg_data_home,
                                                  'bigmess', 'chroots'))
        lgr.debug("using chroot base directory at '%s'" % args.chroot_basedir)
    if args.builder is None:
        args.builder = cfg.get('build', 'builder', default='pbuilder')
        lgr.debug("using '%s' for bootstrapping" % args.builder)

    for env in args.env:
        lgr.debug("started bootstrapping environment '%s'" % env)
        family, codename = env
        _proc_env(family, codename, args)
        lgr.debug("finished bootstrapping environment '%s'" % env)
Example #8
def _gen_pkg_page(pname, db, pkg_template):
    bindb = db['bin']
    srcdb = db['src']
    binpkginfo = bindb[pname]
    srcpkginfo = srcdb[binpkginfo['src_name']]
    pkginfo = {}
    pkginfo.update(binpkginfo)
    pkginfo.update(srcpkginfo)

    if 'short_description' in pkginfo:
        title = _underline_text('**%s** -- %s' % (pname,
                                                  pkginfo['short_description']),
                                '*')
    else:
        title = _underline_text('**%s**' % pname, '*')
    if 'long_description' in pkginfo:
        long_descr = _proc_long_descr(pkginfo['long_description'])
    else:
        long_descr = 'No description available.'

    maintainers = ', '.join([srcpkginfo.get(field)
                             for field in ('maintainer', 'uploaders')
                             if len(srcpkginfo.get(field, ''))]).split(',')
    maint_info = []
    for m in maintainers:
        m = m.strip()
        if not len(m):
            continue
        mname, memail = re.match(r'(.*) <(.*)>', m).groups()
        # no trailing comma here -- it would turn the digest into a tuple
        emailhash = hashlib.md5(memail.lower().strip()).hexdigest()
        maint_info.append((mname, memail, emailhash))
    in_base_release = srcpkginfo.get('in_base_release', {})
    in_release = binpkginfo['in_release']
    availability = {}
    for k in set(in_release.keys() + in_base_release.keys()):
        release = cfg.get('release names', k)
        versions = in_release.get(k, None)
        base_version = in_base_release.get(k, '')
        if not versions:
            availability[release] = [(base_version, '', [])]
        else:
            # List the same base version for every item in versions
            availability[release] = [(base_version, v_, a_)
                                     for v_, a_ in in_release[k].iteritems()]

    page = pkg_template.render(
        cfg=cfg,
        pname=pname,
        title=title,
        description=long_descr,
        availability=availability,
        maintainers=maint_info,
        **pkginfo)
    return page
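
# Shape of the availability mapping handed to the template (values are
# illustrative):
#
#   availability = {
#       'Debian wheezy': [('1.0-1', '1.0-1~bp70+1', ['amd64', 'i386'])],
#       'Ubuntu precise': [('', '1.0-1~bp12.04+1', ['amd64'])],
#   }
#
# i.e. per release name a list of (base release version, repository
# version, architectures) tuples.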
Example #9
def run(args):
    import codecs, time, urllib2
    from jinja2 import Environment, PackageLoader, FileSystemLoader
    jinja_env = Environment(loader=PackageLoader('bigmess'))
    template = jinja_env.get_template('mirrors_status.rst')

    stampfile = cfg.get('mirrors monitor', 'stampfile', default='TIMESTAMP')
    warn_threshold = cfg.getfloat('mirrors monitor', 'warn-threshold') * 3600

    lgr.debug("using stampfile %(stampfile)s", locals())

    mirrors_info = {}
    for mirror in cfg.options('mirrors'):

        mirror_url = cfg.get('mirrors', mirror)
        mirror_name = cfg.get('mirror names', mirror)

        age = None
        age_str = None
        status = "**N/A**"

        try:
            url = '%(mirror_url)s/%(stampfile)s' % locals()
            u = urllib2.urlopen(url)
            stamp = u.read()
            age = time.time() - int(stamp)   # age in seconds
            age_str = _literal_seconds(age)
            if age > warn_threshold:
                lgr.warning("Mirror %(mirror)s is %(age_str)s old", locals())
                status = "**OLD**"
            else:
                status = "OK"
        except urllib2.URLError, e:
            lgr.error("Cannot fetch '%s': %s" % (url, e))
            # Here ideally we should revert to use previously known state
        except (TypeError, ValueError):
            # int conversion has failed -- there is something else in that file
            lgr.error("Cannot assess the age. Retrieved stamp was %r" % stamp)
            status = "**BRK**"
Example #10
def run(args):
    if args.env is None:
        args.env = [env.split('-')
                        for env in cfg.get('build',
                                           'environments',
                                           default='').split()]
    lgr.debug("attempting to update %i environments: %s"
              % (len(args.env), args.env))
    for env in args.env:
        lgr.debug("started updating environment '%s'" % env)
        family, codename = env
        _proc_env(family, codename, args)
        lgr.debug("finished updating environment '%s'" % env)
Example #11
def run(args):
    if args.env is None:
        args.env = [env.split('-') for env in cfg.get('build', 'environments', default='').split()]
    lgr.debug("attempting to build in %i environments: %s" % (len(args.env), args.env))
    had_failures = False
    source_include = args.source_include
    for family, codename in args.env:
        lgr.debug("started building in environment '%s-%s'" % (family, codename))
        if args.backport:
            # start with default for each backport run, i.e. source package version
            source_include = args.source_include
        if source_include is None:
            # any configured source include strategy?
            source_include = cfg.get('build', 'source include', default=False)
        if _proc_env(family, codename, args, source_include):
            had_failures = True
        # don't include the source more than once per source package version --
        # parts of the source package get regenerated and the original
        # checksums would no longer match
        source_include = False
        lgr.debug("finished building in environment '%s-%s'" % (family, codename))
    if had_failures:
        raise RuntimeError("some builds failed")
Example #12
def _backport_dsc(dsc, codename, family, args, dryrun=False):
    dsc_dict = deb822.Dsc(open(dsc))
    # assemble backport-dsc call
    bp_args = ['--target-distribution', codename]
    bp_mod_control = get_build_option('backport modify control',
                                      args.bp_mod_control,
                                      family)
    modcontrol_blacklist = get_build_option('backport modify control blacklist',
                                            family=family,
                                            default='').split()
    # if blacklisted for this source package: reset
    if dsc_dict['Source'] in modcontrol_blacklist:
        lgr.debug("source package '%s' is blacklisted for control file modification"
                  % dsc_dict['Source'])
        bp_mod_control = None
    if bp_mod_control is not None:
        bp_args += ['--mod-control', bp_mod_control]
    bp_maintainer = get_build_option('backport maintainer',
                                     args.bp_maintainer,
                                     family)
    if bp_maintainer is not None:
        bp_args += [
            '--maint-name', bp_maintainer.split('<')[0].strip(),
            '--maint-email', bp_maintainer.split('<')[1].strip()[:-1],
        ]
    if cfg.has_option('release backport ids', codename):
        bp_args += ['--version-suffix',
                    cfg.get('release backport ids', codename)]
    lgr.debug('attempting to backport source package')
    bp_success = False
    bp_cmd = ['backport-dsc'] + bp_args + [dsc]
    lgr.debug("calling: %s" % bp_cmd)
    if dryrun:
        print 'DRYRUN: %s' % bp_cmd
        return dsc
    # TODO use check_output() with 2.7+
    bp_proc = subprocess.Popen(bp_cmd, stdout=subprocess.PIPE)
    output, unused_err = bp_proc.communicate()
    retcode = bp_proc.poll()
    if retcode:
        raise RuntimeError(
                "failed to run 'backport-dsc' command (exit code %i): %s"
                % (retcode, bp_cmd))
    for line in output.split('\n'):
        if line.endswith('.dsc'):
            backported_dsc = line.split()[-1]
            bp_success = True
    if not bp_success:
        raise RuntimeError("failure to parse output of 'backport-dsc'")
    return backported_dsc
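
# For illustration, an assembled call could look like (all values assumed,
# including the suffix configured under 'release backport ids'):
#
#   backport-dsc --target-distribution wheezy \
#       --maint-name 'Some Maintainer' --maint-email maint@example.com \
#       --version-suffix '~bp70+1' foo_1.0-1.dsc
#
# The name of the backported .dsc file is then parsed from the last
# '*.dsc' line of the command's output.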
Example #13
def run(args):
    for release in cfg.options('release files'):
        if release == 'data':
            # no separate list for the data archive
            continue
        for mirror in cfg.options('mirrors'):
            for areas, tag in (('main contrib non-free', 'full'),
                               ('main', 'libre')):
                listname = '%s.%s.%s' % (release, mirror, tag)
                lf = open(opj(args.dest_dir, listname), 'w')
                for rel in ('data', release):
                    aptcfg = '%s %s %s\n' % (cfg.get('mirrors', mirror),
                                             rel,
                                             areas)
                    lf.write('deb %s' % aptcfg)
                    lf.write('#deb-src %s' % aptcfg)
                lf.close()
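
# A generated file, e.g. '<dest_dir>/wheezy.primary.full' (mirror name and
# URL illustrative), would contain:
#
#   deb http://mirror.example.org/debian data main contrib non-free
#   #deb-src http://mirror.example.org/debian data main contrib non-free
#   deb http://mirror.example.org/debian wheezy main contrib non-free
#   #deb-src http://mirror.example.org/debian wheezy main contrib non-free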
Example #14
def _gen_pkg_page(pname, db, pkg_template):
    bindb = db['bin']
    srcdb = db['src']
    binpkginfo = bindb[pname]
    srcpkginfo = srcdb[binpkginfo['src_name']]
    pkginfo = {}
    pkginfo.update(binpkginfo)
    pkginfo.update(srcpkginfo)

    if 'short_description' in pkginfo:
        title = _underline_text('**%s** -- %s' % (pname,
                                                  pkginfo['short_description']),
                                '*')
    else:
        title = _underline_text('**%s**' % pname, '*')
    if 'long_description' in pkginfo:
        long_descr = _proc_long_descr(pkginfo['long_description'])
    else:
        long_descr = 'No description available.'

    in_base_release = srcpkginfo.get('in_base_release', {})
    in_release = binpkginfo['in_release']
    availability = {}
    for k in set(in_release.keys() + in_base_release.keys()):
        release = cfg.get('release names', k)
        versions = in_release.get(k, None)
        base_version = in_base_release.get(k, '')
        if not versions:
            availability[release] = [(base_version, '', [])]
        else:
            # List the same base version for every item in versions
            availability[release] = [(base_version, v_, a_)
                                     for v_, a_ in in_release[k].iteritems()]

    page = pkg_template.render(
        cfg=cfg,
        pname=pname,
        title=title,
        description=long_descr,
        availability=availability,
        **pkginfo)
    return page
Example #15
def _backport_dsc(dsc, codename, family, args):
    # assemble backport-dsc call
    bp_args = ['--target-distribution', codename]
    bp_mod_control = get_build_option('backport modify control',
                                      args.bp_mod_control,
                                      family)
    if bp_mod_control is not None:
        bp_args += ['--mod-control', bp_mod_control]
    bp_maintainer = get_build_option('backport maintainer',
                                     args.bp_maintainer,
                                     family)
    if bp_maintainer is not None:
        bp_args += [
            '--maint-name', bp_maintainer.split('<')[0].strip(),
            '--maint-email', bp_maintainer.split('<')[1].strip()[:-1],
        ]
    if cfg.has_option('codename backport ids', codename):
        bp_args += ['--version-suffix',
                    cfg.get('codename backport ids', codename)]
    lgr.debug('attempting to backport source package')
    bp_success = False
    bp_cmd = ['backport-dsc'] + bp_args + [dsc]
    lgr.debug("calling: %s" % bp_cmd)
    # TODO: use check_output() with 2.7+
    bp_proc = subprocess.Popen(bp_cmd, stdout=subprocess.PIPE)
    output, unused_err = bp_proc.communicate()
    retcode = bp_proc.poll()
    if retcode:
        raise RuntimeError("failed to run 'backport-dsc'")
    for line in output.split('\n'):
        if line.endswith('.dsc'):
            backported_dsc = line.split()[-1]
            bp_success = True
    if not bp_success:
        raise RuntimeError("failure to parse output of 'backport-dsc'")
    return backported_dsc
Example #16
def run(args):
    if args.env is None:
        # what to build for by default
        args.env = [env.split('-') for env in cfg.get('build', 'environments', default='').split()]
    lgr.debug("attempting to build in %i environments: %s" % (len(args.env), args.env))
    # post process argv to ready them for a subsequent build_pkg command
    argv = []
    i = 0
    while i < len(sys.argv):
        av = sys.argv[i]
        if av in ('-c', '--cfg', '--config'):
            # config needs to be sent with the job
            i += 1
        elif av == '--env':
            # ignore, there will be an individual build_pkg call per environment
            i += 2
        elif av == '--arch':
            # ignore, there will be an individual build_pkg call per arch
            i += 1
            while i < len(sys.argv) - 1 and not sys.argv[i+1].startswith('-'):
                i += 1
        elif av == '--':
            pass
        elif av.startswith('--condor-'):
            # handled in this call
            i += 1
        elif av.startswith('--build-basedir') or av.startswith('--result-dir'):
            # to be ignored for a condor submission
            i += 1
        elif av == '--backport':
            # backporting is done in this call, prior to build_pkg
            pass
        elif av in ('--source-include', '--debbuild-options'):
            # this is handled in this call
            i += 1
        elif av == 'build_pkg_condor':
            argv.append('build_pkg')
        else:
            # pass on the rest
            argv.append(av)
        i += 1
    # make dsc arg explicit
    dsc_fname = os.path.abspath(argv[-1])
    argv = argv[:-1]
    dsc = deb822.Dsc(open(dsc_fname))
    settings = {
        'niceuser': args.condor_nice_user,
        'request_memory': args.condor_request_memory,
        'request_cpus': args.condor_request_cpus,
        'src_name': dsc['Source'],
        'src_version': dsc['Version'],
        'executable': argv[0]
    }
    submit = """
universe = vanilla
requirements = HasBigmess
should_transfer_files = YES
when_to_transfer_output = ON_EXIT
getenv = True
notification = Never
transfer_executable = FALSE
request_memory = %(request_memory)i
request_cpus = %(request_cpus)i
want_graceful_removal = TRUE
nice_user = %(niceuser)s
executable = %(executable)s


""" % settings
    # mangle debbuild options
    if args.debbuild_options is None:
        debbuild_options = '-j%i' % args.condor_request_cpus
    else:
        debbuild_options = '%s -j%i' % (args.debbuild_options,
                                        args.condor_request_cpus)
    source_include = args.source_include
    for family, codename in args.env:
        # change into the 'result-dir' to have Condor transfer all output here
        result_dir = get_build_option('result directory', args.result_dir, family)
        if result_dir is not None:
            lgr.debug("placing build results in '%s'" % result_dir)
            if not os.path.exists(result_dir):
                os.makedirs(result_dir)
            os.chdir(result_dir)
        # do any backports locally
        if args.backport:
            lgr.info("backporting to %s-%s" % (family, codename))
            dist_dsc_fname = _backport_dsc(dsc_fname, codename, family, args)
            # start with default for each backport run, i.e. source package version
            source_include = args.source_include
        else:
            dist_dsc_fname = dsc_fname
        if source_include is None:
            # any configured source include strategy?
            source_include = cfg.get('build', '%s source include' % family, default=False)
        dist_dsc = deb822.Dsc(open(dist_dsc_fname))
        dist_dsc_dir = os.path.dirname(dist_dsc_fname)
        # some verbosity for debugging
        submit += "\n# %s-%s\n" % (family, codename)
        # what files to transfer
        transfer_files = [dist_dsc_fname] \
                + [opj(dist_dsc_dir, f['name']) for f in dist_dsc['Files']]
        if args.common_config_file is not None:
            transfer_files += args.common_config_file
        # logfile destination?
        logdir = get_build_option('condor logdir', args.condor_logdir, family, default=os.curdir)
        if not os.path.exists(logdir):
            os.makedirs(logdir)
        archs = get_build_option('architectures', args.arch, family)
        # TODO limit to default arch for arch:all packages
        if isinstance(archs, basestring):
            archs = archs.split()
        first_arch = True
        for arch in archs:
            # basetgz
            basetgz = '%s.tar.gz' % _get_chroot_base(family, codename, arch, args)
            if source_include and first_arch:
                src_incl = 'yes'
            else:
                src_incl = 'no'
            arch_settings = {
                'condorlog': os.path.abspath(logdir),
                'arch': arch,
                'arguments': '"%s"' % ' '.join(argv[1:]
                                      + ['--env', family, codename,
                                         '--build-basedir', 'buildbase',
                                         '--result-dir', '.',
                                         '--arch', arch,
                                         '--chroot-basedir', '.',
                                         '--source-include', src_incl,
                                         "--debbuild-options ' %s'" % debbuild_options,
                                         '--']
                                      + [os.path.basename(dist_dsc_fname)]),
                'transfer_files': ','.join(transfer_files + [basetgz]),
            }
            # do we need to send the config?
            if args.common_config_file is not None:
                arch_settings['arguments'] = '"-c %s %s'\
                        % (os.path.basename(args.common_config_file[0]),
                           arch_settings['arguments'][1:])
            arch_settings.update(settings)
            submit += """
# %(arch)s
arguments = %(arguments)s
transfer_input_files = %(transfer_files)s
error = %(condorlog)s/%(src_name)s_%(src_version)s_%(arch)s.$(Cluster).$(Process).err
output = %(condorlog)s/%(src_name)s_%(src_version)s_%(arch)s.$(Cluster).$(Process).out
log = %(condorlog)s/%(src_name)s_%(src_version)s_%(arch)s.$(Cluster).$(Process).log
queue
""" % arch_settings
            first_arch = False
        # stop including source for all families -- backport might reenable
        source_include = False
    # store submit file
    if args.condor_dryrun:
        print '== submit spec ========\n%s\n== end submit spec ====' % submit
    else:
        condor_submit = subprocess.Popen(['condor_submit'], stdin=subprocess.PIPE)
        condor_submit.communicate(input=submit)
        if condor_submit.wait():
            raise RuntimeError("could not submit build; SPEC follows\n---\n%s---\n)" % submit)
def _proc_env(family, codename, args):
    aptcache = args.aptcache
    if aptcache is None:
        aptcache = cfg.get('build', '%s aptcache' % family,
                           default='')
    if aptcache:
        lgr.debug("using local apt cache at '%s'" % aptcache)
    else:
        lgr.debug("no local apt cache in use")
    components = args.components
    if components is None:
        components = cfg.get('build', '%s components' % family,
                             default='main').split()
    lgr.debug("enabling components %s" % components)

    cmd_opts = [
        '--create',
        '--distribution', codename,
        '--debootstrap', 'debootstrap',  # TODO: make this an option
        '--aptcache', aptcache,
        '--components', ' '.join(components),
    ]

    if cfg.has_option('build', '%s bootstrap keyring' % family):
        cmd_opts += ['--debootstrapopts',
                     '--keyring=%s' % cfg.get('build', '%s bootstrap keyring' % family)]
    if cfg.has_option('build', '%s keyring' % family):
        cmd_opts += ['--keyring', cfg.get('build', '%s keyring' % family)]
    if cfg.has_option('build', '%s mirror' % family):
        cmd_opts += ['--mirror', cfg.get('build', '%s mirror' % family)]
    if cfg.has_option('build', '%s othermirror' % family):
        cmd_opts += ['--othermirror', cfg.get('build', '%s othermirror' % family) % codename]

    if not os.path.exists(args.chroot_basedir):
        os.makedirs(args.chroot_basedir)

    archs = args.arch
    if archs is None:
        if not cfg.has_option('build', '%s architectures' % family):
            raise ValueError("no architectures specified, use --arch or add to configuration file")
        archs = cfg.get('build', '%s architectures' % family).split()

    for arch in archs:
        lgr.debug("started bootstrapping architecture '%s'" % arch)
        chroot_targetdir = opj(args.chroot_basedir,
                               '%s-%s-%s' % (family, codename, arch))
        if os.path.exists(chroot_targetdir):
            lgr.warning("'%s' exists -- ignoring architecture '%s'" % (chroot_targetdir, arch))
            continue
        # assemble per-architecture options on a copy, so they do not
        # accumulate across loop iterations
        if args.builder == 'pbuilder':
            arch_opts = cmd_opts + ['--basetgz', '%s.tar.gz' % chroot_targetdir]
        elif args.builder == 'cowbuilder':
            arch_opts = cmd_opts + ['--basepath', chroot_targetdir]
        else:
            raise ValueError("unknown builder '%s'" % args.builder)
        arch_opts += ['--debootstrapopts', '--arch=%s' % arch]
        ret = subprocess.call(['sudo', args.builder] + arch_opts)
        if ret:
            raise RuntimeError("bootstrapping failed (cmd: '%s'; exit code: %s)"
                               % ('%s %s' % (args.builder, ' '.join(arch_opts)),
                                  ret))
        lgr.debug("finished bootstrapping architecture '%s'" % arch)
Example #18
                    if apt_pkg.version_compare(
                            bin_version,
                            bindb[bin_name]['latest_version']) >= 0:
                        # most recent -> store description
                        descr = bpkg['Description'].split('\n')

                        bindb[bin_name]['short_description'] = descr[0].strip()
                        bindb[bin_name]['long_description'] = descr[1:]

    # Review availability of (source) packages in the base
    # releases.  Since a package might not be available in the base
    # release at all, this is done in a separate loop, after information
    # has been gathered on all packages that are present in some release
    # of our repository
    for release in releases:
        rurl = cfg.get('release files', release)
        rname = cfg.get('release names', release)
        if not rname:
            continue

        rorigin = rname.split()[0].lower()   # debian or ubuntu
        omirror = cfg.get('release bases', rorigin)
        if not omirror:
            continue

        bbaseurl = '%s/%s' % (omirror, '/'.join(rurl.split('/')[-3:-1]))
        brurl = '%s/Release' % bbaseurl

        # first 'Release' files
        brelf_path = _url2filename(args.filecache, brurl)
        codename, comps, archs = _proc_release_file(brelf_path, bbaseurl)
Example #19
def run(args):
    lgr.debug("using file cache at '%s'" % args.filecache)
    # get all metadata files from the repo
    meta_baseurl = cfg.get('metadata', 'source extracts baseurl',
                           default=None)
    meta_filenames = cfg.get('metadata', 'source extracts filenames',
                             default='').split()
    rurls = cfg.get('release files', 'urls', default='').split()
    if args.init_db is None:
        db = {'src': {}, 'bin': {}, 'task': {}}
    else:
        db = load_db(args.init_db)
    srcdb = db['src']
    bindb = db['bin']
    taskdb = db['task']
    releases = cfg.options('release files')
    for release in releases:
        rurl = cfg.get('release files', release)
        # first 'Release' files
        relf_path = _url2filename(args.filecache, rurl)
        baseurl = '/'.join(rurl.split('/')[:-1])
        codename, comps, archs = _proc_release_file(relf_path, baseurl)
        for comp in comps:
            # also get 'Sources.gz' for each component
            surl = '/'.join((baseurl, comp, 'source', 'Sources.gz'))
            srcf_path = _url2filename(args.filecache, surl)
            for spkg in deb822.Sources.iter_paragraphs(gzip.open(srcf_path)):
                src_name = spkg['Package']
                sdb = srcdb.get(src_name, {})
                src_version = spkg['Version']
                if apt_pkg.version_compare(src_version,
                                           sdb.get('latest_version', '')) > 0:
                    # this is a more recent version, so let's update all info
                    sdb['latest_version'] = src_version
                    for field in ('Homepage', 'Vcs-Browser', 'Maintainer',
                                  'Uploaders'):
                        sdb[field.lower().replace('-', '_')] = spkg.get(field, '')
                # record all binary packages
                bins = [s.strip() for s in spkg.get('Binary', '').split(',')]
                sdb['binary'] = bins
                for b in bins:
                    if b not in bindb:
                        bindb[b] = {'in_release': {codename: {src_version: []}},
                                    'src_name': src_name,
                                    'latest_version': src_version}
                    else:
                        bindb[b]['in_release'][codename] = {src_version: []}
                        if apt_pkg.version_compare(
                                src_version,  bindb[b].get('latest_version', '')) > 0:
                            bindb[b]['src_name'] = src_name
                            bindb[b]['latest_version'] = src_version
                if 'upstream' in meta_filenames and meta_baseurl is not None:
                    import yaml
                    mfn = 'upstream'
                    mfurl = '/'.join((meta_baseurl, src_name, mfn))
                    mfpath = _url2filename(args.filecache, mfurl)
                    if os.path.exists(mfpath):
                        lgr.debug("import metadata for source package '%s'"
                                  % src_name)
                        upstream = None
                        try:
                            upstream = yaml.safe_load(open(mfpath))
                        except yaml.scanner.ScannerError, e:
                            lgr.warning("Malformed upstream YAML data for '%s'"
                                        % src_name)
                            lgr.debug("Caught exception was: %s" % (e,))
                        if upstream is not None:
                            # uniformize structure
                            if 'Reference' in upstream and not isinstance(upstream['Reference'], list):
                                upstream['Reference'] = [upstream['Reference']]
                            sdb['upstream'] = upstream
                sdb['component'] = comp
                for mf in meta_filenames:
                    if os.path.exists(_url2filename(args.filecache,
                                                    '/'.join((meta_baseurl,
                                                              src_name,
                                                              mf)))):
                        sdb['havemeta_%s' % mf.replace('.', '_').replace('-', '_')] = True
                srcdb[src_name] = sdb
            for arch in archs:
                # next 'Packages.gz' for each component and architecture
                purl = '/'.join((baseurl, comp, 'binary-%s' % arch, 'Packages.gz'))
                pkgf_path = _url2filename(args.filecache, purl)
                for bpkg in deb822.Packages.iter_paragraphs(gzip.open(pkgf_path)):
                    bin_name = bpkg['Package']
                    bin_version = bpkg['Version']
                    # note: a missing 'Source' field means the source package
                    # shares the binary package's name; the source name was
                    # already recorded during the source scan above
                    if bin_name not in bindb:
                        lgr.warning("No corresponding source package for "
                                    "binary package '%s' in [%s, %s, %s]"
                                    % (bin_name, codename, comp, arch))
                        continue
                    try:
                        bindb[bin_name]['in_release'][codename][bin_version].append(arch)
                    except KeyError:
                        if codename not in bindb[bin_name]['in_release']:
                            # package not listed in this release?
                            bindb[bin_name]['in_release'][codename] = {bin_version: [arch]}
                        elif bin_version not in bindb[bin_name]['in_release'][codename]:
                            # package version not listed in this release?
                            bindb[bin_name]['in_release'][codename][bin_version] = [arch]
                        else:
                            raise
                    if apt_pkg.version_compare(
                            bin_version,
                            bindb[bin_name]['latest_version']) >= 0:
                        # most recent -> store description
                        descr = bpkg['Description'].split('\n')

                        bindb[bin_name]['short_description'] = descr[0].strip()
                        bindb[bin_name]['long_description'] = descr[1:]
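
# Resulting package DB layout (illustrative entries):
#
#   db['src']['foo'] = {'latest_version': '1.0-1',
#                       'binary': ['python-foo'],
#                       'component': 'main',
#                       'maintainer': 'Some Maintainer <maint@example.com>'}
#   db['bin']['python-foo'] = {'src_name': 'foo',
#                              'latest_version': '1.0-1',
#                              'in_release': {'wheezy': {'1.0-1': ['amd64']}},
#                              'short_description': '...',
#                              'long_description': ['...']}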
Example #20
def run(args):
    from jinja2 import Environment as JinjaEnvironment
    from jinja2 import PackageLoader as JinjaPackageLoader
    lgr.debug("using package DB at '%s'" % args.pkgdb)
    # read entire DB
    db = load_db(args.pkgdb)
    bindb = db['bin']
    srcdb = db['src']
    taskdb = db['task']
    by_release = {}
    by_maintainer = {}
    maintainer_name = {}
    by_task = {}
    for pname, pkg in db['bin'].iteritems():
        for release in pkg['in_release']:
            by_release[release] = by_release.get(release, list()) + [pname]
        src_name = pkg['src_name']
        if 'upstream' in srcdb[src_name] and 'Tags' in srcdb[src_name]['upstream']:
            # we have some tags
            for tag in srcdb[src_name]['upstream']['Tags']:
                if tag.startswith('task::'):
                    task = tag[6:]
                    by_task[task] = by_task.get(task, list()) + [pname]
        maintainer = srcdb[pkg['src_name']]['maintainer']
        uploaders = [u.strip() for u in srcdb[src_name]['uploaders'].split(',')]
        for maint in uploaders + [maintainer]:
            if not maint.strip():
                continue
            try:
                mname, memail = re.match(r'(.*) <(.*)>', maint).groups()
            except AttributeError:
                lgr.warning('malformed maintainer listing for %s: %s' % (pname, maint))
                mname = memail = maint
            # normalize
            memail = memail.lower()
            maintainer_name[memail] = mname
            by_maintainer[memail] = by_maintainer.get(memail, set()).union((src_name,))
        # XXX extend when blend is ready

    # write TOCs for all releases
    jinja_env = JinjaEnvironment(loader=JinjaPackageLoader('bigmess'))
    bintoc_template = jinja_env.get_template('binpkg_toc.rst')
    toctoc = {'release': {}, 'maintainer': {}, 'field': {}}
    release_tocs = toctoc['release']
    for release_name, release_content in by_release.iteritems():
        label = 'toc_pkgs_for_release_%s' % release_name
        title = 'Packages for %s' % cfg.get('release names', release_name)
        release_tocs[label] = title
        page = bintoc_template.render(label=label,
                                      title=title,
                                      pkgs=release_content,
                                      db=bindb)
        _write_page(page, args.dest_dir, label)
    task_tocs = toctoc['field']
    for task_name, task_content in by_task.iteritems():
        label = 'toc_pkgs_for_field_%s' % task_name
        title = 'Packages for %s' % taskdb[task_name]
        task_tocs[label] = title
        page = bintoc_template.render(label=label,
                                      title=title,
                                      pkgs=set(task_content),
                                      db=bindb)
        _write_page(page, args.dest_dir, label)
    # full TOC
    _write_page(bintoc_template.render(label='toc_all_pkgs',
                                       title='Complete package list',
                                       pkgs=bindb.keys(),
                                       db=bindb),
                args.dest_dir,
                'toc_all_pkgs')
    # TOC by maintainer
    srctoc_template = jinja_env.get_template('srcpkg_toc.rst')
    maintainer_tocs = toctoc['maintainer']
    for memail, mpkgs in by_maintainer.iteritems():
        label = 'toc_pkgs_for_maintainer_%s' % memail.replace('@', '_at_')
        title = 'Packages made by %s <%s>' % (maintainer_name[memail], memail)
        maintainer_tocs[label] = title
        page = srctoc_template.render(
            label=label,
            title=title,
            pkgs=mpkgs,
            srcdb=srcdb,
            bindb=bindb)
        _write_page(page, args.dest_dir, label)

    # TOC of TOCs
    toctoc_template = jinja_env.get_template('pkg_tocs.rst')
    print codecs.encode(toctoc_template.render(toctoc=toctoc), 'utf-8')
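
# Shape of the TOC-of-TOCs mapping rendered at the end (illustrative):
#
#   toctoc = {
#       'release': {'toc_pkgs_for_release_wheezy':
#                   'Packages for Debian wheezy'},
#       'field': {'toc_pkgs_for_field_sometask':
#                 'Packages for some task'},
#       'maintainer': {'toc_pkgs_for_maintainer_jane_at_example.com':
#                      'Packages made by Jane Doe <jane@example.com>'},
#   }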
Example #21
def run(args):
    lgr.debug("using file cache at '%s'" % args.filecache)
    # get all metadata files from the repo
    meta_baseurl = cfg.get('metadata', 'source extracts baseurl',
                           default=None)
    meta_filenames = cfg.get('metadata', 'source extracts filenames',
                             default='').split()

    #
    # Releases archives
    #
    releases = cfg.options('release files')
    # for preventing unnecessary queries
    lookupcache = {}
    # ensure the cache is there
    if not os.path.exists(args.filecache):
        os.makedirs(args.filecache)
    for release in releases:
        rurl = cfg.get('release files', release)
        # first get 'Release' files
        dst_path = _url2filename(args.filecache, rurl)
        if not _download_file(rurl, dst_path, args.force_update):
            continue
        baseurl = '/'.join(rurl.split('/')[:-1])
        comps, archs = _proc_release_file(dst_path, baseurl)
        # Fetch information on binary packages
        for comp in comps:
            for arch in archs:
                # also get 'Packages.gz' for each component and architecture
                purl = '/'.join((baseurl, comp,
                                 'binary-%s' % arch, 'Packages.gz'))
                dst_path = _url2filename(args.filecache, purl)
                if not _download_file(purl, dst_path, args.force_update):
                    continue
            # also get 'Sources.gz' for each component
            surl = '/'.join((baseurl, comp, 'source', 'Sources.gz'))
            dst_path = _url2filename(args.filecache, surl)
            if not _download_file(surl, dst_path, args.force_update):
                continue
            # TODO go through the source file and try getting 'debian/upstream'
            # from the referenced repo
            for spkg in deb822.Sources.iter_paragraphs(gzip.open(dst_path)):
                # TODO pull stuff directly form VCS
                #vcsurl = spkg.get('Vcs-Browser', None)
                #if vcsurl is None:
                #    lgr.warning("no VCS URL for '%s'" % spkg['Package'])
                #    continue
                #print vcsurl
                #http://github.com/yarikoptic/vowpal_wabbit
                #->
                #http://raw.github.com/yarikoptic/vowpal_wabbit/debian/debian/compat
                src_name = spkg['Package']
                if not meta_filenames or meta_baseurl is None:
                    continue
                lgr.debug("query metadata for source package '%s'" % src_name)
                for mfn in meta_filenames:
                    mfurl = '/'.join((meta_baseurl, src_name, mfn))
                    dst_path = _url2filename(args.filecache, mfurl)
                    if dst_path in lookupcache:
                        continue
                    _download_file(mfurl, dst_path, args.force_update,
                                   ignore_missing=True)
                    lookupcache[dst_path] = None

        # Also fetch corresponding Release from the base distribution
        # Figure out the base distribution based on the release description
        rname = cfg.get('release names', release)
        if not rname:
            continue

        # Look-up release bases for the release among available bases
        oarchive = _find_release_origin_archive(cfg, release)
        if not oarchive:
            continue

        obaseurl = '%s/%s' % (oarchive, '/'.join(rurl.split('/')[-3:-1]))
        orurl = '%s/Release' % obaseurl
        # first get 'Release' files
        dst_path = _url2filename(args.filecache, orurl)
        if not _download_file(orurl, dst_path, args.force_update):
            continue

        comps, _ = _proc_release_file(dst_path, obaseurl)
        for comp in comps:
            # Fetch information on source packages -- we are not interested
            # in thorough coverage, just the version
            osurl = '/'.join((obaseurl, comp, 'source', 'Sources.gz'))
            dst_path = _url2filename(args.filecache, osurl)
            if not _download_file(osurl, dst_path, args.force_update):
                continue

    #
    # Tasks
    #
    tasks = cfg.options('task files')
    for task in tasks:
        rurl = cfg.get('task files', task)
        dst_path = opj(args.filecache, 'task_%s' % task)
        if not _download_file(rurl, dst_path, args.force_update):
            continue
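
# Illustration of the URL handling above: for a release file URL
#
#   rurl = 'http://repo.example.org/debian/dists/wheezy/Release'
#
# baseurl becomes http://repo.example.org/debian/dists/wheezy, binary and
# source indices are fetched from <baseurl>/<comp>/binary-<arch>/Packages.gz
# and <baseurl>/<comp>/source/Sources.gz, and the base distribution's
# Release file is probed at <origin archive>/dists/wheezy/Release.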