Code Example #1
File: process_policy.py Project: fatman2021/dak
def real_comment_reject(upload, srcqueue, comments, transaction, notify=True, manual=False):
    cnf = Config()

    fs = transaction.fs
    session = transaction.session
    changesname = upload.changes.changesname
    queuedir = upload.policy_queue.path
    rejectdir = cnf['Dir::Reject']

    ### Copy files to reject/

    poolfiles = [b.poolfile for b in upload.binaries]
    if upload.source is not None:
        poolfiles.extend([df.poolfile for df in upload.source.srcfiles])
    # Not beautiful...
    files = [ af.path for af in session.query(ArchiveFile) \
                  .filter_by(archive=upload.policy_queue.suite.archive) \
                  .join(ArchiveFile.file) \
                  .filter(PoolFile.file_id.in_([ f.file_id for f in poolfiles ])) ]
    for byhand in upload.byhand:
        path = os.path.join(queuedir, byhand.filename)
        if os.path.exists(path):
            files.append(path)
    files.append(os.path.join(queuedir, changesname))

    for fn in files:
        dst = utils.find_next_free(os.path.join(rejectdir, os.path.basename(fn)))
        fs.copy(fn, dst, link=True)

    ### Write reason

    dst = utils.find_next_free(os.path.join(rejectdir, '{0}.reason'.format(changesname)))
    fh = fs.create(dst)
    fh.write(comments)
    fh.close()

    ### Send mail notification

    if notify:
        rejected_by = None
        reason = comments

        # Try to use From: from comment file if there is one.
        # This is not very elegant...
        match = re.match(r"\AFrom: ([^\n]+)\n\n", comments)
        if match:
            rejected_by = match.group(1)
            reason = '\n'.join(comments.splitlines()[2:])

        pu = get_processed_upload(upload)
        daklib.announce.announce_reject(pu, reason, rejected_by)

    print "  REJECT"
    if not Options["No-Action"]:
        Logger.log(["Policy Queue REJECT", srcqueue.queue_name, upload.changes.changesname])

    changes = upload.changes
    remove_upload(upload, transaction)
    session.delete(changes)
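
Every call site on this page funnels its destination path through `utils.find_next_free`, which returns a name that does not collide with an existing file. A minimal sketch of the behaviour these examples rely on, assuming a numeric `.N` suffix scheme and a `NoFreeFilenameError` on exhaustion (both are assumptions; the real dak helper may differ in detail):

import os


class NoFreeFilenameError(Exception):
    pass


def find_next_free(dest, too_many=100):
    # Return `dest` if it is unused; otherwise probe dest.0, dest.1, ...
    # until a free name turns up or the attempt limit is hit.
    extra = 0
    orig_dest = dest
    while os.path.exists(dest) and extra < too_many:
        dest = '%s.%s' % (orig_dest, extra)
        extra += 1
    if extra >= too_many:
        raise NoFreeFilenameError('no free filename found for %s' % orig_dest)
    return dest

With that contract, copying several rejected uploads that share a basename into Dir::Reject yields foo.changes, foo.changes.0, foo.changes.1, and so on, instead of overwriting earlier rejects.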
Code Example #2
File: process_policy.py Project: pombreda/tanglu-dak
def real_comment_reject(upload, srcqueue, comments, transaction, notify=True, manual=False):
    cnf = Config()

    fs = transaction.fs
    session = transaction.session
    changesname = upload.changes.changesname
    queuedir = upload.policy_queue.path
    rejectdir = cnf['Dir::Reject']

    ### Copy files to reject/

    poolfiles = [b.poolfile for b in upload.binaries]
    if upload.source is not None:
        poolfiles.extend([df.poolfile for df in upload.source.srcfiles])
    # Not beautiful...
    files = [ af.path for af in session.query(ArchiveFile) \
                  .filter_by(archive=upload.policy_queue.suite.archive) \
                  .join(ArchiveFile.file) \
                  .filter(PoolFile.file_id.in_([ f.file_id for f in poolfiles ])) ]
    for byhand in upload.byhand:
        path = os.path.join(queuedir, byhand.filename)
        if os.path.exists(path):
            files.append(path)
    files.append(os.path.join(queuedir, changesname))

    for fn in files:
        dst = utils.find_next_free(os.path.join(rejectdir, os.path.basename(fn)))
        fs.copy(fn, dst, link=True)

    ### Write reason

    dst = utils.find_next_free(os.path.join(rejectdir, '{0}.reason'.format(changesname)))
    fh = fs.create(dst)
    fh.write(comments)
    fh.close()

    ### Send mail notification

    if notify:
        rejected_by = None
        reason = comments

        # Try to use From: from comment file if there is one.
        # This is not very elegant...
        match = re.match(r"\AFrom: ([^\n]+)\n\n", comments)
        if match:
            rejected_by = match.group(1)
            reason = '\n'.join(comments.splitlines()[2:])

        pu = get_processed_upload(upload)
        daklib.announce.announce_reject(pu, reason, rejected_by)

    print "  REJECT"
    if not Options["No-Action"]:
        Logger.log(["Policy Queue REJECT", srcqueue.queue_name, upload.changes.changesname])

    changes = upload.changes
    remove_upload(upload, transaction)
    session.delete(changes)
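
The notification branch above promotes an optional `From:` pseudo-header at the top of the comment file into the rejecting party and strips it from the mailed reason. The regex in isolation (the comment text is invented for the demo):

import re

comments = "From: Jane Doe <jane@example.org>\n\nFails to build from source."

rejected_by = None
reason = comments
match = re.match(r"\AFrom: ([^\n]+)\n\n", comments)
if match:
    rejected_by = match.group(1)
    # Drop the header line and the blank separator line.
    reason = '\n'.join(comments.splitlines()[2:])

print(rejected_by)  # Jane Doe <jane@example.org>
print(reason)       # Fails to build from source.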
Code Example #3
File: process_commands.py Project: abhi11/dak
def main(argv=None):
    if argv is None:
        argv = sys.argv

    arguments = [('h', 'help', 'Process-Commands::Options::Help'),
                 ('d', 'directory', 'Process-Commands::Options::Directory', 'HasArg')]

    cnf = Config()
    cnf['Process-Commands::Options::Dummy'] = ''
    filenames = apt_pkg.parse_commandline(cnf.Cnf, arguments, argv)
    options = cnf.subtree('Process-Commands::Options')

    if 'Help' in options or (len(filenames) == 0 and 'Directory' not in options):
        usage()
        sys.exit(0)

    log = Logger('command')

    now = datetime.datetime.now()
    donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
    rejectdir = cnf['Dir::Reject']

    if len(filenames) == 0:
        directory = options['Directory']
        filenames = [os.path.join(directory, fn) for fn in os.listdir(directory)
                     if fn.endswith('.dak-commands')]

    for fn in filenames:
        basename = os.path.basename(fn)
        if not fn.endswith('.dak-commands'):
            log.log(['unexpected filename', basename])
            continue

        with open(fn, 'r') as fh:
            data = fh.read()

        try:
            command = CommandFile(basename, data, log)
            command.evaluate()
        except Exception:
            created = os.stat(fn).st_mtime
            now = time.time()
            too_new = (now - created < int(cnf.get('Dinstall::SkipTime', '60')))
            if too_new:
                log.log(['skipped (too new)'])
                continue
            log.log(['reject', basename])
            dst = find_next_free(os.path.join(rejectdir, basename))
        else:
            log.log(['done', basename])
            dst = find_next_free(os.path.join(donedir, basename))

        with FilesystemTransaction() as fs:
            fs.unlink(fn)
            fs.create(dst, mode=0o644).write(data)
            fs.commit()

    log.close()
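
On failure, a command file is only moved to the reject directory once it is older than `Dinstall::SkipTime`; anything younger is skipped, on the assumption that it may still be mid-upload and will be retried on the next run. The age check on its own (path and threshold are illustrative):

import os
import time

fn = '/srv/queue/uploader.dak-commands'  # illustrative path
skip_time = 60                           # seconds; the default used above

age = time.time() - os.stat(fn).st_mtime
if age < skip_time:
    print('skipped (too new)')   # leave the file for the next run
else:
    print('old enough to process the failure')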
Code Example #4
File: process_policy.py Project: fatman2021/dak
def do_comments(dir, srcqueue, opref, npref, line, fn, transaction):
    session = transaction.session
    actions = []
    for comm in [ x for x in os.listdir(dir) if x.startswith(opref) ]:
        lines = open(os.path.join(dir, comm)).readlines()
        if len(lines) == 0 or lines[0] != line + "\n": continue

        # If the ACCEPT includes a _<arch> we only accept that .changes.
        # Otherwise we accept all .changes that start with the given prefix
        changes_prefix = comm[len(opref):]
        if changes_prefix.count('_') < 2:
            changes_prefix = changes_prefix + '_'
        else:
            changes_prefix = changes_prefix + '.changes'

        # We need to escape "_" as we use it with the LIKE operator (via the
        # SQLA startswith) later.
        changes_prefix = changes_prefix.replace("_", r"\_")

        uploads = session.query(PolicyQueueUpload).filter_by(policy_queue=srcqueue) \
            .join(PolicyQueueUpload.changes).filter(DBChange.changesname.startswith(changes_prefix)) \
            .order_by(PolicyQueueUpload.source_id)
        reason = "".join(lines[1:])
        actions.extend((u, reason) for u in uploads)

        if opref != npref:
            newcomm = npref + comm[len(opref):]
            newcomm = utils.find_next_free(os.path.join(dir, newcomm))
            transaction.fs.move(os.path.join(dir, comm), newcomm)

    actions.sort()

    for u, reason in actions:
        print("Processing changes file: {0}".format(u.changes.changesname))
        fn(u, srcqueue, reason, transaction)
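
The underscore escaping above matters because SQLAlchemy's `startswith` compiles to SQL `LIKE 'prefix%'`, and in LIKE patterns `_` is itself a single-character wildcard. A standalone demonstration of the over-match with plain sqlite3 (table and rows invented for the demo):

import sqlite3

db = sqlite3.connect(':memory:')
db.execute("CREATE TABLE changes (changesname TEXT)")
db.executemany("INSERT INTO changes VALUES (?)",
               [('foo_1.0_amd64.changes',), ('foo-bar_1.0_amd64.changes',)])

# Unescaped: '_' matches any one character, so 'foo_%' also matches 'foo-bar...'.
print(db.execute("SELECT changesname FROM changes "
                 "WHERE changesname LIKE 'foo_%'").fetchall())

# Escaped: only the literal underscore matches.
print(db.execute(r"SELECT changesname FROM changes "
                 r"WHERE changesname LIKE 'foo\_%' ESCAPE '\'").fetchall())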
Code Example #5
def do_comments(dir, srcqueue, opref, npref, line, fn, transaction):
    session = transaction.session
    actions = []
    for comm in [ x for x in os.listdir(dir) if x.startswith(opref) ]:
        lines = open(os.path.join(dir, comm)).readlines()
        if len(lines) == 0 or lines[0] != line + "\n": continue

        # If the ACCEPT includes a _<arch> we only accept that .changes.
        # Otherwise we accept all .changes that start with the given prefix
        changes_prefix = comm[len(opref):]
        if changes_prefix.count('_') < 2:
            changes_prefix = changes_prefix + '_'
        else:
            changes_prefix = changes_prefix + '.changes'

        # We need to escape "_" as we use it with the LIKE operator (via the
        # SQLA startswith) later.
        changes_prefix = changes_prefix.replace("_", r"\_")

        uploads = session.query(PolicyQueueUpload).filter_by(policy_queue=srcqueue) \
            .join(PolicyQueueUpload.changes).filter(DBChange.changesname.startswith(changes_prefix)) \
            .order_by(PolicyQueueUpload.source_id)
        reason = "".join(lines[1:])
        actions.extend((u, reason) for u in uploads)

        if opref != npref:
            newcomm = npref + comm[len(opref):]
            newcomm = utils.find_next_free(os.path.join(dir, newcomm))
            transaction.fs.move(os.path.join(dir, comm), newcomm)

    actions.sort()

    for u, reason in actions:
        print("Processing changes file: {0}".format(u.changes.changesname))
        fn(u, srcqueue, reason, transaction)
Code Example #6
File: process_upload.py Project: stapelberg/dak
def real_reject(directory, upload, reason=None, notify=True):
    # XXX: rejection itself should go to daklib.archive.ArchiveUpload
    cnf = Config()

    Logger.log(['REJECT', upload.changes.filename])
    print "REJECT"

    fs = upload.transaction.fs
    rejectdir = cnf['Dir::Reject']

    files = [f.filename for f in upload.changes.files.values()]
    files.append(upload.changes.filename)

    for fn in files:
        src = os.path.join(upload.directory, fn)
        dst = utils.find_next_free(os.path.join(rejectdir, fn))
        if not os.path.exists(src):
            continue
        fs.copy(src, dst)

    if upload.reject_reasons is not None:
        if reason is None:
            reason = ''
        reason = reason + '\n' + '\n'.join(upload.reject_reasons)

    if reason is None:
        reason = '(Unknown reason. Please check logs.)'

    dst = utils.find_next_free(
        os.path.join(rejectdir, '{0}.reason'.format(upload.changes.filename)))
    fh = fs.create(dst)
    fh.write(reason)
    fh.close()

    if notify:
        pu = get_processed_upload(upload)
        daklib.announce.announce_reject(pu, reason)

    SummaryStats().reject_count += 1
Code Example #7
File: process_upload.py Project: Debian/dak
def real_reject(directory, upload, reason=None, notify=True):
    # XXX: rejection itself should go to daklib.archive.ArchiveUpload
    cnf = Config()

    Logger.log(['REJECT', upload.changes.filename])
    print("REJECT")

    fs = upload.transaction.fs
    rejectdir = cnf['Dir::Reject']

    files = [f.filename for f in upload.changes.files.values()]
    files.append(upload.changes.filename)

    for fn in files:
        src = os.path.join(upload.directory, fn)
        dst = utils.find_next_free(os.path.join(rejectdir, fn))
        if not os.path.exists(src):
            continue
        fs.copy(src, dst)

    if upload.reject_reasons is not None:
        if reason is None:
            reason = ''
        reason = reason + '\n' + '\n'.join(upload.reject_reasons)

    if reason is None:
        reason = '(Unknown reason. Please check logs.)'

    dst = utils.find_next_free(os.path.join(rejectdir, '{0}.reason'.format(upload.changes.filename)))
    fh = fs.create(dst)
    fh.write(reason)
    fh.close()

    if notify:
        pu = get_processed_upload(upload)
        daklib.announce.announce_reject(pu, reason)

    SummaryStats().reject_count += 1
Code Example #8
File: process_upload.py Project: Debian/dak
def process_buildinfos(upload):
    cnf = Config()

    if 'Dir::BuildinfoArchive' not in cnf:
        return

    target_dir = os.path.join(
        cnf['Dir::BuildinfoArchive'],
        datetime.datetime.now().strftime('%Y/%m/%d'),
    )

    for f in upload.changes.buildinfo_files:
        src = os.path.join(upload.directory, f.filename)
        dst = utils.find_next_free(os.path.join(target_dir, f.filename))

        Logger.log(["Archiving", f.filename])
        upload.transaction.fs.copy(src, dst, mode=0o644)
Code Example #9
File: process_upload.py Project: stapelberg/dak
def process_buildinfos(upload):
    cnf = Config()

    if 'Dir::BuildinfoArchive' not in cnf:
        return

    target_dir = os.path.join(
        cnf['Dir::BuildinfoArchive'],
        datetime.datetime.now().strftime('%Y/%m/%d'),
    )

    for f in upload.changes.buildinfo_files:
        src = os.path.join(upload.directory, f.filename)
        dst = utils.find_next_free(os.path.join(target_dir, f.filename))

        Logger.log(["Archiving", f.filename])
        upload.transaction.fs.copy(src, dst, mode=0o644)
Code Example #10
File: clean_queues.py Project: carlosduclos/dak
def remove (from_dir, f):
    fname = os.path.basename(f)
    if os.access(f, os.R_OK):
        Logger.log(["move file to morgue", from_dir, fname, del_dir])
        if Options["Verbose"]:
            print "Removing '%s' (to '%s')."  % (fname, del_dir)
        if Options["No-Action"]:
            return

        dest_filename = os.path.join(del_dir, fname)
        # If the destination file exists, try to find another filename to use
        if os.path.exists(dest_filename):
            dest_filename = utils.find_next_free(dest_filename, 10)
            Logger.log(["change destination file name", os.path.basename(dest_filename)])
        utils.move(f, dest_filename, 0o660)
    else:
        Logger.log(["skipping file because of permission problem", fname])
        utils.warn("skipping '%s', permission denied." % fname)
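
Note the second argument here: `find_next_free(dest_filename, 10)` caps the search at ten alternative names, presumably so a pathological pile-up of identically named files in the morgue fails fast rather than probing indefinitely (compare the sketch after example #1, where the cap defaults to a larger value).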
Code Example #11
File: process_upload.py Project: os-develop/dak
def accept(directory, upload):
    cnf = Config()

    Logger.log(['ACCEPT', upload.changes.filename])
    print("ACCEPT")

    upload.install()
    utils.process_buildinfos(upload.directory, upload.changes.buildinfo_files,
                             upload.transaction.fs, Logger)

    accepted_to_real_suite = any(suite.policy_queue is None
                                 for suite in upload.final_suites)
    sourceful_upload = upload.changes.sourceful

    control = upload.changes.changes
    if sourceful_upload and not Options['No-Action']:
        urgency = control.get('Urgency')
        # As per policy 5.6.17, the urgency can be followed by a space and a
        # comment.  Extract only the urgency from the string.
        if ' ' in urgency:
            urgency, comment = urgency.split(' ', 1)
        if urgency not in cnf.value_list('Urgency::Valid'):
            urgency = cnf['Urgency::Default']
        UrgencyLog().log(control['Source'], control['Version'], urgency)

    pu = get_processed_upload(upload)
    daklib.announce.announce_accept(pu)

    # Move .changes to done, but only for uploads that were accepted to a
    # real suite.  process-policy will handle this for uploads to queues.
    if accepted_to_real_suite:
        src = os.path.join(upload.directory, upload.changes.filename)

        now = datetime.datetime.now()
        donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
        dst = os.path.join(donedir, upload.changes.filename)
        dst = utils.find_next_free(dst)

        upload.transaction.fs.copy(src, dst, mode=0o644)

    SummaryStats().accept_count += 1
    SummaryStats().accept_bytes += upload.changes.bytes
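
Debian Policy 5.6.17 allows the Urgency field to carry a parenthesised remark after the keyword, separated by a space; the `split(' ', 1)` above keeps only the keyword for validation and logging. The string handling in isolation (field value invented):

urgency = 'low (HIGH for users of the foo plugin)'

comment = ''
if ' ' in urgency:
    urgency, comment = urgency.split(' ', 1)

print(urgency)  # low
print(comment)  # (HIGH for users of the foo plugin)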
Code Example #12
File: clean_queues.py Project: os-develop/dak
def remove(from_dir, f):
    fname = os.path.basename(f)
    if os.access(f, os.R_OK):
        Logger.log(["move file to morgue", from_dir, fname, del_dir])
        if Options["Verbose"]:
            print("Removing '%s' (to '%s')." % (fname, del_dir))
        if Options["No-Action"]:
            return

        dest_filename = os.path.join(del_dir, fname)
        # If the destination file exists, try to find another filename to use
        if os.path.exists(dest_filename):
            dest_filename = utils.find_next_free(dest_filename, 10)
            Logger.log([
                "change destination file name",
                os.path.basename(dest_filename)
            ])
        utils.move(f, dest_filename, 0o660)
    else:
        Logger.log(["skipping file because of permission problem", fname])
        utils.warn("skipping '%s', permission denied." % fname)
Code Example #13
File: process_upload.py Project: Debian/dak
def accept(directory, upload):
    cnf = Config()

    Logger.log(['ACCEPT', upload.changes.filename])
    print("ACCEPT")

    upload.install()
    process_buildinfos(upload)

    accepted_to_real_suite = any(suite.policy_queue is None for suite in upload.final_suites)
    sourceful_upload = 'source' in upload.changes.architectures

    control = upload.changes.changes
    if sourceful_upload and not Options['No-Action']:
        urgency = control.get('Urgency')
        # As per policy 5.6.17, the urgency can be followed by a space and a
        # comment.  Extract only the urgency from the string.
        if ' ' in urgency:
            urgency, comment = urgency.split(' ', 1)
        if urgency not in cnf.value_list('Urgency::Valid'):
            urgency = cnf['Urgency::Default']
        UrgencyLog().log(control['Source'], control['Version'], urgency)

    pu = get_processed_upload(upload)
    daklib.announce.announce_accept(pu)

    # Move .changes to done, but only for uploads that were accepted to a
    # real suite.  process-policy will handle this for uploads to queues.
    if accepted_to_real_suite:
        src = os.path.join(upload.directory, upload.changes.filename)

        now = datetime.datetime.now()
        donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
        dst = os.path.join(donedir, upload.changes.filename)
        dst = utils.find_next_free(dst)

        upload.transaction.fs.copy(src, dst, mode=0o644)

    SummaryStats().accept_count += 1
    SummaryStats().accept_bytes += upload.changes.bytes
Code Example #14
File: process_upload.py Project: stapelberg/dak
def accept(directory, upload):
    cnf = Config()

    Logger.log(['ACCEPT', upload.changes.filename])
    print "ACCEPT"

    upload.install()
    process_buildinfos(upload)

    accepted_to_real_suite = False
    for suite in upload.final_suites:
        accepted_to_real_suite = accepted_to_real_suite or suite.policy_queue is None

    sourceful_upload = 'source' in upload.changes.architectures

    control = upload.changes.changes
    if sourceful_upload and not Options['No-Action']:
        urgency = control.get('Urgency')
        if urgency not in cnf.value_list('Urgency::Valid'):
            urgency = cnf['Urgency::Default']
        UrgencyLog().log(control['Source'], control['Version'], urgency)

    pu = get_processed_upload(upload)
    daklib.announce.announce_accept(pu)

    # Move .changes to done, but only for uploads that were accepted to a
    # real suite.  process-policy will handle this for uploads to queues.
    if accepted_to_real_suite:
        src = os.path.join(upload.directory, upload.changes.filename)

        now = datetime.datetime.now()
        donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
        dst = os.path.join(donedir, upload.changes.filename)
        dst = utils.find_next_free(dst)

        upload.transaction.fs.copy(src, dst, mode=0o644)

    SummaryStats().accept_count += 1
    SummaryStats().accept_bytes += upload.changes.bytes
Code Example #15
File: process_policy.py Project: ximion/dak
def comment_accept(upload, srcqueue, comments, transaction):
    for byhand in upload.byhand:
        path = os.path.join(srcqueue.path, byhand.filename)
        if os.path.exists(path):
            raise Exception("E: cannot ACCEPT upload with unprocessed byhand file {0}".format(byhand.filename))

    cnf = Config()

    fs = transaction.fs
    session = transaction.session
    changesname = upload.changes.changesname
    allow_tainted = srcqueue.suite.archive.tainted

    # We need overrides to get the target component
    overridesuite = upload.target_suite
    if overridesuite.overridesuite is not None:
        overridesuite = session.query(Suite).filter_by(suite_name=overridesuite.overridesuite).one()

    def binary_component_func(db_binary):
        section = db_binary.proxy["Section"]
        component_name = "main"
        if section.find("/") != -1:
            component_name = section.split("/", 1)[0]
        return get_mapped_component(component_name, session=session)

    def is_debug_binary(db_binary):
        return daklib.utils.is_in_debug_section(db_binary.proxy)

    def has_debug_binaries(upload):
        return any((is_debug_binary(x) for x in upload.binaries))

    def source_component_func(db_source):
        package_list = PackageList(db_source.proxy)
        component = source_component_from_package_list(package_list, upload.target_suite)
        if component is not None:
            return get_mapped_component(component.component_name, session=session)

        # Fallback for packages without Package-List field
        query = (
            session.query(Override)
            .filter_by(suite=overridesuite, package=db_source.source)
            .join(OverrideType)
            .filter(OverrideType.overridetype == "dsc")
            .join(Component)
        )
        return query.one().component

    all_target_suites = [upload.target_suite]
    all_target_suites.extend([q.suite for q in upload.target_suite.copy_queues])

    for suite in all_target_suites:
        debug_suite = suite.debug_suite

        if upload.source is not None:
            # If we have Source in this upload, let's include it in the
            # upload suite.
            transaction.copy_source(
                upload.source, suite, source_component_func(upload.source), allow_tainted=allow_tainted
            )

            if debug_suite is not None and has_debug_binaries(upload):
                # If we're handling a debug package, we also need to include
                # the source in the debug suite.
                transaction.copy_source(
                    upload.source, debug_suite, source_component_func(upload.source), allow_tainted=allow_tainted
                )

        for db_binary in upload.binaries:
            # Now, let's work out where to copy this guy to -- if it's
            # a debug binary, and the suite has a debug suite, let's go
            # ahead and target the debug suite rather than the stock
            # suite.
            copy_to_suite = suite
            if debug_suite is not None and is_debug_binary(db_binary):
                copy_to_suite = debug_suite

            # build queues may miss the source package if this is a
            # binary-only upload.
            if suite != upload.target_suite:
                transaction.copy_source(
                    db_binary.source,
                    copy_to_suite,
                    source_component_func(db_binary.source),
                    allow_tainted=allow_tainted,
                )

            transaction.copy_binary(
                db_binary,
                copy_to_suite,
                binary_component_func(db_binary),
                allow_tainted=allow_tainted,
                extra_archives=[upload.target_suite.archive],
            )

    # Copy .changes if needed
    if upload.target_suite.copychanges:
        src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
        dst = os.path.join(upload.target_suite.path, upload.changes.changesname)
        fs.copy(src, dst, mode=upload.target_suite.archive.mode)

    # Copy upload to Process-Policy::CopyDir
    # Used on security.d.o to sync accepted packages to ftp-master, but this
    # should eventually be replaced by something else.
    copydir = cnf.get("Process-Policy::CopyDir") or None
    if copydir is not None:
        mode = upload.target_suite.archive.mode
        if upload.source is not None:
            for f in [df.poolfile for df in upload.source.srcfiles]:
                dst = os.path.join(copydir, f.basename)
                if not os.path.exists(dst):
                    fs.copy(f.fullpath, dst, mode=mode)

        for db_binary in upload.binaries:
            f = db_binary.poolfile
            dst = os.path.join(copydir, f.basename)
            if not os.path.exists(dst):
                fs.copy(f.fullpath, dst, mode=mode)

        src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
        dst = os.path.join(copydir, upload.changes.changesname)
        if not os.path.exists(dst):
            fs.copy(src, dst, mode=mode)

    if upload.source is not None and not Options["No-Action"]:
        urgency = upload.changes.urgency
        if urgency not in cnf.value_list("Urgency::Valid"):
            urgency = cnf["Urgency::Default"]
        UrgencyLog().log(upload.source.source, upload.source.version, urgency)

    print "  ACCEPT"
    if not Options["No-Action"]:
        Logger.log(["Policy Queue ACCEPT", srcqueue.queue_name, changesname])

    pu = get_processed_upload(upload)
    daklib.announce.announce_accept(pu)

    # TODO: code duplication. Similar code is in process-upload.
    # Move .changes to done
    src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
    now = datetime.datetime.now()
    donedir = os.path.join(cnf["Dir::Done"], now.strftime("%Y/%m/%d"))
    dst = os.path.join(donedir, upload.changes.changesname)
    dst = utils.find_next_free(dst)
    fs.copy(src, dst, mode=0o644)

    remove_upload(upload, transaction)
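
`binary_component_func` above derives the target component from the binary's Section field: a value like `contrib/net` names the component before the slash, while a bare section implies `main`; `get_mapped_component` then applies any archive-side component renames. The string handling on its own (the helper name is ours, not dak's):

def component_from_section(section):
    # 'contrib/net' -> 'contrib'; a bare 'net' falls back to 'main'.
    if '/' in section:
        return section.split('/', 1)[0]
    return 'main'

print(component_from_section('contrib/net'))  # contrib
print(component_from_section('net'))          # main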
Code Example #16
def comment_accept(upload, srcqueue, comments, transaction):
    for byhand in upload.byhand:
        path = os.path.join(srcqueue.path, byhand.filename)
        if os.path.exists(path):
            raise Exception('E: cannot ACCEPT upload with unprocessed byhand file {0}'.format(byhand.filename))

    cnf = Config()

    fs = transaction.fs
    session = transaction.session
    changesname = upload.changes.changesname
    allow_tainted = srcqueue.suite.archive.tainted

    # We need overrides to get the target component
    overridesuite = upload.target_suite
    if overridesuite.overridesuite is not None:
        overridesuite = session.query(Suite).filter_by(suite_name=overridesuite.overridesuite).one()

    def binary_component_func(db_binary):
        section = db_binary.proxy['Section']
        component_name = 'main'
        if section.find('/') != -1:
            component_name = section.split('/', 1)[0]
        return get_mapped_component(component_name, session=session)

    def is_debug_binary(db_binary):
        return daklib.utils.is_in_debug_section(db_binary.proxy)

    def has_debug_binaries(upload):
        return any((is_debug_binary(x) for x in upload.binaries))

    def source_component_func(db_source):
        package_list = PackageList(db_source.proxy)
        component = source_component_from_package_list(package_list, upload.target_suite)
        if component is not None:
            return get_mapped_component(component.component_name, session=session)

        # Fallback for packages without Package-List field
        query = session.query(Override).filter_by(suite=overridesuite, package=db_source.source) \
            .join(OverrideType).filter(OverrideType.overridetype == 'dsc') \
            .join(Component)
        return query.one().component

    all_target_suites = [upload.target_suite]
    all_target_suites.extend([q.suite for q in upload.target_suite.copy_queues])

    for suite in all_target_suites:
        debug_suite = suite.debug_suite

        if upload.source is not None:
            # If we have Source in this upload, let's include it in the
            # upload suite.
            transaction.copy_source(
                upload.source,
                suite,
                source_component_func(upload.source),
                allow_tainted=allow_tainted,
            )

            if debug_suite is not None and has_debug_binaries(upload):
                # If we're handling a debug package, we also need to include
                # the source in the debug suite.
                transaction.copy_source(
                    upload.source,
                    debug_suite,
                    source_component_func(upload.source),
                    allow_tainted=allow_tainted,
                )

        for db_binary in upload.binaries:
            # Now, let's work out where to copy this guy to -- if it's
            # a debug binary, and the suite has a debug suite, let's go
            # ahead and target the debug suite rather than the stock
            # suite.
            copy_to_suite = suite
            if debug_suite is not None and is_debug_binary(db_binary):
                copy_to_suite = debug_suite

            # build queues and debug suites may miss the source package
            # if this is a binary-only upload.
            if copy_to_suite != upload.target_suite:
                transaction.copy_source(
                    db_binary.source,
                    copy_to_suite,
                    source_component_func(db_binary.source),
                    allow_tainted=allow_tainted,
                )

            transaction.copy_binary(
                db_binary,
                copy_to_suite,
                binary_component_func(db_binary),
                allow_tainted=allow_tainted,
                extra_archives=[upload.target_suite.archive],
            )

            check_upload_for_external_signature_request(session, suite, copy_to_suite, db_binary)

        suite.update_last_changed()

    # Copy .changes if needed
    if upload.target_suite.copychanges:
        src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
        dst = os.path.join(upload.target_suite.path, upload.changes.changesname)
        fs.copy(src, dst, mode=upload.target_suite.archive.mode)

    # List of files in the queue directory
    queue_files = [changesname]
    chg = daklib.upload.Changes(upload.policy_queue.path, changesname, keyrings=[], require_signature=False)
    queue_files.extend(f.filename for f in chg.buildinfo_files)

    # Copy upload to Process-Policy::CopyDir
    # Used on security.d.o to sync accepted packages to ftp-master, but this
    # should eventually be replaced by something else.
    copydir = cnf.get('Process-Policy::CopyDir') or None
    if copydir is not None:
        mode = upload.target_suite.archive.mode
        if upload.source is not None:
            for f in [ df.poolfile for df in upload.source.srcfiles ]:
                dst = os.path.join(copydir, f.basename)
                if not os.path.exists(dst):
                    fs.copy(f.fullpath, dst, mode=mode)

        for db_binary in upload.binaries:
            f = db_binary.poolfile
            dst = os.path.join(copydir, f.basename)
            if not os.path.exists(dst):
                fs.copy(f.fullpath, dst, mode=mode)

        for fn in queue_files:
            src = os.path.join(upload.policy_queue.path, fn)
            dst = os.path.join(copydir, fn)
            # We check for `src` to exist as old uploads in policy queues
            # might still miss the `.buildinfo` files.
            if os.path.exists(src) and not os.path.exists(dst):
                fs.copy(src, dst, mode=mode)

    if upload.source is not None and not Options['No-Action']:
        urgency = upload.changes.urgency
        # As per policy 5.6.17, the urgency can be followed by a space and a
        # comment.  Extract only the urgency from the string.
        if ' ' in urgency:
            urgency, comment = urgency.split(' ', 1)
        if urgency not in cnf.value_list('Urgency::Valid'):
            urgency = cnf['Urgency::Default']
        UrgencyLog().log(upload.source.source, upload.source.version, urgency)

    print "  ACCEPT"
    if not Options['No-Action']:
        Logger.log(["Policy Queue ACCEPT", srcqueue.queue_name, changesname])

    pu = get_processed_upload(upload)
    daklib.announce.announce_accept(pu)

    # TODO: code duplication. Similar code is in process-upload.
    # Move .changes to done
    now = datetime.datetime.now()
    donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
    for fn in queue_files:
        src = os.path.join(upload.policy_queue.path, fn)
        if os.path.exists(src):
            dst = os.path.join(donedir, fn)
            dst = utils.find_next_free(dst)
            fs.copy(src, dst, mode=0o644)

    remove_upload(upload, transaction)
Code Example #17
def clean(now_date, archives, max_delete, session):
    cnf = Config()

    count = 0
    size = 0

    Logger.log(["Cleaning out packages..."])

    morguedir = cnf.get("Dir::Morgue", os.path.join("Dir::Pool", 'morgue'))
    morguesubdir = cnf.get("Clean-Suites::MorgueSubDir", 'pool')

    # Build directory as morguedir/morguesubdir/year/month/day
    dest = os.path.join(morguedir, morguesubdir, str(now_date.year),
                        '%.2d' % now_date.month, '%.2d' % now_date.day)

    if not Options["No-Action"] and not os.path.exists(dest):
        os.makedirs(dest)

    # Delete from source
    Logger.log(["Deleting from source table..."])
    q = session.execute("""
      WITH
      deleted_sources AS (
        DELETE FROM source
         USING files f
         WHERE source.file = f.id
           AND NOT EXISTS (SELECT 1 FROM files_archive_map af
                                    JOIN archive_delete_date ad ON af.archive_id = ad.archive_id
                                   WHERE af.file_id = source.file
                                     AND (af.last_used IS NULL OR af.last_used > ad.delete_date))
        RETURNING source.id AS id, f.filename AS filename
      ),
      deleted_dsc_files AS (
        DELETE FROM dsc_files df WHERE df.source IN (SELECT id FROM deleted_sources)
        RETURNING df.file AS file_id
      ),
      now_unused_source_files AS (
        UPDATE files_archive_map af
           SET last_used = '1977-03-13 13:37:42' -- Kill it now. We waited long enough before removing the .dsc.
         WHERE af.file_id IN (SELECT file_id FROM deleted_dsc_files)
           AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = af.file_id)
      )
      SELECT filename FROM deleted_sources""")
    for s in q:
        Logger.log(["delete source", s[0]])

    if not Options["No-Action"]:
        session.commit()

    # Delete files from the pool
    old_files = session.query(ArchiveFile).filter(
        sql.text(
            'files_archive_map.last_used <= (SELECT delete_date FROM archive_delete_date ad WHERE ad.archive_id = files_archive_map.archive_id)'
        )).join(Archive)
    if max_delete is not None:
        old_files = old_files.limit(max_delete)
        Logger.log(["Limiting removals to %d" % max_delete])

    if archives is not None:
        archive_ids = [a.archive_id for a in archives]
        old_files = old_files.filter(ArchiveFile.archive_id.in_(archive_ids))

    for af in old_files:
        filename = af.path
        try:
            st = os.lstat(filename)
        except FileNotFoundError:
            Logger.log(["database referred to non-existing file", filename])
            session.delete(af)
            continue
        Logger.log(["delete archive file", filename])
        if stat.S_ISLNK(st.st_mode):
            count += 1
            Logger.log(["delete symlink", filename])
            if not Options["No-Action"]:
                os.unlink(filename)
                session.delete(af)
        elif stat.S_ISREG(st.st_mode):
            size += st.st_size
            count += 1

            dest_filename = dest + '/' + os.path.basename(filename)
            # If the destination file exists, try to find another filename to use
            if os.path.lexists(dest_filename):
                dest_filename = utils.find_next_free(dest_filename)

            if not Options["No-Action"]:
                if af.archive.use_morgue:
                    Logger.log(["move to morgue", filename, dest_filename])
                    utils.move(filename, dest_filename)
                else:
                    Logger.log(["removed file", filename])
                    os.unlink(filename)
                session.delete(af)

        else:
            utils.fubar("%s is neither symlink nor file?!" % (filename))

    if count > 0:
        Logger.log(["total", count, utils.size_type(size)])

    # Delete entries in files no longer referenced by any archive
    query = """
       DELETE FROM files f
        WHERE NOT EXISTS (SELECT 1 FROM files_archive_map af WHERE af.file_id = f.id)
    """
    session.execute(query)

    if not Options["No-Action"]:
        session.commit()
Code Example #18
def main(argv=None):
    if argv is None:
        argv = sys.argv

    arguments = [('h', 'help', 'Process-Commands::Options::Help'),
                 ('d', 'directory', 'Process-Commands::Options::Directory',
                  'HasArg')]

    cnf = Config()
    cnf['Process-Commands::Options::Dummy'] = ''
    filenames = apt_pkg.parse_commandline(cnf.Cnf, arguments, argv)
    options = cnf.subtree('Process-Commands::Options')

    if 'Help' in options or (len(filenames) == 0
                             and 'Directory' not in options):
        usage()
        sys.exit(0)

    log = Logger('command')

    now = datetime.datetime.now()
    donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
    rejectdir = cnf['Dir::Reject']

    if len(filenames) == 0:
        cdir = options['Directory']
        filenames = [
            os.path.join(cdir, fn) for fn in os.listdir(cdir)
            if fn.endswith('.dak-commands')
        ]

    for fn in filenames:
        basename = os.path.basename(fn)
        if not fn.endswith('.dak-commands'):
            log.log(['unexpected filename', basename])
            continue

        with open(fn, 'r') as fh:
            data = fh.read()

        try:
            command = CommandFile(basename, data, log)
            command.evaluate()
        except CommandError as e:
            created = os.stat(fn).st_mtime
            now = time.time()
            too_new = (now - created < int(cnf.get('Dinstall::SkipTime',
                                                   '60')))
            if too_new:
                log.log(['skipped (too new)'])
                continue
            log.log(['reject', basename, e])
            dst = find_next_free(os.path.join(rejectdir, basename))
        except Exception as e:
            log.log_traceback('Exception while processing %s:' % (basename), e)
            dst = find_next_free(os.path.join(rejectdir, basename))
        else:
            log.log(['done', basename])
            dst = find_next_free(os.path.join(donedir, basename))

        with FilesystemTransaction() as fs:
            fs.unlink(fn)
            fs.create(dst, mode=0o644).write(data)
            fs.commit()

    log.close()
Code Example #19
File: clean_suites.py Project: evgeni/dak
def clean(now_date, delete_date, max_delete, session):
    cnf = Config()

    count = 0
    size = 0

    print "Cleaning out packages..."

    morguedir = cnf.get("Dir::Morgue", os.path.join("Dir::Pool", 'morgue'))
    morguesubdir = cnf.get("Clean-Suites::MorgueSubDir", 'pool')

    # Build directory as morguedir/morguesubdir/year/month/day
    dest = os.path.join(morguedir,
                        morguesubdir,
                        str(now_date.year),
                        '%.2d' % now_date.month,
                        '%.2d' % now_date.day)

    if not Options["No-Action"] and not os.path.exists(dest):
        os.makedirs(dest)

    # Delete from source
    print "Deleting from source table... "
    q = session.execute("""
SELECT s.id, f.filename FROM source s, files f
  WHERE f.last_used <= :deletedate
        AND s.file = f.id
        AND s.id NOT IN (SELECT src_id FROM extra_src_references)""", {'deletedate': delete_date})
    for s in q.fetchall():
        Logger.log(["delete source", s[1], s[0]])
        if not Options["No-Action"]:
            session.execute("DELETE FROM dsc_files WHERE source = :s_id", {"s_id":s[0]})
            session.execute("DELETE FROM source WHERE id = :s_id", {"s_id":s[0]})

    if not Options["No-Action"]:
        session.commit()

    # Delete files from the pool
    old_files = session.query(PoolFile).filter(PoolFile.last_used <= delete_date)
    if max_delete is not None:
        old_files = old_files.limit(max_delete)
        print "Limiting removals to %d" % max_delete

    for pf in old_files:
        filename = os.path.join(pf.location.path, pf.filename)
        if not os.path.exists(filename):
            utils.warn("can not find '%s'." % (filename))
            continue
        Logger.log(["delete pool file", filename])
        if os.path.isfile(filename):
            if os.path.islink(filename):
                count += 1
                Logger.log(["delete symlink", filename])
                if not Options["No-Action"]:
                    os.unlink(filename)
            else:
                size += os.stat(filename)[stat.ST_SIZE]
                count += 1

                dest_filename = dest + '/' + os.path.basename(filename)
                # If the destination file exists, try to find another filename to use
                if os.path.exists(dest_filename):
                    dest_filename = utils.find_next_free(dest_filename)

                Logger.log(["move to morgue", filename, dest_filename])
                if not Options["No-Action"]:
                    utils.move(filename, dest_filename)

            if not Options["No-Action"]:
                session.delete(pf)
                session.commit()

        else:
            utils.fubar("%s is neither symlink nor file?!" % (filename))

    if count > 0:
        Logger.log(["total", count, utils.size_type(size)])
        print "Cleaned %d files, %s." % (count, utils.size_type(size))
Code Example #20
File: process_policy.py Project: Debian/dak
def comment_accept(upload, srcqueue, comments, transaction):
    for byhand in upload.byhand:
        path = os.path.join(srcqueue.path, byhand.filename)
        if os.path.exists(path):
            raise Exception('E: cannot ACCEPT upload with unprocessed byhand file {0}'.format(byhand.filename))

    cnf = Config()

    fs = transaction.fs
    session = transaction.session
    changesname = upload.changes.changesname
    allow_tainted = srcqueue.suite.archive.tainted

    # We need overrides to get the target component
    overridesuite = upload.target_suite
    if overridesuite.overridesuite is not None:
        overridesuite = session.query(Suite).filter_by(suite_name=overridesuite.overridesuite).one()

    def binary_component_func(db_binary):
        section = db_binary.proxy['Section']
        component_name = 'main'
        if section.find('/') != -1:
            component_name = section.split('/', 1)[0]
        return get_mapped_component(component_name, session=session)

    def is_debug_binary(db_binary):
        return daklib.utils.is_in_debug_section(db_binary.proxy)

    def has_debug_binaries(upload):
        return any((is_debug_binary(x) for x in upload.binaries))

    def source_component_func(db_source):
        package_list = PackageList(db_source.proxy)
        component = source_component_from_package_list(package_list, upload.target_suite)
        if component is not None:
            return get_mapped_component(component.component_name, session=session)

        # Fallback for packages without Package-List field
        query = session.query(Override).filter_by(suite=overridesuite, package=db_source.source) \
            .join(OverrideType).filter(OverrideType.overridetype == 'dsc') \
            .join(Component)
        return query.one().component

    all_target_suites = [upload.target_suite]
    all_target_suites.extend([q.suite for q in upload.target_suite.copy_queues])

    for suite in all_target_suites:
        debug_suite = suite.debug_suite

        if upload.source is not None:
            # If we have Source in this upload, let's include it in the
            # upload suite.
            transaction.copy_source(
                upload.source,
                suite,
                source_component_func(upload.source),
                allow_tainted=allow_tainted,
            )

            if debug_suite is not None and has_debug_binaries(upload):
                # If we're handling a debug package, we also need to include
                # the source in the debug suite.
                transaction.copy_source(
                    upload.source,
                    debug_suite,
                    source_component_func(upload.source),
                    allow_tainted=allow_tainted,
                )

        for db_binary in upload.binaries:
            # Now, let's work out where to copy this guy to -- if it's
            # a debug binary, and the suite has a debug suite, let's go
            # ahead and target the debug suite rather than the stock
            # suite.
            copy_to_suite = suite
            if debug_suite is not None and is_debug_binary(db_binary):
                copy_to_suite = debug_suite

            # build queues and debug suites may miss the source package
            # if this is a binary-only upload.
            if copy_to_suite != upload.target_suite:
                transaction.copy_source(
                    db_binary.source,
                    copy_to_suite,
                    source_component_func(db_binary.source),
                    allow_tainted=allow_tainted,
                )

            transaction.copy_binary(
                db_binary,
                copy_to_suite,
                binary_component_func(db_binary),
                allow_tainted=allow_tainted,
                extra_archives=[upload.target_suite.archive],
            )

            check_upload_for_external_signature_request(session, suite, copy_to_suite, db_binary)

        suite.update_last_changed()

    # Copy .changes if needed
    if upload.target_suite.copychanges:
        src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
        dst = os.path.join(upload.target_suite.path, upload.changes.changesname)
        fs.copy(src, dst, mode=upload.target_suite.archive.mode)

    # List of files in the queue directory
    queue_files = [changesname]
    chg = daklib.upload.Changes(upload.policy_queue.path, changesname, keyrings=[], require_signature=False)
    queue_files.extend(f.filename for f in chg.buildinfo_files)

    # Copy upload to Process-Policy::CopyDir
    # Used on security.d.o to sync accepted packages to ftp-master, but this
    # should eventually be replaced by something else.
    copydir = cnf.get('Process-Policy::CopyDir') or None
    if copydir is not None:
        mode = upload.target_suite.archive.mode
        if upload.source is not None:
            for f in [df.poolfile for df in upload.source.srcfiles]:
                dst = os.path.join(copydir, f.basename)
                if not os.path.exists(dst):
                    fs.copy(f.fullpath, dst, mode=mode)

        for db_binary in upload.binaries:
            f = db_binary.poolfile
            dst = os.path.join(copydir, f.basename)
            if not os.path.exists(dst):
                fs.copy(f.fullpath, dst, mode=mode)

        for fn in queue_files:
            src = os.path.join(upload.policy_queue.path, fn)
            dst = os.path.join(copydir, fn)
            # We check for `src` to exist as old uploads in policy queues
            # might still miss the `.buildinfo` files.
            if os.path.exists(src) and not os.path.exists(dst):
                fs.copy(src, dst, mode=mode)

    if upload.source is not None and not Options['No-Action']:
        urgency = upload.changes.urgency
        # As per policy 5.6.17, the urgency can be followed by a space and a
        # comment.  Extract only the urgency from the string.
        if ' ' in urgency:
            urgency, comment = urgency.split(' ', 1)
        if urgency not in cnf.value_list('Urgency::Valid'):
            urgency = cnf['Urgency::Default']
        UrgencyLog().log(upload.source.source, upload.source.version, urgency)

    print("  ACCEPT")
    if not Options['No-Action']:
        Logger.log(["Policy Queue ACCEPT", srcqueue.queue_name, changesname])

    pu = get_processed_upload(upload)
    daklib.announce.announce_accept(pu)

    # TODO: code duplication. Similar code is in process-upload.
    # Move .changes to done
    now = datetime.datetime.now()
    donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
    for fn in queue_files:
        src = os.path.join(upload.policy_queue.path, fn)
        if os.path.exists(src):
            dst = os.path.join(donedir, fn)
            dst = utils.find_next_free(dst)
            fs.copy(src, dst, mode=0o644)

    remove_upload(upload, transaction)
Code Example #21
File: process_policy.py Project: ximion/dak-dep11
def comment_accept(upload, srcqueue, comments, transaction):
    for byhand in upload.byhand:
        path = os.path.join(srcqueue.path, byhand.filename)
        if os.path.exists(path):
            raise Exception(
                'E: cannot ACCEPT upload with unprocessed byhand file {0}'.
                format(byhand.filename))

    cnf = Config()

    fs = transaction.fs
    session = transaction.session
    changesname = upload.changes.changesname
    allow_tainted = srcqueue.suite.archive.tainted

    # We need overrides to get the target component
    overridesuite = upload.target_suite
    if overridesuite.overridesuite is not None:
        overridesuite = session.query(Suite).filter_by(
            suite_name=overridesuite.overridesuite).one()

    def binary_component_func(db_binary):
        override = session.query(Override).filter_by(suite=overridesuite, package=db_binary.package) \
            .join(OverrideType).filter(OverrideType.overridetype == db_binary.binarytype) \
            .join(Component).one()
        return override.component

    def source_component_func(db_source):
        override = session.query(Override).filter_by(suite=overridesuite, package=db_source.source) \
            .join(OverrideType).filter(OverrideType.overridetype == 'dsc') \
            .join(Component).one()
        return override.component

    all_target_suites = [upload.target_suite]
    all_target_suites.extend(
        [q.suite for q in upload.target_suite.copy_queues])

    for suite in all_target_suites:
        if upload.source is not None:
            transaction.copy_source(upload.source,
                                    suite,
                                    source_component_func(upload.source),
                                    allow_tainted=allow_tainted)
        for db_binary in upload.binaries:
            # build queues may miss the source package if this is a binary-only upload
            if suite != upload.target_suite:
                transaction.copy_source(db_binary.source,
                                        suite,
                                        source_component_func(
                                            db_binary.source),
                                        allow_tainted=allow_tainted)
            transaction.copy_binary(
                db_binary,
                suite,
                binary_component_func(db_binary),
                allow_tainted=allow_tainted,
                extra_archives=[upload.target_suite.archive])

    # Copy .changes if needed
    if upload.target_suite.copychanges:
        src = os.path.join(upload.policy_queue.path,
                           upload.changes.changesname)
        dst = os.path.join(upload.target_suite.path,
                           upload.changes.changesname)
        fs.copy(src, dst, mode=upload.target_suite.archive.mode)

    # Copy upload to Process-Policy::CopyDir
    # Used on security.d.o to sync accepted packages to ftp-master, but this
    # should eventually be replaced by something else.
    copydir = cnf.get('Process-Policy::CopyDir') or None
    if copydir is not None:
        mode = upload.target_suite.archive.mode
        if upload.source is not None:
            for f in [df.poolfile for df in upload.source.srcfiles]:
                dst = os.path.join(copydir, f.basename)
                if not os.path.exists(dst):
                    fs.copy(f.fullpath, dst, mode=mode)

        for db_binary in upload.binaries:
            f = db_binary.poolfile
            dst = os.path.join(copydir, f.basename)
            if not os.path.exists(dst):
                fs.copy(f.fullpath, dst, mode=mode)

        src = os.path.join(upload.policy_queue.path,
                           upload.changes.changesname)
        dst = os.path.join(copydir, upload.changes.changesname)
        if not os.path.exists(dst):
            fs.copy(src, dst, mode=mode)

    if upload.source is not None and not Options['No-Action']:
        urgency = upload.changes.urgency
        if urgency not in cnf.value_list('Urgency::Valid'):
            urgency = cnf['Urgency::Default']
        UrgencyLog().log(upload.source.source, upload.source.version, urgency)

    print "  ACCEPT"
    if not Options['No-Action']:
        Logger.log(["Policy Queue ACCEPT", srcqueue.queue_name, changesname])

    pu = get_processed_upload(upload)
    daklib.announce.announce_accept(pu)

    # TODO: code duplication. Similar code is in process-upload.
    # Move .changes to done
    src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
    now = datetime.datetime.now()
    donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
    dst = os.path.join(donedir, upload.changes.changesname)
    dst = utils.find_next_free(dst)
    fs.copy(src, dst, mode=0o644)

    remove_upload(upload, transaction)
Code Example #22
File: process_policy.py Project: fatman2021/dak
def comment_accept(upload, srcqueue, comments, transaction):
    for byhand in upload.byhand:
        path = os.path.join(srcqueue.path, byhand.filename)
        if os.path.exists(path):
            raise Exception('E: cannot ACCEPT upload with unprocessed byhand file {0}'.format(byhand.filename))

    cnf = Config()

    fs = transaction.fs
    session = transaction.session
    changesname = upload.changes.changesname
    allow_tainted = srcqueue.suite.archive.tainted

    # We need overrides to get the target component
    overridesuite = upload.target_suite
    if overridesuite.overridesuite is not None:
        overridesuite = session.query(Suite).filter_by(suite_name=overridesuite.overridesuite).one()

    def binary_component_func(db_binary):
        section = db_binary.proxy['Section']
        component_name = 'main'
        if section.find('/') != -1:
            component_name = section.split('/', 1)[0]
        return get_mapped_component(component_name, session=session)

    def source_component_func(db_source):
        package_list = PackageList(db_source.proxy)
        component = source_component_from_package_list(package_list, upload.target_suite)
        if component is not None:
            return get_mapped_component(component.component_name, session=session)

        # Fallback for packages without Package-List field
        query = session.query(Override).filter_by(suite=overridesuite, package=db_source.source) \
            .join(OverrideType).filter(OverrideType.overridetype == 'dsc') \
            .join(Component)
        return query.one().component

    all_target_suites = [upload.target_suite]
    all_target_suites.extend([q.suite for q in upload.target_suite.copy_queues])

    for suite in all_target_suites:
        if upload.source is not None:
            transaction.copy_source(upload.source, suite, source_component_func(upload.source), allow_tainted=allow_tainted)
        for db_binary in upload.binaries:
            # build queues may miss the source package if this is a binary-only upload
            if suite != upload.target_suite:
                transaction.copy_source(db_binary.source, suite, source_component_func(db_binary.source), allow_tainted=allow_tainted)
            transaction.copy_binary(db_binary, suite, binary_component_func(db_binary), allow_tainted=allow_tainted, extra_archives=[upload.target_suite.archive])

    # Copy .changes if needed
    if upload.target_suite.copychanges:
        src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
        dst = os.path.join(upload.target_suite.path, upload.changes.changesname)
        fs.copy(src, dst, mode=upload.target_suite.archive.mode)

    # Copy upload to Process-Policy::CopyDir
    # Used on security.d.o to sync accepted packages to ftp-master, but this
    # should eventually be replaced by something else.
    copydir = cnf.get('Process-Policy::CopyDir') or None
    if copydir is not None:
        mode = upload.target_suite.archive.mode
        if upload.source is not None:
            for f in [ df.poolfile for df in upload.source.srcfiles ]:
                dst = os.path.join(copydir, f.basename)
                if not os.path.exists(dst):
                    fs.copy(f.fullpath, dst, mode=mode)

        for db_binary in upload.binaries:
            f = db_binary.poolfile
            dst = os.path.join(copydir, f.basename)
            if not os.path.exists(dst):
                fs.copy(f.fullpath, dst, mode=mode)

        src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
        dst = os.path.join(copydir, upload.changes.changesname)
        if not os.path.exists(dst):
            fs.copy(src, dst, mode=mode)

    if upload.source is not None and not Options['No-Action']:
        urgency = upload.changes.urgency
        if urgency not in cnf.value_list('Urgency::Valid'):
            urgency = cnf['Urgency::Default']
        UrgencyLog().log(upload.source.source, upload.source.version, urgency)

    print "  ACCEPT"
    if not Options['No-Action']:
        Logger.log(["Policy Queue ACCEPT", srcqueue.queue_name, changesname])

    pu = get_processed_upload(upload)
    daklib.announce.announce_accept(pu)

    # TODO: code duplication. Similar code is in process-upload.
    # Move .changes to done
    src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
    now = datetime.datetime.now()
    donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
    dst = os.path.join(donedir, upload.changes.changesname)
    dst = utils.find_next_free(dst)
    fs.copy(src, dst, mode=0o644)

    remove_upload(upload, transaction)
Code Example #23
File: clean_suites.py Project: abhi11/dak
def clean(now_date, archives, max_delete, session):
    cnf = Config()

    count = 0
    size = 0

    Logger.log(["Cleaning out packages..."])

    morguedir = cnf.get("Dir::Morgue", os.path.join("Dir::Pool", 'morgue'))
    morguesubdir = cnf.get("Clean-Suites::MorgueSubDir", 'pool')

    # Build directory as morguedir/morguesubdir/year/month/day
    dest = os.path.join(morguedir,
                        morguesubdir,
                        str(now_date.year),
                        '%.2d' % now_date.month,
                        '%.2d' % now_date.day)

    if not Options["No-Action"] and not os.path.exists(dest):
        os.makedirs(dest)

    # Delete from source
    Logger.log(["Deleting from source table..."])
    q = session.execute("""
      WITH
      deleted_sources AS (
        DELETE FROM source
         USING files f
         WHERE source.file = f.id
           AND NOT EXISTS (SELECT 1 FROM files_archive_map af
                                    JOIN archive_delete_date ad ON af.archive_id = ad.archive_id
                                   WHERE af.file_id = source.file
                                     AND (af.last_used IS NULL OR af.last_used > ad.delete_date))
        RETURNING source.id AS id, f.filename AS filename
      ),
      deleted_dsc_files AS (
        DELETE FROM dsc_files df WHERE df.source IN (SELECT id FROM deleted_sources)
        RETURNING df.file AS file_id
      ),
      now_unused_source_files AS (
        UPDATE files_archive_map af
           SET last_used = '1977-03-13 13:37:42' -- Kill it now. We waited long enough before removing the .dsc.
         WHERE af.file_id IN (SELECT file_id FROM deleted_dsc_files)
           AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = af.file_id)
      )
      SELECT filename FROM deleted_sources""")
    for s in q:
        Logger.log(["delete source", s[0]])

    if not Options["No-Action"]:
        session.commit()

    # Delete files from the pool
    old_files = session.query(ArchiveFile).filter(
        'files_archive_map.last_used <= (SELECT delete_date FROM archive_delete_date ad WHERE ad.archive_id = files_archive_map.archive_id)'
    ).join(Archive)
    if max_delete is not None:
        old_files = old_files.limit(max_delete)
        Logger.log(["Limiting removals to %d" % max_delete])

    if archives is not None:
        archive_ids = [ a.archive_id for a in archives ]
        old_files = old_files.filter(ArchiveFile.archive_id.in_(archive_ids))

    for af in old_files:
        filename = af.path
        if not os.path.exists(filename):
            Logger.log(["database referred to non-existing file", af.path])
            session.delete(af)
            continue
        Logger.log(["delete archive file", filename])
        if os.path.isfile(filename):
            if os.path.islink(filename):
                count += 1
                Logger.log(["delete symlink", filename])
                if not Options["No-Action"]:
                    os.unlink(filename)
            else:
                size += os.stat(filename)[stat.ST_SIZE]
                count += 1

                dest_filename = dest + '/' + os.path.basename(filename)
                # If the destination file exists, try to find another filename to use
                if os.path.lexists(dest_filename):
                    dest_filename = utils.find_next_free(dest_filename)

                if not Options["No-Action"]:
                    if af.archive.use_morgue:
                        Logger.log(["move to morgue", filename, dest_filename])
                        utils.move(filename, dest_filename)
                    else:
                        Logger.log(["removed file", filename])
                        os.unlink(filename)

            if not Options["No-Action"]:
                session.delete(af)
                session.commit()

        else:
            utils.fubar("%s is neither symlink nor file?!" % (filename))

    if count > 0:
        Logger.log(["total", count, utils.size_type(size)])

    # Delete entries in files no longer referenced by any archive
    query = """
       DELETE FROM files f
        WHERE NOT EXISTS (SELECT 1 FROM files_archive_map af WHERE af.file_id = f.id)
    """
    session.execute(query)

    if not Options["No-Action"]:
        session.commit()