def run(self):
    session = session_factory()
    dev_suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.devel_target == True).one()  # noqa: E712

    # update the seed (contained in the metapackage repository)
    self._update_seed_data()

    # NOTE: We make a hardcoded assumption on where the seed is located.
    # Since germinate expects it there currently, this isn't an issue today,
    # but could become one in future.
    seed_src_dir = os.path.join(self._meta_src_dir, 'seed')

    # create target directory
    results_dir = os.path.join(self._results_base_dir,
                               '{}.{}'.format(self._project_name.lower(), dev_suite.name))
    os.makedirs(results_dir, exist_ok=True)

    # prepare parameters
    ge_args = ['-S', 'file://' + seed_src_dir,  # seed source
               '-s', dev_suite.name,  # suite name
               '-d', dev_suite.name,  # suite / dist name
               '-m', 'file://' + self._lconf.archive_root_dir,  # mirror
               '-c', ' '.join([c.name for c in dev_suite.components]),  # components to check
               '-a', dev_suite.primary_architecture.name]
    # NOTE: Maybe we want to limit the seed to only stuff in the primary (main) component?

    # execute germinator
    ret, out = self._run_germinate(results_dir, ge_args)
    if not ret:
        log.error('Germinate run has failed: {}'.format(out))
        return False

    return True
def add_hint(options):
    from laniakea.db import SpearsHint

    if not options.source_suite:
        print_error_exit('The source-suite parameter is missing!')
    if not options.target_suite:
        print_error_exit('The target-suite parameter is missing!')
    if not options.hint:
        print_error_exit('The hint parameter is missing!')
    if not options.reason:
        print_error_exit('The reason parameter is missing!')

    session = session_factory()

    migration_id = '{}-to-{}'.format(options.source_suite, options.target_suite)

    # remove a preexisting hint
    session.query(SpearsHint) \
        .filter(SpearsHint.migration_id == migration_id,
                SpearsHint.hint == options.hint) \
        .delete()

    hint = SpearsHint()
    hint.migration_id = migration_id
    hint.hint = options.hint
    hint.reason = options.reason

    session.add(hint)
    session.commit()
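# A minimal usage sketch for add_hint() above, assuming it is wired up to an
# argparse-style CLI; SimpleNamespace stands in for the parsed options, and all
# of the values below are hypothetical examples.
from types import SimpleNamespace

add_hint(SimpleNamespace(source_suite='staging',          # hypothetical source suite
                         target_suite='stable',           # hypothetical target suite
                         hint='unblock mypackage/1.0-1',  # hypothetical Britney hint
                         reason='manual unblock for an urgent fix'))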
def command_autosync(options):
    ''' Automatically synchronize packages '''

    bconf, sconf = get_sync_config()
    incoming_suite = get_incoming_suite_info()
    engine = SyncEngine(bconf, sconf, incoming_suite)

    blacklist_pkgnames = get_package_blacklist()
    engine.setBlacklist(blacklist_pkgnames)

    ret, issue_data = engine.autosync()
    if not ret:
        sys.exit(2)
        return

    session = session_factory()
    for ssuite in sconf.source.suites:
        session.query(SynchrotronIssue) \
            .filter(SynchrotronIssue.source_suite == ssuite.name,
                    SynchrotronIssue.target_suite == incoming_suite.name) \
            .delete()

    for info in issue_data:
        issue = SynchrotronIssue()
        issue.kind = SynchrotronIssueKind(info.kind)
        issue.package_name = info.packageName
        issue.source_suite = info.sourceSuite
        issue.target_suite = info.targetSuite
        issue.source_version = info.sourceVersion
        issue.target_version = info.targetVersion
        issue.details = info.details
        session.add(issue)
    session.commit()
def get_suiteinfo_all_suites():
    session = session_factory()

    suite_infos = []
    suites = session.query(ArchiveSuite).all()
    for suite in suites:
        suite_infos.append(get_suiteinfo_for_suite(suite))

    return suite_infos
def config_get_value(mod, key):
    ''' Get a value from the configuration store. '''
    from laniakea.db import session_factory

    session = session_factory()
    entry = session.query(ConfigEntry).filter_by(id='{}.{}'.format(mod, key)).one_or_none()
    if not entry:
        return None
    return entry.value
def schedule_builds(repo_name, simulate=False, limit_architecture=None, limit_count=0):
    ''' Schedule builds for packages in the incoming suite. '''

    session = session_factory()

    # FIXME: We need much better ways to select the right suite to synchronize with
    incoming_suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.accept_uploads == True).one()  # noqa: E712

    return schedule_builds_for_suite(repo_name, incoming_suite.name, simulate,
                                     limit_architecture, limit_count)
def ask_settings(options):
    from laniakea.db.core import config_set_project_name, config_set_distro_tag
    from laniakea.db import ArchiveRepository, ArchiveSuite

    database_init(options)

    print_header('Configuring base settings for Laniakea')
    session = session_factory()

    config_set_project_name(input_str('Name of this project'))

    # we only support one repository at a time, so add the default
    repo = session.query(ArchiveRepository) \
        .filter(ArchiveRepository.name == 'master').one_or_none()
    if not repo:
        repo = ArchiveRepository('master')
        session.add(repo)
        session.commit()

    add_suite = True
    while add_suite:
        _add_new_suite(session)
        add_suite = input_bool('Add another suite?')

    incoming_suite = None
    while not incoming_suite:
        incoming_suite_name = input_str('Name of the \'incoming\' suite which new packages are usually uploaded to')
        incoming_suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == incoming_suite_name).one_or_none()
        if not incoming_suite:
            print_note('Suite with the name "{}" was not found.'.format(incoming_suite_name))
    incoming_suite.accept_uploads = True

    devel_suite = None
    while not devel_suite:
        devel_suite_name = input_str('Name of the "development" suite which is rolling or will become a final release')
        devel_suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == devel_suite_name).one_or_none()
        if not devel_suite:
            print_note('Suite with the name "{}" was not found.'.format(devel_suite_name))
    devel_suite.devel_target = True

    config_set_distro_tag(input_str('Distribution version tag (commonly found in package versions, '
                                    'e.g. \'tanglu\' for OS \'Tanglu\' with versions like \'1.0-0tanglu1\')'))
    session.commit()
def config_set_value(mod, key, value):
    ''' Set a value in the configuration store '''
    from laniakea.db import session_factory

    session = session_factory()
    entry = session.query(ConfigEntry).filter_by(id='{}.{}'.format(mod, key)).one_or_none()
    if entry:
        entry.value = value
    else:
        entry = ConfigEntry(mod, key, value)
        session.add(entry)
    session.commit()
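# A minimal roundtrip sketch for the configuration store helpers above. The key
# mirrors the 'indep_arch_affinity' value that schedule_builds_for_suite() reads
# via config_get_value(LkModule.ARIADNE, ...); the plain 'ariadne' module string
# and the 'amd64' value are assumptions made for illustration.
config_set_value('ariadne', 'indep_arch_affinity', 'amd64')
assert config_get_value('ariadne', 'indep_arch_affinity') == 'amd64'
assert config_get_value('ariadne', 'no_such_key') is None  # unset keys yield None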
def get_incoming_suite_info():
    from lknative import SuiteInfo

    session = session_factory()
    si = SuiteInfo()

    # FIXME: We need much better ways to select the right suite to synchronize with
    suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.accept_uploads == True).one()  # noqa: E712
    si.name = suite.name
    si.architectures = list(a.name for a in suite.architectures)
    si.components = list(c.name for c in suite.components)

    return si
def run(self):
    session = session_factory()
    dev_suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.devel_target == True).one_or_none()  # noqa: E712
    if not dev_suite:
        log.info('No development target suite found, doing nothing.')
        return True

    # update the seed (contained in the metapackage repository)
    self._update_seed_data()

    # NOTE: We make a hardcoded assumption on where the seed is located.
    # Since germinate expects it there currently, this isn't an issue today,
    # but could become one in future.
    seed_src_dir = os.path.join(self._meta_src_dir, 'seed')

    # create target directory
    results_dir = os.path.join(self._results_base_dir,
                               '{}.{}'.format(self._project_name.lower(), dev_suite.name))
    os.makedirs(results_dir, exist_ok=True)

    # prepare parameters
    ge_args = ['-S', 'file://' + seed_src_dir,  # seed source
               '-s', dev_suite.name,  # suite name
               '-d', dev_suite.name,  # suite / dist name
               '-m', 'file://' + self._lconf.archive_root_dir,  # mirror
               '-c', ' '.join([c.name for c in dev_suite.components]),  # components to check
               '-a', dev_suite.primary_architecture.name]
    # NOTE: Maybe we want to limit the seed to only stuff in the primary (main) component?

    # execute germinator
    ret, out = self._run_germinate(results_dir, ge_args)
    if not ret:
        log.error('Germinate run has failed: {}'.format(out))
        return False

    return True
def schedule_builds(repo_name, simulate=False, limit_architecture=None, limit_count=0):
    ''' Schedule builds for packages in the incoming suite. '''

    session = session_factory()

    # FIXME: We need much better ways to select the right suite to synchronize with
    incoming_suites = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.accept_uploads == True).all()  # noqa: E712
    for incoming_suite in incoming_suites:
        if not schedule_builds_for_suite(repo_name, incoming_suite.name, simulate,
                                         limit_architecture, limit_count):
            return False
    return True
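# A minimal invocation sketch for the scheduler entry point above: a dry run
# against the 'master' repository (hardcoded elsewhere in this module), limited
# to the amd64 architecture and at most ten jobs; the limit values are
# hypothetical examples.
schedule_builds('master', simulate=True, limit_architecture='amd64', limit_count=10)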
def command_binaries(options):
    ''' Check binary packages '''

    session = session_factory()
    debcheck, scan_suites = _create_debcheck(session, options.suite)

    # FIXME: Don't hardcode the "master" repository here, fully implement
    # the "multiple repositories" feature
    repo_name = 'master'
    repo = session.query(ArchiveRepository) \
        .filter(ArchiveRepository.name == repo_name).one()

    for si in scan_suites:
        ret, issues = debcheck.getDepCheckIssues(si)
        _update_debcheck_issues(session, repo, si, issues, PackageType.BINARY)
def remove_hint(options):
    from laniakea.db import SpearsHint

    if not options.source_suite:
        print_error_exit('The source-suite parameter is missing!')
    if not options.target_suite:
        print_error_exit('The target-suite parameter is missing!')
    if not options.hint:
        print_error_exit('The hint parameter is missing!')

    session = session_factory()

    migration_id = '{}-to-{}'.format(options.source_suite, options.target_suite)
    session.query(SpearsHint) \
        .filter(SpearsHint.migration_id == migration_id,
                SpearsHint.hint == options.hint) \
        .delete()
    session.commit()  # persist the deletion, like add_hint() does
def create_native_baseconfig():
    session = session_factory()

    bconf = BaseConfig()
    bconf.projectName = config_get_project_name()
    bconf.archive.distroTag = config_get_distro_tag()

    dev_suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.devel_target == True).one()  # noqa: E712
    bconf.archive.develSuite = dev_suite.name

    lconf = LocalConfig()
    bconf.cacheDir = lconf.cache_dir
    bconf.workspace = lconf.workspace
    bconf.archive.rootPath = lconf.archive_root_dir

    return bconf
def get_spears_config():
    from laniakea.native import SpearsHint as LknSpearsHint
    from laniakea.native import create_native_baseconfig, get_suiteinfo_all_suites, \
        SpearsConfig, SpearsConfigEntry, int_to_versionpriority
    from laniakea.localconfig import ExternalToolsUrls

    bconf = create_native_baseconfig()

    ext_urls = ExternalToolsUrls()

    sconf = SpearsConfig()
    sconf.britneyGitOriginUrl = ext_urls.britney_git_repository

    session = session_factory()
    migration_entries = session.query(SpearsMigrationEntry).all()
    mdict = {}
    for entry in migration_entries:
        centry = SpearsConfigEntry()
        centry.sourceSuites = entry.source_suites
        centry.targetSuite = entry.target_suite

        d = {}
        for k, v in entry.delays.items():
            d[int_to_versionpriority(int(k))] = int(v)
        centry.delays = d

        hints = session.query(SpearsHint).filter(SpearsHint.migration_id == entry.idname).all()
        chints = []
        for hint in hints:
            chint = LknSpearsHint()
            chint.hint = hint.hint
            chint.reason = hint.reason
            chint.date = hint.time
            chints.append(chint)
        centry.hints = chints

        mdict[entry.idname] = centry
    sconf.migrations = mdict

    suites = get_suiteinfo_all_suites()

    return bconf, sconf, suites
def create_native_baseconfig():
    from laniakea import LocalConfig

    session = session_factory()

    bconf = BaseConfig()
    bconf.projectName = config_get_project_name()
    bconf.archive.distroTag = config_get_distro_tag()

    dev_suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.devel_target == True).one()  # noqa: E712
    bconf.archive.develSuite = dev_suite.name

    lconf = LocalConfig()
    bconf.cacheDir = lconf.cache_dir
    bconf.workspace = lconf.workspace
    bconf.archive.rootPath = lconf.archive_root_dir

    return bconf
def get_spears_config():
    from lknative import SpearsHint as LknSpearsHint
    from lknative import SpearsConfig, SpearsConfigEntry, int_to_versionpriority
    from laniakea.lknative_utils import create_native_baseconfig, get_suiteinfo_all_suites
    from laniakea.localconfig import ExternalToolsUrls

    bconf = create_native_baseconfig()

    ext_urls = ExternalToolsUrls()

    sconf = SpearsConfig()
    sconf.britneyGitOriginUrl = ext_urls.britney_git_repository

    session = session_factory()
    migration_entries = session.query(SpearsMigrationEntry).all()
    mdict = {}
    for entry in migration_entries:
        centry = SpearsConfigEntry()
        centry.sourceSuites = entry.source_suites
        centry.targetSuite = entry.target_suite

        d = {}
        for k, v in entry.delays.items():
            d[int_to_versionpriority(int(k))] = int(v)
        centry.delays = d

        hints = session.query(SpearsHint).filter(SpearsHint.migration_id == entry.idname).all()
        chints = []
        for hint in hints:
            chint = LknSpearsHint()
            chint.hint = hint.hint
            chint.reason = hint.reason
            chint.date = hint.time
            chints.append(chint)
        centry.hints = chints

        mdict[entry.idname] = centry
    sconf.migrations = mdict

    suites = get_suiteinfo_all_suites()

    return bconf, sconf, suites
def ask_settings(options):
    from laniakea.db import SpearsMigrationEntry, VersionPriority

    print_header('Configuring settings for Spears (migrations)')

    session = session_factory()

    add_migration = True
    while add_migration:
        entry = SpearsMigrationEntry()

        entry.source_suites = input_list('Migrate from suites (source names)')
        entry.target_suite = input_str('Migrate to suite (target name)')

        entry.delays = {}
        for prio in VersionPriority:
            entry.delays[int(prio)] = input_int('Delay for packages of priority "{}" in days'.format(repr(prio)))

        # FIXME: We need to check for uniqueness of the migration task!
        entry.idname = entry.make_migration_id()
        session.add(entry)
        session.commit()

        add_migration = input_bool('Add another migration task?')
def schedule_builds_for_suite(repo_name, incoming_suite_name, simulate=False,
                              limit_architecture=None, limit_count=0):
    ''' Schedule builds for packages in a particular suite. '''

    session = session_factory()

    repo = session.query(ArchiveRepository) \
        .filter(ArchiveRepository.name == repo_name).one()
    incoming_suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.name == incoming_suite_name).one_or_none()
    if not incoming_suite:
        log.error('Incoming suite "{}" was not found in the database.'.format(incoming_suite_name))
        return False

    src_packages = get_newest_sources_index(session, repo, incoming_suite)

    arch_all = None
    for arch in incoming_suite.architectures:
        if arch.name == 'all':
            arch_all = arch
            break
    if not arch_all:
        log.warning('Suite "{}" does not have arch:all in its architecture set, '
                    'some packages can not be built.'.format(incoming_suite.name))

    if simulate:
        log.info('Simulation, not scheduling any actual builds.')
    if limit_architecture:
        log.info('Only scheduling builds for architecture "{}".'.format(limit_architecture))
    if limit_count > 0:
        log.info('Only scheduling at most {} builds.'.format(limit_count))

    scheduled_count = 0
    for spkg in src_packages.values():
        # if the package is arch:all only, it needs a dedicated build job
        if len(spkg.architectures) == 1 and spkg.architectures[0] == 'all':
            if not arch_all:
                continue
            if limit_architecture and limit_architecture != 'all':
                continue  # Skip, we are not scheduling builds for arch:all

            if schedule_build_for_arch(session, repo, spkg, arch_all, incoming_suite, simulate):
                scheduled_count += 1
                if limit_count > 0 and scheduled_count >= limit_count:
                    break
            continue

        # deal with all other architectures
        for arch in incoming_suite.architectures:
            # The pseudo-architecture arch:all is treated specially
            if arch.name == 'all':
                continue
            if limit_architecture and limit_architecture != arch.name:
                continue  # Skip, we are not scheduling builds for this architecture

            if schedule_build_for_arch(session, repo, spkg, arch, incoming_suite, simulate):
                scheduled_count += 1
                if limit_count > 0 and scheduled_count >= limit_count:
                    break

        if limit_count > 0 and scheduled_count >= limit_count:
            break

    # cleanup
    delete_orphaned_jobs(session, simulate)

    # write all changes to database
    session.commit()

    log.info('Scheduled {} build jobs.'.format(scheduled_count))
    return True
def schedule_builds_for_suite(repo_name, incoming_suite_name, simulate=False,
                              limit_architecture=None, limit_count=0):
    ''' Schedule builds for packages in a particular suite. '''

    session = session_factory()

    # where to build pure arch:all packages?
    arch_indep_affinity = config_get_value(LkModule.ARIADNE, 'indep_arch_affinity')

    repo = session.query(ArchiveRepository) \
        .filter(ArchiveRepository.name == repo_name).one()
    incoming_suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.name == incoming_suite_name).one_or_none()
    if not incoming_suite:
        log.error('Incoming suite "{}" was not found in the database.'.format(incoming_suite_name))
        return False

    src_packages = get_newest_sources_index(session, repo, incoming_suite)

    arch_all = None
    for arch in incoming_suite.architectures:
        if arch.name == 'all':
            arch_all = arch
            break
    if not arch_all:
        log.warning('Suite "{}" does not have arch:all in its architecture set, '
                    'some packages can not be built.'.format(incoming_suite.name))

    if simulate:
        log.info('Simulation, not scheduling any actual builds.')
    if limit_architecture:
        log.info('Only scheduling builds for architecture "{}".'.format(limit_architecture))
    if limit_count > 0:
        log.info('Only scheduling at most {} builds.'.format(limit_count))

    scheduled_count = 0
    for spkg in src_packages.values():
        # if the package is arch:all only, it needs a dedicated build job
        if len(spkg.architectures) == 1 and spkg.architectures[0] == 'all':
            if not arch_all:
                continue
            if limit_architecture and limit_architecture != 'all':
                continue  # Skip, we are not scheduling builds for arch:all

            # check if we can build the package on the current architecture
            if not any_arch_matches(arch_all.name, spkg.architectures):
                continue

            if schedule_build_for_arch(session, repo, spkg, arch_all, incoming_suite, simulate=simulate):
                scheduled_count += 1
                if limit_count > 0 and scheduled_count >= limit_count:
                    break
            continue

        # deal with all other architectures
        build_for_archs = []
        for arch in incoming_suite.architectures:
            # The pseudo-architecture arch:all is treated specially
            if arch.name == 'all':
                continue
            if limit_architecture and limit_architecture != arch.name:
                continue  # Skip, we are not scheduling builds for this architecture

            # check if we can build the package on the current architecture
            if any_arch_matches(arch.name, spkg.architectures):
                build_for_archs.append(arch)

        force_indep = False
        if len(build_for_archs) == 1 \
                and 'all' in spkg.architectures \
                and build_for_archs[0].name != arch_indep_affinity:
            # If we only build for one non-all architecture, and that is not already
            # our arch-indep affinity (in which case the arch:all packages would be built
            # regardless), we need to add a marker to enforce a build of arch-independent
            # packages on a non-affinity architecture.
            # The schedule function will take care to see if binaries for arch:all
            # already exist in that case.
            #
            # NOTE: We intentionally ignore the case where a package has an architecture
            # restriction like "all bar baz" where we can only build arch:foo - presumably
            # building this package won't be useful if we can only use the arch:all parts
            # of a package that's not for us in every other regard.
            force_indep = True

        for arch in build_for_archs:
            if schedule_build_for_arch(session, repo, spkg, arch, incoming_suite,
                                       enforce_indep=force_indep, arch_all=arch_all,
                                       simulate=simulate):
                scheduled_count += 1
                if limit_count > 0 and scheduled_count >= limit_count:
                    break

        if limit_count > 0 and scheduled_count >= limit_count:
            break

    # cleanup
    delete_orphaned_jobs(session, simulate)

    # write all changes to database
    session.commit()

    log.info('Scheduled {} build jobs.'.format(scheduled_count))
    return True
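# A rough sketch of the architecture matching that any_arch_matches() is used for
# above, assuming Debian-style architecture specs ('any', 'all', wildcards like
# 'linux-any', or concrete names). This is an illustration only; the real helper
# is expected to implement full dpkg wildcard semantics.
def arch_matches_sketch(arch_name, arch_specs):
    ''' Return True if the concrete architecture satisfies any spec in the list. '''
    for spec in arch_specs:
        if spec == arch_name or spec == 'any':
            return True
        # crude wildcard handling: treat 'os-any' as matching any concrete
        # (non-all) architecture, ignoring the OS prefix for brevity
        if spec.endswith('-any') and arch_name != 'all':
            return True
    return False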
def import_suite_packages(suite_name):
    # FIXME: Don't hardcode the "master" repository here, fully implement
    # the "multiple repositories" feature
    repo_name = 'master'

    session = session_factory()
    suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.name == suite_name).one()
    repo = session.query(ArchiveRepository) \
        .filter(ArchiveRepository.name == repo_name).one()

    lconf = LocalConfig()
    local_repo = Repository(lconf.archive_root_dir,
                            repo.name,
                            trusted_keyrings=[],
                            entity=repo)

    # we unconditionally trust the local repository - for now
    local_repo.set_trusted(True)

    # event emitted for message passing
    emitter = EventEmitter(LkModule.ARCHIVE)

    for component in suite.components:

        # fetch all source packages for the given repository
        # FIXME: Urgh... Can this be more efficient?
        existing_spkgs = dict()
        all_existing_src_packages = session.query(SourcePackage) \
            .options(joinedload(SourcePackage.suites)) \
            .filter(SourcePackage.repo_id == repo.id) \
            .filter(SourcePackage.component_id == component.id).all()
        for e_spkg in all_existing_src_packages:
            existing_spkgs[e_spkg.uuid] = e_spkg

        for spkg in local_repo.source_packages(suite, component):
            db_spkg = existing_spkgs.pop(spkg.uuid, None)
            if db_spkg:
                session.expunge(spkg)
                if suite in db_spkg.suites:
                    continue  # the source package is already registered with this suite
                db_spkg.suites.append(suite)
                _emit_package_event(emitter, 'source-package-published-in-suite', spkg,
                                    {'suite_new': suite.name})
                continue

            session.add(spkg)
            _emit_package_event(emitter, 'source-package-published', spkg)

        for old_spkg in existing_spkgs.values():
            if suite in old_spkg.suites:
                old_spkg.suites.remove(suite)
                _emit_package_event(emitter, 'source-package-suite-removed', old_spkg,
                                    {'suite_old': suite.name})
            if len(old_spkg.suites) <= 0:
                for f in old_spkg.files:
                    session.delete(f)
                session.delete(old_spkg)
                _emit_package_event(emitter, 'removed-source-package', old_spkg)

        # commit the source package changes already
        session.commit()

        for arch in suite.architectures:

            # Get all binary package UUID/suite-id combinations for the given architecture and suite
            # FIXME: Urgh... Can this be more efficient?
            bpkg_b = Bundle('bin_package', BinaryPackage.uuid)
            suite_b = Bundle('archive_suite', ArchiveSuite.id)
            existing_bpkgs = dict()
            for e_bpkg, suite_i in session.query(bpkg_b, suite_b) \
                    .filter(BinaryPackage.repo_id == repo.id) \
                    .filter(BinaryPackage.component_id == component.id) \
                    .filter(BinaryPackage.architecture_id == arch.id).join(BinaryPackage.suites):
                sl = existing_bpkgs.get(e_bpkg.uuid)
                if not sl:
                    existing_bpkgs[e_bpkg.uuid] = [suite_i.id]  # if there is just one suite, we may get a scalar here
                else:
                    sl.append(suite_i.id)

            # add information about regular binary packages
            existing_bpkgs = _register_binary_packages(session, repo, suite, component, arch, existing_bpkgs,
                                                       local_repo.binary_packages(suite, component, arch),
                                                       emitter)

            # add information about debian-installer packages
            existing_bpkgs = _register_binary_packages(session, repo, suite, component, arch, existing_bpkgs,
                                                       local_repo.installer_packages(suite, component, arch),
                                                       emitter)
            session.commit()

            for old_bpkg_uuid, suites in existing_bpkgs.items():
                suites_count = len(suites)
                if suite.id in suites:
                    rc = session.query(binpkg_suite_assoc_table) \
                        .filter(binpkg_suite_assoc_table.c.suite_id == suite.id) \
                        .filter(binpkg_suite_assoc_table.c.bin_package_uuid == old_bpkg_uuid) \
                        .delete(synchronize_session=False)
                    if rc > 0:
                        suites_count -= 1
                if suites_count <= 0:
                    # delete the old package, we don't need it anymore if it is in no suites
                    session.query(ArchiveFile) \
                        .filter(ArchiveFile.binpkg_id == old_bpkg_uuid).delete()
                    session.query(BinaryPackage) \
                        .filter(BinaryPackage.uuid == old_bpkg_uuid).delete()

                    # NOTE: We do not emit messages for removed binary packages, as they are usually
                    # deleted with their source package (unless we have an arch-specific removal) and we
                    # don't want to spam messages which may be uninteresting to current Laniakea modules.

            session.commit()

            # import new AppStream component metadata / delete old components
            update_appstream_data(session, local_repo, repo, suite, component, arch)

    # delete orphaned AppStream metadata
    for cpt in session.query(SoftwareComponent).filter(~SoftwareComponent.bin_packages.any()).all():
        session.delete(cpt)
    session.commit()
def command_migrate(options):
    ''' Run a Britney migration '''

    bconf, sconf, suites = get_spears_config()
    engine = SpearsEngine(bconf, sconf, suites)

    session = session_factory()

    excuses = []
    if options.suite1:
        if not options.suite2:
            print('Target suite parameter is missing!')
            sys.exit(1)

        ret, excuses = engine.runMigration(options.suite1, options.suite2)
        if not ret:
            sys.exit(2)

        # remove old excuses
        migration_id = '{}-to-{}'.format(options.suite1, options.suite2)
        session.query(SpearsExcuse).filter(SpearsExcuse.migration_id == migration_id).delete()
    else:
        migration_entries = session.query(SpearsMigrationEntry).all()
        for entry in migration_entries:
            print('\nRunning migration: {} to {}\n'.format('+'.join(entry.source_suites), entry.target_suite))
            ret, tmp_excuses = engine.runMigration('+'.join(entry.source_suites), entry.target_suite)
            if not ret:
                sys.exit(2)
            excuses.extend(tmp_excuses)

        # remove old excuses
        for entry in migration_entries:
            session.query(SpearsExcuse).filter(SpearsExcuse.migration_id == entry.make_migration_id()).delete()

    for ex in excuses:
        excuse = SpearsExcuse()
        #excuse.time = ex.date # noqa
        excuse.migration_id = ex.migrationId
        excuse.suite_source = ex.sourceSuite
        excuse.suite_target = ex.targetSuite
        excuse.is_candidate = ex.isCandidate
        excuse.source_package = ex.sourcePackage
        excuse.maintainer = ex.maintainer
        excuse.age_current = ex.age.currentAge
        excuse.age_required = ex.age.requiredAge
        excuse.version_new = ex.newVersion
        excuse.version_old = ex.oldVersion
        excuse.missing_archs_primary = ex.missingBuilds.primaryArchs
        excuse.missing_archs_secondary = ex.missingBuilds.secondaryArchs

        obins = []
        for ob in ex.oldBinaries:
            obin = SpearsOldBinaries()
            obin.pkg_version = ob.pkgVersion
            obin.binaries = ob.binaries
            obins.append(obin)
        excuse.set_old_binaries(obins)

        excuse.blocked_by = ex.reason.blockedBy
        excuse.migrate_after = ex.reason.migrateAfter
        excuse.manual_block = ex.reason.manualBlock
        excuse.other = ex.reason.other
        excuse.log_excerpt = ex.reason.logExcerpt

        session.add(excuse)
    session.commit()
def get_package_blacklist():
    session = session_factory()
    # each result row is a one-element tuple; the trailing comma unpacks it
    pkgnames = [value for value, in session.query(SyncBlacklistEntry.pkgname)]
    return pkgnames
def command_migrate(options):
    ''' Run a Britney migration '''

    bconf, sconf, suites = get_spears_config()
    engine = SpearsEngine(bconf, sconf, suites)

    session = session_factory()
    migration_entries = session.query(SpearsMigrationEntry).all()
    if options.suite1:
        # we have parameters, so limit which migration entries we act on
        if not options.suite2:
            print('Target suite parameter is missing!')
            sys.exit(1)

        migration_found = False
        migration_id = '{}-to-{}'.format(options.suite1, options.suite2)
        for entry in migration_entries:
            if entry.make_migration_id() == migration_id:
                migration_found = True
                migration_entries = [entry]
                break

        if not migration_found:
            print('Could not find migration recipe with ID "{}"'.format(migration_id))
            sys.exit(1)

    # event emitted for message publishing
    emitter = EventEmitter(LkModule.SPEARS)

    for entry in migration_entries:
        print('\nRunning migration: {} to {}\n'.format('+'.join(entry.source_suites), entry.target_suite))
        ret, n_excuses = engine.runMigration('+'.join(entry.source_suites), entry.target_suite)
        if not ret:
            sys.exit(2)

        migration_id = entry.make_migration_id()

        # list existing excuses
        existing_excuses = {}
        all_excuses = session.query(SpearsExcuse).filter(SpearsExcuse.migration_id == migration_id).all()
        for excuse in all_excuses:
            eid = '{}-{}:{}-{}/{}'.format(excuse.suite_source, excuse.suite_target,
                                          excuse.source_package, excuse.version_new, excuse.version_old)
            existing_excuses[eid] = excuse

        for ex in n_excuses:
            eid = '{}-{}:{}-{}/{}'.format(ex.sourceSuite, ex.targetSuite,
                                          ex.sourcePackage, ex.newVersion, ex.oldVersion)
            excuse = existing_excuses.pop(eid, None)
            if excuse:
                # the excuse already exists, so we just update it
                new_excuse = False
            else:
                new_excuse = True
                excuse = SpearsExcuse()
                #excuse.time = ex.date # noqa
                excuse.migration_id = migration_id
                excuse.suite_source = ex.sourceSuite
                excuse.suite_target = ex.targetSuite
                excuse.source_package = ex.sourcePackage
                excuse.version_new = ex.newVersion
                excuse.version_old = ex.oldVersion

            excuse.is_candidate = ex.isCandidate
            excuse.maintainer = ex.maintainer
            excuse.age_current = ex.age.currentAge
            excuse.age_required = ex.age.requiredAge
            excuse.missing_archs_primary = ex.missingBuilds.primaryArchs
            excuse.missing_archs_secondary = ex.missingBuilds.secondaryArchs

            obins = []
            for ob in ex.oldBinaries:
                obin = SpearsOldBinaries()
                obin.pkg_version = ob.pkgVersion
                obin.binaries = ob.binaries
                obins.append(obin)
            excuse.set_old_binaries(obins)

            excuse.blocked_by = ex.reason.blockedBy
            excuse.migrate_after = ex.reason.migrateAfter
            excuse.manual_block = ex.reason.manualBlock
            excuse.other = ex.reason.other
            excuse.log_excerpt = ex.reason.logExcerpt

            if new_excuse:
                excuse.uuid = uuid4()  # we need a UUID immediately to submit it in the event payload
                session.add(excuse)

                data = {'uuid': str(excuse.uuid),
                        'suite_source': excuse.suite_source,
                        'suite_target': excuse.suite_target,
                        'source_package': excuse.source_package,
                        'version_new': excuse.version_new,
                        'version_old': excuse.version_old}
                emitter.submit_event('new-excuse', data)

        for excuse in existing_excuses.values():
            data = {'uuid': str(excuse.uuid),
                    'suite_source': excuse.suite_source,
                    'suite_target': excuse.suite_target,
                    'source_package': excuse.source_package,
                    'version_new': excuse.version_new,
                    'version_old': excuse.version_old}
            emitter.submit_event('excuse-removed', data)
            session.delete(excuse)

        # add changes to the database early
        session.commit()

    # ensure everything is committed
    session.commit()
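# The event payload in command_migrate() above is assembled twice; a small
# hypothetical helper like this one could deduplicate it (not part of the
# original module):
def _excuse_event_data(excuse):
    ''' Payload shared by the 'new-excuse' and 'excuse-removed' events. '''
    return {'uuid': str(excuse.uuid),
            'suite_source': excuse.suite_source,
            'suite_target': excuse.suite_target,
            'source_package': excuse.source_package,
            'version_new': excuse.version_new,
            'version_old': excuse.version_old}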
def import_suite_packages(suite_name):
    # FIXME: Don't hardcode the "master" repository here, fully implement
    # the "multiple repositories" feature
    repo_name = 'master'

    session = session_factory()
    suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.name == suite_name).one()
    repo = session.query(ArchiveRepository) \
        .filter(ArchiveRepository.name == repo_name).one()

    lconf = LocalConfig()
    local_repo = Repository(lconf.archive_root_dir, lconf.cache_dir, repo_name, [])

    for component in suite.components:

        # fetch all source packages for the given repository
        # FIXME: Urgh... We need to do this better, this is not efficient.
        existing_spkgs = dict()
        all_existing_src_packages = session.query(SourcePackage) \
            .options(joinedload(SourcePackage.suites)) \
            .filter(SourcePackage.repo_id == repo.id) \
            .filter(SourcePackage.component_id == component.id).all()
        for e_spkg in all_existing_src_packages:
            existing_spkgs[e_spkg.uuid] = e_spkg

        for spi in local_repo.getSourcePackages(suite.name, component.name):
            spkg = SourcePackage()
            spkg.name = spi.name
            spkg.version = spi.ver
            spkg.repo = repo
            spkg.update_uuid()  # we can generate the uuid from name/version/repo-name now

            db_spkg = existing_spkgs.pop(spkg.uuid, None)
            if db_spkg:
                if suite in db_spkg.suites:
                    continue  # the source package is already registered with this suite
                db_spkg.suites.append(suite)
                continue

            # if we are here, the source package is completely new and is only in one suite
            spkg.suites = [suite]
            spkg.component = component
            spkg.architectures = spi.architectures
            spkg.standards_version = spi.standardsVersion
            spkg.format_version = spi.format
            spkg.homepage = spi.homepage
            spkg.vcs_browser = spi.vcsBrowser
            spkg.maintainer = spi.maintainer
            spkg.uploaders = spi.uploaders
            spkg.build_depends = spi.buildDepends
            spkg.directory = spi.directory

            binaries = []
            for b in spi.binaries:
                binfo = PackageInfo()
                binfo.deb_type = b.debType
                binfo.name = b.name
                binfo.version = b.ver
                binaries.append(binfo)
            spkg.binaries = binaries

            for fi in spi.files:
                f = ArchiveFile()
                f.fname = fi.fname
                f.size = fi.size
                f.sha256sum = fi.sha256sum
                spkg.files.append(f)

            session.add(spkg)

        for old_spkg in existing_spkgs.values():
            if suite in old_spkg.suites:
                old_spkg.suites.remove(suite)
            if len(old_spkg.suites) <= 0:
                for f in old_spkg.files:
                    session.delete(f)
                session.delete(old_spkg)

        # commit the source package changes already
        session.commit()

        for arch in suite.architectures:

            # Get all binary packages for the given architecture
            # FIXME: Urgh... We need to do this better, this is not efficient.
            existing_bpkgs = dict()
            for e_bpkg in session.query(BinaryPackage) \
                    .options(joinedload(BinaryPackage.suites)) \
                    .filter(BinaryPackage.repo_id == repo.id) \
                    .filter(BinaryPackage.component_id == component.id) \
                    .filter(BinaryPackage.architecture_id == arch.id).all():
                existing_bpkgs[e_bpkg.uuid] = e_bpkg

            # add information about regular binary packages
            existing_bpkgs = _register_binary_packages(session, repo, suite, component, arch, existing_bpkgs,
                                                       local_repo.getBinaryPackages(suite.name, component.name, arch.name))
            session.commit()

            # add information about debian-installer packages
            existing_bpkgs = _register_binary_packages(session, repo, suite, component, arch, existing_bpkgs,
                                                       local_repo.getInstallerPackages(suite.name, component.name, arch.name))
            session.commit()

            for old_bpkg in existing_bpkgs.values():
                if suite in old_bpkg.suites:
                    old_bpkg.suites.remove(suite)
                if len(old_bpkg.suites) <= 0:
                    session.delete(old_bpkg.pkg_file)
                    session.delete(old_bpkg)
            session.commit()

            # import new AppStream component metadata
            import_appstream_data(session, local_repo, repo, suite, component, arch)

    # delete orphaned AppStream metadata
    for cpt in session.query(SoftwareComponent).filter(~SoftwareComponent.bin_packages.any()).all():
        session.delete(cpt)
    session.commit()
def command_repo(options):
    ''' Import repository data '''

    suite_name = options.suite
    if not suite_name:
        print('Suite parameter is missing!')
        sys.exit(1)

    # FIXME: Don't hardcode the "master" repository here, fully implement
    # the "multiple repositories" feature
    repo_name = 'master'

    session = session_factory()
    suite = session.query(ArchiveSuite) \
        .filter(ArchiveSuite.name == suite_name).one()
    repo = session.query(ArchiveRepository) \
        .filter(ArchiveRepository.name == repo_name).one()

    lconf = LocalConfig()
    local_repo = Repository(lconf.archive_root_dir, lconf.cache_dir, repo_name, [])

    for component in suite.components:

        # fetch all source packages for the given repository
        # FIXME: Urgh... We need to do this better, this is not efficient.
        existing_spkgs = dict()
        all_existing_src_packages = session.query(SourcePackage) \
            .filter(SourcePackage.repo_id == repo.id).all()
        for e_spkg in all_existing_src_packages:
            existing_spkgs[e_spkg.uuid] = e_spkg

        for spi in local_repo.getSourcePackages(suite.name, component.name):
            spkg = SourcePackage()
            spkg.name = spi.name
            spkg.version = spi.ver
            spkg.repo = repo
            spkg.update_uuid()  # we can generate the uuid from name/version/repo-name now

            db_spkg = existing_spkgs.pop(spkg.uuid, None)
            if db_spkg:
                if suite in db_spkg.suites:
                    continue  # the source package is already registered with this suite
                db_spkg.suites.append(suite)
                continue

            # if we are here, the source package is completely new and is only in one suite
            spkg.suites = [suite]
            spkg.component = component
            spkg.architectures = spi.architectures
            spkg.standards_version = spi.standardsVersion
            #spkg.pkgformat = spi.pkgformat # noqa
            spkg.homepage = spi.homepage
            spkg.vcs_browser = spi.vcsBrowser
            spkg.maintainer = spi.maintainer
            spkg.uploaders = spi.uploaders
            spkg.build_depends = spi.buildDepends
            spkg.directory = spi.directory

            for fi in spi.files:
                f = ArchiveFile()
                f.fname = fi.fname
                f.size = fi.size
                f.sha256sum = fi.sha256sum
                spkg.files.append(f)

            session.add(spkg)

        for old_spkg in existing_spkgs.values():
            if suite in old_spkg.suites:
                old_spkg.suites.remove(suite)
            if len(old_spkg.suites) <= 0:
                for f in old_spkg.files:
                    session.delete(f)
                session.delete(old_spkg)

        # commit the source package changes already
        session.commit()

        for arch in suite.architectures:

            # Get all binary packages for the given architecture
            # FIXME: Urgh... We need to do this better, this is not efficient.
            existing_bpkgs = dict()
            for e_bpkg in session.query(BinaryPackage) \
                    .filter(BinaryPackage.repo_id == repo.id) \
                    .filter(BinaryPackage.architecture_id == arch.id).all():
                existing_bpkgs[e_bpkg.uuid] = e_bpkg

            # add information about regular binary packages
            existing_bpkgs = _register_binary_packages(session, repo, suite, component, arch, existing_bpkgs,
                                                       local_repo.getBinaryPackages(suite.name, component.name, arch.name))
            session.commit()

            # add information about debian-installer packages
            existing_bpkgs = _register_binary_packages(session, repo, suite, component, arch, existing_bpkgs,
                                                       local_repo.getInstallerPackages(suite.name, component.name, arch.name))
            session.commit()

            for old_bpkg in existing_bpkgs.values():
                if suite in old_bpkg.suites:
                    old_bpkg.suites.remove(suite)
                if len(old_bpkg.suites) <= 0:
                    session.delete(old_bpkg.pkg_file)
                    session.delete(old_bpkg)
            session.commit()