def ask_settings(options):
    print_header('Configuring base settings for Synchrotron')

    print_section('Add synchronization sources')
    source_distro_name = input_str('Name of the source distribution')
    source_repo_url = input_str('Source repository URL')

    add_suite = input_bool('Add a new source suite?')
    while add_suite:
        with session_scope() as session:
            sync_source = SynchrotronSource()
            sync_source.os_name = source_distro_name
            sync_source.repo_url = source_repo_url
            sync_source.suite_name = input_str('Adding a new source suite. Please set a name')
            sync_source.components = input_list('List of components for suite \'{}\''.format(sync_source.suite_name))
            sync_source.architectures = input_list('List of architectures for suite \'{}\''.format(sync_source.suite_name))
            session.add(sync_source)
        add_suite = input_bool('Add another suite?')

    print_section('Add sync tasks')
    add_sync_tasks = True
    while add_sync_tasks:
        with session_scope() as session:
            autosync = SynchrotronConfig()

            sync_source = None
            while not sync_source:
                src_suite = input_str('Source suite name')
                sync_source = session.query(SynchrotronSource) \
                    .filter(SynchrotronSource.suite_name == src_suite).one_or_none()
                if not sync_source:
                    print_note('Could not find sync source with suite name "{}"'.format(src_suite))
            autosync.source = sync_source

            dest_suite = None
            while not dest_suite:
                dest_suite_name = input_str('Destination suite name')
                dest_suite = session.query(ArchiveSuite) \
                    .filter(ArchiveSuite.name == dest_suite_name).one_or_none()
                if not dest_suite:
                    print_note('Could not find suite with name "{}"'.format(dest_suite_name))
            autosync.destination_suite = dest_suite

            autosync.sync_auto_enabled = input_bool('Enable automatic synchronization?')
            autosync.sync_enabled = input_bool('Enable synchronization?')
            autosync.sync_binaries = input_bool('Synchronize binary packages?')
            session.add(autosync)
        add_sync_tasks = input_bool('Add another sync task?')
def section_view(suite_name, section_name, page):
    with session_scope() as session:
        suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == suite_name) \
            .one_or_none()
        if not suite:
            abort(404)

        pkgs_per_page = 50
        pkg_query = session.query(BinaryPackage) \
            .filter(BinaryPackage.suites.any(ArchiveSuite.id == suite.id)) \
            .filter(BinaryPackage.section == section_name) \
            .distinct(BinaryPackage.name, BinaryPackage.version) \
            .order_by(BinaryPackage.name)
        pkgs_total = pkg_query.count()
        page_count = math.ceil(pkgs_total / pkgs_per_page)

        packages = pkg_query.options(joinedload(BinaryPackage.component)) \
            .slice((page - 1) * pkgs_per_page, page * pkgs_per_page) \
            .all()

        return render_template('section_view.html',
                               section_name=section_name,
                               suite=suite,
                               packages=packages,
                               pkgs_per_page=pkgs_per_page,
                               pkgs_total=pkgs_total,
                               current_page=page,
                               page_count=page_count)
def details(cid):
    with session_scope() as session:
        # FIXME: Fetch all components with the ID and display them by version
        sw = session.query(SoftwareComponent) \
            .options(joinedload(SoftwareComponent.bin_packages)
                     .joinedload(BinaryPackage.suites)) \
            .filter(SoftwareComponent.cid == cid) \
            .first()
        if not sw:
            abort(404)

        # FIXME: This loop is probably inefficient too...
        packages_map = dict()
        for bpkg in sw.bin_packages:
            for suite in bpkg.suites:
                if suite.name not in packages_map:
                    packages_map[suite.name] = list()
                packages_map[suite.name].append(bpkg)

        # parse AppStream metadata
        # FIXME: Parsing XML is expensive, we can cache this aggressively
        cpt = sw.load()
        screenshots = cpt.get_screenshots()

        return render_template('software/sw_details.html',
                               AppStream=AppStream,
                               screenshot_get_orig_image_url=screenshot_get_orig_image_url,
                               sw=sw,
                               cpt=cpt,
                               component_id=cid,
                               packages_map=packages_map,
                               screenshots=screenshots)
def process_client_message(self, request):
    ''' Process the message / request of a Spark worker. '''

    req_kind = request.get('request')
    if not req_kind:
        return self._error_reply('Request was malformed.')

    try:
        with session_scope() as session:
            if req_kind == 'job':
                return self._process_job_request(session, request)
            if req_kind == 'job-accepted':
                return self._process_job_accepted_request(session, request)
            if req_kind == 'job-rejected':
                return self._process_job_rejected_request(session, request)
            if req_kind == 'job-status':
                self._process_job_status_request(session, request)
                return None
            if req_kind == 'job-success':
                return self._process_job_finished_request(session, request, True)
            if req_kind == 'job-failed':
                return self._process_job_finished_request(session, request, False)
            return self._error_reply('Request type is unknown.')
    except Exception as e:
        import traceback
        log.error('Failed to handle request: {} => {}'.format(str(request), str(e)))
        traceback.print_exc()
        return self._error_reply('Failed to handle request: {}'.format(str(e)))

    return None
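# A small usage sketch for the dispatcher above. Only the 'request' key is confirmed by
# the code; the other payload fields and the `lighthouse_worker` name are hypothetical
# placeholders, not the authoritative Spark worker wire format.
example_request = {
    'request': 'job-status',                              # one of the request kinds handled above
    'uuid': '00000000-0000-0000-0000-000000000000',        # hypothetical job identifier field
    'log_excerpt': '...',                                  # hypothetical status payload field
}
# reply = lighthouse_worker.process_client_message(example_request)
# A 'job-status' message yields no reply (None); most other kinds return a reply dict.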
def search_software():
    term = request.args.get('term')
    if not term:
        flash('The search term was invalid.')
        return redirect(url_for('portal.index'))

    with session_scope() as session:
        q = session.query(SoftwareComponent) \
            .join(SoftwareComponent.bin_packages) \
            .filter(SoftwareComponent.__ts_vector__.op('@@')(func.plainto_tsquery(term))) \
            .order_by(SoftwareComponent.cid, BinaryPackage.version.desc()) \
            .distinct(SoftwareComponent.cid)

        results_count = q.count()
        software = q.all()

        # all results are currently rendered on a single page
        results_per_page = results_count
        page_count = math.ceil(results_count / results_per_page) if results_per_page > 0 else 1

        return render_template('software_search_results.html',
                               term=term,
                               results_count=results_count,
                               results_per_page=results_per_page,
                               page_count=page_count,
                               software=software)
def src_package_details(suite_name, name):
    with session_scope() as session:
        suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == suite_name) \
            .one_or_none()
        if not suite:
            abort(404)

        spkgs = session.query(SourcePackage) \
            .options(undefer(SourcePackage.version)) \
            .filter(SourcePackage.suites.any(ArchiveSuite.id == suite.id)) \
            .filter(SourcePackage.name == name) \
            .order_by(SourcePackage.version.desc()) \
            .all()
        if not spkgs:
            abort(404)

        suites = [s[0] for s in session.query(ArchiveSuite.name.distinct())
                  .filter(ArchiveSuite.src_packages.any(SourcePackage.name == name))
                  .all()]

        spkg_rep = spkgs[0]  # the first package is always the most recent one
        broken_archs = architectures_with_issues_for_spkg(suite, spkg_rep)
        migration_infos = migration_excuse_info(spkg_rep, suite_name)

        return render_template('packages/src_details.html',
                               pkg=spkg_rep,
                               pkgs_all=spkgs,
                               pkg_suite_name=suite_name,
                               suites=suites,
                               broken_archs=broken_archs,
                               migration_infos=migration_infos,
                               make_linked_dependency=make_linked_dependency)
def workers():
    with session_scope() as session:
        workers = session.query(SparkWorker).all()

        return render_template('jobs/workers.html',
                               workers=workers,
                               humanized_timediff=humanized_timediff)
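# A minimal sketch of the `humanized_timediff` helper that the views above and below pass
# to their templates; this is an assumption about its behaviour inferred from the name and
# usage, not the project's real implementation.
from datetime import datetime, timezone

def humanized_timediff_sketch(timestamp):
    ''' Render the distance between `timestamp` and now as a rough human-readable string. '''
    if not timestamp:
        return 'never'
    delta = datetime.now(timezone.utc) - timestamp.replace(tzinfo=timezone.utc)
    seconds = int(delta.total_seconds())
    for unit_seconds, unit_name in ((86400, 'day'), (3600, 'hour'), (60, 'minute')):
        if seconds >= unit_seconds:
            value = seconds // unit_seconds
            return '{} {}{} ago'.format(value, unit_name, 's' if value != 1 else '')
    return 'just now'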
def issue_details(suite_name, uuid):
    if not is_uuid(uuid):
        abort(404)

    with session_scope() as session:
        suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == suite_name) \
            .one_or_none()
        if not suite:
            abort(404)

        issue = session.query(DebcheckIssue) \
            .filter(DebcheckIssue.suite_id == suite.id) \
            .filter(DebcheckIssue.uuid == uuid) \
            .one_or_none()
        if not issue:
            abort(404)

        missing = issue.get_issues_missing()
        conflicts = issue.get_issues_conflicts()
        ptype = 'source' if issue.package_type == PackageType.SOURCE else 'binary'

        return render_template('depcheck/issue.html',
                               PackageType=PackageType,
                               ptype=ptype,
                               issue=issue,
                               arch_name=issue.architecture,
                               suite=suite,
                               missing=missing,
                               conflicts=conflicts)
def issue_details(suite_name, uuid):
    if not is_uuid(uuid):
        abort(404)

    with session_scope() as session:
        suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == suite_name) \
            .one_or_none()
        if not suite:
            abort(404)

        issue = session.query(DebcheckIssue) \
            .filter(DebcheckIssue.suite_id == suite.id) \
            .filter(DebcheckIssue.uuid == uuid) \
            .one_or_none()
        if not issue:
            abort(404)

        # cache information (as it has to be decoded from JSON)
        missing = issue.missing
        conflicts = issue.conflicts
        ptype = 'source' if issue.package_type == PackageType.SOURCE else 'binary'

        return render_template('depcheck/issue.html',
                               PackageType=PackageType,
                               ptype=ptype,
                               issue=issue,
                               arch_name=', '.join(issue.architectures),
                               suite=suite,
                               missing=missing,
                               conflicts=conflicts)
def queue(page):
    with session_scope() as session:
        jobs_per_page = 50
        jobs_total = session.query(Job) \
            .filter(Job.status != JobStatus.DONE) \
            .filter(Job.status != JobStatus.TERMINATED) \
            .count()
        page_count = math.ceil(jobs_total / jobs_per_page)

        jobs = session.query(Job) \
            .filter(Job.status != JobStatus.DONE) \
            .filter(Job.status != JobStatus.TERMINATED) \
            .order_by(Job.time_created) \
            .slice((page - 1) * jobs_per_page, page * jobs_per_page) \
            .all()

        return render_template('jobs/queue.html',
                               JobStatus=JobStatus,
                               humanized_timediff=humanized_timediff,
                               session=session,
                               title_for_job=title_for_job,
                               jobs=jobs,
                               jobs_per_page=jobs_per_page,
                               jobs_total=jobs_total,
                               current_page=page,
                               page_count=page_count)
def add_image_recipe(options):
    print_header('Add new ISO/IMG image build recipe')

    with session_scope() as session:
        recipe = ImageBuildRecipe()
        recipe.distribution = input_str('Name of the distribution to build the image for')
        recipe.suite = input_str('Name of the suite to build the image for')
        recipe.flavor = input_str('Flavor to build')
        recipe.architectures = input_list('List of architectures to build for')

        while True:
            kind_str = input_str('Type of image that we are building (iso/img)').lower()
            if kind_str == 'iso':
                recipe.kind = ImageKind.ISO
                break
            if kind_str == 'img':
                recipe.kind = ImageKind.IMG
                break
            print_note('The selected image kind is unknown.')

        recipe.git_url = input_str('Git repository URL containing the image build configuration')
        recipe.result_move_to = input_str('Place to move the build result to (placeholders like %{DATE} are allowed)')

        # ensure we have a name
        recipe.regenerate_name()

        # add recipe to the database
        session.add(recipe)
        session.commit()

        print_done('Created recipe with name: {}'.format(recipe.name))
def get_sync_config():
    import laniakea.native
    from laniakea.native import SyncSourceSuite, create_native_baseconfig

    lconf = LocalConfig()
    bconf = create_native_baseconfig()

    with session_scope() as session:
        sync_sources = session.query(SynchrotronSource).all()

        # FIXME: SynchrotronConfig needs adjustments in the D code to work
        # better with the new "multiple autosync tasks" model.
        # Maybe when doing this there's a good opportunity to rewrite some of
        # the D code in Python...
        sconf = laniakea.native.SynchrotronConfig()
        sconf.sourceName = sync_sources[0].os_name
        sconf.syncBinaries = False
        sconf.sourceKeyrings = lconf.synchrotron_sourcekeyrings

        sconf.source.defaultSuite = None
        sconf.source.repoUrl = sync_sources[0].repo_url

        source_suites = []
        for sd in sync_sources:
            sssuite = SyncSourceSuite()
            sssuite.name = sd.suite_name
            sssuite.architectures = sd.architectures
            sssuite.components = sd.components
            source_suites.append(sssuite)
        sconf.source.suites = source_suites

    return bconf, sconf
def job_retry(options):
    job_uuid = options.retry
    if not job_uuid:
        print('No job ID to retry was set!')
        sys.exit(1)

    with session_scope() as session:
        job = session.query(Job) \
            .options(undefer(Job.status)) \
            .options(undefer(Job.result)) \
            .filter(Job.uuid == job_uuid) \
            .one_or_none()
        if not job:
            print('Did not find job with ID "{}"'.format(job_uuid))
            sys.exit(1)

        if job.status == JobStatus.WAITING:
            print_note('Job is already waiting to be scheduled. Doing nothing.')
            sys.exit(2)
        if job.status == JobStatus.SCHEDULED:
            print_note('Job is already scheduled. Doing nothing.')
            sys.exit(2)
        if job.status == JobStatus.RUNNING:
            print_note('Job is currently running. Doing nothing.')
            sys.exit(2)

        # if we are here, it should be safe to reschedule the job
        job.status = JobStatus.WAITING
        job.result = JobResult.UNKNOWN
        job.time_assigned = None
        job.time_finished = None
        job.latest_log_excerpt = None

        print_note('Job {}/{}::{} was rescheduled.'.format(str(job.module), str(job.kind), str(job.uuid)))
def index():
    with session_scope() as session:
        issues = session.query(SynchrotronIssue).all()

        return render_template('synchronization/index.html',
                               issues=issues,
                               SyncIssueKind=SynchrotronIssueKind)
def run_migration(self, source_suite_name: str, target_suite_name: str):
    with session_scope() as session:
        migration_entries = session.query(SpearsMigrationEntry).all()
        if source_suite_name:
            # we have parameters, so limit which migration entries we act on
            if not target_suite_name:
                log.error('Target suite parameter is missing!')
                return False

            migration_found = False
            migration_id = '{}-to-{}'.format(source_suite_name, target_suite_name)
            for entry in migration_entries:
                if entry.make_migration_id() == migration_id:
                    migration_found = True
                    migration_entries = [entry]
                    break

            if not migration_found:
                log.error('Could not find migration recipe with ID "{}"'.format(migration_id))
                return False

        return self._run_migration_for_entries(session, migration_entries)
def migration_excuse_info(spkg, suite_name):
    with session_scope() as session:
        qres = session.query(SpearsExcuse.uuid,
                             SpearsExcuse.version_new,
                             SpearsExcuse.suite_source,
                             SpearsExcuse.suite_target,
                             SpearsExcuse.age_current,
                             SpearsExcuse.age_required) \
            .filter(or_(SpearsExcuse.suite_source == suite_name,
                        SpearsExcuse.suite_target == suite_name)) \
            .filter(SpearsExcuse.source_package == spkg.name) \
            .all()
        if not qres:
            return []

        infos = []
        for e in qres:
            # skip excuses that have no age information yet
            if e[4] is None:
                continue
            stuck = e[4] >= e[5]  # current age >= required age
            infos.append({'uuid': e[0],
                          'version_new': e[1],
                          'source': e[2],
                          'target': e[3],
                          'stuck': stuck})
        return infos
def pgsql_test_available(session_scope):
    ''' test if PostgreSQL is available with the current configuration '''
    try:
        with session_scope() as session:
            session.execute('SELECT CURRENT_TIME;')
    except Exception:  # noqa: E722
        return False
    return True
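# A minimal sketch of the `session_scope` context manager used throughout these functions,
# assuming the common SQLAlchemy sessionmaker pattern. The database URL and error policy
# shown here are placeholders; the project's real implementation may differ.
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

_engine = create_engine('postgresql://localhost/laniakea')  # hypothetical connection URL
_SessionFactory = sessionmaker(bind=_engine)

@contextmanager
def session_scope_sketch():
    ''' Provide a transactional scope around a series of database operations. '''
    session = _SessionFactory()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()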
def __init__(self):
    self._lconf = LocalConfig()
    self._arch_indep_affinity = config_get_value(LkModule.ARIADNE, 'indep_arch_affinity')

    with session_scope() as session:
        # FIXME: We need much better ways to select the right suite to synchronize with
        incoming_suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.accept_uploads == True).one()  # noqa: E712
        self._incoming_suite_name = incoming_suite.name
def all_architectures():
    with session_scope() as session:
        arches = session.query(ArchiveArchitecture) \
            .options(undefer(ArchiveArchitecture.id)) \
            .options(undefer(ArchiveArchitecture.name)) \
            .all()
        # detach the instances from the session so they stay usable after it is closed
        for a in arches:
            session.expunge(a)
        return arches
def command_binaries(options):
    ''' Check binary packages '''
    with session_scope() as session:
        debcheck, repo, scan_suites = _create_debcheck(session, options.suite)

        for suite in scan_suites:
            issues = debcheck.depcheck_issues(suite)
            _update_debcheck_issues(session, repo, suite, issues, PackageType.BINARY)
def index():
    with session_scope() as session:
        entries = session.query(SpearsMigrationEntry).all()

        migrations = []
        for e in entries:
            migrations.append({'id': e.idname,
                               'from': ', '.join(e.source_suites),
                               'to': e.target_suite})

        return render_template('migrations/index.html', migrations=migrations)
def command_sources(options):
    ''' Check source packages '''
    with session_scope() as session:
        debcheck, repo, scan_suites = _create_debcheck(session, options.suite)

        for suite in scan_suites:
            issues = debcheck.build_depcheck_issues(suite)
            _update_debcheck_issues(session, repo, suite, issues, PackageType.SOURCE)
def index():
    with session_scope() as session:
        recipes = session.query(ImageBuildRecipe).all()

        return render_template('osimages/index.html',
                               session=session,
                               last_jobs_for_recipe=last_jobs_for_recipe,
                               humanized_timediff=humanized_timediff,
                               ImageFormat=ImageFormat,
                               JobResult=JobResult,
                               recipes=recipes)
def remove_blacklist_entry(pkgname):
    with session_scope() as session:
        # delete existing entry in case it exists
        entry = session.query(SyncBlacklistEntry) \
            .filter(SyncBlacklistEntry.pkgname == pkgname).one_or_none()
        if entry:
            session.delete(entry)
        else:
            print_note('The selected package was not in blacklist. Nothing was removed.')
def view_excuse(uuid):
    if not is_uuid(uuid):
        abort(404)

    with session_scope() as session:
        excuse = session.query(SpearsExcuse).filter(SpearsExcuse.uuid == uuid).one_or_none()
        if not excuse:
            abort(404)

        migration = {'idname': excuse.migration_id}

        return render_template('migrations/excuse.html', excuse=excuse, migration=migration)
def sections_index(suite_name):
    with session_scope() as session:
        suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == suite_name) \
            .one_or_none()
        if not suite:
            abort(404)

        sections = get_archive_sections()

        return render_template('sections_index.html', suite=suite, sections=sections)
def __init__(self, event_pub_queue):
    self._lconf = LocalConfig()
    self._arch_indep_affinity = config_get_value(LkModule.ARIADNE, 'indep_arch_affinity')
    self._event_pub_queue = event_pub_queue

    with session_scope() as session:
        # FIXME: We need much better ways to select the right suite to synchronize with
        incoming_suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.accept_uploads == True) \
            .order_by(ArchiveSuite.name) \
            .first()  # noqa: E712
        self._default_incoming_suite_name = incoming_suite.name
def add_blacklist_entry(pkgname, reason):
    with session_scope() as session:
        # update the existing entry in case we already have one for this package
        entry = session.query(SyncBlacklistEntry) \
            .filter(SyncBlacklistEntry.pkgname == pkgname).one_or_none()
        if entry:
            print_note('Updating existing entry for this package.')
        else:
            entry = SyncBlacklistEntry()
            session.add(entry)

        entry.pkgname = pkgname
        entry.reason = reason
def architectures_with_issues_for_spkg(suite, spkg):
    with session_scope() as session:
        results = session.query(DebcheckIssue.architectures.distinct()) \
            .filter(DebcheckIssue.package_type == PackageType.SOURCE) \
            .filter(DebcheckIssue.suite_id == suite.id) \
            .filter(DebcheckIssue.package_name == spkg.name) \
            .filter(DebcheckIssue.package_version == spkg.version) \
            .all()

        arches = set()
        for r in results:
            arches.update(r[0])
        return arches
def add_image_recipe(options):
    print_header('Add new ISO/IMG image build recipe')

    with session_scope() as session:
        recipe = ImageBuildRecipe()
        recipe.distribution = input_str('Name of the distribution to build the image for')
        recipe.suite = input_str('Name of the suite to build the image for')
        recipe.flavor = input_str('Flavor to build')
        recipe.architectures = input_list('List of architectures to build for')

        while True:
            kind_str = input_str('Type of image that we are building (iso/img)').lower()
            if kind_str == 'iso':
                recipe.kind = ImageKind.ISO
                break
            if kind_str == 'img':
                recipe.kind = ImageKind.IMG
                break
            print_note('The selected image kind is unknown.')

        recipe.git_url = input_str('Git repository URL containing the image build configuration')
        recipe.result_move_to = input_str('Place to move the build result to (placeholders like %{DATE} are allowed)')

        # ensure we have a name
        recipe.regenerate_name()

        # add recipe to the database
        session.add(recipe)
        session.commit()

        # announce the event
        emitter = EventEmitter(LkModule.ADMINCLI)
        ev_data = {'name': recipe.name,
                   'kind': kind_str,
                   'architectures': recipe.architectures,
                   'distribution': recipe.distribution,
                   'suite': recipe.suite,
                   'flavor': recipe.flavor}
        emitter.submit_event_for_mod(LkModule.ISOTOPE, 'recipe-created', ev_data)

        print_done('Created recipe with name: {}'.format(recipe.name))
def view_excuse(uuid):
    if not is_uuid(uuid):
        abort(404)

    with session_scope() as session:
        excuse = session.query(SpearsExcuse) \
            .filter(SpearsExcuse.uuid == uuid).one_or_none()
        if not excuse:
            abort(404)

        return render_template('packages/excuse_details.html',
                               excuse=excuse,
                               link_for_bin_package_id=link_for_bin_package_id)
def build_details(uuid):
    if not is_uuid(uuid):
        abort(404)

    with session_scope() as session:
        job = session.query(Job).filter(Job.uuid == uuid).one_or_none()
        if not job:
            abort(404)

        worker = session.query(SparkWorker).filter(SparkWorker.uuid == job.worker).one_or_none()

        log_url = None
        if job.result == JobResult.SUCCESS or job.result == JobResult.FAILURE:
            log_url = current_app.config['LOG_STORAGE_URL'] + '/' + \
                get_dir_shorthand_for_uuid(job.uuid) + '/' + str(job.uuid) + '.log'

        spkg = session.query(SourcePackage) \
            .filter(SourcePackage.source_uuid == job.trigger) \
            .filter(SourcePackage.version == job.version) \
            .one_or_none()
        if not spkg:
            abort(404)

        suite_name = 'unknown'
        dep_issues = []
        if job.data:
            suite = session.query(ArchiveSuite) \
                .filter(ArchiveSuite.name == job.data.get('suite')) \
                .one_or_none()
            if suite:
                suite_name = suite.name
                dep_issues = session.query(DebcheckIssue) \
                    .filter(DebcheckIssue.package_type == PackageType.SOURCE) \
                    .filter(DebcheckIssue.suite_id == suite.id) \
                    .filter(DebcheckIssue.package_name == spkg.name) \
                    .filter(DebcheckIssue.package_version == spkg.version) \
                    .filter(DebcheckIssue.architectures.overlap([job.architecture, 'any'])) \
                    .all()

        return render_template('packages/build_details.html',
                               humanized_timediff=humanized_timediff,
                               JobStatus=JobStatus,
                               JobResult=JobResult,
                               job=job,
                               worker=worker,
                               spkg=spkg,
                               dep_issues=dep_issues,
                               suite_name=suite_name,
                               log_url=log_url,
                               link_for_bin_package_id=link_for_bin_package_id)
def __init__(self, target_suite_name: str, source_suite_name: str):
    self._lconf = LocalConfig()
    self._dak = DakBridge()

    # FIXME: Don't hardcode this!
    repo_name = 'master'

    # the repository of the distribution we import stuff into
    self._target_repo = Repository(self._lconf.archive_root_dir, repo_name)
    self._target_repo.set_trusted(True)

    self._target_suite_name = target_suite_name
    self._source_suite_name = source_suite_name
    self._distro_tag = config_get_distro_tag()
    self._synced_source_pkgs: list[SourcePackage] = []

    with session_scope() as session:
        sync_source = session.query(SynchrotronSource) \
            .filter(SynchrotronSource.suite_name == self._source_suite_name).one()

        # FIXME: Synchrotron needs adjustments to work
        # better with the new "multiple autosync tasks" model.
        # This code will need to be revised for that
        # (currently it is just a 1:1 translation from D code)

        # the repository of the distribution we use to sync stuff from
        self._source_repo = Repository(sync_source.repo_url,
                                       sync_source.os_name,
                                       self._lconf.synchrotron_sourcekeyrings)

    # we trust everything by default
    self._imports_trusted = True

    with session_scope() as session:
        self._sync_blacklist = set([value for value, in session.query(SyncBlacklistEntry.pkgname)])
def accept_upload(conf, dud):
    ''' Accept the upload and move its data to the right places. '''

    job_success = dud.get('X-Spark-Success') == 'Yes'
    job_id = dud.get('X-Spark-Job')

    # mark job as accepted and done
    with session_scope() as session:
        job = session.query(Job).filter(Job.uuid == job_id).one_or_none()
        if not job:
            log.error('Unable to mark job \'{}\' as done: The Job was not found.'.format(job_id))

            # this is a weird situation, there is no proper way to handle it as this indicates a bug
            # in the Laniakea setup or some other oddity.
            # The least harmful thing to do is to just leave the upload alone and try again later.
            return

        job.result = JobResult.SUCCESS if job_success else JobResult.FAILURE

        # move the log file and Firehose reports to the log storage
        log_target_dir = os.path.join(conf.log_storage_dir, get_dir_shorthand_for_uuid(job_id))
        firehose_target_dir = os.path.join(log_target_dir, 'firehose')
        for fname in dud.get_files():
            if fname.endswith('.log'):
                os.makedirs(log_target_dir, exist_ok=True)

                # move the logfile to its destination and ensure it is named correctly
                target_fname = os.path.join(log_target_dir, job_id + '.log')
                safe_rename(fname, target_fname)
            elif fname.endswith('.firehose.xml'):
                os.makedirs(firehose_target_dir, exist_ok=True)

                # move the firehose report to its own directory and rename it
                fh_target_fname = os.path.join(firehose_target_dir, job_id + '.firehose.xml')
                safe_rename(fname, fh_target_fname)

        # some modules get special treatment
        if job_success:
            from .import_isotope import handle_isotope_upload

            if job.module == LkModule.ISOTOPE:
                handle_isotope_upload(session, conf, dud, job)

    # remove the upload description file from incoming
    os.remove(dud.get_dud_file())

    log.info('Upload {} accepted.'.format(dud.get_filename()))
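# A minimal sketch of the `safe_rename` helper used above, assuming it should move files
# across filesystem boundaries (shutil.move semantics) without silently overwriting an
# existing destination; the project's real helper may behave differently.
import os
import shutil

def safe_rename_sketch(src, dst):
    ''' Move `src` to `dst`, refusing to clobber an existing destination file. '''
    if os.path.exists(dst):
        raise FileExistsError('Refusing to overwrite existing file: {}'.format(dst))
    shutil.move(src, dst)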
def issue_list(suite_name, ptype, arch_name, page):
    if ptype == 'binary':
        package_type = PackageType.BINARY
    else:
        package_type = PackageType.SOURCE

    with session_scope() as session:
        suite = session.query(ArchiveSuite) \
            .filter(ArchiveSuite.name == suite_name) \
            .one_or_none()
        if not suite:
            abort(404)

        issues_per_page = 50
        issues_total = session.query(DebcheckIssue) \
            .filter(DebcheckIssue.package_type == package_type) \
            .filter(DebcheckIssue.suite_id == suite.id) \
            .filter(DebcheckIssue.architecture == arch_name) \
            .count()
        page_count = math.ceil(issues_total / issues_per_page)

        issues = session.query(DebcheckIssue) \
            .filter(DebcheckIssue.package_type == package_type) \
            .filter(DebcheckIssue.suite_id == suite.id) \
            .filter(DebcheckIssue.architecture == arch_name) \
            .order_by(DebcheckIssue.package_name) \
            .slice((page - 1) * issues_per_page, page * issues_per_page) \
            .all()

        return render_template('depcheck/issues_list.html',
                               ptype=ptype,
                               issues=issues,
                               suite=suite,
                               arch_name=arch_name,
                               issues_per_page=issues_per_page,
                               issues_total=issues_total,
                               current_page=page,
                               page_count=page_count)
def trigger_image_build(options):
    recipe_name = options.trigger_build

    with session_scope() as session:
        recipe = session.query(ImageBuildRecipe) \
            .filter(ImageBuildRecipe.name == recipe_name).one_or_none()
        if not recipe:
            print_note('Recipe with name "{}" was not found!'.format(recipe_name))
            sys.exit(2)

        job_count = 0
        for arch in recipe.architectures:
            job = Job()
            job.module = LkModule.ISOTOPE
            job.kind = JobKind.OS_IMAGE_BUILD
            job.trigger = recipe.uuid
            job.architecture = arch
            session.add(job)
            job_count += 1
        session.commit()

        print_done('Scheduled {} job(s) for {}.'.format(job_count, recipe.name))
def excuses_list(migration_id, page):
    with session_scope() as session:
        migration = session.query(SpearsMigrationEntry) \
            .filter(SpearsMigrationEntry.idname == migration_id).one()

        excuses_per_page = 50
        excuses_total = session.query(SpearsExcuse) \
            .filter(SpearsExcuse.migration_id == migration_id).count()
        page_count = math.ceil(excuses_total / excuses_per_page)

        excuses = session.query(SpearsExcuse) \
            .filter(SpearsExcuse.migration_id == migration_id) \
            .order_by(SpearsExcuse.source_package) \
            .slice((page - 1) * excuses_per_page, page * excuses_per_page) \
            .all()

        return render_template('migrations/excuses.html',
                               excuses=excuses,
                               migration=migration,
                               excuses_per_page=excuses_per_page,
                               excuses_total=excuses_total,
                               current_page=page,
                               page_count=page_count)
def blacklist():
    with session_scope() as session:
        entries = session.query(SyncBlacklistEntry).all()

        return render_template('synchronization/blacklist.html', entries=entries)
def index():
    with session_scope() as session:
        suites = session.query(ArchiveSuite).all()

        return render_template('depcheck/index.html', suites=suites)
def job(uuid):
    if not is_uuid(uuid):
        abort(404)

    with session_scope() as session:
        job = session.query(Job).filter(Job.uuid == uuid).one_or_none()
        if not job:
            abort(404)

        worker = session.query(SparkWorker).filter(SparkWorker.uuid == job.worker).one_or_none()

        log_url = None
        if job.result == JobResult.SUCCESS or job.result == JobResult.FAILURE:
            log_url = current_app.config['LOG_STORAGE_URL'] + '/' + \
                get_dir_shorthand_for_uuid(job.uuid) + '/' + str(job.uuid) + '.log'

        job_title = 'Job for {}'.format(job.module)
        if job.kind == JobKind.PACKAGE_BUILD:
            spkg = session.query(SourcePackage) \
                .filter(SourcePackage.source_uuid == job.trigger) \
                .filter(SourcePackage.version == job.version) \
                .one_or_none()
            if spkg:
                job_title = 'Build {} {} on {}'.format(spkg.name, job.version, job.architecture)

            suite_name = 'unknown'
            if job.data:
                suite_name = job.data.get('suite')

            return render_template('jobs/job_pkgbuild.html',
                                   humanized_timediff=humanized_timediff,
                                   JobStatus=JobStatus,
                                   JobResult=JobResult,
                                   job=job,
                                   job_title=job_title,
                                   worker=worker,
                                   spkg=spkg,
                                   suite_name=suite_name,
                                   log_url=log_url)
        elif job.kind == JobKind.OS_IMAGE_BUILD:
            recipe = session.query(ImageBuildRecipe) \
                .filter(ImageBuildRecipe.uuid == job.trigger).one_or_none()
            if recipe:
                job_title = 'OS Image {}'.format(recipe.name)

            return render_template('jobs/job_osimage.html',
                                   humanized_timediff=humanized_timediff,
                                   JobStatus=JobStatus,
                                   JobResult=JobResult,
                                   job=job,
                                   job_title=job_title,
                                   worker=worker,
                                   recipe=recipe,
                                   log_url=log_url)
        else:
            return render_template('jobs/job_generic.html',
                                   humanized_timediff=humanized_timediff,
                                   JobStatus=JobStatus,
                                   JobResult=JobResult,
                                   job=job,
                                   job_title=job_title,
                                   worker=worker,
                                   log_url=log_url)
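# Minimal sketches of two small helpers referenced throughout these views (`is_uuid` and
# `get_dir_shorthand_for_uuid`). Both are assumptions inferred from how they are called,
# not the project's authoritative implementations.
import uuid as uuid_module

def is_uuid_sketch(value):
    ''' Return True if `value` parses as a UUID string. '''
    try:
        uuid_module.UUID(str(value))
        return True
    except (ValueError, AttributeError, TypeError):
        return False

def get_dir_shorthand_for_uuid_sketch(job_uuid):
    ''' Derive a short directory prefix from a job UUID, e.g. for sharding log storage. '''
    s = str(job_uuid)
    return s[:2] if len(s) > 2 else s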