def preload_hook(rows):
    person_ids = set()
    work_items_by_spec = defaultdict(list)
    for spec in rows:
        if need_people:
            person_ids |= set(
                [spec._assigneeID, spec._approverID, spec._drafterID])
        if need_branches:
            get_property_cache(spec).linked_branches = []
    if need_workitems:
        work_items = load_referencing(
            SpecificationWorkItem, rows, ['specification_id'],
            extra_conditions=[SpecificationWorkItem.deleted == False])
        for workitem in work_items:
            person_ids.add(workitem.assignee_id)
            work_items_by_spec[workitem.specification_id].append(workitem)
    person_ids -= set([None])
    if need_people:
        list(getUtility(IPersonSet).getPrecachedPersonsFromIDs(
            person_ids, need_validity=True))
    if need_workitems:
        for spec in rows:
            get_property_cache(spec).work_items = sorted(
                work_items_by_spec[spec.id], key=lambda wi: wi.sequence)
    if need_branches:
        spec_branches = load_referencing(
            SpecificationBranch, rows, ['specificationID'])
        for sbranch in spec_branches:
            spec_cache = get_property_cache(sbranch.specification)
            spec_cache.linked_branches.append(sbranch)
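# For context, a pre-iteration hook like the one above is normally attached
# to the query's result set so that it fires once per batch rather than once
# per row.  A minimal sketch, assuming Launchpad's DecoratedResultSet and its
# pre_iter_hook parameter (`results` is a placeholder Storm result set):
from lp.services.database.decoratedresultset import DecoratedResultSet

decorated = DecoratedResultSet(results, pre_iter_hook=preload_hook)
for spec in decorated:
    # Attribute access here is served from the property cache that
    # preload_hook populated; no per-row queries are issued.
    spec.work_items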
def decoratedQueueBatch(self):
    """Return the current batch, converted to decorated objects.

    Each batch item, a PackageUpload, is converted to a
    CompletePackageUpload.  This avoids many additional SQL queries
    in the +queue template.
    """
    uploads = list(self.batchnav.currentBatch())
    if len(uploads) == 0:
        return None
    upload_ids = [upload.id for upload in uploads]
    puses = load_referencing(
        PackageUploadSource, uploads, ['packageuploadID'])
    pubs = load_referencing(
        PackageUploadBuild, uploads, ['packageuploadID'])
    source_sprs = load_related(
        SourcePackageRelease, puses, ['sourcepackagereleaseID'])
    bpbs = load_related(BinaryPackageBuild, pubs, ['buildID'])
    bprs = load_referencing(BinaryPackageRelease, bpbs, ['buildID'])
    source_files = load_referencing(
        SourcePackageReleaseFile, source_sprs, ['sourcepackagereleaseID'])
    binary_files = load_referencing(
        BinaryPackageFile, bprs, ['binarypackagereleaseID'])
    file_lfas = load_related(
        LibraryFileAlias, source_files + binary_files, ['libraryfileID'])
    load_related(LibraryFileContent, file_lfas, ['contentID'])

    # Get a dictionary of lists of binary files keyed by upload ID.
    package_upload_builds_dict = self.builds_dict(upload_ids, binary_files)
    build_upload_files, binary_package_names = self.binary_files_dict(
        package_upload_builds_dict, binary_files)

    # Get a dictionary of lists of source files keyed by upload ID.
    package_upload_source_dict = {}
    for pus in puses:
        package_upload_source_dict[pus.sourcepackagereleaseID] = pus
    source_upload_files = self.source_files_dict(
        package_upload_source_dict, source_files)

    # Get a list of binary package names that already exist in
    # the distribution.  This avoids multiple queries to is_new
    # on IBinaryPackageRelease.
    self.old_binary_packages = self.calculateOldBinaries(
        binary_package_names)

    package_sets = self.getPackagesetsFor(source_sprs)
    self.loadPackageCopyJobs(uploads)
    return [
        CompletePackageUpload(
            item, build_upload_files, source_upload_files, package_sets)
        for item in uploads]
def getCandidateUploads(self, source_series,
                        source_pocket=PackagePublishingPocket.RELEASE):
    """Find custom uploads that may need copying."""
    uploads = source_series.getPackageUploads(
        pocket=source_pocket, custom_type=self.copyable_types.keys())
    load_referencing(PackageUploadCustom, uploads, ['packageuploadID'])
    customs = sum([list(upload.customfiles) for upload in uploads], [])
    # filter() returns an iterator on Python 3, so sort via sorted()
    # rather than list.sort().
    customs = sorted(
        filter(self.isCopyable, customs),
        key=attrgetter('id'), reverse=True)
    return customs
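# Aside: the sum(..., []) flattening above concatenates lists pairwise and
# is quadratic in the number of uploads.  An equivalent linear form, shown
# purely as an alternative, is:
from itertools import chain

customs = list(chain.from_iterable(
    upload.customfiles for upload in uploads))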
def do_eager_load(rows):
    branch_ids = set(branch.id for branch in rows)
    if not branch_ids:
        return
    GenericBranchCollection.preloadDataForBranches(rows)
    # So far have only needed the persons for their canonical_url - no
    # need for validity etc in the /branches API call.
    load_related(Person, rows, ['ownerID', 'registrantID', 'reviewerID'])
    load_referencing(BugBranch, rows, ['branchID'])
def do_eager_load(rows):
    branch_ids = set(branch.id for branch in rows)
    if not branch_ids:
        return
    GenericBranchCollection.preloadDataForBranches(rows)
    load_related(Product, rows, ['productID'])
    # So far have only needed the persons for their canonical_url - no
    # need for validity etc in the /branches API call.
    load_related(Person, rows, ['ownerID', 'registrantID', 'reviewerID'])
    load_referencing(BugBranch, rows, ['branchID'])
def preloadForBuilders(self, builders):
    # Populate builders' currentjob cachedproperty.
    queues = load_referencing(BuildQueue, builders, ['builderID'])
    queue_builders = dict((queue.builderID, queue) for queue in queues)
    for builder in builders:
        cache = get_property_cache(builder)
        cache.currentjob = queue_builders.get(builder.id, None)
    return queues
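# Hypothetical usage sketch (the caller names and the query are assumed,
# not taken from the source): preload once for the whole result set, then
# iterate without triggering one BuildQueue query per builder.
builders = list(store.find(Builder))  # `store` is a placeholder Storm store
builder_set.preloadForBuilders(builders)  # `builder_set` assumed to expose
                                          # the method above
for builder in builders:
    builder.currentjob  # served from the property cache populated above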
def preloadGrantsForRules(rules):
    """Preload the access grants related to an iterable of rules."""
    grants = load_referencing(GitRuleGrant, rules, ["rule_id"])
    grants_map = defaultdict(list)
    for grant in grants:
        grants_map[grant.rule_id].append(grant)
    for rule in rules:
        get_property_cache(rule).grants = grants_map[rule.id]
    load_related(Person, grants, ["grantee_id"])
def test_load_referencing(self):
    owned_objects = [
        self.factory.makeBranch(),
        self.factory.makeBranch(),
        ]
    expected = set(
        list(owned_objects[0].subscriptions) +
        list(owned_objects[1].subscriptions))
    self.assertNotEqual(0, len(expected))
    self.assertEqual(
        expected,
        set(bulk.load_referencing(
            BranchSubscription, owned_objects, ['branchID'])))
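# The test above pins down the contract of bulk.load_referencing: given the
# referenced objects and the foreign-key column names, return every row of
# the referencing class that points at any of them.  A simplified sketch of
# that behaviour (not Launchpad's actual implementation) in Storm terms:
from storm.expr import Or

from lp.services.database.interfaces import IStore


def load_referencing_sketch(object_type, owning_objects, ref_keys):
    ids = {obj.id for obj in owning_objects}
    if not ids:
        return []
    # Match rows whose foreign key (any of ref_keys) points at one of
    # the owning objects, in a single query.
    conditions = [
        getattr(object_type, key).is_in(ids) for key in ref_keys]
    return list(IStore(object_type).find(object_type, Or(*conditions)))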
def preLoadDataForSourcePackageRecipes(sourcepackagerecipes):
    # Load the referencing SourcePackageRecipeData.
    spr_datas = load_referencing(
        SourcePackageRecipeData, sourcepackagerecipes,
        ['sourcepackage_recipe_id'])
    # Load the related branches.
    load_related(Branch, spr_datas, ['base_branch_id'])
    # Store the SourcePackageRecipeData in the sourcepackagerecipes
    # objects.
    for spr_data in spr_datas:
        cache = get_property_cache(spr_data.sourcepackage_recipe)
        cache._recipe_data = spr_data
    SourcePackageRecipeData.preLoadReferencedBranches(spr_datas)
def initialize(self):
    self.attendees = []
    attendee_set = set()
    for attendance in self.context.attendances:
        self.attendees.append(dict(
            name=attendance.attendee.name,
            displayname=attendance.attendee.displayname,
            start=attendance.time_starts.strftime('%Y-%m-%dT%H:%M:%SZ'),
            end=attendance.time_ends.strftime('%Y-%m-%dT%H:%M:%SZ')))
        attendee_set.add(attendance.attendeeID)

    model_specs = []
    for spec in self.context.specifications(
            self.user, filter=[SpecificationFilter.ACCEPTED]):
        # Skip specs with no priority or less than LOW.
        if spec.priority < SpecificationPriority.UNDEFINED:
            continue
        model_specs.append(spec)

    people = defaultdict(dict)
    # Attendees per specification.
    for subscription in load_referencing(
            SpecificationSubscription, model_specs, ['specificationID']):
        if subscription.personID not in attendee_set:
            continue
        people[subscription.specificationID][
            subscription.personID] = subscription.essential

    # Spec specials - drafter/assignee.  Don't need approver for
    # performance, as specifications() above eager-loaded the
    # people, and approvers don't count as "required persons."
    for spec in model_specs:
        # Get the list of attendees that will attend the sprint.
        spec_people = people[spec.id]
        if spec.assignee is not None:
            spec_people[spec.assignee.id] = True
            attendee_set.add(spec.assignee.id)
        if spec.drafter is not None:
            spec_people[spec.drafter.id] = True
            attendee_set.add(spec.drafter.id)
    people_by_id = dict(
        (person.id, person)
        for person in getUtility(IPersonSet).getPrecachedPersonsFromIDs(
            attendee_set))
    self.specifications = [
        dict(
            spec=spec,
            interested=[
                dict(name=people_by_id[person_id].name, required=required)
                for (person_id, required) in people[spec.id].items()])
        for spec in model_specs]
def preLoadDataForSourcePackageRecipes(sourcepackagerecipes):
    # Load the referencing SourcePackageRecipeData.
    spr_datas = load_referencing(
        SourcePackageRecipeData, sourcepackagerecipes,
        ['sourcepackage_recipe_id'])
    # Store the SourcePackageRecipeData in the sourcepackagerecipes
    # objects.
    for spr_data in spr_datas:
        cache = get_property_cache(spr_data.sourcepackage_recipe)
        cache._recipe_data = spr_data
    SourcePackageRecipeData.preLoadReferencedBranches(spr_datas)
    owner_ids = set(map(attrgetter('owner_id'), sourcepackagerecipes))
    list(getUtility(IPersonSet).getPrecachedPersonsFromIDs(
        owner_ids, need_validity=True))
def preLoadReferencedBranches(sourcepackagerecipedatas):
    # Circular imports.
    from lp.code.model.branchcollection import GenericBranchCollection
    from lp.code.model.gitcollection import GenericGitCollection

    # Load the related Branch, _SourcePackageRecipeDataInstruction.
    base_branches = load_related(
        Branch, sourcepackagerecipedatas, ['base_branch_id'])
    base_repositories = load_related(
        GitRepository, sourcepackagerecipedatas,
        ['base_git_repository_id'])
    sprd_instructions = load_referencing(
        _SourcePackageRecipeDataInstruction,
        sourcepackagerecipedatas, ['recipe_data_id'])
    sub_branches = load_related(Branch, sprd_instructions, ['branch_id'])
    sub_repositories = load_related(
        GitRepository, sprd_instructions, ['git_repository_id'])
    all_branches = base_branches + sub_branches
    all_repositories = base_repositories + sub_repositories
    # Pre-load branches'/repositories' data.
    if all_branches:
        GenericBranchCollection.preloadDataForBranches(all_branches)
    if all_repositories:
        GenericGitCollection.preloadDataForRepositories(all_repositories)
    # Store the pre-fetched objects on the sourcepackagerecipedatas
    # objects.
    branch_to_recipe_data = {
        instr.branch_id: instr.recipe_data_id
        for instr in sprd_instructions
        if instr.branch_id is not None
        }
    repository_to_recipe_data = {
        instr.git_repository_id: instr.recipe_data_id
        for instr in sprd_instructions
        if instr.git_repository_id is not None
        }
    caches = {
        sprd.id: [sprd, get_property_cache(sprd)]
        for sprd in sourcepackagerecipedatas
        }
    for _, [sprd, cache] in caches.items():
        cache._referenced_branches = [sprd.base]
    for branch in sub_branches:
        cache = caches[branch_to_recipe_data[branch.id]][1]
        cache._referenced_branches.append(branch)
    for repository in sub_repositories:
        cache = caches[repository_to_recipe_data[repository.id]][1]
        cache._referenced_branches.append(repository)
def preLoadReferencedBranches(sourcepackagerecipedatas):
    # Load the related Branch, _SourcePackageRecipeDataInstruction.
    load_related(Branch, sourcepackagerecipedatas, ['base_branch_id'])
    sprd_instructions = load_referencing(
        _SourcePackageRecipeDataInstruction,
        sourcepackagerecipedatas, ['recipe_data_id'])
    sub_branches = load_related(Branch, sprd_instructions, ['branch_id'])
    # Store the pre-fetched objects on the sourcepackagerecipedatas
    # objects.
    branch_to_recipe_data = dict(
        (instr.branch_id, instr.recipe_data_id)
        for instr in sprd_instructions)
    caches = dict(
        (sprd.id, [sprd, get_property_cache(sprd)])
        for sprd in sourcepackagerecipedatas)
    for unused, [sprd, cache] in caches.items():
        cache._referenced_branches = [sprd.base_branch]
    for recipe_data_id, branches in groupby(
            sub_branches, lambda branch: branch_to_recipe_data[branch.id]):
        cache = caches[recipe_data_id][1]
        cache._referenced_branches.extend(list(branches))
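# One caveat on the itertools.groupby pattern above: groupby only groups
# adjacent items, so it relies on sub_branches arriving ordered by recipe
# data.  A defensive variant (illustrative only) sorts explicitly first:
from itertools import groupby

sub_branches = sorted(
    sub_branches, key=lambda branch: branch_to_recipe_data[branch.id])
for recipe_data_id, branches in groupby(
        sub_branches, lambda branch: branch_to_recipe_data[branch.id]):
    caches[recipe_data_id][1]._referenced_branches.extend(branches)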