# NOTE(review): machine-translated Java method ("generated source") collapsed onto one
# physical line. It appears to simulate random playouts of `theGame`, checking every
# reached state's contents against the declared base propositions and every legal move
# against the declared input propositions, raising ValidatorException on a mismatch and
# returning ImmutableList.of() (no errors) otherwise — TODO confirm against the original
# Java (ggp-base BasesInputsValidator, presumably).
# NOTE(review): broken as translated: `sm`, `bases`, `inputs`, `state`, `startTime`,
# `legalMoves`, `legalSentences`, `missingBases`, `missingInputs`, `initialState` are
# never defined here; `GdlPool.getRelation(self.LEGAL, [None]*)` is a syntax error the
# translator left behind; the Java exception/type names (System, RuntimeException,
# StackOverflowError, OutOfMemoryError, ImmutableList) do not exist in Python. Also,
# everything after the first inline `#` is a comment in Python, so most of the body is
# dead text. Must be repaired from the Java source before use.
def checkValidity(self, theGame): """ generated source for method checkValidity """ try: sm.initialize(theGame.getRules()) if len(bases) == 0: raise ValidatorException("Could not find base propositions.") elif len(inputs) == 0: raise ValidatorException("Could not find input propositions.") for base in bases: truesFromBases.add(GdlPool.getRelation(self.TRUE, base.getBody())) for input in inputs: legalsFromInputs.add(GdlPool.getRelation(self.LEGAL, input.getBody())) if truesFromBases.isEmpty() and legalsFromInputs.isEmpty(): return ImmutableList.of() while System.currentTimeMillis() < startTime + self.millisecondsToTest: # Check state against bases, inputs if not truesFromBases.isEmpty(): if not truesFromBases.containsAll(state.getContents()): missingBases.addAll(state.getContents()) missingBases.removeAll(truesFromBases) raise ValidatorException("Found missing bases: " + missingBases) if not legalsFromInputs.isEmpty(): for role in sm.getRoles(): for move in legalMoves: legalSentences.add(GdlPool.getRelation(self.LEGAL, [None]*)) if not legalsFromInputs.containsAll(legalSentences): missingInputs.addAll(legalSentences) missingInputs.removeAll(legalsFromInputs) raise ValidatorException("Found missing inputs: " + missingInputs) state = sm.getRandomNextState(state) if sm.isTerminal(state): state = initialState except MoveDefinitionException as mde: raise ValidatorException("Could not find legal moves while simulating: " + mde) except TransitionDefinitionException as tde: raise ValidatorException("Could not find transition definition while simulating: " + tde) except RuntimeException as e: raise ValidatorException("Ran into a runtime exception while simulating: " + e) except StackOverflowError as e: raise ValidatorException("Ran into a stack overflow while simulating: " + e) except OutOfMemoryError as e: raise ValidatorException("Ran out of memory while simulating: " + e) return ImmutableList.of()
def _map_omeka_item_element_dc_rights(self, object_builder, text):
    """Map a Dublin Core 'Rights' element text onto the object's rights set.

    The rights type is recorded as UNDETERMINED because the free-text
    element does not identify a specific license.
    """
    rights_statement = \
        Rights.builder()\
            .setText(text)\
            .setType(RightsType.UNDETERMINED)\
            .build()
    # NOTE: .setRightsHolder(self.__institution_title) was deliberately left
    # disabled in the original mapping.
    rights_set = \
        RightsSet.builder()\
            .setElements(ImmutableList.of(rights_statement))\
            .build()
    object_builder.setRights(rights_set)
def _map_omeka_item_element_itm_accession_number(self, object_builder, text):
    """Record an accession number as a repository Location on the object."""
    accession_refid = \
        LocationRefid.builder()\
            .setText(text)\
            .setType(LocationRefidType.ACCESSION)\
            .build()
    repository_location = \
        Location.builder()\
            .setRefids(ImmutableList.of(accession_refid))\
            .setType(LocationType.REPOSITORY)\
            .build()
    object_builder.locations.append(repository_location)
# NOTE(review): machine-translated Java method collapsed onto one line. It appears to
# build a map from each sentence form in `sentencesModel` to a
# CartesianSentenceFormDomain (empty-domain for zero-arity forms, otherwise one domain
# per Cartesian product of sampled terms) — TODO confirm against the Java original.
# NOTE(review): broken as translated: `sentencesModel`, `nameAndArity`, `form`,
# `sampleTerms`, and `domain` are never defined in this scope, and the inline `#`
# comments swallow the rest of the line, so the else-branch is effectively dead text.
# Must be repaired from the Java source before use.
def getCartesianDomainsFromModel(self): """ generated source for method getCartesianDomainsFromModel """ results = Maps.newHashMap() for sentenceEntry in sentencesModel.entrySet(): ConcurrencyUtils.checkForInterruption() # We'll end up taking the Cartesian product of the different # types of terms we have available if nameAndArity.getArity() == 0: results.put(form, CartesianSentenceFormDomain.create(form, ImmutableList.of())) else: for terms in Sets.cartesianProduct(sampleTerms): ConcurrencyUtils.checkForInterruption() results.put(form, domain) return results
def _map_omeka_item_element_itm_label(self, object_builder, text):
    """Map a label element text to an Inscription on the object.

    Surrounding single quotes and whitespace are stripped; empty labels
    are ignored.
    """
    label = text.strip("'").strip()
    if not label:
        return
    inscription_text = \
        InscriptionText.builder()\
            .setText(label)\
            .setType(InscriptionTextType.LABEL)\
            .build()
    inscription = \
        Inscription.builder()\
            .setTexts(ImmutableList.of(inscription_text))\
            .build()
    object_builder.inscriptions.append(inscription)
def _map_omeka_item_element_dc_subject(self, object_builder, text):
    """Split a Dublin Core 'Subject' element on ';' and add each term.

    Blank entries are skipped. Every mapped term is also recorded in the
    vocabulary-usage statistics.
    """
    for raw_subject in text.split(';'):
        subject = raw_subject.strip()
        if not subject:
            continue
        subject_term = \
            SubjectTerm.builder()\
                .setText(subject)\
                .setType(SubjectTermType.OTHER_TOPIC)\
                .build()
        object_builder.subjects.append(
            Subject.builder()
                .setTerms(ImmutableList.of(subject_term))
                .build()
        )
        self._update_vocabulary_used('Dublin Core', 'Subject', subject)
def put_institution(institution_id, institution_title, institution_url, store_parameters, collection_store_uri=None, data_rights=None):
    """Create or update an institution via the institution command service.

    When data_rights is not supplied, a default copyrighted RightsSet is
    synthesized naming the institution as rights holder for the current year.
    """
    if data_rights is None:
        default_rights = \
            Rights.Builder()\
                .setRightsHolder(institution_title)\
                .setText("Copyright %s %s" % (datetime.now().year, institution_title))\
                .setType(RightsType.COPYRIGHTED)\
                .build()
        data_rights = \
            RightsSet.Builder()\
                .setElements(ImmutableList.of(default_rights))\
                .build()
    institution = \
        Institution.Builder()\
            .setCollectionStoreUri(Optional.fromNullable(collection_store_uri))\
            .setDataRights(data_rights)\
            .setStoreParameters(store_parameters)\
            .setTitle(institution_title)\
            .setUrl(institution_url)\
            .build()
    PythonApi.getInstance().getInstitutionCommandService().putInstitution(institution_id, institution)
def _parse_record_metadata_subject_element(self, element, object_builder):
    """Map an OAI-PMH subject element to a Subject on the object builder.

    The element text becomes an OTHER_TOPIC SubjectTerm. If the element
    carries a 'qualifier' attribute that names a known Vocab member, a
    VocabRef is attached to the term; unknown qualifiers (other than the
    deliberately ignored 'named_person' and 'UNTL-BS') are logged.
    """
    text = element.text.strip()
    if len(text) == 0:
        return
    # BUG FIX: the original ended this builder expression with a trailing
    # line-continuation backslash, which joined it to the following
    # `qualifier = ...` statement and made the statement malformed. The
    # stray continuation is removed here; everything else is unchanged.
    subject_term_builder = \
        SubjectTerm.Builder()\
            .setText(text)\
            .setType(SubjectTermType.OTHER_TOPIC)
    qualifier = element.get('qualifier', None)
    if qualifier is not None:
        try:
            # Qualifiers map to Vocab members by attribute name.
            vocab = getattr(Vocab, qualifier)
            subject_term_builder.setVocabRef(VocabRef.Builder().setVocab(vocab).build())
        except AttributeError:
            # These two qualifiers are known but intentionally unmapped.
            if qualifier not in ('named_person', 'UNTL-BS',):
                self._logger.warn("unknown subject vocabulary '%s'", qualifier)
    object_builder.subjects.append(
        Subject.Builder()
            .setTerms(ImmutableList.of(subject_term_builder.build()))
            .build()
    )
# NOTE(review): machine-translated Java constructor (AssignmentsImpl.__init__ from a
# GGP/GDL rule-assignment engine) collapsed onto a few physical lines. It appears to
# (1) compute the ordered, de-duplicated variables of `rule` minus the head assignment,
# (2) run an iteration-order search (getBestIterationOrderCandidate) over variable
# domains and completed sentence-form sizes, (3) set up per-variable value lists,
# source-conjunct tuple tables, assignment functions, and distinct constraints —
# TODO confirm against the original Java source.
# NOTE(review): broken as translated: many loop bodies reference names that are never
# initialized in this view (`conjunctTuple`, `term`, `constraintSlots`, `varsChosen`,
# `varIndex`, `sentences`, `tupleAssignment`, `longTuple`, `shortTuple`, `c`, `s`,
# `functionalConjunct`, `conjForm`, `varsInSentence`, `rightmostVar`, `function_`,
# `index`, `nextRightmostVar`, ...), bare `varsToAssign` is used where
# `self.varsToAssign` is presumably meant, Java types (ArrayList, HashMap, Lists,
# ImmutableList, Collections, RuntimeException) are unresolved, and the inline `#`
# comments swallow the remainder of each physical line. Must be repaired from the Java
# source before use.
def __init__(self, headAssignment, rule, varDomains, functionInfoMap, completedSentenceFormValues): """ generated source for method __init__ """ super(AssignmentsImpl, self).__init__() self.empty = False self.headAssignment = headAssignment # We first have to find the remaining variables in the body self.varsToAssign = GdlUtils.getVariables(rule) # Remove all the duplicates; we do, however, want to keep the ordering newVarsToAssign = ArrayList() for v in varsToAssign: if not newVarsToAssign.contains(v): newVarsToAssign.add(v) self.varsToAssign = newVarsToAssign self.varsToAssign.removeAll(headAssignment.keySet()) # varsToAssign is set at this point # We see if iterating over entire tuples will give us a # better result, and we look for the best way of doing that. # Let's get the domains of the variables # Map<GdlVariable, Set<GdlConstant>> varDomains = model.getVarDomains(rule); # Since we're looking at a particular rule, we can do this one step better # by looking at the domain of the head, which may be more restrictive # and taking the intersections of the two domains where applicable # Map<GdlVariable, Set<GdlConstant>> headVarDomains = model.getVarDomainsInSentence(rule.getHead()); # We can run the A* search for a good set of source conjuncts # at this point, then use the result to build the rest. completedSentenceFormSizes = HashMap() if completedSentenceFormValues != None: for form in completedSentenceFormValues.keySet(): completedSentenceFormSizes.put(form, completedSentenceFormValues.get(form).size()) varDomainSizes = HashMap() for var in varDomains.keySet(): varDomainSizes.put(var, varDomains.get(var).size()) bestOrdering = IterationOrderCandidate() bestOrdering = getBestIterationOrderCandidate(rule, varDomains, functionInfoMap, completedSentenceFormSizes, headAssignment, False)# model, # TODO: True here? 
# Want to replace next few things with order # Need a few extra things to handle the use of iteration over existing tuples self.varsToAssign = bestOrdering.getVariableOrdering() # For each of these vars, we have to find one or the other. # Let's start by finding all the domains, a task already done. self.valuesToIterate = Lists.newArrayListWithCapacity(len(self.varsToAssign)) for var in varsToAssign: if varDomains.containsKey(var): if not varDomains.get(var).isEmpty(): self.valuesToIterate.add(ImmutableList.copyOf(varDomains.get(var))) else: self.valuesToIterate.add(ImmutableList.of(GdlPool.getConstant("0"))) else: self.valuesToIterate.add(ImmutableList.of(GdlPool.getConstant("0"))) # Okay, the iteration-over-domain is done. # Now let's look at sourced iteration. self.sourceDefiningSlot = ArrayList(len(self.varsToAssign)) i = 0 while i < len(self.varsToAssign): self.sourceDefiningSlot.add(-1) i += 1 # We also need to convert values into tuples # We should do so while constraining to any constants in the conjunct # Let's convert the conjuncts sourceConjuncts = bestOrdering.getSourceConjuncts() self.tuplesBySource = Lists.newArrayListWithCapacity(len(sourceConjuncts)) # new ArrayList<List<List<GdlConstant>>>(len(sourceConjuncts)); self.varsChosenBySource = Lists.newArrayListWithCapacity(len(sourceConjuncts)) # new ArrayList<List<Integer>>(len(sourceConjuncts)); self.putDontCheckBySource = Lists.newArrayListWithCapacity(len(sourceConjuncts)) # new ArrayList<List<Boolean>>(len(sourceConjuncts)); j = 0 while j < len(sourceConjuncts): # flatten into a tuple # Go through the vars/constants in the tuple while i < len(conjunctTuple): if isinstance(term, (GdlConstant, )): constraintSlots.add(i) constraintValues.add(term) # TODO: What if tuple size ends up being 0? 
# Need to keep that in mind elif isinstance(term, (GdlVariable, )): varsChosen.add(varIndex) if self.sourceDefiningSlot.get(varIndex) == -1: # We define it self.sourceDefiningSlot.set(varIndex, j) putDontCheck.add(True) else: # It's an overlap; we just check for consistency putDontCheck.add(False) else: raise RuntimeException("Function returned in tuple") i += 1 self.varsChosenBySource.add(ImmutableList.copyOf(varsChosen)) self.putDontCheckBySource.add(ImmutableList.copyOf(putDontCheck)) # Now we put the tuples together # We use constraintSlots and constraintValues to check that the # tuples have compatible values for sentence in sentences: # Check that it doesn't conflict with our headAssignment if not headAssignment.isEmpty(): for var in headAssignment.keySet(): if tupleAssignment.containsKey(var) and tupleAssignment.get(var) != headAssignment.get(var): continue while c < len(constraintSlots): if not longTuple.get(slot) == value: continue c += 1 while s < len(longTuple): # constraintSlots is sorted in ascending order if c < len(constraintSlots) and constraintSlots.get(c) == s: c += 1 else: shortTuple.add(longTuple.get(s)) s += 1 # The tuple fits the source conjunct tuples.add(ImmutableList.copyOf(shortTuple)) # sortTuples(tuples); //Needed? Useful? Not sure. Probably not? self.tuplesBySource.add(ImmutableList.copyOf(tuples)) j += 1 # We now want to see which we can give assignment functions to self.valuesToCompute = ArrayList(len(self.varsToAssign)) for var in varsToAssign: self.valuesToCompute.add(None) self.indicesToChangeWhenNull = ArrayList(len(self.varsToAssign)) i = 0 while i < len(self.varsToAssign): # Change itself, why not? 
# Actually, instead let's try -1, to catch bugs better self.indicesToChangeWhenNull.add(-1) i += 1 # Now we have our functions already selected by the ordering # bestOrdering.functionalConjunctIndices; # Make AssignmentFunctions out of the ordering functionalConjuncts = bestOrdering.getFunctionalConjuncts() # print "functionalConjuncts: " + functionalConjuncts; i = 0 while i < len(functionalConjuncts): if functionalConjunct != None: # These are the only ones that could be constant functions if functionInfoMap != None: functionInfo = functionInfoMap.get(conjForm) if functionInfo != None: # Now we need to figure out which variables are involved # and which are suitable as functional outputs. # 1) Which vars are in this conjunct? # 2) Of these vars, which is "rightmost"? # 3) Is it only used once in the relation? if Collections.frequency(varsInSentence, rightmostVar) != 1: continue # Can't use it # 4) Which slot is it used in in the relation? # 5) Build an AssignmentFunction if appropriate. # This should be able to translate from values of # the other variables to the value of the wanted # variable. # We don't guarantee that this works until we check if not function_.functional(): continue self.valuesToCompute.set(index, function_) remainingVarsInSentence.remove(rightmostVar) self.indicesToChangeWhenNull.set(index, self.varsToAssign.indexOf(nextRightmostVar)) i += 1 # We now have the remainingVars also assigned their domains # We also cover the distincts here # Assume these are just variables and constants self.distincts = ArrayList() for literal in rule.getBody(): if isinstance(literal, (GdlDistinct, )): self.distincts.add(literal) computeVarsToChangePerDistinct() # Need to add "distinct" restrictions to head assignment, too... checkDistinctsAgainstHead()
# NOTE(review): top-level script fragment, incomplete at both edges of this view: it
# begins mid-expression (the tail of a builder call whose opening lines are not visible
# here) and ends inside an unclosed put_collection( call. The visible middle registers
# the Texas Fashion Collection institution ('untvca') with an explicit copyrighted
# RightsSet and a record-mapper store parameter, then begins registering its 'txfc'
# collection backed by an OaiPmhFsObjectStore under data/extracted. Left byte-identical;
# repair requires the surrounding lines.
.setStoreParameters(store_parameters) .setTitle(institution_title) .setUrl(institution_url) .build() ) data_dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'extracted')) assert os.path.isdir(data_dir_path) put_institution( data_rights=\ RightsSet.Builder() .setElements(ImmutableList.of( Rights.Builder() .setRightsHolder('University of North Texas') .setText("The contents of Texas Fashion Collection, hosted by the University of North Texas Libraries (digital content including images, text, and sound and video recordings) are made publicly available by the collection-holding partners for use in research, teaching, and private study. For the full terms of use, see http://digital.library.unt.edu/terms-of-use/") .setType(RightsType.COPYRIGHTED) .build() )) .build(), institution_id=InstitutionId.parse('untvca'), institution_title='Texas Fashion Collection', institution_url=Url.parse('http://digital.library.unt.edu/explore/collections/TXFC/'), store_parameters=ImmutableMap.of( 'record_mapper', TxfcOaiPmhRecordMapper.__module__ + '.' + TxfcOaiPmhRecordMapper.__name__ # @UndefinedVariable ), ) put_collection( collection_id=CollectionId.parse('untvca/txfc'), institution_id=InstitutionId.parse('untvca'), object_store_uri=Uri.parse(OaiPmhFsObjectStore.URI_SCHEME + ':/' + os.path.join(data_dir_path, 'untvca', 'txfc').replace(os.path.sep, '/')),
# NOTE(review): long mapping method collapsed onto three physical lines and garbled in
# the process (the file loop's `for element_text in file_.element_texts: if` is split
# mid-statement across lines). It builds an Object for an Omeka item: derives a feature
# name from OMEKA_COLLECTIONS, takes the first Dublin Core Title as the feature value,
# collects per-file image credit/license (mapping 'public domain', 'CC0', and
# 'CC BY-SA <version>' licenses to RightsType/Creative Commons VocabRefs), attaches
# original and square-thumbnail image versions, and returns an ObjectEntry.
# NOTE(review): `object_id` is computed at the top from omeka_item.id and then
# overwritten near the end with urllib.quote(feature_value) — the first assignment is
# dead; unclear which identifier scheme is intended, confirm before cleaning up.
# NOTE(review): `endpoint_url`, `square_thumbnail_height_px`/`width_px` usage and the
# "%(feature_value)s" % locals() title formatting are as-written; left byte-identical
# because a safe rewrite needs the original (un-collapsed) source.
def map_omeka_item(self, collection_id, endpoint_url, omeka_item, omeka_item_files, square_thumbnail_height_px, square_thumbnail_width_px): object_id = ObjectId.parse(str(collection_id) + '/' + str(omeka_item.id)) vocab_ref = VocabRef.Builder().setVocab(Vocab.COSTUME_CORE).build() feature_name = None omeka_collection_id = int(collection_id.getUnqualifiedCollectionId()) for item in self.OMEKA_COLLECTIONS.iteritems(): if item[1] == omeka_collection_id: feature_name = item[0] break assert feature_name is not None feature_value = None item_image_credit_line = item_image_license = None for element_text in omeka_item.element_texts: if len(element_text.text) == 0: continue if element_text.element_set.name == 'Dublin Core': if element_text.element.name == 'Title': if feature_value is None: feature_value = element_text.text elif element_text.element_set.name == 'Item Type Metadata': if element_text.element.name == 'Image Creator': item_image_credit_line = element_text.text elif element_text.element.name == 'Image License': item_image_license = element_text.text else: self._logger.warn("Omeka item %d has unknown element set name '%s'", omeka_item.id, element_text.element_set.name) object_builder = \ Object.Builder()\ .setCollectionId(collection_id)\ .setHidden(True)\ .setInstitutionId(collection_id.getInstitutionId())\ .setStructures(\ StructureSet.Builder().setElements(ImmutableList.of( Structure.Builder() .setText(feature_value) .setType( StructureType.Builder() .setText(feature_name) .setVocabRef(vocab_ref) .build() ) .build() )) .build() )\ .setTitles( TitleSet.Builder().setElements(ImmutableList.of( Title.Builder() .setText("%(feature_value)s" % locals()) .setType(TitleType.DESCRIPTIVE) .build() )) .build() )\ .setViewType(ViewType.DETAIL) images = [] for file_ in omeka_item_files: if not file_.mime_type.startswith('image/'): continue image_credit_line = item_image_credit_line image_license = item_image_license for element_text in file_.element_texts: if 
element_text.element_set.name == 'Dublin Core': if element_text.element.name == 'License': image_license = element_text.text elif element_text.element.name == 'Provenance': image_credit_line = element_text.text if image_credit_line is None or len(image_credit_line) == 0: self._logger.warn("Omeka item %d has a file %d missing a Provenance", omeka_item.id, file_.id) continue if image_license is None or len(image_license) == 0: self._logger.warn("Omeka item %d has a file %d missing a License", omeka_item.id, file_.id) continue license_vocab_ref = None if image_license.lower() == 'public domain': rights_type = RightsType.PUBLIC_DOMAIN elif image_license == 'CC0': rights_type = RightsType.LICENSED license_vocab_ref = \ VocabRef.Builder()\ .setVocab(Vocab.CREATIVE_COMMONS)\ .setUri(Uri.parse('https://creativecommons.org/publicdomain/zero/1.0/'))\ .build() elif image_license.startswith('CC BY-SA '): rights_type = RightsType.LICENSED version = image_license[len('CC BY-SA '):] float(version) license_vocab_ref = \ VocabRef.Builder()\ .setVocab(Vocab.CREATIVE_COMMONS)\ .setUri(Uri.parse("https://creativecommons.org/licenses/by-sa/%s/" % version))\ .build() else: rights_type = RightsType.LICENSED image_builder = Image.Builder() # @UndefinedVariable file_urls = file_.file_urls image_builder.setOriginal(ImageVersion.Builder().setUrl(Url.parse(file_urls.original)).build()) image_builder.setRights( RightsSet.Builder().setElements(ImmutableList.of( Rights.Builder() .setLicenseVocabRef(Optional.fromNullable(license_vocab_ref)) .setRightsHolder(image_credit_line) .setText(image_license) .setType(rights_type) .build() )) .build() ) if file_urls.square_thumbnail is None: self._logger.warn("Omeka item %d has a file %d missing a square thumbnail", omeka_item.id, file_.id) continue image_builder.setSquareThumbnail( ImageVersion.Builder() .setHeightPx(UnsignedInteger.valueOf(square_thumbnail_height_px)) .setUrl(Url.parse(file_urls.square_thumbnail)) 
.setWidthPx(UnsignedInteger.valueOf(square_thumbnail_width_px)) .build() ) images.append(image_builder.build()) if len(images) > 0: object_builder.setImages(ImmutableList.copyOf(images)) else: self._logger.warn("Omeka item %d has no valid images", omeka_item.id) object_ = object_builder.build() object_id = ObjectId.parse(str(collection_id) + '/' + urllib.quote(feature_value, '')) return ObjectEntry(object_id, object_)
def getSentences(cls, rule, includeHead):
    """Return the rule's body sentences, optionally preceded by its head.

    When includeHead equals VarDomainOpts.INCLUDE_HEAD, the head sentence
    is prepended (lazily, via Iterables.concat); otherwise only the body
    is returned.
    """
    if includeHead != cls.VarDomainOpts.INCLUDE_HEAD:
        return rule.getBody()
    return Iterables.concat(ImmutableList.of(rule.getHead()), rule.getBody())