def __init__(self, context, request):
    """Initialize the tile/display form and resolve the configured field.

    ``self.data['field']`` is either ``<fieldname>`` or
    ``<schema-suffix>-<fieldname>``; in the latter case the matching
    behavior schema is looked up by identifier suffix.
    """
    Tile.__init__(self, context, request)
    DisplayForm.__init__(self, context, request)
    components = self.data['field'].split('-', 1)
    self.schema = None
    if len(components) > 1:
        for schema in iterSchemata(self.context):
            if schema.__identifier__.endswith(components[0]):
                self.schema = schema
                # Stop at the first matching schema; without this break
                # the last matching schema would silently win.
                break
    else:
        # No schema prefix given: fall back to the first schema.
        self.schema = tuple(iterSchemata(self.context))[0]
    self.field = components[-1]
    self.fields = Fields(self.schema).select(self.field)
def listAnchorNames(self, fieldname=None):
    """Return a list of anchor (``<a name="...">``) names found in the
    given rich-text field.

    If *fieldname* is None, the primary field of the context is used.
    Returns an empty list when no suitable text field is found or the
    field holds no HTML output.
    """
    results = []
    # NOTE(review): HTMLTreeBuilder / getiterator are legacy
    # ElementTree APIs — presumably kept for Python 2 compatibility.
    tree = HTMLTreeBuilder.TreeBuilder()
    content_field = None
    # Find the requested field (or the primary field) across all schemata.
    for schema in iterSchemata(self.context):
        if content_field is not None:
            break
        for name, field in getFieldsInOrder(schema):
            if (not fieldname and IPrimaryField.providedBy(field)) or name == fieldname:
                content_field = field
                break
    if content_field is None:
        return []
    try:
        content = content_field.get(self.context).output
    except AttributeError:
        # Not a text field.
        return []
    # Wrap in a synthetic root so fragments parse as a single tree.
    tree.feed('<root>%s</root>' % content)
    rootnode = tree.close()
    for x in rootnode.getiterator():
        if x.tag == "a":
            if "name" in x.keys():
                results.append(x.attrib['name'])
    return results
def listAnchorNames(self, fieldname=None):
    """Return a list of anchor names found in the given rich-text field.

    If *fieldname* is None, the primary field of the context is used.
    Returns an empty list when no suitable field exists, the field is
    not a text field, or the stored markup cannot be parsed.
    """
    content_field = None
    # Find the requested field (or the primary field) across all schemata.
    for schema in iterSchemata(self.context):
        if content_field is not None:
            break
        for name, field in getFieldsInOrder(schema):
            if (not fieldname and IPrimaryField.providedBy(field)) or name == fieldname:
                content_field = field
                break
    if content_field is None:
        return []
    try:
        content = content_field.get(self.context).output
    except AttributeError:
        # Not a text field.
        return []
    try:
        tree = fromstring(content)
    except ConflictError:
        # Never swallow ZODB conflict errors.
        raise
    except Exception:
        # Unparseable markup: behave as if there were no anchors.
        return []
    return [anchor.get('name')
            for anchor in tree.findall(SEARCHPATTERN)
            if "name" in anchor.keys()]
def __call__(self):
    """Serialize the context to a JSON-compatible dict.

    Includes standard metadata plus every schema field the current
    user has permission to read.
    """
    parent = aq_parent(aq_inner(self.context))
    parent_summary = getMultiAdapter(
        (parent, self.request), ISerializeToJsonSummary)()
    result = {
        # '@context': 'http://www.w3.org/ns/hydra/context.jsonld',
        '@id': self.context.absolute_url(),
        'id': self.context.id,
        '@type': self.context.portal_type,
        'parent': parent_summary,
        'created': json_compatible(self.context.created()),
        'modified': json_compatible(self.context.modified()),
        'review_state': self._get_workflow_state(),
        'UID': self.context.UID(),
    }
    for schema in iterSchemata(self.context):
        read_permissions = mergedTaggedValueDict(
            schema, READ_PERMISSIONS_KEY)
        for name, field in getFields(schema).items():
            # Skip fields the current user may not read.
            if not self.check_permission(read_permissions.get(name)):
                continue
            serializer = queryMultiAdapter(
                (field, self.context, self.request), IFieldSerializer)
            # queryMultiAdapter returns None when no serializer is
            # registered for this field type; skip the field instead
            # of raising TypeError on the call below.
            if serializer is None:
                continue
            result[json_compatible(name)] = serializer()
    return result
def set_defaults(self, obj):
    """Set the default value for all unset fields on the mail object
    (including additional behaviors).

    For each field without a stored value: try the registered
    ``default`` IValue adapter, then the field's own ``default``,
    then its ``missing_value``.
    """
    for schema in iterSchemata(obj):
        for name, field in getFieldsInOrder(schema):
            # Remove acquisition wrapper when getting field value so
            # determining if a field is already set works as expected
            value = field.get(field.interface(aq_base(obj)))
            if value == field.missing_value or value is None:
                # bind the field for choices with named vocabularies
                field = field.bind(obj)
                # No value is set, so we try to set the default value
                # otherwise we set the missing value
                default = queryMultiAdapter((
                    self.context,
                    self.request,  # request
                    None,  # form
                    field,
                    None,  # Widget
                ), IValue, name='default')
                if default is not None:
                    default = default.get()
                if default is None:
                    default = getattr(field, 'default', None)
                if default is None:
                    try:
                        default = field.missing_value
                    except AttributeError:
                        # Narrowed from a bare except: only a missing
                        # attribute is expected here.
                        pass
                field.set(field.interface(obj), default)
    obj.reindexObject()
def create_subtask(task, data):
    """Create a subtask below *task*, seeding missing field values from it.

    Values present in *data* win; all other fields inherit the parent
    task's value. Fires created/modified events and records an activity.
    """
    subtask = createContent('opengever.task.task', id=data['title'], **data)
    notify(ObjectCreatedEvent(subtask))
    subtask = addContentToContainer(task, subtask, checkConstraints=True)

    for schemata in iterSchemata(subtask):
        parent_repr = schemata(task)
        subtask_repr = schemata(subtask)
        for name, field in schema.getFieldsInOrder(schemata):
            if name in data:
                new_value = data[name]
            else:
                # Fall back to the parent task's value for fields that
                # were not explicitly provided.
                new_value = getattr(parent_repr, name, None)
            setattr(subtask_repr, name, new_value)

    activity = TaskAddedActivity(subtask, task.REQUEST, task)
    activity.record()
    notify(ObjectModifiedEvent(subtask))
    return subtask
def __call__(self):
    """Serialize the context to a JSON-compatible dict with Hydra context.

    Includes metadata, an inline parent summary, and every schema field
    the current user has permission to read.
    """
    result = {
        '@context': 'http://www.w3.org/ns/hydra/context.jsonld',
        '@id': self.context.absolute_url(),
        '@type': self.context.portal_type,
        'parent': {
            '@id': aq_parent(aq_inner(self.context)).absolute_url(),
            'title': aq_parent(aq_inner(self.context)).title,
            'description': aq_parent(aq_inner(self.context)).description
        },
        'created': json_compatible(self.context.created()),
        'modified': json_compatible(self.context.modified()),
        'UID': self.context.UID(),
    }
    for schema in iterSchemata(self.context):
        read_permissions = mergedTaggedValueDict(
            schema, READ_PERMISSIONS_KEY)
        for name, field in getFields(schema).items():
            # Skip fields the current user may not read.
            if not self.check_permission(read_permissions.get(name)):
                continue
            serializer = queryMultiAdapter(
                (field, self.context, self.request), IFieldSerializer)
            # queryMultiAdapter returns None when no serializer is
            # registered for this field type; skip the field instead
            # of raising TypeError on the call below.
            if serializer is None:
                continue
            result[json_compatible(name)] = serializer()
    return result
def set_defaults(obj, container):
    """Set the default value for all unset fields on the mail object
    (including additional behaviors).

    For each field without a stored value: try the registered
    ``default`` IValue adapter (looked up against *container*), then
    the field's own ``default``, then its ``missing_value``.
    Returns *obj*.
    """
    for schema in iterSchemata(obj):
        for name, field in getFieldsInOrder(schema):
            # Remove acquisition wrapper when getting field value so
            # determining if a field is already set works as expected
            value = field.get(field.interface(aq_base(obj)))
            if value == field.missing_value or value is None:
                # No value is set, so we try to set the default value
                # otherwise we set the missing value
                default = queryMultiAdapter((
                    container,
                    container.REQUEST,  # request
                    None,  # form
                    field,
                    None,  # Widget
                ), IValue, name='default')
                if default is not None:
                    default = default.get()
                if default is None:
                    default = getattr(field, 'default', None)
                if default is None:
                    try:
                        default = field.missing_value
                    except AttributeError:
                        # Narrowed from a bare except: only a missing
                        # attribute is expected here.
                        pass
                field.set(field.interface(obj), default)
    return obj
def copy_fields(self, translation):
    """Copy all language-independent field values from the context to
    *translation*.

    Relation values are re-targeted to the translation's language.
    Returns True when at least one field was copied ("doomed"), so the
    caller knows to fire an ObjectModifiedEvent on the translation.
    """
    doomed = False
    target_language = queryAdapter(translation, ILanguage).get_language()
    # Bind target language and copy_relation once so the mapper below
    # only takes the relation value.
    relation_copier = lambda r, l=target_language, f=self.copy_relation: f(r, l)
    for schema in iterSchemata(self.context):
        for field_name in schema:
            if ILanguageIndependentField.providedBy(schema[field_name]):
                value = getattr(schema(self.context), field_name, _marker)
                if value == _marker:
                    continue
                elif IRelationValue.providedBy(value):
                    value = self.copy_relation(value, target_language)
                elif IRelationList.providedBy(schema[field_name]):
                    # NOTE(review): under Python 3 ``map`` is lazy —
                    # presumably this code targets Python 2; verify.
                    value = map(relation_copier, value or [])
                doomed = True
                setattr(schema(translation), field_name, value)
    # If at least one field has been copied over to the translation
    # we need to inform subscriber to trigger an ObjectModifiedEvent
    # on that translation.
    return doomed
def __call__(self, name, value):
    """Check read access for attribute *name* on the wrapped context.

    Returns 1 (allow) for views/viewlets (empty name) and for fields
    without a tagged read permission; otherwise defers to the security
    manager for the registered permission. Returns 0 when the named
    permission utility does not exist.
    """
    # Short circuit for things like views or viewlets.
    if name == '':
        return 1
    context = aq_parent(self)
    # Rebuilt on every call; caching on fti/context mod-times would be
    # unsafe if behaviors are bound to something other than the fti or
    # the context (e.g. schemas for subtrees).
    permission_by_field = all_merged_tagged_values_dict(
        iterSchemata(context),
        READ_PERMISSIONS_KEY
    )
    if name not in permission_by_field:
        return 1
    permission = queryUtility(IPermission, name=permission_by_field[name])
    if permission is None:
        return 0
    return getSecurityManager().checkPermission(
        permission.title, context
    )
def applyBehaviors(self, item, data):
    """Copy values from *data* onto every behavior schema of *item*.

    Only schema fields whose names appear in *data* are written.
    """
    for behavior_schema in utils.iterSchemata(item):
        adapted = behavior_schema(item)
        for field_name in behavior_schema.names():
            if field_name in data:
                setattr(adapted, field_name, data[field_name])
def _replaceBaseline( self, baseline ):
    """Replace *baseline*'s content with the working copy's field values,
    reindex it, delete the working copy, and return the baseline.
    """
    wc_id = self.context.getId()
    wc_container = aq_parent( self.context )

    # copy all field values from the working copy to the baseline
    for schema in iterSchemata( baseline ):
        for name, field in getFieldsInOrder( schema ):
            # Skip read-only fields
            if field.readonly:
                continue
            try:
                value = field.get( schema( self.context ) )
            except Exception:
                # Narrowed from a bare except (which would also trap
                # ConflictError and KeyboardInterrupt); fall back to
                # None when the value cannot be read.
                value = None

            # TODO: We need a way to identify the DCFieldProperty
            # fields and use the appropriate set_name/get_name
            if name == 'effective':
                baseline.effective_date = self.context.effective()
            elif name == 'expires':
                baseline.expiration_date = self.context.expires()
            elif name == 'subjects':
                baseline.setSubject(self.context.Subject())
            else:
                field.set( baseline, value )

    baseline.reindexObject()

    # delete the working copy
    wc_container._delObject( wc_id )
    return baseline
def set_defaults(obj):
    """Fill in default values for every unset field on *obj*.

    For each field with no (truthy) stored value: try the registered
    ``default`` IValue adapter, then the field's own ``default``, then
    its ``missing_value``. Returns *obj*.
    """
    for schemata in iterSchemata(obj):
        for name, field in getFieldsInOrder(schemata):
            try:
                value = field.get(field.interface(obj))
                if value:
                    # field is present with a truthy value, nothing to do
                    continue
            except AttributeError:
                # Field not present, set default
                pass
            default = queryMultiAdapter(
                (obj, obj.REQUEST, None, field, None),
                IValue, name='default')
            if default is not None:
                default = default.get()
            if default is None:
                default = getattr(field, 'default', None)
            if default is None:
                try:
                    default = field.missing_value
                except AttributeError:
                    pass
            field.set(field.interface(obj), default)
    return obj
def getReferencedAttributes(self, obj): file_data = {} # Try to get last revision, only store a new blob if the # contents differ from the prior one, otherwise store a # reference to the prior one. # The implementation is mostly based on CMFEditions's CloneBlobs # modifier. repo = getToolByName(obj, "portal_repository") try: prior_rev = repo.retrieve(obj) except ArchivistRetrieveError: prior_rev = None for schemata in iterSchemata(obj): for name, field in getFields(schemata).items(): if INamedBlobFileField.providedBy(field) or INamedBlobImageField.providedBy(field): try: # field.get may raise an AttributeError if the field # is provided by a behavior and hasn't been # initialized yet field_value = field.get(field.interface(obj)) except AttributeError: field_value = None if field_value is None: continue blob_file = field_value.open() save_new = True dotted_name = ".".join([schemata.__identifier__, name]) if prior_rev is not None: prior_obj = prior_rev.object prior_blob = field.get(field.interface(prior_obj)) if prior_blob is not None: prior_file = prior_blob.open() # Check for file size differences if os.fstat(prior_file.fileno()).st_size == os.fstat(blob_file.fileno()).st_size: # Files are the same size, compare line by line for line, prior_line in izip(blob_file, prior_file): if line != prior_line: break else: # The files are the same, save a reference # to the prior versions blob on this # version file_data[dotted_name] = prior_blob._blob save_new = False if save_new: new_blob = file_data[dotted_name] = Blob() new_blob_file = new_blob.open("w") try: blob_file.seek(0) new_blob_file.writelines(blob_file) finally: blob_file.close() new_blob_file.close() return file_data
def deserialize(self, fieldname, value):
    """Run the IFieldDeserializer for *fieldname* on doc1 against *value*."""
    target = self.portal.doc1
    for candidate_schema in iterSchemata(target):
        if fieldname in candidate_schema:
            field = candidate_schema.get(fieldname)
            break
    deserializer = getMultiAdapter(
        (field, target, self.request), IFieldDeserializer)
    return deserializer(value)
def afterRetrieveModifier(self, obj, repo_clone, preserve=()):
    """Restore relations from the working copy."""
    if IDexterityContent.providedBy(obj):
        for schemata in iterSchemata(obj):
            for fieldname, field in getFields(schemata).items():
                is_relation = (IRelationChoice.providedBy(field)
                               or IRelationList.providedBy(field))
                if not is_relation:
                    continue
                # Overwrite the clone's relation with the working
                # copy's current value.
                current = field.query(field.interface(obj))
                field.set(field.interface(repo_clone), current)
    return [], [], {}
def get_primary_field(obj):
    """Return ``(name, field)`` of *obj*'s primary field, or None.

    Stops at the first field providing IPrimaryField; the previous
    ``break`` only left the inner loop, so a primary field in a later
    schema would silently overwrite an earlier match.
    """
    for schema in iterSchemata(obj):
        for name, field in getFieldsInOrder(schema):
            if IPrimaryField.providedBy(field):
                return (name, field)
    return None
def __iter__(self):
    """Transmogrifier section: write inline (base64) data fields onto
    existing AT or Dexterity objects addressed by ``_path``.
    """
    for item in self.previous:
        # not enough info
        if '_path' not in item:
            yield item
            continue
        obj = self.context.unrestrictedTraverse(
            str(item['_path'].lstrip('/')), None)
        # path doesn't exist
        if obj is None:
            yield item
            continue
        # do nothing if we got a wrong object through acquisition
        path = item['_path']
        if path.startswith('/'):
            path = path[1:]
        if '/'.join(obj.getPhysicalPath()[self.root_path_length:]) != path:
            yield item
            continue
        for key in item.keys():
            if not key.startswith(self.datafield_prefix):
                continue
            fieldname = key[len(self.datafield_prefix):]
            if IBaseObject.providedBy(obj):
                # Archetypes
                field = obj.getField(fieldname)
                if field is None:
                    continue
                if item[key].has_key('data'):
                    value = base64.b64decode(item[key]['data'])
                else:
                    value = ''
                # XXX: handle other data field implementations
                old_value = field.get(obj).data
                if value != old_value:
                    field.set(obj, value)
                    obj.setFilename(item[key]['filename'])
                    obj.setContentType(item[key]['content_type'])
            else:
                # We have a destination DX type
                field = None
                for schemata in iterSchemata(obj):
                    for name, s_field in getFieldsInOrder(schemata):
                        if name == fieldname:
                            field = s_field
                            break
                    if field is not None:
                        break
                # Guard before using the field (the check previously
                # happened after the deserializer ran) and actually
                # interpolate the field name into the message.
                if field is None:
                    print('Can\'t find a suitable destination field '
                          '{0}'.format(fieldname))
                    continue
                deserializer = IDeserializer(field)
                value = deserializer(item[key], None, item)
                field.set(field.interface(obj), value)
        yield item
def serialize(self, fieldname, value):
    """Store *value* in doc1's *fieldname* field and return the field's
    serialized representation.

    Raises ValueError when no schema of doc1 contains *fieldname*
    (previously this surfaced as an UnboundLocalError).
    """
    field = None
    for schema in iterSchemata(self.doc1):
        if fieldname in schema:
            field = schema.get(fieldname)
            break
    if field is None:
        raise ValueError('No such field: %s' % fieldname)
    dm = getMultiAdapter((self.doc1, field), IDataManager)
    dm.set(value)
    serializer = getMultiAdapter(
        (field, self.doc1, self.request), IFieldSerializer)
    return serializer()
def _getMessage(self):
    """Build the email message for this content object on demand."""
    context = self.context
    result = constructMessageFromSchemata(context, iterSchemata(context))
    # Record the portal type in a header so the message can be
    # identified again later.
    result['Portal-Type'] = context.portal_type
    return result
def __call__(self, config):
    """Extract indexable data for every field of the context.

    Each field's IFieldExtractor adapter writes into the shared
    *data* dict, which is returned.
    """
    data = {}
    for schemata in iterSchemata(self.context):
        for field_name, field in getFieldsInOrder(schemata):
            getMultiAdapter(
                (self.context, self.request, field),
                IFieldExtractor).extract(field_name, data, config)
    return data
def getFieldValues(obj, *ifaces):
    """Yield the non-None values of all fields on *obj* that provide any
    of the given interfaces.

    Each matching field is yielded at most once; previously a field
    providing several of the requested interfaces yielded its value
    once per matching interface.
    """
    if IDexterityContent.providedBy(obj):
        for schemata in iterSchemata(obj):
            for name, field in getFields(schemata).items():
                if any(iface.providedBy(field) for iface in ifaces):
                    field_value = field.query(field.interface(obj))
                    if field_value is not None:
                        yield field_value
def create_file(self, filename, data, obj):
    """Wrap raw *data* in the file type of *obj*'s primary field.

    The filename is normalized to unicode first. Returns None when no
    primary field exists.
    """
    # filename must be unicode
    if not isinstance(filename, unicode):
        filename = filename.decode('utf-8')
    for schemata in iterSchemata(obj):
        for unused_name, candidate in getFieldsInOrder(schemata):
            if IPrimaryField.providedBy(candidate):
                return candidate._type(data=data, filename=filename)
def _ce_fields(self):
    """Map field name -> field for all text-like fields on the context
    (TextLine, Text and RichText).
    """
    text_interfaces = (ITextLine, IText, IRichText)
    return {
        name: field
        for schemata in iterSchemata(self.context)
        for name, field in getFieldsInOrder(schemata)
        if any(iface.providedBy(field) for iface in text_interfaces)
    }
def deserialize(self, fieldname, value):
    """Deserialize *value* for *fieldname* on the test document doc1."""
    document = self.portal.doc1
    for schema in iterSchemata(document):
        if fieldname in schema:
            field = schema.get(fieldname)
            break
    return getMultiAdapter(
        (field, document, self.request), IFieldDeserializer
    )(value)
def create_file(self, filename, data, obj):
    """Instantiate the primary field's file type with *data*/*filename*.

    Returns None when *obj* has no primary field.
    """
    # filename must be unicode
    if not isinstance(filename, unicode):
        filename = filename.decode("utf-8")
    primary = None
    for schemata in iterSchemata(obj):
        for _name, field in getFieldsInOrder(schemata):
            if IPrimaryField.providedBy(field):
                primary = field
                break
        if primary is not None:
            break
    if primary is not None:
        return primary._type(data=data, filename=filename)
def close(self):
    """Finish parsing and initialize the context from the parsed message."""
    self._closed = True
    self._message = self._parser.close()
    initializeObjectFromSchemata(
        self.context,
        iterSchemata(self.context),
        self._message,
        self._encoding
    )
def _getMessage(self):
    """Return a freshly constructed email message for the context."""
    msg = constructMessageFromSchemata(self.context,
                                       iterSchemata(self.context))
    # Tag the message so its portal type can be recovered on import.
    msg['Portal-Type'] = self.context.portal_type
    return msg
def test_searchabletext(self):
    """Check the searchable text of an object.

    For every dossier type, set all configured searchable attributes
    and assert each value (mapped through its vocabulary) appears in
    the indexed SearchableText.
    """
    self.provideUtility(DummyVocabulary(),
                        name='opengever.ogds.base.ContactsVocabulary')
    for dossier_type, additional_searchable_attr in \
            self.dossier_types.items():
        dossier = self.create_dossier(dossier_type)
        wrapper = queryMultiAdapter(
            (dossier, self.portal.portal_catalog),
            IIndexableObject)
        # merge default and additional searchable attributes
        searchable_attr = self.default_searchable_attr
        searchable_attr.update(additional_searchable_attr)
        for schemata in iterSchemata(dossier):
            for name, field in getFieldsInOrder(schemata):
                value = searchable_attr.get(name, '')
                if not value:
                    continue
                field.set(field.interface(dossier), value)
                # search value
                if isinstance(value, list):
                    for v in value:
                        val = self.map_with_vocab(schemata, name, v)
                # The field reference_number is handled specially:
                # the searchable text is provided by an adapter which
                # returns a reference number, so we can't set the
                # reference_number field like the other attributes.
                elif value == 'test_reference_number':
                    refNumb = getAdapter(dossier, IReferenceNumber)
                    val = refNumb.get_number()
                else:
                    val = self.map_with_vocab(schemata, name, value)
                self.assertIn(
                    val.encode('utf-8'), wrapper.SearchableText)
                # We pop the field if we found it, to check at the
                # end whether all attributes were found in the schema
                searchable_attr.pop(name)
        # Test sequencenumber
        if searchable_attr.get(
                'sequence_number', '') == 'test_sequence_number':
            seqNumb = getUtility(ISequenceNumber)
            self.assertIn(
                str(seqNumb.get_number(dossier)),
                wrapper.SearchableText)
            searchable_attr.pop('sequence_number')
        self.assertTrue(searchable_attr.values() == [])
def export_images(self, imagesize): '''Returns zip file with images ''' # Write ZIP archive zip_filename = tempfile.mktemp() ZIP = zipfile.ZipFile(zip_filename, 'w') catalog = api.portal.get_tool(name='portal_catalog') folder_path = '/'.join(self.context.getPhysicalPath()) brains = catalog(Language="", show_inactive=True, path={'query': folder_path, 'depth': -1}) for obj in brains: obj = obj.getObject() try: #this is for archetype #imageformat is image/jpg so we are skipping the first part #this leaves us with png / jpg / gif or something else. imageformat = obj.getContentType() imageformat = imageformat.split("/") image_suffix = imageformat[1] #hack for news item image if image_suffix == 'html': image_suffix = 'jpg' if image_suffix == 'jpeg': image_suffix = 'jpg' full_image_name = obj.getId() + '.' + image_suffix img = obj.Schema().getField('image').getScale(obj,scale=imagesize) ZIP.writestr(self.context.getId() + '/' + full_image_name, str(img.data)) except: #this is for dexterity blob fields if IDexterityContent.providedBy(obj): for schemata in iterSchemata(obj): for name, field in getFields(schemata).items(): #checking for image field if INamedImageField.providedBy(field): #copied this line from somewhere field_value = field.get(field.interface(obj)) if field_value is not None: #field_value is not correct, this gets the image, not the scale ZIP.writestr(self.context.getId() + '/' + str((field_value.filename).encode("utf8")), str(field_value.data)) finally: pass ZIP.close() data = file(zip_filename).read() os.unlink(zip_filename) R = self.request.RESPONSE R.setHeader('content-type', 'application/zip') R.setHeader('content-length', len(data)) R.setHeader('content-disposition', 'attachment; filename="%s.zip"' % self.context.getId()) return R.write(data)
def export_images(self, imagesize): '''Returns the file (with the preview images ''' # Write ZIP archive zip_filename = tempfile.mktemp() ZIP = zipfile.ZipFile(zip_filename, 'w') #hack for new collection try: all_folder_contents = self.context.results(batch=False) except AttributeError: #for folder and old collection all_folder_contents = self.context.getFolderContents() for obj in all_folder_contents: obj = obj.getObject() try: #this is for archetype #imageformat is image/jpg so we are skipping the first part #this leaves us with png / jpg / gif or something else. imageformat = obj.getContentType() imageformat = imageformat.split("/") image_suffix = imageformat[1] #hack for news item image if image_suffix == 'html': image_suffix = 'jpg' if image_suffix == 'jpeg': image_suffix = 'jpg' full_image_name = obj.getId() + '.' + image_suffix img = obj.Schema().getField('image').getScale(obj,scale=imagesize) ZIP.writestr(self.context.getId() + '/' + full_image_name, str(img.data)) except: #this is for dexterity blob fields if IDexterityContent.providedBy(obj): for schemata in iterSchemata(obj): for name, field in getFields(schemata).items(): #checking for image field if INamedImageField.providedBy(field): #copied this line from somewhere field_value = field.get(field.interface(obj)) if field_value is not None: #field_value is not correct, this gets the image, not the scale ZIP.writestr(self.context.getId() + '/' + str((field_value.filename).encode("utf8")), str(field_value.data)) finally: pass ZIP.close() data = file(zip_filename).read() os.unlink(zip_filename) R = self.request.RESPONSE R.setHeader('content-type', 'application/zip') R.setHeader('content-length', len(data)) R.setHeader('content-disposition', 'attachment; filename="%s.zip"' % self.context.getId()) return R.write(data)
def __iter__(self):
    """Transmogrifier section: load data-field payloads from files on
    disk and write them onto existing AT or Dexterity objects.
    """
    for item in self.previous:
        pathkey = self.pathkey(*item.keys())[0]
        if not pathkey:
            # not enough info
            yield item
            continue
        obj = self.context.unrestrictedTraverse(item[pathkey].lstrip("/"), None)
        if obj is None:
            # path doesn't exist
            yield item
            continue
        # AT
        if IBaseObject.providedBy(obj):
            for key in item.keys():
                if not key.startswith(self.datafield_prefix):
                    continue
                # The item value is a path to the payload on disk.
                if not os.path.exists(item[key]):
                    continue
                fieldname = key[len(self.datafield_prefix):]
                field = obj.getField(fieldname)
                f = open(item[key])
                value = f.read()
                f.close()
                # Cheap change detection: only write when sizes differ.
                if len(value) != len(field.get(obj)):
                    field.set(obj, value)
        # dexterity
        if dexterity_available and IDexterityContent.providedBy(obj):
            for key in item.keys():
                if not key.startswith(self.datafield_prefix):
                    continue
                if not os.path.exists(item[key]):
                    continue
                fieldname = key[len(self.datafield_prefix):]
                f = open(item[key])
                value = f.read()
                f.close()
                filename = item["id"].decode("utf-8")
                contenttype = ""
                # get all fields for this obj
                for schemata in iterSchemata(obj):
                    for name, field in getFieldsInOrder(schemata):
                        if field.__name__ == fieldname:
                            # create a blob instance
                            instance = field._type(data=value,
                                                   filename=filename,
                                                   contentType=contenttype)
                            # set it
                            field.set(field.interface(obj), instance)
                            # NOTE(review): this continue only skips to
                            # the next field; the scan keeps going.
                            continue
        yield item
def set_semantic_data(obj, event):
    """Collect all text-field content of *obj*, POS-tag it, and store the
    extracted keywords on ``obj.semantic``.
    """
    text = []
    for schema in iterSchemata(obj):
        for name, field in getFieldsInOrder(schema):
            if IText.providedBy(field):
                value = field.get(obj)
                # Unset fields return None; joining None would raise
                # TypeError, so only collect actual text.
                if value:
                    text.append(value)
    full_text = "\n".join(text)
    pos_tags = tag(full_text)
    obj.semantic = extract_keywords(pos_tags)
def _field_items(self):
    """Yield ``(name, serialized_value)`` for each tracked field on the
    context.
    """
    for context_schema in iterSchemata(self.context):
        for field_name, field in getFields(context_schema).items():
            if field_name not in self.tracking_field_names:
                continue
            field_serializer = queryMultiAdapter(
                (field, self.context, self.request), IFieldSerializer)
            yield field_name, field_serializer()
def afterRetrieveModifier(self, obj, repo_clone, preserve=()):
    """Restore relations from the working copy."""
    if not IDexterityContent.providedBy(obj):
        return [], [], {}
    for schemata in iterSchemata(obj):
        for name, field in getFields(schemata).items():
            if IRelationChoice.providedBy(field) or IRelationList.providedBy(field):
                # Copy the working copy's relation value onto the clone.
                field.set(field.interface(repo_clone),
                          field.query(field.interface(obj)))
    return [], [], {}
def _get_primary_field_type(self, obj):
    """Determine the type of an object's primary field
    (e.g. NamedBlobFile) so it can be used as a factory when setting
    the new document's primary field.
    """
    for schemata in iterSchemata(obj):
        primaries = [field for _, field in getFieldsInOrder(schemata)
                     if IPrimaryField.providedBy(field)]
        if primaries:
            return primaries[0]._type
def __iter__(self):
    """Transmogrifier section: deserialize data-field payloads onto
    existing Dexterity objects addressed by ``_path``, recording any
    deserialization failures in ``self.errored``.
    """
    for item in self.previous:
        # not enough info
        if "_path" not in item:
            yield item
            continue
        obj = self.context.unrestrictedTraverse(
            str(item["_path"].lstrip("/")), None)
        # path doesn't exist
        if obj is None:
            yield item
            continue
        # do nothing if we got a wrong object through acquisition
        path = item["_path"]
        if path.startswith("/"):
            path = path[1:]
        if ("/".join(obj.getPhysicalPath()[self.root_path_length:])
                != path):
            yield item
            continue
        for key in item.keys():
            if not key.startswith(self.datafield_prefix):
                continue
            fieldname = key[len(self.datafield_prefix):]
            field = None
            for schemata in iterSchemata(obj):
                for name, s_field in getFieldsInOrder(schemata):
                    if name == fieldname:
                        field = s_field
                        break
                if field is not None:
                    break
            # Guard before the adapter lookup (previously a None field
            # reached queryMultiAdapter) and interpolate the field name
            # into the message (the placeholder was missing).
            if field is None:
                logger.warning(
                    "Can't find a suitable destination field {0}".format(
                        fieldname))
                continue
            try:
                deserializer = queryMultiAdapter((field, obj), IDeserializer)
                value = deserializer(item[key], None, item)
            except Exception as e:
                logger.exception(e)
                self.errored.append({
                    "path": path,
                    "reason": "Deserialization Error",
                })
                continue
            field.set(field.interface(obj), value)
        yield item
def test_searchabletext(self):
    """Check the searchable text of an object.

    For every dossier type, set all configured searchable attributes
    and assert each value (mapped through its vocabulary) appears in
    the indexed SearchableText.
    """
    self.provideUtility(DummyVocabulary(),
                        name='opengever.ogds.base.ContactsVocabulary')
    for dossier_type, additional_searchable_attr in \
            self.dossier_types.items():
        dossier = self.create_dossier(dossier_type)
        wrapper = queryMultiAdapter((dossier, self.portal.portal_catalog),
                                    IIndexableObject)
        # merge default and additional searchable attributes
        searchable_attr = self.default_searchable_attr
        searchable_attr.update(additional_searchable_attr)
        for schemata in iterSchemata(dossier):
            for name, field in getFieldsInOrder(schemata):
                value = searchable_attr.get(name, '')
                if not value:
                    continue
                field.set(field.interface(dossier), value)
                # search value
                if isinstance(value, list):
                    for v in value:
                        val = self.map_with_vocab(schemata, name, v)
                # The field reference_number is handled specially:
                # the searchable text is provided by an adapter which
                # returns a reference number, so we can't set the
                # reference_number field like the other attributes.
                elif value == 'test_reference_number':
                    refNumb = getAdapter(dossier, IReferenceNumber)
                    val = refNumb.get_number()
                else:
                    val = self.map_with_vocab(schemata, name, value)
                self.assertIn(val.encode('utf-8'), wrapper.SearchableText)
                # We pop the field if we found it, to check at the
                # end whether all attributes were found in the schema
                searchable_attr.pop(name)
        # Test sequencenumber
        if searchable_attr.get('sequence_number',
                               '') == 'test_sequence_number':
            seqNumb = getUtility(ISequenceNumber)
            self.assertIn(str(seqNumb.get_number(dossier)),
                          wrapper.SearchableText)
            searchable_attr.pop('sequence_number')
        self.assertTrue(searchable_attr.values() == [])
def create_translation(self, *args, **kwargs):
    """Create translation for an object with uid in the given
    target_language and return its UID

    Usage::

        Create translation  /plone/en/foo  ca  title=Translated
    """
    disableCSRFProtection()
    # Parse arguments:
    uid_or_path = args[0]
    target_language = args[1]
    # BBB: Support keywords arguments with robotframework < 2.8.3
    kwargs.update(dict([arg.split('=', 1) for arg in args[2:]]))
    # Look up translatable content, trying UID first, then path.
    pc = getToolByName(self, "portal_catalog")
    uid_results = pc.unrestrictedSearchResults(UID=uid_or_path)
    path_results = pc.unrestrictedSearchResults(
        path={
            'query': uid_or_path.rstrip('/'),
            'depth': 0
        })
    obj = (uid_results or path_results)[0]._unrestrictedGetObject()
    # Translate
    manager = ITranslationManager(obj)
    manager.add_translation(target_language)
    translation = manager.get_translation(target_language)
    # Update fields: serialize the source object to a message, apply
    # the keyword overrides, and drop 'language' so the translation
    # keeps its own.
    data = constructMessageFromSchemata(obj, iterSchemata(obj))
    for key, value in kwargs.items():
        # Delete before re-adding to replace (message headers append).
        del data[key]
        data[key] = Header(value, 'utf-8')
    del data['language']
    initializeObjectFromSchemata(translation, iterSchemata(obj), data)
    notify(ObjectModifiedEvent(translation))
    # Return uid for the translation
    return IUUID(translation)
def insert(self, data):
    """Inserts the field data on self.context.

    *data* maps schema name -> {field name -> raw value}; values are
    run through ``self.unpack`` and skipped when unpacking yields the
    marker.
    """
    for schemata in iterSchemata(self.context):
        adapted = schemata(self.context)
        schema_data = data[schemata.getName()]
        for name, field in schema.getFieldsInOrder(schemata):
            unpacked = self.unpack(
                name, field, schema_data.get(name, _marker))
            if unpacked != _marker:
                setattr(adapted, name, unpacked)
def export_file(result, header_mapping, request=None):
    """Export catalog brains/objects as a CSV string.

    *header_mapping* is a list of {'header': ..., 'field': ...} dicts;
    the pseudo-fields '_path' and '_url' export the object's virtual
    path and absolute URL. Returns None for an empty *result*.
    """
    if not result:
        return None
    if request is None:
        request = getRequest()
    csv_file = StringIO.StringIO()
    writer = csv.writer(csv_file, delimiter=",", dialect="excel",
                        quotechar='"')
    columns = [d['header'] for d in header_mapping]
    writer.writerow(columns)
    for row in result:
        items = []
        # Brains are resolved to their object; plain objects pass through.
        if getattr(row, 'getObject', None):
            obj = row.getObject()
        else:
            obj = row
        for d in header_mapping:
            fieldid = d['field']
            if obj is None:
                # NOTE(review): presumably rows without an object are
                # callables yielding the value — confirm with callers.
                items.append(row(fieldid))
                continue
            if fieldid == '_path':
                path = obj.getPhysicalPath()
                virtual_path = request.physicalPathToVirtualPath(path)
                items.append('/'.join(virtual_path))
                continue
            elif fieldid == '_url':
                items.append(obj.absolute_url())
                continue
            value = ""
            # Find the first schema providing the field and serialize
            # its stored value.
            for schemata in iterSchemata(obj):
                if fieldid not in schemata:
                    continue
                field = schemata[fieldid]
                try:
                    value = field.get(schemata(obj))
                except AttributeError:
                    continue
                if value is field.missing_value:
                    continue
                serializer = ISerializer(field)
                value = serializer(value, {})
                break
            value = value and value.encode('utf-8')
            items.append(value)
        # log.debug(items)
        writer.writerow(items)
    csv_attachment = csv_file.getvalue()
    csv_file.close()
    return csv_attachment
def getField(obj, fieldname):
    """Look up *fieldname* on an Archetypes or Dexterity object.

    Returns the field object, or None when it cannot be found.
    """
    base = aq_base(obj)
    if getattr(base, "Schema", None):
        # Archetypes
        return base.getField(fieldname)
    # Dexterity
    for schemata in iterSchemata(base):
        for name, field in getFieldsInOrder(schemata):
            if name == fieldname:
                return field
def serialize(self, context, blocks): fieldname = "blocks" for schema in iterSchemata(context): if fieldname in schema: field = schema.get(fieldname) break dm = getMultiAdapter((context, field), IDataManager) dm.set(blocks) serializer = getMultiAdapter((field, context, self.request), IFieldSerializer) return serializer()
def get_persisted_values_for_obj(context):
    """Collect every persisted field value on *context*.

    Fields without a stored value (AttributeError) are skipped.
    """
    values = {}
    for current_schema in list(iterSchemata(context)):
        for field_name, field in getFieldsInOrder(current_schema):
            try:
                values[field_name] = get_persisted_value_for_field(
                    context, field)
            except AttributeError:
                # No persisted value for this field; skip it.
                continue
    return values
def __call__(self):
    """Upgrade step: coerce PyXB-typed field values on eCH-0147 imported
    objects back to plain Python types.
    """
    self.install_upgrade_profile()
    # Skip if eCH-0147 imports are not enabled
    if not api.portal.get_registry_record("ech0147_import_enabled",
                                          interface=IECH0147Settings):
        return
    # Only query non-imported-from-bundle documents and dossiers
    affected_types = ("opengever.dossier.businesscasedossier",
                      "opengever.document.document")
    query = {"portal_type": affected_types, "bundle_guid": None}
    primitive_pyxb_types = tuple(
        pyxb.binding.datatypes.__dict__.get("_PrimitiveDatatypes"))
    # Guard against a PyXB upgrade changing the primitive type set.
    if len(primitive_pyxb_types) != 19:
        raise InconsistentPyXBPrimitiveTypesException
    trivial_pyxb_types = (pyxb_int, pyxb_boolean, pyxb_string)
    trivial_types = (int, basestring)
    for obj in self.objects(
            query,
            "Ensure all fields of eCH-0147 imported objects are properly typed."):
        # Skip imported-from-bundle objects not on the index
        if IAnnotations(obj).get(BUNDLE_GUID_KEY):
            continue
        for schema in iterSchemata(obj):
            for name, field in getFieldsInOrder(schema):
                value = getattr(field.interface(obj), name, None)
                value_type = type(value)
                # Only touch pyxb typed values. value_type is a class,
                # so subclass checks are required here; isinstance on
                # the class object would never match.
                if field._type and value is not None and issubclass(value_type, primitive_pyxb_types):
                    object_path = "/".join(obj.getPhysicalPath())
                    logger.info(
                        "Found PyXB values in object %s field %s field type %s value type %s.",
                        object_path,
                        name,
                        repr(field._type),
                        repr(value_type),
                    )
                    if issubclass(value_type, trivial_pyxb_types) and field._type in trivial_types:
                        with writable(field) as wfield:
                            wfield.set(wfield.interface(obj), wfield._type(value))
                    elif issubclass(value_type, pyxb_date) and field._type is date:
                        with writable(field) as wfield:
                            # Convert via the ordinal of the *value*
                            # (the field object has no toordinal()).
                            wfield.set(wfield.interface(obj),
                                       wfield._type.fromordinal(value.toordinal()))
                    elif issubclass(value_type, pyxb_datetime) and field._type is datetime:
                        with writable(field) as wfield:
                            wfield.set(wfield.interface(obj), date_parser(value.ISO()))
                    else:
                        logger.warn(
                            "PyXB values in object %s field %s field type %s value type %s fell through!",
                            object_path,
                            name,
                            repr(field._type),
                            repr(value_type),
                        )
def __init__(self, context):
    """Find the primary field among the context's schemata.

    Raises TypeError when no schema declares a primary field.
    """
    self.context = context
    found = None
    for schema in iterSchemata(context):
        for name, field in getFieldsInOrder(schema):
            if IPrimaryField.providedBy(field):
                found = (name, field)
                break
    if not found:
        raise TypeError('Could not adapt', context, IPrimaryFieldInfo)
    self.fieldname, self.field = found
def serialize(self, context, blocks):
    """Write *blocks* into the context's ``blocks`` field and return its
    serialized form.

    Raises ValueError when no schema of the context provides the field.
    """
    fieldname = "blocks"
    field = next(
        (schema.get(fieldname)
         for schema in iterSchemata(context)
         if fieldname in schema),
        None,
    )
    if field is None:
        raise ValueError("blocks field not in the schema of %s" % context)
    getMultiAdapter((context, field), IDataManager).set(blocks)
    serializer = getMultiAdapter(
        (field, context, self.request), IFieldSerializer)
    return serializer()
def build_new_field_map(self, new_object):
    """Map field names to writable fields of *new_object*.

    Each non-readonly field is registered under both its short name and
    its fully qualified ``<interface>.<name>`` form.
    """
    fieldmap = {}
    for schema in iterSchemata(new_object):
        for short_name, field in getFieldsInOrder(schema):
            if field.readonly:
                continue
            qualified_name = '.'.join(
                (field.interface.__identifier__, short_name))
            fieldmap[short_name] = field
            fieldmap[qualified_name] = field
    return fieldmap
def test_mail_content(self):
    """Test the fields and the attributes of this content type."""
    self.assertTrue(self.mail.disable_add_from_sidebar)
    # Collect field names with a plain loop instead of a list
    # comprehension used only for its side effects.
    fields = []
    for schemata in iterSchemata(self.mail):
        fields.extend(name for name, _field in getFieldsInOrder(schemata))
    self.assertListEqual(fields, [
        'title', 'description', 'mail_from', 'mail_to', 'mail_cc',
        'mail_bcc', 'mail_body', 'subjects', 'language'
    ])
def demarshall(obj, message):
    """Initialize *obj* from the marshalled *message* (Dexterity or AT)."""
    types_tool = api.portal.get_tool('portal_types')
    fti = types_tool.get(obj.portal_type)
    if IDexterityFTI.providedBy(fti):
        # Dexterity: fill in Dublin Core with the payload detached so it
        # is not consumed prematurely, then restore the payload and
        # initialize the object's full set of schemata.
        payload, message._payload = message._payload, None
        initializeObjectFromSchemata(DublinCore(obj), [IDublinCore], message)
        message._payload = payload
        initializeObjectFromSchemata(obj, iterSchemata(obj), message)
    elif HAS_ARCHETYPES:
        # Archetypes
        initializeObject(obj, iterFields(obj), message)
def demarshall(ob, message):
    """Initialize *ob* from the marshalled *message* (Dexterity or AT)."""
    types_tool = api.portal.get_tool('portal_types')
    fti = types_tool.get(ob.portal_type)
    if IDexterityFTI.providedBy(fti):
        # DX.  The former ``try/except Exception: raise`` wrapper and the
        # commented-out pdb debugging lines were no-ops and have been removed.
        initializeObjectFromSchemata(ob, iterSchemata(ob), message)
    elif HAS_ARCHETYPES:
        # AT
        initializeObject(ob, iterFields(ob), message)
def to_2100(context):  # noqa: C901
    """Reindex SearchableText of all content containing table blocks.

    Looks for table blocks both in the top-level ``blocks`` attribute and
    inside BlocksField values, and reindexes each matching item exactly
    once (the previous version reindexed and logged items matched via the
    top-level attribute twice).
    """
    logger.info("## Reindex pages with table blocks ##")

    def has_table_block(blocks):
        # True when any block in the mapping is of type "table".
        return any(
            block.get("@type", "") == "table" for block in blocks.values())

    def needs_reindex(item):
        # Top-level "blocks" attribute (also exposed as the "blocks" field).
        if has_table_block(getattr(item, "blocks", {}) or {}):
            return True
        if not HAS_BLOCKSFIELD:
            # blocks are only in blocks field
            return False
        # Table blocks stored inside BlocksField values.
        for schema in iterSchemata(item):
            for name, field in getFields(schema).items():
                if name == "blocks":
                    continue  # already covered by the attribute check above
                if not isinstance(field, BlocksField):
                    continue
                value = field.get(item)
                if not value or isinstance(value, str):
                    continue
                if has_table_block(value.get("blocks", {})):
                    return True
        return False

    pc = api.portal.get_tool(name="portal_catalog")
    brains = pc()
    tot = len(brains)
    items_reindexed = []
    for i, brain in enumerate(brains, start=1):
        if i % 1000 == 0:
            logger.info("Progress: {}/{}".format(i, tot))
        item_obj = brain.getObject()
        item = aq_base(item_obj)
        if needs_reindex(item):
            items_reindexed.append(brain.getPath())
            item_obj.reindexObject(idxs=["SearchableText"])

    logger.info("Reindexed {} items".format(len(items_reindexed)))
    for path in items_reindexed:
        logger.info("- {}".format(path))
def getSize(self):
    """Return the size of the content item in bytes.

    Unlike ``get_size`` — which the getObjSize catalog indexer resolves
    through an ISized adapter (whose default implementation calls back
    into ``getSize``) — this sums ``getSize()`` over the actual field
    values of every schema.
    """
    total = 0
    for schema in iterSchemata(self):
        adapter = schema(self)
        for name, _field in getFieldsInOrder(schema):
            value = getattr(adapter, name, None)
            if hasattr(value, 'getSize'):
                total += value.getSize()
    return total
def marshall(ob):
    """Construct an RFC822-style message from *ob*.

    Dispatches on the content framework: Dexterity, Archetypes, or a
    plain object described by its provided interfaces.
    """
    types_tool = api.portal.get_tool('portal_types')
    fti = types_tool.get(ob.portal_type)
    # noinspection PyUnresolvedReferences
    if HAS_DEXTERITY and IDexterityFTI.providedBy(fti):
        # Dexterity
        return constructMessageFromSchemata(ob, iterSchemata(ob))
    if HAS_ARCHETYPES and hasattr(Acquisition.aq_base(ob), 'schema'):
        # Archetypes
        return constructMessage(ob, iterFields(ob))
    # Other content: fall back to the interfaces the object provides.
    schemata = tuple(ob.__provides__.interfaces())
    return constructMessageFromSchemata(ob, schemata)
def set_default_values(content, container, values): """Set default values for all fields. (If no default value is available, fall back to setting missing value.) This is necessary for content created programmatically since dexterity doesn't persistenly set default values (or missing values) when creating content programmatically. Parameters: - content: The object in creation. Might not be AQ wrapped yet - container: The parent container the object will be added to - values: Mapping of *actual* values (not defaults) that will be or have been set on the object (not by us). I.e. kwargs to invokeFactory or createContentInContainer. Will be taken into consideration when determining whether defaults should apply or not. """ # Canonicalize field names to short form (no prefix) fields_with_value = [k.split('.')[-1] for k in values.keys()] for schema in iterSchemata(content): for name, field in getFieldsInOrder(schema): if field.readonly: continue if name in fields_with_value: # Only set default if no *actual* value was supplied as # an argument to object construction continue if object_has_value_for_field(content, field): # Only set default if a value hasn't been set on the # object yet continue if not is_aq_wrapped(content): # Content isn't AQ wrapped - temporarily wrap it content = content.__of__(container) # Attempt to find a default value for the field value = determine_default_value(field, container) if value is NO_DEFAULT_MARKER: # No default found, fall back to missing value value = field.missing_value field.set(field.interface(content), value)
def get_dx_field_values(self, old_object):
    """Yield ``(fieldname, normalized value)`` for each stored field.

    Fields listed in ``self.ignore_fields`` and fields without a stored
    value are skipped; values are passed through
    ``self.normalize_dx_field_value`` before being yielded.
    """
    no_value_marker = object()
    for schemata in iterSchemata(old_object):
        storage = schemata(old_object)
        for fieldname, field in getFieldsInOrder(schemata):
            if fieldname in self.ignore_fields:
                continue
            value = getattr(storage, fieldname, no_value_marker)
            # Identity check: ``==`` would invoke arbitrary ``__eq__``
            # on the field value, which may misreport or raise.
            if value is no_value_marker:
                continue
            value = self.normalize_dx_field_value(field, fieldname, value)
            yield fieldname, value
def _get_fields(self, context):
    """Return a mapping of field name to ``(schema, field)`` for *context*.

    Supports both Archetypes and Dexterity content; raises for anything
    else.
    """
    fields = {}
    if IATContentType.providedBy(context):
        at_schema = context.Schema()
        for at_field in at_schema.fields():
            fields[at_field.getName()] = (at_schema, at_field)
        return fields
    if IDexterityContent.providedBy(context):
        # main schema should override behaviours, hence the reversed order
        for schema in reversed(list(iterSchemata(context))):
            for field_id, field in getFieldsInOrder(schema):
                fields[field_id] = (schema, field)
        return fields
    raise Exception("Unknown content type for context at %s" % context.absolute_url())  # noqa:E501