def test_primary_field(self):
    """Fields named in form.primary() should provide IPrimaryField."""
    class IDummy(form.Schema):
        form.primary('foo')
        form.primary('bar', 'baz')

        foo = zope.schema.TextLine(title=u"Foo")
        bar = zope.schema.TextLine(title=u"Bar")
        baz = zope.schema.TextLine(title=u"Baz")
        qux = zope.schema.TextLine(title=u"Qux")

    # assertTrue/assertFalse replace the deprecated failUnless/failIf
    # aliases (deprecated since unittest in Python 2.7).
    self.assertTrue(IPrimaryField.providedBy(IDummy['foo']))
    self.assertTrue(IPrimaryField.providedBy(IDummy['bar']))
    self.assertTrue(IPrimaryField.providedBy(IDummy['baz']))
    self.assertFalse(IPrimaryField.providedBy(IDummy['qux']))
def listAnchorNames(self, fieldname=None):
    """Return a list of Anchor names"""
    # Locate the field to scan: an explicit fieldname if given,
    # otherwise the first primary field on any of the context's schemata.
    found = None
    for schema in iterSchemata(self.context):
        for name, field in getFieldsInOrder(schema):
            is_primary = not fieldname and IPrimaryField.providedBy(field)
            if is_primary or name == fieldname:
                found = field
                break
        if found is not None:
            break
    if found is None:
        return []
    try:
        content = found.get(self.context).output
    except AttributeError:
        # Not a text field.
        return []
    try:
        tree = fromstring(content)
    except ConflictError:
        raise
    except Exception:
        # Unparseable markup yields no anchors.
        return []
    names = []
    for anchor in tree.findall(SEARCHPATTERN):
        if "name" in anchor.keys():
            names.append(anchor.get('name'))
    return names
def listAnchorNames(self, fieldname=None):
    """Return a list of Anchor names"""
    results = []
    tree = HTMLTreeBuilder.TreeBuilder()
    # Locate the content field: explicit fieldname wins, otherwise the
    # first primary field found on the context's schemata.
    target = None
    for schema in iterSchemata(self.context):
        if target is not None:
            break
        for name, field in getFieldsInOrder(schema):
            wanted = name == fieldname or (
                not fieldname and IPrimaryField.providedBy(field))
            if wanted:
                target = field
                break
    if target is None:
        return []
    try:
        content = target.get(self.context).output
    except AttributeError:
        # Not a text field.
        return []
    # Wrap in a synthetic root so HTML fragments parse as one document.
    tree.feed('<root>%s</root>' % content)
    rootnode = tree.close()
    for node in rootnode.getiterator():
        if node.tag == "a" and "name" in node.keys():
            results.append(node.attrib['name'])
    return results
def listAnchorNames(self, fieldname=None):
    """Return a list of Anchor names"""
    def _locate():
        # Explicit fieldname wins; otherwise the first primary field.
        for schema in iterSchemata(self.context):
            for name, field in getFieldsInOrder(schema):
                if name == fieldname or (
                        not fieldname and IPrimaryField.providedBy(field)):
                    return field
        return None

    content_field = _locate()
    if content_field is None:
        return []
    try:
        content = content_field.get(self.context).output
    except AttributeError:
        # Not a text field.
        return []
    try:
        tree = fromstring(content)
    except ConflictError:
        raise
    except Exception:
        # Broken markup: no anchors to report.
        return []
    return [a.get('name') for a in tree.findall(SEARCHPATTERN)
            if "name" in a.keys()]
def initializeObject(context, fields, message, defaultCharset='utf-8'):
    """Demarshal RFC(2)822 message headers into matching non-primary fields.

    Primary fields are collected into ``primary`` (payload handling happens
    elsewhere). Header values are decoded with the header's own charset,
    falling back to the message charset, then ``defaultCharset``.
    """
    contentType = message.get_content_type()

    charset = message.get_charset()
    if charset is None:
        charset = message.get_param('charset')
    if charset is not None:
        charset = str(charset)
    else:
        charset = defaultCharset

    headerFields = {}
    primary = []
    for name, field in fields:
        if IPrimaryField.providedBy(field):
            primary.append((name, field))
        else:
            headerFields.setdefault(name.lower(), []).append(field)

    # Demarshal each header
    for name, value in message.items():
        name = name.lower()
        fieldset = headerFields.get(name, None)
        if fieldset is None or len(fieldset) == 0:
            LOG.debug("No matching field found for header %s" % name)
            continue

        field = fieldset.pop(0)
        marshaler = queryMultiAdapter((context, field,), IFieldMarshaler)
        if marshaler is None:
            LOG.debug("No marshaler found for field %s of %s" %
                      (name, repr(context)))
            continue

        headerValue, headerCharset = decode_header(value)[0]
        if headerCharset is None:
            headerCharset = charset

        # MIME messages always use CRLF. For headers, we're probably safer
        # with \n
        headerValue = headerValue.replace('\r\n', '\n')

        try:
            marshaler.demarshal(
                headerValue,
                message=message,
                charset=headerCharset,
                contentType=contentType,
                primary=False
            )
        except ValueError as e:
            # BUG FIX: "except ValueError, e" is a syntax error on Python 3;
            # "as" is valid from Python 2.6 onwards.
            # interface allows demarshal() to raise ValueError to indicate
            # marshalling failed
            LOG.debug("Demarshalling of %s for %s failed: %s" %
                      (name, repr(context), str(e)))
            continue
def constructMessage(context, fields, charset='utf-8'):
    """Build an email Message with one header per non-primary field.

    Primary fields are collected into ``primary`` for payload handling.
    Non-ASCII or multi-line values are wrapped in a Header instance.
    """
    msg = Message()
    primary = []

    # First get all headers, storing primary fields for later
    for name, field in fields:
        if IPrimaryField.providedBy(field):
            primary.append((name, field,))
            continue

        marshaler = queryMultiAdapter((context, field,), IFieldMarshaler)
        if marshaler is None:
            LOG.debug("No marshaler found for field %s of %s" %
                      (name, repr(context)))
            continue

        try:
            value = marshaler.marshal(charset, primary=False)
        except ValueError as e:
            # BUG FIX: "except ValueError, e" is a syntax error on Python 3;
            # "as" works on Python 2.6+ as well.
            LOG.debug("Marshaling of %s for %s failed: %s" %
                      (name, repr(context), str(e)))
            continue

        if value is None:
            value = ''
        elif not isinstance(value, str):
            raise ValueError(
                "Marshaler for field %s did not return a string" % name)

        if marshaler.ascii and '\n' not in value:
            msg[name] = value
        else:
            msg[name] = Header(value, charset)
def __getitem__(self, key):
    """Look up tile data for ``key`` from the layout tree.

    The element's child content is mapped onto the schema's primary
    field. Raises KeyError when the tile is missing or its JSON payload
    cannot be decoded.
    """
    key, schema_ = self.resolve(key)
    for el in self.storage.tree.xpath(
            '//*[contains(@data-tile, "{0:s}")]'.format(key)):
        try:
            data = json.loads(el.get('data-tiledata') or '{}')
        except ValueError:
            if el.get('data-tiledata'):
                # BUG FIX: the second placeholder was "{0:1}", which
                # re-rendered the raw data instead of the tile key;
                # "{1:s}" logs the key as intended.
                logger.error((u'No JSON object could be decoded from '
                              u'data "{0:s}" for tile "{1:s}".').format(
                    el.get('data-tiledata'), key))
            raise KeyError(key)
        # Read primary field content from el content
        if len(el) and len(el[0]):
            primary = u''.join([html.tostring(x) for x in el[0]])
        elif len(el):
            primary = el[0].text
        else:
            primary = None
        if primary:
            for name in schema_:
                if IPrimaryField.providedBy(schema_[name]):
                    data[name] = primary
                    break
        return schema_compatible(data, schema_)
    raise KeyError(key)
def set_default_values(self, obj, named_file):
    # set default values for all fields
    for schemata in iterSchemata(obj):
        for name, field in getFieldsInOrder(schemata):
            if IPrimaryField.providedBy(field):
                # The primary field receives the uploaded file itself.
                field.set(field.interface(obj), named_file)
                continue
            # Default lookup order: IValue "default" adapter, the field's
            # declared default, finally its missing_value.
            value = None
            adapter = queryMultiAdapter(
                (obj, obj.REQUEST, None, field, None),
                IValue, name='default')
            if adapter is not None:
                value = adapter.get()
            if value is None:
                value = getattr(field, 'default', None)
            if value is None:
                try:
                    value = field.missing_value
                except AttributeError:
                    pass
            field.set(field.interface(obj), value)
def initializeObject(context, fields, message, defaultCharset='utf-8'):
    """Demarshal message headers into matching non-primary schema fields.

    Primary fields are collected separately; header charsets fall back to
    the message-level charset, then ``defaultCharset``.
    """
    contentType = message.get_content_type()

    charset = message.get_charset()
    if charset is None:
        charset = message.get_param('charset')
    if charset is not None:
        charset = str(charset)
    else:
        charset = defaultCharset

    headerFields = {}
    primary = []
    for name, field in fields:
        if IPrimaryField.providedBy(field):
            primary.append((name, field))
        else:
            headerFields.setdefault(name.lower(), []).append(field)

    # Demarshal each header
    for name, value in message.items():
        name = name.lower()
        fieldset = headerFields.get(name, None)
        if fieldset is None or len(fieldset) == 0:
            LOG.debug("No matching field found for header %s" % name)
            continue

        field = fieldset.pop(0)
        marshaler = queryMultiAdapter((
            context,
            field,
        ), IFieldMarshaler)
        if marshaler is None:
            LOG.debug("No marshaler found for field %s of %s" %
                      (name, repr(context)))
            continue

        headerValue, headerCharset = decode_header(value)[0]
        if headerCharset is None:
            headerCharset = charset

        # MIME messages always use CRLF. For headers, we're probably safer with \n
        headerValue = headerValue.replace('\r\n', '\n')

        try:
            marshaler.demarshal(headerValue,
                                message=message,
                                charset=headerCharset,
                                contentType=contentType,
                                primary=False)
        except ValueError as e:
            # BUG FIX: replaced Python-3-incompatible "except ValueError, e".
            # interface allows demarshal() to raise ValueError to indicate
            # marshalling failed
            LOG.debug("Demarshalling of %s for %s failed: %s" %
                      (name, repr(context), str(e)))
            continue
def __setitem__(self, key, value):
    """Store tile data for ``key`` in the layout tree.

    JSON-compatible data is kept in the element's ``data-tiledata``
    attribute; the primary field's markup becomes the element's child
    content. Cached ``__getitem__`` lookups for the key (and its common
    aliases) are invalidated afterwards.
    """
    key, schema_ = self.resolve(key)
    data = json_compatible(value)

    # Store primary field as tile tag content
    primary = None
    for name in schema_:
        if IPrimaryField.providedBy(schema_[name]) and data.get(name):
            raw = data.pop(name) or u''
            if isinstance(raw, dict):
                # Support supermodel RichTextValue: persist its non-data
                # keys as "<name>-<key>" entries and keep only the markup.
                for key_ in [k for k in raw if k != 'data']:
                    data[u'{0:s}-{1:s}'.format(name, key_)] = raw[key_]
                raw = raw.get('data')
            try:
                # Wrap in a div so fragments parse as a single element.
                raw = u'<div>{0:s}</div>'.format(raw or u'')
                primary = html.fromstring(raw)
            except (etree.ParseError, TypeError):
                pass

    # Update existing value
    for el in self.storage.tree.xpath(
            '//*[contains(@data-tile, "{0:s}")]'.format(key)):
        el.clear()
        el.attrib['data-tile'] = key
        if data:
            el.attrib['data-tiledata'] = json.dumps(data)
        elif 'data-tiledata' in el.attrib:
            del el.attrib['data-tiledata']
        if primary is not None:
            el.append(primary)
        # Purge view.memoize
        invalidate_view_memoize(
            self, '__getitem__', (self, key), {})
        invalidate_view_memoize(
            self, '__getitem__', (self, key.lstrip('@')), {})
        invalidate_view_memoize(
            self, '__getitem__', (self, key.split('/', 1)[-1]), {})
        return self.sync()

    # Add new value
    el = etree.Element('div')
    el.attrib['data-tile'] = key
    if data:
        el.attrib['data-tiledata'] = json.dumps(data)
    if primary is not None:
        el.append(primary)
    self.storage.tree.find('body').append(el)
    # Purge view.memoize
    invalidate_view_memoize(
        self, '__getitem__', (self, key), {})
    invalidate_view_memoize(
        self, '__getitem__', (self, key.lstrip('@')), {})
    invalidate_view_memoize(
        self, '__getitem__', (self, key.split('/', 1)[-1]), {})
    self.sync()
def create_file(self, filename, data, obj):
    # filename must be unicode
    if not isinstance(filename, unicode):
        filename = filename.decode('utf-8')
    # Instantiate the primary field's value type with the payload.
    for schemata in iterSchemata(obj):
        for _name, field in getFieldsInOrder(schemata):
            if not IPrimaryField.providedBy(field):
                continue
            return field._type(data=data, filename=filename)
def get_primary_field(obj):
    """Return the first ``(name, field)`` pair marked as primary on any of
    the object's schemata, or None when no primary field exists.

    BUG FIX: the original ``break`` only exited the inner field loop, so a
    primary field on a *later* schema silently overwrote an earlier match;
    returning immediately guarantees the first primary field wins.
    """
    for schema in iterSchemata(obj):
        for name, field in getFieldsInOrder(schema):
            if IPrimaryField.providedBy(field):
                return (name, field)
    return None
def create_file(self, filename, data, obj):
    # filename must be unicode
    if isinstance(filename, unicode):
        name = filename
    else:
        name = filename.decode("utf-8")
    # Build the value using the primary field's own value type.
    for schemata in iterSchemata(obj):
        for field_name, field in getFieldsInOrder(schemata):
            if IPrimaryField.providedBy(field):
                return field._type(data=data, filename=name)
def __call__(self, name, content_type, data, obj_id):
    """Create a content object of the type registered for ``name`` and
    store the uploaded ``data`` on its primary (or first) file field.

    Returns the created object.
    """
    ctr = cmfutils.getToolByName(self.context, 'content_type_registry')
    type_ = ctr.findTypeName(name.lower(), '', '') or 'File'

    # otherwise I get ZPublisher.Conflict ConflictErrors
    # when uploading multiple files
    upload_lock.acquire()

    try:
        transaction.begin()
        obj = ploneutils._createObjectByType(type_, self.context, obj_id)
        ttool = getToolByName(self.context, 'portal_types')
        ctype = ttool[obj.portal_type]
        schema = ctype.lookupSchema()
        fields = getFieldsInOrder(schema)
        file_fields = [field
                       for safe_name, field in fields
                       if INamedFileField.providedBy(field)
                       or INamedImageField.providedBy(field)]
        if len(file_fields) == 0:
            # NOTE(review): execution continues after logging, so an empty
            # file_fields raises IndexError below — confirm intent.
            logger.info("An error happens : the dexterity content type %s "
                        "has no file field, rawdata can't be created",
                        obj.absolute_url())

        # Prefer the primary file field; fall back to the first one.
        for file_field in file_fields:
            if IPrimaryField.providedBy(file_field):
                break
        else:
            # Primary field can't be set ttw,
            # then, we take the first one
            file_field = file_fields[0]

        # TODO: use adapters
        if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
            value = NamedBlobImage(data=data.read(),
                                   contentType=content_type,
                                   filename=unicode(obj_id, 'utf-8'))
        elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
            value = NamedBlobFile(data=data.read(),
                                  contentType=content_type,
                                  filename=unicode(obj_id, 'utf-8'))
        elif INamedImageField.providedBy(file_field):
            value = NamedImage(data=data.read(),
                               contentType=content_type,
                               filename=unicode(obj_id, 'utf-8'))
        elif INamedFileField.providedBy(file_field):
            value = NamedFile(data=data.read(),
                              contentType=content_type,
                              filename=unicode(obj_id, 'utf-8'))

        file_field.set(obj, value)

        obj.title = name
        obj.reindexObject()

        notify(ObjectInitializedEvent(obj))
        notify(ObjectModifiedEvent(obj))

        transaction.commit()
    finally:
        upload_lock.release()

    return obj
def _get_primary_field_type(self, obj):
    """Determine the type of an objects primary field (e.g. NamedBlobFile)
    so we can use it as a factory when setting the new document's primary
    field.
    """
    for schema in iterSchemata(obj):
        for _name, field in getFieldsInOrder(schema):
            if not IPrimaryField.providedBy(field):
                continue
            return field._type
def __init__(self, context):
    """Adapt ``context`` to primary-field info; the FTI schema must declare
    exactly one primary field."""
    self.context = context
    fti = getUtility(IDexterityFTI, name=context.portal_type)
    self.schema = fti.lookupSchema()
    matches = []
    for name, field in getFieldsInOrder(self.schema):
        if IPrimaryField.providedBy(field):
            matches.append((name, field))
    if len(matches) != 1:
        raise TypeError('Could not adapt', context, IPrimaryFieldInfo)
    self.fieldname, self.field = matches[0]
def __init__(self, context):
    """Adapt ``context`` to primary-field info using the first primary
    field found across all of its schemata.

    BUG FIX: the original ``break`` only left the inner field loop, so a
    primary field on a *later* schema silently replaced the first match;
    we now stop searching entirely at the first hit.
    """
    self.context = context
    primary = None
    for i in iterSchemata(context):
        for name, field in getFieldsInOrder(i):
            if IPrimaryField.providedBy(field):
                primary = (name, field)
                break
        if primary is not None:
            break
    if not primary:
        raise TypeError('Could not adapt', context, IPrimaryFieldInfo)
    self.fieldname, self.field = primary
def set(self, data, filename, content_type):
    """Store uploaded ``data`` on the object's primary (or first) file
    field, then reindex and notify.

    Returns '' on success, u'serverError' when the content type has no
    file/image field.
    """
    error = ''
    obj = self.context
    ttool = getToolByName(obj, 'portal_types')
    ctype = ttool[obj.portal_type]
    schema = ctype.lookupSchema()
    fields = getFieldsInOrder(schema)
    file_fields = [
        field for name, field in fields
        if INamedFileField.providedBy(field)
        or INamedImageField.providedBy(field)
    ]
    if len(file_fields) == 0:
        error = u'serverError'
        logger.info(
            "An error happens : the dexterity content type %s "
            "has no file field, rawdata can't be created",
            obj.absolute_url())
        # BUG FIX: bail out instead of falling through to
        # file_fields[0], which raised IndexError on an empty list.
        return error

    # Prefer the primary file field; fall back to the first one.
    for file_field in file_fields:
        if IPrimaryField.providedBy(file_field):
            break
    else:
        # Primary field can't be set ttw,
        # then, we take the first one
        file_field = file_fields[0]

    # TODO: use adapters
    # BUG FIX: the blob-image branch tested the comprehension leftover
    # ``field`` instead of ``file_field``, and the NamedImage branch
    # passed unicode(file_field) instead of unicode(filename).
    if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
        value = NamedBlobImage(
            data=data,
            contentType=content_type,
            filename=unicode(filename))
    elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
        value = NamedBlobFile(
            data=data,
            contentType=content_type,
            filename=unicode(filename))
    elif INamedImageField.providedBy(file_field):
        value = NamedImage(
            data=data,
            contentType=content_type,
            filename=unicode(filename))
    elif INamedFileField.providedBy(file_field):
        value = NamedFile(
            data=data,
            contentType=content_type,
            filename=unicode(filename))
    file_field.set(obj, value)
    obj.reindexObject()
    notify(ObjectInitializedEvent(obj))
    return error
def mimeType(self):
    """Return 'message/rfc822' for multipart / multi-primary content,
    'text/plain' otherwise."""
    if self._haveMessage:
        # A parsed message already exists: inspect it directly.
        if self._getMessage().is_multipart():
            return 'message/rfc822'
        return 'text/plain'
    # No message yet: infer from the number of primary fields.
    count = 0
    for schema in iterSchemata(self.context):
        for name, field in getFieldsInOrder(schema):
            if IPrimaryField.providedBy(field):
                count += 1
                if count > 1:
                    # more than one primary field
                    return 'message/rfc822'
    # zero or one primary fields
    return 'text/plain'
def set(self, data, filename, content_type):
    """Store ``data`` on the object's primary (or first) file field.

    Returns '' on success, u'serverError' when the content type has no
    file/image field.
    """
    error = ''
    obj = self.context
    ttool = getToolByName(obj, 'portal_types')
    ctype = ttool[obj.portal_type]
    schema = ctype.lookupSchema()
    fields = getFieldsInOrder(schema)
    file_fields = [
        field for name, field in fields
        if INamedFileField.providedBy(field)
        or INamedImageField.providedBy(field)
    ]
    if len(file_fields) == 0:
        error = u'serverError'
        # NOTE(review): execution continues after logging, so an empty
        # ``file_fields`` raises IndexError below — confirm intent.
        logger.info(
            "An error happens : the dexterity content type %s "
            "has no file field, rawdata can't be created",
            obj.absolute_url())

    # Prefer the primary file field; fall back to the first one.
    for file_field in file_fields:
        if IPrimaryField.providedBy(file_field):
            break
    else:
        # Primary field can't be set ttw,
        # then, we take the first one
        file_field = file_fields[0]

    # TODO: use adapters
    if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
        value = NamedBlobImage(data=data, contentType=content_type,
                               filename=unicode(filename, 'utf-8'))
    elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
        value = NamedBlobFile(data=data, contentType=content_type,
                              filename=unicode(filename, 'utf-8'))
    elif INamedImageField.providedBy(file_field):
        value = NamedImage(data=data, contentType=content_type,
                           filename=unicode(filename, 'utf-8'))
    elif INamedFileField.providedBy(file_field):
        value = NamedFile(data=data, contentType=content_type,
                          filename=unicode(filename, 'utf-8'))
    file_field.set(obj, value)
    return error
def set_default_values(self, obj, named_file):
    # set default values for all fields
    for schemata in iterSchemata(obj):
        for name, field in getFieldsInOrder(schemata):
            if IPrimaryField.providedBy(field):
                # The primary field receives the uploaded file itself.
                field.set(field.interface(obj), named_file)
            else:
                # Default lookup order: IValue "default" adapter, the
                # field's declared default, then its missing_value.
                default = queryMultiAdapter((obj, obj.REQUEST, None,
                                             field, None), IValue,
                                            name="default")
                if default is not None:
                    default = default.get()
                if default is None:
                    default = getattr(field, "default", None)
                if default is None:
                    try:
                        default = field.missing_value
                    except AttributeError:
                        pass
                value = default
                field.set(field.interface(obj), value)
def constructMessage(context, fields, charset="utf-8"):
    """Marshal ``fields`` of ``context`` into an email ``Message``.

    Non-primary fields become message headers; primary fields are
    deferred to the payload step. Returns the message.
    """
    msg = Message()
    primaries = []

    # First get all headers, storing primary fields for later
    for name, field in fields:
        if IPrimaryField.providedBy(field):
            primaries.append((name, field))
            continue

        marshaler = queryMultiAdapter((context, field), IFieldMarshaler)
        if marshaler is None:
            logger.debug(
                "No marshaler found for field {0} of {1}".format(
                    name, repr(context)))
            continue

        try:
            marshaled = marshaler.marshal(charset, primary=False)
        except ValueError as e:
            logger.debug(
                "Marshaling of {0} for {1} failed: {2}".format(
                    name, repr(context), str(e)))
            continue

        if marshaled is None:
            marshaled = ""

        # Enforce native strings
        marshaled = safe_native_string(marshaled)

        if marshaler.ascii and "\n" not in marshaled:
            msg[name] = marshaled
        else:
            # see https://tools.ietf.org/html/rfc2822#section-3.2.2
            if '\n' in marshaled:
                marshaled = marshaled.replace("\n", r"\n")
            msg[name] = Header(marshaled, charset)

    # Then deal with the primary field
    _add_payload_to_message(context, msg, primaries, charset)
    return msg
def constructMessage(context, fields, charset='utf-8'):
    """Build an email Message with one header per non-primary field;
    primary fields are collected into ``primary`` for payload handling.
    """
    msg = Message()
    primary = []

    # First get all headers, storing primary fields for later
    for name, field in fields:
        if IPrimaryField.providedBy(field):
            primary.append((
                name,
                field,
            ))
            continue

        marshaler = queryMultiAdapter((
            context,
            field,
        ), IFieldMarshaler)
        if marshaler is None:
            LOG.debug("No marshaler found for field %s of %s" %
                      (name, repr(context)))
            continue

        try:
            value = marshaler.marshal(charset, primary=False)
        except ValueError as e:
            # BUG FIX: "except ValueError, e" is a syntax error on
            # Python 3; "as" is valid from Python 2.6 onwards.
            LOG.debug("Marshaling of %s for %s failed: %s" %
                      (name, repr(context), str(e)))
            continue

        if value is None:
            value = ''
        elif not isinstance(value, str):
            raise ValueError(
                "Marshaler for field %s did not return a string" % name)

        if marshaler.ascii and '\n' not in value:
            msg[name] = value
        else:
            msg[name] = Header(value, charset)
def __getitem__(self, key):
    """Look up tile data for ``key``, mapping the element content back
    onto the schema's primary field.

    Supermodel RichTextValue support: "<name>-<key>" companion entries
    are folded back into a dict under the primary field's name.
    """
    key, schema_ = self.resolve(key)
    for el in self.storage.tree.xpath(
            '//*[contains(@data-tile, "{0:s}")]'.format(key)):
        try:
            data = json.loads(el.get('data-tiledata') or '{}')
        except ValueError:
            if el.get('data-tiledata'):
                # BUG FIX: the second placeholder was "{0:1}", which
                # re-rendered the raw data instead of the tile key;
                # "{1:s}" logs the key as intended.
                logger.error((u'No JSON object could be decoded from '
                              u'data "{0:s}" for tile "{1:s}".').format(
                    el.get('data-tiledata'), key))
            raise KeyError(key)
        # Read primary field content from el content
        if len(el) and len(el[0]):
            primary = u''.join(
                [html.tostring(x, encoding='utf-8').decode('utf-8')
                 for x in el[0]])
        elif len(el):
            primary = el[0].text
        else:
            primary = None
        if primary:
            for name in schema_:
                if IPrimaryField.providedBy(schema_[name]):
                    data[name] = primary
                    # Supports supermodel-defined RichTextValue
                    keys = [key_ for key_ in data.keys()
                            if key_.startswith('{0:s}-'.format(name))]
                    if keys:
                        data[name] = dict(
                            [(u'data', data[name])] +
                            [(key_.split('-', 1)[-1], data.pop(key_))
                             for key_ in keys]
                        )
                    break
        return schema_compatible(data, schema_)
    raise KeyError(key)
def constructMessage(context, fields, charset="utf-8"):
    """Marshal ``fields`` of ``context`` into an email Message: one header
    per non-primary field, primary fields written as the payload.

    Returns the message.
    """
    msg = Message()
    primaries = []

    # First get all headers, storing primary fields for later
    for name, field in fields:
        value = ''
        if IPrimaryField.providedBy(field):
            primaries.append((name, field))
            continue

        marshaler = queryMultiAdapter((context, field), IFieldMarshaler)
        if marshaler is None:
            logger.debug("No marshaler found for field {0} of {1}".format(
                name, repr(context)))
            continue

        try:
            value = marshaler.marshal(charset, primary=False)
        except ValueError as e:
            logger.debug("Marshaling of {0} for {1} failed: {2}".format(
                name, repr(context), str(e)))
            continue

        if value is None:
            value = ""

        # Enforce native strings
        value = safe_native_string(value)

        if marshaler.ascii and "\n" not in value:
            msg[name] = value
        else:
            # see https://tools.ietf.org/html/rfc2822#section-3.2.2
            # Escape newlines so the header stays one logical line.
            if '\n' in value:
                value = value.replace("\n", r"\n")
            msg[name] = Header(value, charset)

    # Then deal with the primary field
    _add_payload_to_message(context, msg, primaries, charset)
    return msg
def __setitem__(self, key, value):
    """Persist tile data for ``key``: JSON in ``data-tiledata``, the
    primary field's markup as element content."""
    key, schema_ = self.resolve(key)
    data = json_compatible(value)

    # Store primary field as tile tag content
    primary = None
    for name in schema_:
        if IPrimaryField.providedBy(schema_[name]) and data.get(name):
            try:
                # Wrap in a div so fragments parse as one element.
                raw = u'<div>{0:s}</div>'.format(data.pop(name) or u'')
                primary = html.fromstring(raw)
            except (etree.ParseError, TypeError):
                pass

    # Update existing value
    for el in self.storage.tree.xpath(
            '//*[contains(@data-tile, "{0:s}")]'.format(key)):
        el.clear()
        el.attrib['data-tile'] = key
        if data:
            el.attrib['data-tiledata'] = json.dumps(data)
        elif 'data-tiledata' in el.attrib:
            del el.attrib['data-tiledata']
        if primary is not None:
            el.append(primary)
        return self.sync()

    # Add new value
    el = etree.Element('div')
    el.attrib['data-tile'] = key
    if data:
        el.attrib['data-tiledata'] = json.dumps(data)
    if primary is not None:
        el.append(primary)
    self.storage.tree.find('body').append(el)
    self.sync()
def initializeObject(context, fields, message, defaultCharset="utf-8"):
    """Populate ``context`` from an RFC 2822 ``message``.

    Headers are demarshalled into matching non-primary fields; the
    payload(s) are demarshalled into the primary field(s), which must
    match the number of payloads.
    """
    content_type = message.get_content_type()

    charset = message.get_charset()
    if charset is None:
        charset = message.get_param("charset")
    if charset is not None:
        charset = str(charset)
    else:
        charset = defaultCharset

    header_fields = {}
    primary = []
    for name, field in fields:
        if IPrimaryField.providedBy(field):
            primary.append((name, field))
            continue
        header_fields.setdefault(name.lower(), []).append(field)

    # Demarshal each header
    for name, value in message.items():
        name = name.lower()
        fieldset = header_fields.get(name, None)
        if fieldset is None or len(fieldset) == 0:
            logger.debug("No matching field found for header {0}".format(name))
            continue

        field = fieldset.pop(0)
        marshaler = queryMultiAdapter((context, field), IFieldMarshaler)
        if marshaler is None:
            logger.debug(
                "No marshaler found for field {0} of {1}".format(
                    name, repr(context)
                )
            )
            continue

        header_value, header_charset = decode_header(value)[0]
        if header_charset is None:
            header_charset = charset

        # MIME messages always use CRLF.
        # For headers, we're probably safer with \n
        #
        # Also, replace escaped Newlines, for details see
        # https://tools.ietf.org/html/rfc2822#section-3.2.2
        if isinstance(header_value, six.binary_type):
            header_value = header_value.replace(b"\r\n", b"\n")
            header_value = header_value.replace(b"\\n", b"\n")
        else:
            header_value = header_value.replace("\r\n", "\n")
            # BUG FIX: was r"\\n" (a literal double backslash + n), which
            # never matched the single-backslash escapes produced by
            # constructMessage; "\\n" mirrors the bytes branch above.
            header_value = header_value.replace("\\n", "\n")

        try:
            marshaler.demarshal(
                header_value,
                message=message,
                charset=header_charset,
                contentType=content_type,
                primary=False,
            )
        except ValueError as e:
            # interface allows demarshal() to raise ValueError to indicate
            # marshalling failed
            logger.debug(
                "Demarshalling of {0} for {1} failed: {2}".format(
                    name, repr(context), str(e)
                )
            )
            continue

    # Then demarshal the primary field(s)
    payloads = message.get_payload()

    # do nothing if we don't have a payload
    if not payloads:
        return

    # A single payload is a string, multiparts are lists
    if isinstance(payloads, str):
        if len(primary) != 1:
            raise ValueError(
                "Got a single string payload for message, but no primary "
                "fields found for %s" % repr(context)
            )
        payloads = [message]

    if len(payloads) != len(primary):
        raise ValueError(
            "Got %d payloads for message, but %s primary fields "
            "found for %s" % (len(payloads), len(primary), repr(context))
        )

    for idx, payload in enumerate(payloads):
        name, field = primary[idx]
        payload_content_type = payload.get_content_type()

        charset = message.get_charset()
        if charset is not None:
            charset = str(charset)
        else:
            charset = "utf-8"

        marshaler = queryMultiAdapter((context, field), IFieldMarshaler)
        if marshaler is None:
            # BUG FIX: the second placeholder was {0}, repeating the
            # field name instead of logging the context.
            logger.debug(
                "No marshaler found for primary field {0} of {1}".format(
                    name, repr(context)
                )
            )
            continue

        payload_value = payload.get_payload(decode=True)
        payload_charset = payload.get_content_charset(charset)
        try:
            marshaler.demarshal(
                payload_value,
                message=payload,
                charset=payload_charset,
                contentType=payload_content_type,
                primary=True,
            )
        except ValueError as e:
            # interface allows demarshal() to raise ValueError to
            # indicate marshalling failed
            logger.debug(
                "Demarshalling of {0} for {1} failed: {2}".format(
                    name, repr(context), str(e)
                )
            )
            continue
def getPrimaryField(self):
    """Return the first field providing IPrimaryField, or None."""
    candidates = (
        field
        for schema in iterSchemata(self.context)
        for _name, field in getFieldsInOrder(schema)
        if IPrimaryField.providedBy(field)
    )
    return next(candidates, None)
def test_primary_field(self):
    """The 'video' type's primary field must be video_file."""
    video = self.create('video')
    info = IPrimaryFieldInfo(video)
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(info.fieldname, 'video_file')
    self.assertTrue(IPrimaryField.providedBy(info.field))
def __call__(self, name, content_type, data, obj_id):
    """Create a content object of the type registered for ``name`` and
    store the uploaded ``data`` on its primary (or first) file field.

    Runs under ``upload_lock`` inside an explicit transaction. Returns
    the created object.
    """
    ctr = cmfutils.getToolByName(self.context, 'content_type_registry')
    type_ = ctr.findTypeName(name.lower(), '', '') or 'File'

    # otherwise I get ZPublisher.Conflict ConflictErrors
    # when uploading multiple files
    upload_lock.acquire()

    try:
        transaction.begin()
        obj = ploneutils._createObjectByType(type_, self.context, obj_id)
        ttool = getToolByName(self.context, 'portal_types')
        ctype = ttool[obj.portal_type]
        schema = ctype.lookupSchema()
        fields = getFieldsInOrder(schema)
        file_fields = [
            field for safe_name, field in fields
            if INamedFileField.providedBy(field)
            or INamedImageField.providedBy(field)
        ]
        if len(file_fields) == 0:
            # NOTE(review): execution continues after logging, so an
            # empty file_fields raises IndexError below — confirm intent.
            logger.info(
                "An error happens : the dexterity content type %s "
                "has no file field, rawdata can't be created",
                obj.absolute_url())

        # Prefer the primary file field; fall back to the first one.
        for file_field in file_fields:
            if IPrimaryField.providedBy(file_field):
                break
        else:
            # Primary field can't be set ttw,
            # then, we take the first one
            file_field = file_fields[0]

        # TODO: use adapters
        if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
            value = NamedBlobImage(data=data.read(),
                                   contentType=content_type,
                                   filename=unicode(obj_id, 'utf-8'))
        elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
            value = NamedBlobFile(data=data.read(),
                                  contentType=content_type,
                                  filename=unicode(obj_id, 'utf-8'))
        elif INamedImageField.providedBy(file_field):
            value = NamedImage(data=data.read(),
                               contentType=content_type,
                               filename=unicode(obj_id, 'utf-8'))
        elif INamedFileField.providedBy(file_field):
            value = NamedFile(data=data.read(),
                              contentType=content_type,
                              filename=unicode(obj_id, 'utf-8'))

        file_field.set(obj, value)

        obj.title = name
        obj.reindexObject()

        notify(ObjectInitializedEvent(obj))
        notify(ObjectModifiedEvent(obj))

        transaction.commit()
    finally:
        upload_lock.release()

    return obj
def _get_primary_field_type(self, obj):
    # Yield the _type of the first primary field across obj's schemata;
    # None when no primary field exists.
    primary_types = (
        field._type
        for schemata in iterSchemata(obj)
        for _name, field in getFieldsInOrder(schemata)
        if IPrimaryField.providedBy(field)
    )
    return next(primary_types, None)
def _get_primary_field(self):
    """Return the context attribute named after the first primary field."""
    ctx = self.context
    for schema in iterSchemata(ctx):
        for field_name, field in getFieldsInOrder(schema):
            if not IPrimaryField.providedBy(field):
                continue
            return getattr(ctx, field_name)
def test_primary_field(self):
    """The 'audio' type's primary field must be audio_file."""
    audio = self.create('audio')
    info = IPrimaryFieldInfo(audio)
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(info.fieldname, 'audio_file')
    self.assertTrue(IPrimaryField.providedBy(info.field))
def encode(data, schema, ignore=()):
    """Given a data dictionary with key/value pairs and schema, return an
    encoded query string. This is similar to urllib.urlencode(), but field
    names will include the appropriate field type converters, e.g. an int
    field will be encoded as fieldname:int=123. Fields not found in the data
    dict will be ignored, and items in the dict not in the schema will also
    be ignored. Additional fields to ignore can be passed with the 'ignore'
    parameter. If any fields cannot be converted, a ComponentLookupError
    will be raised.
    """
    # NOTE: this local deliberately shadows the function name below.
    encode = []

    for name, field in getFieldsInOrder(schema):
        # Primary (rich text / binary) fields never go on a query string.
        if HAS_RFC822 and IPrimaryField.providedBy(field):
            continue
        if name in ignore or name not in data:
            continue

        converter = IFieldTypeConverter(field, None)
        if converter is None:
            raise ComponentLookupError(
                u'Cannot URL encode {0} of type {1}'.format(
                    name, field.__class__))

        # Append the type token (":int", ":bool", ...) to the field name.
        encoded_name = name
        if converter.token:
            encoded_name = ':'.join([name, converter.token])

        value = data[name]
        if value is None:
            continue
        elif isinstance(value, unicode):
            value = value.encode('utf-8')

        if ISequence.providedBy(field):
            # Sequences carry both the value-type token and the list token.
            value_type_converter = IFieldTypeConverter(field.value_type, None)
            if value_type_converter is None:
                raise ComponentLookupError(
                    u'Cannot URL encode value type for {0} of type '
                    u'{1} : {2}'.format(name, field.__class__,
                                        field.value_type.__class__))

            if value_type_converter.token:
                encoded_name = ':'.join(
                    [name, value_type_converter.token, converter.token])

            for item in value:
                if isinstance(item, bool):
                    item = item and '1' or ''
                elif isinstance(item, unicode):
                    item = item.encode('utf-8')
                if isinstance(item, dict):
                    encode.extend(map_to_pairs(encoded_name, item))
                else:
                    encode.append((
                        encoded_name,
                        item,
                    ))
        else:
            # The :bool converter just does bool() value, but urlencode()
            # does str() on the object.
            # The result is False => 'False' => True :(
            if isinstance(value, bool):
                value = value and '1' or ''
            if isinstance(value, dict):
                encode.extend(map_to_pairs(encoded_name, value))
            else:
                encode.append((encoded_name, value))

    return urllib.urlencode(encode)
def decode(data, schema, missing=True, primary=False):
    """Decode a data dict according to a schema. The returned dictionary
    will contain only keys matching schema names, and will force type
    values appropriately.

    This function is only necessary because the encoders used by encode()
    are not sufficiently detailed to always return the exact type expected
    by a field, e.g. resulting in ascii/unicode discrepancies.

    If missing is True, fields that are in the schema but not in the data
    will be set to field.missing_value. Otherwise, they are ignored.

    If primary is True, also fields that are marked as primary fields are
    decoded from the data. (Primary fields are not decoded by default,
    because primary field are mainly used for rich text or binary fields
    and data is usually parsed from query string with length limitations.)
    """
    decoded = {}

    for name, field in getFields(schema).items():
        if not primary and HAS_RFC822 and IPrimaryField.providedBy(field):
            continue
        if name not in data:
            if missing:
                decoded[name] = field.missing_value
            continue

        value = data[name]
        if value is None:
            continue

        # Some fields declare a tuple of acceptable types; coerce to the
        # last (most specific) one.
        field_type = field._type
        if isinstance(field_type, (tuple, list,)):
            field_type = field_type[-1]

        if ISequence.providedBy(field):
            # Coerce each item to the sequence's value type.
            converted = []

            value_type_field_type = field.value_type._type
            if isinstance(value_type_field_type, (tuple, list,)):
                value_type_field_type = value_type_field_type[-1]

            for item in value:
                if field.value_type._type and not isinstance(
                        item, field.value_type._type):
                    item = value_type_field_type(item)
                converted.append(item)

            value = converted
        elif isinstance(value, (tuple, list)) and value:
            # Scalar field given a sequence: take the first item.
            value = value[0]

        if isinstance(value, six.binary_type):
            value = value.decode('utf-8')

        if field._type is not None and not isinstance(value, field._type):
            value = field_type(value)

        decoded[name] = value

    return decoded
def initializeObject(context, fields, message, defaultCharset="utf-8"):
    """Initialize ``context`` from an RFC(2)822-style ``message``.

    Header values are demarshalled into matching non-primary fields, and
    the message payload(s) are demarshalled into the primary field(s),
    using ``IFieldMarshaler`` multi-adapters looked up on
    ``(context, field)``.

    :param context: the object to update.
    :param fields: iterable of ``(name, field)`` pairs to consider.
    :param message: an ``email.message.Message`` instance.
    :param defaultCharset: charset used when the message declares none.
    :raises ValueError: if the number of payloads does not match the
        number of primary fields.
    """
    content_type = message.get_content_type()

    charset = message.get_charset()
    if charset is None:
        charset = message.get_param("charset")
    if charset is not None:
        charset = str(charset)
    else:
        charset = defaultCharset

    # Split fields into primary fields (filled from the payload) and
    # header fields (filled from message headers). Several fields may
    # share a case-insensitive header name, hence the list values.
    header_fields = {}
    primary = []
    for name, field in fields:
        if IPrimaryField.providedBy(field):
            primary.append((name, field))
            continue
        header_fields.setdefault(name.lower(), []).append(field)

    # Demarshal each header
    for name, value in message.items():
        name = name.lower()
        fieldset = header_fields.get(name, None)
        if fieldset is None or len(fieldset) == 0:
            logger.debug("No matching field found for header {0}".format(name))
            continue

        field = fieldset.pop(0)
        marshaler = queryMultiAdapter((context, field), IFieldMarshaler)
        if marshaler is None:
            logger.debug("No marshaler found for field {0} of {1}".format(
                name, repr(context)))
            continue

        header_value, header_charset = decode_header(value)[0]
        if header_charset is None:
            header_charset = charset

        # MIME messages always use CRLF. For headers, we're probably
        # safer with \n
        #
        # Also, replace escaped newlines, for details see
        # https://tools.ietf.org/html/rfc2822#section-3.2.2
        if isinstance(header_value, six.binary_type):
            header_value = header_value.replace(b"\r\n", b"\n")
            header_value = header_value.replace(b"\\n", b"\n")
        else:
            header_value = header_value.replace("\r\n", "\n")
            # BUG FIX: was r"\\n" (backslash-backslash-n, three chars),
            # which never matches an escaped newline; "\\n" mirrors the
            # bytes branch above.
            header_value = header_value.replace("\\n", "\n")

        try:
            marshaler.demarshal(
                header_value,
                message=message,
                charset=header_charset,
                contentType=content_type,
                primary=False,
            )
        except ValueError as e:
            # interface allows demarshal() to raise ValueError to indicate
            # marshalling failed
            logger.debug("Demarshalling of {0} for {1} failed: {2}".format(
                name, repr(context), str(e)))
            continue

    # Then demarshal the primary field(s)
    payloads = message.get_payload()

    # do nothing if we don't have a payload
    if not payloads:
        return

    # A single payload is a string, multiparts are lists
    if isinstance(payloads, six.string_types):
        if len(primary) != 1:
            raise ValueError(
                "Got a single string payload for message, but no primary "
                "fields found for %s" % repr(context))
        payloads = [message]

    if len(payloads) != len(primary):
        raise ValueError("Got %d payloads for message, but %s primary fields "
                         "found for %s" % (len(payloads), len(primary),
                                           repr(context)))

    for idx, payload in enumerate(payloads):
        name, field = primary[idx]
        payload_content_type = payload.get_content_type()

        # NOTE(review): reads the charset from the outer message rather
        # than from this payload — confirm this is intended.
        charset = message.get_charset()
        if charset is not None:
            charset = str(charset)
        else:
            charset = "utf-8"

        marshaler = queryMultiAdapter((context, field), IFieldMarshaler)
        if marshaler is None:
            # BUG FIX: format string was "{0} of {0}", which printed the
            # field name twice and dropped the context argument.
            logger.debug(
                "No marshaler found for primary field {0} of {1}".format(
                    name, repr(context)))
            continue

        payload_value = payload.get_payload(decode=True)
        payload_charset = payload.get_content_charset(charset)
        try:
            marshaler.demarshal(
                payload_value,
                message=payload,
                charset=payload_charset,
                contentType=payload_content_type,
                primary=True,
            )
        except ValueError as e:
            # interface allows demarshal() to raise ValueError to indicate
            # marshalling failed
            logger.debug("Demarshalling of {0} for {1} failed: {2}".format(
                name, repr(context), str(e)))
            continue
def constructMessage(context, fields, charset='utf-8'):
    """Construct an ``email.message.Message`` carrying the primary
    field(s) of ``context``.

    A single primary field produces a non-multipart message with a string
    payload; multiple primary fields produce a ``multipart/mixed``
    message with one sub-part per field. Field values are serialized via
    ``IFieldMarshaler`` multi-adapters on ``(context, field)``.

    :param context: the object whose field values are marshaled.
    :param fields: iterable of ``(name, field)`` pairs to consider.
    :param charset: preferred charset passed to the marshalers.
    :returns: the constructed ``Message``.
    """
    msg = Message()

    # Collect all primary fields.
    # BUG FIX: the original loop broke out after the first primary field,
    # which made the multipart (len(primary) > 1) branch below
    # unreachable. Dead commented-out header-marshaling code and pdb
    # remnants were also removed.
    primary = [
        (name, field)
        for name, field in fields
        if IPrimaryField.providedBy(field)
    ]

    # A single primary field: non-multipart message, string payload.
    if len(primary) == 1:
        name, field = primary[0]
        marshaler = queryMultiAdapter((context, field,), IFieldMarshaler)
        if marshaler is not None:
            value = marshaler.marshal(charset, primary=True)
            if value is not None:
                msg.set_payload(value)

    # Otherwise, we return a multipart message
    elif len(primary) > 1:
        msg.set_type('multipart/mixed')

        for name, field in primary:
            marshaler = queryMultiAdapter((context, field,), IFieldMarshaler)
            if marshaler is None:
                continue

            payload = Message()
            attach = False

            contentType = marshaler.getContentType()
            payloadCharset = marshaler.getCharset(charset)

            if contentType is not None:
                payload.set_type(contentType)
                attach = True

            if payloadCharset is not None:
                # using set_charset() would also add transfer encoding,
                # which we don't want to do always
                payload.set_param('charset', payloadCharset)
                attach = True

            value = marshaler.marshal(charset, primary=True)
            if value is not None:
                payload.set_payload(value)
                attach = True

            # Only attach parts that actually carry something.
            if attach:
                marshaler.postProcessMessage(payload)
                msg.attach(payload)

    return msg
def write(self, fieldNode, schema, field):
    """Export-time handler: mark primary fields in the serialized schema.

    When ``field`` provides ``IPrimaryField``, set a namespaced
    ``primary="true"`` attribute on its XML node; otherwise do nothing.
    """
    if not IPrimaryField.providedBy(field):
        return
    fieldNode.set(ns('primary', self.namespace), "true")
def encode(data, schema, ignore=()):
    """Given a data dictionary with key/value pairs and schema, return an
    encoded query string.

    This is similar to urllib.urlencode(), but field names will include
    the appropriate field type converters, e.g. an int field will be
    encoded as fieldname:int=123. Fields not found in the data dict will
    be ignored, and items in the dict not in the schema will also be
    ignored. Additional fields to ignore can be passed with the 'ignore'
    parameter.

    If any fields cannot be converted, a ComponentLookupError will be
    raised.
    """
    pairs = []

    for name, field in getFieldsInOrder(schema):
        # Primary (rich text / binary) fields never go on a query string.
        if HAS_RFC822 and IPrimaryField.providedBy(field):
            continue
        if name in ignore or name not in data:
            continue

        converter = IFieldTypeConverter(field, None)
        if converter is None:
            raise ComponentLookupError(
                u'Cannot URL encode {0} of type {1}'.format(
                    name, field.__class__
                )
            )

        # Append the type-converter token to the field name, if any.
        encoded_name = name
        if converter.token:
            encoded_name = ':'.join([name, converter.token])

        value = data[name]
        if value is None:
            continue
        elif isinstance(value, six.text_type):
            value = value.encode('utf-8')

        if ISequence.providedBy(field):
            item_converter = IFieldTypeConverter(field.value_type, None)
            if item_converter is None:
                raise ComponentLookupError(
                    u'Cannot URL encode value type for {0} of type '
                    u'{1} : {2}'.format(
                        name, field.__class__, field.value_type.__class__
                    )
                )

            if item_converter.token:
                encoded_name = ':'.join([
                    name, item_converter.token, converter.token
                ])

            for item in value:
                if isinstance(item, bool):
                    item = '1' if item else ''
                elif isinstance(item, six.text_type):
                    item = item.encode('utf-8')
                if isinstance(item, dict):
                    pairs.extend(map_to_pairs(encoded_name, item))
                else:
                    pairs.append((encoded_name, item,))
        else:
            # The :bool converter just does bool() value, but urlencode()
            # does str() on the object. The result is
            # False => 'False' => True :(
            if isinstance(value, bool):
                value = '1' if value else ''

            if isinstance(value, dict):
                pairs.extend(map_to_pairs(encoded_name, value))
            else:
                pairs.append((encoded_name, value))

    return parse.urlencode(pairs)
def getPrimaryField( self ):
    """Return the first field providing IPrimaryField across all schemata
    of the context, or None when no primary field exists."""
    candidates = (
        field
        for schema in iterSchemata( self.context )
        for _name, field in getFieldsInOrder( schema )
        if IPrimaryField.providedBy( field )
    )
    return next( candidates, None )
def decode(data, schema, missing=True):
    """Decode a data dict according to a schema.

    The returned dictionary will contain only keys matching schema names,
    and will force type values appropriately.

    This function is only necessary because the encoders used by encode()
    are not sufficiently detailed to always return the exact type expected
    by a field, e.g. resulting in ascii/unicode discrepancies.

    If missing is True, fields that are in the schema but not in the data
    will be set to field.missing_value. Otherwise, they are ignored.
    """
    decoded = {}

    for name, field in getFields(schema).items():
        # Skip primary (rich-text/binary) fields entirely.
        if HAS_RFC822 and IPrimaryField.providedBy(field):
            continue

        if name not in data:
            if missing:
                decoded[name] = field.missing_value
            continue

        value = data[name]
        if value is None:
            continue

        # _type may be a tuple of acceptable types; coerce to the last one.
        target_type = field._type
        if isinstance(target_type, (tuple, list)):
            target_type = target_type[-1]

        if ISequence.providedBy(field):
            item_types = field.value_type._type
            item_target = item_types
            if isinstance(item_target, (tuple, list)):
                item_target = item_target[-1]
            value = [
                item if not item_types or isinstance(item, item_types)
                else item_target(item)
                for item in value
            ]
        elif isinstance(value, (tuple, list)) and value:
            # Scalar field given a sequence: take the first element.
            value = value[0]

        # Python 2 only: promote byte strings to unicode.
        if isinstance(value, str):
            value = unicode(value, 'utf-8')

        if field._type is not None and not isinstance(value, field._type):
            value = target_type(value)

        decoded[name] = value

    return decoded
invoiceDate=invoice.invoiceDate, invoicePayCondition=invoice.invoicePayCondition, invoiceExpireDate=invoice.invoiceExpireDate, invoiceCurrency=invoice.invoiceCurrency, invoiceTotalVat=invoice.invoiceTotalVat, invoiceTotalAmount=invoice.invoiceTotalCost ) # Get the field containing data fields = getFieldsInOrder(IInvoice) file_fields = [field for name, field in fields if INamedFileField.providedBy(field) or INamedImageField.providedBy(field) ] for file_field in file_fields: if IPrimaryField.providedBy(file_field): break else: # Primary field can't be set ttw, # then, we take the first one file_field = file_fields[0] #import pdb #pdb.set_trace() value = NamedBlobFile(data=invoicefile, contentType=contenttype, filename=unicode(filename, 'utf-8')) file_field.set(content, value)