def migrate_saved_data(ploneformgen, easyform):
    """Copy saved form input from PloneFormGen save-data adapters into the
    corresponding EasyForm save-data actions.

    For every ``FormSaveDataAdapter`` in *ploneformgen*, the action with the
    same id is looked up on *easyform*; each stored row is decoded and its
    values are coerced to the types expected by the EasyForm schema before
    being appended via ``action.addDataRow``.
    """
    for data_adapter in ploneformgen.objectValues('FormSaveDataAdapter'):
        actions = get_actions(easyform)
        # The EasyForm action matching this adapter by id (may be None).
        action = actions.get(data_adapter.getId())
        schema = get_schema(easyform)
        if ISaveData.providedBy(action):
            cols = data_adapter.getColumnNames()
            for idx, row in enumerate(data_adapter.getSavedFormInput()):
                # Rows whose width doesn't match the column list can't be
                # mapped reliably -- skip them with a warning.
                if len(row) != len(cols):
                    logger.warning(
                        'Number of columns does not match. Skipping row %s in '
                        'data adapter %s/%s', idx,
                        '/'.join(easyform.getPhysicalPath()),
                        data_adapter.getId())
                    continue
                data = {}
                for key, value in zip(cols, row):
                    field = schema.get(key)
                    # Stored values are raw bytes; decode before coercion.
                    value = value.decode('utf8')
                    if IFromUnicode.providedBy(field):
                        value = field.fromUnicode(value)
                    elif IDatetime.providedBy(field) and value:
                        value = DateTime(value).asdatetime()
                    elif IDate.providedBy(field) and value:
                        value = DateTime(value).asdatetime().date()
                    elif ISet.providedBy(field):
                        # Stored as a repr of a collection; fall back to the
                        # raw string when it can't be parsed.
                        try:
                            value = set(literal_eval(value))
                        except ValueError:
                            pass
                    elif INamedBlobFileField.providedBy(field):
                        # File uploads are not migrated -- only their
                        # textual representation was saved.
                        value = None
                    data[key] = value
                action.addDataRow(data)
def _make_namedfile(value, field, widget):
    """Coerce *value* into the named file/image flavour matching *field*.

    Values already providing ``INamed`` pass through untouched. Base64
    encoded strings (for ``IBytes`` fields) and filename/data mappings are
    unpacked first, then wrapped in the concrete type the field expects.
    """
    if INamed.providedBy(value):
        # Already a NamedFile/NamedImage (or blob variant) -- nothing to do.
        return value
    if isinstance(value, (six.binary_type, six.text_type)) \
            and IBytes.providedBy(field):
        filename, data = b64decode_file(value)
    elif isinstance(value, (dict, PersistentDict)):
        filename, data = value['filename'], value['data']
    if INamedBlobImageField.providedBy(field):
        return NamedBlobImage(data=data, filename=filename)
    if INamedImageField.providedBy(field):
        return NamedImage(data=data, filename=filename)
    if INamedBlobFileField.providedBy(field):
        return NamedBlobFile(data=data, filename=filename)
    return NamedFile(data=data, filename=filename)
def get_maxsize_dx(self, validator, field):
    """Return the configured max upload size for *field*, or None.

    Per-type/per-field overrides from the registry win over the global
    file/image size settings; None means no limit could be determined.
    """
    if not HAS_DX:
        return None
    try:
        file_size = api.portal.get_registry_record(
            'file_size', interface=ILimitFileSizePanel)
        image_size = api.portal.get_registry_record(
            'image_size', interface=ILimitFileSizePanel)
    except InvalidParameterError:
        # Registry records not installed -- no limit applies.
        return None
    # Check if there's a type/field specific setting in the registry.
    type_context = self.context
    if self.context == api.portal.get():
        # We are in an add form, so the context is the portal;
        # validator.view carries the portal_type of the object being added.
        type_context = validator.view
    override = self._get_type_maxsize(field, type_context)
    if override is not None:
        return override
    # Fall back to the global settings, picked by field kind.
    if INamedBlobFileField.providedBy(field) and file_size:
        return float(file_size)
    if INamedBlobImageField.providedBy(field) and image_size:
        return float(image_size)
    return None
def getReferencedAttributes(self, obj): file_data = {} # Try to get last revision, only store a new blob if the # contents differ from the prior one, otherwise store a # reference to the prior one. # The implementation is mostly based on CMFEditions's CloneBlobs # modifier. repo = getToolByName(obj, "portal_repository") try: prior_rev = repo.retrieve(obj) except ArchivistRetrieveError: prior_rev = None for schemata in iterSchemata(obj): for name, field in getFields(schemata).items(): if INamedBlobFileField.providedBy(field) or INamedBlobImageField.providedBy(field): try: # field.get may raise an AttributeError if the field # is provided by a behavior and hasn't been # initialized yet field_value = field.get(field.interface(obj)) except AttributeError: field_value = None if field_value is None: continue blob_file = field_value.open() save_new = True dotted_name = ".".join([schemata.__identifier__, name]) if prior_rev is not None: prior_obj = prior_rev.object prior_blob = field.get(field.interface(prior_obj)) if prior_blob is not None: prior_file = prior_blob.open() # Check for file size differences if os.fstat(prior_file.fileno()).st_size == os.fstat(blob_file.fileno()).st_size: # Files are the same size, compare line by line for line, prior_line in izip(blob_file, prior_file): if line != prior_line: break else: # The files are the same, save a reference # to the prior versions blob on this # version file_data[dotted_name] = prior_blob._blob save_new = False if save_new: new_blob = file_data[dotted_name] = Blob() new_blob_file = new_blob.open("w") try: blob_file.seek(0) new_blob_file.writelines(blob_file) finally: blob_file.close() new_blob_file.close() return file_data
def __call__(self, name, content_type, data, obj_id):
    """Create a content object for the uploaded file *name* and store
    *data* on its primary (or first) file/image field.
    """
    registry = cmfutils.getToolByName(self.context, 'content_type_registry')
    portal_type = registry.findTypeName(name.lower(), '', '') or 'File'
    # Serialize uploads: otherwise concurrent requests trigger
    # ZPublisher.Conflict ConflictErrors.
    upload_lock.acquire()
    try:
        transaction.begin()
        content = ploneutils._createObjectByType(
            portal_type, self.context, obj_id)
        types_tool = getToolByName(self.context, 'portal_types')
        fti = types_tool[content.portal_type]
        file_fields = []
        for safe_name, candidate in getFieldsInOrder(fti.lookupSchema()):
            if INamedFileField.providedBy(candidate) \
                    or INamedImageField.providedBy(candidate):
                file_fields.append(candidate)
        if not file_fields:
            logger.info("An error happens : the dexterity content type %s "
                        "has no file field, rawdata can't be created",
                        content.absolute_url())
        for file_field in file_fields:
            if IPrimaryField.providedBy(file_field):
                break
        else:
            # The primary field can't be set TTW, so fall back to the
            # first file/image field found.
            file_field = file_fields[0]
        # TODO: use adapters
        payload = data.read()
        filename = unicode(obj_id, 'utf-8')
        if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
            value = NamedBlobImage(data=payload, contentType=content_type,
                                   filename=filename)
        elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
            value = NamedBlobFile(data=payload, contentType=content_type,
                                  filename=filename)
        elif INamedImageField.providedBy(file_field):
            value = NamedImage(data=payload, contentType=content_type,
                               filename=filename)
        elif INamedFileField.providedBy(file_field):
            value = NamedFile(data=payload, contentType=content_type,
                              filename=filename)
        file_field.set(content, value)
        content.title = name
        content.reindexObject()
        notify(ObjectInitializedEvent(content))
        notify(ObjectModifiedEvent(content))
        transaction.commit()
    finally:
        upload_lock.release()
    return content
def update_object_with_data(content, record):
    """Update *content* with the values from *record*.

    Supports both Archetypes (mutator-based) and Dexterity (schema-based)
    content. Keys that match no schema field are skipped. Returns the
    updated, reindexed content object.
    """
    schema = get_schema(content)
    is_atct = IATContentType.providedBy(content)
    is_dext = IDexterityContent.providedBy(content)
    for k, v in record.items():
        # Fix: initialize so content that is neither AT nor Dexterity is
        # skipped instead of raising NameError on the first lookup below.
        field = None
        if is_atct:
            field = schema.get(k)
        if is_dext:
            # Search the main schema first, then all behavior schemata.
            schemas = [schema]
            schemas.extend(get_behaviors_schema(content))
            for i in schemas:
                field = i.get(k)
                if field:
                    break
        logger.info("update_object_with_data::processing "
                    "key=%r, value=%r, field=%r", k, v, field)
        if field is None:
            logger.info("update_object_with_data::skipping key=%r", k)
            continue
        if is_atct:
            # XXX handle security
            mutator = field.getMutator(content)
            mutator(v)
        else:
            # ugly hack for tags
            if ICategorization is field.interface:
                content.setSubject(v)
            elif IPublication is field.interface:
                # Publication dates have dedicated setters.
                if k == u'effective':
                    content.setEffectiveDate(v)
                elif k == u'expires':
                    content.setExpirationDate(v)
            elif INamedBlobFileField.providedBy(field):
                # File payloads arrive base64-encoded with a filename.
                filename = v.get("filename")
                data = b64decode(v.get("data"))
                file_obj = NamedBlobFile(data, filename=filename)
                field.validate(file_obj)
                field.set(content, file_obj)
            else:
                field.validate(v)
                field.set(content, v)
    content.reindexObject()
    return content
def set(self, data, filename, content_type):
    """Store *data* on the context's primary (or first) file/image field.

    Returns an empty string on success, or ``u'serverError'`` when the
    content type exposes no file/image field at all.
    """
    error = ''
    obj = self.context
    ttool = getToolByName(obj, 'portal_types')
    ctype = ttool[obj.portal_type]
    schema = ctype.lookupSchema()
    fields = getFieldsInOrder(schema)
    file_fields = [
        field for name, field in fields
        if INamedFileField.providedBy(field)
        or INamedImageField.providedBy(field)
    ]
    if len(file_fields) == 0:
        error = u'serverError'
        logger.info(
            "An error happens : the dexterity content type %s "
            "has no file field, rawdata can't be created",
            obj.absolute_url())
        # Fix: bail out here -- previously execution fell through and
        # raised IndexError on file_fields[0] below.
        return error
    for file_field in file_fields:
        if IPrimaryField.providedBy(file_field):
            break
    else:
        # Primary field can't be set ttw,
        # then, we take the first one
        file_field = file_fields[0]
    # TODO: use adapters
    # Fix: the blob-image check used the stale loop variable `field` from
    # the comprehension above instead of the selected `file_field`.
    if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
        value = NamedBlobImage(
            data=data, contentType=content_type,
            filename=unicode(filename))
    elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
        value = NamedBlobFile(
            data=data, contentType=content_type,
            filename=unicode(filename))
    elif INamedImageField.providedBy(file_field):
        # Fix: filename was mistakenly unicode(file_field), storing the
        # field's repr instead of the uploaded file's name.
        value = NamedImage(
            data=data, contentType=content_type,
            filename=unicode(filename))
    elif INamedFileField.providedBy(file_field):
        value = NamedFile(
            data=data, contentType=content_type,
            filename=unicode(filename))
    file_field.set(obj, value)
    obj.reindexObject()
    notify(ObjectInitializedEvent(obj))
    return error
def set(self, data, filename, content_type):
    """Store *data* on the context's primary (or first) file/image field.

    Returns '' on success; returns u'serverError' when the content type
    has no file/image field.
    """
    error = ''
    content = self.context
    types_tool = getToolByName(content, 'portal_types')
    fti = types_tool[content.portal_type]
    file_fields = [
        candidate
        for name, candidate in getFieldsInOrder(fti.lookupSchema())
        if INamedFileField.providedBy(candidate)
        or INamedImageField.providedBy(candidate)
    ]
    if not file_fields:
        error = u'serverError'
        logger.info(
            "An error happens : the dexterity content type %s "
            "has no file field, rawdata can't be created",
            content.absolute_url())
    for file_field in file_fields:
        if IPrimaryField.providedBy(file_field):
            break
    else:
        # The primary field can't be set TTW, so fall back to the
        # first file/image field found.
        file_field = file_fields[0]
    # TODO: use adapters
    name_u = unicode(filename, 'utf-8')
    if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
        value = NamedBlobImage(data=data, contentType=content_type,
                               filename=name_u)
    elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
        value = NamedBlobFile(data=data, contentType=content_type,
                              filename=name_u)
    elif INamedImageField.providedBy(file_field):
        value = NamedImage(data=data, contentType=content_type,
                           filename=name_u)
    elif INamedFileField.providedBy(file_field):
        value = NamedFile(data=data, contentType=content_type,
                          filename=name_u)
    file_field.set(content, value)
    return error
def fieldFilter():
    """Return the context's image and blob-file fields.

    Collects fields from the FTI schema plus any behavior marker schemata,
    keeping only those providing ``INamedImageField`` or
    ``INamedBlobFileField``.
    """
    portal_type = self.context.getPortalTypeName()
    fti = getUtility(IDexterityFTI, name=portal_type)
    schema = fti.lookupSchema()
    fields = getFieldsInOrder(schema)
    assignable = IBehaviorAssignable(self.context, None)
    # Fix: the adapter lookup falls back to None, so guard before calling
    # enumerateBehaviors() to avoid an AttributeError.
    if assignable is not None:
        for behavior in assignable.enumerateBehaviors():
            if behavior.marker:
                new_fields = getFieldsInOrder(behavior.marker)
                if len(new_fields) > 0:
                    fields = fields + new_fields
    obj_fields = []
    for key, value in fields:
        is_image = INamedImageField.providedBy(value)
        # NOTE(review): asymmetric on purpose? Images match the generic
        # interface but files only the blob variant, so non-blob file
        # fields are skipped -- confirm this is intended.
        is_file = INamedBlobFileField.providedBy(value)
        if is_image or is_file:
            obj_fields.append(value)
    return obj_fields
def getOnCloneModifiers(self, obj):
    """Removes references to blobs.

    Returns the (persistent_id, persistent_load, ...) hook tuple used by
    the cloning pickler: known blob objects are marked so the pickler
    skips them, and they resolve to None when loaded back.
    """
    blob_ids = {}
    for schemata in iterSchemata(obj):
        for fname, field in getFields(schemata).items():
            is_blob_field = (INamedBlobFileField.providedBy(field)
                             or INamedBlobImageField.providedBy(field))
            if not is_blob_field:
                continue
            stored = field.get(field.interface(obj))
            if stored is not None:
                blob_ids[id(aq_base(stored._blob))] = True

    def persistent_id(subobj):
        # Non-None marker for blobs so the pickler treats them as external.
        return blob_ids.get(id(subobj), None)

    def persistent_load(subobj):
        # Blob references resolve to nothing on the clone.
        return None

    return persistent_id, persistent_load, [], []
def copy_fields(self, translation):
    """Copy language-independent field values from the context to
    *translation*.

    Relation values are re-targeted to the translated counterpart of the
    referenced object when one exists. Returns True when at least one
    language-independent field was found (so callers can fire an
    ObjectModifiedEvent on the translation).
    """
    fti = getUtility(IDexterityFTI, name=self.context.portal_type)
    schemas = []
    schemas.append(fti.lookupSchema())
    for behavior_schema in \
            utils.getAdditionalSchemata(self.context, self.context.portal_type):
        if behavior_schema is not None:
            schemas.append(behavior_schema)
    doomed = False
    for schema in schemas:
        for field_name in schema:
            if ILanguageIndependentField.providedBy(schema[field_name]):
                doomed = True
                value = getattr(schema(self.context), field_name, _marker)
                if IRelationValue.providedBy(value):
                    # Point the relation at the translation of the target
                    # object (in the destination language) when available.
                    obj = value.to_object
                    adapter = queryAdapter(translation, ILanguage)
                    trans_obj = ITranslationManager(obj)\
                        .get_translation(adapter.get_language())
                    if trans_obj:
                        intids = component.getUtility(IIntIds)
                        value = RelationValue(intids.getId(trans_obj))
                if not (value == _marker):
                    # We check if not (value == _marker) because
                    # z3c.relationfield has an __eq__
                    if (not value) and INamedBlobFileField.providedBy(schema[field_name]):
                        # Empty blob on the source: copy the translation's
                        # existing value back to the context instead of
                        # clobbering it with the empty one.
                        value = getattr(schema(translation), field_name)
                        setattr(schema(self.context), field_name, value)
                    else:
                        setattr(schema(translation), field_name, value)
    # If at least one field has been copied over to the translation
    # we need to inform subscriber to trigger an ObjectModifiedEvent
    # on that translation.
    return doomed
def __call__(self, name, content_type, data, obj_id):
    """Create a content object for the uploaded file *name* and store
    *data* on its primary (or first) file/image field.
    """
    ctr = cmfutils.getToolByName(self.context, 'content_type_registry')
    type_ = ctr.findTypeName(name.lower(), '', '') or 'File'
    # Serialize uploads -- concurrent uploads otherwise produce
    # ZPublisher.Conflict ConflictErrors.
    upload_lock.acquire()
    try:
        transaction.begin()
        obj = ploneutils._createObjectByType(type_, self.context, obj_id)
        ttool = getToolByName(self.context, 'portal_types')
        schema = ttool[obj.portal_type].lookupSchema()
        file_fields = [
            fld
            for safe_name, fld in getFieldsInOrder(schema)
            if INamedFileField.providedBy(fld)
            or INamedImageField.providedBy(fld)
        ]
        if not file_fields:
            logger.info(
                "An error happens : the dexterity content type %s "
                "has no file field, rawdata can't be created",
                obj.absolute_url())
        # Prefer the primary field; it can't be set TTW, so otherwise
        # take the first file/image field.
        primary = next(
            (fld for fld in file_fields if IPrimaryField.providedBy(fld)),
            None)
        file_field = primary if primary is not None else file_fields[0]
        # TODO: use adapters
        payload = data.read()
        fname = unicode(obj_id, 'utf-8')
        if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
            value = NamedBlobImage(data=payload, contentType=content_type,
                                   filename=fname)
        elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
            value = NamedBlobFile(data=payload, contentType=content_type,
                                  filename=fname)
        elif INamedImageField.providedBy(file_field):
            value = NamedImage(data=payload, contentType=content_type,
                               filename=fname)
        elif INamedFileField.providedBy(file_field):
            value = NamedFile(data=payload, contentType=content_type,
                              filename=fname)
        file_field.set(obj, value)
        obj.title = name
        obj.reindexObject()
        notify(ObjectInitializedEvent(obj))
        notify(ObjectModifiedEvent(obj))
        transaction.commit()
    finally:
        upload_lock.release()
    return obj
def getReferencedAttributes(self, obj): file_data = {} # Try to get last revision, only store a new blob if the # contents differ from the prior one, otherwise store a # reference to the prior one. # The implementation is mostly based on CMFEditions's CloneBlobs # modifier. repo = getToolByName(obj, 'portal_repository') try: prior_rev = repo.retrieve(obj) except ArchivistRetrieveError: prior_rev = None for schemata in iterSchemata(obj): for name, field in getFields(schemata).items(): if (INamedBlobFileField.providedBy(field) or INamedBlobImageField.providedBy(field)): try: # field.get may raise an AttributeError if the field # is provided by a behavior and hasn't been # initialized yet field_value = field.get(field.interface(obj)) except AttributeError: field_value = None if field_value is None: continue blob_file = field_value.open() save_new = True dotted_name = '.'.join([schemata.__identifier__, name]) if prior_rev is not None: prior_obj = prior_rev.object prior_blob = field.get(field.interface(prior_obj)) if prior_blob is not None: prior_file = prior_blob.open() # Check for file size differences if (os.fstat(prior_file.fileno()).st_size == os.fstat(blob_file.fileno()).st_size): # Files are the same size, compare line by line for line, prior_line in izip(blob_file, prior_file): if line != prior_line: break else: # The files are the same, save a reference # to the prior versions blob on this # version file_data[dotted_name] = prior_blob._blob save_new = False if save_new: new_blob = file_data[dotted_name] = Blob() new_blob_file = new_blob.open('w') try: blob_file.seek(0) new_blob_file.writelines(blob_file) finally: blob_file.close() new_blob_file.close() return file_data
def prepare_field_value(self, new_object, field, value):
    """Recursively coerce *value* into the form *field* expects on
    *new_object*.

    Handles byte strings (decoded to unicode), lists/tuples (element-wise),
    relation choices (UID resolved to a relation), rich text, and named
    file/image values (re-wrapped in the field's concrete type, adopting
    the source blob when both sides are blob-based).
    """
    recurse = partial(self.prepare_field_value, new_object, field)
    if isinstance(value, str):
        # Python 2 bytes -> unicode before any further handling.
        return recurse(value.decode('utf-8'))
    if isinstance(value, list):
        return map(recurse, value)
    if isinstance(value, tuple):
        return tuple(map(recurse, value))
    relation_fields = filter(IRelationChoice.providedBy,
                             (field, getattr(field, 'value_type', None)))
    if relation_fields and isinstance(value, unicode):
        # Value is a UID; resolve it and build a path-based relation.
        target = uuidToObject(value)
        return create_relation('/'.join(target.getPhysicalPath()))
    if IRichText.providedBy(field) \
            and not IRichTextValue.providedBy(value):
        return recurse(field.fromUnicode(value))
    if INamedField.providedBy(field) and value \
            and not isinstance(value, field._type):
        source_is_blobby = IBlobWrapper.providedBy(value)
        target_is_blobby = INamedBlobFileField.providedBy(field) or \
            INamedBlobImageField.providedBy(field)
        if source_is_blobby and target_is_blobby:
            filename = value.filename
            if isinstance(filename, str):
                filename = filename.decode('utf-8')
            new_value = field._type(
                data='',  # empty blob, will be replaced
                contentType=value.content_type,
                filename=filename)
            if not hasattr(new_value, '_blob'):
                raise ValueError(
                    ('Unsupported file value type {!r}'
                     ', missing _blob.').format(
                        new_value.__class__))
            # Simply copy the persistent blob object (with the file system
            # pointer) to the new value so that the file is not copied.
            # We assume that the old object is trashed and can therefore
            # adopt the blob file.
            new_value._blob = value.getBlob()
            return recurse(new_value)
        else:
            # Non-blob path: copy the raw bytes into the field's type.
            filename = value.filename
            if isinstance(filename, str):
                filename = filename.decode('utf-8')
            data = value.data
            data = getattr(data, 'data', data)  # extract Pdata
            return recurse(field._type(
                data=data,
                contentType=value.content_type,
                filename=filename))
    return value
def prepare_field_value(self, new_object, field, value):
    """Recursively coerce *value* into the form *field* expects on
    *new_object*.

    Variant that matches ``IRelation`` fields and normalizes empty file
    values (empty string or zero-size payload) to None. Otherwise handles
    byte strings, lists/tuples, rich text and named file/image values like
    its sibling implementation.
    """
    recurse = partial(self.prepare_field_value, new_object, field)
    if isinstance(value, str):
        # Python 2 bytes -> unicode before any further handling.
        return recurse(value.decode('utf-8'))
    if isinstance(value, list):
        return map(recurse, value)
    if isinstance(value, tuple):
        return tuple(map(recurse, value))
    relation_fields = filter(IRelation.providedBy,
                             (field, getattr(field, 'value_type', None)))
    if relation_fields and isinstance(value, unicode):
        # Value is a UID; resolve it and build a path-based relation.
        target = uuidToObject(value)
        return create_relation('/'.join(target.getPhysicalPath()))
    if IRichText.providedBy(field) \
            and not IRichTextValue.providedBy(value):
        return recurse(field.fromUnicode(value))
    if INamedField.providedBy(field) and value is not None \
            and not isinstance(value, field._type):
        # Treat empty uploads as "no value".
        if value == '':
            return None
        if hasattr(value, 'get_size') and value.get_size() == 0:
            return None
        source_is_blobby = IBlobWrapper.providedBy(value)
        target_is_blobby = INamedBlobFileField.providedBy(field) or \
            INamedBlobImageField.providedBy(field)
        if source_is_blobby and target_is_blobby:
            filename = value.filename
            if isinstance(filename, str):
                filename = filename.decode('utf-8')
            new_value = field._type(
                data='',  # empty blob, will be replaced
                contentType=value.content_type,
                filename=filename)
            if not hasattr(new_value, '_blob'):
                raise ValueError(
                    ('Unsupported file value type {!r}'
                     ', missing _blob.').format(
                        new_value.__class__))
            # Simply copy the persistent blob object (with the file system
            # pointer) to the new value so that the file is not copied.
            # We assume that the old object is trashed and can therefore
            # adopt the blob file.
            new_value._blob = value.getBlob()
            return recurse(new_value)
        else:
            # Non-blob path: copy the raw bytes into the field's type.
            filename = value.filename
            if isinstance(filename, str):
                filename = filename.decode('utf-8')
            data = value.data
            data = getattr(data, 'data', data)  # extract Pdata
            return recurse(field._type(
                data=data,
                contentType=value.content_type,
                filename=filename))
    return value
def export_blobs(self, portal_type, blob_type, blacklist, whitelist):
    """Return a zip-file with file and/or images for the required export.

    :param portal_type: the portal_type whose objects are exported
    :param blob_type: one of 'images', 'files' or 'related'
    :param blacklist: field names to exclude (passed to get_schema_info)
    :param whitelist: field names to include (passed to get_schema_info)

    Streams the zip via the response on success; returns a message string
    when no matching blobs were found.
    """
    all_fields = get_schema_info(portal_type, blacklist, whitelist)
    if blob_type == 'images':
        fields = [
            i for i in all_fields
            if INamedImageField.providedBy(i[1])
            or INamedBlobImageField.providedBy(i[1])
        ]
    elif blob_type == 'files':
        fields = [
            i for i in all_fields
            if INamedFileField.providedBy(i[1])
            or INamedBlobFileField.providedBy(i[1])
        ]
    elif blob_type == 'related':
        fields = [
            i for i in all_fields
            if IRelationChoice.providedBy(i[1])
            or IRelationList.providedBy(i[1])
        ]
    else:
        # Fix: an unknown blob_type previously raised NameError because
        # 'fields' was never bound; treat it as "nothing to export".
        fields = []
    tmp_file = NamedTemporaryFile()
    zip_file = zipfile.ZipFile(tmp_file, 'w')
    catalog = api.portal.get_tool('portal_catalog')
    query = {'portal_type': portal_type}
    # Restrict the export to objects below the current context.
    query['path'] = {}
    query['path']['query'] = '/'.join(self.context.getPhysicalPath())
    blobs_found = False
    if HAS_MULTILINGUAL and 'Language' in catalog.indexes():
        query['Language'] = 'all'
    for brain in catalog(query):
        obj = brain.getObject()
        for fieldname, field in fields:
            # manually filter for fields
            # if fieldname not in ['primary_picture']:
            #     continue
            blobs = []
            value = field.get(field.interface(obj))
            if not value:
                continue
            if blob_type != 'related':
                blobs = [value]
            elif IRelationChoice.providedBy(field) or \
                    IRelationList.providedBy(field):
                blobs = get_blobs_from_relations(value, field)
            for blob in blobs:
                if not blob:
                    continue
                filename = str((blob.filename).encode('utf8'))
                zip_file.writestr(
                    '{0}_{1}/{2}'.format(
                        brain.UID,  # or: brain.id.upper(),
                        fieldname,
                        filename),
                    str(blob.data))
                blobs_found = True
    zip_file.close()
    if not blobs_found:
        return 'No {0} found'.format(blob_type)
    data = file(tmp_file.name).read()
    response = self.request.response
    response.setHeader('content-type', 'application/zip')
    response.setHeader('content-length', len(data))
    response.setHeader('content-disposition',
                       'attachment; filename="{0}.zip"'.format(blob_type))
    return response.write(data)
def export_blobs(self, portal_type, blob_type, blacklist, whitelist):
    """Return a zip-file with file and/or images for the required export.

    :param portal_type: the portal_type whose objects are exported
    :param blob_type: one of 'images', 'files' or 'related'
    :param blacklist: field names to exclude (passed to get_schema_info)
    :param whitelist: field names to include (passed to get_schema_info)

    Streams the zip via the response on success; returns a message string
    when no matching blobs were found.
    """
    all_fields = get_schema_info(portal_type, blacklist, whitelist)
    if blob_type == 'images':
        fields = [
            i for i in all_fields
            if INamedImageField.providedBy(i[1])
            or INamedBlobImageField.providedBy(i[1])]
    elif blob_type == 'files':
        fields = [
            i for i in all_fields
            if INamedFileField.providedBy(i[1])
            or INamedBlobFileField.providedBy(i[1])]
    elif blob_type == 'related':
        fields = [
            i for i in all_fields
            if IRelationChoice.providedBy(i[1])
            or IRelationList.providedBy(i[1])]
    else:
        # Fix: an unknown blob_type previously raised NameError because
        # 'fields' was never bound; treat it as "nothing to export".
        fields = []
    tmp_file = NamedTemporaryFile()
    zip_file = zipfile.ZipFile(tmp_file, 'w')
    catalog = api.portal.get_tool('portal_catalog')
    query = {'portal_type': portal_type}
    blobs_found = False
    if HAS_MULTILINGUAL and 'Language' in catalog.indexes():
        query['Language'] = 'all'
    for brain in catalog(query):
        obj = brain.getObject()
        for fieldname, field in fields:
            # manually filter for fields
            # if fieldname not in ['primary_picture']:
            #     continue
            blobs = []
            value = field.get(field.interface(obj))
            if not value:
                continue
            if blob_type != 'related':
                blobs = [value]
            elif IRelationChoice.providedBy(field) or \
                    IRelationList.providedBy(field):
                blobs = get_blobs_from_relations(value, field)
            for blob in blobs:
                if not blob:
                    continue
                filename = str((blob.filename).encode('utf8'))
                zip_file.writestr(
                    '{0}_{1}/{2}'.format(
                        brain.UID,  # or: brain.id.upper(),
                        fieldname,
                        filename),
                    str(blob.data)
                )
                blobs_found = True
    zip_file.close()
    if not blobs_found:
        return 'No {0} found'.format(blob_type)
    data = file(tmp_file.name).read()
    response = self.request.response
    response.setHeader('content-type', 'application/zip')
    response.setHeader('content-length', len(data))
    response.setHeader(
        'content-disposition',
        'attachment; filename="{0}.zip"'.format(blob_type))
    return response.write(data)