Example #1
    def _set_default_configuration(self):
        defaults = {}
        tile_type = getUtility(ITileType, name=self.tile.__name__)
        fields = getFieldNamesInOrder(tile_type.schema)

        for name, field in getFieldsInOrder(tile_type.schema):
            order = unicode(fields.index(name))
            # default configuration attributes for all fields
            defaults[name] = {'order': order, 'visibility': u'on'}
            if name == 'css_class':
                # css_class, set default
                defaults[name] = field.default
            if ITextLine.providedBy(field):
                # field is TextLine, we should add 'htmltag'
                defaults[name]['htmltag'] = u'h2'
            elif INamedBlobImageField.providedBy(field):
                # field is an image, we should add 'position' and 'imgsize'
                defaults[name]['position'] = u'left'
                defaults[name]['imgsize'] = u'mini 200:200'
            elif IInt.providedBy(field):
                defaults[name][name] = field.default
            elif IDatetime.providedBy(field):
                # field is Datetime, we should add 'format'
                defaults[name]['format'] = 'datetime'

        return defaults
Example #2
    def _set_default_configuration(self):
        defaults = {}
        tile_type = getUtility(ITileType, name=self.tile.__name__)
        fields = getFieldNamesInOrder(tile_type.schema)

        for name, field in getFieldsInOrder(tile_type.schema):
            order = unicode(fields.index(name))
            # default configuration attributes for all fields
            defaults[name] = {'order': order, 'visibility': u'on'}
            if name == 'css_class':
                # css_class, set default
                defaults[name] = field.default
            if ITextLine.providedBy(field):
                # field is TextLine, we should add 'htmltag'
                defaults[name]['htmltag'] = u'h2'
            elif INamedBlobImageField.providedBy(field):
                # field is an image, we should add 'position' and 'imgsize'
                defaults[name]['position'] = u'left'
                defaults[name]['imgsize'] = u'mini 200:200'
            elif IInt.providedBy(field):
                defaults[name][name] = field.default
            elif IDatetime.providedBy(field):
                # field is Datetime, we should add 'format'
                defaults[name]['format'] = 'datetime'

        return defaults
Example #3
    def _set_default_configuration(self):
        defaults = {}
        tile_type = getUtility(ITileType, name=self.tile.__name__)
        fields = getFieldNamesInOrder(tile_type.schema)

        for name, field in getFieldsInOrder(tile_type.schema):
            order = unicode(fields.index(name))
            # default configuration attributes for all fields
            defaults[name] = {"order": order, "visibility": u"on"}
            if name == "css_class":
                # css_class, set default
                defaults[name] = field.default
            if ITextLine.providedBy(field):
                # field is TextLine, we should add 'htmltag'
                defaults[name]["htmltag"] = u"h2"
            elif INamedBlobImageField.providedBy(field):
                # field is an image, we should add 'position' and 'imgsize'
                defaults[name]["position"] = u"left"
                defaults[name]["imgsize"] = u"mini 200:200"
            elif IInt.providedBy(field):
                defaults[name][name] = field.default
            elif IDatetime.providedBy(field):
                # field is Datetime, we should add 'format'
                defaults[name]["format"] = "datetime"

        return defaults
Example #4
def _make_namedfile(value, field, widget):
    """Return a NamedImage or NamedFile instance, if it isn't already one -
    e.g. when it's base64 encoded data.
    """

    if INamed.providedBy(value):
        return value

    string_types = (six.binary_type, six.text_type)
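    # A string value on an IBytes field is assumed to be base64-encoded file
    # data; dict-like values already carry 'filename' and 'data' keys.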
    if isinstance(value, string_types) and IBytes.providedBy(field):
        filename, data = b64decode_file(value)
    elif isinstance(value, dict) or isinstance(value, PersistentDict):
        filename = value['filename']
        data = value['data']

    if INamedBlobImageField.providedBy(field):
        value = NamedBlobImage(data=data, filename=filename)
    elif INamedImageField.providedBy(field):
        value = NamedImage(data=data, filename=filename)
    elif INamedBlobFileField.providedBy(field):
        value = NamedBlobFile(data=data, filename=filename)
    else:
        value = NamedFile(data=data, filename=filename)

    return value
Example #5
    def get_maxsize_dx(self, validator, field):
        """Return the maximum allowed size for the given file or image
        field, or None if no limit is configured.
        """
        if not HAS_DX:
            return None
        try:
            file_size = api.portal.get_registry_record(
                'file_size',
                interface=ILimitFileSizePanel)
            image_size = api.portal.get_registry_record(
                'image_size',
                interface=ILimitFileSizePanel)
        except InvalidParameterError:
            return None
        # Check if there are type/field-specific settings in the registry
        type_context = self.context
        if self.context == api.portal.get():
            # we are in the add form, so the context is the portal.
            # validator.view has an attribute portal_type with the wanted type
            type_context = validator.view
        type_maxsize = self._get_type_maxsize(field, type_context)
        if type_maxsize is not None:
            return type_maxsize
        if file_size and INamedBlobFileField.providedBy(field):
            return float(file_size)
        elif image_size and INamedBlobImageField.providedBy(field):
            return float(image_size)
        return None
Example #6
    def getReferencedAttributes(self, obj):
        file_data = {}
        # Try to get last revision, only store a new blob if the
        # contents differ from the prior one, otherwise store a
        # reference to the prior one.
        # The implementation is mostly based on CMFEditions's CloneBlobs
        # modifier.
        repo = getToolByName(obj, "portal_repository")
        try:
            prior_rev = repo.retrieve(obj)
        except ArchivistRetrieveError:
            prior_rev = None

        for schemata in iterSchemata(obj):
            for name, field in getFields(schemata).items():
                if INamedBlobFileField.providedBy(field) or INamedBlobImageField.providedBy(field):
                    try:
                        # field.get may raise an AttributeError if the field
                        # is provided by a behavior and hasn't been
                        # initialized yet
                        field_value = field.get(field.interface(obj))
                    except AttributeError:
                        field_value = None
                    if field_value is None:
                        continue
                    blob_file = field_value.open()
                    save_new = True
                    dotted_name = ".".join([schemata.__identifier__, name])

                    if prior_rev is not None:
                        prior_obj = prior_rev.object
                        prior_blob = field.get(field.interface(prior_obj))
                        if prior_blob is not None:
                            prior_file = prior_blob.open()

                            # Check for file size differences
                            if os.fstat(prior_file.fileno()).st_size == os.fstat(blob_file.fileno()).st_size:
                                # Files are the same size, compare line by line
                                for line, prior_line in izip(blob_file, prior_file):
                                    if line != prior_line:
                                        break
                                else:
                                    # The files are the same, save a reference
                                    # to the prior versions blob on this
                                    # version
                                    file_data[dotted_name] = prior_blob._blob
                                    save_new = False

                    if save_new:
                        new_blob = file_data[dotted_name] = Blob()
                        new_blob_file = new_blob.open("w")
                        try:
                            blob_file.seek(0)
                            new_blob_file.writelines(blob_file)
                        finally:
                            blob_file.close()
                            new_blob_file.close()

        return file_data
Example #7
    def __call__(self, name, content_type, data, obj_id):
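        # Ask the content_type_registry which portal_type matches the file
        # name; fall back to a plain 'File' if nothing more specific matches.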
        ctr = cmfutils.getToolByName(self.context, 'content_type_registry')
        type_ = ctr.findTypeName(name.lower(), '', '') or 'File'

        # otherwise I get ZPublisher.Conflict ConflictErrors
        # when uploading multiple files
        upload_lock.acquire()

        try:
            transaction.begin()
            obj = ploneutils._createObjectByType(type_, self.context, obj_id)

            ttool = getToolByName(self.context, 'portal_types')
            ctype = ttool[obj.portal_type]
            schema = ctype.lookupSchema()
            fields = getFieldsInOrder(schema)
            file_fields = [field for safe_name, field in fields
                           if INamedFileField.providedBy(field)
                           or INamedImageField.providedBy(field)]
            if len(file_fields) == 0:
                logger.info("An error happens : the dexterity content type %s "
                            "has no file field, rawdata can't be created",
                            obj.absolute_url())
            for file_field in file_fields:
                if IPrimaryField.providedBy(file_field):
                    break
            else:
                # The primary field can't be set TTW,
                # so we take the first file field
                file_field = file_fields[0]

            # TODO: use adapters
            if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
                value = NamedBlobImage(data=data.read(), contentType=content_type,
                                       filename=unicode(obj_id, 'utf-8'))
            elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
                value = NamedBlobFile(data=data.read(), contentType=content_type,
                                      filename=unicode(obj_id, 'utf-8'))
            elif INamedImageField.providedBy(file_field):
                value = NamedImage(data=data.read(), contentType=content_type,
                                   filename=unicode(obj_id, 'utf-8'))
            elif INamedFileField.providedBy(file_field):
                value = NamedFile(data=data.read(), contentType=content_type,
                                  filename=unicode(obj_id, 'utf-8'))

            file_field.set(obj, value)
            obj.title = name
            obj.reindexObject()

            notify(ObjectInitializedEvent(obj))
            notify(ObjectModifiedEvent(obj))

            transaction.commit()
        finally:
            upload_lock.release()
        return obj
Example #8
    def set(self, data, filename, content_type):
        error = ''
        obj = self.context
        ttool = getToolByName(obj, 'portal_types')
        ctype = ttool[obj.portal_type]
        schema = ctype.lookupSchema()
        fields = getFieldsInOrder(schema)
        file_fields = [
            field for name, field in fields
            if INamedFileField.providedBy(field)
            or INamedImageField.providedBy(field)
        ]
        if len(file_fields) == 0:
            error = u'serverError'
            logger.info(
                "An error happens : the dexterity content type %s "
                "has no file field, rawdata can't be created",
                obj.absolute_url())
        for file_field in file_fields:
            if IPrimaryField.providedBy(file_field):
                break
        else:
            # The primary field can't be set TTW,
            # so we take the first file field
            file_field = file_fields[0]

        # TODO: use adapters
        if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
            value = NamedBlobImage(
                data=data,
                contentType=content_type,
                filename=unicode(filename))
        elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
            value = NamedBlobFile(
                data=data,
                contentType=content_type,
                filename=unicode(filename))
        elif INamedImageField.providedBy(file_field):
            value = NamedImage(
                data=data,
                contentType=content_type,
                filename=unicode(filename))
        elif INamedFileField.providedBy(file_field):
            value = NamedFile(
                data=data,
                contentType=content_type,
                filename=unicode(filename))

        file_field.set(obj, value)
        obj.reindexObject()
        notify(ObjectInitializedEvent(obj))
        return error
Example #9
    def set(self, data, filename, content_type):
        error = ''
        obj = self.context
        ttool = getToolByName(obj, 'portal_types')
        ctype = ttool[obj.portal_type]
        schema = ctype.lookupSchema()
        fields = getFieldsInOrder(schema)
        file_fields = [
            field for name, field in fields
            if INamedFileField.providedBy(field)
            or INamedImageField.providedBy(field)
        ]
        if len(file_fields) == 0:
            error = u'serverError'
            logger.info(
                "An error happens : the dexterity content type %s "
                "has no file field, rawdata can't be created",
                obj.absolute_url())
        for file_field in file_fields:
            if IPrimaryField.providedBy(file_field):
                break
        else:
            # The primary field can't be set TTW,
            # so we take the first file field
            file_field = file_fields[0]

        # TODO: use adapters
        if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
            value = NamedBlobImage(data=data,
                                   contentType=content_type,
                                   filename=unicode(filename, 'utf-8'))
        elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
            value = NamedBlobFile(data=data,
                                  contentType=content_type,
                                  filename=unicode(filename, 'utf-8'))
        elif INamedImageField.providedBy(file_field):
            value = NamedImage(data=data,
                               contentType=content_type,
                               filename=unicode(filename, 'utf-8'))
        elif INamedFileField.providedBy(file_field):
            value = NamedFile(data=data,
                              contentType=content_type,
                              filename=unicode(filename, 'utf-8'))

        file_field.set(obj, value)
        return error
Example #10
    def getOnCloneModifiers(self, obj):
        """Removes references to blobs.
        """
        blob_refs = {}
        for schemata in iterSchemata(obj):
            for name, field in getFields(schemata).items():
                if (INamedBlobFileField.providedBy(field) or
                    INamedBlobImageField.providedBy(field)):
                    field_value = field.get(field.interface(obj))
                    if field_value is not None:
                        blob_refs[id(aq_base(field_value._blob))] = True

        def persistent_id(obj):
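            # Return a truthy marker for the blobs collected above so the
            # cloning pickler treats them as external references;
            # persistent_load (below) then resolves those references to None.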
            return blob_refs.get(id(obj), None)

        def persistent_load(obj):
            return None

        return persistent_id, persistent_load, [], []
Example #11
    def export_blobs(self, portal_type, blob_type, blacklist, whitelist):
        """Return a zip file with the files and/or images for the requested
        export.
        """
        all_fields = get_schema_info(portal_type, blacklist, whitelist)
        if blob_type == 'images':
            fields = [
                i for i in all_fields if
                INamedImageField.providedBy(i[1]) or
                INamedBlobImageField.providedBy(i[1])]
        elif blob_type == 'files':
            fields = [
                i for i in all_fields if
                INamedFileField.providedBy(i[1]) or
                INamedBlobFileField.providedBy(i[1])]
        elif blob_type == 'related':
            fields = [
                i for i in all_fields if
                IRelationChoice.providedBy(i[1]) or
                IRelationList.providedBy(i[1])]

        tmp_file = NamedTemporaryFile()
        zip_file = zipfile.ZipFile(tmp_file, 'w')

        catalog = api.portal.get_tool('portal_catalog')
        query = {'portal_type': portal_type}
        blobs_found = False
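        # With plone.app.multilingual installed, include content in every
        # language in the catalog query.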
        if HAS_MULTILINGUAL and 'Language' in catalog.indexes():
            query['Language'] = 'all'
        for brain in catalog(query):
            obj = brain.getObject()
            for fieldname, field in fields:
                # manually filter for fields
                # if fieldname not in ['primary_picture']:
                #     continue
                blobs = []
                value = field.get(field.interface(obj))
                if not value:
                    continue

                if blob_type != 'related':
                    blobs = [value]
                elif IRelationChoice.providedBy(field) or \
                        IRelationList.providedBy(field):
                    blobs = get_blobs_from_relations(value, field)

                for blob in blobs:
                    if not blob:
                        continue
                    filename = str((blob.filename).encode('utf8'))
                    zip_file.writestr(
                        '{0}_{1}/{2}'.format(
                            brain.UID,  # or: brain.id.upper(),
                            fieldname,
                            filename),
                        str(blob.data)
                    )
                    blobs_found = True

        zip_file.close()
        if not blobs_found:
            return 'No {0} found'.format(blob_type)
        data = file(tmp_file.name).read()
        response = self.request.response
        response.setHeader('content-type', 'application/zip')
        response.setHeader('content-length', len(data))
        response.setHeader(
            'content-disposition',
            'attachment; filename="{0}.zip"'.format(blob_type))
        return response.write(data)
Example #12
    def export_blobs(self, portal_type, blob_type, blacklist, whitelist):
        """Return a zip file with the files and/or images for the requested
        export.
        """
        all_fields = get_schema_info(portal_type, blacklist, whitelist)
        if blob_type == 'images':
            fields = [
                i for i in all_fields if INamedImageField.providedBy(i[1])
                or INamedBlobImageField.providedBy(i[1])
            ]
        elif blob_type == 'files':
            fields = [
                i for i in all_fields if INamedFileField.providedBy(i[1])
                or INamedBlobFileField.providedBy(i[1])
            ]
        elif blob_type == 'related':
            fields = [
                i for i in all_fields if IRelationChoice.providedBy(i[1])
                or IRelationList.providedBy(i[1])
            ]

        tmp_file = NamedTemporaryFile()
        zip_file = zipfile.ZipFile(tmp_file, 'w')

        catalog = api.portal.get_tool('portal_catalog')
        query = {'portal_type': portal_type}
        query['path'] = {}
        query['path']['query'] = '/'.join(self.context.getPhysicalPath())

        blobs_found = False
        if HAS_MULTILINGUAL and 'Language' in catalog.indexes():
            query['Language'] = 'all'
        for brain in catalog(query):
            obj = brain.getObject()
            for fieldname, field in fields:
                # manually filter for fields
                # if fieldname not in ['primary_picture']:
                #     continue
                blobs = []
                value = field.get(field.interface(obj))
                if not value:
                    continue

                if blob_type != 'related':
                    blobs = [value]
                elif IRelationChoice.providedBy(field) or \
                        IRelationList.providedBy(field):
                    blobs = get_blobs_from_relations(value, field)

                for blob in blobs:
                    if not blob:
                        continue
                    filename = str((blob.filename).encode('utf8'))
                    zip_file.writestr(
                        '{0}_{1}/{2}'.format(
                            brain.UID,  # or: brain.id.upper(),
                            fieldname,
                            filename),
                        str(blob.data))
                    blobs_found = True

        zip_file.close()
        if not blobs_found:
            return 'No {0} found'.format(blob_type)
        data = file(tmp_file.name).read()
        response = self.request.response
        response.setHeader('content-type', 'application/zip')
        response.setHeader('content-length', len(data))
        response.setHeader('content-disposition',
                           'attachment; filename="{0}.zip"'.format(blob_type))
        return response.write(data)
Example #13
    def prepare_field_value(self, new_object, field, value):
        recurse = partial(self.prepare_field_value, new_object, field)
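        # `recurse` re-applies this preparation to values produced below
        # (decoded strings, sequence items, freshly built file values).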

        if isinstance(value, str):
            return recurse(value.decode('utf-8'))

        if isinstance(value, list):
            return map(recurse, value)

        if isinstance(value, tuple):
            return tuple(map(recurse, value))

        relation_fields = filter(IRelation.providedBy,
                                 (field, getattr(field, 'value_type', None)))
        if relation_fields and isinstance(value, unicode):
            target = uuidToObject(value)
            return create_relation('/'.join(target.getPhysicalPath()))

        if IRichText.providedBy(field) \
           and not IRichTextValue.providedBy(value):
            return recurse(field.fromUnicode(value))

        if INamedField.providedBy(field) and value is not None \
           and not isinstance(value, field._type):

            if value == '':
                return None

            if hasattr(value, 'get_size') and value.get_size() == 0:
                return None

            source_is_blobby = IBlobWrapper.providedBy(value)
            target_is_blobby = INamedBlobFileField.providedBy(field) or \
                               INamedBlobImageField.providedBy(field)

            if source_is_blobby and target_is_blobby:
                filename = value.filename
                if isinstance(filename, str):
                    filename = filename.decode('utf-8')

                new_value = field._type(
                    data='',  # empty blob, will be replaced
                    contentType=value.content_type,
                    filename=filename)
                if not hasattr(new_value, '_blob'):
                    raise ValueError(
                        ('Unsupported file value type {!r}'
                         ', missing _blob.').format(
                             new_value.__class__))

                # Simply copy the persistent blob object (with the file system
                # pointer) to the new value so that the file is not copied.
                # We assume that the old object is trashed and can therefore
                # adopt the blob file.
                new_value._blob = value.getBlob()
                return recurse(new_value)

            else:
                filename = value.filename
                if isinstance(filename, str):
                    filename = filename.decode('utf-8')

                data = value.data
                data = getattr(data, 'data', data)  # extract Pdata
                return recurse(field._type(
                    data=data,
                    contentType=value.content_type,
                    filename=filename))

        return value
Example #14
    def prepare_field_value(self, new_object, field, value):
        recurse = partial(self.prepare_field_value, new_object, field)

        if isinstance(value, str):
            return recurse(value.decode('utf-8'))

        if isinstance(value, list):
            return map(recurse, value)

        if isinstance(value, tuple):
            return tuple(map(recurse, value))

        relation_fields = filter(IRelationChoice.providedBy,
                                 (field, getattr(field, 'value_type', None)))
        if relation_fields and isinstance(value, unicode):
            target = uuidToObject(value)
            return create_relation('/'.join(target.getPhysicalPath()))

        if IRichText.providedBy(field) \
           and not IRichTextValue.providedBy(value):
            return recurse(field.fromUnicode(value))

        if INamedField.providedBy(field) and value \
           and not isinstance(value, field._type):

            source_is_blobby = IBlobWrapper.providedBy(value)
            target_is_blobby = INamedBlobFileField.providedBy(field) or \
                               INamedBlobImageField.providedBy(field)

            if source_is_blobby and target_is_blobby:
                filename = value.filename
                if isinstance(filename, str):
                    filename = filename.decode('utf-8')

                new_value = field._type(
                    data='',  # empty blob, will be replaced
                    contentType=value.content_type,
                    filename=filename)
                if not hasattr(new_value, '_blob'):
                    raise ValueError(
                        ('Unsupported file value type {!r}'
                         ', missing _blob.').format(
                             new_value.__class__))

                # Simply copy the persistent blob object (with the file system
                # pointer) to the new value so that the file is not copied.
                # We assume that the old object is trashed and can therefore
                # adopt the blob file.
                new_value._blob = value.getBlob()
                return recurse(new_value)

            else:
                filename = value.filename
                if isinstance(filename, str):
                    filename = filename.decode('utf-8')

                data = value.data
                data = getattr(data, 'data', data)  # extract Pdata
                return recurse(field._type(
                    data=data,
                    contentType=value.content_type,
                    filename=filename))

        return value
Example #15
def update_object_with_data(content, record):
    """ update the content with the values from records
    """
    schema = get_schema(content)
    is_atct = IATContentType.providedBy(content)
    is_dext = IDexterityContent.providedBy(content)

    for k, v in record.items():

        if is_atct:
            field = schema.get(k)

        if is_dext:
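            # The field may live on the content's main schema or on one of
            # its behavior schemas, so search them in order.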
            schemas = list()
            schemas.append(schema)
            schemas.extend(get_behaviors_schema(content))
            for i in schemas:
                field = i.get(k)
                if field:
                    break

        logger.info("update_object_with_data::processing key=%r, value=%r, field=%r", k, v, field)
        if field is None:
            logger.info("update_object_with_data::skipping key=%r", k)
            continue

        if is_atct:
            # XXX handle security
            mutator = field.getMutator(content)
            mutator(v)
        else:
            # ugly hack for tags
            if ICategorization is field.interface:
                content.setSubject(v)

            elif IPublication is field.interface:
                if k == u'effective':
                    content.setEffectiveDate(v)
                elif k == u'expires':
                    content.setExpirationDate(v)

            elif INamedBlobImageField.providedBy(field):
                filename = v.get("filename")
                data = b64decode(v.get("data"))
                file_obj = NamedBlobImage(data, filename=filename)
                field.validate(file_obj)
                field.set(content, file_obj)

            elif INamedBlobFileField.providedBy(field):
                filename = v.get("filename")
                data = b64decode(v.get("data"))
                file_obj = NamedBlobFile(data, filename=filename)
                field.validate(file_obj)
                field.set(content, file_obj)

            else:
                field.validate(v)
                field.set(content, v)

    content.reindexObject()
    return content
Example #16
    def getReferencedAttributes(self, obj):
        file_data = {}
        # Try to get last revision, only store a new blob if the
        # contents differ from the prior one, otherwise store a
        # reference to the prior one.
        # The implementation is mostly based on CMFEditions's CloneBlobs
        # modifier.
        repo = getToolByName(obj, 'portal_repository')
        try:
            prior_rev = repo.retrieve(obj)
        except ArchivistRetrieveError:
            prior_rev = None

        for schemata in iterSchemata(obj):
            for name, field in getFields(schemata).items():
                if (INamedBlobFileField.providedBy(field) or
                        INamedBlobImageField.providedBy(field)):
                    try:
                        # field.get may raise an AttributeError if the field
                        # is provided by a behavior and hasn't been
                        # initialized yet
                        field_value = field.get(field.interface(obj))
                    except AttributeError:
                        field_value = None
                    if field_value is None:
                        continue
                    blob_file = field_value.open()
                    save_new = True
                    dotted_name = '.'.join([schemata.__identifier__, name])

                    if prior_rev is not None:
                        prior_obj = prior_rev.object
                        prior_blob = field.get(field.interface(prior_obj))
                        if prior_blob is not None:
                            prior_file = prior_blob.open()

                            # Check for file size differences
                            if (os.fstat(prior_file.fileno()).st_size ==
                                    os.fstat(blob_file.fileno()).st_size):
                                # Files are the same size, compare line by line
                                for line, prior_line in izip(blob_file,
                                                             prior_file):
                                    if line != prior_line:
                                        break
                                else:
                                    # The files are the same, save a reference
                                    # to the prior versions blob on this
                                    # version
                                    file_data[dotted_name] = prior_blob._blob
                                    save_new = False

                    if save_new:
                        new_blob = file_data[dotted_name] = Blob()
                        new_blob_file = new_blob.open('w')
                        try:
                            blob_file.seek(0)
                            new_blob_file.writelines(blob_file)
                        finally:
                            blob_file.close()
                            new_blob_file.close()

        return file_data
Example #17
    def __call__(self, name, content_type, data, obj_id):
        ctr = cmfutils.getToolByName(self.context, 'content_type_registry')
        type_ = ctr.findTypeName(name.lower(), '', '') or 'File'

        # otherwise I get ZPublisher.Conflict ConflictErrors
        # when uploading multiple files
        upload_lock.acquire()

        try:
            transaction.begin()
            obj = ploneutils._createObjectByType(type_, self.context, obj_id)

            ttool = getToolByName(self.context, 'portal_types')
            ctype = ttool[obj.portal_type]
            schema = ctype.lookupSchema()
            fields = getFieldsInOrder(schema)
            file_fields = [
                field for safe_name, field in fields
                if INamedFileField.providedBy(field)
                or INamedImageField.providedBy(field)
            ]
            if len(file_fields) == 0:
                logger.info(
                    "An error happens : the dexterity content type %s "
                    "has no file field, rawdata can't be created",
                    obj.absolute_url())
            for file_field in file_fields:
                if IPrimaryField.providedBy(file_field):
                    break
            else:
                # The primary field can't be set TTW,
                # so we take the first file field
                file_field = file_fields[0]

            # TODO: use adapters
            if HAVE_BLOBS and INamedBlobImageField.providedBy(file_field):
                value = NamedBlobImage(data=data.read(),
                                       contentType=content_type,
                                       filename=unicode(obj_id, 'utf-8'))
            elif HAVE_BLOBS and INamedBlobFileField.providedBy(file_field):
                value = NamedBlobFile(data=data.read(),
                                      contentType=content_type,
                                      filename=unicode(obj_id, 'utf-8'))
            elif INamedImageField.providedBy(file_field):
                value = NamedImage(data=data.read(),
                                   contentType=content_type,
                                   filename=unicode(obj_id, 'utf-8'))
            elif INamedFileField.providedBy(file_field):
                value = NamedFile(data=data.read(),
                                  contentType=content_type,
                                  filename=unicode(obj_id, 'utf-8'))

            file_field.set(obj, value)
            obj.title = name
            obj.reindexObject()

            notify(ObjectInitializedEvent(obj))
            notify(ObjectModifiedEvent(obj))

            transaction.commit()
        finally:
            upload_lock.release()
        return obj
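Several of the examples above (_make_namedfile and the various set() / __call__ upload handlers) repeat the same dispatch: check which plone.namedfile field interface the target field provides, then build the matching NamedBlobImage, NamedBlobFile, NamedImage or NamedFile value before calling field.set(). Below is a minimal sketch of that pattern, assuming the usual plone.namedfile import locations; the helper name make_named_value is illustrative and not taken from any of the projects quoted above.

from plone.namedfile.file import NamedBlobFile
from plone.namedfile.file import NamedBlobImage
from plone.namedfile.file import NamedFile
from plone.namedfile.file import NamedImage
from plone.namedfile.interfaces import INamedBlobFileField
from plone.namedfile.interfaces import INamedBlobImageField
from plone.namedfile.interfaces import INamedImageField


def make_named_value(field, data, filename, content_type):
    # Blob variants are checked before the plain ones, mirroring the ordering
    # used in _make_namedfile above.
    if INamedBlobImageField.providedBy(field):
        factory = NamedBlobImage
    elif INamedImageField.providedBy(field):
        factory = NamedImage
    elif INamedBlobFileField.providedBy(field):
        factory = NamedBlobFile
    else:
        factory = NamedFile
    return factory(data=data, contentType=content_type, filename=filename)

A caller would then do field.set(obj, make_named_value(field, data, filename, content_type)), which is what the upload handlers above inline in their if/elif chains.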