def initialize_blob_filepath(self):
    try:
        opened = openBlob(self.blob)
        self.blob_filepath = opened.name
        opened.close()
    except (IOError, AttributeError):
        self.blob_filepath = None
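
The same lazy path lookup can be sketched as a small standalone helper against ZODB's Blob API; this is a minimal sketch, assuming ZODB is installed, with Blob.open('r') standing in for the openBlob helper used above (blob_filepath is an illustrative name):

from ZODB.blob import Blob

def blob_filepath(blob):
    """Return the on-disk path of the blob's data file, or None if it cannot be opened."""
    try:
        fh = blob.open('r')  # roughly what openBlob(blob) does here
    except (IOError, AttributeError):
        return None
    try:
        return fh.name       # path of the underlying blob data file
    finally:
        fh.close()

# usage sketch
blob = Blob()
with blob.open('w') as fh:
    fh.write(b'some data')
print(blob_filepath(blob))   # e.g. a temporary file path until the blob is committed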
Example #2
def patched_getSize(self):
    """ Return image dimensions of the blob
    """
    try:
        blob = openBlob(self.blob)
    except POSKeyError:

        oid = self.blob._p_oid

        directories = []
        # Create the bushy directory structure with the least significant byte
        # first
        for byte in str(oid):
            directories.append('0x%s' % binascii.hexlify(byte))
        path = os.path.sep.join(directories)
        cached = self.blob._p_blob_committed

        logger.error("BLOBWARNING: Could not get "
                     "image size for blob %r. Info about blob: "
                     "OID (oid, repr, path on zeo storage): %r > %r > %r "
                     "CACHED (path to cached blob): %r ",
                     self.blob._p_oid, oid_repr(oid),
                     oid.__repr__(), path, cached)

        return 0

    size = getImageSize(blob)
    blob.close()
    return size
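
For reference, the loop above hex-encodes the oid one byte at a time to build the bushy-layout path; under Python 3, where iterating bytes yields integers, an equivalent sketch (bushy_path is an illustrative name) would be:

import os

def bushy_path(oid):
    # one '0xNN' directory level per oid byte, matching the hexlify loop above
    return os.path.sep.join('0x%02x' % b for b in oid)

print(bushy_path(b'\x00\x00\x00\x00\x00\x00\x00\x2a'))  # 0x00/0x00/0x00/0x00/0x00/0x00/0x00/0x2a on POSIX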
Example #3
    def render_attachment_preview(self, attachment):
        sm = getSecurityManager()
        if not sm.checkPermission(permissions.View, self.context):
            raise Unauthorized

        r = self.request.response
        settings = Settings(attachment)

        if self.preview_type not in ("large", "normal", "small"):
            self.preview_type = "small"
        if self.page is None:
            self.page = 1
        filepath = u"%s/dump_%s.%s" % (self.preview_type, self.page, settings.pdf_image_format)
        blob = settings.blob_files[filepath]
        blobfi = openBlob(blob)
        length = os.fstat(blobfi.fileno()).st_size
        blobfi.close()
        ext = os.path.splitext(os.path.normcase(filepath))[1][1:]
        if ext == "txt":
            ct = "text/plain"
        else:
            ct = "image/%s" % ext

        r.setHeader("Content-Type", ct)
        r.setHeader("Last-Modified", rfc1123_date(self.context._p_mtime))
        r.setHeader("Accept-Ranges", "bytes")
        r.setHeader("Content-Length", length)
        request_range = handleRequestRange(self.context, length, self.request, self.request.response)
        return BlobStreamIterator(blob, **request_range)
Example #4
def retrieveScale(self, instance, scale):
    """ retrieve a scaled version of the image """
    field = self.context
    if scale is None:
        blob = field.getUnwrapped(instance)
        data = dict(
            id=field.getName(), blob=blob.getBlob(), content_type=blob.getContentType(), filename=blob.getFilename()
        )
    else:
        fields = getattr(aq_base(instance), blobScalesAttr, {})
        scales = fields.get(field.getName(), {})
        data = scales.get(scale)
    if data is not None:
        blob = openBlob(data["blob"])
        # `update_data` & friends (from `OFS`) should support file
        # objects, so we could use something like:
        #   ImageScale(..., data=blob.getIterator(), ...)
        # but it uses `len(data)`, so we'll stick with a string for now
        image = ImageScale(
            data["id"],
            data=blob.read(),
            blob=data["blob"],
            filename=data["filename"],
            content_type=data["content_type"],
        )
        blob.close()
        return image.__of__(instance)
    return None
Example #5
    def __call__(self):
        sm = getSecurityManager()
        if not sm.checkPermission(permissions.View, self.context.context):
            raise Unauthorized

        settings = self.context.settings
        filepath = self.context.filepath
        blob = settings.blob_files[filepath]
        blobfi = openBlob(blob)
        length = os.fstat(blobfi.fileno()).st_size
        blobfi.close()
        ext = os.path.splitext(os.path.normcase(filepath))[1][1:]
        if ext == 'txt':
            ct = 'text/plain'
        else:
            ct = 'image/%s' % ext

        self.request.response.setHeader('Last-Modified',
                                        rfc1123_date(self.context._p_mtime))
        self.request.response.setHeader('Accept-Ranges', 'bytes')
        self.request.response.setHeader("Content-Length", length)
        self.request.response.setHeader('Content-Type', ct)
        request_range = handleRequestRange(
            self.context, length, self.request, self.request.response)
        return BlobStreamIterator(blob, **request_range)
Example #6
    def get_review_pdf(self):
        """ Return the uploaded pdf; if that doesn't exist, return the
        generatedPdf Blob object; otherwise return None.

        Also return the size, since it is not easy to get this from the
        blob directly
        """
        pdf = {}
        size = 0
        if hasattr(self, "pdf"):
            size = self.pdf.get_size()
            if size > 0:
                pdf["size"] = size
                pdf["blob"] = self.pdf.blob
        if size == 0 and hasattr(self, "generatedPdf"):
            generated_pdf = self.generatedPdf
            pdf_blob = openBlob(generated_pdf)
            size = fstat(pdf_blob.fileno()).st_size
            pdf_blob.close()
            if size > 0:
                pdf["size"] = size
                pdf["blob"] = generated_pdf
        if pdf == {}:
            return None
        else:
            return pdf
Example #7
def getSize(self):
    """ return image dimensions of the blob """
    # TODO: this should probably be cached...
    blob = openBlob(self.blob)
    size = getImageSize(blob)
    blob.close()
    return size
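
The TODO above hints at caching; a minimal sketch of one option is to memoize the result on a volatile attribute so it is never persisted (_v_image_size is a hypothetical name; openBlob and getImageSize as in the example):

def getSize(self):
    """ return image dimensions of the blob, memoized on a volatile attribute """
    cached = getattr(self, '_v_image_size', None)  # _v_ attributes are not stored by the ZODB
    if cached is None:
        blob = openBlob(self.blob)
        try:
            cached = self._v_image_size = getImageSize(blob)
        finally:
            blob.close()
    return cached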
Example #8
def patched_field_get_size(self):
    """ Patch for get_size
    """
    try:
        blob = openBlob(self.blob)
        size = fstat(blob.fileno()).st_size
        blob.close()
    except POSKeyError:
        oid = self.blob._p_oid

        directories = []
        # Create the bushy directory structure with the least significant byte
        # first
        for byte in str(oid):
            directories.append('0x%s' % binascii.hexlify(byte))
        path = os.path.sep.join(directories)
        cached = self.blob._p_blob_committed

        logger.error("BLOBWARNING: Could not get "
                     "field size for blob %r. Info about blob: "
                     "OID (oid, repr, path on zeo storage): %r > %r > %r "
                     "CACHED (path to cached blob): %r ",
                     self, oid_repr(oid), oid.__repr__(), path, cached)
        size = 0
    return size
Example #9
def patched_field_get_size(self):
    try:
        blob = openBlob(self.blob)
        size = os.fstat(blob.fileno()).st_size
        blob.close()
    except POSKeyError:
        size = 0
    return size
Example #10
def getImageAsFile(self, img=None, scale=None):
    """
    Gets the image as a file-like object.
    """
    if img is None:
        field = self.getField('file')
        img = field.getScale(self, scale)
    return openBlob(self.getFile().getBlob())
Example #11
def _serialize(cls, obj):
    blob = obj.getBlob()
    blobfi = openBlob(blob)
    data = blobfi.read()
    blobfi.close()
    return {
        'data': base64.b64encode(data),
        'filename': obj.getFilename()}
Example #12
    def generate(self, force=False):
        ''' Generate the preview from the file attribute

        It tries to spare cpu cycles by generating the thumbnail only if the
        file is actually changed.
        For this reason a _video_size parameter on the video_thumb is set
        '''
        mtr = api.portal.get_tool('mimetypes_registry')
        if not shasattr(self.schema, 'file'):
            return

        video = self.schema.file

        try:
            mime_type_description = mtr.lookup(video.contentType)[0].id
        except:
            logger.exception('Cannot check mimetype')
            mime_type_description = ''

        if not mime_type_description == 'MPEG-4 video':
            self.schema.video_thumb = None
            return

        if not force:
            try:
                cached_size = self.schema.video_thumb._video_size
            except AttributeError:
                cached_size = None

            if cached_size == video.size:
                # Highly improbable that a new video with the same size
                # replaced the old one. We have nothing to do here
                return

        fd, tmpfile = tempfile.mkstemp()

        with openBlob(video) as f:
            cmd = 'ffmpegthumbnailer -s 0 -i {infile} -o {oufile}'.format(
                infile=f.name,
                oufile=tmpfile,
            )
            try:
                subprocess.call(cmd.split())
            except:
                self.schema.video_thumb = None
                logger.exception('Error running command %r', cmd)
                return

        thumb_name = video.filename.rpartition('.')[0] + u'.png'

        with open(tmpfile, 'rb') as thumb:
            nbi = NamedBlobImage(
                thumb.read(),
                filename=thumb_name,
            )
            nbi._video_size = video.size
            self.schema.video_thumb = nbi
Example #13
def screenshot(blob):
    blobfi = openBlob(blob)
    filepath = docsplit.dump_image(blobfi.read(), '1000', 'gif')
    blobfi.close()

    blob = Blob()
    bfile = blob.open('w')
    sfi = open(filepath, 'rb')
    bfile.write(sfi.read())
    bfile.close()
    sfi.close()
    return blob
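
A hedged rewrite of the same flow with context managers, so the file handles are closed even if docsplit or the copy fails (docsplit, openBlob and Blob as in the example above):

def screenshot(blob):
    # produce an image file from the raw blob data, as in the original example
    with openBlob(blob) as blobfi:
        filepath = docsplit.dump_image(blobfi.read(), '1000', 'gif')

    # copy the generated image into a fresh blob
    out = Blob()
    with out.open('w') as bfile, open(filepath, 'rb') as sfi:
        bfile.write(sfi.read())
    return out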
Example #14
    def set(self, name, instance, value, **kwargs):
        """Store video on Kaltura, 
           create media entry if required
        """
        initializing = kwargs.get('_initializing_', False)

        if initializing:
            AnnotationStorage.set(self, name, instance, value, **kwargs)
            return

        self.value = aq_base(value)
        if self.value.filename is None:
            return  #only interested in running set when instance is ready to save.

        #get a filehandle for the video content we are uploading to Kaltura Server
        fh_blob = openBlob(self.value.blob, mode='r')

        #find the temp dir that ZODB is using.
        tempdir = os.path.dirname(fh_blob.name)

        #connect to Kaltura Server
        (client, ks) = kconnect()
        #upload video content.

        token = KalturaUploadToken()
        token = client.uploadToken.add(token)
        token = client.uploadToken.upload(token.getId(), fh_blob)

        fh_blob.close()
        #instance needs to know the upload token to finalize the media entry
        # typically, a call to Kaltura's addFromUploadedFile or updateContent services does this.
        instance.uploadToken = token
        instance.fileChanged = True

        #if "no local storage" is set, we clobber the blob file.
        registry = getUtility(IRegistry)
        settings = registry.forInterface(IRfaKalturaSettings)
        if settings.storageMethod == u"No Local Storage":
            filename_aside = self.makeDummyData(dir=tempdir)
            value.blob.consumeFile(filename_aside)

        AnnotationStorage.set(self, name, instance, value, **kwargs)
Example #15
    def render_attachment_preview(self, attachment):
        sm = getSecurityManager()
        if not sm.checkPermission(permissions.View, self.context):
            raise Unauthorized

        r = self.request.response

        # avoid long dreaded CSRF error
        annotations = IAnnotations(attachment)
        if not annotations.get('collective.documentviewer', None):
            safeWrite(attachment)
        settings = Settings(attachment)  # possibly creates annotation

        if self.preview_type not in ('large', 'normal', 'small'):
            self.preview_type = 'small'
        if self.page is None:
            self.page = 1
        filepath = u'%s/dump_%s.%s' % (self.preview_type, self.page,
                                       settings.pdf_image_format)
        try:
            blob = settings.blob_files[filepath]
        except TypeError:
            # 'NoneType' object has no attribute '__getitem__'
            # happens e.g. when missing preview for stream attachment
            return

        blobfi = openBlob(blob)
        length = os.fstat(blobfi.fileno()).st_size
        blobfi.close()
        ext = os.path.splitext(os.path.normcase(filepath))[1][1:]
        if ext == 'txt':
            ct = 'text/plain'
        else:
            ct = 'image/%s' % ext

        r.setHeader('Content-Type', ct)
        r.setHeader('Last-Modified', rfc1123_date(self.context._p_mtime))
        r.setHeader('Accept-Ranges', 'bytes')
        r.setHeader("Content-Length", length)
        request_range = handleRequestRange(self.context, length, self.request,
                                           self.request.response)
        return BlobStreamIterator(blob, **request_range)
Example #16
    def render_blob_version(self):
        # done much like it is done in plone.app.blob's index_html
        header_value = contentDispositionHeader(
            disposition='inline',
            filename=self.context.getFilename().replace('.pdf', '.swf'))

        blob = self.settings.data
        blobfi = openBlob(blob)
        length = fstat(blobfi.fileno()).st_size
        blobfi.close()

        self.request.response.setHeader('Last-Modified',
            rfc1123_date(self.context._p_mtime))
        self.request.response.setHeader('Accept-Ranges', 'bytes')
        self.request.response.setHeader('Content-Disposition', header_value)
        self.request.response.setHeader("Content-Length", length)
        self.request.response.setHeader('Content-Type',
            'application/x-shockwave-flash')
        range = handleRequestRange(self.context, length, self.request,
            self.request.response)
        return BlobStreamIterator(blob, **range)
Example #17
    def render_blob_version(self):
        # done much like it is done in plone.app.blob's index_html
        header_value = contentDispositionHeader(
            disposition='inline',
            filename=self.context.getFilename().replace('.pdf', '.swf'))

        blob = self.settings.data
        blobfi = openBlob(blob)
        length = fstat(blobfi.fileno()).st_size
        blobfi.close()

        self.request.response.setHeader('Last-Modified',
                                        rfc1123_date(self.context._p_mtime))
        self.request.response.setHeader('Accept-Ranges', 'bytes')
        self.request.response.setHeader('Content-Disposition', header_value)
        self.request.response.setHeader("Content-Length", length)
        self.request.response.setHeader('Content-Type',
                                        'application/x-shockwave-flash')
        range = handleRequestRange(self.context, length, self.request,
                                   self.request.response)
        return BlobStreamIterator(blob, **range)
Example #18
def __call__(self):
    sm = getSecurityManager()
    if not sm.checkPermission(permissions.View, self.context.context):
        raise Unauthorized
    settings = self.context.settings
    filepath = self.context.filepath
    blob = settings.blob_files[filepath]
    blobfi = openBlob(blob)
    length = os.fstat(blobfi.fileno()).st_size
    blobfi.close()
    ext = os.path.splitext(os.path.normcase(filepath))[1][1:]
    if ext == 'txt':
        ct = 'text/plain'
    else:
        ct = 'image/%s' % ext
    self.request.response.setHeader('Last-Modified',
                                    rfc1123_date(self.context._p_mtime))
    self.request.response.setHeader('Accept-Ranges', 'bytes')
    self.request.response.setHeader("Content-Length", length)
    self.request.response.setHeader('Content-Type', ct)
    range = handleRequestRange(self.context, length, self.request,
                               self.request.response)
    return BlobStreamIterator(blob, **range)
Example #19
    def __call__(self):
        try:
            data = self.get_data()
        except IOError:
            # can be from zeo client blob file weirdness with PIL
            # occasionally
            logger.info('Could not get blob data', exc_info=True)
            raise NotFound

        if data:
            is_blob = False
            if isinstance(data, basestring):
                length = len(data)
            else:
                is_blob = True
                blobfi = openBlob(data)
                length = fstat(blobfi.fileno()).st_size
                blobfi.close()

            self.request.response.setHeader(
                'Last-Modified', rfc1123_date(self.context._p_mtime))
            resp = self.request.response
            resp.setHeader(
                'Content-Disposition', 'inline; filename=%s.%s' %
                (self.context.getId(), self.file_ext))
            resp.setHeader("Content-Length", length)
            resp.setHeader('Content-Type', self.content_type)

            if is_blob:
                resp.setHeader('Accept-Ranges', 'bytes')
                range = handleRequestRange(self.context, length, self.request,
                                           self.request.response)
                return BlobStreamIterator(data, **range)
            else:
                return data
        else:
            raise NotFound
Example #20
def __str__(self):
    """ return data as a string; this is highly inefficient as it
        loads the complete blob content into memory, but the method
        is unfortunately still used here and there... """
    return openBlob(self.blob).read()
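
Where a caller can consume an iterator instead of one large string, a chunked sketch avoids loading the whole blob into memory (iter_data and the chunk size are illustrative; openBlob as above):

def iter_data(self, chunksize=1 << 16):
    """Yield the blob content in chunks instead of returning one big string."""
    fh = openBlob(self.blob)
    try:
        while True:
            chunk = fh.read(chunksize)
            if not chunk:
                break
            yield chunk
    finally:
        fh.close()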
Example #21
def get_size(self):
    """ return the size of the blob """
    blob = openBlob(self.blob)
    size = fstat(blob.fileno()).st_size
    blob.close()
    return size
Example #22
def __str__(self):
    """ return data as a string; this is highly inefficient as it
        loads the complete blob content into memory, but the method
        is unfortunately still used here and there... """
    return openBlob(self.blob).read()
Example #23
    def __call__(self):
        portal_type = self.obj.portal_type
        rtool = getUtility(ICMISRepo)
        portal_url = getToolByName(self.obj, 'portal_url')
        obj_path = '/'.join(portal_url.getRelativeContentPath(self.obj))
        repo_obj_path = '%s/%s' % (rtool.docroot_path(), obj_path)

        # Check if there's already an object at the given path
        try:
            doc = rtool.repo.getObjectByPath(repo_obj_path)
        except ObjectNotFoundException:
            doc = None

        # Content does not exist, create it
        if doc is None:
            parent_path = '/'.join(obj_path.split('/')[:-1])
            repo_parent_path = '%s/%s' % (rtool.docroot_path(), parent_path)
            try:
                repo_parent = rtool.repo.getObjectByPath(repo_parent_path)
            except ObjectNotFoundException:
                # Parent is missing, do nothing
                return

            name = self.obj.getId()
            cmis_type = TYPE_FIELD_MAPPINGS[portal_type][0]

            # Create a folderish object
            if IFolderish.providedBy(self.obj):
                doc = repo_parent.createFolder(name, properties={
                    'cmis:name': name,
                    'cmis:objectTypeId': cmis_type,
                })
            # Create an object with a content stream
            elif IATBlob.providedBy(self.obj):
                blob_wrapper = self.obj.getBlobWrapper()
                doc = repo_parent.createDocument(
                    blob_wrapper.getFilename(),
                    properties={
                        'cmis:name': name,
                        'cmis:objectTypeId': cmis_type,
                    },
                    contentFile=openBlob(blob_wrapper.blob),
                    contentType=blob_wrapper.getContentType(),
                )

            if doc is None:
                return

            # Add aspects
            aspects = TYPE_FIELD_MAPPINGS[portal_type][2]
            for aspect in aspects:
                doc.addAspect(aspect)

        # Update properties
        props = {}
        for fieldname, propname in TYPE_FIELD_MAPPINGS[portal_type][1].items():
            field = self.obj.Schema().getField(fieldname)
            accessor = field.getAccessor(self.obj)
            value = accessor()
            if isinstance(value, str):
                value = value.decode('utf8')
            props[propname] = value
        doc.updateProperties(props)
Example #24
def get_size(self):
    """ return the size of the blob """
    blob = openBlob(self.blob)
    size = fstat(blob.fileno()).st_size
    blob.close()
    return size
Example #25
def process(context):
    video = context.file

    if not video or video.filename == aws.FILENAME:
        return

    try:
        opened = openBlob(video._blob)
        bfilepath = opened.name
        opened.close()
    except IOError:
        logger.warn('error opening blob file')
        return

    # by default, assume all non-mp4 videos need to be converted
    # but in reality, all videos need converting, even mp4.
    # md5 is only what makes this possible
    convert_it = video.contentType.split('/')[-1] != 'mp4'
    if md5 is not None:
        old_hash = getattr(context, '_file_hash', None)
        current_hash = md5(bfilepath)
        if old_hash is None or old_hash != current_hash:
            convert_it = True

    if context.image and not convert_it:
        # already an mp4 and already has a screen grab
        return

    tmpdir = mkdtemp()
    tmpfilepath = os.path.join(tmpdir, video.filename)
    copyfile(bfilepath, tmpfilepath)

    if convert_it:
        output_filepath = os.path.join(tmpdir, 'output.mp4')
        try:
            avconv.convert(tmpfilepath, output_filepath)
        except:
            pass
        if os.path.exists(
                output_filepath) and os.path.getsize(output_filepath) > 0:
            if md5 is not None:
                try:
                    context._file_hash = md5(output_filepath)
                except:
                    pass
            context._p_jar.sync()
            fi = open(output_filepath)
            namedblob = NamedBlobFile(fi,
                                      filename=switchFileExt(
                                          video.filename, 'mp4'))
            context.file = namedblob
            fi.close()

    if not context.image:
        # try and grab one from video
        output_filepath = os.path.join(tmpdir, u'screengrab.png')
        try:
            avconv.grab_frame(tmpfilepath, output_filepath)
            if os.path.exists(output_filepath):
                fi = open(output_filepath)
                context.image = NamedBlobImage(fi, filename=u'screengrab.png')
                fi.close()
        except:
            logger.warn('error getting thumbnail from video')
    rmtree(tmpdir)
Example #26
def __init__(self, blob, mode='r', streamsize=1 << 16, start=0, end=None):
    self.blob = openBlob(blob, mode)
    self.streamsize = streamsize
    self.start = start
    self.end = end
    self.seek(start, 0)
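
For context, a sketch of how a companion next() might consume these attributes when serving a byte range; this is an assumption about the rest of the class, not the actual implementation:

def next(self):  # __next__ on Python 3
    if self.end is not None:
        # never read past the end of the requested range
        remaining = self.end - self.blob.tell()
        if remaining <= 0:
            raise StopIteration
        chunk = self.blob.read(min(self.streamsize, remaining))
    else:
        chunk = self.blob.read(self.streamsize)
    if not chunk:
        raise StopIteration
    return chunk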
Example #27
def _convertFormat(context):
    # reset these...
    context.video_file_ogv = None
    context.video_file_webm = None

    video = context.video_file
    context.video_converted = True
    try:
        opened = openBlob(video._blob)
        bfilepath = opened.name
        opened.close()
    except IOError:
        logger.warn('error opening blob file')
        return

    tmpdir = mkdtemp()
    tmpfilepath = os.path.join(tmpdir, video.filename)
    copyfile(bfilepath, tmpfilepath)

    try:
        metadata = avprobe.info(tmpfilepath)
    except:
        logger.warn('not a valid video format')
        return
    context.metadata = metadata

    conversion_types = {
        'mp4': 'video_file'
    }

    portal = getToolByName(context, 'portal_url').getPortalObject()
    settings = GlobalSettings(portal)
    for type_ in settings.additional_video_formats:
        format = getFormat(type_)
        if format:
            conversion_types[format.extension] = 'video_file_%s' % (
                format.extension
            )

    for video_type, fieldname in conversion_types.items():
        if video_type == video.contentType.split('/')[-1]:
            setattr(context, fieldname, video)
        else:
            output_filepath = os.path.join(tmpdir, 'output.' + video_type)
            try:
                avconv.convert(tmpfilepath, output_filepath)
            except:
                logger.warn('error converting to %s' % video_type)
                continue
            if os.path.exists(output_filepath):
                fi = open(output_filepath)
                namedblob = NamedBlobFile(
                    fi, filename=switchFileExt(video.filename,  video_type))
                setattr(context, fieldname, namedblob)
                fi.close()

    # try and grab one from video
    output_filepath = os.path.join(tmpdir, u'screengrab.png')
    try:
        avconv.grab_frame(tmpfilepath, output_filepath)
        if os.path.exists(output_filepath):
            fi = open(output_filepath)
            context.image = NamedBlobImage(fi, filename=u'screengrab.png')
            fi.close()
    except:
        logger.warn('error getting thumbnail from video')
    rmtree(tmpdir)
Example #28
def _convertFormat(context):
    # reset these...
    context.video_file_ogv = None
    context.video_file_webm = None

    video = context.video_file
    context.video_converted = True
    try:
        opened = openBlob(video._blob)
        bfilepath = opened.name
        opened.close()
    except IOError:
        logger.warn('error opening blob file')
        return

    tmpdir = mkdtemp()
    tmpfilepath = os.path.join(tmpdir, video.filename)
    copyfile(bfilepath, tmpfilepath)

    try:
        metadata = avprobe.info(tmpfilepath)
    except:
        logger.warn('not a valid video format')
        return
    context.metadata = metadata

    conversion_types = {
        'mp4': 'video_file'
    }

    portal = getToolByName(context, 'portal_url').getPortalObject()
    settings = GlobalSettings(portal)
    for type_ in settings.additional_video_formats:
        format = getFormat(type_)
        if format:
            conversion_types[format.extension] = 'video_file_%s' % (
                format.extension
            )

    # sometimes force full video conversion
    force = settings.force

    for video_type, fieldname in conversion_types.items():
        if video_type == video.contentType.split('/')[-1] and not force:
            setattr(context, fieldname, video)
        else:
            output_filepath = os.path.join(tmpdir, 'output.' + video_type)
            try:
                avconv.convert(tmpfilepath, output_filepath, video_type, context)
            except:
                logger.warn('error converting to %s' % video_type)
                continue
            if os.path.exists(output_filepath):
                fi = open(output_filepath)
                namedblob = NamedBlobFile(
                    fi, filename=switchFileExt(video.filename,  video_type))
                setattr(context, fieldname, namedblob)
                fi.close()

    # try and grab one from video
    output_filepath = os.path.join(tmpdir, u'screengrab.png')
    try:
        avconv.grab_frame(tmpfilepath, output_filepath)
        if os.path.exists(output_filepath):
            with open(output_filepath, 'rb') as fi:
                data = fi.read()
            context.image = NamedBlobImage(data, filename=u'screengrab.png')
    except:
        logger.warn('error getting thumbnail from video')
    rmtree(tmpdir)
Example #29
def getImageAsFile(self, img=None, scale=None):
    """ get the img as a file-like object """
    if img is None:
        field = self.getField('image')
        img = field.getScale(self, scale)
    return openBlob(self.getBlobWrapper().getBlob())
Example #30
    def get_data(self):
        if not self.context.image:
            raise NotFound

        blob = self.context.image._blob
        return openBlob(blob)
Example #31
def process(context):
    video = context.file
    if not video or video.filename == aws.FILENAME:
        return

    try:
        opened = openBlob(video._blob)
        bfilepath = opened.name
        opened.close()
    except IOError:
        logger.warn('error opening blob file')
        return

    # by default, assume all non-mp4 videos need to be converted
    # but in reality, all videos need converting, even mp4.
    # md5 is only what makes this possible
    convert_it = video.contentType.split('/')[-1] != 'mp4'
    if md5 is not None:
        old_hash = getattr(context, '_file_hash', None)
        current_hash = md5(bfilepath)
        if old_hash is None or old_hash != current_hash:
            convert_it = True

    if context.image and not convert_it:
        # already an mp4 and already has a screen grab
        return

    if convert_it and youtube.should_upload(context):
        try:
            youtube.upload(context, bfilepath, filename=video.filename)
            # saving hash tells us we do not need to convert anymore...
            context._file_hash = md5(bfilepath)
            convert_it = False
        except Exception:
            logger.error('Error uploading youtube video', exc_info=True)

    tmpdir = mkdtemp()
    tmpfilepath = os.path.join(tmpdir, video.filename)
    copyfile(bfilepath, tmpfilepath)

    if convert_it:
        output_filepath = os.path.join(tmpdir, 'output.mp4')
        try:
            avconv.convert(tmpfilepath, output_filepath)
        except Exception:
            logger.info('Could not convert video', exc_info=True)
        if (os.path.exists(output_filepath) and
                os.path.getsize(output_filepath) > 0):
            if md5 is not None:
                try:
                    context._file_hash = md5(output_filepath)
                except Exception:
                    logger.info('Could not get md5', exc_info=True)
            if not getCelery().conf.task_always_eager:
                context._p_jar.sync()
            fi = open(output_filepath)
            namedblob = NamedBlobFile(
                fi, filename=switchFileExt(video.filename, 'mp4'))
            context.file = namedblob
            fi.close()

    if not context.image:
        # try and grab one from video
        output_filepath = os.path.join(tmpdir, u'screengrab.png')
        try:
            avconv.grab_frame(tmpfilepath, output_filepath)
            if os.path.exists(output_filepath):
                fi = open(output_filepath)
                context.image = NamedBlobImage(fi, filename=u'screengrab.png')
                fi.close()
        except Exception:
            logger.warning(
                'error getting thumbnail from video', exc_info=True)
    rmtree(tmpdir)
Example #32
def _serialize(cls, obj):
    blobfi = openBlob(obj)
    data = blobfi.read()
    blobfi.close()
    return {'data': base64.b64encode(data)}
Example #33
def __init__(self, blob, mode='r', streamsize=1 << 16, start=0, end=None):
    self.blob = openBlob(blob, mode)
    self.streamsize = streamsize
    self.start = start
    self.end = end
    self.seek(start, 0)
Example #34
def _convertFormat(context):
    # reset these...
    context.video_file_webm = None

    video = context.video_file
    context.video_converted = True
    try:
        opened = openBlob(video._blob)
        bfilepath = opened.name
        opened.close()
    except IOError:
        logger.warn('error opening blob file')
        return

    tmpdir = mkdtemp()
    tmpfilepath = os.path.join(tmpdir, video.filename)
    copyfile(bfilepath, tmpfilepath)

    try:
        metadata = avprobe.info(tmpfilepath)
    except:
        logger.warn('NOT a valid video format')
        return
    context.metadata = metadata
    logger.info('Valid video format')

    try:
        duration = _get_duration(tmpfilepath)
    except:
        logger.warn('cannot recover duration from file')
        return
    context.duration = duration

    conversion_types = {}

    portal = getToolByName(context, 'portal_url').getPortalObject()
    settings = GlobalSettings(portal)
    for type_ in settings.additional_video_formats:
        format = getFormat(type_)
        if format:
            conversion_types[format.type_] = '%s' % (format.quality)

    for video_type, quality in conversion_types.items():
        vt = video_type.split('_')[0]
        if video_type == video.contentType.split('/')[-1]:
            setattr(context, vt, video)
        else:
            output_filepath = os.path.join(tmpdir,
                                           'output_' + video_type + '.' + vt)
            try:
                avconv.convert(tmpfilepath, output_filepath, vt, quality,
                               context)
            except:
                logger.warn('error converting to %s' % vt)
                continue
            if os.path.exists(output_filepath):
                fi = open(output_filepath)
                namedblob = NamedBlobFile(fi,
                                          filename=switchFileExt(
                                              video.filename, vt))
                setattr(context, video_type, namedblob)
                fi.close()
            import transaction
            transaction.commit()

    # try and grab one from video
    output_filepath = os.path.join(tmpdir, u'screengrab.png')
    try:
        avconv.grab_frame(tmpfilepath, output_filepath)
        if os.path.exists(output_filepath):
            with open(output_filepath, 'rb') as fi:
                data = fi.read()
            context.image = NamedBlobImage(data, filename=u'screengrab.png')
    except:
        logger.warn('error getting thumbnail from video')
    logger.warn('CONVERSIONS FINISHED')
    rmtree(tmpdir)
Example #35
def data(self):
    """Returns the blob content as a string.
       This is highly inefficient as it loads the complete blob content
       into memory."""
    return openBlob(self.retrieve()).read()
Example #36
def size(self):
    blob = openBlob(self.retrieve())
    size = fstat(blob.fileno()).st_size
    blob.close()
    return size
Example #37
def getImageAsFile(self, img=None, scale=None):
    """ get the img as a file-like object """
    if img is None:
        field = self.getField('image')
        img = field.getScale(self, scale)
    return openBlob(self.getBlobWrapper().getBlob())
Example #38
def _serialize(kls, obj):
    blobfi = openBlob(obj)
    data = blobfi.read()
    blobfi.close()
    return {'data': base64.b64encode(data)}
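
As a usage note, the inverse operation can be sketched by rebuilding a Blob from such a base64 payload (_deserialize is an illustrative name, not part of the original module):

import base64
from ZODB.blob import Blob

def _deserialize(payload):
    """Rebuild a Blob from the {'data': <base64>} mapping produced by _serialize above."""
    blob = Blob()
    with blob.open('w') as fh:
        fh.write(base64.b64decode(payload['data']))
    return blob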