def decorator(func):
    def new_func(*args, **kw):
        runner = AdminFunctionRunner(func, new_func, args, kw, task_kw)
        return runner()

    new_func.__name__ = func.__name__
    return getCelery().task(base=AfterCommitTask, **task_kw)(new_func)
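For context, a factory like this is what backs collective.celery's public task decorator; a minimal usage sketch (assuming the decorator is exposed as collective.celery.task, per that package's README):

from collective.celery import task

@task()
def do_something_expensive(context, value):
    # runs in a worker process; thanks to the AfterCommitTask base the
    # job is only queued once the calling transaction commits
    return value * 2

# invoked like any Celery task, e.g.:
#   do_something_expensive.delay(some_content_object, 21)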
def sync_db(self):
    # circular import
    from collective.documentviewer.async import celeryInstalled
    if celeryInstalled():
        from collective.celery.utils import getCelery
        if not getCelery().conf.task_always_eager:
            # refresh the worker's ZODB connection so we see the
            # latest committed state before touching the object
            self.context._p_jar.sync()
Example #3
def create_pdf(obj, html, css):
    # this completes so fast we get conflict errors on save sometimes.
    # just cool it a bit
    time.sleep(2)
    if not getCelery().conf.task_always_eager:
        obj._p_jar.sync()
    return _create_pdf(obj, html, css)
def decorator(func):
    def new_func(self, *args, **kw):
        runner = AuthorizedFunctionRunner(func, new_func, args, kw,
                                          task_kw, bind)  # noqa
        return runner(self)

    new_func.__name__ = func.__name__
    return getCelery().task(base=AfterCommitTask, bind=True, **task_kw)(new_func)
    def __call__(self):
        celery = getCelery()
        if celery.conf.task_always_eager:
            self.eager = True
            # dive out of setup, this is not run in a celery task runner
            self.app = getApp()
            return self._run()

        self.app = makerequest(getApp())
        self.app.REQUEST['PARENTS'] = [self.app]
        setRequest(self.app.REQUEST)

        transaction.begin()
        try:
            try:
                result = self._run()
                # commit transaction
                transaction.commit()
                return result
            except ConflictError as e:
                # On ZODB conflicts, retry using celery's mechanism
                transaction.abort()
                raise Retry(exc=e)
            except Exception:
                logger.warn('Error running task: %s' % traceback.format_exc())
                transaction.abort()
                raise
        finally:
            noSecurityManager()
            setSite(None)
            self.app._p_jar.close()
            clearRequest()
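The abort-and-Retry dance above replays ZODB conflicts through Celery's retry machinery by hand. For comparison, plain Celery can express the same policy declaratively; a sketch (not part of this package):

from celery import Celery
from ZODB.POSException import ConflictError

app = Celery('example')

@app.task(bind=True, autoretry_for=(ConflictError,), retry_backoff=True)
def conflict_prone(self):
    # a body that may raise ConflictError; Celery re-queues the task
    # automatically instead of an explicit `raise Retry(exc=e)`
    ...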
    def __call__(self):
        celery = getCelery()
        if celery.conf.CELERY_ALWAYS_EAGER:
            self.eager = True
            # dive out of setup, this is not run in a celery task runner
            self.app = getApp()
            return self._run()

        self.app = makerequest(getApp())
        self.app.REQUEST['PARENTS'] = [self.app]
        setRequest(self.app.REQUEST)

        transaction.begin()
        try:
            try:
                result = self._run()
                # commit transaction
                transaction.commit()
                return result
            except ConflictError as e:
                # On ZODB conflicts, retry using celery's mechanism
                transaction.abort()
                raise Retry(exc=e)
            except Exception:
                logger.warn('Error running task: %s' % traceback.format_exc())
                transaction.abort()
                raise
Example #7
def sync_db(self):
    # circular import
    from collective.documentviewer.async import celeryInstalled
    if celeryInstalled():
        from collective.celery.utils import getCelery
        if not getCelery().conf.task_always_eager:
            self.context._p_jar.sync()
def main(argv=sys.argv):
    if len(sys.argv) < 3:
        raise Exception("must specify a zope config file and a celery command")
    argv = argv
    filepath = sys.argv[2]
    os.environ['ZOPE_CONFIG'] = filepath
    sys.argv = ['']
    from Zope2.Startup.run import configure
    configure(os.environ['ZOPE_CONFIG'])

    # Fix for setuptools generated scripts, so that it will
    # work with multiprocessing fork emulation.
    # (see multiprocessing.forking.get_preparation_data())
    if __name__ != "__main__":
        sys.modules["__main__"] = sys.modules[__name__]

    # load tasks up (importing the entry points registers them with
    # celery; the `tasks` name is immediately rebound below)
    tasks = dict([(i.name, i.load()) for i in iter_entry_points(
                  group='celery_tasks', name=None)])

    tasks = getConfiguration().environment.get('CELERY_TASKS')
    if tasks:
        try:
            __import__(tasks)
        except ImportError:
            logger.warn('error importing tasks: ' + tasks)
    argv.remove(argv[2])
    # restore argv
    sys.argv = argv
    Worker(app=getCelery()).execute_from_commandline()
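As a usage note (hedged, since the console-script name comes from collective.celery's packaging rather than this page): the entry point above expects the Zope config file before the normal Celery arguments, along the lines of:

#   pcelery parts/instance/etc/zope.conf worker --loglevel=INFO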
Example #9
def resumable_upload(obj, service, insert_request):
    response = None
    retry = 0
    while response is None:
        try:
            status, response = insert_request.next_chunk()
            if response is None:
                # upload not finished yet; next_chunk() returns
                # (status, None) until the final chunk completes
                continue
            if 'id' in response:
                # uploaded successfully
                if not getCelery().conf.task_always_eager:
                    obj._p_jar.sync()
                obj.youtube_url = 'https://youtu.be/{id}'.format(
                    id=response['id'])
                if IUploadedToYoutube.providedBy(obj):
                    try:
                        delete(obj._youtube_video_id)
                    except Exception:
                        logger.warning(
                            'Error deleting existing youtube video: {}'.format(
                                obj._youtube_video_id
                            ), exc_info=True)
                obj._youtube_video_id = response['id']
                alsoProvides(obj, IUploadedToYoutube)
                _update_access_token(service)
            else:
                logger.error('Youtube upload failed with an unexpected'
                             ' response: {}'.format(response))
        except Exception:
            logger.warning(
                'An error occurred while uploading to youtube', exc_info=True)
            retry += 1
            time.sleep(3)
            if retry > MAX_RETRIES:
                # bubble up error
                raise
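For context, the insert_request consumed above is typically built with google-api-python-client's resumable-upload support; a minimal sketch (the request body values are illustrative):

from googleapiclient.http import MediaFileUpload

def build_insert_request(service, filepath, title):
    body = {
        'snippet': {'title': title},
        'status': {'privacyStatus': 'unlisted'},
    }
    # resumable=True is what makes next_chunk() available
    media = MediaFileUpload(filepath, chunksize=1024 * 1024, resumable=True)
    return service.videos().insert(
        part=','.join(body.keys()), body=body, media_body=media)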
Example #10
def celery(self):
    # requires `import celery.bin.celery` (Celery 4.x command API)
    app = getCelery()
    status = celery.bin.celery.CeleryCommand.commands['status']()
    status.app = app
    try:
        status.run()
        return True, 'ok'
    except Exception as e:
        return False, str(e)
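On Celery 5+, where the old CeleryCommand API was removed, the same health check is usually done through the control API instead; a sketch:

def celery_ok():
    app = getCelery()
    # ping() returns a dict of worker replies, or None when no worker
    # answers within the timeout
    replies = app.control.inspect(timeout=2).ping()
    return bool(replies), 'ok' if replies else 'no workers responded'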
Example #11
    def apply_async(self, args, kwargs, **options):
        args, kw = self.serialize_args(args, kwargs)

        # Let's see if this is a retry. An existing task means yes.
        # If it is one, we'll call _apply_async directly later on.
        task = getattr(self.request, 'task', None)
        task_id = options.get('task_id', None)

        # if task is not None we are in a retry and site_path and
        # authorized_userid are already in kw
        if task is None:
            kw['site_path'] = '/'.join(api.portal.get().getPhysicalPath())
            kw['authorized_userid'] = api.user.get_current().getId()

        without_transaction = options.pop('without_transaction', False)

        celery = getCelery()
        if task_id is None:
            # Here we cheat a little: since we will not start the task
            # up until the transaction is done,
            # we cannot give back to whoever called apply_async
            # its much beloved AsyncResult.
            # But we can actually pass the task a specific task_id
            # (although it's not very documented)
            # and an AsyncResult at this point is just that id, basically.
            task_id = uuid()
        else:
            # If this is a retry, task_id will be in the options.
            # Get rid of it to avoid an error.
            del options['task_id']

        # Construct a fake result
        if celery.conf.task_always_eager:
            result_ = EagerResult(task_id, None, states.PENDING, None)
        else:
            result_ = result.AsyncResult(task_id)

        # Note: one might be tempted to turn this into a datamanager.
        # This would result in two wrong things happening:
        # * A "commit within a commit" triggered by the function runner
        #   when CELERY_TASK_ALWAYS_EAGER is set,
        #   leading to the first invoked commit cleanup failing
        #   because the inner commit already cleaned up.
        # * An async task failing in eager mode would also roll back
        #   the whole transaction, which is not desirable.
        #   Consider the case where the synchronous code constructs an object
        #   and the async task updates it: if we roll back everything,
        #   the original content construction goes away too
        #   (even if, in and by itself, it worked)
        if without_transaction or celery.conf.task_always_eager or task:
            return self._apply_async(args, kw, result_, celery, task_id,
                                     options)
        else:
            queue_task_after_commit(args, kw, self, task_id, options)
            # Return the "fake" result ID
            return result_
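The queue_task_after_commit helper is not shown on this page, but the older variant of apply_async below inlines what it does: register an after-commit hook that fires the pre-assigned task id. A sketch, assuming the signature used at the call site (result handling elided):

import transaction

def queue_task_after_commit(args, kw, task, task_id, options):
    def hook(success):
        # only queue the job if the Zope transaction actually committed
        if success:
            celery = getCelery()
            task._apply_async(args, kw, None, celery, task_id, options)
    transaction.get().addAfterCommitHook(hook)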
Example #13
def _create_pdf(obj, html, css):
    try:
        blob = create(html, css)
    except PDFGenerationError:
        logger.error('princexml error converting pdf', exc_info=True)
        return
    sblob = screenshot(blob)
    if not getCelery().conf.task_always_eager:
        obj._p_jar.sync()
    settings = PDFSetting(obj)
    settings.put(blob)
    settings.put_screenshot(sblob)
Example #14
def create_pdf_from_view(obj, css_files=None, unrestricted_traverse=False):
    # avoid the shared mutable default argument pitfall
    if css_files is None:
        css_files = []
    # this completes so fast we get conflict errors on save sometimes.
    # just cool it a bit
    time.sleep(2)
    if not getCelery().conf.task_always_eager:
        obj._p_jar.sync()
    html, css = create_raw_from_view(
        obj,
        css_files=css_files,
        unrestricted_traverse=unrestricted_traverse,
    )
    return _create_pdf(obj, html, css)
Example #15
    def apply_async(self, args, kwargs, **options):
        args, kw = self.serialize_args(args, kwargs)
        kw['site_path'] = '/'.join(api.portal.get().getPhysicalPath())
        kw['authorized_userid'] = api.user.get_current().getId()

        without_transaction = options.pop('without_transaction', False)

        celery = getCelery()
        # Here we cheat a little: since we will not start the task
        # up until the transaction is done,
        # we cannot give back to whoever called apply_async
        # its much beloved AsyncResult.
        # But we can actually pass the task a specific task_id
        # (although it's not very documented)
        # and an AsyncResult at this point is just that id, basically.
        task_id = uuid()

        # Construct a fake result
        if celery.conf.CELERY_ALWAYS_EAGER:
            result_ = EagerResult(task_id, None, states.PENDING, None)
        else:
            result_ = result.AsyncResult(task_id)

        # Note: one might be tempted to turn this into a datamanager.
        # This would result in two wrong things happening:
        # * A "commit within a commit" triggered by the function runner
        #   when CELERY_ALWAYS_EAGER is set,
        #   leading to the first invoked commit cleanup failing
        #   because the inner commit already cleaned up.
        # * An async task failing in eager mode would also roll back
        #   the whole transaction, which is not desirable.
        #   Consider the case where the synchronous code constructs an object
        #   and the async task updates it: if we roll back everything,
        #   the original content construction goes away too
        #   (even if, in and by itself, it worked)
        def hook(success):
            if success:
                self._apply_async(args, kw, result_, celery, task_id, options)

        if without_transaction or celery.conf.CELERY_ALWAYS_EAGER:
            return self._apply_async(args, kw, result_, celery, task_id,
                                     options)
        else:
            transaction.get().addAfterCommitHook(hook)
            # Return the "fake" result ID
            return result_
Example #17
def move_file(obj):
    # boto 2 / Python 2 variant; see the boto3 version in the next example
    _, bucket = get_bucket()
    if bucket is None:
        return

    uid = IUUID(obj)
    if not uid:
        logger.info('Could not get uid of object')
        return

    key = KEY_PREFIX + uid
    filename = obj.file.filename
    if not isinstance(filename, unicode):
        filename = unicode(filename, 'utf-8', errors="ignore")
    filename = urllib.quote(filename.encode("utf8"))
    disposition = "attachment; filename*=UTF-8''%s" % filename

    size = obj.file.getSize()
    chunk_count = int(math.ceil(size / float(CHUNK_SIZE)))
    content_type = obj.file.contentType
    mp = bucket.initiate_multipart_upload(key, metadata={
        'Content-Type': content_type,
        'Content-Disposition': disposition
    })

    blob_fi = obj.file._blob.open('r')

    for i in range(chunk_count):
        chunk = blob_fi.read(CHUNK_SIZE)
        fp = io.BytesIO(chunk)
        mp.upload_part_from_file(fp, part_num=i + 1)

    mp.complete_upload()
    blob_fi.close()

    if not getCelery().conf.task_always_eager:
        obj._p_jar.sync()
    obj.file = NamedBlobFile(data='', contentType=obj.file.contentType, filename=FILENAME)
    obj.file.original_filename = filename
    obj.file.original_content_type = content_type
    obj.file.original_size = size

    set_permission(obj)
Example #18
def move_file(obj):
    _, bucket = get_bucket()
    if bucket is None:
        return

    # META DATA
    uid = IUUID(obj)
    if not uid:
        logger.info('Could not get uid of object')
        return
    key = KEY_PREFIX + uid
    filename = obj.file.filename
    if not isinstance(filename, unicode):
        filename = unicode(filename, 'utf-8', errors="ignore")
    filename = quote(filename.encode("utf8"))
    disposition = "attachment; filename*=UTF-8''%s" % filename
    size = obj.file.getSize()
    content_type = obj.file.contentType
    extraargs = {
        'ContentType': content_type,
        'ContentDisposition': disposition,
    }

    # Upload to AWS
    # valid modes in ZODB 3, 4 or 5 do not include 'rb' --
    #   see ZODB/blob.py line 54 (or so) for 'valid_modes'
    # note: upload_fileobj() does a multipart upload, which is why
    #   chunked uploading is no longer performed explicitly
    #   see: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.upload_fileobj  # noqa
    blob_fi = obj.file._blob.open('r')
    bucket.upload_fileobj(blob_fi, key, ExtraArgs=extraargs)

    # Delete data from ZODB, but leave a reference
    if not getCelery().conf.task_always_eager:
        obj._p_jar.sync()
    obj.file = NamedBlobFile(data='',
                             contentType=obj.file.contentType,
                             filename=FILENAME)
    obj.file.original_filename = filename
    obj.file.original_content_type = content_type
    obj.file.original_size = size

    set_permission(obj)
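The get_bucket helper is also not shown here; a plausible boto3 counterpart matching the (connection, bucket) return shape used above (the bucket name and credential sourcing are assumptions):

import boto3

def get_bucket(name='my-bucket'):
    # credentials come from the usual boto3 chain (env vars,
    # ~/.aws/credentials, instance profile, ...)
    s3 = boto3.resource('s3')
    return s3, s3.Bucket(name)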
Example #19
def main(argv=sys.argv):
    if len(sys.argv) < 3:
        raise Exception("must specify a zope config file and a celery command")
    argv = argv

    # find the index of the conf file in the args
    conf_index = 2
    for idx, arg in enumerate(sys.argv):
        if '.conf' in arg:
            conf_index = idx
            break
    filepath = sys.argv[conf_index]
    os.environ['ZOPE_CONFIG'] = filepath
    sys.argv = ['']
    from Zope2.Startup.run import configure
    configure(os.environ['ZOPE_CONFIG'])

    # Fix for setuptools generated scripts, so that it will
    # work with multiprocessing fork emulation.
    # (see multiprocessing.forking.get_preparation_data())
    if __name__ != "__main__":
        sys.modules["__main__"] = sys.modules[__name__]

    # load tasks up (importing the entry points registers them with
    # celery; the `tasks` name is immediately rebound below)
    tasks = dict([(i.name, i.load()) for i in iter_entry_points(
                  group='celery_tasks', name=None)])

    tasks = getConfiguration().environment.get('CELERY_TASKS')
    if tasks:
        try:
            __import__(tasks)
        except ImportError:
            logger.warn('error importing tasks: ' + tasks)
    argv.remove(filepath)
    # restore argv
    sys.argv = argv
    Worker(app=getCelery()).execute_from_commandline()
    def __call__(self):
        celery = getCelery()
        if celery.conf.CELERY_ALWAYS_EAGER:
            # dive out of setup, this is not run in a celery task runner
            return self._run()

        self.app = makerequest(getApp())
        setRequest(self.app.REQUEST)

        transaction.begin()
        try:
            try:
                result = self._run()
                # commit transaction
                transaction.commit()
                return result
            except ConflictError as e:
                # On ZODB conflicts, retry using celery's mechanism
                transaction.abort()
                raise Retry(exc=e)
            except Exception:
                logger.warn('Error running task: %s' % traceback.format_exc())
                transaction.abort()
                raise
def main(argv=sys.argv):
    if len(sys.argv) < 3:
        raise Exception("must specify a zope config file and a celery command")
    argv = argv

    # find the index of the conf file in the args
    conf_index = 2
    for idx, arg in enumerate(sys.argv):
        if '.conf' in arg:
            conf_index = idx
            break
    filepath = sys.argv[conf_index]
    os.environ['ZOPE_CONFIG'] = filepath
    sys.argv = ['']
    try:
        from Zope2.Startup.run import configure
        startup = configure(os.environ['ZOPE_CONFIG'])
    except ImportError:
        from Zope2.Startup.run import configure_wsgi
        startup = configure_wsgi(os.environ['ZOPE_CONFIG'])

    # Fix for setuptools generated scripts, so that it will
    # work with multiprocessing fork emulation.
    # (see multiprocessing.forking.get_preparation_data())
    if __name__ != "__main__":
        sys.modules["__main__"] = sys.modules[__name__]

    # load entry point tasks up
    tasks = []
    for entry_point in iter_entry_points(group='celery_tasks', name=None):
        try:
            tasks.append((entry_point.name, entry_point.load()))
        except ImportError:
            logger.warn('error importing tasks: ' + entry_point.name)
            raise
    tasks = dict(tasks)
    for name, task_list in tasks.items():
        logger.warn('importing tasks: ' + name)
        extra_config = getattr(task_list, 'extra_config', None)
        if extra_config is not None:
            logger.warn('Found additional Zope config.')
            extra_config(startup)

    # load env tasks up
    tasks = getConfiguration().environment.get('CELERY_TASKS')
    if tasks:
        for task_list in tasks.split():
            try:
                logger.warn('importing tasks: ' + task_list)
                module = import_module(task_list)
                extra_config = getattr(module, 'extra_config', None)
                if extra_config is not None:
                    logger.warn('Found additional Zope config.')
                    extra_config(startup)
            except ImportError:
                logger.warn('error importing tasks: ' + task_list)
                raise
    argv.remove(filepath)
    # restore argv
    sys.argv = argv
    Worker(app=getCelery()).execute_from_commandline()
Example #23
def setup_app_from_commandline(self, argv):
    self.app = getCelery()
    return argv
Example #24
def _paste_items(where, op, mdatas):
    logger.info('Copying a bunch of items')
    portal = api.portal.get()
    catalog = api.portal.get_tool('portal_catalog')
    dest = portal.restrictedTraverse(str(where.lstrip('/')))

    count = 0
    commit_count = 0
    if not getCelery().conf.task_always_eager:
        portal._p_jar.sync()

    try:
        if mdatas[0][0].startswith('cache:'):
            cache_key = mdatas[0][0].replace('cache:', '')
            mdatas = cache.get(cache_key)
    except IndexError:
        pass

    for mdata in mdatas[:]:
        count += len(catalog(path={'query': '/'.join(mdata), 'depth': -1}))
        ob = portal.unrestrictedTraverse(str('/'.join(mdata)), None)
        if ob is None:
            continue
        if op == 0:
            # copy
            api.content.copy(ob, dest, safe_id=True)
        else:
            api.content.move(ob, dest, safe_id=True)

        if count // 50 != commit_count:
            # commit every 50 objects moved
            transaction.commit()
            commit_count = count // 50
            if not getCelery().conf.task_always_eager:
                portal._p_jar.sync()
            # so we do not redo it
            try:
                mdatas.remove(mdata)
            except Exception:
                pass

    # we commit here so we can trigger conflict errors before
    # trying to send email
    transaction.commit()

    user = api.user.get_current()
    email = user.getProperty('email')
    if email:
        name = user.getProperty('fullname') or user.getId()
        try:
            utils.send_email(
                recipients=email,
                subject="Paste Operation Finished(Site: %s)" %
                (api.portal.get_registry_record('plone.site_title')),
                html="""
    <p>Hi %s,</p>

    <p>The site has finished pasting items into /%s folder.</p>""" %
                (name, where.lstrip('/')))
        except Exception:
            logger.warn('Could not send status email ', exc_info=True)
Example #26
def process(context):
    video = context.file
    if not video or video.filename == aws.FILENAME:
        return

    try:
        opened = openBlob(video._blob)
        bfilepath = opened.name
        opened.close()
    except IOError:
        logger.warn('error opening blob file')
        return

    # by default, assume all non-mp4 videos need to be converted.
    # In reality every video needs converting, even mp4; the md5
    # hash check below is what lets us skip files already processed.
    convert_it = video.contentType.split('/')[-1] != 'mp4'
    if md5 is not None:
        old_hash = getattr(context, '_file_hash', None)
        current_hash = md5(bfilepath)
        if old_hash is None or old_hash != current_hash:
            convert_it = True

    if context.image and not convert_it:
        # already an mp4 and already has a screen grab
        return

    if convert_it and youtube.should_upload(context):
        try:
            youtube.upload(context, bfilepath, filename=video.filename)
            # saving hash tells us we do not need to convert anymore...
            context._file_hash = md5(bfilepath)
            convert_it = False
        except Exception:
            logger.error('Error uploading youtube video', exc_info=True)

    tmpdir = mkdtemp()
    tmpfilepath = os.path.join(tmpdir, video.filename)
    copyfile(bfilepath, tmpfilepath)

    if convert_it:
        output_filepath = os.path.join(tmpdir, 'output.mp4')
        try:
            avconv.convert(tmpfilepath, output_filepath)
        except Exception:
            logger.info('Could not convert video', exc_info=True)
        if (os.path.exists(output_filepath) and
                os.path.getsize(output_filepath) > 0):
            if md5 is not None:
                try:
                    context._file_hash = md5(output_filepath)
                except Exception:
                    logger.info('Could not get md5', exc_info=True)
            if not getCelery().conf.task_always_eager:
                context._p_jar.sync()
            fi = open(output_filepath, 'rb')  # binary mode for the video blob
            namedblob = NamedBlobFile(
                fi, filename=switchFileExt(video.filename, 'mp4'))
            context.file = namedblob
            fi.close()

    if not context.image:
        # try and grab one from video
        output_filepath = os.path.join(tmpdir, u'screengrab.png')
        try:
            avconv.grab_frame(tmpfilepath, output_filepath)
            if os.path.exists(output_filepath):
                fi = open(output_filepath, 'rb')  # binary mode for the image
                context.image = NamedBlobImage(fi, filename=u'screengrab.png')
                fi.close()
        except Exception:
            logger.warning(
                'error getting thumbnail from video', exc_info=True)
    rmtree(tmpdir)
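One caveat worth flagging: the md5 name used above is a project helper that takes a file path (hashlib.md5 takes bytes). A minimal equivalent, as a sketch:

import hashlib

def md5(filepath, chunk_size=2 ** 20):
    # hash the file on disk in chunks to avoid loading it into memory
    h = hashlib.md5()
    with open(filepath, 'rb') as fi:
        for chunk in iter(lambda: fi.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()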