def start(self, videoId, force=False, **kwargs):
    """
    Action: analysis
    Parameters: videoId, force
    Begin the analysis of the video by extracting every other frame and
    submitting them to google cloud vision. The process will be performed
    on a separate thread. The process is cached and won't be performed
    more than once, unless `force` is specified and set to `True`.
    """
    video = model.getService('video').getById(
        videoId,
        fields=['snapshotsFolder', 'path', 'analysis', 'name', 'duration'])
    existing_worker = memory.getVal(MEMKEY)
    if existing_worker is not None and existing_worker.isAlive():
        # An analyzer thread is still running: reattach the progress
        # callback to it instead of spawning a second worker.
        # (logging.warning replaces the deprecated logging.warn; lazy
        # %-args replace eager string formatting.)
        logging.warning(
            "An analyze is still in progress for video: %s", video['name'])
        existing_worker.resubscribe(self.callback)
        return
    logging.info("Starting analysis of video: %s", video['name'])
    analyzer = Analyzer(
        videoId, video['path'], video['snapshotsFolder'],
        progressCb=self.callback, force=force,
        annotator=Conf['data']['videos']['annotator'],
        videoDuration=video['duration'])
    analyzer.start()
    # Remember the worker so a later call can detect / resubscribe to it.
    memory.setVal(MEMKEY, analyzer)
def setData(self):
    """
    Route PUT /api/notify/data
    Save some data in memory
    The following parameters are required:
    * name
    * value
    """
    name = self.get_argument('name')
    value = self.get_argument('value')
    logging.info("Setting %s to %s", name, value)
    # Best-effort decoding: store the parsed object when `value` holds
    # valid JSON, otherwise keep the raw string. `except ValueError`
    # replaces the original bare `except`, which also swallowed
    # KeyboardInterrupt/SystemExit (json.loads raises ValueError;
    # py3's JSONDecodeError is a subclass).
    try:
        value = json.loads(value)
    except ValueError:
        pass
    data = memory.getVal('user-data')
    if data is None:
        data = {}
        # store the same dict we mutate below so the write persists
        memory.setVal('user-data', data)
    data[name] = value
    self.write(json.dumps({}))
def start(self, **kwargs): updater = memory.getVal(MEMKEY) if updater: logging.warn("An update is already running") updater.resubscribe(self.callback) else: updater = Walker(progressCb=self.callback, async=True) updater.start() memory.setVal(MEMKEY, updater)
def deleteStoredFilter(self):
    """
    Route: DELETE /api/notify/filter
    Delete a filter, giving its `filteruid`
    """
    filterType = self.get_argument('type')
    filteruid = self.get_argument('uid')
    val = memory.getVal('current-filter-%s' % filterType)
    if val is None:
        val = {}
        # Store the same dict instance we mutate below. The original
        # stored a separate empty `{}`, so `val` and the stored dict
        # diverged (cf. the correct pattern in setData, which stores
        # the mutated dict itself).
        memory.setVal('current-filter-%s' % filterType, val)
    if filteruid in val:
        del val[filteruid]
    self.write(json.dumps({}))
def on_progress(self, status):
    """Push a progress update to the websocket client.

    The status dict is also cached in memory so the db update status
    handler can access it when refreshing the page.
    """
    memory.setVal(STATUSMEMKEY, status)
    if (status.get('finished', False)
            or status.get('interrupted', False)
            or status.get('errorred', False)):
        # The worker ended (one way or another): forget it.
        memory.setVal(MEMKEY, None)
    dump = {}
    dump['status'] = dict(status)
    dump['status']['duration'] = timeFormat(float(status.get('duration', 0)))
    # NOTE(review): a stray `dump['status']['file']` expression stood here
    # in the original -- a no-op that raised KeyError whenever the status
    # had no 'file' key; it has been removed.
    dump = json.dumps(dump)
    try:
        self.write_message(dump)
    except Exception as e:
        logging.exception(e)
        # the socket is probably stale, stop receiving update
        # until a new connection comes in
        updater = memory.getVal(MEMKEY)
        if updater is not None:
            updater.resubscribe(None)
def getStoredFilter(self):
    """
    Route: GET /api/notify/filter
    Get the stored filters as an object
    { <type>: { <uid>: {<type>, <name>, <value>} } }

    NOTE(review): the original docstring claimed a `type` parameter was
    required, but the code never reads it -- all three types are always
    returned.
    """
    res = {}
    for filterType in ['tag', 'video', 'photo']:
        val = memory.getVal('current-filter-%s' % filterType)
        if val is None:
            val = {}
            # Store the same dict instance we expose; the original stored
            # an unrelated empty `{}` (same defect as addFilter /
            # deleteStoredFilter).
            memory.setVal('current-filter-%s' % filterType, val)
        res[filterType] = val
    self.write(json.dumps(res))
def start(self, filters, options, **kwargs):
    """
    Action: start
    Parameters: filters, options
    Start the video compilation process
    """
    def schedule_progress(result):
        # Runs on the `VideoCompiler` thread; it only schedules the real
        # handler to run on the main thread via the IOLoop.
        # would we be able to push data on the existing socket from the
        # separate thread directly?
        IOLoop.instance().add_callback(lambda: self.on_progress(result))

    running = memory.getVal('video-compiler')
    if running is not None and running.isAlive():
        raise Exception("A compilation is still in progress.")

    logging.info("Starting analysis of videos compilation process")
    worker = VideoCompiler(filters, options, progressCb=schedule_progress)
    worker.start()
    memory.setVal('video-compiler', worker)
def addFilter(self):
    """
    Route: PUT /api/notify/filter
    Notify the server that the client added a new tag.
    The following parameters are required:
    * type: the type ('video', 'tag', or 'photo') of the filter
    * name: a name for this filter
    * value: a value for this filter
    * filteruid: a unique identifier for this filter, used for remove
      notification
    """
    filterType = self.get_argument('type')
    name = self.get_argument('name')
    value = self.get_argument('value')
    filteruid = self.get_argument('uid')
    val = memory.getVal('current-filter-%s' % filterType)
    if val is None:
        val = {}
        # Store the dict we are about to mutate. The original stored a
        # separate empty `{}`, so the first filter added for each type
        # was written into an unreferenced dict and silently dropped.
        memory.setVal('current-filter-%s' % filterType, val)
    val[filteruid] = {'type': filterType, 'name': name, 'value': value}
    self.write(json.dumps({}))
def stop(self, **kwargs):
    """Stop the running updater (if any), wait for it, and clear its slot.

    Guard against `memory.getVal(MEMKEY)` returning None (no update in
    progress): the original called `updater.stop()` unconditionally and
    raised AttributeError in that case. The sibling `stop` handler for
    MEMKEY already guards the same way.
    """
    updater = memory.getVal(MEMKEY)
    if updater is not None:
        updater.stop()
        updater.join()
    memory.setVal(MEMKEY, None)
def stop(self, **kwargs):
    """Stop the registered worker, if one exists, then clear its slot."""
    worker = memory.getVal(MEMKEY)
    if worker is not None:
        worker.stop()
    memory.setVal(MEMKEY, None)
def regenerateThumbnail(self):
    """
    Route: POST /api/video/thumbnails/regenerate
    Regenerate the thumbnail. The generation will be performed on a
    separate thread. If a generation is already in progress, the request
    will raise an error (even for a different video, only one
    re-generation is allowed)
    To get the progress of the generation, checkout the
    `generationProgress` function.
    The parameter `videoId` is required, and the parameters `frameRate`,
    `width` and `height` can be defined.
    WARNING: `frameRate` is expected to have the format: 'X/Y' where X is
    the number of frame to generate and Y is a time period
    (e.g.: `"1/60"` generate 1 frame every 60s)
    """
    existing_worker = memory.getVal('thumbnail-generator')
    if existing_worker is not None and existing_worker.isAlive():
        # The worker thread's `name` holds the id of the video being
        # processed (set when the Thread is created below).
        video = model.getService('video').getById(
            existing_worker.name, fields=['name'])
        raise Exception(
            "A generation is still in progress for video: %s"
            % video['name'])

    videoId = self.get_argument('videoId')
    video = model.getService('video').getById(videoId)
    # Stored paths are relative to the configured videos root folder.
    video['path'] = '%s%s' % (
        Conf['data']['videos']['rootFolder'], video['path'])
    video['snapshotsFolder'] = '%s%s' % (
        Conf['data']['videos']['rootFolder'], video['snapshotsFolder'])

    data = {
        'frameRate': self.get_argument(
            'frameRate', default=Conf['data']['ffmpeg']['frameRate']),
        'width': self.get_argument(
            'width',
            default=Conf['data']['ffmpeg']['snapshotDimensions'][0]),
        'height': self.get_argument(
            'height',
            default=Conf['data']['ffmpeg']['snapshotDimensions'][1]),
        'ffmpegPath': Conf['data']['ffmpeg']['exePath'],
        'videoPath': video['path'],
        'snapFolder': video['snapshotsFolder']
    }
    logging.info("Re-generating snapshots for video: %s" % video['name'])
    logging.info("FrameRate=%s, Width=%s, height: %s" % (
        data['frameRate'], data['width'], data['height']))

    # Best-effort removal / re-creation of the snapshots folder: failures
    # are logged and generation proceeds anyway. `except OSError`
    # replaces the original bare `except`, which also swallowed
    # KeyboardInterrupt and SystemExit.
    try:
        shutil.rmtree(video['snapshotsFolder'])
    except OSError:
        logging.warning("Unable to remove thumbnails folder: %s. \
Attempting to generate thumbnails anyways..." % video['snapshotsFolder'])
    try:
        os.makedirs(video['snapshotsFolder'])
    except OSError:
        logging.warning("Unable to create thumbnails folder: %s. \
Attempting to generate thumbnails anyways..." % video['snapshotsFolder'])

    # Reset the snapshot count; the worker thread updates it when done.
    model.getService('video').set(videoId, 'nbSnapshots', 0)
    video['nbSnapshots'] = 0

    def asyncThumbGen(data):
        # Runs on a separate thread: invoke ffmpeg, then record how many
        # thumbnails were actually produced.
        logging.warning("Starting Thumbnail re-generation!")
        start_t = time.time()
        # SECURITY NOTE(review): the video path is interpolated into a
        # shell command (shell=True); a crafted file name could inject
        # shell syntax. Consider subprocess.call([...], shell=False).
        # (The original bound the result to an unused `return_code`.)
        subprocess.call(
            '{ffmpegPath} -i "{videoPath}" -f image2 -vf fps=fps={frameRate} -s {width}x{height} "{snapFolder}\\thumb%03d.png"'
            .format(**data),
            shell=True)
        logging.warning(
            "Thumbnails re-generation complete! Done in %.3fs."
            % (time.time() - start_t))
        try:
            thumbnails = os.listdir(video['snapshotsFolder'])
        except OSError:
            logging.warning("Couldn't read thumbnails in folder: %s"
                            % (video['snapshotsFolder']))
            thumbnails = []
        model.getService('video').set(
            videoId, 'nbSnapshots', len(thumbnails))

    worker = Thread(target=asyncThumbGen, name=videoId, args=[data])
    worker.start()
    memory.setVal('thumbnail-generator', worker)
    self.write(json.dumps(populateMissingData(video)))