def push(request, video_id):
    """Push annotations or an S3 dataset export for a video to a remote VDN server.

    On POST, ``push_type`` selects either pushing individual annotations to a
    child VDN dataset, or exporting the whole video to S3 and registering a root
    dataset. Always renders the push page listing servers and the annotations
    that have not been pushed yet.
    """
    video = Video.objects.get(pk=video_id)
    if request.method == 'POST':
        push_type = request.POST.get('push_type')
        server = VDNServer.objects.get(pk=request.POST.get('server_pk'))
        token = request.POST.get('token_{}'.format(server.pk))
        server.last_token = token
        server.save()
        server_url = server.url
        if not server_url.endswith('/'):
            server_url += '/'
        headers = {'Authorization': 'Token {}'.format(server.last_token)}
        if push_type == 'annotation':
            new_vdn_dataset = create_child_vdn_dataset(video, server, headers)
            for key in request.POST:
                if key.startswith('annotation_') and request.POST[key]:
                    annotation = Region.objects.get(pk=int(key.split('annotation_')[1]))
                    data = {
                        'label': annotation.label,
                        'metadata_text': annotation.metadata_text,
                        'x': annotation.x,
                        'y': annotation.y,
                        'w': annotation.w,
                        'h': annotation.h,
                        'full_frame': annotation.full_frame,
                        'parent_frame_index': annotation.parent_frame_index,
                        'dataset_id': int(new_vdn_dataset.url.split('/')[-2]),
                    }
                    # server_url is guaranteed to end with '/' above, so do not add
                    # another slash (the old format string produced '...//api/...').
                    r = requests.post("{}api/annotations/".format(server_url), data=data, headers=headers)
                    if r.status_code == 201:
                        annotation.vdn_dataset = new_vdn_dataset
                        annotation.save()
                    else:
                        raise ValueError("Annotation push failed with HTTP status {}".format(r.status_code))
        elif push_type == 'dataset':
            key = request.POST.get('key')
            bucket = request.POST.get('bucket')
            name = request.POST.get('name')
            description = request.POST.get('description')
            s3export = TEvent()
            s3export.event_type = TEvent.S3EXPORT
            s3export.video = video
            s3export.key = key
            s3export.bucket = bucket
            s3export.save()
            create_root_vdn_dataset(s3export, server, headers, name, description)
            task_name = 'push_video_to_vdn_s3'
            app.send_task(task_name, args=[s3export.pk, ], queue=settings.TASK_NAMES_TO_QUEUE[task_name])
        else:
            raise NotImplementedError
    servers = VDNServer.objects.all()
    context = {'video': video, 'servers': servers}
    if video.vdn_dataset:
        # Only offer annotations that have not been attached to a VDN dataset yet.
        context['annotations'] = Region.objects.all().filter(video=video, vdn_dataset__isnull=True,
                                                             region_type=Region.ANNOTATION)
    else:
        context['annotations'] = Region.objects.all().filter(video=video, region_type=Region.ANNOTATION)
    return render(request, 'push.html', context)
def clustering(request):
    """Render the clustering page; on POST create a Clusters row and queue perform_clustering."""
    context = {
        'clusters': Clusters.objects.all(),
        'algorithms': {entry.algorithm for entry in IndexEntries.objects.all()},
        'index_entries': IndexEntries.objects.all(),
    }
    if request.method == 'POST':
        post = request.POST
        c = Clusters()
        c.indexer_algorithm = post.get('algorithm')
        # Include every index entry produced by the chosen algorithm.
        c.included_index_entries_pk = [entry.pk for entry in IndexEntries.objects.all()
                                       if entry.algorithm == c.indexer_algorithm]
        c.components = post.get('components')
        c.sub = post.get('sub')
        c.m = post.get('m')
        c.v = post.get('v')
        c.save()
        task_name = "perform_clustering"
        new_task = TEvent()
        new_task.clustering = c
        new_task.operation = task_name
        new_task.save()
        app.send_task(name=task_name, args=[new_task.pk, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    return render(request, 'clustering.html', context)
def import_vdn_dataset_url(server, url, user):
    """Fetch a VDN dataset description from `url` and queue its import.

    Creates the local VDN dataset record and a placeholder Video, then picks the
    import task by source: direct download URL or S3 key/bucket pair.
    Raises NotImplementedError when neither source is available.
    """
    r = requests.get(url)
    response = r.json()
    vdn_dataset = create_dataset(response, server)
    vdn_dataset.save()
    video = Video()
    if user:
        video.uploader = user
    video.name = vdn_dataset.name
    video.vdn_dataset = vdn_dataset
    video.save()
    # The two original branches were identical except for the task name;
    # pick the task first and create the event once.
    if vdn_dataset.download_url:
        task_name = 'import_vdn_file'
    elif vdn_dataset.aws_key and vdn_dataset.aws_bucket:
        task_name = 'import_vdn_s3'
    else:
        raise NotImplementedError
    import_video_task = TEvent()
    import_video_task.video = video
    import_video_task.operation = task_name
    import_video_task.save()
    app.send_task(name=task_name, args=[import_video_task.pk, ],
                  queue=settings.TASK_NAMES_TO_QUEUE[task_name])
def handle_downloaded_file(downloaded, video, name, extract=True, user=None, rate=30, rescale=0, ):
    """Move a downloaded file into the media tree for `video` and queue processing.

    ``.dva_export.zip`` files are imported as previously exported videos;
    ``.mp4``/``.flv``/``.zip`` are raw videos/datasets and are optionally queued
    for frame extraction (datasets) or segmentation (videos).

    Returns the video; raises ValueError for any other extension.
    """
    video.name = name
    video.save()
    filename = downloaded.split('/')[-1]
    if filename.endswith('.dva_export.zip'):
        create_video_folders(video, create_subdirs=False)
        os.rename(downloaded, '{}/{}/{}.{}'.format(settings.MEDIA_ROOT, video.pk, video.pk,
                                                   filename.split('.')[-1]))
        video.uploaded = True
        video.save()
        task_name = 'import_video_by_id'
        import_video_task = TEvent()
        import_video_task.video = video
        # Record the operation on the event (was previously left unset, unlike
        # the sibling import helpers).
        import_video_task.operation = task_name
        import_video_task.save()
        app.send_task(name=task_name, args=[import_video_task.pk, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    elif filename.endswith('.mp4') or filename.endswith('.flv') or filename.endswith('.zip'):
        create_video_folders(video, create_subdirs=True)
        os.rename(downloaded, '{}/{}/video/{}.{}'.format(settings.MEDIA_ROOT, video.pk, video.pk,
                                                         filename.split('.')[-1]))
        video.uploaded = True
        if filename.endswith('.zip'):
            video.dataset = True
        video.save()
        if extract:
            extract_frames_task = TEvent()
            extract_frames_task.arguments_json = json.dumps({'rate': rate, 'rescale': rescale})
            extract_frames_task.video = video
            task_name = 'extract_frames' if video.dataset else 'segment_video'
            extract_frames_task.operation = task_name
            extract_frames_task.save()
            app.send_task(name=task_name, args=[extract_frames_task.pk, ],
                          queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    else:
        # Call form works on both Python 2 and 3 (was the py2-only statement form).
        raise ValueError("Extension {} not allowed".format(filename.split('.')[-1]))
    return video
def indexes(request):
    """Render the indexes page; on POST queue a perform_indexing task built from the form."""
    context = {
        'visual_index_list': settings.VISUAL_INDEXES.items(),
        'index_entries': IndexEntries.objects.all(),
        "videos": Video.objects.all().filter(parent_query__isnull=True),
        "region_types": Region.REGION_TYPES
    }
    if request.method == 'POST':
        post = request.POST
        # Mandatory region filters.
        filters = {
            'region_type__in': post.getlist('region_type__in', []),
            'w__gte': int(post.get('w__gte')),
            'h__gte': int(post.get('h__gte')),
        }
        # Optional string-valued filters.
        for key in ['metadata_text__contains', 'object_name__contains', 'object_name']:
            if post.get(key, None):
                filters[key] = post.get(key)
        # Optional integer-valued filters.
        for key in ['h__lte', 'w__lte']:
            if post.get(key, None):
                filters[key] = int(post.get(key))
        args = {'filters': filters, 'index': post.get('visual_index_name')}
        queue = settings.VISUAL_INDEXES[args['index']]['indexer_queue']
        index_event = TEvent()
        index_event.operation = 'perform_indexing'
        index_event.arguments_json = json.dumps(args)
        index_event.video_id = post.get('video_id')
        index_event.save()
        app.send_task(name=index_event.operation, args=[index_event.pk, ], queue=queue)
    return render(request, 'indexes.html', context)
def export_video(request):
    """Start an export of a video, either to S3 or as a local export file."""
    if request.method != 'POST':
        raise NotImplementedError
    video = Video.objects.get(pk=request.POST.get('video_id'))
    export_method = request.POST.get('export_method')
    if video:
        if export_method == 's3':
            task_name = 'backup_video_to_s3'
            event = TEvent()
            event.event_type = TEvent.S3EXPORT
            event.video = video
            event.key = request.POST.get('key')
            event.bucket = request.POST.get('bucket')
            event.save()
            app.send_task(task_name, args=[event.pk, ],
                          queue=settings.TASK_NAMES_TO_QUEUE[task_name])
        else:
            task_name = 'export_video_by_id'
            event = TEvent()
            event.event_type = TEvent.EXPORT
            event.video = video
            event.operation = task_name
            event.save()
            app.send_task(task_name, args=[event.pk, ],
                          queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    return redirect('video_list')
def video_send_task(request):
    """Manually queue a named task against a video, with optional JSON arguments."""
    if request.method != 'POST':
        raise NotImplementedError
    task_name = request.POST.get('task_name')
    args = json.loads(request.POST.get('arguments_json', '{}'))
    event = TEvent()
    event.video_id = int(request.POST.get('video_id'))
    event.arguments_json = json.dumps(args)
    event.save()
    app.send_task(name=task_name, args=[event.pk, ], queue=get_queue_name(task_name, args))
    return redirect('video_list')
def handle_youtube_video(name, url, extract=True, user=None, rate=30, rescale=0):
    """Create a Video record for a YouTube URL and optionally queue segmentation.

    The segmentation TEvent is always saved; it is only dispatched when
    `extract` is true. Returns the new video.
    """
    video = Video()
    if user:
        video.uploader = user
    video.name = name
    video.url = url
    video.youtube_video = True
    video.save()
    task_name = 'segment_video'
    segment_task = TEvent()
    segment_task.video = video
    segment_task.operation = task_name
    segment_task.arguments_json = json.dumps({'rate': rate, 'rescale': rescale})
    segment_task.save()
    if extract:
        app.send_task(name=task_name, args=[segment_task.pk, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    return video
def _parse_training_args(request):
    """Build the detector-training arguments dict from the POSTed form.

    Starts from the free-form JSON in 'args' (empty dict when blank) and
    overlays name, label/object-name lists and excluded videos.
    """
    args = request.POST.get('args')
    args = json.loads(args) if args.strip() else {}
    args['name'] = request.POST.get('name')
    args['labels'] = [k.strip() for k in request.POST.get('labels').split(',') if k.strip()]
    args['object_names'] = [k.strip() for k in request.POST.get('object_names').split(',') if k.strip()]
    args['excluded_videos'] = request.POST.getlist('excluded_videos')
    return args


def training(request):
    """Render the detector-training page.

    POST action 'estimate' computes dataset statistics for the requested
    labels/objects; any other POST creates a CustomDetector and queues a
    train_yolo_detector task.
    """
    context = {}
    context["videos"] = Video.objects.all().filter(parent_query__isnull=True)
    context["detectors"] = CustomDetector.objects.all()
    if request.method == 'POST':
        # The arg-parsing code was previously duplicated verbatim in both branches.
        args = _parse_training_args(request)
        if request.POST.get('action') == 'estimate':
            labels = set(args['labels']) if 'labels' in args else set()
            object_names = set(args['object_names']) if 'object_names' in args else set()
            class_distribution, class_names, rboxes, rboxes_set, frames, i_class_names = \
                create_detector_dataset(object_names, labels)
            context["estimate"] = {
                'args': args,
                'class_distribution': class_distribution,
                'class_names': class_names,
                'rboxes': rboxes,
                'rboxes_set': rboxes_set,
                'frames': frames,
                'i_class_names': i_class_names
            }
        else:
            detector = CustomDetector()
            detector.name = args['name']
            detector.algorithm = "yolo"
            detector.arguments = json.dumps(args)
            detector.save()
            args['detector_pk'] = detector.pk
            task_name = "train_yolo_detector"
            train_event = TEvent()
            train_event.operation = task_name
            train_event.arguments_json = json.dumps(args)
            train_event.save()
            # Link the detector back to the event that trains it.
            detector.source = train_event
            detector.save()
            app.send_task(name=task_name, args=[train_event.pk, ],
                          queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    return render(request, 'training.html', context)
def handle_uploaded_file(f, name, extract=True, user=None, rate=30, rescale=0):
    """Persist an uploaded file into the media tree and queue processing.

    ``.dva_export.zip`` uploads are imported as previously exported videos;
    ``.mp4``/``.flv``/``.zip`` are raw videos/datasets and are optionally queued
    for frame extraction (datasets) or segmentation (videos).

    Returns the new video; raises ValueError for any other extension.
    """
    video = Video()
    if user:
        video.uploader = user
    video.name = name
    video.save()
    primary_key = video.pk
    filename = f.name
    filename = filename.lower()
    if filename.endswith('.dva_export.zip'):
        create_video_folders(video, create_subdirs=False)
        with open('{}/{}/{}.{}'.format(settings.MEDIA_ROOT, video.pk, video.pk,
                                       filename.split('.')[-1]), 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        video.uploaded = True
        video.save()
        task_name = 'import_video_by_id'
        import_video_task = TEvent()
        import_video_task.video = video
        # Record the operation on the event (was previously left unset, unlike
        # the sibling import helpers).
        import_video_task.operation = task_name
        import_video_task.save()
        app.send_task(name=task_name, args=[import_video_task.pk, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    elif filename.endswith('.mp4') or filename.endswith('.flv') or filename.endswith('.zip'):
        create_video_folders(video, create_subdirs=True)
        with open('{}/{}/video/{}.{}'.format(settings.MEDIA_ROOT, video.pk, video.pk,
                                             filename.split('.')[-1]), 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        video.uploaded = True
        if filename.endswith('.zip'):
            video.dataset = True
        video.save()
        if extract:
            extract_frames_task = TEvent()
            extract_frames_task.arguments_json = json.dumps({'rate': rate, 'rescale': rescale})
            extract_frames_task.video = video
            task_name = 'extract_frames' if video.dataset else 'segment_video'
            extract_frames_task.operation = task_name
            extract_frames_task.save()
            app.send_task(name=task_name, args=[extract_frames_task.pk, ],
                          queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    else:
        # Call form works on both Python 2 and 3 (was the py2-only statement form).
        raise ValueError("Extension {} not allowed".format(filename.split('.')[-1]))
    return video
def import_vdn_detector_url(server, url, user):
    """Fetch a VDN detector description from `url` and queue import of its weights."""
    response = requests.get(url).json()
    vdn_detector = create_vdn_detector(response, server)
    detector = CustomDetector()
    detector.name = vdn_detector.name
    detector.vdn_detector = vdn_detector
    detector.save()
    if vdn_detector.download_url:
        task_name = 'import_vdn_detector_file'
        import_task = TEvent()
        import_task.operation = task_name
        import_task.arguments_json = json.dumps({'detector_pk': detector.pk})
        import_task.save()
        app.send_task(name=task_name, args=[import_task.pk, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    elif vdn_detector.aws_key and vdn_detector.aws_bucket:
        # S3-hosted detectors are not supported yet.
        raise NotImplementedError
    else:
        raise NotImplementedError
def export_video(request):
    """Export a video to S3 ('s3' method) or as a local export file (default)."""
    if request.method != 'POST':
        raise NotImplementedError
    video = Video.objects.get(pk=request.POST.get('video_id'))
    method = request.POST.get('export_method')
    if video:
        if method == 's3':
            export_event = TEvent()
            export_event.event_type = TEvent.S3EXPORT
            export_event.video = video
            export_event.key = request.POST.get('key')
            export_event.bucket = request.POST.get('bucket')
            export_event.save()
            task_name = 'backup_video_to_s3'
        else:
            export_event = TEvent()
            export_event.event_type = TEvent.EXPORT
            export_event.video = video
            export_event.operation = 'export_video_by_id'
            export_event.save()
            task_name = 'export_video_by_id'
        app.send_task(task_name, args=[export_event.pk, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    return redirect('video_list')
def handle_downloaded_file(downloaded, video, name, extract=True, user=None, perform_scene_detection=True,
                           rate=30, rescale=0, ):
    """Move a downloaded file into the media tree for `video` and queue processing.

    ``.dva_export.zip`` files are imported as previously exported videos;
    ``.mp4``/``.flv``/``.zip`` are raw videos/datasets and are optionally queued
    for extraction via 'extract_frames_by_id' (with scene detection flag).

    Returns the video; raises ValueError for any other extension.
    """
    video.name = name
    video.save()
    filename = downloaded.split('/')[-1]
    if filename.endswith('.dva_export.zip'):
        create_video_folders(video, create_subdirs=False)
        os.rename(downloaded, '{}/{}/{}.{}'.format(settings.MEDIA_ROOT, video.pk, video.pk,
                                                   filename.split('.')[-1]))
        video.uploaded = True
        video.save()
        task_name = 'import_video_by_id'
        import_video_task = TEvent()
        import_video_task.video = video
        # Record the operation on the event (was previously left unset, unlike
        # the extraction branch below).
        import_video_task.operation = task_name
        import_video_task.save()
        app.send_task(name=task_name, args=[import_video_task.pk, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    elif filename.endswith('.mp4') or filename.endswith('.flv') or filename.endswith('.zip'):
        create_video_folders(video, create_subdirs=True)
        os.rename(downloaded, '{}/{}/video/{}.{}'.format(settings.MEDIA_ROOT, video.pk, video.pk,
                                                         filename.split('.')[-1]))
        video.uploaded = True
        if filename.endswith('.zip'):
            video.dataset = True
        video.save()
        if extract:
            extract_frames_task = TEvent()
            extract_frames_task.arguments_json = json.dumps(
                {'perform_scene_detection': perform_scene_detection, 'rate': rate, 'rescale': rescale})
            extract_frames_task.video = video
            task_name = 'extract_frames_by_id'
            extract_frames_task.operation = task_name
            extract_frames_task.save()
            app.send_task(name=task_name, args=[extract_frames_task.pk, ],
                          queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    else:
        # Call form works on both Python 2 and 3 (was the py2-only statement form).
        raise ValueError("Extension {} not allowed".format(filename.split('.')[-1]))
    return video
def retry_task(request, pk):
    """Resubmit a task event: video tasks get a fresh TEvent, query tasks redirect to requery."""
    event = TEvent.objects.get(pk=int(pk))
    context = {}
    if settings.TASK_NAMES_TO_TYPE[event.operation] == settings.VIDEO_TASK:
        new_event = TEvent()
        new_event.video_id = event.video_id
        new_event.arguments_json = event.arguments_json
        # Record the operation on the retried event (was previously left unset,
        # unlike the sibling retry_task view that sets it).
        new_event.operation = event.operation
        new_event.save()
        result = app.send_task(name=event.operation, args=[new_event.pk],
                               queue=settings.TASK_NAMES_TO_QUEUE[event.operation])
        # str.format silently ignores extra kwargs; the old queue= argument was dead code.
        context['alert'] = "Operation {} on {} submitted".format(event.operation, event.video.name)
        return render_tasks(request, context)
    elif settings.TASK_NAMES_TO_TYPE[event.operation] == settings.QUERY_TASK:
        return redirect("/requery/{}/".format(event.video.parent_query_id))
    else:
        raise NotImplementedError
def management(request):
    """Show the management dashboard; on POST dispatch a host-management operation."""
    timeout = 1.0
    context = {
        'timeout': timeout,
        'actions': ManagementAction.objects.all(),
        'workers': Worker.objects.all(),
        'state': SystemState.objects.all().order_by('-created')[:100]
    }
    if request.method == 'POST':
        op = request.POST.get("op", "")
        host_name = request.POST.get("host_name", "").strip()
        queue_name = request.POST.get("queue_name", "").strip()
        if op == "list_workers":
            context["queues"] = app.control.inspect(timeout=timeout).active_queues()
        elif op in ("list", "gpuinfo"):
            # Both ops forward straight to the host manager (branches were identical).
            t = app.send_task('manage_host', args=[op, ], exchange='qmanager')
            t.wait(timeout=timeout)
        elif op == "launch":
            t = app.send_task('manage_host', args=[op, host_name, queue_name], exchange='qmanager')
            t.wait(timeout=timeout)
    return render(request, 'management.html', context)
def import_s3(request):
    """Create a pending Video and queue an S3 import for the POSTed key."""
    if request.method != 'POST':
        raise NotImplementedError
    key = request.POST.get('key')
    region = request.POST.get('region')
    bucket = request.POST.get('bucket')
    s3import = TEvent()
    s3import.event_type = TEvent.S3IMPORT
    s3import.key = key
    s3import.region = region
    s3import.bucket = bucket
    video = Video()
    user = request.user if request.user.is_authenticated() else None
    if user:
        video.uploader = user
    video.name = "pending S3 import {} s3://{}/{}".format(region, bucket, key)
    video.save()
    s3import.video = video
    s3import.save()
    create_video_folders(video, create_subdirs=False)
    task_name = 'import_video_from_s3'
    app.send_task(name=task_name, args=[s3import.pk, ],
                  queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    return redirect('video_list')
def management(request):
    """Render the management dashboard; on POST op 'list' ping the host manager."""
    timeout = 1.0
    context = {
        'timeout': timeout,
        'actions': models.ManagementAction.objects.all(),
        'workers': models.Worker.objects.all(),
        'restarts': models.TaskRestart.objects.all(),
        'state': models.SystemState.objects.all().order_by('-created')[:100]
    }
    if request.method == 'POST' and request.POST.get("op", "") == "list":
        t = app.send_task('manage_host', args=[], exchange='qmanager')
        t.wait(timeout=timeout)
    return render(request, 'dvaui/management.html', context)
def process_next(task_id, inject_filters=None, custom_next_tasks=None):
    """Launch the follow-up tasks of TEvent `task_id`.

    Follow-ups come from two sources: the config-level POST_OPERATION_TASKS
    table, and the 'next_tasks' list embedded in the event's own arguments
    (plus any caller-supplied `custom_next_tasks`). Each spec gets its
    arguments substituted, a child TEvent, and a celery dispatch.
    """
    if custom_next_tasks is None:
        custom_next_tasks = []
    dt = TEvent.objects.get(pk=task_id)
    logging.info("next tasks for {}".format(dt.operation))

    def _launch(k, source):
        # Shared launch body (was duplicated verbatim for both sources);
        # `source` only affects the log message ('config' / 'next_tasks').
        args = perform_substitution(k['arguments'], dt, inject_filters)
        jargs = json.dumps(args)
        logging.info(
            "launching {}, {} with args {} as specified in {}".format(
                dt.operation, k['task_name'], args, source))
        next_task = TEvent.objects.create(video=dt.video, operation=k['task_name'],
                                          arguments_json=jargs, parent=dt)
        app.send_task(k['task_name'], args=[next_task.pk, ],
                      queue=get_queue_name(k['task_name'], args))

    for k in settings.POST_OPERATION_TASKS.get(dt.operation, []):
        _launch(k, 'config')
    for k in json.loads(dt.arguments_json).get('next_tasks', []) + custom_next_tasks:
        _launch(k, 'next_tasks')
def retry_task(request, pk):
    """Resubmit a task event; video tasks are re-sent, anything else redirects to requery."""
    event = TEvent.objects.get(pk=int(pk))
    context = {}
    if settings.TASK_NAMES_TO_TYPE[event.operation] == settings.VIDEO_TASK:
        app.send_task(
            name=event.operation,
            args=[event.video_id],
            queue=settings.TASK_NAMES_TO_QUEUE[event.operation])
        # str.format silently ignores extra kwargs; the old queue= argument was dead code.
        context['alert'] = "Operation {} on {} submitted".format(event.operation, event.video.name)
        return render_status(request, context)
    else:
        return redirect("/requery/{}/".format(event.video.parent_query_id))
def import_vdn_detector_url(server, url, user):
    """Register a remote VDN detector locally and queue the download of its weights."""
    payload = requests.get(url).json()
    vdn_detector = create_vdn_detector(payload, server)
    detector = CustomDetector()
    detector.name = vdn_detector.name
    detector.vdn_detector = vdn_detector
    detector.save()
    if vdn_detector.download_url:
        task_name = 'import_vdn_detector_file'
        download_task = TEvent()
        download_task.operation = task_name
        download_task.arguments_json = json.dumps({'detector_pk': detector.pk})
        download_task.save()
        app.send_task(name=task_name,
                      args=[download_task.pk, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    elif vdn_detector.aws_key and vdn_detector.aws_bucket:
        # S3-hosted detectors are not supported yet.
        raise NotImplementedError
    else:
        raise NotImplementedError
def import_s3(request):
    """Queue one S3 import per newline-separated key in the POSTed form."""
    if request.method != 'POST':
        raise NotImplementedError
    keys = request.POST.get('key')
    region = request.POST.get('region')
    bucket = request.POST.get('bucket')
    for key in keys.strip().split('\n'):
        if not key.strip():
            continue
        s3import = TEvent()
        s3import.event_type = TEvent.S3IMPORT
        s3import.key = key.strip()
        # NOTE(review): `region` is only used in the display name here; the
        # sibling import_s3 view also stores it on the event — confirm intended.
        s3import.bucket = bucket
        video = Video()
        user = request.user if request.user.is_authenticated else None
        if user:
            video.uploader = user
        video.name = "pending S3 import {} s3://{}/{}".format(region, bucket, key)
        video.save()
        s3import.video = video
        s3import.save()
        task_name = 'import_video_from_s3'
        app.send_task(name=task_name, args=[s3import.pk, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    return redirect('video_list')
def send_tasks(self):
    """Queue one indexing task per indexer query, each chaining a retrieval task."""
    task_name = 'perform_indexing'
    for iq in self.indexer_queries:
        queue_name = self.visual_indexes[iq.algorithm]['indexer_queue']
        retrieval_spec = {
            'task_name': 'perform_retrieval',
            'arguments': {'iq_id': iq.pk, 'index': iq.algorithm}
        }
        jargs = json.dumps({
            'iq_id': iq.pk,
            'index': iq.algorithm,
            'target': 'query',
            'next_tasks': [retrieval_spec]
        })
        next_task = TEvent.objects.create(video=self.dv, operation=task_name, arguments_json=jargs)
        self.task_results[iq.algorithm] = app.send_task(task_name, args=[next_task.pk, ],
                                                        queue=queue_name, priority=5)
        self.context[iq.algorithm] = []
def launch_query_tasks(self):
    """Create and dispatch one high-priority TEvent per 'map' entry in the process script."""
    self.assign_task_group_id(self.process.script.get('map', []))
    for spec in self.process.script['map']:
        arguments = spec.get('arguments', {})
        queue_name, operation = get_queue_name_and_operation(spec['operation'], arguments)
        next_task = TEvent.objects.create(parent_process=self.process,
                                          operation=operation,
                                          arguments=arguments,
                                          queue=queue_name,
                                          task_group_id=spec['task_group_id'])
        self.task_results[next_task.pk] = app.send_task(name=operation,
                                                        args=[next_task.pk, ],
                                                        queue=queue_name,
                                                        priority=5)
def monitor_system():
    """Scheduler task: ping the host manager and snapshot process/task counts."""
    last_action = models.ManagementAction.objects.filter(ping_index__isnull=False).last()
    ping_index = last_action.ping_index + 1 if last_action else 0
    # TODO: Handle the case where host manager has not responded to last and itself has died
    _ = app.send_task('manage_host', args=['list', ping_index], exchange='qmanager')
    process_stats = {
        'processes': models.DVAPQL.objects.count(),
        'completed_processes': models.DVAPQL.objects.filter(completed=True).count(),
        'tasks': models.TEvent.objects.count(),
        'pending_tasks': models.TEvent.objects.filter(started=False).count(),
        'completed_tasks': models.TEvent.objects.filter(started=True, completed=True).count()
    }
    _ = models.SystemState.objects.create(redis_stats=redis_client.info(),
                                          process_stats=process_stats)
def delete_video_object(video_pk, deleter, garbage_collection=True):
    """Record a tombstone for a deleted video and optionally queue physical cleanup."""
    video = Video.objects.get(pk=video_pk)
    tombstone = DeletedVideo()
    tombstone.name = video.name
    tombstone.deleter = deleter
    tombstone.uploader = video.uploader
    tombstone.url = video.url
    tombstone.description = video.description
    tombstone.original_pk = video_pk
    tombstone.save()
    video.delete()
    if garbage_collection:
        cleanup = TEvent()
        cleanup.arguments_json = json.dumps({'video_pk': video_pk})
        cleanup.operation = 'delete_video_by_id'
        cleanup.save()
        _ = app.send_task(name=cleanup.operation, args=[cleanup.pk],
                          queue=settings.TASK_NAMES_TO_QUEUE[cleanup.operation])
def delete_video_object(video_pk, deleter, garbage_collection=True):
    """Record a tombstone for a deleted video and optionally queue physical cleanup.

    This variant stores the cleanup arguments as a dict on `arguments` rather
    than JSON text.
    """
    video = Video.objects.get(pk=video_pk)
    tombstone = DeletedVideo()
    tombstone.name = video.name
    tombstone.deleter = deleter
    tombstone.uploader = video.uploader
    tombstone.url = video.url
    tombstone.description = video.description
    tombstone.original_pk = video_pk
    tombstone.save()
    video.delete()
    if garbage_collection:
        cleanup = TEvent()
        cleanup.arguments = {'video_pk': video_pk}
        cleanup.operation = 'delete_video_by_id'
        cleanup.save()
        _ = app.send_task(name=cleanup.operation, args=[cleanup.pk],
                          queue=settings.TASK_NAMES_TO_QUEUE[cleanup.operation])
def perform_query(count, approximate, selected_indexers, excluded_index_entries_pk, image_data_url, user):
    """Create a query and dispatch the retriever task of each selected indexer, waiting on each.

    A TimeoutError on any retriever marks `time_out`; any other failure is
    re-raised as ValueError.
    """
    query, dv = create_query(count, approximate, selected_indexers, excluded_index_entries_pk,
                             image_data_url, user)
    task_results = {}
    context = {}
    for visual_index_name, visual_index in settings.VISUAL_INDEXES.iteritems():
        task_name = visual_index['retriever_task']
        if visual_index_name in selected_indexers:
            task_results[visual_index_name] = app.send_task(
                task_name, args=[query.pk, ], queue=settings.TASK_NAMES_TO_QUEUE[task_name])
            context[visual_index_name] = []
    # Initialize before the wait loop; previously only assigned inside the
    # except clause, leaving it undefined on the success path.
    time_out = False
    for visual_index_name, result in task_results.iteritems():
        try:
            logging.info("Waiting for {}".format(visual_index_name))
            _ = result.get(timeout=120)
        except TimeoutError:
            time_out = True
        except Exception as e:  # 'as' form is valid on both Python 2 and 3
            raise ValueError(e)
def search(request):
    """Handle a visual-search POST.

    Creates a query from the submitted image and indexer selection, fans out one
    retriever task per selected indexer, waits (up to 120s each) for results,
    decorates them with media URLs and detection metadata, and returns JSON with
    frame results and detection results separately.
    """
    if request.method == 'POST':
        count = request.POST.get('count')
        excluded_index_entries_pk = json.loads(request.POST.get('excluded_index_entries'))
        selected_indexers = json.loads(request.POST.get('selected_indexers'))
        approximate = True if request.POST.get('approximate') == 'true' else False
        image_data_url = request.POST.get('image_url')
        query, dv = create_query(count, approximate, selected_indexers, excluded_index_entries_pk, image_data_url)
        task_results = {}
        user = request.user if request.user.is_authenticated() else None
        for visual_index_name, visual_index in settings.VISUAL_INDEXES.iteritems():
            task_name = visual_index['retriever_task']
            if visual_index_name in selected_indexers:
                task_results[visual_index_name] = app.send_task(task_name, args=[query.pk, ],
                                                                queue=settings.TASK_NAMES_TO_QUEUE[task_name])
        # NOTE(review): tasks are dispatched before query.user is saved below;
        # confirm workers do not depend on the user being attached.
        query.user = user
        query.save()
        results = []
        results_detections = []
        time_out = False
        for visual_index_name, result in task_results.iteritems():
            try:
                entries = result.get(timeout=120)
            except TimeoutError:
                # Report the timeout to the client but keep whatever results arrived.
                time_out = True
                entries = {}
            if entries and settings.VISUAL_INDEXES[visual_index_name]['detection_specific']:
                # Detection-specific indexes: one result per detected region.
                for algo, rlist in entries.iteritems():
                    for r in rlist:
                        r['url'] = '{}{}/detections/{}.jpg'.format(settings.MEDIA_URL, r['video_primary_key'],
                                                                   r['detection_primary_key'])
                        d = Region.objects.get(pk=r['detection_primary_key'])
                        r['result_detect'] = True
                        r['frame_primary_key'] = d.frame_id
                        r['result_type'] = 'detection'
                        r['detection'] = [{'pk': d.pk, 'name': d.object_name, 'confidence': d.confidence}, ]
                        results_detections.append(r)
            elif entries:
                # Frame-level indexes: attach all detections found in each frame.
                for algo, rlist in entries.iteritems():
                    for r in rlist:
                        r['url'] = '{}{}/frames/{}.jpg'.format(settings.MEDIA_URL, r['video_primary_key'],
                                                               r['frame_index'])
                        r['detections'] = [{'pk': d.pk, 'name': d.object_name, 'confidence': d.confidence}
                                           for d in Region.objects.filter(frame_id=r['frame_primary_key'])]
                        r['result_type'] = 'frame'
                        results.append(r)
        return JsonResponse(data={'task_id': "", 'time_out': time_out, 'primary_key': query.pk,
                                  'results': results, 'results_detections': results_detections})
def monitor_system():
    """Scheduler task: ping the host manager, expire stale workers, snapshot system state."""
    last_action = models.ManagementAction.objects.filter(ping_index__isnull=False).last()
    ping_index = last_action.ping_index + 1 if last_action else 0
    # TODO: Handle the case where host manager has not responded to last and itself has died
    _ = app.send_task('manage_host', args=['list', ping_index], exchange='qmanager')
    worker_stats = {
        'alive': 0,
        'transition': 0,
        'dead': models.Worker.objects.filter(alive=False).count()
    }
    for w in models.Worker.objects.filter(alive=True):
        # A worker silent for more than 10 minutes is marked dead so that
        # processes_monitor can error out its tasks and restart if possible.
        stale = (timezone.now() - w.last_ping).total_seconds() > 600
        if stale:
            w.alive = False
            w.save()
            worker_stats['transition'] += 1
        else:
            worker_stats['alive'] += 1
    process_stats = {
        'processes': models.DVAPQL.objects.count(),
        'completed_processes': models.DVAPQL.objects.filter(completed=True).count(),
        'tasks': models.TEvent.objects.count(),
        'pending_tasks': models.TEvent.objects.filter(started=False).count(),
        'completed_tasks': models.TEvent.objects.filter(started=True, completed=True).count()
    }
    _ = models.SystemState.objects.create(redis_stats=redis_client.info(),
                                          process_stats=process_stats,
                                          worker_stats=worker_stats)
def launch_task(self, t):
    """Launch one task spec `t` from a DVAPQL script.

    Resolves '__created__' placeholders to pks of objects created earlier in
    this process, binds the target video (by id or selector), expands map
    filters, and dispatches one TEvent per filter to its resolved queue.
    """
    # Resolve argument values that reference objects created earlier in this process.
    for k, v in t.get('arguments', {}).iteritems():
        if (type(v) is str or type(v) is unicode) and v.startswith('__created__'):
            t['arguments'][k] = self.get_created_object_pk(v)
    dv = None
    if t['operation'] in settings.NON_PROCESSING_TASKS:
        # Non-processing tasks are not bound to a video.
        dv = None
    elif 'video_id' in t:
        if t['video_id'].startswith('__created__'):
            t['video_id'] = self.get_created_object_pk(t['video_id'])
        dv = Video.objects.get(pk=t['video_id'])
    elif 'video_selector' in t['arguments']:
        dv = Video.objects.get(**t['arguments']['video_selector'])
        t['video_id'] = dv.pk
    if dv:
        map_filters = get_map_filters(t, dv)
    else:
        map_filters = [{}]
    # This is useful in case of perform_stream_capture where batch size is used but number of segments is unknown
    if map_filters == []:
        map_filters = [{}]
    for f in map_filters:
        args = copy.deepcopy(t.get('arguments', {}))  # make copy so that spec isnt mutated.
        if f:
            # Merge the per-task filter into any filters already in the spec.
            if 'filters' not in args:
                args['filters'] = f
            else:
                args['filters'].update(f)
        dt = TEvent()
        dt.parent_process = self.process
        dt.task_group_id = t['task_group_id']
        dt.parent = self.root_task
        if 'video_id' in t:
            dt.video_id = t['video_id']
        if 'training_set_id' in t:
            dt.training_set_id = t['training_set_id']
        elif 'trainingset_selector' in t['arguments']:
            # NOTE(review): this assigns a TrainingSet instance to the *_id field
            # (the sibling branch assigns a pk) — confirm intended.
            dt.training_set_id = TrainingSet.objects.get(**t['arguments']['trainingset_selector'])
        dt.arguments = args
        # The operation may be remapped (e.g. to a queue-specific name) together with its queue.
        dt.queue, op = get_queue_name_and_operation(t['operation'], t.get('arguments', {}))
        dt.operation = op
        dt.save()
        self.task_results[dt.pk] = app.send_task(name=dt.operation, args=[dt.pk, ], queue=dt.queue)
def launch_tasks(k, dt, inject_filters, map_filters = None, launch_type = ""):
    """Launch one child TEvent per map filter for next-task spec `k`, parented to `dt`.

    `launch_type` only labels the log line ('sync' / 'map' in the callers).
    Returns the list of celery task ids that were dispatched.
    """
    v = dt.video
    op = k['operation']
    p = dt.parent_process
    if map_filters is None:
        map_filters = [{},]
    tids = []
    for f in map_filters:
        args = perform_substitution(k['arguments'], dt, inject_filters, f)
        logging.info("launching {} -> {} with args {} as specified in {}".format(dt.operation, op, args, launch_type))
        # `op` is rebound here to the (possibly remapped) operation for this queue.
        q, op = get_queue_name_and_operation(k['operation'], args)
        if "video_selector" in k and v is None:
            # Parent had no video; resolve the target video from the spec's selector.
            video_per_task = Video.objects.get(**k['video_selector'])
        else:
            video_per_task = v
        next_task = TEvent.objects.create(video=video_per_task, operation=op, arguments=args, parent=dt, task_group_id=k['task_group_id'],parent_process=p, queue=q)
        # NOTE(review): send_task dispatches k['operation'] while the TEvent records
        # the remapped `op`; other launchers send the remapped name — confirm intended.
        tids.append(app.send_task(k['operation'], args=[next_task.pk, ], queue=q).id)
    return tids
def retry_task(request):
    """Resubmit the task event identified by the POSTed 'pk'.

    Video tasks get a fresh TEvent (copying video, arguments and operation);
    query tasks redirect to the requery page.
    """
    pk = request.POST.get('pk')
    event = TEvent.objects.get(pk=int(pk))
    context = {}
    if settings.TASK_NAMES_TO_TYPE[event.operation] == settings.VIDEO_TASK:
        new_event = TEvent()
        new_event.video_id = event.video_id
        new_event.arguments_json = event.arguments_json
        new_event.operation = event.operation
        new_event.save()
        app.send_task(name=event.operation, args=[new_event.pk],
                      queue=settings.TASK_NAMES_TO_QUEUE[event.operation])
        # str.format silently ignores extra kwargs; the old queue= argument was dead code.
        context['alert'] = "Operation {} on {} submitted".format(event.operation, event.video.name)
        return redirect('tasks')
    elif settings.TASK_NAMES_TO_TYPE[event.operation] == settings.QUERY_TASK:
        return redirect("/requery/{}/".format(event.video.parent_query_id))
    else:
        raise NotImplementedError
def launch(self):
    """Execute the DVAPQL script attached to self.process.

    PROCESS scripts: run 'delete' entries (videos only), then 'create'
    entries (instantiating models and launching their per-object tasks),
    then top-level 'tasks'. QUERY scripts: launch each task directly with
    priority 5. Task-group bookkeeping is written back into the script
    before saving the process.
    """
    if self.process.script['process_type'] == DVAPQL.PROCESS:
        for d in self.process.script.get('delete', []):
            if d['MODEL'] == 'Video':
                # Deep-copy so the selector in the stored script is untouched.
                d_copy = copy.deepcopy(d)
                m = apps.get_model(app_label='dvaapp', model_name=d['MODEL'])
                instance = m.objects.get(**d_copy['selector'])
                # Record the deletion for auditing before removing the row.
                DeletedVideo.objects.create(deleter=self.process.user, video_uuid=instance.pk)
                instance.delete()
            else:
                # NOTE(review): marks the process failed but continues with the
                # remaining script entries rather than aborting — confirm intent.
                self.process.failed = True
                self.process.error_message = "Cannot delete {}; Only video deletion implemented.".format(d['MODEL'])
        for c in self.process.script.get('create', []):
            # Deep-copy so placeholder substitution never mutates the script.
            c_copy = copy.deepcopy(c)
            m = apps.get_model(app_label='dvaapp', model_name=c['MODEL'])
            for k, v in c['spec'].iteritems():
                if v == '__timezone.now__':
                    c_copy['spec'][k] = timezone.now()
            instance = m.objects.create(**c_copy['spec'])
            self.created_objects.append(instance)
            self.assign_task_group_id(c.get('tasks', []))
            for t in copy.deepcopy(c.get('tasks', [])):
                self.launch_task(t, instance.pk)
        self.assign_task_group_id(self.process.script.get('tasks', []))
        for t in self.process.script.get('tasks', []):
            self.launch_task(t)
    elif self.process.script['process_type'] == DVAPQL.QUERY:
        self.assign_task_group_id(self.process.script.get('tasks', []))
        for t in self.process.script['tasks']:
            operation = t['operation']
            arguments = t.get('arguments', {})
            queue_name, operation = get_queue_name_and_operation(operation, arguments)
            next_task = TEvent.objects.create(parent_process=self.process,
                                              operation=operation, arguments=arguments,
                                              queue=queue_name, task_group_id=t['task_group_id'])
            # Queries are latency sensitive, hence the elevated priority.
            self.task_results[next_task.pk] = app.send_task(name=operation, args=[next_task.pk, ],
                                                            queue=queue_name, priority=5)
    else:
        raise NotImplementedError
    # Persist group-index bookkeeping back into the stored script.
    self.process.script['task_group_name_to_index'] = self.task_group_name_to_index
    self.process.script['parent_task_group_index'] = self.parent_task_group_index
    self.process.save()
def process_next(dt, inject_filters=None, custom_next_tasks=None, sync=True, launch_next=True, map_filters=None):
    """Launch follow-up work for completed TEvent `dt`.

    Runs any configured sync tasks (or cloud-fs uploads), then the spec's
    'map' tasks plus `custom_next_tasks`, then one perform_reduce TEvent per
    'reduce' entry. Returns the list of launched celery task ids.
    """
    if custom_next_tasks is None:
        custom_next_tasks = []
    task_id = dt.pk
    launched = []
    # Copy so downstream mutation never touches the stored arguments.
    args = copy.deepcopy(dt.arguments)
    logging.info("next tasks for {}".format(dt.operation))
    next_tasks = args.get('map', []) if args and launch_next else []
    if sync and settings.MEDIA_BUCKET:
        for k in SYNC_TASKS.get(dt.operation, []):
            if settings.ENABLE_CLOUDFS:
                # With a cloud filesystem, upload directly instead of a sync task.
                dirname = k['arguments'].get('dirname', None)
                task_shared.upload(dirname, task_id, dt.video_id)
            else:
                launched += launch_tasks(k, dt, inject_filters, None, 'sync')
    for k in next_tasks + custom_next_tasks:
        if map_filters is None:
            map_filters = get_map_filters(k, dt.video)
        # NOTE(review): map_filters computed for the first task above is reused
        # for every subsequent task in this loop — confirm that is intentional.
        launched += launch_tasks(k, dt, inject_filters, map_filters, 'map')
    for reduce_task in args.get('reduce', []):
        next_task = TEvent.objects.create(
            video=dt.video,
            operation="perform_reduce",
            arguments=reduce_task['arguments'],
            parent=dt,
            task_group_id=reduce_task['task_group_id'],
            parent_process_id=dt.parent_process_id,
            queue=settings.Q_REDUCER)
        launched.append(
            app.send_task(next_task.operation, args=[next_task.pk, ],
                          queue=settings.Q_REDUCER).id)
    return launched
def launch(self):
    """Execute the DVAPQL script attached to self.process.

    PROCESS scripts: instantiate each 'create' entry and launch its tasks,
    then the top-level 'tasks'. QUERY scripts: launch each task directly
    with priority 5. Raises NotImplementedError for other process types.
    """
    if self.process.script['process_type'] == DVAPQL.PROCESS:
        for c in self.process.script.get('create', []):
            m = apps.get_model(app_label='dvaapp', model_name=c['MODEL'])
            # BUG FIX: substitute placeholders into a deep copy so the stored
            # script spec is never mutated (consistent with the newer launch()).
            spec = copy.deepcopy(c['spec'])
            for k, v in c['spec'].iteritems():
                if v == '__timezone.now__':
                    spec[k] = timezone.now()
            instance = m.objects.create(**spec)
            self.created_objects.append(instance)
            # Deep-copy the task specs too; launch_task rewrites '__pk__'.
            for t in copy.deepcopy(c.get('tasks', [])):
                self.launch_task(t, instance.pk)
        for t in self.process.script.get('tasks', []):
            self.launch_task(t)
    elif self.process.script['process_type'] == DVAPQL.QUERY:
        for t in self.process.script['tasks']:
            operation = t['operation']
            arguments = t.get('arguments', {})
            queue_name = get_queue_name(operation, arguments)
            next_task = TEvent.objects.create(parent_process=self.process,
                                              operation=operation, arguments=arguments,
                                              queue=queue_name)
            # Queries are latency sensitive, hence the elevated priority.
            self.task_results[next_task.pk] = app.send_task(name=operation, args=[next_task.pk, ],
                                                            queue=queue_name, priority=5)
    else:
        raise NotImplementedError
def launch_task(self, t, created_pk=None):
    """Expand task spec `t` into one TEvent per map filter and enqueue each.

    When `created_pk` is given, every '__pk__' placeholder in the spec
    (both video_id and argument values) is replaced by it first.
    """
    if created_pk:
        if t.get('video_id', '') == '__pk__':
            t['video_id'] = created_pk
        for arg_name, arg_value in t.get('arguments', {}).iteritems():
            if arg_value == '__pk__':
                t['arguments'][arg_name] = created_pk
    if 'video_id' in t:
        target_video = Video.objects.get(pk=t['video_id'])
        map_filters = get_map_filters(t, target_video)
    else:
        map_filters = [{}]
    # Useful e.g. for perform_stream_capture where batch size is used but the
    # number of segments is unknown: fall back to a single unfiltered task.
    if map_filters == []:
        map_filters = [{}]
    for per_task_filter in map_filters:
        # Deep-copy so the spec itself is never mutated by filter injection.
        task_args = copy.deepcopy(t.get('arguments', {}))
        if per_task_filter:
            if 'filters' in task_args:
                task_args['filters'].update(per_task_filter)
            else:
                task_args['filters'] = per_task_filter
        child = TEvent()
        child.parent_process = self.process
        child.task_group_id = t['task_group_id']
        if 'video_id' in t:
            child.video_id = t['video_id']
        child.arguments = task_args
        child.queue, resolved_op = get_queue_name_and_operation(t['operation'],
                                                                t.get('arguments', {}))
        child.operation = resolved_op
        child.save()
        self.task_results[child.pk] = app.send_task(name=child.operation,
                                                    args=[child.pk, ],
                                                    queue=child.queue)
def monitor_system():
    """
    This task used by scheduler to monitor state of the system.
    :return:
    """
    # Mark any process complete once all of its TEvents have finished.
    for process in models.DVAPQL.objects.filter(completed=False):
        if not models.TEvent.objects.filter(parent_process=process, completed=False).count():
            process.completed = True
            process.save()
    previous = models.ManagementAction.objects.filter(ping_index__isnull=False).last()
    ping_index = previous.ping_index + 1 if previous else 0
    # TODO: Handle the case where host manager has not responded to last and itself has died
    _ = app.send_task('manage_host', args=['list', ping_index], exchange='qmanager')
    # Persist a snapshot of process/task counts.
    snapshot = models.SystemState()
    snapshot.processes = models.DVAPQL.objects.count()
    snapshot.completed_processes = models.DVAPQL.objects.filter(completed=True).count()
    snapshot.tasks = models.TEvent.objects.count()
    snapshot.pending_tasks = models.TEvent.objects.filter(started=False).count()
    snapshot.completed_tasks = models.TEvent.objects.filter(started=True, completed=True).count()
    snapshot.save()
def launch_tasks(k, dt, inject_filters, map_filters=None, launch_type=""):
    """Create one child TEvent per map filter for next-task spec `k` and
    enqueue each; returns the list of celery task ids.

    Non-processing tasks get no video; training tasks get a TrainingSet
    resolved from the spec (or inherited from the parent event).
    """
    v = dt.video
    op = k['operation']
    p = dt.parent_process
    if map_filters is None:
        map_filters = [{}, ]
    tids = []
    for f in map_filters:
        args = perform_substitution(k['arguments'], dt, inject_filters, f)
        logging.info("launching {} -> {} with args {} as specified in {}".format(
            dt.operation, op, args, launch_type))
        q, op = get_queue_name_and_operation(k['operation'], args)
        if op in settings.NON_PROCESSING_TASKS:
            video_per_task = None
        else:
            if "video_selector" in k:
                video_per_task = Video.objects.get(**k['video_selector'])
            else:
                video_per_task = v
        if op in settings.TRAINING_TASKS:
            # BUG FIX: training sets were being looked up on the Video model;
            # use TrainingSet (consistent with launch_task above).
            if "training_set_selector_id" in k:
                training_set = TrainingSet.objects.get(pk=k['training_set_selector_id'])
            elif "training_set_selector" in k:
                training_set = TrainingSet.objects.get(**k['training_set_selector'])
            else:
                training_set = dt.training_set
        else:
            training_set = None
        # perform_sync tasks may be injected without an explicit group.
        if op == 'perform_sync':
            task_group_id = k.get('task_group_id', -1)
        else:
            task_group_id = k['task_group_id']
        next_task = TEvent.objects.create(video=video_per_task, operation=op, arguments=args,
                                          parent=dt, task_group_id=task_group_id,
                                          parent_process=p, queue=q, training_set=training_set)
        tids.append(app.send_task(k['operation'], args=[next_task.pk, ], queue=q).id)
    return tids
def push(request, video_id):
    """Django view: push data for `video_id` to a VDN server.

    POST with push_type 'annotation' creates a child VDN dataset and uploads
    each selected Region via the server's REST API; push_type 'dataset'
    records an S3 export TEvent, registers a root VDN dataset, and enqueues
    the push task. Always renders push.html listing servers and the video's
    pushable annotations.
    """
    video = Video.objects.get(pk=video_id)
    if request.method == 'POST':
        push_type = request.POST.get('push_type')
        server = VDNServer.objects.get(pk=request.POST.get('server_pk'))
        token = request.POST.get('token_{}'.format(server.pk))
        # Remember the most recent auth token supplied for this server.
        server.last_token = token
        server.save()
        server_url = server.url
        if not server_url.endswith('/'):
            server_url += '/'
        headers = {'Authorization': 'Token {}'.format(server.last_token)}
        if push_type == 'annotation':
            new_vdn_dataset = create_child_vdn_dataset(video, server, headers)
            # Form fields are named annotation_<region_pk>; truthy value = selected.
            for key in request.POST:
                if key.startswith('annotation_') and request.POST[key]:
                    annotation = Region.objects.get(
                        pk=int(key.split('annotation_')[1]))
                    data = {
                        'label': annotation.label,
                        'metadata_text': annotation.metadata_text,
                        'x': annotation.x,
                        'y': annotation.y,
                        'w': annotation.w,
                        'h': annotation.h,
                        'full_frame': annotation.full_frame,
                        'parent_frame_index': annotation.parent_frame_index,
                        # Dataset pk is the second-to-last component of the URL path.
                        'dataset_id': int(new_vdn_dataset.url.split('/')[-2]),
                    }
                    r = requests.post("{}/api/annotations/".format(server_url),
                                      data=data, headers=headers)
                    if r.status_code == 201:
                        # Mark the annotation as pushed so it is not offered again.
                        annotation.vdn_dataset = new_vdn_dataset
                        annotation.save()
                    else:
                        raise ValueError
        elif push_type == 'dataset':
            key = request.POST.get('key')
            region = request.POST.get('region')
            bucket = request.POST.get('bucket')
            name = request.POST.get('name')
            description = request.POST.get('description')
            s3export = TEvent()
            s3export.event_type = TEvent.S3EXPORT
            s3export.video = video
            s3export.key = key
            s3export.region = region
            s3export.bucket = bucket
            s3export.save()
            create_root_vdn_dataset(s3export, server, headers, name, description)
            task_name = 'push_video_to_vdn_s3'
            app.send_task(task_name, args=[
                s3export.pk, ], queue=settings.TASK_NAMES_TO_QUEUE[task_name])
        else:
            raise NotImplementedError
    servers = VDNServer.objects.all()
    context = {'video': video, 'servers': servers}
    if video.vdn_dataset:
        # Only offer annotations not yet attached to a VDN dataset.
        context['annotations'] = Region.objects.all().filter(
            video=video, vdn_dataset__isnull=True,
            region_type=Region.ANNOTATION)
    else:
        context['annotations'] = Region.objects.all().filter(
            video=video, region_type=Region.ANNOTATION)
    return render(request, 'push.html', context)
def search(request):
    """Django view: run a visual-similarity search from a posted image.

    Creates a Query and a backing query Video, writes the decoded image to
    the media tree, fans out one retrieval task per selected indexer, then
    collects results (waiting up to 120s each) and returns them as JSON.
    NOTE(review): non-POST requests fall through and return None — confirm
    the URL route only accepts POST.
    """
    if request.method == 'POST':
        query = Query()
        count = request.POST.get('count')
        query.count = count
        excluded_index_entries_pk = json.loads(
            request.POST.get('excluded_index_entries'))
        if excluded_index_entries_pk:
            query.excluded_index_entries_pk = [
                int(k) for k in excluded_index_entries_pk
            ]
        selected_indexers = json.loads(request.POST.get('selected_indexers'))
        query.selected_indexers = selected_indexers
        query.save()
        primary_key = query.pk
        # Every query gets a synthetic single-frame "video" to hang results on.
        dv = Video()
        dv.name = 'query_{}'.format(query.pk)
        dv.dataset = True
        dv.query = True
        dv.parent_query = query
        dv.save()
        create_video_folders(dv)
        image_url = request.POST.get('image_url')
        # Strip the leading data-URL prefix (assumed 22 chars,
        # e.g. "data:image/png;base64,") before base64 decoding — TODO confirm.
        image_data = base64.decodestring(image_url[22:])
        query_path = "{}/queries/{}.png".format(settings.MEDIA_ROOT, primary_key)
        query_frame_path = "{}/{}/frames/0.png".format(settings.MEDIA_ROOT, dv.pk)
        with open(query_path, 'w') as fh:
            fh.write(image_data)
        with open(query_frame_path, 'w') as fh:
            fh.write(image_data)
        task_results = {}
        user = request.user if request.user.is_authenticated() else None
        # One retrieval task per selected visual index.
        for visual_index_name, visual_index in settings.VISUAL_INDEXES.iteritems(
        ):
            task_name = visual_index['retriever_task']
            if visual_index_name in selected_indexers:
                task_results[visual_index_name] = app.send_task(
                    task_name,
                    args=[
                        primary_key,
                    ],
                    queue=settings.TASK_NAMES_TO_QUEUE[task_name])
        query.user = user
        query.save()
        results = []
        results_detections = []
        time_out = False
        for visual_index_name, result in task_results.iteritems():
            try:
                # Block up to 120s per index; a timeout is reported, not fatal.
                entries = result.get(timeout=120)
            except TimeoutError:
                time_out = True
                entries = {}
            if entries and settings.VISUAL_INDEXES[visual_index_name][
                    'detection_specific']:
                # Detection-specific indexes return detection-level matches.
                for algo, rlist in entries.iteritems():
                    for r in rlist:
                        r['url'] = '/media/{}/detections/{}.jpg'.format(
                            r['video_primary_key'], r['detection_primary_key'])
                        d = Detection.objects.get(
                            pk=r['detection_primary_key'])
                        r['result_detect'] = True
                        r['frame_primary_key'] = d.frame_id
                        r['result_type'] = 'detection'
                        r['detection'] = [
                            {
                                'pk': d.pk,
                                'name': d.object_name,
                                'confidence': d.confidence
                            },
                        ]
                        results_detections.append(r)
            elif entries:
                # Frame-level matches; attach all detections found in each frame.
                for algo, rlist in entries.iteritems():
                    for r in rlist:
                        r['url'] = '/media/{}/frames/{}.jpg'.format(
                            r['video_primary_key'], r['frame_index'])
                        r['detections'] = [{
                            'pk': d.pk,
                            'name': d.object_name,
                            'confidence': d.confidence
                        } for d in Detection.objects.filter(
                            frame_id=r['frame_primary_key'])]
                        r['result_type'] = 'frame'
                        results.append(r)
        return JsonResponse(
            data={
                'task_id': "",
                'time_out': time_out,
                'primary_key': primary_key,
                'results': results,
                'results_detections': results_detections
            })
def detections(request):
    """Django view: custom-detector dashboard.

    GET renders videos, detectors and per-detector stats. POST actions:
    'detect' enqueues detection of a video with a chosen detector;
    'estimate' previews the training dataset for given labels/objects;
    any other action creates a CustomDetector and enqueues YOLO training.
    """
    context = {}
    context["videos"] = Video.objects.all().filter(
        parent_query__count__isnull=True)
    context["detectors"] = CustomDetector.objects.all()
    detector_stats = []
    for d in CustomDetector.objects.all():
        # class_distribution is stored as JSON text; may be blank.
        class_dist = json.loads(
            d.class_distribution) if d.class_distribution.strip() else {}
        detector_stats.append({
            'name': d.name,
            'classes': class_dist,
            'frames_count': d.frames_count,
            'boxes_count': d.boxes_count,
            'pk': d.pk
        })
    context["detector_stats"] = detector_stats
    if request.method == 'POST':
        if request.POST.get('action') == 'detect':
            detector_pk = request.POST.get('detector_pk')
            video_pk = request.POST.get('video_pk')
            task_name = "detect_custom_objects"
            apply_event = TEvent()
            apply_event.video_id = video_pk
            apply_event.operation = task_name
            apply_event.arguments_json = json.dumps(
                {'detector_pk': int(detector_pk)})
            apply_event.save()
            app.send_task(name=task_name, args=[
                apply_event.pk,
            ], queue=settings.TASK_NAMES_TO_QUEUE[task_name])
        elif request.POST.get('action') == 'estimate':
            args = request.POST.get('args')
            args = json.loads(args) if args.strip() else {}
            args['name'] = request.POST.get('name')
            # Comma-separated form fields -> cleaned lists.
            args['labels'] = [
                k.strip() for k in request.POST.get('labels').split(',')
                if k.strip()
            ]
            args['object_names'] = [
                k.strip() for k in request.POST.get('object_names').split(',')
                if k.strip()
            ]
            args['excluded_videos'] = request.POST.getlist('excluded_videos')
            labels = set(args['labels']) if 'labels' in args else set()
            object_names = set(
                args['object_names']) if 'object_names' in args else set()
            # Dry-run dataset assembly for preview; nothing is persisted.
            class_distribution, class_names, rboxes, rboxes_set, frames, i_class_names = create_detector_dataset(
                object_names, labels)
            context["estimate"] = {
                'args': args,
                'class_distribution': class_distribution,
                'class_names': class_names,
                'rboxes': rboxes,
                'rboxes_set': rboxes_set,
                'frames': frames,
                'i_class_names': i_class_names
            }
        else:
            # Default action: create the detector and launch training.
            args = request.POST.get('args')
            args = json.loads(args) if args.strip() else {}
            args['name'] = request.POST.get('name')
            args['labels'] = [
                k.strip() for k in request.POST.get('labels').split(',')
                if k.strip()
            ]
            args['object_names'] = [
                k.strip() for k in request.POST.get('object_names').split(',')
                if k.strip()
            ]
            args['excluded_videos'] = request.POST.getlist('excluded_videos')
            detector = CustomDetector()
            detector.name = args['name']
            detector.algorithm = "yolo"
            detector.arguments = json.dumps(args)
            detector.save()
            args['detector_pk'] = detector.pk
            task_name = "train_yolo_detector"
            train_event = TEvent()
            train_event.operation = task_name
            train_event.arguments_json = json.dumps(args)
            train_event.save()
            # Link the detector to the training event that produces it.
            detector.source = train_event
            detector.save()
            app.send_task(name=task_name, args=[
                train_event.pk,
            ], queue=settings.TASK_NAMES_TO_QUEUE[task_name])
    return render(request, 'detections.html', context)
def send_tasks(self):
    """Enqueue one execute_index_subquery task per indexer query, storing the
    AsyncResult (and an empty result list) keyed by algorithm name."""
    for subquery in self.indexer_queries:
        retriever_queue = self.visual_indexes[subquery.algorithm]['retriever_queue']
        self.task_results[subquery.algorithm] = app.send_task(
            'execute_index_subquery', args=[subquery.pk, ], queue=retriever_queue)
        self.context[subquery.algorithm] = []
def launch_process_monitor(self):
    """Create a perform_process_monitoring TEvent for this process and enqueue
    it on the reducer queue."""
    monitor = TEvent.objects.create(
        operation="perform_process_monitoring",
        arguments={},
        parent=None,
        task_group_id=-1,
        parent_process=self.process,
        queue=settings.Q_REDUCER,
    )
    app.send_task(name=monitor.operation, args=[monitor.pk, ], queue=monitor.queue)
def import_video_by_id(task_id):
    """Celery task: import a video's export zip into the media tree.

    If the video references a VDN dataset and is not yet uploaded: either
    delegate to an S3 import task (requester-pays buckets; returns 0 early)
    or download the zip over HTTP. The zip is then extracted, flattened one
    directory level, and its table_data.json imported via serializers.
    """
    start = TEvent.objects.get(pk=task_id)
    # Record celery bookkeeping on the TEvent row.
    start.task_id = import_video_by_id.request.id
    start.started = True
    start.operation = import_video_by_id.name
    start.save()
    start_time = time.time()
    video_id = start.video_id
    video_obj = Video.objects.get(pk=video_id)
    if video_obj.vdn_dataset and not video_obj.uploaded:
        output_filename = "{}/{}/{}.zip".format(settings.MEDIA_ROOT,
                                                video_obj.pk, video_obj.pk)
        if video_obj.vdn_dataset.aws_requester_pays:
            # Requester-pays S3 source: hand off to the S3 import task and
            # finish this event immediately.
            s3import = TEvent()
            s3import.video = video_obj
            s3import.key = video_obj.vdn_dataset.aws_key
            s3import.region = video_obj.vdn_dataset.aws_region
            s3import.bucket = video_obj.vdn_dataset.aws_bucket
            s3import.requester_pays = True
            s3import.operation = "import_video_from_s3"
            s3import.save()
            app.send_task(
                s3import.operation,
                args=[
                    s3import.pk,
                ],
                queue=settings.TASK_NAMES_TO_QUEUE[s3import.operation])
            start.completed = True
            start.seconds = time.time() - start_time
            start.save()
            return 0
        else:
            # Dropbox share links need ?dl=1 to serve the raw file.
            if 'www.dropbox.com' in video_obj.vdn_dataset.download_url and not video_obj.vdn_dataset.download_url.endswith(
                    '?dl=1'):
                r = requests.get(video_obj.vdn_dataset.download_url + '?dl=1')
            else:
                r = requests.get(video_obj.vdn_dataset.download_url)
            with open(output_filename, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
            r.close()
    # NOTE(review): indentation below reconstructed from a collapsed source;
    # assumed the extract/import steps run for already-uploaded zips too — confirm.
    video_obj.uploaded = True
    video_obj.save()
    zipf = zipfile.ZipFile(
        "{}/{}/{}.zip".format(settings.MEDIA_ROOT, video_id, video_id), 'r')
    zipf.extractall("{}/{}/".format(settings.MEDIA_ROOT, video_id))
    zipf.close()
    video_root_dir = "{}/{}/".format(settings.MEDIA_ROOT, video_id)
    old_key = None
    # Flatten: if the zip extracted into a single subdirectory, hoist its
    # contents up into the video root and remove the wrapper directory.
    for k in os.listdir(video_root_dir):
        unzipped_dir = "{}{}".format(video_root_dir, k)
        if os.path.isdir(unzipped_dir):
            for subdir in os.listdir(unzipped_dir):
                shutil.move("{}/{}".format(unzipped_dir, subdir),
                            "{}".format(video_root_dir))
            shutil.rmtree(unzipped_dir)
            break
    with open("{}/{}/table_data.json".format(settings.MEDIA_ROOT,
                                             video_id)) as input_json:
        video_json = json.load(input_json)
    serializers.import_video_json(video_obj, video_json, video_root_dir)
    # The source zip is no longer needed after extraction + import.
    source_zip = "{}/{}.zip".format(video_root_dir, video_obj.pk)
    os.remove(source_zip)
    start.completed = True
    start.seconds = time.time() - start_time
    start.save()
def process_video_next(video_id, current_task_name):
    """Enqueue every follow-up task configured for `current_task_name` on its
    designated queue, passing the video id through."""
    followups = settings.POST_OPERATION_TASKS.get(current_task_name, [])
    for followup in followups:
        app.send_task(followup, args=[video_id, ],
                      queue=settings.TASK_NAMES_TO_QUEUE[followup])