def retry_task(request):
    """Re-launch a previously executed task as a fresh single-step DVAPQL process."""
    event = TEvent.objects.get(pk=int(request.POST.get('pk')))
    # Repeat the original operation with its original arguments, verbatim.
    retry_spec = {
        'process_type': DVAPQL.PROCESS,
        'map': [{'operation': event.operation, 'arguments': event.arguments}],
    }
    proc = DVAPQLProcess()
    proc.create_from_json(retry_spec)
    proc.launch()
    return redirect('/processes/')
def perform_training(training_set_pk, args, user=None):
    """Launch a DVAPQL process that runs perform_training on one training set.

    Returns the pk of the created process. Note: `args` is mutated in place
    (a 'selector' key is added), matching the original contract.
    """
    args['selector'] = {"pk": training_set_pk}
    training_spec = {
        'process_type': dvaapp.models.DVAPQL.PROCESS,
        'map': [{"operation": "perform_training", "arguments": args}],
    }
    proc = DVAPQLProcess()
    proc.create_from_json(training_spec, user)
    proc.launch()
    return proc.process.pk
def init_process():
    """Bootstrap an initial DVAPQL process from the file named in INIT_PROCESS.

    If the path is outside the bundled custom_defaults directory, the spec is
    fetched to a local "temp.json" first. The process is only created and
    launched when no DVAPQL objects exist yet (first boot).
    """
    if 'INIT_PROCESS' in os.environ:
        path = os.environ.get('INIT_PROCESS', None)
        if path and path.strip():
            if not path.startswith('/root/DVA/configs/custom_defaults/'):
                # Non-default spec: download it locally before loading.
                get_path_to_file(path, "temp.json")
                path = 'temp.json'
            try:
                jspec = json.load(file(path))
            except Exception:
                # Narrowed from a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt; failure is logged, not fatal.
                logging.exception("could not load : {}".format(path))
            else:
                p = DVAPQLProcess()
                if DVAPQL.objects.count() == 0:
                    p.create_from_json(jspec)
                    p.launch()
def submit(path): """ Submit a DVAPQL process to run :param path: """ import django sys.path.append(os.path.dirname(__file__)) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings") django.setup() from dvaapp.processing import DVAPQLProcess with open(path) as f: j = json.load(f) p = DVAPQLProcess() p.create_from_json(j) p.launch() print "launched Process with id {} ".format(p.process.pk)
def search(request):
    """Run a visual query synchronously and return its results as JSON."""
    if request.method == 'POST':
        query_proc = DVAPQLProcess()
        view_shared.create_query_from_request(query_proc, request)
        query_proc.launch()
        # Block until all query tasks complete before collecting results.
        query_proc.wait()
        collected = view_shared.collect(query_proc)
        payload = {
            'task_id': "",
            'primary_key': query_proc.process.pk,
            'results': collected['results'],
            'regions': collected['regions'],
            'url': '{}queries/{}.png'.format(settings.MEDIA_URL, query_proc.process.pk),
        }
        return JsonResponse(data=payload)
def get_context_data(self, **kwargs):
    """Assemble template context (results, regions, task counts) for one visual-search process."""
    context = super(VisualSearchDetail, self).get_context_data(**kwargs)
    qp = DVAPQLProcess(process=context['object'], media_dir=settings.MEDIA_ROOT)
    collected = view_shared.collect(qp)
    context['results'] = collected['results'].items()
    # Flatten each region's nested result dict into item pairs for the template.
    flattened_regions = []
    for region in collected['regions']:
        if region.get('results'):
            region['results'] = region['results'].items()
        flattened_regions.append(region)
    context['regions'] = flattened_regions
    script = context['object'].script
    script[u'image_data_b64'] = "<excluded>"  # avoid dumping the full image payload into the page
    context['plan'] = script
    events = models.TEvent.objects.all()
    context['pending_tasks'] = events.filter(parent_process=self.object, started=False,
                                             errored=False).count()
    context['running_tasks'] = events.filter(parent_process=self.object, started=True,
                                             completed=False, errored=False).count()
    context['successful_tasks'] = events.filter(parent_process=self.object, completed=True).count()
    context['errored_tasks'] = events.filter(parent_process=self.object, errored=True).count()
    context['url'] = '{}queries/{}.png'.format(settings.MEDIA_URL, self.object.uuid)
    return context
def perform_model_export(model_pk, user=None):
    """Queue a perform_export task for the trained model with the given pk.

    Returns the pk of the launched DVAPQL process.
    """
    export_spec = {
        'process_type': dvaapp.models.DVAPQL.PROCESS,
        'map': [{
            "operation": "perform_export",
            "arguments": {'trainedmodel_selector': {"pk": model_pk}},
        }],
    }
    proc = DVAPQLProcess()
    proc.create_from_json(export_spec, user)
    proc.launch()
    return proc.process.pk
def export_video(request):
    """Export a single video either to an S3 bucket or to a local file."""
    if request.method == 'POST':
        video = Video.objects.get(pk=request.POST.get('video_id'))
        export_method = request.POST.get('export_method')
        if video:
            # Destination-specific arguments for the single export task.
            if export_method == 's3':
                arguments = {
                    'key': request.POST.get('key'),
                    'bucket': request.POST.get('bucket'),
                    'region': request.POST.get('region', 'us-east-1'),
                    'destination': 'S3',
                }
            else:
                arguments = {'destination': 'FILE'}
            process_spec = {
                'process_type': DVAPQL.PROCESS,
                'tasks': [{'video_id': video.pk,
                           'operation': 'perform_export',
                           'arguments': arguments}, ],
            }
            proc = DVAPQLProcess()
            proc.create_from_json(process_spec)
            proc.launch()
            return redirect('video_list')
    else:
        raise NotImplementedError
def create_annotation(form, object_name, labels, frame, user=None):
    """Create a Region annotation (plus UI label links) on a frame via DVAPQL.

    :param form: validated annotation form (coordinates, text, metadata, high_level flag)
    :param object_name: name stored on the region
    :param labels: iterable of label names; blank names are skipped
    :param frame: frame supplying frame/segment indices and video id
    :param user: optional user recorded on the process
    """
    annotation = {'object_name': object_name}
    if form.cleaned_data['high_level']:
        # Full-frame annotation: coordinates are irrelevant, zero them out.
        annotation['full_frame'] = True
        annotation['x'] = 0
        annotation['y'] = 0
        annotation['h'] = 0
        annotation['w'] = 0
    else:
        annotation['full_frame'] = False
        annotation['x'] = form.cleaned_data['x']
        annotation['y'] = form.cleaned_data['y']
        annotation['h'] = form.cleaned_data['h']
        annotation['w'] = form.cleaned_data['w']
    annotation['text'] = form.cleaned_data['text']
    annotation['metadata'] = form.cleaned_data['metadata']
    # BUG FIX: was `type(...) is basestring`, which is never true because
    # basestring is abstract (instances are str/unicode) — submitted metadata
    # was therefore always discarded. isinstance handles both string types.
    if isinstance(annotation['metadata'], basestring) and annotation['metadata'].strip():
        annotation['metadata'] = json.loads(annotation['metadata'])
    else:
        annotation['metadata'] = None
    annotation['frame_index'] = frame.frame_index
    annotation['segment_index'] = frame.segment_index
    annotation['per_event_index'] = 0
    annotation['video_id'] = frame.video_id
    annotation['region_type'] = dvaapp.models.Region.ANNOTATION
    label_specs = [{'name': lname, 'set': 'UI'} for lname in labels if lname.strip()]
    spec = {
        'process_type': dvaapp.models.DVAPQL.PROCESS,
        'create': [{'MODEL': 'Region', 'spec': annotation, 'labels': label_specs}],
    }
    p = DVAPQLProcess()
    p.create_from_json(spec, user)
    p.launch()
    return
def export_video(request):
    """Launch a perform_export process for one video, optionally to a remote path."""
    if request.method == 'POST':
        video = Video.objects.get(pk=request.POST.get('video_id'))
        export_method = request.POST.get('export_method')
        if video:
            arguments = {'video_selector': {'pk': video.pk}}
            if export_method == 's3':
                # Remote export: the destination path comes from the form.
                arguments['path'] = request.POST.get('path')
            process_spec = {
                'process_type': DVAPQL.PROCESS,
                'map': [{'operation': 'perform_export', 'arguments': arguments}, ],
            }
            proc = DVAPQLProcess()
            proc.create_from_json(process_spec)
            proc.launch()
            return redirect('video_list')
    else:
        raise NotImplementedError
def assign_video_labels(request):
    """Attach comma-separated label names (label set "UI") to a video."""
    if request.method != 'POST':
        raise NotImplementedError
    video = Video.objects.get(pk=request.POST.get('video_pk'))
    creations = []
    for name in request.POST.get('labels').split(','):
        if not name.strip():
            continue  # skip empty entries from stray commas
        label = Label.objects.get_or_create(name=name, set="UI")[0]
        creations.append({'MODEL': 'VideoLabel',
                          'spec': {'video_id': video.pk, 'label_id': label.id}})
    proc = DVAPQLProcess()
    proc.create_from_json({'process_type': DVAPQL.PROCESS, 'create': creations, },
                          user=request.user if request.user.is_authenticated else None)
    proc.launch()
    return redirect('video_detail', pk=video.pk)
def create_retriever(name, algorithm, filters, indexer_shasum, approximator_shasum, user=None):
    """Create a Retriever model through a DVAPQL process; return the process pk."""
    retriever_spec = {
        "name": name,
        "algorithm": algorithm,
        "indexer_shasum": indexer_shasum,
        "approximator_shasum": approximator_shasum,
        "source_filters": filters,
    }
    proc = DVAPQLProcess()
    proc.create_from_json({
        'process_type': dvaapp.models.DVAPQL.PROCESS,
        'create': [{"MODEL": "Retriever", "spec": retriever_spec}],
    }, user)
    proc.launch()
    return proc.process.pk
def detect_objects(request):
    """Run a custom detector (selected by pk) over a video, then show the process."""
    if request.method != 'POST':
        raise ValueError
    detector_pk = request.POST.get('detector_pk')
    video_pk = request.POST.get('video_pk')
    spec = {
        "process_type": DVAPQL.PROCESS,
        "tasks": [{'operation': "perform_detection",
                   'arguments': {'detector_pk': int(detector_pk), 'detector': "custom"},
                   'video_id': video_pk}],
    }
    proc = DVAPQLProcess()
    proc.create_from_json(j=spec,
                          user=request.user if request.user.is_authenticated else None)
    proc.launch()
    return redirect('process_detail', pk=proc.process.pk)
def model_apply(model_pk, video_pks, filters, target, segments_batch_size, frames_batch_size, user=None):
    """Apply a trained model (indexer/detector/analyzer) to each listed video.

    Builds one task per video, choosing frame- or segment-batching based on
    whether the video is a dataset. Returns the launched process pk.
    """
    trained_model = TrainedModel.objects.get(pk=model_pk)
    # Map each model type to (task operation, name of its pk argument).
    dispatch = {
        TrainedModel.INDEXER: ('perform_indexing', 'indexer_pk'),
        TrainedModel.DETECTOR: ('perform_detection', 'detector_pk'),
        TrainedModel.ANALYZER: ('perform_analysis', 'analyzer_pk'),
    }
    if trained_model.model_type in dispatch:
        operation, pk_arg = dispatch[trained_model.model_type]
        args = {pk_arg: model_pk, 'filters': filters, 'target': target}
    else:
        # Unknown model type: preserve original behavior (empty task spec).
        operation = ""
        args = {}
    spec = {'process_type': DVAPQL.PROCESS, 'tasks': []}
    for vpk in video_pks:
        dv = Video.objects.get(pk=vpk)
        task_args = deepcopy(args)
        # Datasets are processed frame-wise, plain videos segment-wise.
        if dv.dataset:
            task_args['frames_batch_size'] = frames_batch_size
        else:
            task_args['segments_batch_size'] = segments_batch_size
        spec['tasks'].append({'operation': operation,
                              'arguments': task_args,
                              'video_id': vpk})
    proc = DVAPQLProcess()
    proc.create_from_json(spec, user)
    proc.launch()
    return proc.process.pk
def create_retriever(request):
    """Create a Retriever (LOPQ or EXACT) from form data via a DVAPQL process.

    LOPQ retrievers queue a follow-up perform_retriever_creation task;
    EXACT retrievers are marked built immediately.
    """
    if request.method == 'POST':
        spec = {}
        if request.POST.get('retriever_type') == Retriever.LOPQ:
            spec['name'] = request.POST.get('name')
            spec['algorithm'] = Retriever.LOPQ
            # LOPQ hyper-parameters straight from the form.
            spec['arguments'] = {
                'components': request.POST.get('components'),
                'sub': request.POST.get('sub'),
                'm': request.POST.get('m'),
                'v': request.POST.get('v'),
            }
            if request.POST.get('source_filters', None):
                # BUG FIX: previously read the non-existent POST key
                # 'source_filter' (missing "s"), so submitted filters were
                # silently replaced by the '{}' default.
                spec['source_filters'] = json.loads(request.POST.get('source_filters', '{}'))
            else:
                spec['source_filters'] = {
                    'indexer_shasum': TrainedModel.objects.get(
                        name=request.POST.get('algorithm'),
                        model_type=TrainedModel.INDEXER).shasum}
            next_tasks = [{'operation': "perform_retriever_creation",
                           'arguments': {'retriever_pk': '__pk__'}, }, ]
        elif request.POST.get('retriever_type') == Retriever.EXACT:
            spec['name'] = request.POST.get('name')
            spec['last_built'] = '__timezone.now__'
            spec['source_filters'] = json.loads(request.POST.get('source_filters', '{}'))
            spec['algorithm'] = Retriever.EXACT
            next_tasks = []
        else:
            raise ValueError
        if spec:
            p = DVAPQLProcess()
            p.create_from_json(j={"process_type": DVAPQL.PROCESS,
                                  "create": [{'MODEL': 'Retriever',
                                              'spec': spec,
                                              'tasks': next_tasks}],
                                  },
                               user=request.user if request.user.is_authenticated else None)
            p.launch()
        return redirect('retrievers')
def yt(request):
    """Queue download (via youtube-dl) and full processing of a video by URL."""
    if request.method == 'POST':
        form = YTVideoForm(request.POST, request.FILES)
        user = request.user if request.user.is_authenticated else None
        if not form.is_valid():
            raise ValueError
        # Innermost step: decode segments using the default processing plan.
        decode_task = {'operation': 'perform_video_decode',
                       'arguments': {'rate': settings.DEFAULT_RATE,
                                     'segments_batch_size': settings.DEFAULT_SEGMENTS_BATCH_SIZE,
                                     'map': json.load(file("../configs/custom_defaults/video_processing.json"))}}
        segmentation_task = {'operation': 'perform_video_segmentation',
                             'arguments': {'map': [decode_task]}, }
        process_spec = {
            'process_type': DVAPQL.PROCESS,
            'create': [{'spec': {'name': form.cleaned_data['name'],
                                 'uploader_id': user.pk if user else None,
                                 'url': form.cleaned_data['url'],
                                 'created': '__timezone.now__'},
                        'MODEL': 'Video', }, ],
            'map': [{'video_id': '__created__0',
                     'operation': 'perform_import',
                     'arguments': {'force_youtube_dl': True,
                                   'map': [segmentation_task, ]}}, ],
        }
        proc = DVAPQLProcess()
        proc.create_from_json(process_spec, user)
        proc.launch()
    else:
        raise NotImplementedError
    return redirect('video_list')
def ci():
    """Perform Continuous Integration testing using Travis.

    End-to-end smoke test: uploads sample videos/datasets, runs extraction,
    segmentation, indexing, detection, transformation, export/import, builds
    a LOPQ retriever, executes one visual query, and finally imports a VDN
    dataset if available. Assumes it is run from the repo root with a
    configured database and test fixtures under tests/.
    """
    import django
    sys.path.append(os.path.dirname(__file__))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
    django.setup()
    import base64
    from django.core.files.uploadedfile import SimpleUploadedFile
    from dvaui.view_shared import handle_uploaded_file, pull_vdn_list \
        , import_vdn_dataset_url
    from dvaapp.models import Video, TEvent, VDNServer, DVAPQL, Retriever, DeepModel
    from django.conf import settings
    from dvaapp.processing import DVAPQLProcess
    from dvaapp.tasks import perform_dataset_extraction, perform_indexing, perform_export, perform_import, \
        perform_retriever_creation, perform_detection, \
        perform_video_segmentation, perform_transformation
    # Upload the small CI sample videos.
    for fname in glob.glob('tests/ci/*.mp4'):
        name = fname.split('/')[-1].split('.')[0]
        f = SimpleUploadedFile(fname, file(fname).read(), content_type="video/mp4")
        handle_uploaded_file(f, name, False)
    # The larger fixtures are skipped on macOS dev machines.
    if sys.platform != 'darwin':
        for fname in glob.glob('tests/*.mp4'):
            name = fname.split('/')[-1].split('.')[0]
            f = SimpleUploadedFile(fname, file(fname).read(), content_type="video/mp4")
            handle_uploaded_file(f, name, False)
        for fname in glob.glob('tests/*.zip'):
            name = fname.split('/')[-1].split('.')[0]
            f = SimpleUploadedFile(fname, file(fname).read(), content_type="application/zip")
            handle_uploaded_file(f, name)
    # Process every uploaded video synchronously.
    for i, v in enumerate(Video.objects.all()):
        if v.dataset:
            arguments = {'sync': True}
            perform_dataset_extraction(
                TEvent.objects.create(video=v, arguments=arguments).pk)
        else:
            arguments = {'sync': True}
            perform_video_segmentation(
                TEvent.objects.create(video=v, arguments=arguments).pk)
        arguments = {'index': 'inception', 'target': 'frames'}
        perform_indexing(
            TEvent.objects.create(video=v, arguments=arguments).pk)
        if i == 0:
            # save travis time by just running detection on first video
            # face_mtcnn
            arguments = {'detector': 'face'}
            dt = TEvent.objects.create(video=v, arguments=arguments)
            perform_detection(dt.pk)
            arguments = {'filters': {'event_id': dt.pk}, }
            perform_transformation(
                TEvent.objects.create(video=v, arguments=arguments).pk)
            # coco_mobilenet
            arguments = {'detector': 'coco'}
            dt = TEvent.objects.create(video=v, arguments=arguments)
            perform_detection(dt.pk)
            arguments = {'filters': {'event_id': dt.pk}, }
            perform_transformation(
                TEvent.objects.create(video=v, arguments=arguments).pk)
            # inception on crops from detector
            arguments = {'index': 'inception', 'target': 'regions',
                         'filters': {'event_id': dt.pk, 'w__gte': 50, 'h__gte': 50}}
            perform_indexing(
                TEvent.objects.create(video=v, arguments=arguments).pk)
            # assign_open_images_text_tags_by_id(TEvent.objects.create(video=v).pk)
            # Round-trip test: export the video to a file, then re-import it.
            temp = TEvent.objects.create(video=v, arguments={'destination': "FILE"})
            perform_export(temp.pk)
            temp.refresh_from_db()  # pick up the file_name written by the export task
            fname = temp.arguments['file_name']
            f = SimpleUploadedFile(fname, file("{}/exports/{}".format(
                settings.MEDIA_ROOT, fname)).read(), content_type="application/zip")
            vimported = handle_uploaded_file(f, fname)
            perform_import(
                TEvent.objects.create(video=vimported, arguments={"source": "LOCAL"}).pk)
    # Build a LOPQ retriever over the inception index.
    dc = Retriever()
    args = {}
    args['components'] = 32
    args['m'] = 8
    args['v'] = 8
    args['sub'] = 64
    dc.algorithm = Retriever.LOPQ
    dc.source_filters = {
        'indexer_shasum': DeepModel.objects.get(name="inception",
                                                model_type=DeepModel.INDEXER).shasum
    }
    dc.arguments = args
    dc.save()
    clustering_task = TEvent()
    clustering_task.arguments = {'retriever_pk': dc.pk}
    clustering_task.operation = 'perform_retriever_creation'
    clustering_task.save()
    perform_retriever_creation(clustering_task.pk)
    # Execute one visual query end to end: index the query image, retrieve top 20.
    query_dict = {
        'process_type': DVAPQL.QUERY,
        'image_data_b64': base64.encodestring(file('tests/query.png').read()),
        'tasks': [{
            'operation': 'perform_indexing',
            'arguments': {
                'index': 'inception',
                'target': 'query',
                'next_tasks': [{
                    'operation': 'perform_retrieval',
                    'arguments': {'count': 20,
                                  'retriever_pk': Retriever.objects.get(name='inception').pk}
                }]
            }
        }]
    }
    launch_workers_and_scheduler_from_environment()
    qp = DVAPQLProcess()
    qp.create_from_json(query_dict)
    qp.launch()
    qp.wait()
    # Finally, import the MSCOCO sample dataset from VDN if it is listed.
    server, datasets, detectors = pull_vdn_list(1)
    for k in datasets:
        if k['name'] == 'MSCOCO_Sample_500':
            print 'FOUND MSCOCO SAMPLE'
            import_vdn_dataset_url(VDNServer.objects.get(pk=1), k['url'], None, k)
def validate_process(request):
    """Parse a user-submitted DVAPQL script and validate it without launching."""
    if request.method == 'POST':
        script = json.loads(request.POST.get('script'))
        owner = request.user if request.user.is_authenticated else None
        proc = DVAPQLProcess()
        proc.create_from_json(j=script, user=owner)
        proc.validate()  # validate only — the process is never launched here
        return redirect("process_detail", pk=proc.process.pk)
def import_s3(request):
    """Import remote paths (s3:// or gs://), creating one Video per input line.

    The follow-up plan depends on the file type: .dva_export.zip needs none,
    other .zip files are datasets, anything else is segmented and decoded.
    """
    if request.method != 'POST':
        raise NotImplementedError
    user = request.user if request.user.is_authenticated else None
    create = []
    for key in request.POST.get('key').strip().split('\n'):
        if not (key.startswith('gs://') or key.startswith('s3://')):
            raise NotImplementedError(
                "{} startswith an unknown remote store prefix".format(key))
        key = key.strip()
        if not key:
            continue
        extract_task = {'arguments': {'next_tasks': defaults.DEFAULT_PROCESSING_PLAN_DATASET},
                        'operation': 'perform_dataset_extraction'}
        segment_decode_task = {'operation': 'perform_video_segmentation',
                               'arguments': {'next_tasks': [
                                   {'operation': 'perform_video_decode',
                                    'arguments': {
                                        'segments_batch_size': defaults.DEFAULT_SEGMENTS_BATCH_SIZE,
                                        'next_tasks': defaults.DEFAULT_PROCESSING_PLAN_VIDEO}}]}, }
        if key.endswith('.dva_export.zip'):
            next_tasks = []
        elif key.endswith('.zip'):
            next_tasks = [extract_task, ]
        else:
            next_tasks = [segment_decode_task, ]
        import_task = {'video_id': '__pk__',
                       'operation': 'perform_import',
                       'arguments': {'path': key,
                                     'source': 'REMOTE',
                                     'next_tasks': next_tasks}}
        create.append({'MODEL': 'Video',
                       'spec': {'uploader_id': user.pk if user else None, 'name': key},
                       'tasks': [import_task]})
    proc = DVAPQLProcess()
    proc.create_from_json({'process_type': DVAPQL.PROCESS, 'create': create}, user)
    proc.launch()
    return redirect('video_list')
detector_type=m.get("detector_type", ""), arguments=m.get("arguments", {}), model_type=TrainedModel.DETECTOR, ) if created: dm.download() else: dm, created = TrainedModel.objects.get_or_create( name=m['name'], mode=m.get('mode', TrainedModel.TENSORFLOW), files=m.get('files', []), algorithm=m.get('algorithm', ""), arguments=m.get("arguments", {}), shasum=m.get('shasum', None), model_type=m['model_type']) if created: dm.download() if 'INIT_PROCESS' in os.environ and DVAPQL.objects.count() == 0: path = os.environ.get('INIT_PROCESS') p = DVAPQLProcess() if not path.startswith('/root/DVA/configs/custom_defaults/'): get_path_to_file(path, "temp.json") path = 'temp.json' try: jspec = json.load(file(path)) except: logging.exception("could not load : {}".format(path)) else: p.create_from_json(jspec) p.launch()
def import_s3(request):
    """Import S3 keys (one per line) from a single bucket/region into new Videos."""
    if request.method != 'POST':
        raise NotImplementedError
    region = request.POST.get('region')
    bucket = request.POST.get('bucket')
    rate = request.POST.get('rate', defaults.DEFAULT_RATE)
    rescale = request.POST.get('rescale', defaults.DEFAULT_RESCALE)
    user = request.user if request.user.is_authenticated else None
    create = []
    for key in request.POST.get('key').strip().split('\n'):
        key = key.strip()
        if not key:
            continue
        extract_task = {'arguments': {'rate': rate,
                                      'rescale': rescale,
                                      'next_tasks': defaults.DEFAULT_PROCESSING_PLAN_DATASET},
                        'operation': 'perform_dataset_extraction'}
        segment_decode_task = {'operation': 'perform_video_segmentation',
                               'arguments': {'next_tasks': [
                                   {'operation': 'perform_video_decode',
                                    'arguments': {
                                        'rate': rate,
                                        'rescale': rescale,
                                        'segments_batch_size': defaults.DEFAULT_SEGMENTS_BATCH_SIZE,
                                        'next_tasks': defaults.DEFAULT_PROCESSING_PLAN_VIDEO}}]}, }
        # Follow-up plan keyed off the file type (export / dataset zip / video).
        if key.endswith('.dva_export.zip'):
            next_tasks = []
        elif key.endswith('.zip'):
            next_tasks = [extract_task, ]
        else:
            next_tasks = [segment_decode_task, ]
        import_task = {'video_id': '__pk__',
                       'operation': 'perform_import',
                       'arguments': {'key': key,
                                     'bucket': bucket,
                                     'region': region,
                                     'source': 'S3',
                                     'next_tasks': next_tasks}}
        create.append({'MODEL': 'Video',
                       'spec': {'uploader_id': user.pk if user else None,
                                'name': "pending S3 import {} s3://{}/{}".format(
                                    region, bucket, key)},
                       'tasks': [import_task]})
    proc = DVAPQLProcess()
    proc.create_from_json({'process_type': DVAPQL.PROCESS, 'create': create}, user)
    proc.launch()
    return redirect('video_list')
def import_s3(request):
    """Import remote (gs:// / s3://) URLs: one Video plus one map-level import task each."""
    if request.method != 'POST':
        raise NotImplementedError
    user = request.user if request.user.is_authenticated else None
    create = []
    map_tasks = []
    counter = 0
    for key in request.POST.get('key').strip().split('\n'):
        if not (key.startswith('gs://') or key.startswith('s3://')):
            raise NotImplementedError(
                "{} startswith an unknown remote store prefix".format(key))
        key = key.strip()
        if not key:
            continue
        dataset_type = False
        extract_task = {'arguments': {
                            'map': json.load(
                                file("../configs/custom_defaults/dataset_processing.json"))},
                        'operation': 'perform_dataset_extraction'}
        segment_decode_task = {'operation': 'perform_video_segmentation',
                               'arguments': {'map': [
                                   {'operation': 'perform_video_decode',
                                    'arguments': {
                                        'segments_batch_size': settings.DEFAULT_SEGMENTS_BATCH_SIZE,
                                        'map': json.load(
                                            file("../configs/custom_defaults/video_processing.json"))}}]}, }
        # Pick the follow-up plan from the file type; zips are datasets.
        if key.endswith('.dva_export.zip'):
            next_tasks = []
        elif key.endswith('.zip'):
            next_tasks = [extract_task, ]
            dataset_type = True
        else:
            next_tasks = [segment_decode_task, ]
        # '__created__N' binds this task to the Nth created Video below.
        map_tasks.append({'video_id': '__created__{}'.format(counter),
                          'operation': 'perform_import',
                          'arguments': {'source': 'REMOTE', 'map': next_tasks}})
        create.append({'MODEL': 'Video',
                       'spec': {'uploader_id': user.pk if user else None,
                                'dataset': dataset_type,
                                'name': key,
                                'url': key}, })
        counter += 1
    process_spec = {'process_type': DVAPQL.PROCESS,
                    'create': create,
                    'map': map_tasks}
    proc = DVAPQLProcess()
    proc.create_from_json(process_spec, user)
    proc.launch()
    return redirect('video_list')
'arguments': { 'index': 'inception', 'target': 'query', 'map': [{ 'operation': 'perform_retrieval', 'arguments': { 'count': 15, 'retriever_pk': Retriever.objects.get(name='inception', algorithm=algo, approximator_shasum=None).pk } }] } }, { 'operation': 'perform_detection', 'arguments': { 'detector': 'coco', 'target': 'query', } }] } qp = DVAPQLProcess() qp.create_from_json(query_dict) qp.launch() qp.wait(timeout=400) print QueryResults.objects.count()