def create_approximator_training_set(name, indexer_shasum, video_pks, user=None):
    """Launch a DVAPQL process that builds an LOPQ approximator TrainingSet.

    The process creates a TrainingSet row restricted to index entries from the
    given videos/indexer, then maps a perform_training_set_creation task over
    the newly created row ('__created__0' is resolved by DVAPQLProcess).
    """
    training_set = {
        "MODEL": "TrainingSet",
        "spec": {
            "name": name,
            "training_task_type": dvaapp.models.TrainingSet.LOPQINDEX,
            "instance_type": dvaapp.models.TrainingSet.INDEX,
            "source_filters": {
                "indexer_shasum": indexer_shasum,
                "video_id__in": video_pks,
            },
        },
    }
    creation_task = {
        "operation": "perform_training_set_creation",
        "arguments": {"training_set_pk": '__created__0'},
    }
    process = DVAPQLProcess()
    process.create_from_json({'process_type': dvaapp.models.DVAPQL.PROCESS,
                              'create': [training_set],
                              "map": [creation_task]}, user)
    process.launch()
def export_video(request):
    """POST handler: queue an export of a video either to a remote path (s3)
    or to a local file, then redirect to the video list.

    Raises NotImplementedError for any non-POST request.
    """
    if request.method != 'POST':
        raise NotImplementedError
    video = Video.objects.get(pk=request.POST.get('video_id'))
    export_method = request.POST.get('export_method')
    if video:
        # The only difference between the two export modes is the task arguments.
        if export_method == 's3':
            task_arguments = {'path': request.POST.get('path')}
        else:
            task_arguments = {'destination': 'FILE'}
        process_spec = {'process_type': DVAPQL.PROCESS,
                        'tasks': [{'video_id': video.pk,
                                   'operation': 'perform_export',
                                   'arguments': task_arguments}]}
        proc = DVAPQLProcess()
        proc.create_from_json(process_spec)
        proc.launch()
        return redirect('video_list')
def index_video(request):
    """POST handler: queue a perform_indexing task over a video's regions.

    Builds region filters from the form (required w__gte/h__gte plus optional
    text/object-name/size filters) and launches a DVAPQL process.

    Bug fixed: the original built the redirect response but never returned it,
    so the view returned None (an error in Django). The redirect is now
    returned to the caller.

    Raises ValueError for non-POST requests.
    """
    if request.method != 'POST':
        raise ValueError
    filters = {
        'region_type__in': request.POST.getlist('region_type__in', []),
        'w__gte': int(request.POST.get('w__gte')),
        'h__gte': int(request.POST.get('h__gte'))
    }
    # Optional string filters are added only when non-empty.
    for optional_key in ['text__contains', 'object_name__contains', 'object_name']:
        if request.POST.get(optional_key, None):
            filters[optional_key] = request.POST.get(optional_key)
    # Optional numeric filters are coerced to int.
    for optional_key in ['h__lte', 'w__lte']:
        if request.POST.get(optional_key, None):
            filters[optional_key] = int(request.POST.get(optional_key))
    args = {'filters': filters, 'index': request.POST.get('visual_index_name')}
    p = DVAPQLProcess()
    spec = {
        'process_type': DVAPQL.PROCESS,
        'tasks': [{'operation': 'perform_indexing',
                   'arguments': args,
                   'video_id': request.POST.get('video_id')}]
    }
    user = request.user if request.user.is_authenticated else None
    p.create_from_json(spec, user)
    p.launch()
    return redirect('process_detail', pk=p.process.pk)
def assign_video_labels(request):
    """POST handler: attach comma-separated labels to a video.

    Each non-blank label name is resolved (or created) in the "UI" label set
    and a VideoLabel row is created through a DVAPQL process.
    """
    if request.method != 'POST':
        raise NotImplementedError
    video = Video.objects.get(pk=request.POST.get('video_pk'))
    to_create = [
        {'MODEL': 'VideoLabel',
         'spec': {'video_id': video.pk,
                  'label_id': Label.objects.get_or_create(name=label_name, set="UI")[0].id}}
        for label_name in request.POST.get('labels').split(',')
        if label_name.strip()
    ]
    proc = DVAPQLProcess()
    proc.create_from_json({'process_type': DVAPQL.PROCESS, 'create': to_create},
                          user=request.user if request.user.is_authenticated else None)
    proc.launch()
    return redirect('video_detail', pk=video.pk)
def train_detector(request):
    """POST handler: create a YOLO Detector and queue its training task.

    Collects training arguments from the form (extra JSON args, name, labels,
    object names, excluded videos), creates a Detector row via DVAPQL with an
    attached perform_detector_training task, and redirects to the process page.

    Cleanup: removed a large commented-out "estimate" branch that referenced
    an undefined `context` variable and could never be revived as-is.

    Raises ValueError for non-POST requests.
    """
    if request.method != 'POST':
        raise ValueError
    args = request.POST.get('args')
    args = json.loads(args) if args.strip() else {}
    args['name'] = request.POST.get('name')
    args['labels'] = [
        k.strip() for k in request.POST.get('labels').split(',') if k.strip()
    ]
    args['object_names'] = [
        k.strip() for k in request.POST.get('object_names').split(',') if k.strip()
    ]
    args['excluded_videos'] = request.POST.getlist('excluded_videos')
    # '__pk__' is substituted with the created Detector's pk by DVAPQLProcess.
    args['detector_pk'] = '__pk__'
    p = DVAPQLProcess()
    p.create_from_json(
        j={
            "process_type": DVAPQL.PROCESS,
            "create": [{
                'MODEL': 'Detector',
                'spec': {
                    'name': args['name'],
                    'arguments': json.dumps(args),
                    'algorithm': 'yolo'
                },
                "tasks": [{
                    'operation': "perform_detector_training",
                    'arguments': args,
                }]
            }]
        },
        user=request.user if request.user.is_authenticated else None)
    p.launch()
    return redirect('process_detail', pk=p.process.pk)
def create_retriever(request):
    """POST handler: create a LOPQ or EXACT Retriever via DVAPQL.

    LOPQ retrievers get clustering parameters (v/m/components/sub) and a
    follow-up perform_retriever_creation task; EXACT retrievers are marked
    built immediately and need no follow-up task.

    Bug fixed: the LOPQ branch checked POST field 'source_filters' but then
    read 'source_filter' (singular), so user-supplied filters were always
    silently replaced by the default '{}'. Both now use 'source_filters'.

    Raises ValueError for an unknown retriever_type.
    """
    if request.method == 'POST':
        spec = {}
        if request.POST.get('retriever_type') == Retriever.LOPQ:
            v = request.POST.get('v')
            m = request.POST.get('m')
            components = request.POST.get('components')
            sub = request.POST.get('sub')
            spec['name'] = request.POST.get('name')
            spec['algorithm'] = Retriever.LOPQ
            args = {}
            args['components'] = components
            args['sub'] = sub
            args['m'] = m
            args['v'] = v
            spec['arguments'] = args
            if request.POST.get('source_filters', None):
                # FIX: was request.POST.get('source_filter', '{}') — key typo.
                spec['source_filters'] = json.loads(
                    request.POST.get('source_filters', '{}'))
            else:
                # Default: restrict to entries produced by the chosen indexer.
                spec['source_filters'] = {
                    'indexer_shasum':
                        DeepModel.objects.get(name=request.POST.get('algorithm'),
                                              model_type=DeepModel.INDEXER).shasum
                }
            next_tasks = [
                {
                    'operation': "perform_retriever_creation",
                    'arguments': {'retriever_pk': '__pk__'},
                },
            ]
        elif request.POST.get('retriever_type') == Retriever.EXACT:
            spec['name'] = request.POST.get('name')
            spec['last_built'] = '__timezone.now__'
            spec['source_filters'] = json.loads(
                request.POST.get('source_filters', '{}'))
            spec['algorithm'] = Retriever.EXACT
            next_tasks = []
        else:
            raise ValueError
        if spec:
            p = DVAPQLProcess()
            p.create_from_json(
                j={
                    "process_type": DVAPQL.PROCESS,
                    "create": [{'MODEL': 'Retriever',
                                'spec': spec,
                                'tasks': next_tasks}],
                },
                user=request.user if request.user.is_authenticated else None)
            p.launch()
        return redirect('retrievers')
def search(request):
    """POST handler: launch a visual query and return its result-page URL."""
    if request.method != 'POST':
        raise ValueError("Only POST method is valid")
    qp = DVAPQLProcess()
    view_shared.create_query_from_request(qp, request)
    qp.launch()
    qp.wait_query()
    return JsonResponse(data={'url': '/queries/{}/'.format(qp.process.pk)})
def import_s3(request):
    """POST handler: import videos/datasets/exports from gs:// or s3:// URIs.

    Each newline-separated key becomes a Video row plus a perform_import task;
    .zip keys are treated as datasets (extraction plan), .dva_export.zip keys
    need no follow-up, anything else gets the segmentation/decode plan.

    Improvement: the dataset/video processing-plan JSON configs are
    loop-invariant but were re-opened and re-parsed for every key; they are
    now loaded once before the loop.

    Raises NotImplementedError for non-POST requests or keys without a known
    remote-store prefix.
    """
    if request.method != 'POST':
        raise NotImplementedError
    keys = request.POST.get('key')
    user = request.user if request.user.is_authenticated else None
    create = []
    map_tasks = []
    counter = 0
    # Hoisted: identical for every key.
    dataset_plan = json.load(file("../configs/custom_defaults/dataset_processing.json"))
    video_plan = json.load(file("../configs/custom_defaults/video_processing.json"))
    for key in keys.strip().split('\n'):
        dataset_type = False
        if key.startswith('gs://') or key.startswith('s3://'):
            key = key.strip()
            if key:
                extract_task = {'arguments': {'map': dataset_plan},
                                'operation': 'perform_dataset_extraction'}
                segment_decode_task = {
                    'operation': 'perform_video_segmentation',
                    'arguments': {
                        'map': [
                            {'operation': 'perform_video_decode',
                             'arguments': {
                                 'segments_batch_size': settings.DEFAULT_SEGMENTS_BATCH_SIZE,
                                 'map': video_plan
                             }}
                        ]},
                }
                if key.endswith('.dva_export.zip'):
                    next_tasks = []
                elif key.endswith('.zip'):
                    next_tasks = [extract_task, ]
                    dataset_type = True
                else:
                    next_tasks = [segment_decode_task, ]
                map_tasks.append({'video_id': '__created__{}'.format(counter),
                                  'operation': 'perform_import',
                                  'arguments': {'source': 'REMOTE',
                                                'map': next_tasks}})
                create.append({'MODEL': 'Video',
                               'spec': {'uploader_id': user.pk if user else None,
                                        'dataset': dataset_type,
                                        'name': key,
                                        'url': key}})
                counter += 1
        else:
            raise NotImplementedError("{} startswith an unknown remote store prefix".format(key))
    process_spec = {'process_type': DVAPQL.PROCESS,
                    'create': create,
                    'map': map_tasks}
    p = DVAPQLProcess()
    p.create_from_json(process_spec, user)
    p.launch()
    return redirect('video_list')
def yt(request):
    """POST handler: import a video from a URL and queue segmentation/decode.

    Raises NotImplementedError for non-POST requests and ValueError when the
    form does not validate.
    """
    if request.method != 'POST':
        raise NotImplementedError
    form = YTVideoForm(request.POST, request.FILES)
    user = request.user if request.user.is_authenticated else None
    if not form.is_valid():
        raise ValueError
    rate = form.cleaned_data['nth']
    rescale = form.cleaned_data['rescale'] if 'rescale' in form.cleaned_data else 0
    video = view_shared.handle_video_url(form.cleaned_data['name'],
                                         form.cleaned_data['url'],
                                         user=user)
    # Build the task chain bottom-up: decode <- segmentation <- import.
    decode_task = {'operation': 'perform_video_decode',
                   'arguments': {'rate': rate,
                                 'rescale': rescale,
                                 'segments_batch_size': defaults.DEFAULT_SEGMENTS_BATCH_SIZE,
                                 'next_tasks': defaults.DEFAULT_PROCESSING_PLAN_VIDEO}}
    segmentation_task = {'video_id': video.pk,
                         'operation': 'perform_video_segmentation',
                         'arguments': {'next_tasks': [decode_task]}}
    import_task = {'video_id': video.pk,
                   'operation': 'perform_import',
                   'arguments': {'source': "URL",
                                 'next_tasks': [segmentation_task]}}
    proc = DVAPQLProcess()
    proc.create_from_json({'process_type': DVAPQL.PROCESS, 'tasks': [import_task]}, user)
    proc.launch()
    return redirect('video_list')
def model_apply(model_pk, video_pks, filters, target, segments_batch_size,
                frames_batch_size, user=None):
    """Queue one model-application task per video and return the process pk.

    The operation is chosen from the trained model's type (indexer, detector
    or analyzer); unknown types produce an empty operation with empty args,
    exactly as before. Datasets are batched by frames, videos by segments.
    """
    trained_model = dvaapp.models.TrainedModel.objects.get(pk=model_pk)
    # Dispatch table replaces the original if/elif chain; the argument dict
    # was identical across all three branches.
    operation_by_type = {
        dvaapp.models.TrainedModel.INDEXER: 'perform_indexing',
        dvaapp.models.TrainedModel.DETECTOR: 'perform_detection',
        dvaapp.models.TrainedModel.ANALYZER: 'perform_analysis',
    }
    operation = operation_by_type.get(trained_model.model_type, "")
    if operation:
        args = {"trainedmodel_selector": {"pk": model_pk},
                'filters': filters,
                'target': target}
    else:
        args = {}
    spec = {'process_type': dvaapp.models.DVAPQL.PROCESS, 'map': []}
    for vpk in video_pks:
        dv = dvaapp.models.Video.objects.get(pk=vpk)
        per_video_args = deepcopy(args)
        if dv.dataset:
            per_video_args['frames_batch_size'] = frames_batch_size
        else:
            per_video_args['segments_batch_size'] = segments_batch_size
        spec['map'].append({'operation': operation,
                            'arguments': per_video_args,
                            'video_id': vpk})
    proc = DVAPQLProcess()
    proc.create_from_json(spec, user)
    proc.launch()
    return proc.process.pk
def submit_process(request):
    """POST handler: launch a DVAPQL process from a submitted script, or
    relaunch an existing process identified by process_pk."""
    if request.method == 'POST':
        process_pk = request.POST.get('process_pk', None)
        if process_pk is not None:
            # Relaunch an existing process.
            proc = DVAPQLProcess(process=DVAPQL.objects.get(pk=process_pk))
        else:
            # Create a fresh process from the submitted JSON script.
            proc = DVAPQLProcess()
            proc.create_from_json(
                j=json.loads(request.POST.get('script')),
                user=request.user if request.user.is_authenticated else None)
        proc.launch()
        return redirect("process_detail", pk=proc.process.pk)
def init_process():
    """Run the base64-encoded DVAPQL spec from INIT_PROCESS, once.

    The spec only runs when no DVAPQL process exists yet (first boot).
    A malformed value is logged and ignored.

    Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``except Exception``.
    """
    if 'INIT_PROCESS' in os.environ:
        try:
            jspec = json.loads(base64.decodestring(os.environ['INIT_PROCESS']))
        except Exception:
            logging.exception("could not decode : {}".format(os.environ['INIT_PROCESS']))
        else:
            p = DVAPQLProcess()
            if DVAPQL.objects.count() == 0:
                p.create_from_json(jspec)
                p.launch()
def search(request):
    """POST handler: run a visual query synchronously and return results,
    regions and the rendered query-image URL as JSON."""
    if request.method == 'POST':
        qp = DVAPQLProcess()
        view_shared.create_query_from_request(qp, request)
        qp.launch()
        qp.wait()
        collected = view_shared.collect(qp)
        payload = {
            'task_id': "",
            'primary_key': qp.process.pk,
            'results': collected['results'],
            'regions': collected['regions'],
            'url': '{}queries/{}.png'.format(settings.MEDIA_URL, qp.process.uuid),
        }
        return JsonResponse(data=payload)
def perform_training(training_set_pk, args, user=None):
    """Queue a perform_training task for a training set; return process pk.

    :param training_set_pk: pk of the TrainingSet to train on.
    :param args: task arguments; a 'selector' entry is added automatically.
    :param user: optional user attributed to the process.

    Fix: the original mutated the caller's ``args`` dict in place (adding
    'selector'), a surprising side effect; a shallow copy is taken first.
    """
    args = dict(args)
    args['selector'] = {"pk": training_set_pk}
    spec = {
        'process_type': dvaapp.models.DVAPQL.PROCESS,
        'map': [{
            "operation": "perform_training",
            "arguments": args
        }]
    }
    p = DVAPQLProcess()
    p.create_from_json(spec, user)
    p.launch()
    return p.process.pk
def yt(request):
    """POST handler: register a YouTube video and queue import, segmentation
    and decode using the default processing plan.

    Raises NotImplementedError for non-POST requests and ValueError when the
    form does not validate.
    """
    if request.method != 'POST':
        raise NotImplementedError
    form = YTVideoForm(request.POST, request.FILES)
    user = request.user if request.user.is_authenticated else None
    if not form.is_valid():
        raise ValueError
    name = form.cleaned_data['name']
    path = form.cleaned_data['url']
    # Task chain assembled bottom-up: decode <- segmentation <- import.
    decode_task = {'operation': 'perform_video_decode',
                   'arguments': {
                       'rate': defaults.DEFAULT_RATE,
                       'segments_batch_size': defaults.DEFAULT_SEGMENTS_BATCH_SIZE,
                       'map': json.load(file("../configs/custom_defaults/video_processing.json"))
                   }}
    segmentation_task = {'operation': 'perform_video_segmentation',
                         'arguments': {'map': [decode_task]}}
    import_task = {'video_id': '__pk__',
                   'operation': 'perform_import',
                   'arguments': {'path': path,
                                 'force_youtube_dl': True,
                                 'map': [segmentation_task]}}
    video_creation = {'spec': {'name': name,
                               'uploader_id': user.pk if user else None,
                               'url': path,
                               'youtube_video': True,
                               'created': '__timezone.now__'},
                      'MODEL': 'Video',
                      'tasks': [import_task]}
    proc = DVAPQLProcess()
    proc.create_from_json({'process_type': DVAPQL.PROCESS,
                           'create': [video_creation]}, user)
    proc.launch()
    return redirect('video_list')
def retry_task(request):
    """Re-queue a previously executed task (TEvent) as a fresh DVAPQL process."""
    event = TEvent.objects.get(pk=int(request.POST.get('pk')))
    retry_spec = {
        'process_type': DVAPQL.PROCESS,
        'map': [{'operation': event.operation,
                 'arguments': event.arguments}],
    }
    proc = DVAPQLProcess()
    proc.create_from_json(retry_spec)
    proc.launch()
    return redirect('/processes/')
def detect_objects(request):
    """POST handler: run a custom detector over a video.

    Raises ValueError for non-POST requests.
    """
    if request.method != 'POST':
        raise ValueError
    detector_pk = request.POST.get('detector_pk')
    video_pk = request.POST.get('video_pk')
    detection_spec = {
        "process_type": DVAPQL.PROCESS,
        "tasks": [{'operation': "perform_detection",
                   'arguments': {'detector_pk': int(detector_pk),
                                 'detector': "custom"},
                   'video_id': video_pk}],
    }
    proc = DVAPQLProcess()
    proc.create_from_json(j=detection_spec,
                          user=request.user if request.user.is_authenticated else None)
    proc.launch()
    return redirect('process_detail', pk=proc.process.pk)
def ci_search():
    """ Perform Continuous Integration testing using Travis for search queries """
    # Bootstrap Django outside of the normal entry point so ORM access works.
    import django
    sys.path.append(os.path.dirname(__file__))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
    django.setup()
    import base64
    from dvaapp.models import DVAPQL, Retriever, QueryResults
    from dvaapp.processing import DVAPQLProcess
    launch_workers_and_scheduler_from_environment()
    # Query spec: index the test image with inception, retrieve the 15 nearest
    # neighbours via the exact inception retriever, and run coco detection on
    # the same query image.
    query_dict = {
        'process_type': DVAPQL.QUERY,
        'image_data_b64': base64.encodestring(file('tests/query.png').read()),
        'tasks': [{
            'operation': 'perform_indexing',
            'arguments': {
                'index': 'inception',
                'target': 'query',
                'next_tasks': [{
                    'operation': 'perform_retrieval',
                    'arguments': {
                        'count': 15,
                        'retriever_pk': Retriever.objects.get(name='inception',
                                                              algorithm=Retriever.EXACT).pk
                    }
                }]
            }
        }, {
            'operation': 'perform_detection',
            'arguments': {
                'detector': 'coco',
                'target': 'query',
            }
        }]
    }
    qp = DVAPQLProcess()
    qp.create_from_json(query_dict)
    qp.launch()
    # Block until the query finishes (or 6 minutes elapse).
    qp.wait(timeout=360)
    print QueryResults.objects.count()
def init_process():
    """Run the DVAPQL spec file named by INIT_PROCESS, once.

    Paths outside the trusted custom_defaults directory are first fetched to
    a local 'temp.json'. The spec only runs when no DVAPQL process exists yet.
    A malformed file is logged and ignored.

    Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``except Exception``.
    """
    if 'INIT_PROCESS' in os.environ:
        path = os.environ.get('INIT_PROCESS', None)
        if path and path.strip():
            if not path.startswith('/root/DVA/configs/custom_defaults/'):
                get_path_to_file(path, "temp.json")
                path = 'temp.json'
            try:
                jspec = json.load(file(path))
            except Exception:
                logging.exception("could not load : {}".format(path))
            else:
                p = DVAPQLProcess()
                if DVAPQL.objects.count() == 0:
                    p.create_from_json(jspec)
                    p.launch()
def submit(path): """ Submit a DVAPQL process to run :param path: """ import django sys.path.append(os.path.dirname(__file__)) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings") django.setup() from dvaapp.processing import DVAPQLProcess with open(path) as f: j = json.load(f) p = DVAPQLProcess() p.create_from_json(j) p.launch() print "launched Process with id {} ".format(p.process.pk)
def perform_model_export(model_pk, user=None):
    """Queue a perform_export task for a trained model; return the process pk."""
    export_spec = {
        'process_type': dvaapp.models.DVAPQL.PROCESS,
        'map': [{"operation": "perform_export",
                 "arguments": {'trainedmodel_selector': {"pk": model_pk}}}],
    }
    proc = DVAPQLProcess()
    proc.create_from_json(export_spec, user)
    proc.launch()
    return proc.process.pk
def create_annotation(form, object_name, labels, frame, user=None):
    """Create a Region annotation on a frame via DVAPQL.

    :param form: validated annotation form (high_level/x/y/w/h/text/metadata).
    :param object_name: name stored on the region.
    :param labels: iterable of label names; blanks are skipped, the rest are
        attached from the 'UI' label set.
    :param frame: frame providing frame_index/segment_index/video_id.
    :param user: optional user attributed to the process.

    Bug fixed: the original tested ``type(metadata) is basestring`` which is
    always False (``basestring`` is an abstract base class, never a concrete
    type), so submitted JSON metadata was always discarded and stored as
    None. ``isinstance`` correctly matches both str and unicode.
    """
    annotation = {}
    label_specs = []
    annotation['object_name'] = object_name
    if form.cleaned_data['high_level']:
        # Full-frame annotation: the bounding box is irrelevant.
        annotation['full_frame'] = True
        annotation['x'] = 0
        annotation['y'] = 0
        annotation['h'] = 0
        annotation['w'] = 0
    else:
        annotation['full_frame'] = False
        annotation['x'] = form.cleaned_data['x']
        annotation['y'] = form.cleaned_data['y']
        annotation['h'] = form.cleaned_data['h']
        annotation['w'] = form.cleaned_data['w']
    annotation['text'] = form.cleaned_data['text']
    annotation['metadata'] = form.cleaned_data['metadata']
    if isinstance(annotation['metadata'], basestring) and annotation['metadata'].strip():
        annotation['metadata'] = json.loads(annotation['metadata'])
    else:
        annotation['metadata'] = None
    annotation['frame_index'] = frame.frame_index
    annotation['segment_index'] = frame.segment_index
    annotation['per_event_index'] = 0
    annotation['video_id'] = frame.video_id
    annotation['region_type'] = dvaapp.models.Region.ANNOTATION
    for lname in labels:
        if lname.strip():
            label_specs.append({'name': lname, 'set': 'UI'})
    spec = {
        'process_type': dvaapp.models.DVAPQL.PROCESS,
        'create': [{
            'MODEL': 'Region',
            'spec': annotation,
            'labels': label_specs
        }]
    }
    p = DVAPQLProcess()
    p.create_from_json(spec, user)
    p.launch()
    return
def create_retriever(name, algorithm, filters, indexer_shasum, approximator_shasum, user=None):
    """Create a Retriever row via a DVAPQL process and return the process pk."""
    retriever_creation = {
        "MODEL": "Retriever",
        "spec": {
            "name": name,
            "algorithm": algorithm,
            "indexer_shasum": indexer_shasum,
            "approximator_shasum": approximator_shasum,
            "source_filters": filters
        },
    }
    proc = DVAPQLProcess()
    proc.create_from_json({'process_type': dvaapp.models.DVAPQL.PROCESS,
                           'create': [retriever_creation]}, user)
    proc.launch()
    return proc.process.pk
'arguments': { 'index': 'inception', 'target': 'query', 'map': [{ 'operation': 'perform_retrieval', 'arguments': { 'count': 15, 'retriever_pk': Retriever.objects.get(name='inception', algorithm=algo, approximator_shasum=None).pk } }] } }, { 'operation': 'perform_detection', 'arguments': { 'detector': 'coco', 'target': 'query', } }] } qp = DVAPQLProcess() qp.create_from_json(query_dict) qp.launch() qp.wait(timeout=400) print QueryResults.objects.count()
def import_s3(request):
    """POST handler: import remote (gs:// or s3://) videos via perform_import.

    Exports need no follow-up task, .zip datasets get the extraction plan and
    everything else gets segmentation + decode. Raises NotImplementedError
    for non-POST requests or keys with an unknown prefix.
    """
    if request.method != 'POST':
        raise NotImplementedError
    user = request.user if request.user.is_authenticated else None
    create = []
    for raw_key in request.POST.get('key').strip().split('\n'):
        if not (raw_key.startswith('gs://') or raw_key.startswith('s3://')):
            raise NotImplementedError(
                "{} startswith an unknown remote store prefix".format(raw_key))
        tasks = []
        key = raw_key.strip()
        if not key:
            continue
        extract_task = {
            'arguments': {'next_tasks': defaults.DEFAULT_PROCESSING_PLAN_DATASET},
            'operation': 'perform_dataset_extraction'
        }
        segment_decode_task = {
            'operation': 'perform_video_segmentation',
            'arguments': {
                'next_tasks': [{
                    'operation': 'perform_video_decode',
                    'arguments': {
                        'segments_batch_size': defaults.DEFAULT_SEGMENTS_BATCH_SIZE,
                        'next_tasks': defaults.DEFAULT_PROCESSING_PLAN_VIDEO
                    }
                }]
            },
        }
        if key.endswith('.dva_export.zip'):
            next_tasks = []
        elif key.endswith('.zip'):
            next_tasks = [extract_task]
        else:
            next_tasks = [segment_decode_task]
        tasks.append({'video_id': '__pk__',
                      'operation': 'perform_import',
                      'arguments': {'path': key,
                                    'source': 'REMOTE',
                                    'next_tasks': next_tasks}})
        create.append({'MODEL': 'Video',
                       'spec': {'uploader_id': user.pk if user else None,
                                'name': key},
                       'tasks': tasks})
    proc = DVAPQLProcess()
    proc.create_from_json({'process_type': DVAPQL.PROCESS, 'create': create}, user)
    proc.launch()
    return redirect('video_list')
def ci():
    """ Perform Continuous Integration testing using Travis """
    # Bootstrap Django outside of the normal entry point so ORM access works.
    import django
    sys.path.append(os.path.dirname(__file__))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
    django.setup()
    import base64
    from django.core.files.uploadedfile import SimpleUploadedFile
    from dvaui.view_shared import handle_uploaded_file, pull_vdn_list \
        , import_vdn_dataset_url
    from dvaapp.models import Video, TEvent, VDNServer, DVAPQL, Retriever, DeepModel
    from django.conf import settings
    from dvaapp.processing import DVAPQLProcess
    from dvaapp.tasks import perform_dataset_extraction, perform_indexing, perform_export, perform_import, \
        perform_retriever_creation, perform_detection, \
        perform_video_segmentation, perform_transformation
    # Upload the small CI fixture videos.
    for fname in glob.glob('tests/ci/*.mp4'):
        name = fname.split('/')[-1].split('.')[0]
        f = SimpleUploadedFile(fname, file(fname).read(), content_type="video/mp4")
        handle_uploaded_file(f, name, False)
    # Larger fixtures are skipped on macOS development machines.
    if sys.platform != 'darwin':
        for fname in glob.glob('tests/*.mp4'):
            name = fname.split('/')[-1].split('.')[0]
            f = SimpleUploadedFile(fname, file(fname).read(), content_type="video/mp4")
            handle_uploaded_file(f, name, False)
        for fname in glob.glob('tests/*.zip'):
            name = fname.split('/')[-1].split('.')[0]
            f = SimpleUploadedFile(fname, file(fname).read(), content_type="application/zip")
            handle_uploaded_file(f, name)
    for i, v in enumerate(Video.objects.all()):
        # Extract datasets, segment plain videos, then index frames.
        if v.dataset:
            arguments = {'sync': True}
            perform_dataset_extraction(
                TEvent.objects.create(video=v, arguments=arguments).pk)
        else:
            arguments = {'sync': True}
            perform_video_segmentation(
                TEvent.objects.create(video=v, arguments=arguments).pk)
        arguments = {'index': 'inception', 'target': 'frames'}
        perform_indexing(
            TEvent.objects.create(video=v, arguments=arguments).pk)
        if i == 0:
            # save travis time by just running detection on first video
            # face_mtcnn
            arguments = {'detector': 'face'}
            dt = TEvent.objects.create(video=v, arguments=arguments)
            perform_detection(dt.pk)
            arguments = {'filters': {'event_id': dt.pk}, }
            perform_transformation(
                TEvent.objects.create(video=v, arguments=arguments).pk)
            # coco_mobilenet
            arguments = {'detector': 'coco'}
            dt = TEvent.objects.create(video=v, arguments=arguments)
            perform_detection(dt.pk)
            arguments = {'filters': {'event_id': dt.pk}, }
            perform_transformation(
                TEvent.objects.create(video=v, arguments=arguments).pk)
            # inception on crops from detector
            arguments = {
                'index': 'inception',
                'target': 'regions',
                'filters': {'event_id': dt.pk, 'w__gte': 50, 'h__gte': 50}
            }
            perform_indexing(
                TEvent.objects.create(video=v, arguments=arguments).pk)
            # assign_open_images_text_tags_by_id(TEvent.objects.create(video=v).pk)
        # NOTE(review): original indentation was lost in this chunk; the
        # export/re-import round trip below is assumed to run per video -- confirm.
        temp = TEvent.objects.create(video=v, arguments={'destination': "FILE"})
        perform_export(temp.pk)
        temp.refresh_from_db()
        fname = temp.arguments['file_name']
        f = SimpleUploadedFile(fname,
                               file("{}/exports/{}".format(settings.MEDIA_ROOT, fname)).read(),
                               content_type="application/zip")
        vimported = handle_uploaded_file(f, fname)
        perform_import(
            TEvent.objects.create(video=vimported, arguments={"source": "LOCAL"}).pk)
    # Build an LOPQ retriever over the inception index and run clustering once.
    # NOTE(review): assumed to run after the video loop -- confirm.
    dc = Retriever()
    args = {}
    args['components'] = 32
    args['m'] = 8
    args['v'] = 8
    args['sub'] = 64
    dc.algorithm = Retriever.LOPQ
    dc.source_filters = {
        'indexer_shasum': DeepModel.objects.get(name="inception",
                                                model_type=DeepModel.INDEXER).shasum
    }
    dc.arguments = args
    dc.save()
    clustering_task = TEvent()
    clustering_task.arguments = {'retriever_pk': dc.pk}
    clustering_task.operation = 'perform_retriever_creation'
    clustering_task.save()
    perform_retriever_creation(clustering_task.pk)
    # Exercise the query path: index the test image, retrieve 20 neighbours.
    query_dict = {
        'process_type': DVAPQL.QUERY,
        'image_data_b64': base64.encodestring(file('tests/query.png').read()),
        'tasks': [{
            'operation': 'perform_indexing',
            'arguments': {
                'index': 'inception',
                'target': 'query',
                'next_tasks': [{
                    'operation': 'perform_retrieval',
                    'arguments': {
                        'count': 20,
                        'retriever_pk': Retriever.objects.get(name='inception').pk
                    }
                }]
            }
        }]
    }
    launch_workers_and_scheduler_from_environment()
    qp = DVAPQLProcess()
    qp.create_from_json(query_dict)
    qp.launch()
    qp.wait()
    # Finally, check the VDN listing can be pulled and a known dataset imported.
    server, datasets, detectors = pull_vdn_list(1)
    for k in datasets:
        if k['name'] == 'MSCOCO_Sample_500':
            print 'FOUND MSCOCO SAMPLE'
            import_vdn_dataset_url(VDNServer.objects.get(pk=1), k['url'], None, k)
def import_s3(request):
    """POST handler: import objects from a single S3 bucket/region.

    Each newline-separated key becomes a Video row with a perform_import task;
    the follow-up plan depends on the key suffix (export / dataset zip /
    plain video). Raises NotImplementedError for non-POST requests.
    """
    if request.method != 'POST':
        raise NotImplementedError
    region = request.POST.get('region')
    bucket = request.POST.get('bucket')
    rate = request.POST.get('rate', defaults.DEFAULT_RATE)
    rescale = request.POST.get('rescale', defaults.DEFAULT_RESCALE)
    user = request.user if request.user.is_authenticated else None
    create = []
    for key in request.POST.get('key').strip().split('\n'):
        key = key.strip()
        if not key:
            continue
        extract_task = {
            'arguments': {'rate': rate,
                          'rescale': rescale,
                          'next_tasks': defaults.DEFAULT_PROCESSING_PLAN_DATASET},
            'operation': 'perform_dataset_extraction'
        }
        segment_decode_task = {
            'operation': 'perform_video_segmentation',
            'arguments': {
                'next_tasks': [{
                    'operation': 'perform_video_decode',
                    'arguments': {
                        'rate': rate,
                        'rescale': rescale,
                        'segments_batch_size': defaults.DEFAULT_SEGMENTS_BATCH_SIZE,
                        'next_tasks': defaults.DEFAULT_PROCESSING_PLAN_VIDEO
                    }
                }]
            },
        }
        if key.endswith('.dva_export.zip'):
            next_tasks = []
        elif key.endswith('.zip'):
            next_tasks = [extract_task]
        else:
            next_tasks = [segment_decode_task]
        import_task = {'video_id': '__pk__',
                       'operation': 'perform_import',
                       'arguments': {'key': key,
                                     'bucket': bucket,
                                     'region': region,
                                     'source': 'S3',
                                     'next_tasks': next_tasks}}
        create.append({'MODEL': 'Video',
                       'spec': {'uploader_id': user.pk if user else None,
                                'name': "pending S3 import {} s3://{}/{}".format(
                                    region, bucket, key)},
                       'tasks': [import_task]})
    proc = DVAPQLProcess()
    proc.create_from_json({'process_type': DVAPQL.PROCESS, 'create': create}, user)
    proc.launch()
    return redirect('video_list')
detector_type=m.get("detector_type", ""), arguments=m.get("arguments", {}), model_type=TrainedModel.DETECTOR, ) if created: dm.download() else: dm, created = TrainedModel.objects.get_or_create( name=m['name'], mode=m.get('mode', TrainedModel.TENSORFLOW), files=m.get('files', []), algorithm=m.get('algorithm', ""), arguments=m.get("arguments", {}), shasum=m.get('shasum', None), model_type=m['model_type']) if created: dm.download() if 'INIT_PROCESS' in os.environ and DVAPQL.objects.count() == 0: path = os.environ.get('INIT_PROCESS') p = DVAPQLProcess() if not path.startswith('/root/DVA/configs/custom_defaults/'): get_path_to_file(path, "temp.json") path = 'temp.json' try: jspec = json.load(file(path)) except: logging.exception("could not load : {}".format(path)) else: p.create_from_json(jspec) p.launch()