def get(self, request, *args, **kwargs):
    """Export a segment-level analyzer result as a Sonic Visualiser (.sv) download.

    URL kwargs: 'pk' selects the Result row, 'res_id' selects the entry
    inside the result's HDF5 container.
    """
    result = models.Result.objects.get(pk=kwargs['pk'])
    res_id = kwargs['res_id']
    container = AnalyzerResultContainer()
    container.from_hdf5(result.hdf5.path)
    segment_result = container[res_id]
    import tempfile
    tmp_dir = tempfile.mkdtemp(suffix=res_id + '_sv')
    try:
        # Pympi will not overwrite the file, hence a fresh dir per export
        audio_file = os.path.abspath(result.item.file.name)
        tmp_sv_file = os.path.splitext(
            os.path.basename(audio_file))[0] + '_' + res_id + '.sv'
        abs_tmp_sv_file = os.path.join(tmp_dir, tmp_sv_file)
        segment_result.data_object.to_sonic_visualiser(
            svenv_file=abs_tmp_sv_file, audio_file=audio_file)
        file_size = os.path.getsize(abs_tmp_sv_file)
        with open(abs_tmp_sv_file, "rb") as f:
            sv_data = f.read()
    finally:
        # BUG FIX: cleanup now runs even if the export raises, so the
        # temporary directory can no longer leak.
        import shutil
        shutil.rmtree(tmp_dir)
    response = HttpResponse(sv_data, content_type='application/xml')
    response['Content-Disposition'] = 'attachment; filename="%s"' % tmp_sv_file
    response['Content-Length'] = file_size
    return response
def get(self, request, *args, **kwargs):
    """Export a segment-level analyzer result as an ELAN (.eaf) file download.

    URL kwargs: 'pk' selects the Result row, 'res_id' selects the entry
    inside the result's HDF5 container.
    """
    result = Result.objects.get(pk=kwargs['pk'])
    res_id = kwargs['res_id']
    container = AnalyzerResultContainer()
    container.from_hdf5(result.hdf5.path)
    segment_result = container[res_id]
    import tempfile
    tmp_dir = tempfile.mkdtemp(suffix=res_id + '_eaf')
    try:
        # Pympi will not overwrite the file, hence a fresh dir per export
        audio_file = os.path.basename(segment_result.audio_metadata.uri)
        tmp_eaf_file = os.path.splitext(audio_file)[0] + '_' + res_id + '.eaf'
        abs_tmp_eaf_file = os.path.join(tmp_dir, tmp_eaf_file)
        segment_result.data_object.to_elan(elan_file=abs_tmp_eaf_file,
                                           media_file=audio_file)
        file_size = os.path.getsize(abs_tmp_eaf_file)
        with open(abs_tmp_eaf_file, "rb") as f:
            eaf_data = f.read()
    finally:
        # BUG FIX: cleanup now runs even if the export raises, so the
        # temporary directory can no longer leak.
        import shutil
        shutil.rmtree(tmp_dir)
    response = HttpResponse(eaf_data, content_type='application/xml')
    response['Content-Disposition'] = 'attachment; filename="%s"' % tmp_eaf_file
    response['Content-Length'] = file_size
    return response
def get(self, request, *args, **kwargs):
    """Export a segment-level analyzer result as an ELAN (.eaf) file download.

    URL kwargs: 'pk' selects the Result row, 'res_id' selects the entry
    inside the result's HDF5 container.
    """
    result = Result.objects.get(pk=kwargs['pk'])
    res_id = kwargs['res_id']
    container = AnalyzerResultContainer()
    container.from_hdf5(result.hdf5.path)
    segment_result = container[res_id]
    import tempfile
    tmp_dir = tempfile.mkdtemp(suffix=res_id + '_eaf')
    try:
        # Pympi will not overwrite the file, hence a fresh dir per export
        audio_file = os.path.basename(segment_result.audio_metadata.uri)
        tmp_eaf_file = os.path.splitext(audio_file)[0] + '_' + res_id + '.eaf'
        abs_tmp_eaf_file = os.path.join(tmp_dir, tmp_eaf_file)
        segment_result.data_object.to_elan(elan_file=abs_tmp_eaf_file,
                                           media_file=audio_file)
        file_size = os.path.getsize(abs_tmp_eaf_file)
        with open(abs_tmp_eaf_file, "rb") as f:
            eaf_data = f.read()
    finally:
        # BUG FIX: cleanup now runs even if the export raises, so the
        # temporary directory can no longer leak.
        import shutil
        shutil.rmtree(tmp_dir)
    response = HttpResponse(eaf_data, content_type='application/xml')
    response['Content-Disposition'] = 'attachment; filename="%s"' % tmp_eaf_file
    response['Content-Length'] = file_size
    return response
def get(self, request, *args, **kwargs):
    """Export a segment-level analyzer result as a Sonic Visualiser (.sv) download.

    URL kwargs: 'pk' selects the Result row, 'res_id' selects the entry
    inside the result's HDF5 container.
    """
    result = models.Result.objects.get(pk=kwargs['pk'])
    res_id = kwargs['res_id']
    container = AnalyzerResultContainer()
    container.from_hdf5(result.hdf5.path)
    segment_result = container[res_id]
    import tempfile
    tmp_dir = tempfile.mkdtemp(suffix=res_id + '_sv')
    try:
        # Pympi will not overwrite the file, hence a fresh dir per export
        audio_file = os.path.abspath(result.item.file.name)
        tmp_sv_file = os.path.splitext(
            os.path.basename(audio_file))[0] + '_' + res_id + '.sv'
        abs_tmp_sv_file = os.path.join(tmp_dir, tmp_sv_file)
        segment_result.data_object.to_sonic_visualiser(
            svenv_file=abs_tmp_sv_file, audio_file=audio_file)
        file_size = os.path.getsize(abs_tmp_sv_file)
        with open(abs_tmp_sv_file, "rb") as f:
            sv_data = f.read()
    finally:
        # BUG FIX: cleanup now runs even if the export raises, so the
        # temporary directory can no longer leak.
        import shutil
        shutil.rmtree(tmp_dir)
    response = HttpResponse(sv_data, content_type='application/xml')
    response['Content-Disposition'] = 'attachment; filename="%s"' % tmp_sv_file
    response['Content-Length'] = file_size
    return response
def tearDown(self):
    """Round-trip the result container through numpy serialization and compare.

    FIX: replaces the hard-coded '/tmp/t.npy' path with a unique temp file —
    the fixed path collides between concurrent test runs and was never removed.
    """
    results = AnalyzerResultContainer([self.result])
    import tempfile
    import os
    fd, npy_file = tempfile.mkstemp(suffix='.npy')
    os.close(fd)  # only the path is needed; the container writes the file
    try:
        results.to_numpy(npy_file)
        d_numpy = results.from_numpy(npy_file)
    finally:
        os.remove(npy_file)
    if verbose:
        print('%15s' % 'from numpy:', d_numpy)
    self.assertEqual(d_numpy, results)
def tearDown(self): results = AnalyzerResultContainer([self.result]) results.to_numpy("/tmp/t.npy") d_numpy = results.from_numpy("/tmp/t.npy") if verbose: print "%15s" % "from numpy:", print d_numpy self.assertEqual(d_numpy, results)
def get(self, request, *args, **kwargs):
    """Return the analyzer result container for a Result (by uuid) as JSON.

    An empty container is returned when the result has no HDF5 file yet.
    """
    result = models.Result.objects.get(uuid=kwargs['uuid'])
    container = AnalyzerResultContainer()
    if result.hdf5:
        container.from_hdf5(result.hdf5.path)
    # BUG FIX: the original returned a response only in the no-hdf5 branch,
    # so a result WITH an hdf5 file made the view return None (a Django
    # error). Both paths now return the serialized container.
    return HttpResponse(container.to_json(), content_type='application/json')
def tearDown(self): results = AnalyzerResultContainer(self.result) r_yaml = results.to_yaml() if verbose: print "to yaml:" print r_yaml from_results = AnalyzerResultContainer() from_results.from_yaml(r_yaml) if verbose: print "%15s" % "from yaml:", print from_results self.assertEqual( type(self.result.data_object.frame_metadata), type(from_results["foo_bar"].data_object.frame_metadata) )
def tearDown(self): results = AnalyzerResultContainer([self.result]) r_xml = results.to_xml() if verbose: print "to xml:" print r_xml from_results = AnalyzerResultContainer() from_results.from_xml(r_xml) if verbose: print "%15s" % "from xml:", print from_results # for i in range(len(d_xml)): self.assertEqual(results, from_results)
def tearDown(self): results = AnalyzerResultContainer([self.result]) try: r_json = results.to_json() except TypeError: print ("TYPE ERROR IN JSON") if verbose: print "to json:" print r_json from_results = AnalyzerResultContainer() from_results.from_json(r_json) if verbose: print from_results print "%15s" % "from json:", self.assertEqual(results, from_results)
def get_context_data(self, **kwargs):
    """Build the template context listing, per processor, the exportable
    analyzer results (ELAN / Sonic Visualiser availability per sub-result).
    """
    context = super(ItemDiadems, self).get_context_data(**kwargs)
    context['Result'] = 'Result'
    Results = {}
    for result in self.get_object().results.all():
        proc_id = result.preset.processor.pid
        Results[proc_id] = {'id': result.id}
        # ROBUSTNESS FIX: default the container every iteration — a result
        # with neither hdf5 nor mime_type previously left `container`
        # undefined (NameError) or stale from the previous loop pass.
        container = {}
        if result.hdf5:
            container = AnalyzerResultContainer()
            container.from_hdf5(result.hdf5.path)
            Results[proc_id]['json'] = True
            Results[proc_id]['list'] = {}
        elif result.mime_type:
            # `or` instead of bitwise `|` on booleans (same truth table)
            Results[proc_id]['audio'] = ('audio' in result.mime_type) or (
                'ogg' in result.mime_type)
            Results[proc_id]['image'] = 'image' in result.mime_type
            Results[proc_id]['video'] = 'video' in result.mime_type
        for res_id, res in container.items():
            # segment/label results can be exported to both ELAN and SV;
            # framewise/value results only to SV
            if res.time_mode == 'segment' and res.data_mode == 'label':
                Results[proc_id]['list'][res_id] = {
                    'elan': True,
                    'sv': True,
                    'Parameters': res.parameters,
                    'name': res.name
                }
            if res.time_mode == 'framewise' and res.data_mode == 'value':
                Results[proc_id]['list'][res_id] = {
                    'elan': False,
                    'sv': True,
                    'Parameters': res.parameters,
                    'name': res.name
                }
    context['Results'] = Results
    return context
def get_context_data(self, **kwargs):
    """Build the template context listing, per processor, the exportable
    analyzer results (ELAN / Sonic Visualiser availability per sub-result).
    """
    context = super(ItemDiadems, self).get_context_data(**kwargs)
    context['Result'] = 'Result'
    Results = {}
    for result in self.get_object().results.all():
        proc_id = result.preset.processor.pid
        Results[proc_id] = {'id': result.id}
        # ROBUSTNESS FIX: default the container every iteration — a result
        # with neither hdf5 nor mime_type previously left `container`
        # undefined (NameError) or stale from the previous loop pass.
        container = {}
        if result.hdf5:
            container = AnalyzerResultContainer()
            container.from_hdf5(result.hdf5.path)
            Results[proc_id]['json'] = True
            Results[proc_id]['list'] = {}
        elif result.mime_type:
            # `or` instead of bitwise `|` on booleans (same truth table)
            Results[proc_id]['audio'] = ('audio' in result.mime_type) or (
                'ogg' in result.mime_type)
            Results[proc_id]['image'] = 'image' in result.mime_type
            Results[proc_id]['video'] = 'video' in result.mime_type
        for res_id, res in container.items():
            # segment/label results can be exported to both ELAN and SV;
            # framewise/value results only to SV
            if res.time_mode == 'segment' and res.data_mode == 'label':
                Results[proc_id]['list'][res_id] = {
                    'elan': True,
                    'sv': True,
                    'Parameters': res.parameters,
                    'name': res.name
                }
            if res.time_mode == 'framewise' and res.data_mode == 'value':
                Results[proc_id]['list'][res_id] = {
                    'elan': False,
                    'sv': True,
                    'Parameters': res.parameters,
                    'name': res.name
                }
    context['Results'] = Results
    return context
def get_context_data(self, **kwargs):
    """Add the names of segment/label analyzer results to the detail context."""
    context = super(ItemDetail, self).get_context_data(**kwargs)
    context['Result'] = 'Result'
    Results = {}
    for result in self.get_object().results.all():
        container = {}
        if result.hdf5:
            container = AnalyzerResultContainer()
            container.from_hdf5(result.hdf5.path)
        for name, res in container.items():
            # only segment-mode label results are listed
            if res.time_mode == 'segment' and res.data_mode == 'label':
                Results[result.id] = name
    context['Results'] = Results
    return context
def tearDown(self):
    """YAML round-trip: frame_metadata must keep its type after re-parsing."""
    container = AnalyzerResultContainer(self.result)
    dumped = container.to_yaml()
    if verbose:
        print('to yaml:', dumped)
    parsed = AnalyzerResultContainer()
    parsed.from_yaml(dumped)
    if verbose:
        print('%15s' % 'from yaml:', parsed)
    original_type = type(self.result.data_object.frame_metadata)
    parsed_type = type(parsed['foo_bar'].data_object.frame_metadata)
    self.assertEqual(original_type, parsed_type)
def tearDown(self):
    """XML round-trip: the re-parsed container must equal the original."""
    container = AnalyzerResultContainer([self.result])
    dumped = container.to_xml()
    if verbose:
        print('to xml:', dumped)
    parsed = AnalyzerResultContainer()
    parsed.from_xml(dumped)
    if verbose:
        print('%15s' % 'from xml:', parsed)
    self.assertEqual(container, parsed)
def tearDown(self):
    """JSON round-trip: the re-parsed container must equal the original."""
    results = AnalyzerResultContainer([self.result])
    # BUG FIX: the original swallowed TypeError with a print and fell
    # through to use the never-assigned r_json, raising NameError instead
    # of a meaningful failure. Fail the test explicitly.
    try:
        r_json = results.to_json()
    except TypeError:
        self.fail('TYPE ERROR IN JSON')
    if verbose:
        print('to json:', r_json)
    from_results = AnalyzerResultContainer()
    from_results.from_json(r_json)
    if verbose:
        print('%15s' % 'from json:', from_results)
    self.assertEqual(results, from_results)
def tearDown(self):
    """HDF5 round-trip: write the container to a temp .h5 file, read it back,
    and check equality. Accepts either a single AnalyzerResult or a whole
    AnalyzerResultContainer in self.result.
    """
    if isinstance(self.result, AnalyzerResult):
        results = AnalyzerResultContainer([self.result])
    elif isinstance(self.result, AnalyzerResultContainer):
        results = self.result
    else:
        # BUG FIX: `raise (TypeError, "...")` raised a tuple, which Python 3
        # rejects with "exceptions must derive from BaseException". Raise a
        # proper exception instance instead.
        raise TypeError("Wrong type for self.result")
    import tempfile
    h5_file = tempfile.NamedTemporaryFile(suffix='.h5', delete=True)
    results.to_hdf5(h5_file.name)
    from_results = AnalyzerResultContainer()
    from_results.from_hdf5(h5_file.name)
    if verbose:
        print('%15s' % 'from hdf5:', from_results)
    self.assertEqual(results, from_results)
    h5_file.close()  # delete=True removes the file on close
def tearDown(self): if isinstance(self.result, AnalyzerResult): results = AnalyzerResultContainer([self.result]) elif isinstance(self.result, AnalyzerResultContainer): results = self.result else: raise (TypeError, "Wrong type for self.result") import tempfile h5_file = tempfile.NamedTemporaryFile(suffix=".h5", delete=True) results.to_hdf5(h5_file.name) from_results = AnalyzerResultContainer() from_results.from_hdf5(h5_file.name) if verbose: print "%15s" % "from hdf5:", print from_results self.assertEqual(results, from_results) h5_file.close()
def get(self, request, *args, **kwargs):
    """Return the full analyzer result container of a Result (by pk) as JSON."""
    pk = kwargs['pk']
    container = AnalyzerResultContainer()
    container.from_hdf5(models.Result.objects.get(pk=pk).hdf5.path)
    return HttpResponse(container.to_json(),
                        content_type='application/json')
def get(self, request, *args, **kwargs):
    """Return the full analyzer result container of a Result (by pk) as JSON."""
    pk = kwargs['pk']
    container = AnalyzerResultContainer()
    container.from_hdf5(Result.objects.get(pk=pk).hdf5.path)
    return HttpResponse(container.to_json(),
                        content_type='application/json')