def run_analysis(project: VIANProject, analysis: IAnalysisJob, targets: List[IProjectContainer], class_objs: List[ClassificationObject] = None):
    """
    Run an analysis job synchronously over the given target containers and
    attach every result to the project.

    :param project: the VIANProject the results are added to
    :param analysis: the IAnalysisJob to execute
    :param targets: the containers the analysis should be computed for
    :param class_objs: classification objects to run the analysis for; if
        None, the analysis is run once with no classification object
    """
    fps = project.movie_descriptor.fps

    # BUGFIX: the old code iterated class_objs directly, raising a TypeError
    # when the default None was passed. Run once with clobj=None instead.
    if class_objs is None:
        class_objs = [None]

    for clobj in class_objs:
        args = analysis.prepare(project, targets, fps, clobj)

        if analysis.multiple_result:
            # One process() call per prepared argument set.
            res = [analysis.process(arg, progress_dummy) for arg in args]
        else:
            res = analysis.process(args, progress_dummy)

        # process() may return a single result or a list; normalize to a list.
        results = res if isinstance(res, list) else [res]
        for r in results:
            # NOTE(review): PROJECT_LOCK presumably guards against concurrent
            # project mutation from other threads — confirm at its definition.
            with PROJECT_LOCK:
                analysis.modify_project(project, r)
                project.add_analysis(r)
def test_load(self):
    """Store a project to disk, then load it back and check it is not None."""
    proj_path = "data/test_project.eext"
    with VIANProject(name="TestProject", path=proj_path) as fresh:
        fresh.store_project()
    with VIANProject(name="TestProject", path=proj_path) as container:
        loaded = container.load_project(path=proj_path)
        self.assertNotEqual(loaded, None)
def __init__(self, parent, main_window):
    """
    Build the vocabulary manager widget: loads the Qt Designer UI, creates the
    library/project tree views, and wires all edit-field signals.

    :param parent: parent QWidget (expected to provide the synchronize_* slots)
    :param main_window: the application main window (used for settings access)
    """
    super(VocabularyView, self).__init__(parent)
    path = os.path.abspath("qt_ui/VocabularyManager.ui")
    uic.loadUi(path, self)
    self.main_window = main_window
    self.project = None          # currently loaded VIANProject (None until on_loaded)
    self.current_item = None     # item being edited in the detail panel
    # Dummy project that holds all file-system vocabularies ("the library").
    self.vocabulary_collection = VIANProject(name="VocabularyCollection")
    self.vocabulary_index = dict()  # A dict of vocabularies found on the file system
    self.fetch_vocabularies()
    # Maps the combo-box labels to numeric complexity levels.
    self.complexities = {
        "0 (Undefined)": 1,
        "1 (Beginner)": 1,
        "2": 2,
        "3 (Intermediate)": 3,
        "4": 4,
        "5 (Expert)": 5
    }
    for k, v in self.complexities.items():
        self.comboBoxComplexity.addItem(k)

    # Left tree: the on-disk vocabulary library (creation allowed).
    self.treeViewLibrary = VocabularyTreeView(self, self, self.vocabulary_collection, allow_create=True)
    self.vocabulary_model_library = VocabularyTreeItemModel(self.treeViewLibrary, self.parent().synchronize_from_project_to_library, "library")
    self.innerLibrary.layout().addWidget(self.treeViewLibrary)

    # Right tree: the current project's vocabularies (read-only creation).
    self.treeViewProject = VocabularyTreeView(self, self, self.project, allow_create=False)
    self.vocabulary_model_project = VocabularyTreeItemModel(self.treeViewProject, self.parent().synchronize_from_library_to_project, "project")
    self.innerProject.layout().addWidget(self.treeViewProject)

    self.image_drop = DropImageContainer(self)
    self.widgetImageContainer.setLayout(QVBoxLayout())
    self.widgetImageContainer.layout().addWidget(self.image_drop)

    # Signal wiring for the detail editor.
    self.btn_addItem.clicked.connect(self.add_word)
    self.lineEdit_Item.returnPressed.connect(self.add_word)
    self.lineEditCategory.textChanged.connect(self.on_category_changed)
    self.lineEditName.textChanged.connect(self.on_name_changed)
    self.textEditDescription.textChanged.connect(self.on_description_changed)
    self.lineEditComplexityGroup.textChanged.connect(self.on_complexity_group_changed)
    self.comboBoxComplexity.currentTextChanged.connect(self.on_complexity_level_changed)

    # Rebuild both trees whenever the library collection changes.
    self.vocabulary_collection.onVocabularyAdded.connect(partial(self.recreate_tree))
    self.vocabulary_collection.onVocabularyRemoved.connect(partial(self.recreate_tree))
    self.recreate_tree()
    self.show()
def test_entities(self):
    """Create a segmentation plus two segments and check both container levels."""
    with VIANProject(name="TestProject", path="data/test_project.eext") as proj:
        segm = proj.create_segmentation("SomeSegmentation")
        self.assertTrue(len(proj.segmentation) > 0)
        segm.create_segment2(0, 1000, body="Some Annotation")
        self.assertTrue(len(segm.segments) > 0)
        segm.create_segment2(1000, 2000, body="Another Annotation")
def check_erc_template(project: VIANProject):
    """
    Check whether the project uses the ERC template experiment and, if so,
    update it from the remote WebApp.

    :param project: the VIANProject to inspect
    """
    # NOTE(review): this bare return disables the entire function — everything
    # below is unreachable. It looks like a deliberate kill-switch for the
    # updater; confirm intent before removing, since the code below performs
    # network I/O and mutates the project.
    return
    uuid = CONFIG['erc_template_uuid']
    exp = project.get_by_id(uuid)
    if exp is None:
        log_info("No ERC Template detected")
        return
    log_info("ERC Template detected, updating")
    # Fetch the latest template definition from the WebApp.
    r = requests.get("http://ercwebapp.westeurope.cloudapp.azure.com/api/experiments/1")
    exchange_data = r.json()
    # The importer reads from disk, so write the payload to a temp file first.
    temporary = "data/temp.json"
    with open(temporary, "w") as f:
        json.dump(exchange_data, f)
    project.import_(ExperimentTemplateUpdater(), temporary)
    log_info("ERC Template detected, Done")
def test_1_api_experiments(self):
    """
    End-to-end template round trip: fetch the experiment list and one template
    from the WebApp, export it as a .viant template, then re-apply it to a
    fresh project and verify the pipeline script survived.
    """
    r = requests.get("http://ercwebapp.westeurope.cloudapp.azure.com/api/experiments/")
    experiments = r.json()
    self.assertIsInstance(experiments, list)
    # Template no. 1 is fetched from a locally running dev server.
    r = requests.get("http://127.0.0.1:5000/api/experiments/1")
    self.exchange_data = r.json()
    with open("data/test-template.json", "w") as f:
        json.dump(self.exchange_data, f)
    with open("test-template.json", "w") as f:
        json.dump(self.exchange_data, f)
    with VIANProject("TestProject") as project:
        with open("../extensions/pipelines/ercfilmcolors.py", "r") as f:
            script = f.read()
        project.import_(ExperimentTemplateImporter(), "data/test-template.json")
        # Attach the ERCFilmColors pipeline script to the imported experiment.
        pipeline = project.create_pipeline_script(name="ERCFilmColors Pipeline", author="ERCFilmColors", script=script)
        project.experiments[0].pipeline_script = pipeline
        pipeline.experiment = project.experiments[0]
        # project.add_pipeline_script("../extensions/pipelines/ercfilmcolors.py")
        project.active_pipeline_script = pipeline
        project.compute_pipeline_settings = dict(segments=False, screenshots=True, annotations=False)
        # Export everything (segmentation, vocabularies, experiment, pipeline).
        with open("ERC-FilmColors-Template.viant", "w") as f:
            json.dump(project.get_template(segm=True, voc=True, experiment=True, pipeline=True), f)
        print("Exported")
    with VIANProject("TestProject", folder="data") as project:
        project.apply_template("ERC-FilmColors-Template.viant")
        self.assertTrue(project.active_pipeline_script is not None)
        # Cross-check each vocabulary hash against the WebApp.
        for v in project.vocabularies:
            r = requests.get("http://ercwebapp.westeurope.cloudapp.azure.com/api/query/vocabulary_hash/" + v.unique_id)
            print(r.json())
def modify_project(self, project: VIANProject, result: IAnalysisJobAnalysis, main_window=None):
    """
    This Function will be called after the processing is completed.
    Since this function is called within the Main-Thread, we can modify
    our project here: each result is re-bound to its target container and
    to this job's classification object.
    """
    # Normalize: process() may hand back one result or a list of results.
    results = result if isinstance(result, list) else [result]
    for analysis_result in results:
        container = project.get_by_id(analysis_result.target_container)
        analysis_result.set_target_container(container)
        analysis_result.set_target_classification_obj(self.target_class_obj)
def test_check_project_existence(self):
    """A known project is reported as existing; a re-keyed one is not."""
    with VIANProject().load_project(path=PROJECT) as proj:
        interface = WebAppCorpusInterface()
        self.assertTrue(interface.check_project_exists(proj))
        # Give the project a fresh UUID: the backend must now reject it.
        proj.uuid = str(uuid4())
        self.assertFalse(interface.check_project_exists(proj))
def test_shot_segmentation(self):
    """Fit the shot segmentation model twice: plain and HDF5-compatible."""
    with VIANProject(name="TestProject", path="data/test_project.eext", movie_path=p0) as proj:
        started = time.time()  # timing value kept from original, unused
        descriptor = proj.movie_descriptor
        X = ShotSegmentationAnalysis().fit(descriptor)
        X = ShotSegmentationAnalysis(return_hdf5_compatible=True).fit(descriptor)
def auto_segmentation(project: VIANProject, mode, main_window, n_segment=-1, segm_width=10000, nth_frame=4, n_cluster_lb=1, n_cluster_hb=100, resolution=30):
    """
    Create an automatic segmentation of the project's movie.

    AUTO_SEGM_EVEN slices the movie into equally sized segments synchronously;
    AUTO_SEGM_CHIST launches a clustering job based on colorimetry histograms.

    :param project: the VIANProject to segment
    :param mode: AUTO_SEGM_EVEN or AUTO_SEGM_CHIST
    :param main_window: used to launch the concurrent job in CHIST mode
    :param n_segment: number of segments; if < 0 it is derived from segm_width
    :param segm_width: segment length in ms (used when n_segment < 0)
    :param nth_frame: frame subsampling for the clustering job
    :param n_cluster_lb: lower bound of the cluster-count range
    :param n_cluster_hb: upper bound of the cluster-count range
    :param resolution: temporal resolution for the clustering job
    """
    duration = project.movie_descriptor.duration
    if mode == AUTO_SEGM_EVEN:
        # Either the count or the width is derived from the other.
        if n_segment < 0:
            n_segment = int(duration / segm_width)
        else:
            segm_width = int(duration / n_segment)
        segmentation = project.create_segmentation("Auto Segmentation", False)
        for i in range(n_segment):
            # dispatch=False: suppress per-segment UI updates, dispatch once below.
            segmentation.create_segment2(i * segm_width, i * segm_width + segm_width, mode=SegmentCreationMode.INTERVAL, inhibit_overlap=False, dispatch=False)
            # segmentation.create_segment(i * segm_width, i * segm_width + segm_width, dispatch=False)
        project.dispatch_changed()
    elif mode == AUTO_SEGM_CHIST:
        ready, colormetry = project.get_colormetry()
        if ready:
            histograms = colormetry.get_histogram()
            frame_pos = colormetry.get_frame_pos()
        else:
            histograms = None
            frame_pos = None
        # NOTE(review): colormetry.resolution is read below even when the
        # colorimetry is not ready — confirm get_colormetry() never returns
        # None for the second element.
        job = AutoSegmentingJob([project.movie_descriptor.get_movie_path(), colormetry.resolution, project.movie_descriptor.fps, histograms, frame_pos, nth_frame, [n_cluster_lb, n_cluster_hb], resolution])
        main_window.run_job_concurrent(job)
def create_project(name, path=None):
    """
    Instantiate a VIANProject and, when a path is given, create its file
    structure on disk, connect the HDF5 store and persist it.

    :param name: display name of the new project
    :param path: optional project file path; the parent directory becomes
        the project folder
    :return: the created VIANProject
    """
    if path is None:
        folder = ""
    else:
        folder = os.path.split(path)[0]
    project = VIANProject(name=name, path=path, folder=folder)
    if path is not None:
        project.create_file_structure()
        project.connect_hdf5()
        project.store_project()
    return project
def test_4_project_import(self):
    """Download an exported project and re-import it via WebAppProjectImporter."""
    template_file = "test-template.json"
    response = requests.get("http://127.0.0.1:5000/download/148")
    self.exchange_data = response.json()
    with open(template_file, "w") as f:
        json.dump(self.exchange_data, f)
    with VIANProject("TestProject") as proj:
        importer = WebAppProjectImporter(movie_path="F:/fiwi_datenbank/MOV/14_1_1_MOV.mov")
        proj.import_(importer, template_file)
def test_export(self):
    """
    Interactive smoke test: load a project, preview its movie frame and all
    screenshots with OpenCV, then push the project through the WebApp export.
    """
    with VIANProject().load_project(path=PROJECT) as project:
        settings = UserSettings(path=SETTINGS_PATH).load()
        cap = cv2.VideoCapture(project.movie_descriptor.movie_path)
        ret, frame = cap.read()
        cv2.imshow("", frame)
        for s in project.screenshots:
            print(s, s.frame_pos)
            # Re-extract the screenshot image from the open capture.
            s.load_screenshots(cap)
            cv2.imshow("", s.get_img_movie())
            cv2.waitKey(10)
        interface = WebAppCorpusInterface()
        # Uses the private export entry point directly (no upload round trip).
        interface._export_project(project)
def prepare(self, project: VIANProject):
    """
    Set up (or resume) the colorimetry analysis for a project and build the
    argument list for the worker process.

    :param project: the VIANProject to analyze
    :return: [movie_path, start_frame_idx, frame_duration, resolution, fps,
              letterbox_rect]
    """
    # BUGFIX: the original created + cleared a fresh colormetry in the
    # None-branch and then immediately created + cleared a second one in the
    # start == 0 branch below. Creation now happens exactly once.
    if project.colormetry_analysis is None:
        start = 0
    else:
        # Resume the existing analysis from where it stopped.
        self.colormetry_analysis = project.colormetry_analysis
        self.resolution = self.colormetry_analysis.resolution
        start = self.colormetry_analysis.current_idx

    if start == 0:
        # Covers both "no analysis yet" and "existing analysis never started":
        # (re)create it and wipe any stale data.
        self.colormetry_analysis = project.create_colormetry(resolution=self.resolution)
        self.colormetry_analysis.clear()

    self.duration = project.movie_descriptor.duration
    frame_duration = ms_to_frames(self.duration, project.movie_descriptor.fps)
    return [
        project.movie_descriptor.get_movie_path(),
        start,
        frame_duration,
        self.resolution,
        project.movie_descriptor.fps,
        project.movie_descriptor.get_letterbox_rect()
    ]
def test_pipelines(self):
    """Create, persist, reload and finally remove a pipeline script."""
    proj_file = "data/test_project.eext"
    with VIANProject(name="TestProject", path=proj_file) as proj:
        script = proj.create_pipeline_script("TestPipeline", author="UnitTest")
        self.assertTrue(script is not None)

        script.save_script()
        self.assertTrue(os.path.isfile(script.path))

        script.import_pipeline()
        self.assertTrue(script.name in ALL_REGISTERED_PIPELINES)

        proj.store_project(proj_file)
        proj = proj.load_project(proj_file)
        self.assertTrue(len(proj.pipeline_scripts) > 0)

        proj.remove_pipeline_script(script)
        self.assertTrue(script not in proj.pipeline_scripts)
def __init__(self, parent, settings, movie_path="", elan_segmentation=None, add_to_current_corpus=False):
    """
    Dialog for creating a new VIAN project.

    :param parent: parent widget (also passed as the dialog's owner)
    :param settings: the application UserSettings
    :param movie_path: optional path of the movie; its directory becomes the
        default project directory
    :param elan_segmentation: optional ELAN segmentation to import
    :param add_to_current_corpus: preselect the currently open corpus
    """
    super(NewProjectDialog, self).__init__(
        parent, parent,
        "https://www.vian.app/static/manual/step_by_step/project_management/create_project.html"
    )
    path = os.path.abspath("qt_ui/DialogNewProject.ui")
    uic.loadUi(path, self)
    self.settings = settings
    self.templates = []
    self.project_name = "project_name"
    self.auto_naming = False
    self.elan_segmentation = elan_segmentation

    # BUGFIX: was `movie_path is ""` — identity comparison with a literal is
    # unreliable (and a SyntaxWarning on modern Python); use equality.
    if movie_path == "":
        self.project_dir = settings.DIR_PROJECTS
    else:
        # Derive the project directory from the movie location.
        mp = movie_path.replace("\\", "/")
        # BUGFIX: was `movie_path.split("/")`, which silently discarded the
        # backslash normalization above; split the normalized string.
        mp = mp.split("/")
        path = ""
        for i in range(len(mp) - 1):
            path += mp[i] + "/"
        self.project_dir = path

    self.project = VIANProject(path=None, name="New Project")
    self.path_set_from_dialog = False

    for s in MovieSource:
        self.comboBox_Source.addItem(s.name)

    self.find_templates()
    self.tabWidget.removeTab(1)

    # Signal wiring.
    self.cB_AutomaticNaming.stateChanged.connect(self.on_automatic_naming_changed)
    self.lineEdit_ProjectName.textChanged.connect(self.on_proj_name_changed)
    self.lineEdit_ProjectPath.editingFinished.connect(self.on_proj_path_changed)
    self.btn_BrowseProject.clicked.connect(self.on_browse_project_path)
    self.lineEdit_Name.editingFinished.connect(self.on_desc_name_changed)
    # self.lineEdit_ID.editingFinished.connect(self.on_desc_id_changed)
    self.spinBox_ID_0.valueChanged.connect(self.on_desc_id_changed)
    self.spinBox_ID_1.valueChanged.connect(self.on_desc_id_changed)
    self.spinBox_ID_2.valueChanged.connect(self.on_desc_id_changed)
    # self.lineEdit_Year.editingFinished.connect(self.on_desc_year_changed)
    self.spinBox_Year.valueChanged.connect(self.on_desc_year_changed)
    self.comboBox_Source.currentIndexChanged.connect(self.on_desc_ource_changed)
    self.btn_BrowseMovie.clicked.connect(self.on_browse_movie_path)

    self.comboBoxCorpus.addItems(self.settings.recent_corpora_2.keys())
    self.comboBoxCorpus.currentTextChanged.connect(self.on_corpus_changed)
    if add_to_current_corpus:
        # NOTE(review): self.main_window is not assigned in this __init__ —
        # presumably set by the base dialog class; confirm.
        if self.main_window.corpus_widget.corpus is not None:
            try:
                self.comboBoxCorpus.setCurrentText(self.main_window.corpus_widget.corpus.name)
            except Exception as e:
                print(e)

    self.btn_Cancel.clicked.connect(self.on_cancel)
    self.btn_OK.clicked.connect(self.on_ok)
    self.btn_Help.clicked.connect(self.on_help)
    self.lineEdit_MoviePath.setText(movie_path)
    self.project.movie_descriptor.set_movie_path(movie_path)
    self.checkBox_FromImages.stateChanged.connect(self.on_from_images_changed)
    self.lineEdit_ProjectName.setText(self.project_name)
    self.set_project_path()
    self.image_paths = []
    self.show()
def glossary_to_template(glossary_path, template_path, export_voc_dir=None):
    """
    Parses the GlossaryDB CSV and creates a custom experiment and VIANTemplate
    from the given data.

    :param glossary_path: path to the GlossaryDB CSV export
    :param template_path: output path of the .viant template (extension added
        if missing)
    :param export_voc_dir: optional directory to additionally export each
        vocabulary as JSON
    :return: None (the template is written to template_path)
    """
    # Parse the Glossary
    glossary_words = []
    glossary_ids = []
    glossary_categories = []
    glossary_voc_names = []
    glossary_mapping_strings = []
    glossary_omit = []
    glossary_organization_grp = []
    complexity_lvl = []
    complexity_grp = []
    # Read all lines of the CSV File and get the glossary values
    with open(glossary_path, 'r') as input_file:
        reader = csv.reader(input_file, delimiter=',')
        counter = 0
        for r in reader:
            if counter == 0:
                # Header row: resolve the column indices by name.
                print(r)
                idx_word = r.index("Term_EN")
                idx_id = r.index("Glossar ID")
                idx_column = r.index("Register")
                idx_voc_name = r.index("Field")
                idx_mapping = r.index("exp Field")
                idx_omit = r.index("Disregard")
                idx_organization = r.index("Group")
                idx_complexity = r.index("Level")
                idx_complexity_grp = r.index("ComplexityGroup")
            else:
                if r[idx_word] == "":
                    continue
                # Sanitize the term into a valid keyword name.
                word = r[idx_word]
                word = word.strip()
                word = word.replace("’", "")
                word = word.replace("/", "")
                word = word.replace(" ", "_")
                word = word.replace("-", "_")
                glossary_words.append(word)
                glossary_ids.append(int(r[idx_id]))
                glossary_categories.append(r[idx_column])
                glossary_voc_names.append(r[idx_voc_name])
                glossary_mapping_strings.append(r[idx_mapping])
                glossary_organization_grp.append(r[idx_organization])
                complexity_lvl.append(r[idx_complexity])
                complexity_grp.append(r[idx_complexity_grp])
                if "yes" in r[idx_omit]:
                    glossary_omit.append(True)
                else:
                    glossary_omit.append(False)
            counter += 1

    # We create a dummy object to create our container objects subsequently
    prj = VIANProject("Dummy")
    prj.inhibit_dispatch = True
    exp = prj.create_experiment()
    exp.name = "ERC Advanced Grant FilmColors"

    # Adding the Main Segmentation
    segm = prj.create_segmentation("Main Segmentation")

    # Create the Classification Object Tree
    glob = exp.create_class_object("Global", exp)
    glob.set_dataset(DATASET_NAME_ADE20K)
    for lbl in ADE20K:
        glob.add_dataset_label(lbl.value)
    # Foreground = person label only; Background = everything else.
    fg = exp.create_class_object("Foreground", exp)
    fg.set_dataset(DATASET_NAME_ADE20K)
    fg.add_dataset_label(ADE20K.person_lbl.value)
    bg = exp.create_class_object("Background", exp)
    bg.set_dataset(DATASET_NAME_ADE20K)
    for lbl in ADE20K:
        if lbl != ADE20K.person_lbl:
            bg.add_dataset_label(lbl.value)
    intert = exp.create_class_object("Intertitle", exp)
    env = exp.create_class_object("Environment", exp)
    light = exp.create_class_object("Lighting", exp)
    # Protagonist/support classes are children of the Foreground object.
    p_fem = exp.create_class_object("Female Protagonist", fg)
    p_mal = exp.create_class_object("Male Protagonist", fg)
    s_fem = exp.create_class_object("Female Support", fg)
    s_mal = exp.create_class_object("Male Support", fg)

    # Connect the Main Segmentation as Target container for all Created Classification Objects
    for cobj in exp.get_classification_objects_plain():
        cobj.target_container.append(segm)

    # Create all vocabularies
    existing_voc_names = []
    vocabularies = []
    voc_targets = []       # the "exp Field" mapping string per vocabulary
    keyword_ids = []       # glossary ids per vocabulary, parallel to its words
    for i in range(len(glossary_words)):
        if not glossary_omit[i] == True:
            # if i > 1250:
            #     print(glossary_voc_names[i])
            if glossary_voc_names[i] not in existing_voc_names:
                target_voc = create_vocabulary(glossary_voc_names[i].replace("/", ""), glossary_categories[i])
                vocabularies.append(target_voc)
                existing_voc_names.append(target_voc.name)
                voc_targets.append(glossary_mapping_strings[i])
                keyword_ids.append([glossary_ids[i]])
            else:
                idx = existing_voc_names.index(glossary_voc_names[i])
                target_voc = vocabularies[idx]
                keyword_ids[idx].append(glossary_ids[i])
            word = target_voc.create_word(glossary_words[i], dispatch=False)
            if word is None:
                # Duplicate word within the vocabulary; skip it.
                continue
            if glossary_organization_grp[i] == "":
                word.organization_group = 0
            else:
                word.organization_group = int(glossary_organization_grp[i])
            word.complexity_group = complexity_grp[i]
            word.complexity_lvl = int(complexity_lvl[i])

    # MERGE Vocabularies that are exactly the same
    voc_mapping = []
    keyword_ids_merged = []
    voc_merged = []
    print("--- ALL VOCABULARIES --")
    for i, v in enumerate(vocabularies):
        equal_voc = None
        # print(v.name)
        # Find an equal existing vocabulary in the final list
        for j, y in enumerate(voc_merged):
            # Omit Significance
            if "significance" in v.name.lower():
                break
            # Two vocabularies are equal when their word-name sets match.
            if set([n.name for n in v.words_plain]) == set([q.name for q in y.words_plain]):
                equal_voc = y
                if len(v.words_plain) != len(y.words_plain):
                    print([n.name for n in v.words_plain])
                    print([q.name for q in y.words_plain])
                break
        if equal_voc is None:
            voc_merged.append(v)
            voc_mapping.append([voc_targets[i].lower()])
            # Keep the glossary ids sorted by word name so they stay aligned
            # with the alphabetically sorted words used later.
            keyword_ids_merged.append([[x for _, x in sorted(zip([w.name for w in v.words_plain], keyword_ids[i]))]])
        else:
            idx = voc_merged.index(equal_voc)
            if voc_targets[i].lower() not in voc_mapping[idx]:
                voc_mapping[idx].append(voc_targets[i].lower())
                keyword_ids_merged[idx].append([x for _, x in sorted(zip([w.name for w in v.words_plain], keyword_ids[i]))])

    # print("#################")
    # for i, v in enumerate(voc_merged):
    #     for j, t in enumerate(voc_mapping[i]):
    #         print(len(v.words_plain) == len(keyword_ids_merged[i][j]), t, v.name)
    print("#####################")

    # Do some manual renaming
    for i, v in enumerate(voc_merged):
        if "Significance" in v.name:
            continue
        if "Hue" in v.name:
            v.name = "Hues"
        elif "Textures" in v.name:
            v.name = "Textures"
        elif "Visual Complexity" in v.name:
            v.name = "Significance"
        elif "Character Movement" in v.name:
            v.name = "Movement"
        elif "Surfaces" in v.name:
            v.name = "Surfaces"

    # Sanity print: keyword-id lists must align with the word lists.
    for i, v in enumerate(voc_merged):
        for j, t in enumerate(voc_mapping[i]):
            print(len(v.words_plain) == len(keyword_ids_merged[i][j]), t, v.name)

    # Add the final list of Vocabularies to the Project and
    # Connect them to the Classification Objects
    for i, v in enumerate(voc_merged):
        print(v.name)
        proj_voc = prj.create_vocabulary(v.name)
        proj_voc.category = v.category
        for w in sorted(v.words_plain, key=lambda x: x.name):
            word = proj_voc.create_word(w.name, w.parent)
            word.organization_group = w.organization_group
            word.complexity_lvl = w.complexity_lvl
            word.complexity_group = w.complexity_group
        v = proj_voc
        # Fint the correct classification Object
        for j, t in enumerate(voc_mapping[i]):
            if "female_protagonist" in t:
                p_fem.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])
            elif "female_support" in t:
                s_fem.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])
            elif "male_protagonist" in t:
                p_mal.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])
            elif "male_support" in t:
                s_mal.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])
            elif "intertitle" in t:
                intert.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])
            elif "character" in t or "foreground" in t:
                fg.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])
            elif "objects" in t or "background" in t:
                bg.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])
            elif "environment" in t:
                env.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])
            elif "lighting" in t:
                light.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])
            else:
                glob.add_vocabulary(v, external_ids=keyword_ids_merged[i][j])

    # Debug output: the vocabularies attached to each classification object.
    for c in exp.get_classification_objects_plain():
        print("####", c.name, "####")
        for v in c.get_vocabularies():
            print("---", v.name)
        print("")
    to_count = []
    for x in glossary_omit:
        if x == False:
            to_count.append(x)
    print(len(to_count))
    print(len(exp.get_unique_keywords()))

    # Add Analyses
    sem_seg_params = dict(model="LIP_Dataset", resolution=50)
    palette_params = dict(resolution=50)
    feature_params = dict(resolution=50)
    exp.add_analysis_to_pipeline("Fg/Bg Segmentation", SemanticSegmentationAnalysis, sem_seg_params)
    exp.add_analysis_to_pipeline("ColorPalette FG", ColorPaletteAnalysis, palette_params, fg)
    exp.add_analysis_to_pipeline("ColorPalette BG", ColorPaletteAnalysis, palette_params, bg)
    exp.add_analysis_to_pipeline("ColorPalette GLOB", ColorPaletteAnalysis, palette_params, glob)
    exp.add_analysis_to_pipeline("ColorPalette FG", ColorFeatureAnalysis, feature_params, fg)
    exp.add_analysis_to_pipeline("ColorPalette BG", ColorFeatureAnalysis, feature_params, bg)
    exp.add_analysis_to_pipeline("ColorPalette GLOB", ColorFeatureAnalysis, feature_params, glob)

    # Export the Vocabularies if the path is set
    if export_voc_dir is not None:
        for v in exp.get_vocabularies():
            v.export_vocabulary(os.path.join(export_voc_dir, v.name + ".json"))

    template = prj.get_template(True, True, False, False, True)
    if ".viant" not in template_path:
        template_path += ".viant"
    with open(template_path, "w") as f:
        json.dump(template, f)
    # NOTE(review): this trailing get_template() call discards its return
    # value — looks like a leftover; confirm before removing.
    prj.get_template()
import os
import shutil
from core.container.project import VIANProject
from core.analysis.analysis_utils import run_analysis
from core.analysis.palette_analysis import ColorPaletteAnalysis

# One-off maintenance script: open a specific project, drop all stored
# analyses and persist it again.
# NOTE(review): the absolute Windows path is machine-specific — this only
# runs on the original author's machine.
with VIANProject().load_project(
        "C:\\Users\\gaude\\Documents\\VIAN\\projects\\506_1_1_SouthPacific_1958_DVD\\506_1_1_SouthPacific_1958_DVD.eext"
) as p:
    p.analysis = []
    p.store_project()
def test_project_management(self):
    """Exercise adding and removing projects (by object and by file) on a corpus."""
    os.mkdir("data/projects/")
    corpus = Corpus("TestCorpus", directory="data/")

    # Create and persist four projects with the same naming scheme.
    stored = []
    for idx, movie in enumerate([p1, p2, p3, p4], start=1):
        proj = VIANProject("P%d" % idx, folder="data/projects/p%d" % idx, movie_path=movie)
        proj.store_project()
        stored.append(proj)
    proj1, proj2, proj3, proj4 = stored

    # Add two by object and two by file path.
    corpus.add_project(proj1)
    corpus.add_project(proj2)
    corpus.add_project(file=proj3.path)
    corpus.add_project(file=proj4.path)
    self.assertTrue(len(corpus.project_paths) == 4)
    self.assertTrue(len(corpus.projects_loaded) == 4)

    # Remove one by object and one by file path.
    corpus.remove_project(proj1)
    corpus.remove_project(file=proj4.path)
    self.assertTrue(len(corpus.project_paths) == 2)
    self.assertTrue(len(corpus.projects_loaded) == 2)

    # Removing with delete_from_disk must also erase the project folder.
    corpus.remove_project(file=proj3.path, delete_from_disk=True)
    self.assertTrue(not os.path.isdir(proj3.folder))
def test_3_merge_template(self):
    """
    Round-trip a template between two projects and verify that merging
    (with and without merge_drop) keeps their unique keywords in sync.
    """
    r = requests.get("http://ercwebapp.westeurope.cloudapp.azure.com/api/experiments/1")
    self.exchange_data = r.json()
    with open("data/test-template.json", "w") as f:
        json.dump(self.exchange_data, f)
    with VIANProject("TestProject") as project1:
        project1.import_(ExperimentTemplateImporter(), "data/test-template.json")
        with open("../extensions/pipelines/ercfilmcolors.py", "r") as f:
            script = f.read()
        # Attach the pipeline script to the imported experiment.
        pipeline = project1.create_pipeline_script(name="ERCFilmColors Pipeline", author="ERCFilmColors", script=script)
        project1.experiments[0].pipeline_script = pipeline
        pipeline.experiment = project1.experiments[0]
        project1.active_pipeline_script = pipeline
        project1.compute_pipeline_settings = dict(segments=False, screenshots=True, annotations=False)
        tmpl = project1.get_template(True, True, True, True, True, True)
        with VIANProject("TestProject") as project2:
            project2.apply_template(template=tmpl, script_export="data/")
            print(len(project1.experiments[0].get_unique_keywords()),
                  len(project2.experiments[0].get_unique_keywords()))

            # Diverge the two projects: drop one class object from project2,
            # add a new class object + vocabulary to project1.
            cl_obj_global = project2.experiments[0].get_classification_object_by_name("Global")
            project2.experiments[0].remove_classification_object(cl_obj_global)
            cl_new = project1.experiments[0].create_class_object("AnotherCL")
            v_new = project1.create_vocabulary("AnotherV")
            v_new.create_word("w1")
            v_new.create_word("w2")
            cl_new.add_vocabulary(v_new)

            # Merge project1's template into project2: keyword sets must match.
            tmpl = project1.get_template(True, True, True, True, True, True)
            res = project2.apply_template(template=tmpl, merge=True, script_export="data/")
            self.assertTrue(len(project1.experiments[0].get_unique_keywords()) ==
                            len(project2.experiments[0].get_unique_keywords()))
            print(len(project1.experiments[0].get_unique_keywords()),
                  len(project2.experiments[0].get_unique_keywords()))

            # Remove the extra class object again and merge with merge_drop:
            # project2 must shed the now-missing keywords too.
            project1.experiments[0].remove_classification_object(cl_new)
            tmpl = project1.get_template(True, True, True, True, True, True)
            res = project2.apply_template(template=tmpl, merge=True, merge_drop=True, script_export="data/")
            print(len(project1.experiments[0].get_unique_keywords()),
                  len(project2.experiments[0].get_unique_keywords()))
            self.assertTrue(len(project1.experiments[0].get_unique_keywords()) ==
                            len(project2.experiments[0].get_unique_keywords()))
def test_create_dir(self):
    """Storing a project must create its file on disk."""
    target = "data/test_project.eext"
    with VIANProject(name="TestProject", path=target) as proj:
        proj.store_project()
        self.assertTrue(os.path.isfile(target))
def test_create_no_dir(self):
    """A project without a path is still constructible as a context manager."""
    with VIANProject(name="NoDir") as created:
        self.assertIsInstance(created, VIANProject)
def on_import(self, project: VIANProject, fps):
    """
    Collect screenshot files and dispatch a ScreenshotImporter job.

    Modes: 0 = placement by timestamp parsed from the filename,
    1 = placement by segment id parsed from the filename,
    2 = plain import with no placement information.

    :param project: the VIANProject the screenshots belong to
    :param fps: frames per second used to convert ms to frame indices
    """
    mode = 0
    files = []
    scr_paths = []
    timestamps = []
    segment_ranges = []
    segment_ids = []
    # If the Time Location is given, we just want to parse the screenshot
    # locations and place them in the Project.
    if self.checkBox_UseLocation.isChecked() and self.lineEdit_Delimiter.text() != "":
        scr_ranges = []
        # Spin boxes are 1-based; -1 yields a 0-based index, and a stored
        # value of 0 (unset) becomes -1 ("not used").
        idx_h = self.sB_PositionTimeH.value() - 1
        idx_m = self.sB_PositionTimeM.value() - 1
        idx_s = self.sB_PositionTimeS.value() - 1
        idx_ms = self.sB_PositionTimeMS.value() - 1
        idx_segment = self.sB_PositionSegment.value() - 1
        has_time_location = (idx_h >= 0 or idx_m >= 0 or idx_s >= 0 or idx_ms >= 0)

        delimiter = self.lineEdit_Delimiter.text()
        if has_time_location:
            files = self.files
            mode = 0
            timestamps = []
            for f in self.files:
                _dir, file = os.path.split(f)  # renamed: `dir` shadowed the builtin
                file = file.split(".")[0]
                file = file.split(delimiter)
                try:
                    t_hour = 0
                    t_min = 0
                    t_sec = 0
                    t_milli = 0
                    # BUGFIX: these checks used `> 0` (index 0 was ignored,
                    # inconsistent with has_time_location's `>= 0`), and the
                    # minutes branch tested `t_min > 0` instead of the index —
                    # minutes were never parsed at all.
                    if idx_h >= 0:
                        t_hour = int(file[idx_h])
                    if idx_m >= 0:
                        t_min = int(file[idx_m])
                    if idx_s >= 0:
                        t_sec = int(file[idx_s])
                    if idx_ms >= 0:
                        t_milli = int(file[idx_ms])
                    time_ms = ts_to_ms(t_hour, t_min, t_sec, t_milli)
                    timestamps.append(time_ms)
                    scr_paths.append(f)
                except Exception as e:
                    # Skip files whose name does not parse; keep importing the rest.
                    log_error("Error in Screenshot Import", str(e))
                    continue
        elif idx_segment >= 0:
            mode = 1
            segment_ids = []
            for f in self.files:
                _dir, file = os.path.split(f)
                file = file.split(".")[0]
                file = file.split(delimiter)
                try:
                    segment_id = int(file[idx_segment])
                    scr_paths.append(f)
                    segment_ids.append(segment_id - 1)
                except Exception as e:
                    log_error("Error in Screenshot Import", str(e))
                    continue
            # Frame range of every main-segmentation segment, for placement.
            for s in project.get_main_segmentation().segments:
                segment_ranges.append([
                    ms_to_frames(s.get_start(), fps),
                    ms_to_frames(s.get_end(), fps)
                ])
    else:
        # No location info: import everything as-is.
        mode = 2
        scr_paths = self.files

    args = dict(mode=mode,
                movie_path=project.movie_descriptor.get_movie_path(),
                scr_paths=scr_paths,
                segment_ids=segment_ids,
                segment_ranges=segment_ranges,
                timestamps=timestamps)
    importer = ScreenshotImporter(args)
    self.main_window.run_job_concurrent(importer)
class VocabularyView(QWidget, IProjectChangeNotify):
    """
    Widget for managing vocabularies: a library tree (vocabularies found on
    the file system) next to a project tree (vocabularies of the currently
    open project), plus a detail editor for the selected item.
    """

    def __init__(self, parent, main_window):
        """
        Load the Designer UI, build both tree views and wire all signals.

        :param parent: parent QWidget (expected to provide the synchronize_* slots)
        :param main_window: the application main window (used for settings access)
        """
        super(VocabularyView, self).__init__(parent)
        path = os.path.abspath("qt_ui/VocabularyManager.ui")
        uic.loadUi(path, self)
        self.main_window = main_window
        self.project = None          # currently loaded VIANProject (set in on_loaded)
        self.current_item = None     # item being edited in the detail panel
        # Dummy project holding all file-system vocabularies ("the library").
        self.vocabulary_collection = VIANProject(name="VocabularyCollection")
        self.vocabulary_index = dict()  # A dict of vocabularies found on the file system
        self.fetch_vocabularies()
        # Maps combo-box labels to numeric complexity levels.
        self.complexities = {
            "0 (Undefined)": 1,
            "1 (Beginner)": 1,
            "2": 2,
            "3 (Intermediate)": 3,
            "4": 4,
            "5 (Expert)": 5
        }
        for k, v in self.complexities.items():
            self.comboBoxComplexity.addItem(k)

        self.treeViewLibrary = VocabularyTreeView(self, self, self.vocabulary_collection, allow_create=True)
        self.vocabulary_model_library = VocabularyTreeItemModel(self.treeViewLibrary, self.parent().synchronize_from_project_to_library, "library")
        self.innerLibrary.layout().addWidget(self.treeViewLibrary)

        self.treeViewProject = VocabularyTreeView(self, self, self.project, allow_create=False)
        self.vocabulary_model_project = VocabularyTreeItemModel(self.treeViewProject, self.parent().synchronize_from_library_to_project, "project")
        self.innerProject.layout().addWidget(self.treeViewProject)

        self.image_drop = DropImageContainer(self)
        self.widgetImageContainer.setLayout(QVBoxLayout())
        self.widgetImageContainer.layout().addWidget(self.image_drop)

        # Detail-editor signal wiring.
        self.btn_addItem.clicked.connect(self.add_word)
        self.lineEdit_Item.returnPressed.connect(self.add_word)
        self.lineEditCategory.textChanged.connect(self.on_category_changed)
        self.lineEditName.textChanged.connect(self.on_name_changed)
        self.textEditDescription.textChanged.connect(self.on_description_changed)
        self.lineEditComplexityGroup.textChanged.connect(self.on_complexity_group_changed)
        self.comboBoxComplexity.currentTextChanged.connect(self.on_complexity_level_changed)

        # Rebuild the trees whenever the library collection changes.
        self.vocabulary_collection.onVocabularyAdded.connect(partial(self.recreate_tree))
        self.vocabulary_collection.onVocabularyRemoved.connect(partial(self.recreate_tree))
        self.recreate_tree()
        self.show()

    def fetch_vocabularies(self):
        """Import built-in and user vocabularies from disk into the library."""
        # Built-in vocabularies shipped with the application.
        for p in glob.glob("data/vocabularies/*.json"):
            v = self.vocabulary_collection.import_vocabulary(p)
            v._path = p
            v.is_builtin = True
            self.vocabulary_index[v.uuid] = dict(voc=v, path=p, edited=False)
        # User vocabularies from the settings directory (editable).
        for p in glob.glob(self.main_window.settings.DIR_VOCABULARIES + "/*.json"):
            v = self.vocabulary_collection.import_vocabulary(p)
            self.vocabulary_index[v.uuid] = dict(voc=v, path=p, edited=False)
            v._path = p

    def add_vocabulary(self, model, view, voc):
        """Append a vocabulary's item tree to the given model/view and index it."""
        model.appendRow(self.get_vocabulary_item_model(voc))
        view.setModel(model)
        # Preserve the edited-flag if the vocabulary is already indexed.
        if voc.uuid in self.vocabulary_index:
            edited = self.vocabulary_index[voc.uuid]['edited']
        else:
            edited = False
        self.vocabulary_index[voc.uuid] = dict(voc=voc, path=voc._path, edited=edited)
        # self.treeView = QTreeView()

    def get_vocabulary_item_model(self, voc):
        """Build the QStandardItem tree for a vocabulary (root + word hierarchy)."""
        root = VocabularyItem(voc.name, voc)
        for w in voc.words:
            self.get_children(root, w)
        return root

    def get_children(self, parent_item, word):
        """Recursively append a word and its child words below parent_item."""
        item = VocabularyItem(word.name, word)
        parent_item.appendRow(item)
        if len(word.children) > 0:
            for c in word.children:
                self.get_children(item, c)

    def set_current(self, current):
        """
        Sets the current item to be edited in the right widget.

        :param current: The item to be edited (Vocabulary, VocabularyWord or None)
        """
        # Clear first so the *_changed slots fired by setText below do not
        # write back into the item while we populate the editor.
        self.current_item = None
        if current is None:
            return
        self.lineEditName.setText(current.name)
        self.textEditDescription.setPlainText(current.comment)
        voc = None
        if isinstance(current, VocabularyWord):
            voc = current.vocabulary
            self.lineEditCategory.setEnabled(False)
            self.comboBoxComplexity.setEnabled(True)
            print("Complexity Group", current.complexity_group)
            self.lineEditComplexityGroup.setText(current.complexity_group)
            # Select the combo entry matching the word's complexity level.
            for k, v in self.complexities.items():
                if v == current.complexity_lvl:
                    self.comboBoxComplexity.setCurrentText(k)
                    break
        elif isinstance(current, Vocabulary):
            voc = current
            self.lineEditCategory.setText(current.category)
            self.lineEditCategory.setEnabled(True)
            # Summarize the complexity groups of all contained words.
            complexity_groups = []
            for w in current.words:  #type: VocabularyWord
                if w.complexity_group not in complexity_groups:
                    complexity_groups.append(w.complexity_group)
            if len(complexity_groups) > 1:
                self.lineEditComplexityGroup.setText("Multiple")
            elif len(complexity_groups) == 1:
                self.lineEditComplexityGroup.setText(complexity_groups[0])
        self.current_item = current
        # Built-in vocabularies are read-only in the editor.
        if voc is not None and voc.is_builtin:
            self.lineEditName.setEnabled(False)
            self.textEditDescription.setEnabled(False)
            self.lineEditCategory.setEnabled(False)
            self.comboBoxComplexity.setEnabled(False)
            self.lineEditComplexityGroup.setEnabled(False)
        else:
            self.lineEditName.setEnabled(True)
            self.textEditDescription.setEnabled(True)
            self.lineEditCategory.setEnabled(True)
            self.comboBoxComplexity.setEnabled(True)
            self.lineEditComplexityGroup.setEnabled(True)

    def add_word(self):
        """Create a new word under the selected library vocabulary or word."""
        name = self.lineEdit_Item.text()
        if name != "" and len(self.treeViewLibrary.selectedIndexes()) > 0:
            selected = self.vocabulary_model_library.itemFromIndex(self.treeViewLibrary.selectedIndexes()[0])
            selected_item = selected.voc_object
            item = None
            if selected_item.get_type() == VOCABULARY_WORD:
                # New word becomes a child of the selected word.
                word = selected_item.vocabulary.create_word(name, selected_item.name)
                if word is not None:
                    item = VocabularyItem(word.name, word)
                else:
                    msg = QMessageBox.warning(self, "Duplicate Word", "Adding two words with the same name is not allowed.")
            elif selected_item.get_type() == VOCABULARY:
                word = selected_item.create_word(name)
                if word is not None:
                    item = VocabularyItem(word.name, word)
                else:
                    msg = QMessageBox.warning(self, "Duplicate Word", "Adding two words with the same name is not allowed.")
            else:
                log_error("Failed to create word")
                item = None
            if item is not None:
                index = self.add_to_tree(selected, item)
                self.treeViewLibrary.scrollTo(index)
            self.lineEdit_Item.setText("")

    def recreate_tree(self):
        """Rebuild both tree models from the library and the current project."""
        # AutoCompleter for Complexity Groups
        complexity_groups = []
        for v in self.vocabulary_collection.vocabularies:
            complexity_groups.extend(v.get_complexity_groups())
        complexity_groups = list(set(complexity_groups))
        completer = QCompleter(complexity_groups)
        self.lineEditComplexityGroup.setCompleter(completer)

        self.vocabulary_model_library.clear()
        for v in sorted(self.vocabulary_collection.vocabularies, key=lambda x:x.name):
            self.add_vocabulary(self.vocabulary_model_library, self.treeViewLibrary, v)
        self.vocabulary_model_project.clear()
        if self.project is not None:
            for v in sorted(self.project.vocabularies, key=lambda x:x.name):
                self.add_vocabulary(self.vocabulary_model_project, self.treeViewProject, v)
        self.treeViewProject.setModel(self.vocabulary_model_project)

    def add_to_tree(self, selected, item):
        """Append item below the selected tree item and return its model index."""
        selected.appendRow(item)
        return item.index()

    def on_category_changed(self):
        """Write the category field back to the edited vocabulary."""
        category = self.lineEditCategory.text()
        if isinstance(self.current_item, Vocabulary):
            self.current_item.category = category

    def on_name_changed(self):
        """Write the name field back to the edited item."""
        name = self.lineEditName.text()
        if self.current_item is not None:
            self.current_item.name = name

    def on_complexity_group_changed(self):
        """Write the complexity group to the word, or to all words of a vocabulary."""
        if self.current_item is None:
            return
        if isinstance(self.current_item, Vocabulary):
            for w in self.current_item.words:
                w.complexity_group = self.lineEditComplexityGroup.text()
        elif isinstance(self.current_item, VocabularyWord):
            self.current_item.complexity_group = self.lineEditComplexityGroup.text()

    def on_complexity_level_changed(self):
        """Write the complexity level to the word, or to all words of a vocabulary."""
        if self.current_item is None:
            return
        if isinstance(self.current_item, Vocabulary):
            for w in self.current_item.words:
                w.complexity_lvl = self.complexities[self.comboBoxComplexity.currentText()]
        elif isinstance(self.current_item, VocabularyWord):
            self.current_item.complexity_lvl = self.complexities[self.comboBoxComplexity.currentText()]

    def on_description_changed(self):
        """Write the description text back to the edited item's comment."""
        description = self.textEditDescription.toPlainText()
        if self.current_item is not None:
            self.current_item.comment = description

    def on_loaded(self, project):
        """IProjectChangeNotify: a project was opened — show its vocabularies."""
        self.project = project
        self.treeViewProject.collection = project
        self.recreate_tree()

    def on_closed(self):
        """IProjectChangeNotify: the project was closed — clear the editor."""
        self.current_item = None
        self.lineEditName.setText("")
        self.textEditDescription.setPlainText("")

    def on_changed(self, project, item):
        """IProjectChangeNotify: rebuild trees on project-wide changes."""
        if item is None:
            self.recreate_tree()

    def on_selected(self, sender, selected):
        """IProjectChangeNotify: selection changes are ignored here."""
        pass
def test_2_merge_experiments(self): r = requests.get( "http://ercwebapp.westeurope.cloudapp.azure.com/api/experiments/1") self.exchange_data = r.json() with open("data/test-template.json", "w") as f: json.dump(self.exchange_data, f) with VIANProject("TestProject") as project1: project1.import_(ExperimentTemplateImporter(), "data/test-template.json") with VIANProject("TestProject") as project2: project2.import_(ExperimentTemplateImporter(), "data/test-template.json") cl_obj_global = project2.experiments[ 0].get_classification_object_by_name("Global") project2.experiments[0].remove_classification_object( cl_obj_global) # for v in merge_experiment_inspect(project2.experiments[0], project1.experiments[0]): # print(v) cl_new = project2.experiments[0].create_class_object( "AnotherCL") v_new = project2.create_vocabulary("AnotherV") v_new.create_word("w1") v_new.create_word("w2") cl_new.add_vocabulary(v_new) # for v in merge_experiment_inspect(project2.experiments[0], project1.experiments[0]): # print(v) merge_experiment(project2.experiments[0], project1.experiments[0]) print(len(project1.experiments[0].get_unique_keywords()), len(project2.experiments[0].get_unique_keywords())) self.assertFalse( len(project1.experiments[0].get_unique_keywords()) == len( project2.experiments[0].get_unique_keywords())) t1 = [(q.word_obj.unique_id, q.voc_obj.unique_id, q.class_obj.unique_id) for q in project1.experiments[0].get_unique_keywords()] t2 = [(q.word_obj.unique_id, q.voc_obj.unique_id, q.class_obj.unique_id) for q in project2.experiments[0].get_unique_keywords()] self.assertFalse(set(t1) == set(t2)) merge_experiment(project2.experiments[0], project1.experiments[0], drop=True) print(len(project1.experiments[0].get_unique_keywords()), len(project2.experiments[0].get_unique_keywords())) self.assertTrue( len(project1.experiments[0].get_unique_keywords()) == len( project2.experiments[0].get_unique_keywords())) t1 = [(q.word_obj.unique_id, q.voc_obj.unique_id, q.class_obj.unique_id) for q 
in project1.experiments[0].get_unique_keywords()] t2 = [(q.word_obj.unique_id, q.voc_obj.unique_id, q.class_obj.unique_id) for q in project2.experiments[0].get_unique_keywords()] self.assertTrue(set(t1) == set(t2))