Example #1
    def parse_command(self):
        cmd = self.input_line.text()
        try:

            if cmd == "list_containers":
                self.main_window.project.print_object_list()

            elif cmd == "list_threads":
                log_info(threading.enumerate())

            elif cmd == "help":
                log_info(
                    "list_containers -- Listing all Container Objects of the Project"
                )

            else:
                # cmd = cmd.replace("print", "self.main_window.print_message")
                cmd = cmd.replace("project", "self.main_window.project")

                eval(cmd)

        except Exception as e:
            log_debug(e)

        self.input_line.clear()
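The string-rewriting fallback above can be narrowed by handing eval() an explicit namespace instead of patching the command text. A minimal sketch, not the original implementation:

    # Expose only the project under a fixed name; no builtins are available.
    eval(cmd, {"__builtins__": {}}, {"project": self.main_window.project})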
Example #2
    def on_use_pipeline(self, pipeline_name=None):
        """ Makes the currently selected pipeline as the active"""
        if pipeline_name is None or not isinstance(pipeline_name, str):
            if self.current_item is not None:
                self.current_item.setForeground(QColor(69, 69, 69))

            if self.listWidget_Pipelines.currentItem() is None:
                return
            pipeline_name = self.listWidget_Pipelines.currentItem().text()
        else:
            try:
                itm = self.all_items[pipeline_name]
                self.listWidget_Pipelines.setCurrentItem(itm)
            except Exception as e:
                raise e

        self.current_item = self.listWidget_Pipelines.currentItem()
        pipeline = self.pipeline_library[pipeline_name]
        log_info("Activated Pipeline", pipeline)

        if self.current_item is not None:
            self.current_item.setForeground(QColor(69, 200, 69))

        if self.project is not None and pipeline_name in ALL_REGISTERED_PIPELINES:
            pipeline = self.project.add_pipeline_script(pipeline)

        if self.comboBoxExperiment.currentText() != "Select Experiment (Optional)":
            pipeline.experiment = self.all_experiments[
                self.comboBoxExperiment.currentText()]
        self.on_reload_scripts()
        self.project.active_pipeline_script = pipeline
        self.onPipelineActivated.emit(pipeline)
Example #3
    def extract(self):
        self.audioExtractingStarted.emit()
        with HDF5_FILE_LOCK:
            self._read(self.project.movie_descriptor.get_movie_path())
            self.audio_samples = self._sample_audio(self.callback)
            if self.audio_samples is None:
                return
            self.audio_volume = np.abs(np.mean(self.audio_samples, axis=1))

            log_info("Size", self.audio_samples.nbytes / 10**6)
            log_info("Size", self.audio_volume.nbytes / 10**6)
            project_audio_path = os.path.join(self.project.data_dir,
                                              "audio.mp3")
            self.audioProcessed.emit(
                TimelineDataset("Audio Volume",
                                self.audio_volume,
                                ms_to_idx=(self.resolution * 1000),
                                vis_type=TimelineDataset.VIS_TYPE_AREA))
            try:
                if not os.path.isfile(project_audio_path) and self.export_audio:
                    self._audioclip.write_audiofile(project_audio_path)
            except Exception as e:
                log_error(e)
            self._videoclip.close()
            self._audioclip.close()
        self.audioExtractingEnded.emit()
Example #4
    def add_project(self,
                    project: VIANProject = None,
                    file=None,
                    merge_behaviour=MERGE_BEHAVIOUR_MERGE_KEEP):
        """
        Adds a project to the corpus, can either be given by VIANProject object or file
        :param project:
        :param file:
        :return:
        """
        if project is None and file is None:
            raise ValueError("Either project or file has to be given.")
        if project is None:
            results = None
            try:
                project = VIANProject().load_project(file)
                t_exp_names = [e.name for e in self.template.experiments]
                t_exp_unique_ids = [
                    e.unique_id for e in self.template.experiments
                ]

                template_dict = self.template.get_template(segm=True,
                                                           voc=True,
                                                           ann=True,
                                                           scripts=False,
                                                           experiment=True,
                                                           pipeline=True)

                if merge_behaviour == self.MERGE_BEHAVIOUR_DELETE_DELETE:
                    to_remove = [
                        e for e in project.experiments if e.name in t_exp_names
                        or e.unique_id in t_exp_unique_ids
                    ]
                    for t in to_remove:
                        project.remove_experiment(t)
                    results = project.apply_template(template=template_dict)

                elif merge_behaviour == self.MERGE_BEHAVIOUR_MERGE_DELETE:
                    results = project.apply_template(template=template_dict,
                                                     merge=True,
                                                     merge_drop=True)

                elif merge_behaviour == self.MERGE_BEHAVIOUR_MERGE_KEEP:
                    results = project.apply_template(template=template_dict,
                                                     merge=True,
                                                     merge_drop=False)

            except Exception as e:
                log_error("Could not load project", e)
                return
            project.store_project()
            project.close()

            if results is not None:
                for l in results:
                    log_info(l)

            self.projects_loaded[project.uuid] = project
            self.project_paths[project.uuid] = project.path
            self.onProjectAdded.emit(project)
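A hypothetical call site for add_project; the corpus variable, the loaded project and the file path are placeholders:

    # Either an already loaded project or a file path may be given;
    # passing neither raises a ValueError.
    corpus.add_project(project=my_loaded_project)
    corpus.add_project(file="/path/to/project.json",
                       merge_behaviour=corpus.MERGE_BEHAVIOUR_MERGE_KEEP)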
Example #5
    def push(self, project, analysis, targets, parameters, fps, class_objs):
        # de-duplication key: same targets, classification objects and analysis type
        identify = (targets, class_objs, analysis.__class__)
        if identify in self.queue_identify:
            log_info("Job is already in Queue:", identify)
            return

        self.queue.append((analysis, (project, targets, fps, class_objs)))
        self.queue_identify.append(identify)
        if self.running is None:
            self._start()
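A usage sketch for the de-duplication above; ColorAnalysis, targets and clobj are placeholders:

    queue.push(project, ColorAnalysis(), targets, None, fps, clobj)
    # An identical (targets, class_objs, analysis class) triple is logged
    # as "Job is already in Queue" and dropped.
    queue.push(project, ColorAnalysis(), targets, None, fps, clobj)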
Example #6
    def _run_task(self, task_id, analysis, args, on_progress):
        log_info("Running Analysis", analysis.__class__)
        try:
            return analysis.process(args, on_progress)
        except Exception:
            exctype, value = sys.exc_info()[:2]
            log_error(traceback.format_exc())
            self.signals.sign_error.emit((exctype, value, traceback.format_exc()))
            return None
Example #7
@contextmanager  # requires: from contextlib import contextmanager
def profiled():
    pr = cProfile.Profile()
    pr.enable()
    yield
    pr.disable()
    s = StringIO()
    ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
    ps.print_stats()
    # uncomment this to see who's calling what
    # ps.print_callers()
    log_info(s.getvalue())
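Since profiled() is a generator-based context manager, it is used in a with statement; a minimal sketch in which the workload is a placeholder:

    with profiled():
        run_expensive_step()  # hypothetical workload; stats are logged on exit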
Example #8
    def _run(self):
        self.analysisStarted.emit()
        # log_info("Queue:", self.queue_running, len(self.queue))
        if len(self.queue) > 0:
            self.queue_running = True
            (func, args) = self.queue.pop(0)
            try:
                log_info(func, args)
                func(*args)
            except Exception:
                self.onException.emit(traceback.format_exc())
            self._run()
        else:
            self.analysisEnded.emit()
            self.queue_running = False
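_run calls itself after every job, so the Python call stack grows with the queue length. An iterative equivalent as a sketch (the method name _drain_queue is hypothetical; it emits the started/ended signals once per drain):

    def _drain_queue(self):
        self.analysisStarted.emit()
        self.queue_running = True
        while len(self.queue) > 0:
            (func, args) = self.queue.pop(0)
            try:
                func(*args)
            except Exception:
                self.onException.emit(traceback.format_exc())
        self.queue_running = False
        self.analysisEnded.emit()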
Example #9
    def on_close(self):
        if self.h5_file is None:
            return

        self.h5_file.close()

        self.col_edge_max = None
        self.col_hue_max = None
        self.col_color_max = None
        self.col_lum_max = None

        self.path = None
        self.h5_file = None
        self._index = dict()
        self._uid_index = dict()
        log_info("Closed HDF")
Example #10
    def project_changed(self, project: VIANProject):
        """
        Reads the audio from the project's movie file, extracts the audio samples
        and stores a complete audio copy (mp3) in the project data structure.

        :param project: the current vian project
        :return:
        """

        # We have to acquire a lock from the HDF5Manager: since moviepy (or one of its
        # dependencies) locks all files referenced by the process, we have to make sure
        # the HDF5 manager doesn't try to clean (replace) the HDF5 file while the audio
        # samples are being read.

        log_info("AudioHandlerPath:",
                 project.movie_descriptor.get_movie_path())

        self.project = project
        if self.project is None:
            self.audioExtractingEnded.emit()
Example #11
    def __init__(self):
        super(ERCFilmColorsVIANPipeline, self).__init__()
        try:
            self.graph = tf.Graph()
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
            config.gpu_options.per_process_gpu_memory_fraction = 0.4

            with self.graph.as_default():
                self.session = tf.Session(config=config)
                KTF.set_session(self.session)
                self.model = PSPNetModelVIAN(input_shape=(512, 512, 3))
                self.model.load_weights(KERAS_LIP_WEIGHTS)
                self.model_name = DATASET_NAME_LIP
        except Exception as e:
            log_error(e)
            self.model = None
            self.model_name = "LIP"
            self.session = None
        log_info(self.model, self.model_name)
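Inference with a model set up this way has to run inside the same graph and session the weights were loaded into. A hedged sketch; the zero-filled input batch is a placeholder:

    if self.model is not None:
        with self.graph.as_default():
            KTF.set_session(self.session)
            # dummy batch matching the (512, 512, 3) input shape above
            out = self.model.predict(np.zeros((1, 512, 512, 3), dtype=np.float32))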
Example #12
    def on_save(self, save_as=False):
        if save_as:
            folder = QFileDialog.getExistingDirectory(caption="Select Directory to Save Vocabularies into")
        else:
            folder = None
        for itm in self.itms:
            if itm['list_item'].checkState() == Qt.Checked:
                voc = itm['voc']['voc']
                if folder is None:
                    if itm['voc']['path'] == "":
                        path = os.path.join(self.default_dir, voc.name + ".json")
                    else:
                        path = itm['voc']['path']
                else:
                    path = os.path.join(folder, voc.name + ".json")
                voc.export_vocabulary(path)

                log_info("Saving", itm['voc'])
            else:
                log_info("Not Saving", itm['voc'])
        self.close()
Example #13
    def set_path(self, path):
        self.path = path
        init = False
        log_info("HDF5: ", self.path)
        if not os.path.isfile(self.path):
            # create an empty file first, then reopen it in read/write mode
            h5py.File(self.path, "w").close()
            init = True
        self.h5_file = h5py.File(self.path, "r+")
        log_info("Datasets in HDF5 File:")
        for k in list(self.h5_file.keys()):
            log_info("\t-- ", k)
            if k == "ColorPalettes" and self.h5_file["ColorPalettes"].shape[1] != 1024:
                log_info("Deleting Palettes")
                del self.h5_file["ColorPalettes"]
        return init
Example #14
    def update_library(self):
        log_info("Updating Pipeline Library")

        # Fetch all registered pipelines and create a PipelineScript for each
        self.pipeline_library = dict()
        to_remove = []
        if self.project is not None:
            for script in self.project.pipeline_scripts:
                script.import_pipeline()
                try:
                    script.uuid = ALL_REGISTERED_PIPELINES[script.name][0].uuid
                except AttributeError as e:
                    to_remove.append(script)
                    log_error(e)
                    continue
                except KeyError:
                    pass

        for p in to_remove:
            self.project.remove_pipeline_script(p)

        project_pipelines = dict()
        for p in self.project.pipeline_scripts:
            project_pipelines[p.name] = p

        for k, (cl, path) in ALL_REGISTERED_PIPELINES.items():
            if k in project_pipelines:
                self.pipeline_library[k] = project_pipelines[k]
            else:
                if os.path.isfile(path):
                    with open(path, "r") as f:
                        content = f.read()
                    self.pipeline_library[k] = self.project.add_pipeline_script(
                        PipelineScript(name=cl.name,
                                       script=content,
                                       author=cl.author))
                    self.pipeline_library[k].save_script()
Example #15
    def apply_vocabulary(self, table, project: VIANProject, print_failed=False):
        if table is None:
            return

        voc_names = [v.name for v in project.vocabularies]
        voc_obj = [v for v in project.vocabularies]

        segments = []
        skipped = 0
        added = 0
        for i, row in enumerate(table):
            category = row[0]
            if category in voc_names:
                voc = voc_obj[voc_names.index(category)]
                for j in range(1, len(row)):
                    segm = [j - 1, []]
                    word_group = row[j]
                    for word in word_group:
                        if word != "":
                            word_obj = voc.get_word_by_name(word)
                            if word_obj is None:
                                skipped += 1
                            else:
                                added += 1
                                segm[1].append(word_obj)

                    segments.append(segm)
            else:
                log_warning("No Such Category:", category.replace(" ", "_"))

        main_seg = project.get_main_segmentation()
        log_info("Main Segmentation Length: ",len(main_seg.segments))
        for s in segments:
            idx = s[0]
            objs = s[1]
            if idx < len(main_seg.segments):
                pass
                #TODO Reimplement
                # for word in objs:
                #     main_seg.segments[idx].add_word(word)
            else:
                log_warning("Sub-Segmentation Ignored")

        log_info("Filemaker Data Loaded")
        log_info("Skipped: ", skipped)
        log_info("  Added: ", added)
Example #16
    def run_selection(self):
        if self.project is not None:
            if self.main_window.vian_event_handler.current_pipeline is None:
                return

            container = self.project.selected

            scrs = [s for s in container if isinstance(s, Screenshot)]
            segments = [s for s in container if isinstance(s, Segment)]
            annotations = [s for s in container if isinstance(s, Annotation)]

            missing_info = self.project.get_missing_analyses(
                self.main_window.vian_event_handler.current_pipeline.requirements,
                screenshots=scrs,
                annotations=annotations,
                segments=segments)
            missing = dict()

            log_info("## Missing Analyses in Pipeline ##")
            for k in missing_info.keys():
                # print(k, missing_info[k], missing.items())
                missing[k] = missing_info[k][0]
                log_info("## -- ", k, missing_info[k][2], missing_info[k][1],
                         missing_info[k][0])

            experiment = self.main_window.vian_event_handler.current_pipeline.experiment

            if experiment is None:
                log_error("Experiment not found for RunAll")
                return
            for cl in missing.keys():
                for priority in sorted(missing[cl].keys()):
                    for analysis_name in missing[cl][priority].keys():
                        analysis = self.main_window.eval_class(analysis_name)
                        for clobj_name, containers in missing[cl][priority][analysis_name].items():
                            clobj = experiment.get_classification_object_by_name(clobj_name)

                            if clobj is None:
                                log_warning("Classification Object not found")
                                continue
                            d = dict(analysis=analysis(),
                                     targets=containers,
                                     parameters=None,
                                     classification_objs=clobj)
                            log_info("Pipeline Analysis: ", priority,
                                     analysis_name, clobj_name)
                            self.onRunAnalysis.emit(d)
Example #17
def check_erc_template(project: VIANProject):
    # early return: the template update below is currently disabled
    return
    uuid = CONFIG['erc_template_uuid']
    exp = project.get_by_id(uuid)
    if exp is None:
        log_info("No ERC Template detected")
        return
    log_info("ERC Template detected, updating")
    r = requests.get(
        "http://ercwebapp.westeurope.cloudapp.azure.com/api/experiments/1")
    exchange_data = r.json()
    temporary = "data/temp.json"
    with open(temporary, "w") as f:
        json.dump(exchange_data, f)
    project.import_(ExperimentTemplateUpdater(), temporary)
    log_info("ERC Template detected, Done")
Example #18
    def initialize_dataset(self, name, shape, dtype):
        if name not in self.h5_file:
            log_info("Init:", name, shape, dtype)
            # maxshape=(None, ...) makes the dataset resizable along axis 0
            self.h5_file.create_dataset(name=name, shape=shape, dtype=dtype,
                                        maxshape=(None,) + shape[1:], chunks=True)
            self._index[name] = 0
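Because the dataset is created with maxshape=(None, ...), it can later grow along axis 0. A hypothetical append helper built on the same _index bookkeeping:

    def append_to_dataset(self, name, data):
        pos = self._index[name]
        dset = self.h5_file[name]
        if pos >= dset.shape[0]:
            # growing is allowed because maxshape[0] is None
            dset.resize(pos + 1, axis=0)
        dset[pos] = data
        self._index[name] = pos + 1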
Example #19
def print_registered_analyses():
    log_info("Registered Analyses:")
    for v in ALL_REGISTERED_ANALYSES.values():
        log_info("\t--- " + v.__name__)
Example #20
    def load_weights(self, path):
        log_info("Loading Weights...")
        self.model.load_weights(path)
        log_info("Done")
Example #21
    def __init__(self, main_window):
        self.analyses = []
        self.plugins = []
        self.import_paths = []
        self.main_window = main_window
        self.load_plugins()
        self.load_analysis()
        self.load_pipelines("extensions/pipelines/")
        # self.load_pipelines(main_window.settings.DIR_SCRIPTS)

        log_info("\n")
        log_info("#### --- Extensions --- #####")
        if len(self.analyses) > 0:
            log_info("Loaded Analyses")
            for a in self.analyses:
                log_info(" --", a.__class__.__name__)
        if len(self.plugins) > 0:
            log_info("Loaded Plugins")
            for a in self.plugins:
                log_info(" --", a.__class__.__name__)
        log_info("\n")
Example #22
    def mode_complete(self, movie_path, scr_paths, sign_progress, start=None, end=None):

        cap = cv2.VideoCapture(movie_path)
        length = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

        if start is not None and end is not None:
            length = end - start
        else:
            start = 0

        segm_length = 5000
        if length < segm_length:
            # cap.get() returns a float; range() below needs an int
            segm_length = int(length)

        resolution = 10
        quality = 0.3

        width = int(width * quality)
        height = int(height * quality)

        scrs = []
        scr_names = []
        for p in scr_paths:
            scr_names.append(os.path.split(p)[1].split(".")[0])
            img = cv2.imread(p)
            scrs.append(img)

        cap.set(cv2.CAP_PROP_POS_FRAMES, start)
        frame_counter = -1
        n_segments = int(np.ceil(length / segm_length))

        match_table = np.zeros(shape=(n_segments, len(scrs), 2))

        new_scr = []
        for scr in scrs:
            new_scr.append(cv2.resize(scr, (int(width), int(height)), interpolation=cv2.INTER_CUBIC))

        scrs = np.array(new_scr, dtype=np.float32)

        for i in range(n_segments):
            frames = []
            # frame_idxs = []
            for j in range(segm_length):
                if self.aborted:
                    return "aborted"
                if j % 20 == 0:
                    sign_progress(round((((i * segm_length) + j) / length), 2))
                ret, frame = cap.read()
                frame_counter += 1
                if j % resolution != 0:
                    continue

                if ret and frame_counter < length:
                    frame = cv2.resize(frame, (int(width), int(height)), interpolation=cv2.INTER_CUBIC)
                    frames.append(frame)

                else:
                    break

            frames = np.array(frames, dtype=np.float32)
            for j in range(scrs.shape[0]):
                match, rate = find_closest(scrs[j], frames)
                match = (match * resolution) + (segm_length * i)
                match_table[i, j] = [match, rate]

        result = []
        for i in range(scrs.shape[0]):
            # best_value = np.amin(match_table[:, i, 1])
            best_idx = np.argmin(match_table[:, i, 1])
            frame_idx = match_table[best_idx, i, 0]
            log_info("RESULT, ", frame_idx, frame_idx + start)
            frame_idx = frame_idx + start
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
            ret, frame = cap.read()

            result.append([frame_idx, frame, scr_names[i]])

        return result
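find_closest is not shown in this example; a minimal sketch consistent with how it is used above (it must return the index of the best-matching frame and a match error, lower being better):

    def find_closest(screenshot, frames):
        # mean absolute pixel difference between the screenshot and every frame
        diffs = np.mean(np.abs(frames - screenshot), axis=(1, 2, 3))
        best = int(np.argmin(diffs))
        return best, float(diffs[best])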
Example #23
    def import_(self, project: VIANProject, path):
        with open(path, "r") as f:
            data = json.load(f)
        experiment = project.get_by_id(data['experiment']['uuid'])
        if experiment is None:
            raise ValueError("Experiment not found with uuid:" + data['experiment']['uuid'])

        cl_objs_index = dict()
        for entry in data['classification_objects']:
            clobj = project.get_by_id(entry['uuid'])
            if clobj is None:
                log_info("ExperimentTemplateUpdater: Creating Classification Object:", (entry['name']))
                clobj = ClassificationObject(entry['name'], experiment=experiment, parent=experiment)
                clobj.unique_id = entry['uuid']
                clobj.set_project(project)
                experiment.add_classification_object(clobj)
                clobj.semantic_segmentation_labels = (entry['semantic_segmentation_dataset'], [lbl for lbl in entry['semantic_segmentation_label_ids']])
            else:
                log_info("ExperimentTemplateUpdater: Found Classification Object:", (entry['name']))
            cl_objs_index[entry['id']] = clobj

        words_index = dict()
        for entry in data['vocabularies']:

            voc = project.get_by_id(entry['uuid'])
            if voc is None:
                log_info("ExperimentTemplateUpdater: Creating Vocabulary Object:", (entry['name']))
                voc = Vocabulary(name=entry['name'])
                voc.unique_id = entry['uuid']
                voc.uuid = entry['uuid']
                project.add_vocabulary(voc)
            else:
                log_info("ExperimentTemplateUpdater: Found Vocabulary Object:", (entry['name']))

            # Updating Values
            voc.category = entry['vocabulary_category']

            if voc.name in self.hidden_vocs:
                voc.is_visible = False

            for w in entry['words']:
                # word = voc.create_word(name = w['name'])
                word = project.get_by_id(w['uuid'])
                if word is None:
                    log_info("ExperimentTemplateUpdater: Creating Word Object:", (w['name']))
                    word = VocabularyWord(name=w['name'], vocabulary=voc)
                    word.unique_id = w['uuid']
                    word.uuid = w['uuid']
                    voc.add_word(word)
                else:
                    log_info("ExperimentTemplateUpdater: Found Word Object:", (w['name']))

                # Updating Values
                word.complexity_group = w['complexity_group']['name']
                word.complexity_lvl = w['complexity']
                word.organization_group = w['arrangement_group']
                words_index[w['id']] = word

        vocs_to_add = []
        unique_keywords = dict()
        for entry in data['unique_keywords']:
            # Check if the keyword already exists in this experiment:
            keyword = project.get_by_id(entry['uuid'])
            clobj = cl_objs_index[entry['classification_object']]

            if clobj.unique_id not in unique_keywords:
                unique_keywords[clobj.unique_id] = dict()

            word = words_index[entry['word']]
            if keyword is None:
                if word not in [kwd.word_obj for kwd in clobj.unique_keywords]:
                    keyword = UniqueKeyword(self, word.vocabulary, word, clobj, unique_id=entry['uuid'])
            else:
                clobj.remove_vocabulary(keyword.voc_obj)

            if (word.vocabulary, clobj) not in vocs_to_add:
                vocs_to_add.append((word.vocabulary, clobj))
                unique_keywords[clobj.unique_id][word.vocabulary.unique_id] = dict()
            unique_keywords[clobj.unique_id][word.vocabulary.unique_id][word.unique_id] = keyword

        for vocabulary, clobj in vocs_to_add:
            clobj.add_vocabulary(vocabulary, keyword_override=unique_keywords[clobj.unique_id][vocabulary.unique_id])