Example #1
def init(profile):
    audio_level_filter = mlt.Filter(profile, "audiolevel")

    global MONITORING_AVAILABLE
    if audio_level_filter is not None:
        MONITORING_AVAILABLE = True
        editorstate.audio_monitoring_available = True
    else:
        MONITORING_AVAILABLE = False
        editorstate.audio_monitoring_available = False

    global CONTROL_SLOT_H, METER_SLOT_H, METER_LIGHTS, METER_HEIGHT
    if editorstate.screen_size_small_height():
        if editorstate.SCREEN_HEIGHT > 898:
            METER_SLOT_H = 400
            CONTROL_SLOT_H = 240
            METER_LIGHTS = 123
            METER_HEIGHT = METER_LIGHTS * DASH_INK + (METER_LIGHTS - 1) * DASH_SKIP
        else:
            METER_SLOT_H = 275
            CONTROL_SLOT_H = 240
            METER_LIGHTS = 82
            METER_HEIGHT = METER_LIGHTS * DASH_INK + (METER_LIGHTS - 1) * DASH_SKIP

    # We want this to be always present when closing app or we'll need to handle it being missing.
    global _update_ticker
    _update_ticker = utils.Ticker(_audio_monitor_update, 0.04)
    _update_ticker.start_ticker()
    _update_ticker.stop_ticker()
Example #2
def init(profile):
    audio_level_filter = mlt.Filter(profile, "audiolevel")

    global MONITORING_AVAILABLE
    if audio_level_filter is not None:
        MONITORING_AVAILABLE = True
        editorstate.audio_monitoring_available = True
    else:
        MONITORING_AVAILABLE = False
        editorstate.audio_monitoring_available = False

    # We want this to be always present when closing app or we'll need to handle it being missing.
    global _update_ticker
    _update_ticker = utils.Ticker(_audio_monitor_update, 0.04)
    _update_ticker.start_ticker()
    _update_ticker.stop_ticker()
Example #3
def init(profile):
    audio_level_filter = mlt.Filter(profile, "audiolevel")

    global MONITORING_AVAILABLE
    if audio_level_filter is not None:
        MONITORING_AVAILABLE = True
        editorstate.audio_monitoring_available = True
    else:
        MONITORING_AVAILABLE = False
        editorstate.audio_monitoring_available = False

    global CONTROL_SLOT_H, METER_SLOT_H
    if editorstate.screen_size_small_height():
        METER_SLOT_H = 400
        CONTROL_SLOT_H = 220

    # We want this to be always present when closing app or we'll need to handle it being missing.
    global _update_ticker
    _update_ticker = utils.Ticker(_audio_monitor_update, 0.04)
    _update_ticker.start_ticker()
    _update_ticker.stop_ticker()
Example #4
    def __init__(self, profile):

        self.init_for_profile(profile)

        self.ticker = utils.Ticker(self._ticker_event, TICKER_DELAY)
Example #5
    def run(self):
        Gdk.threads_enter()
        updater.set_info_icon(Gtk.STOCK_OPEN)

        dialog = dialogs.load_dialog()
        persistance.load_dialog = dialog
        Gdk.threads_leave()

        ticker = utils.Ticker(_load_pulse_bar, 0.15)
        ticker.start_ticker()

        old_project = editorstate.project
        try:
            editorstate.project_is_loading = True
            
            project = persistance.load_project(self.filename)
            sequence.set_track_counts(project)
            
            editorstate.project_is_loading = False

        except persistance.FileProducerNotFoundError as e:
            print("did not find file:", e)
            self._error_stop(dialog, ticker)
            primary_txt = _("Media asset was missing!")
            secondary_txt = _("Path of missing asset:") + "\n   <b>" + e.value  + "</b>\n\n" + \
                            _("Relative search for replacement file in sub folders of project file failed.") + "\n\n" + \
                            _("To load the project you will need to either:") + "\n" + \
                            u"\u2022" + " " + _("Use 'Media Linker' tool to relink media assets to new files, or") + "\n" + \
                            u"\u2022" + " " + _("Place a file with the same exact name and path on the hard drive")
            dialogutils.warning_message(primary_txt, secondary_txt, None, is_info=False)
            editorstate.project = old_project # persistance.load_project() changes this,
                                              # we simply change it back as no GUI or other state is yet changed
            return
        except persistance.ProjectProfileNotFoundError as e:
            self._error_stop(dialog, ticker)
            primary_txt = _("Profile with Description: '") + e.value + _("' was not found on load!")
            secondary_txt = _("It is possible to load the project by creating a User Profile with exactly the same Description\nas the missing profile. ") + "\n\n" + \
                            _("User Profiles can be created by selecting 'Edit->Profiles Manager'.")
            dialogutils.warning_message(primary_txt, secondary_txt, None, is_info=False)
            editorstate.project = old_project # persistance.load_project() changes this,
                                              # we simply change it back as no GUI or other state is yet changed
            return

        Gdk.threads_enter()
        dialog.info.set_text(_("Opening"))
        Gdk.threads_leave()

        time.sleep(0.3)

        Gdk.threads_enter()
        app.open_project(project)

        if self.block_recent_files: # naming flipped ????
            editorpersistance.add_recent_project_path(self.filename)
            editorpersistance.fill_recents_menu_widget(gui.editor_window.uimanager.get_widget('/MenuBar/FileMenu/OpenRecent'), open_recent_project)
        Gdk.threads_leave()
        
        Gdk.threads_enter()
        updater.set_info_icon(None)
        dialog.destroy()
        Gdk.threads_leave()

        ticker.stop_ticker()
Example #6
    def create_render_ticker(self):
        self.render_ticker = utils.Ticker(self.render_tick, 1.0)
Example #7
    def launch_render_ticker(self):
        self.ticker = utils.Ticker(self.render_tick, 1.0)
        self.ticker.start_ticker()
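Examples #1-#7 only show call sites for a delay-driven utils.Ticker: the constructor takes a callback and an interval in seconds, and the object exposes start_ticker() and stop_ticker(). The class itself never appears in these snippets, so the following is only a minimal sketch of a compatible implementation, assuming a plain daemon-thread loop; the real utils module may work differently.

import threading
import time

class Ticker:
    def __init__(self, action, delay):
        self.action = action  # callable invoked on every tick (assumed to take no args)
        self.delay = delay    # seconds to sleep between ticks
        self.running = False
        self.thread = None

    def start_ticker(self):
        # launch the background loop; daemon=True so it never blocks app exit
        self.running = True
        self.thread = threading.Thread(target=self._run, daemon=True)
        self.thread.start()

    def stop_ticker(self):
        # the loop checks this flag and exits after the current sleep
        self.running = False

    def _run(self):
        while self.running:
            self.action()
            time.sleep(self.delay)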
Example #8
def _loadRowRankings(data_dir, model, use_large_rowRankingCache):
    print(f"Loading row rankings into memory...")

    rowRankingsMap = {}

    # Row rankings source directory
    rowRankings_dir = f"{data_dir}/rowRankings/{model}/"

    # Row rankings cache directory
    lg_sm_str = "lg" if use_large_rowRankingCache else "sm"
    rowRankings_cache_addr = f"{data_dir}/rowRankings/rowRankings_{model}_cache_{lg_sm_str}.json"

    refresh_cache = utils.isCacheOld(rowRankings_dir, rowRankings_cache_addr)
    if refresh_cache:
        print(
            f"\tWarning! Cache not up to date, reparsing original row rankings files from \"{rowRankings_dir}\""
        )

        # Only keep the top relevant facts
        if not use_large_rowRankingCache:
            N = 200 if model == 'tfidf' else 100
            N += 1  # header

        # N = -2 # header

        # Populate this map with info from every question file
        # Hash by QID
        rowRankingsMap = {}

        for split in ["Train", "Test", "Dev"]:
            rankingsDir = f"{rowRankings_dir}/rankings.{split}/"
            questionFiles = os.listdir(rankingsDir)

            # User info
            numQs = len(questionFiles)
            ticker = utils.Ticker(
                numQs,
                tick_rate=121,
                message=f"Loading {numQs} {split} questions...")

            for rowRankingFileName in questionFiles:

                # info for user
                ticker.tick()

                with open(rankingsDir + rowRankingFileName, mode="r") as fp:
                    # question ID is in the file name rowRanking_{qid}.tsv
                    qid = rowRankingFileName[11:-4]
                    lines = fp.read().split("\n")

                    # Grab the header and the lines
                    header = lines[0].split("\t")
                    lines = lines[1:-1] if use_large_rowRankingCache else lines[1:N + 1]
                    if use_large_rowRankingCache:
                        N = len(lines)

                    # Make a JSON for the tsv
                    rankingJSON = {head: [""] * N for head in header}
                    rankingJSON['length'] = N
                    for i, line in enumerate(lines):
                        cols = line.split("\t")

                        for j, col in enumerate(cols):
                            rankingJSON[header[j]][i] = col

                    # Save the JSON in the full export obj
                    rowRankingsMap[qid] = rankingJSON

            ticker.end()

        # Convert stuff to floats
        print("Converting")
        for qid in rowRankingsMap.keys():
            for i, isGoldRow in enumerate(rowRankingsMap[qid]['isGoldRow']):
                rowRankingsMap[qid]['isGoldRow'][i] = int(isGoldRow)
            for i, score in enumerate(rowRankingsMap[qid]['score_1']):
                rowRankingsMap[qid]['score_1'][i] = float(score)

        print(f"Caching row rankings to \"{rowRankings_cache_addr}\".")
        with open(rowRankings_cache_addr, mode="w") as fp:
            fp.write(json_dumps(rowRankingsMap))
    else:
        print(
            f"\tReading cached row rankings from \"{rowRankings_cache_addr}\"."
        )
        with open(rowRankings_cache_addr, mode="r") as fp:
            rowRankingsMap = json_load(fp)

            # Convert stuff to floats
            for qid in rowRankingsMap.keys():
                for i, isGoldRow in enumerate(
                        rowRankingsMap[qid]['isGoldRow']):
                    rowRankingsMap[qid]['isGoldRow'][i] = int(isGoldRow)
                for i, score in enumerate(rowRankingsMap[qid]['score_1']):
                    rowRankingsMap[qid]['score_1'][i] = float(score)

            print("Done.")

    return rowRankingsMap
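The Ticker in Example #8 (and in the two examples after it) has a different interface from the timer above: it is constructed with a total item count, a tick_rate and a message, and the caller invokes tick() once per item and end() when done, so it behaves as a console progress counter. A rough sketch of such a class, inferred purely from these call sites and not taken from the actual utils module:

class Ticker:
    def __init__(self, total, tick_rate=100, message=""):
        self.total = total          # number of items expected
        self.tick_rate = tick_rate  # print progress every tick_rate ticks
        self.count = 0
        if message:
            print(message)

    def tick(self):
        # called once per processed item
        self.count += 1
        if self.count % self.tick_rate == 0 or self.count == self.total:
            print(f"\t{self.count}/{self.total}", end="\r", flush=True)

    def end(self):
        # terminate the carriage-returned progress line
        print()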
Example #9
    def _load_questions(self):

        questionsAddr = self.questionsAddr
        questions_cached_addr = f"{self.cache_dir}/questions_annotated_cache.json"

        # Question map
        questionsList = []
        questionsList_JSON = [] # Parallel structure that doesn't use classes so that we can cache
        qids = []

        # check to see if cached questions are on file
        refresh_cache = utils.isCacheOld(questionsAddr, questions_cached_addr)
        if refresh_cache:

            print(f"Loading questions from \"{questionsAddr}\".")

            # Load the questions table
            questions = pd.read_csv(questionsAddr, sep="\t")

            # Filter out incomplete questions
            questions = questions[questions['flags'].notna()]
            questions = questions[questions['explanation'].notna()]
            questions = questions[questions['examName'].notna()]
            questions = questions[questions.flags.str.contains("SUCCESS|READY", regex=True)]

            # Annotate the questions and save them locally

            # Prepare interface for user to see progress
            numQs = questions.shape[0]
            ticker = utils.Ticker(numQs, tick_rate=166, message=f"Annotating {numQs} questions...")

            # Convert questions to a map
            # store the question dataframe; the lemmatized question text; the lemmatized answer text
            for i, question_df in questions.iterrows():
                question = question_df.to_dict()

                qid = question['QuestionID']
                qids.append(qid)

                # info for user
                ticker.tick()
                
                # Read the question text and split it up into question and all answers
                qText = question['question']
                qText = re.split(r"\([A-G1-7]\)", qText)

                # Distinguish Q and A
                q_text, q_anss = qText[0], qText[1:]

                # Remove outer whitespace
                q_text = q_text.strip()
                q_anss = [ans.strip() for ans in q_anss]
                
                # Use spacy to annotate the texts

                # Question text annotation
                [q_text_words, q_text_lemmas, q_text_pos] = self.tokenize(q_text, annotationTypes=['word', 'lemma', 'pos'])
                
                # Answer text annotation
                isAlphaAnswerKey = re.match(r'[A-Ga-g]', question['AnswerKey'])
                answerSelections = ['A', 'B', 'C', 'D', 'E', 'F', 'G'] if isAlphaAnswerKey else ['1', '2', '3', '4', '5', '6', '7']
                answers_annotation = {}
                for i_ans, q_ans in enumerate(q_anss):
                    [ans_text_words, ans_text_lemmas, ans_text_pos] = self.tokenize(q_ans, annotationTypes=['word', 'lemma', 'pos'])
                    answers_annotation[answerSelections[i_ans]] = {
                        "words": ans_text_words,
                        "lemmas": ans_text_lemmas,
                        "pos": ans_text_pos
                    }

                # Add the annotation to the question
                question["question_annotation"] = {
                    "words": q_text_words,
                    "lemmas": q_text_lemmas,
                    "pos": q_text_pos
                }

                question["answers_annotation"] = answers_annotation

                questionsList_JSON.append(question)

                # Convert to a question object
                question = Question(question)

                # Add to the list
                questionsList.append(question)

            ticker.end()

            # Cache the annotated questions locally
            print(f"\tCaching annotated questions to \"{questions_cached_addr}\".")
            with open(questions_cached_addr, mode="w") as fp:
                fp.write(json_dumps(questionsList_JSON))
        else:
            print(f"\tLoading cached annotated questions from \"{questions_cached_addr}\".")
            with open(questions_cached_addr) as fp:
                questionsList = json_load(fp)
                
                # Convert to question class
                questionsList = [Question(q) for q in questionsList]

                # Get the list of qids
                qids = [q.id for q in questionsList]

        return qids, questionsList
Example #10
    def _load_tables(self):
        """
            Retrieves the tables from disk.
            Attempts to read the cache first; if the cache is old or missing,
            this function re-reads the table files and annotates them.

            return:
                (tables, tableNames, tableMap)
                tables     - dict mapping table name -> Table object
                tableNames - list of table names
                tableMap   - dict mapping row UID -> table row
        """


        # location of tables
        tablestoreTablesDir = self.tablestoreTablesDir
        tableFileNames = os.listdir(tablestoreTablesDir)

        # ======= #
        # Loading #
        # ======= #
        
        # Check cache
        tablestore_cached_addr = f"{self.cache_dir}/tablestore_annotated_cache.json"
        refresh_cache = utils.isCacheOld(tablestoreTablesDir, tablestore_cached_addr)

        # If cache old re-load and annotate the tablestore
        if refresh_cache:
            print(f"\tWarning! Cache not up to date, re-annotating tables from \"{tablestoreTablesDir}\"")

            # Get the NLP tool for tokenizing
            nlp = self.nlp

            # For each table make a Table object
            tables = {}     # Dictionary for tableName based lookup
            tableNames = [] # TableNames
            tableMap = {}   # Map for quick lookup by UID
            tableMap_json = {}   # Map for storage on file

            numTables = len(tableFileNames)
            ticker = utils.Ticker(numTables, tick_rate=1, message=f"Annotating {numTables} tables in tablestore...")

            for tableFileName in tableFileNames:
                ticker.tick()
                if tableFileName[-4:] == ".tsv":
                    tableFile_addr = f"{tablestoreTablesDir}/{tableFileName}"
                    tablename = tableFileName[:-4]

                    # Write down tablename
                    tableNames.append(tablename)

                    # read the table file (tab-separated values)
                    try:
                        with open(tableFile_addr) as fp:

                            lines = fp.read().split("\n")

                            header = lines[0].split("\t")
                            header = header[:-1] # fix extra tab in the table files

                            if header[-1] == "[SKIP] UID":

                                # Sanitized header
                                headerSanitized = []
                                for head in header:
                                    head_sanitized = re.sub(r"[^A-Za-z0-9]", " ", head)
                                    head_sanitized = head_sanitized.strip()
                                    head_sanitized = re.sub(r"  +", " ", head_sanitized)
                                    head_sanitized = re.sub(r" ", "_", head_sanitized)

                                    headerSanitized.append(head_sanitized)

                                tableRows = []

                                lines = lines[1:-1] # newline at end of file
                                for line in lines:
                                    cells = line.split("\t")[:-1]

                                    # UUID
                                    uuid = cells[-1]

                                    # Convert the cells into a sentence (skip the skip)
                                    cells_clean = []
                                    for cell, head in zip(cells, header):
                                        if cell != "" and not head.startswith("[SKIP]"):
                                            cell_sanitized = re.sub(r";", " ", cell)
                                            cell_sanitized = cell_sanitized.strip()
                                            cell_sanitized = re.sub(r"  +", " ", cell_sanitized)
                                            cells_clean.append(cell_sanitized)
                                    sentence = " ".join(cells_clean)
                                    tokens = nlp(sentence) #spacy
                                    # tokens = nlp(sentence).sentences[0].words #coreNLP

                                    # Track the token idx to sync the tokens into the 3d cell/alt/word structure
                                    token_idx = 0

                                    # Populate the 3d structures
                                    cellWords = []
                                    cellWordsLowerCase = []
                                    cellLemmas = []
                                    cellTags = []
                                    for cell, head in zip(cells, header):
                                        cell = re.sub(r"  +", " ", cell)        # remove double space typos
                                        isSkipCol = head.startswith("[SKIP]")   # Flag for when it's a skip column

                                        altWords = []
                                        altWordsLowerCase = []
                                        altLemmas = []
                                        altTags = []

                                        if cell != '':
                                            alts = cell.split(";")
                                            alts = [alt.strip() for alt in alts]

                                            for alt in alts:
                                                word_tokens = nlp(alt) #spacy
                                                # word_tokens = nlp(alt).sentences[0].words #coreNLP

                                                words = []
                                                wordsLowerCase = []
                                                lemmas = []
                                                tags = []

                                                if not isSkipCol:
                                                    for word_token in word_tokens:
                                                        # Spacy
                                                        words.append(tokens[token_idx].text)
                                                        wordsLowerCase.append(tokens[token_idx].lower_)
                                                        lemmas.append(tokens[token_idx].lemma_.lower())
                                                        tags.append(tokens[token_idx].tag_)

                                                        token_idx += 1
                                                        
                                                altWords.append(words)
                                                altWordsLowerCase.append(wordsLowerCase)
                                                altLemmas.append(lemmas)
                                                altTags.append(tags)
                            
                                        # Append an empty alt here
                                        else:
                                            altWords.append([])
                                            altWordsLowerCase.append([])
                                            altLemmas.append([])
                                            altTags.append([])
                                            
                                        cellWords.append(altWords)
                                        cellWordsLowerCase.append(altWordsLowerCase)
                                        cellLemmas.append(altLemmas)
                                        cellTags.append(altTags)

                                    row_obj = {
                                        "uuid": uuid,
                                        "tablename": tablename,
                                        "header": header,
                                        "headerSanitized": headerSanitized,
                                        "cells": cells,
                                        "cellWords": cellWords,
                                        "cellWordsLowerCase": cellWordsLowerCase,
                                        "cellLemmas": cellLemmas,
                                        "cellTags": cellTags,
                                    }

                                    tableRow = TableRow(row_obj, self)

                                    tableRows.append(row_obj)

                                    tableMap[uuid] = tableRow

                                table = Table(tablename, tableRows, self)
                                
                                tableMap_json[tablename] = tableRows
                                
                                # Safety check (don't keep empty tables)
                                if table.isNonEmpty:
                                    tables[tablename] = table
                                else:
                                    tableNames.remove(tablename)

                            else:
                                raise IOError
                    except IOError:
                        print('\n', end='\r', flush=True)
                        print(f"\tWarning! Cannot parse table file \"{tableFile_addr}\".")

            ticker.end()

            print(f"\tCaching annotated tables to \"{tablestore_cached_addr}\".")
            with open(tablestore_cached_addr, mode='w') as fp:
                fp.write(json_dumps(tableMap_json))

            return tables, tableNames, tableMap

        else:
            print(f"\tLoading cached annotated tables from \"{tablestore_cached_addr}\".")
            if os.path.exists(tablestore_cached_addr):
                with open(tablestore_cached_addr, mode='r') as fp:

                    # Parse the JSON file
                    tablestoreJSON = json.load(fp)

                    # The keys will be each tableName
                    tableNames = list(tablestoreJSON.keys())

                    # For each table make a Table object
                    tables = {}     # Dictionary for tableName based lookup
                    tableMap = {}   # Map for quick lookup by UID
                    for tableName in list(tableNames): # iterate over a copy; names may be removed below
                        rows = tablestoreJSON[tableName]
                        table = Table(tableName, rows, self)

                        # Safety check (don't keep empty tables)
                        if table.isNonEmpty:
                            tables[tableName] = table

                            # Add the rows to the tableMap
                            for row in table.rows:
                                uuid = row.uuid
                                tableMap[uuid] = row
                        else:
                            tableNames.remove(tableName)

                    return tables, tableNames, tableMap  

            else:
                print(f"\tERROR! Failed to load tablestore from JSON \"{tablestore_cached_addr}\"")
                return False, False, False