Beispiel #1
0
def rndLU(phrase, tag, lus):
    """Return the LU with the shortest name containing *phrase* that matches *tag*.

    :param phrase: Phrase that must be a member of the LU's name (per ``memOf``).
    :param tag: POS tag the LU's name must carry (per ``getNameTag``).
    :param lus: Iterable of LU records (dict-like, with an 'ID' key).
    :return: The matching FrameNet LU object from ``fn.lu``, or None.
    """
    # LU ID -> word count of the LU name, restricted to names containing the phrase.
    name_lengths = {x['ID']: len(getName(x).split(" "))
                    for x in lus if memOf(phrase, getName(x))}
    # Scan candidates from shortest to longest name; first tag match wins.
    for lu_id, _word_count in sorted(name_lengths.items(), key=lambda item: item[1]):
        candidate = fn.lu(lu_id)  # fetch once instead of twice per hit
        if getNameTag(candidate) == tag:
            return candidate
    return None
def load_data(fn_lu_embedding_filename, fn_plus_lu_embedding_filename):
    """Load LU embeddings and build a (features, labels) dataset for frame classification.

    :param fn_lu_embedding_filename: Pickle file mapping LU id -> embedding tensor.
    :param fn_plus_lu_embedding_filename: Pickle file mapping
        (frame_name, new_lu, original_lu) -> embedding tensor.
    :return: Tuple ``(X, Y, n_frames, cut_off)`` where ``cut_off`` is the number
        of rows in X/Y that came from the plain FrameNet LU file (rows at or
        after ``cut_off`` stem from the augmented fn_plus file).
    """
    # Context managers guarantee the pickle files are closed even on error.
    with open(fn_lu_embedding_filename, 'rb') as handle:
        fn_L = pickle.load(handle)
    with open(fn_plus_lu_embedding_filename, 'rb') as handle:
        fn_plus_L = pickle.load(handle)

    # Assign a dense integer label to every frame seen in the LU file.
    frames_to_int = {}
    for lu_id in fn_L:
        frame_name = fn.lu(lu_id).frame.name
        if frame_name not in frames_to_int:
            frames_to_int[frame_name] = len(frames_to_int)

    X = []
    Y = []

    for lu_id, tensor in fn_L.items():
        frame_name = fn.lu(lu_id).frame.name
        X.append(tensor.numpy())
        Y.append(frames_to_int[frame_name])

    cut_off = len(Y)

    # Augmented data already carries its frame name in the key tuple.
    # NOTE: an fn_plus frame that never occurred in fn_L raises KeyError,
    # matching the original behavior.
    for (frame_name, _new_lu, _ori_lu), tensor in fn_plus_L.items():
        X.append(tensor.numpy())
        Y.append(frames_to_int[frame_name])
    return np.array(X), np.array(Y), len(frames_to_int), cut_off
Beispiel #3
0
def rndLU(phrase, tag, lus):
    """Return an LU containing *phrase* with minimal name-length difference and matching *tag*.

    :param phrase: Phrase that must be a member of the LU's name (per ``memOf``).
    :param tag: POS tag the LU must carry; ``None`` yields no match at all.
    :param lus: Iterable of LU records (dict-like, with an 'ID' key).
    :return: The first best-matching FrameNet LU object, or None.
    """
    if tag is None:  # identity check: None is a singleton
        return None
    # LU ID -> length difference between LU name and the phrase.
    len_diff = {x['ID']: numDiff(getName(x), phrase)
                for x in lus if memOf(phrase, getName(x))}
    if not len_diff:
        return None
    best = min(len_diff.values())
    # Closest-length LUs that also carry the requested tag.
    matches = [fn.lu(lu_id) for lu_id in len_diff
               if len_diff[lu_id] == best and getNameTag(fn.lu(lu_id)) == tag]
    if matches:
        return matches[0]
    return None  # explicit: original fell off the end returning None
Beispiel #4
0
def rndLU(phrase, tag, lus):
    """Return an LU containing *phrase* with minimal name-length difference and matching *tag*.

    :param phrase: Phrase that must be a member of the LU's name (per ``memOf``).
    :param tag: POS tag the LU must carry; ``None`` yields no match at all.
    :param lus: Iterable of LU records (dict-like, with an 'ID' key).
    :return: The first best-matching FrameNet LU object, or None.
    """
    if tag is None:  # identity check: None is a singleton
        return None
    # LU ID -> length difference between LU name and the phrase.
    len_diff = {x['ID']: numDiff(getName(x), phrase)
                for x in lus if memOf(phrase, getName(x))}
    if not len_diff:
        return None
    best = min(len_diff.values())
    # Closest-length LUs that also carry the requested tag.
    matches = [fn.lu(lu_id) for lu_id in len_diff
               if len_diff[lu_id] == best and getNameTag(fn.lu(lu_id)) == tag]
    if matches:
        return matches[0]
    return None  # explicit: original fell off the end returning None
Beispiel #5
0
 def hieve_nltk_verbs(self, wildcard='<UNK>'):
     """Re-tokenize ``self.text`` keeping only PropBank/FrameNet verbs and event vocab.

     Each token not recognized as a verb (and not in ``self.event_vocab``) is
     replaced with *wildcard*; the filtered token lists are appended to
     ``self.text_verbs``.

     :param wildcard: Replacement token for non-verb words.
     """
     import nltk
     nltk.download('propbank')
     nltk.download('framenet_v17')
     from nltk.corpus import propbank
     from nltk.corpus import framenet as fn
     verbs = [x.lower() for x in propbank.verbs()]
     # Probe LU ids 0..len(lus); ids that don't exist (or names without a
     # second dot) raise and are skipped. NOTE(review): the bare except also
     # hides unrelated errors — consider narrowing it.
     for i in range(len(fn.lus())):
         try:
             x = fn.lu(i).name[:-2].lower()  # drop the 2-char POS suffix, e.g. '.v'
             x = x[:x.rindex('.')]  # keep text before the last remaining dot
             verbs.append(x)
         except:
             pass
     verbs = set(verbs)
     verbs |= self.event_vocab  # event vocabulary always survives filtering
     for tt in self.text:
         rb = []
         for x in tt:
             if x not in verbs:
                 rb.append(wildcard)
             else:
                 rb.append(x)
         self.text_verbs.append(rb)
     print("Tokenized hieve text with FrameNet and PropBank verbs.")
Beispiel #6
0
def getPatternFromStr(text):
    """Extract FrameNet-based pattern tokens from raw text.

    POS-tags *text*, lemmatizes every word, then slides 1- to 3-gram windows
    over it; each candidate phrase is looked up in FrameNet and every hit
    produces a pattern token "<words joined by _>.<frame id>".

    :param text: Raw input string.
    :return: Space-joined string of all pattern tokens found.
    """
    #global frmHash, savNum
    [words, tags] = analyzePOSText(text)
    newWords = [word_lemma(words[k], tags[k]) for k in range(len(words))]
    #print newWords
    ptns = []
    # Tags that disqualify a single-word window from starting a pattern.
    puncArr = ['dt', '.', 'uh', ',', 'in']
    # Short tags that disqualify the whole window.
    uselessArr = [None, 'c', 'scon', 'prep', 'intj']
    #cond1
    for i in range(len(words)):
        for j in range(i+1, len(words)):
            if j-i > 3: #3-gram LM
                break
            if (j==i+1) and (tags[i] in puncArr):
                break
            if len([x for x in tags[i:j] if shortTag(x) in uselessArr]) > 0:
                break

            #if (j>i+1) and (not consistant(tags[i:j])):
                #break

            phrase = ' '.join(newWords[i:j])
            '''
            if phrase not in frmHash:
            #print "********** " + phrase
                # online with framenet
                #lu = getLU_online(phrase, shortTag(tags[j-1]))
                # local with framenet
                lu = None
                luid = getLU_local(phrase, shortTag(tags[j-1]))
                if luid:
                    lu = fn.lu(luid)
                frmHash[phrase] = lu
            else:
                savNum += 1
                lu = frmHash[phrase]
            '''
            # online with framenet
            #lu = getLU_online(phrase, shortTag(tags[j-1]))
            # local with framenet
            lu = None
            # Look the phrase up locally, keyed by the last word's short tag.
            luid = getLU_local(phrase, shortTag(tags[j-1]))
            if luid:
                lu = fn.lu(luid)

            if lu:
                fid = getFrameID(lu)
                ptn = "_".join(words[i:j])+"."+str(fid)
                ptns.append(ptn)
                #print lu['name'],
                #print fn.frame(fid)['name']
    return " ".join(ptns)
Beispiel #7
0
def getPatternFromStr(text):
    """Extract FrameNet-based pattern tokens from raw text.

    POS-tags *text*, lemmatizes every word, then slides 1- to 3-gram windows
    over it; each candidate phrase is looked up in FrameNet and every hit
    produces a pattern token "<words joined by _>.<frame id>".

    :param text: Raw input string.
    :return: Space-joined string of all pattern tokens found.
    """
    #global frmHash, savNum
    [words, tags] = analyzePOSText(text)
    newWords = [word_lemma(words[k], tags[k]) for k in range(len(words))]
    #print newWords
    ptns = []
    # Tags that disqualify a single-word window from starting a pattern.
    puncArr = ['dt', '.', 'uh', ',', 'in']
    # Short tags that disqualify the whole window.
    uselessArr = [None, 'c', 'scon', 'prep', 'intj']
    #cond1
    for i in range(len(words)):
        for j in range(i + 1, len(words)):
            if j - i > 3:  #3-gram LM
                break
            if (j == i + 1) and (tags[i] in puncArr):
                break
            if len([x for x in tags[i:j] if shortTag(x) in uselessArr]) > 0:
                break

            #if (j>i+1) and (not consistant(tags[i:j])):
            #break

            phrase = ' '.join(newWords[i:j])
            '''
            if phrase not in frmHash:
            #print "********** " + phrase
                # online with framenet
                #lu = getLU_online(phrase, shortTag(tags[j-1]))
                # local with framenet
                lu = None
                luid = getLU_local(phrase, shortTag(tags[j-1]))
                if luid:
                    lu = fn.lu(luid)
                frmHash[phrase] = lu
            else:
                savNum += 1
                lu = frmHash[phrase]
            '''
            # online with framenet
            #lu = getLU_online(phrase, shortTag(tags[j-1]))
            # local with framenet
            lu = None
            # Look the phrase up locally, keyed by the last word's short tag.
            luid = getLU_local(phrase, shortTag(tags[j - 1]))
            if luid:
                lu = fn.lu(luid)

            if lu:
                fid = getFrameID(lu)
                ptn = "_".join(words[i:j]) + "." + str(fid)
                ptns.append(ptn)
                #print lu['name'],
                #print fn.frame(fid)['name']
    return " ".join(ptns)
def exemplars2(frame_name, feR=False):
    """Collect all exemplar sentences of every LU of *frame_name* and annotate them.

    :param frame_name: Name of the FrameNet frame whose LUs are inspected.
    :param feR: Unused flag, kept for interface compatibility.
    :return: The module-level ``simres2`` result produced by ``annotation2``.
    """
    exemplar_sentences = []
    for lu_id in lu_ids(frame_name):
        # exemplars is iterable; extend replaces the original index loop
        # and the dead pass statements.
        exemplar_sentences.extend(fn.lu(lu_id).exemplars)

    annotation2(exemplar_sentences, frame_name)
    return simres2
Beispiel #9
0
                    "For which LU would you like to see it's associated frame and frame elements?",
                    list(range(len(lu_nameID_dict.keys()))),
                    key=i)
                for LU in LU_choice_number:
                    selectedLU = lu_list[LU]
                    if selectedLU.split(
                    )[3][0:2] == "ID":  #To account for more than one word LU
                        lu_ID = int(selectedLU.split()[3][:-1].replace(
                            "ID=", ""))
                    else:
                        lu_ID = int(selectedLU.split()[4][:-1].replace(
                            "ID=", ""))
                    lu_name = lu_nameID_dict[lu_ID]
                    st.write("You selected LU: ", LU, ".", lu_name)

                    associatedFrame = fn.lu(lu_ID).frame.name
                    lu_frame = fn.frame(associatedFrame)
                    st.write("Frame: ", associatedFrame)
                    st.write("Reference: ", lu_frame.URL)
                    FE_list = []
                    for element in lu_frame.FE:
                        FE_list.append(element)
                    st.write("Frame Elemenet(s): ")
                    st.write(FE_list)
                    st.write("Annotator Summary:")
                    st.write("File Annotated:", filename.name)
                    st.write("Selected Key:", key_list[q])
                    st.write("JSON PATH: ", jpath[q - 1][0])
                    st.write("Selected LU:", lu_name)
                    st.write("LU's Frame:", associatedFrame, "(", lu_frame.URL,
                             ")")
Beispiel #10
0
def rndLU(phrase, tag, lus):
    """Return the LU with the shortest name containing *phrase* that matches *tag*.

    :param phrase: Phrase that must be a member of the LU's name (per ``memOf``).
    :param tag: POS tag the LU's name must carry (per ``getNameTag``).
    :param lus: Iterable of LU records (dict-like, with an 'ID' key).
    :return: The matching FrameNet LU object from ``fn.lu``, or None.
    """
    # LU ID -> word count of the LU name, restricted to names containing the phrase.
    name_lengths = {x['ID']: len(getName(x).split(" "))
                    for x in lus if memOf(phrase, getName(x))}
    # Scan candidates from shortest to longest name; first tag match wins.
    for lu_id, _word_count in sorted(name_lengths.items(), key=lambda item: item[1]):
        candidate = fn.lu(lu_id)  # fetch once instead of twice per hit
        if getNameTag(candidate) == tag:
            return candidate
    return None
Beispiel #11
0
# Exploratory FrameNet snippets: inspect a frame object `f` (bound elsewhere).
f.ID
f.definition
set(f.lexUnit.keys())

# Names of the frame's frame elements.
[x.name for x in f.FE]

f.frameRelations


# Frames whose lemmas match "a little" (case-insensitive regex).
fn.frames_by_lemma(r'(?i)a little')




# Inspect lexical unit #256: name, definition, owning frame, lexeme(s).
fn.lu(256).name
fn.lu(256).definition
fn.lu(256).frame
fn.lu(256).lexeme



# Full-text documents shipped with the FrameNet corpus.
docs = fn.documents()
len(docs)
docs[0].keys()
docs[0].filename

docs[0].annotated_document()


def show_mapping_for_one_verb_short(nlp: object, lu_id: str,
                                    lu_text: str) -> None:
    """Prints each example sentence and the calculated mapping (short phrase approach) for a verb/LU.

    :param lu_text: String. Lexical Unit in String Format.
    :param lu_id: String. Lexical Unit ID in String Format.
    :param nlp: Object. Preloaded Language Model.
    :return: None.
    """
    lu_object = fn.lu(lu_id)
    examples = lu_object.exemplars
    sentence_count = 0

    if len(examples) > 0:

        for example in examples:
            # Direct mapping of the cf 'agent'/'theme' roles to fn frame elements.
            agent_mapping = ['CF_Agent']
            theme_mapping = ['CF_Theme']
            sentence = example.text
            fes = example.frameAnnotation.FE[0]

            # Each detection result looks like:
            # ["subject", (position start, position end), "head", 0]
            # (0 means False for the passive boolean; so 0 is active)
            logical_subject = map.detect_subject_short_phrase(
                nlp, sentence, lu_text)
            logical_object = map.detect_object_short_phrase(
                nlp, sentence, lu_text)

            subject_passive_bool = logical_subject[3] if len(
                logical_subject) > 0 else 0

            if len(logical_subject) > 0:  # a subject was detected
                subject_start = logical_subject[1][0]
                subject_end = logical_subject[1][1]

                # fe looks like this: (start pos, end pos, 'Frame Element name')
                for fe in fes:
                    if subject_start == fe[0] and subject_end == fe[1]:
                        # Active subject plays the agent; a passive subject
                        # takes the theme role instead.
                        if subject_passive_bool == 0:
                            agent_mapping.append(fe[2])
                        else:
                            theme_mapping.append(fe[2])

            if len(logical_object) > 0:
                object_start = logical_object[1][0]
                object_end = logical_object[1][1]

                for fe in fes:
                    if object_start == fe[0] and object_end == fe[1]:
                        # The subject's passive flag is used on purpose:
                        # objects are not marked passive, but with a passive
                        # subject the object has to take the agent role.
                        if subject_passive_bool == 0:
                            theme_mapping.append(fe[2])
                        else:
                            agent_mapping.append(fe[2])

            print(sentence)
            print(agent_mapping)
            print(theme_mapping)
            print("Passivsatz?: " + str(subject_passive_bool) + "\n")
            sentence_count += 1
    else:
        print("No examples found; no mapping possible.\n")
    print('Anzahl der Sätz: ' + str(sentence_count))
def pick_lus_for_evaluation(nlp: object, role_mapping: dict) -> list:
    """Picks pseudo randomly LUs for the evaluation.

    One entry of the Role Mapping Dictionary looks like this:
    {LU ID: ['verb', LU ID, {'CF_Agent', 'Frame Element'}, {'CF_Theme', 'Frame Element'}, 'Frame', Passive Cases, {CF}]}
    :param nlp: Object. Preloaded language model used for subject detection.
    :param role_mapping: Dictionary. The finished dictionary with all LUs, role mappings, connotation frames etc.
    :return: List. First element are statistics about picked LUs, second element is the list with all picked LUs (IDs).
    """
    candidate_lus = list(role_mapping)
    picked_lus = []

    sentence_count = 0  # usable sentences across all picked LUs
    passive_count = 0   # passive-voice sentences across all picked LUs

    # Drop LUs without a complete entry or without at least one mapped
    # frame element for both the agent and the theme role.
    for lu_id, information in role_mapping.items():
        if len(information) < 6:
            candidate_lus.remove(lu_id)
        elif len(information[2]) < 2 or len(information[3]) < 2:
            candidate_lus.remove(lu_id)

    while len(picked_lus) <= 25:
        if not candidate_lus:
            # Guard: the original crashed in random.randint(0, -1) when all
            # candidates were exhausted before 26 LUs could be picked.
            break

        chosen_lu = random.choice(candidate_lus)

        lu_text = role_mapping[chosen_lu][0]
        lu_object = fn.lu(chosen_lu)
        examples = lu_object.exemplars

        agent_mapping = role_mapping[chosen_lu][2]
        theme_mapping = role_mapping[chosen_lu][3]

        usable_sentence_count = 0
        passive_cases_this_lu = 0

        for example in examples:
            sentence = example.text
            # One FE entry looks like this: (start pos, end pos, 'Frame Element name')
            fes = (example.frameAnnotation.FE)[0]

            agent_hits = [fe[2] for fe in fes if fe[2] in agent_mapping]
            theme_hits = [fe[2] for fe in fes if fe[2] in theme_mapping]

            # A sentence is usable only when both an agent and a theme FE occur.
            if agent_hits and theme_hits:
                usable_sentence_count += 1

            # Passive case checking ('passive' is an integer 0 or 1).
            subject = map.detect_subject(nlp, sentence, lu_text)
            passive = subject[3] if len(subject) == 4 else 0
            passive_cases_this_lu += passive

        if usable_sentence_count != 0:
            sentence_count += usable_sentence_count
            passive_count += passive_cases_this_lu
            picked_lus.append(chosen_lu)
            candidate_lus.remove(chosen_lu)

    return [sentence_count, passive_count, picked_lus]
_RATING_CHOICES = ['-2', '-1', '0', '1', '2', '?']


def _prompt_rating(prompt: str) -> str:
    """Ask *prompt* repeatedly until the user types a valid rating (-2..2 or '?')."""
    answer = input(prompt)
    while answer not in _RATING_CHOICES:
        answer = input(prompt)
    return answer


def _found_frame_elements(fes, mapping, sentence):
    """Return "FE -> 'text'" strings for every annotated FE whose name is in *mapping*.

    One *fes* entry looks like this: (start pos, end pos, 'Frame Element name').
    """
    found = []
    for fe in fes:
        if fe[2] in mapping:
            found.append("{} -> '{}'".format(fe[2], sentence[fe[0]:fe[1]]))
    return found


def cf_evaluation(role_mapping: dict, eval_list: list) -> None:
    """Interactive program for evaluating the connotation frames in the FrameNet context.

    The persisted structure looks like this:
    [{statistics dict}, {LU ID: [lu_text, lu_id, frame, [sentence1_eval, sentence2_eval]]}]
    :param eval_list: List. A list of LU IDs that are going to be evaluated.
    :param role_mapping: Dictionary. The finished dictionary with all LUs, role mappings, connotation frames etc.
    :return: None.
    """
    name = (input("What's your name? ")).lower()
    eval_path = os.path.join('eval', name + '_cf_eval.pkl')

    if os.path.exists(eval_path):
        with open(eval_path, 'rb') as f:
            updated_eval = pickle.load(f)
            last_stopped = updated_eval[0]['last_stopped']

            # The evaluation process won't run if it's already completed.
            if last_stopped == (len(eval_list) - 1):
                return
    else:
        last_stopped = 0
        new_eval = [{'last_stopped': 0, 'sentence_count': 0}, {}]
        with open(eval_path, 'wb') as f:
            pickle.dump(new_eval, f, pickle.HIGHEST_PROTOCOL)

        with open(eval_path, 'rb') as f:
            updated_eval = pickle.load(f)

    for lexical_unit in eval_list[last_stopped:]:

        to_be_evaluated = role_mapping[lexical_unit]

        if len(to_be_evaluated) < 4:  # If no proper mapping was found
            continue

        # Used later to make sure agent and theme both occur in a sentence
        # before it is offered for evaluation.
        agent_mapping = to_be_evaluated[2]
        theme_mapping = to_be_evaluated[3]
        connotation_frame = to_be_evaluated[6]

        lu_text = to_be_evaluated[0]
        lu_id = to_be_evaluated[1]
        lu_object = fn.lu(lu_id)
        examples = lu_object.exemplars

        this_verb_eval = [lu_text, lu_id, lu_object.frame.name]

        sentence_count = 0

        print("Verb: '{}'\n".format(lu_text))
        print(
            "Please answer the following Connotation Frame Questions. How would you rate the following features?:\n"
        )

        # Verb-level ratings: [feature, CF value, verb rating, sentence ratings...]
        persp_writer_agent_list = [
            'Perspective(writer->agent)',
            connotation_frame['Perspective(writer->agent)'],
            _prompt_rating(
                "Perspective(writer->agent) [please type in value between -2 and 2 or '?']:\n"
            )
        ]
        persp_writer_theme_list = [
            'Perspective(writer->theme)',
            connotation_frame['Perspective(writer->theme)'],
            _prompt_rating(
                "Perspective(writer->theme) [please type in value between -2 and 2 or '?']:\n"
            )
        ]
        persp_agent_theme_list = [
            'Perspective(agent->theme)',
            connotation_frame['Perspective(agent->theme)'],
            _prompt_rating(
                "Perspective(agent->theme) [please type in value between -2 and 2 or '?']:\n"
            )
        ]
        persp_theme_agent_list = [
            'Perspective(theme->agent)',
            connotation_frame['Perspective(theme->agent)'],
            _prompt_rating(
                "Perspective(theme->agent) [please type in value between -2 and 2 or '?']:\n"
            )
        ]
        value_theme_list = [
            'Value(theme)', connotation_frame['Value(theme)'],
            _prompt_rating(
                "Value(theme) [please type in value between -2 and 2 or '?']:\n")
        ]

        verb_cf_eval = []

        for example in examples:

            if sentence_count == 2:  # evaluate at most two sentences per verb
                sentence_count = 0
                break

            sentence = example.text
            fes = (example.frameAnnotation.FE)[0]

            # Skip sentences that do not contain both a mapped agent FE and a
            # mapped theme FE — an evaluation wouldn't make sense otherwise.
            if not _found_frame_elements(fes, agent_mapping, sentence):
                continue
            if not _found_frame_elements(fes, theme_mapping, sentence):
                continue

            # CF in Sentence Evaluation
            print("Verb: '{}'\n".format(lu_text))
            print("The sentence to be evaluated (VERB: {}): \n{}\n".format(
                lu_text.upper(), sentence))

            persp_writer_agent_list.append(
                _prompt_rating(
                    "Perspective(writer->agent) [please type in value between -2 and 2 or '?']:\n"
                ))
            persp_writer_theme_list.append(
                _prompt_rating(
                    "Perspective(writer->theme) [please type in value between -2 and 2 or '?']:\n"
                ))
            persp_agent_theme_list.append(
                _prompt_rating(
                    "Perspective(agent->theme) [please type in value between -2 and 2 or '?']:\n"
                ))
            persp_theme_agent_list.append(
                _prompt_rating(
                    "Perspective(theme->agent) [please type in value between -2 and 2 or '?']:\n"
                ))
            value_theme_list.append(
                _prompt_rating(
                    "Value(theme) [please type in value between -2 and 2 or '?']:\n"
                ))

            updated_eval[0]['sentence_count'] += 1
            this_verb_eval.append(sentence)
            sentence_count += 1

        verb_cf_eval.append(tuple(persp_writer_agent_list))
        verb_cf_eval.append(tuple(persp_writer_theme_list))
        verb_cf_eval.append(tuple(persp_agent_theme_list))
        verb_cf_eval.append(tuple(persp_theme_agent_list))
        verb_cf_eval.append(tuple(value_theme_list))

        this_verb_eval.insert(3, verb_cf_eval)

        updated_eval[1][lu_id] = this_verb_eval
        updated_eval[0]['last_stopped'] += 1

        # Persist progress after every LU so the evaluation can be resumed.
        with open(eval_path, 'wb') as f:
            pickle.dump(updated_eval, f, pickle.HIGHEST_PROTOCOL)

    print("Evaluation completed! Thank you")
def _found_frame_elements(fes, mapping, sentence):
    """Return "FE -> 'text'" strings for every annotated FE whose name is in *mapping*.

    One *fes* entry looks like this: (start pos, end pos, 'Frame Element name').
    """
    found = []
    for fe in fes:
        if fe[2] in mapping:
            found.append("{} -> '{}'".format(fe[2], sentence[fe[0]:fe[1]]))
    return found


def map_evaluation(role_mapping: dict, approach: str, eval_list: list) -> None:
    """The interactive evaluation program for evaluating the role mapping from a technical view.

    :param approach: String. The name of the approach to be evaluated (to save the approach name in file name)
    :param eval_list: List. A list of LU IDs that are going to be evaluated.
    :param role_mapping: Dictionary. The finished dictionary with all LUs, role mappings, connotation frames etc.
    :return: None.
    """
    name = (input("What's your name? ")).lower()
    # Build the progress-file path once instead of repeating the join four times.
    eval_path = os.path.join('eval', '{}_map_{}_eval.pkl'.format(name, approach))

    if os.path.exists(eval_path):
        with open(eval_path, 'rb') as f:
            updated_eval = pickle.load(f)
            last_stopped = updated_eval[0]['last_stopped']

            # The evaluation process won't run if it's already completed.
            if last_stopped == (len(eval_list) - 1):
                return
    else:
        last_stopped = 0
        new_eval = [{
            'last_stopped': 0,
            'sentence_count': 0,
            'agent_positive_count': 0,
            'agent_negative_count': 0,
            'agent_not_existing_count': 0,
            'theme_positive_count': 0,
            'theme_negative_count': 0,
            'theme_not_existing_count': 0,
            'agent_not_sure_count': 0,
            'theme_not_sure_count': 0
        }, {}]
        with open(eval_path, 'wb') as f:
            pickle.dump(new_eval, f, pickle.HIGHEST_PROTOCOL)

        with open(eval_path, 'rb') as f:
            updated_eval = pickle.load(f)

    for lexical_unit in eval_list[last_stopped:]:

        to_be_evaluated = role_mapping[lexical_unit]

        if len(to_be_evaluated) < 4:  # If no proper mapping was found
            continue

        agent_mapping = to_be_evaluated[2]
        theme_mapping = to_be_evaluated[3]

        lu_text = to_be_evaluated[0]
        lu_id = to_be_evaluated[1]
        lu_object = fn.lu(lu_id)
        examples = lu_object.exemplars

        print("Verb/Lexical Unit: '{}'\n".format(lu_text))

        this_verb_eval = [lu_text, lu_id, lu_object.frame.name]

        sentence_count = 0

        for example in examples:

            if sentence_count == 2:  # evaluate at most two sentences per verb
                sentence_count = 0
                break

            this_sentence_eval = []

            sentence = example.text
            fes = (example.frameAnnotation.FE)[0]

            # Skip sentences lacking a mapped agent FE or a mapped theme FE —
            # an evaluation wouldn't make sense for them.
            agent_frame_elements_in_sentence = _found_frame_elements(
                fes, agent_mapping, sentence)
            if len(agent_frame_elements_in_sentence) == 0:
                continue

            theme_frame_elements_in_sentence = _found_frame_elements(
                fes, theme_mapping, sentence)
            if len(theme_frame_elements_in_sentence) == 0:
                continue

            # Agent Evaluation
            print("\n-------------------------AGENT------------------------\n")
            print("The sentence to be evaluated (VERB: {}): \n{}\n".format(
                lu_text.upper(), sentence))
            print(
                "For the role of the Agent, the following Frame Element(s) have been found:"
            )
            for frame_element in agent_frame_elements_in_sentence:
                print(frame_element)
                this_sentence_eval.append('Agent Mapping: ' +
                                          str(frame_element))

            print(
                "\nDoes at least one of the found Frame Elements match the Role of the Agent?"
            )
            agent_answer = input("y/n/-/?: ")

            while agent_answer not in ['y', 'n', '-', '?']:
                print(
                    "Please answer the question by typing\n'y' for yes\n'n' for no\n'-' if there is actually "
                    "no Agent in the sentence\n'?' if you're not sure")
                # Fix: the retry prompt previously omitted the '?' option
                # even though the help text offers it.
                agent_answer = input("y/n/-/?: ")

            # Theme Evaluation
            print(
                "\n\n-------------------------THEME------------------------\n")
            print("The sentence to be evaluated (VERB: {}): \n{}\n".format(
                lu_text.upper(), sentence))
            print(
                "For the role of the Theme, the following Frame Element(s) have been found:"
            )
            for frame_element in theme_frame_elements_in_sentence:
                print(frame_element)
                this_sentence_eval.append('Theme Mapping :' +
                                          str(frame_element))

            print(
                "\nDoes at least one of the found Frame Elements match the Role of the Theme?"
            )
            theme_answer = input("y/n/-/?: ")

            while theme_answer not in ['y', 'n', '-', '?']:
                print(
                    "Please answer the question by typing:\n'y' for yes\n'n' for no\n'-' if there is actually "
                    "no Theme in the sentence\n'?' if you're not sure")
                theme_answer = input("y/n/-/?: ")

            updated_eval[0]['sentence_count'] += 1

            # Tally the answers ('-' means the role does not exist in the sentence).
            if agent_answer == 'y':
                updated_eval[0]['agent_positive_count'] += 1
            elif agent_answer == 'n':
                updated_eval[0]['agent_negative_count'] += 1
            elif agent_answer == '?':
                updated_eval[0]['agent_not_sure_count'] += 1
            else:
                updated_eval[0]['agent_not_existing_count'] += 1

            if theme_answer == 'y':
                updated_eval[0]['theme_positive_count'] += 1
            elif theme_answer == 'n':
                updated_eval[0]['theme_negative_count'] += 1
            elif theme_answer == '?':
                updated_eval[0]['theme_not_sure_count'] += 1
            else:
                updated_eval[0]['theme_not_existing_count'] += 1

            this_sentence_eval.append(sentence)
            this_sentence_eval.append('Agent Answer: ' + agent_answer)
            this_sentence_eval.append('Theme Answer: ' + theme_answer)

            this_verb_eval.append(this_sentence_eval)

            sentence_count += 1

        updated_eval[1][lu_id] = this_verb_eval
        updated_eval[0]['last_stopped'] += 1

        # Persist progress after every LU so the evaluation can be resumed.
        with open(eval_path, 'wb') as f:
            pickle.dump(updated_eval, f, pickle.HIGHEST_PROTOCOL)

    print("Evaluation completed! Thank you")
import sys
from nltk.corpus import framenet as fn

sys.path.append('../')
from lexicon_utils import lemma_from_lexemes, lemmas_from_lu_name

# Smoke tests for the lemma helpers, driven by FrameNet lexical units.

# Multi-word LU 8009 ("give up"): the separator joins the individual lexemes.
multiword_lu = fn.lu(8009)
for sep, expected in ((' ', 'give up'), ('_', 'give_up')):
    assert lemma_from_lexemes(lexemes=multiword_lu.lexemes,
                              separator=sep) == expected

# Single-word LU 16601 ("help"): with one lexeme the separator is irrelevant.
single_lu = fn.lu(16601)
assert lemma_from_lexemes(lexemes=single_lu.lexemes,
                          separator=' ') == 'help'

# Name-based lookup yields the set of lemmas for the LU name.
assert lemmas_from_lu_name(lu_lemma='car') == {'car'}
# Beispiel #17
'    ?person dbo:birthPlace ?place.',
'}'
))

# NOTE(review): Python 2 snippet (print statement) using the `pattern` library's
# DBPedia SPARQL client; the `sparql` query it iterates over is truncated above.
for r in DBPedia().search(sparql, start=1, count=1000):
    print '%s (%s)' % (r.person.name, r.place.name)




 
##______________________________Framenet______________________________

# NLTK FrameNet API demos (Python 2 REPL transcript; bare expressions were
# evaluated interactively and their results shown in the shell).
from nltk.corpus import framenet as fn
# An LU object is shared between the corpus and its frame's lexUnit dict.
fn.lu(3238).frame.lexUnit['glint.v'] is fn.lu(3238)

# A frame looked up by name is the same object reached via one of its LUs.
fn.frame_by_name('Replacing') is fn.lus('replace.v')[0].frame

# Frame relations are reachable both from a frame and by frame name.
fn.lus('prejudice.n')[0].frame.frameRelations == fn.frame_relations('Partiality')


# 'look.n' is ambiguous: each matching LU belongs to a different frame.
fn.lus('look.n')[0].frame
fn.lus('look.n')[1].frame


for f in fn.lus('look.n'):
    print f.frame.name


# Case-insensitive regex search over frame names (matches e.g. 'Perception').
result = fn.frames(r'(?i)erception')
# Beispiel #18
def map_cf_roles_and_fes_long_phrase_all_sents(
        nlp: object, mapping_verb_lu_cfs: dict) -> dict:
    """Mapping of all Connotation Frame Roles and Frame Elements in FrameNet through Subjects/Objects in a sentence.

    The mapping is taking a dictionary as an input which contains the FrameNet Lexical Units as keys and the (already)
    mapped Connotation Frames as values. One key - value pair looks like this:
    {('verb', lu id): {'Perspective(writer->object)':'0,3', ...}}

    For each verb (-> for each Lexical Unit) a full mapping of both Connotation Frame Roles (Agent & Patient) is being
    performed. It is considered that the 'Agent' role aligns with the logical subject of a sentence and the 'Patient'
    role aligns with the logical object of a sentence.

    The mapping is carried out as following:
    - For each LU, every exemplar sentence is inspected.
    - Both subject and object are being detected in each sentence.
    - A Frame Element whose annotated span coincides exactly with the detected subject span gets the 'Agent' role
      mapped to it; an FE coinciding with the object span gets the 'Patient' ('Theme') role.
    - If the subject is marked as passive, the roles are swapped: the subject's FE receives the 'Theme' role and the
      object's FE receives the 'Agent' role.

    One example of the returned dictionary looks like this:
    {123: ['verb', lu id, {agent FE names}, {theme FE names}, frame name, passive count, CF]}

    :param nlp: Loaded NLP pipeline (e.g. a spaCy Language object) passed through to the subject/object detectors.
    :param mapping_verb_lu_cfs: Dictionary. Keys are a tuple containing verb and lu id, values are the respective CF.
    :return: Dictionary. Keys are LU IDs, values are the verbs, role mappings, CFs and example sentences in a list.
    """
    mapping = {}

    for key, value in mapping_verb_lu_cfs.items():
        information = []

        lu_text, lu_id = key
        lu_object = fn.lu(lu_id)
        information.append(lu_text)
        information.append(lu_id)

        frame_text = lu_object.frame.name
        examples = lu_object.exemplars

        # Counts how often a passive subject forced a role swap across all exemplars.
        passive_count = 0

        if len(examples) > 0:
            # First element marks which CF role the following FE names were mapped to.
            agent_mapping = ['CF_Agent']
            theme_mapping = ['CF_Theme']

            for example in examples:
                sentence = example.text
                # List of FE annotations; each entry looks like (start pos, end pos, 'FE name').
                fes = example.frameAnnotation.FE[0]

                # Detector results look like: ["subject", (start, end), "head", passive_flag]
                # where passive_flag 0 means active voice.
                logical_subject = detect_subject_long_phrase(
                    nlp, sentence, lu_text)
                logical_object = detect_object_long_phrase(
                    nlp, sentence, lu_text)

                subject_passive_bool = logical_subject[3] if len(
                    logical_subject) > 0 else 0

                if len(logical_subject) > 0:
                    subject_start = logical_subject[1][0]
                    subject_end = logical_subject[1][1]

                    for fe in fes:
                        # Exact span match between detected subject and annotated FE.
                        if subject_start == fe[0] and subject_end == fe[1]:
                            if subject_passive_bool == 0:
                                agent_mapping.append(fe[2])
                            else:
                                # Passive subject takes the Theme role instead.
                                theme_mapping.append(fe[2])
                                passive_count += 1

                if len(logical_object) > 0:
                    object_start = logical_object[1][0]
                    object_end = logical_object[1][1]

                    for fe in fes:
                        if object_start == fe[0] and object_end == fe[1]:
                            # The SUBJECT's passive flag decides on purpose: objects are
                            # not marked as passive themselves, but when the subject is
                            # passive the object has to take the Agent role.
                            if subject_passive_bool == 0:
                                theme_mapping.append(fe[2])
                            else:
                                agent_mapping.append(fe[2])

            # Deduplicate the collected FE names (the role marker stays as a member).
            information.append(set(agent_mapping))
            information.append(set(theme_mapping))

            information.append(frame_text)
            information.append(passive_count)
            information.append(value)

        else:
            information.append('No examples found. No Mapping possible')

        print(information)

        mapping[lu_id] = information

    return mapping