Example #1
class EpisodicMemoryNode(PerceptionBase):
    def __init__(self):
        super(EpisodicMemoryNode, self).__init__("episodic_memory")

        rospy.Subscriber('/mhri/person_presence_state', PersonPresenceState,
                         self.pps_callback)
        rospy.Subscriber('/mhri/person_identity_state', PersonIdentity,
                         self.pi_callback)
        rospy.Subscriber('/mhri/mset', MSetInfo, self.mset_callback)

        self.graph = KnowledgeGraph()

        #self.pub = rospy.Publisher("/mhri/events", PerceptionEvent, queue_size=1)

        self.persons = {}
        self.episodes = {}

        self.load_episodic_memory()

        rospack = rospkg.RosPack()
        perception_face_pkg_path = rospack.get_path('perception_face')
        self.face_registry_path = perception_face_pkg_path + '/data/face_registry'

        self.names_file_path = perception_face_pkg_path + '/data/name_info.json'
        self.load_people_names(self.names_file_path)

        self.face_memorized_pub = rospy.Publisher('/mhri/face_memorized',
                                                  String,
                                                  queue_size=1)

        rospy.loginfo("episodic_memory_node initialized.")
Example #2
def main():
    parser = argparse.ArgumentParser()

    args = parser.parse_args()
    args.dataset = config["dataset"]  # 'config' is assumed to be a module-level dict supplying the dataset name

    # Create AmazonDataset instance for dataset.
    # ========== BEGIN ========== #
    print("Load", args.dataset, "dataset from file...")
    if not os.path.isdir(TMP_DIR[args.dataset]):
        os.makedirs(TMP_DIR[args.dataset])
    dataset = AmazonDataset(DATASET_DIR[args.dataset])
    save_dataset(args.dataset, dataset)

    # Generate knowledge graph instance.
    # ========== BEGIN ========== #
    print("Create", args.dataset, "knowledge graph from dataset...")
    dataset = load_dataset(args.dataset)
    kg = KnowledgeGraph(dataset)
    kg.compute_degrees()
    save_kg(args.dataset, kg)
    # =========== END =========== #

    # Generate train/test labels.
    # ========== BEGIN ========== #
    print("Generate", args.dataset, "train/test labels.")
    generate_labels(args.dataset, "train")
    generate_labels(args.dataset, "test")
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        type=str,
                        default=BEAUTY,
                        help='One of {BEAUTY, CELL, CD, CLOTH}.')
    args = parser.parse_args()

    # Create AmazonDataset instance for dataset.
    # ========== BEGIN ========== #
    print('Load', args.dataset, 'dataset from file...')
    if not os.path.isdir(TMP_DIR[args.dataset]):
        # if the required temp dir doesn't exist, create it (TMP_DIR is a dict from utils.py)
        os.makedirs(TMP_DIR[args.dataset])
    dataset = AmazonDataset(DATASET_DIR[args.dataset])
    save_dataset(args.dataset, dataset)  # pickle-dumps the dataset to a file; no return value

    # Generate knowledge graph instance.
    # ========== BEGIN ========== #
    print('Create', args.dataset, 'knowledge graph from dataset...')
    dataset = load_dataset(args.dataset)
    kg = KnowledgeGraph(dataset)
    kg.compute_degrees()
    save_kg(args.dataset, kg)  #uses pickle.dump
    # =========== END =========== #

    # Generate train/test labels.
    # ========== BEGIN ========== #
    print('Generate', args.dataset, 'train/test labels.')
    generate_labels(args.dataset, 'train')
    generate_labels(args.dataset, 'test')
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        type=str,
                        default=BEAUTY,
                        help='One of {BEAUTY, CELL, CD, CLOTH}.')
    args = parser.parse_args()

    # Create AmazonDataset instance for dataset.
    # ========== BEGIN ========== #
    print('Load', args.dataset, 'dataset from file...')
    if not os.path.isdir(TMP_DIR[args.dataset]):
        os.makedirs(TMP_DIR[args.dataset])
    dataset = AmazonDataset(DATASET_DIR[args.dataset])
    save_dataset(args.dataset, dataset)

    # Generate knowledge graph instance.
    # ========== BEGIN ========== #
    print('Create', args.dataset, 'knowledge graph from dataset...')
    dataset = load_dataset(args.dataset)
    kg = KnowledgeGraph(dataset)
    kg.compute_degrees()
    save_kg(args.dataset, kg)
    # =========== END =========== #

    # Generate train/test labels.
    # ========== BEGIN ========== #
    print('Generate', args.dataset, 'train/test labels.')
    generate_labels(args.dataset, 'train')
    generate_labels(args.dataset, 'test')
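
The save_dataset, load_dataset, and save_kg helpers called in Examples #2-#4 are not shown; the "uses pickle.dump" comments in Example #3 suggest thin pickle wrappers. A minimal sketch, assuming a one-file-per-object layout under TMP_DIR (the file names here are guesses, not the project's actual code):

import os
import pickle

def save_dataset(dataset_name, dataset_obj):
    # Hypothetical helper: pickle the dataset into its temp directory.
    with open(os.path.join(TMP_DIR[dataset_name], 'dataset.pkl'), 'wb') as f:
        pickle.dump(dataset_obj, f)

def load_dataset(dataset_name):
    # Hypothetical counterpart of save_dataset.
    with open(os.path.join(TMP_DIR[dataset_name], 'dataset.pkl'), 'rb') as f:
        return pickle.load(f)

def save_kg(dataset_name, kg):
    # Hypothetical helper: pickle the knowledge graph next to the dataset.
    with open(os.path.join(TMP_DIR[dataset_name], 'kg.pkl'), 'wb') as f:
        pickle.dump(kg, f)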
Example #5
def load_data(dataset_name, word_emb_size, node_embed_type):
    graph_path = "data/" + dataset_name + "/" + dataset_name + "_kg.txt"
    train_path = "data/" + dataset_name + "/" + dataset_name + "_train.txt"
    test_path = "data/" + dataset_name + "/" + dataset_name + "_test.txt"
    vocab_path = "data/" + dataset_name + "/vocab/"
    node_embed_path = "data/" + dataset_name + "/" + dataset_name + "_embed_" + node_embed_type + ".npy"

    graph = KnowledgeGraph(graph_path, vocab_path)
    train = load_questions(train_path, parser)  # 'parser' is assumed to be defined at module level
    test = load_questions(test_path, parser)
    rel_embedding = init_rel_embedding("glove.840B.300d.txt",
                                       camel_case_spliter, word_emb_size,
                                       graph)
    node_embedding = np.load(node_embed_path)

    return node_embedding, rel_embedding, graph, test, train
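
A usage sketch for load_data; the dataset name and embedding type below are hypothetical placeholders, not values confirmed by the source:

# Hypothetical call: 'metaqa' and 'transe' are placeholder names.
node_embedding, rel_embedding, graph, test, train = load_data(
    'metaqa', word_emb_size=300, node_embed_type='transe')
print(len(train), 'training questions,', len(test), 'test questions')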
Example #6
    def __init__(self, path_KG, path_QA, split_ratio=0.8, using_cache=True):
        self.KG = KnowledgeGraph(path_KG)
        self.embedder = Embedder()
        self.training = True  # whether we are in the training phase
        self._iter_i = 0
        self._split_ratio = split_ratio

        # try to load from cache
        if using_cache and Utility.Binary.exists('dataset'):
            self.questions = Utility.Binary.load('dataset')
            print('{} questions loaded'.format(len(self.questions)))
            return

        # read the original questions
        questions = pd.read_csv(
            path_QA,
            sep='\t',
            header=None,
            names=['question_sentence', 'answer_set', 'answer_path'])
        questions['answer'] = questions['answer_set'].apply(
            lambda x: x.split('(')[0])
        questions['q_split'] = questions['question_sentence'].apply(
            lambda x: x.lower().split(' '))
        questions['e_s'] = questions['answer_path'].apply(
            lambda x: x.split('#')[0])
        # find head entity e_s, answer, and question_list by parsing the question_sentence
        questions['q_str'] = [
            self.parse_question(row['question_sentence'].split('?')[0],
                                row['e_s'])
            for idx, row in questions.iterrows()
        ]

        # Encode the questions.
        # NOTE: for this small dataset we trade space for time so the questions are
        # not re-embedded on every pass; a large dataset would need separate handling.
        questions['q'] = questions['q_str'].apply(
            lambda q: self.embed_question(q))

        question_list = questions[['q_str', 'q', 'e_s',
                                   'answer']].values.tolist()
        question_list = [tuple(x) for x in question_list]
        self.questions = question_list
        print('{} questions loaded'.format(len(question_list)))

        if using_cache:
            Utility.Binary.save('dataset', question_list)
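
The parsing in the constructor implies the layout of the QA file: three tab-separated columns, with the answer taken up to the first '(' of answer_set and the head entity e_s up to the first '#' of answer_path. An illustrative row, assuming that reading is correct (the values are invented):

# Hypothetical tab-separated row of the QA file (values made up for illustration):
#   question_sentence            answer_set          answer_path
#   "what did [X] direct?"       "SomeFilm(1995)"    "X#directed#SomeFilm"
#
# answer -> 'SomeFilm'  (answer_set up to the first '(')
# e_s    -> 'X'         (answer_path up to the first '#')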
Example #7
def test_add_relationship():
    kg = KnowledgeGraph()
    kg.add_relationship('DMG mori CMX', 'Heidenhain TNC 640', 'related_to')
Example #8
def test_add_node_by_name():
    kg = KnowledgeGraph()
    kg.add_node_by_name('DMG mori CMX')
Example #9
def test_search_node_by_name():
    kg = KnowledgeGraph()
    kg.search_node_by_name('DMG mori CMX')
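
The three tests above call the API without asserting anything. A round-trip sketch, assuming search_node_by_name returns the stored node or None (a return value the tests above do not confirm):

def test_round_trip():
    kg = KnowledgeGraph()
    kg.add_node_by_name('DMG mori CMX')
    kg.add_relationship('DMG mori CMX', 'Heidenhain TNC 640', 'related_to')
    # Assumption: search_node_by_name returns the matching node, else None.
    node = kg.search_node_by_name('DMG mori CMX')
    assert node is not None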
Example #10
class EpisodicMemoryNode(PerceptionBase):
    '''
    Episodic Memory Node
    '''
    def __init__(self):
        super(EpisodicMemoryNode, self).__init__("episodic_memory")

        rospy.Subscriber('/mhri/person_presence_state', PersonPresenceState,
                         self.pps_callback)
        rospy.Subscriber('/mhri/person_identity_state', PersonIdentity,
                         self.pi_callback)
        rospy.Subscriber('/mhri/mset', MSetInfo, self.mset_callback)

        self.graph = KnowledgeGraph()

        #self.pub = rospy.Publisher("/mhri/events", PerceptionEvent, queue_size=1)

        self.persons = {}
        self.episodes = {}

        self.load_episodic_memory()

        rospack = rospkg.RosPack()
        perception_face_pkg_path = rospack.get_path('perception_face')
        self.face_registry_path = perception_face_pkg_path + '/data/face_registry'

        self.names_file_path = perception_face_pkg_path + '/data/name_info.json'
        self.load_people_names(self.names_file_path)

        self.face_memorized_pub = rospy.Publisher('/mhri/face_memorized',
                                                  String,
                                                  queue_size=1)

        rospy.loginfo("episodic_memory_node initialized.")

    def load_people_names(self, names_file_path):
        rospy.loginfo('episodic_memory - Names File: %s', names_file_path)
        self.names = json.load(open(names_file_path))

    def save_person_name(self, person_id, name):
        if person_id not in self.names:
            self.names[person_id] = name
        with open(self.names_file_path, 'w') as names_file:
            json.dump(self.names, names_file)

    def load_episodic_memory(self):
        '''
        Load all the memory in the episodic memory.
        '''
        query = 'match (x:Episode) return x.person_id, x.name, x.confidence, x.eyeglasses,\
                 x.cloth_color, x.hair_length, x.start_time, x.end_time, x.gender'

        categories = [
            'name', 'confidence', 'eyeglasses', 'cloth_color', 'hair_length',
            'start_time', 'end_time', 'gender'
        ]
        result = self.graph.Run(query)
        records = []
        for record in result:
            records.append(record)
        rospy.loginfo("EPISODIC_MEMORY: Total No of episodes = %d",
                      len(records))
        persons = {}
        for record in records:
            person_id = record['x.person_id']
            if person_id not in persons:
                persons[person_id] = {}
            for category in categories:
                if category not in persons[person_id]:
                    persons[person_id][category] = {}
                value = record['x.' + category]
                if value not in persons[person_id][category]:
                    persons[person_id][category][value] = 0
                persons[person_id][category][value] += 1
        for person_id in persons:
            for category in categories:
                max_num = 0
                max_val = ''
                values = persons[person_id][category]
                for key in values:
                    rospy.loginfo('EPISODES(%s): %s is %s = %d', person_id,
                                  category, key, values[key])
                    if key == '':
                        continue
                    if max_val == '' or values[key] >= max_num:
                        max_num = values[key]
                        max_val = key
                persons[person_id][category] = max_val
                rospy.loginfo('EPISODES(%s): Decision for %s is %s', person_id,
                              category, max_val)
                if category == 'name':
                    rospy.set_param('/{0}/{1}'.format(str(person_id),
                                                      str(category)),
                                    max_val)  # 170118
                    rospy.loginfo('/%s/%s = %s', str(person_id), str(category),
                                  max_val)
        self.episodes = persons
        rospy.loginfo("EPISODIC_MEMORY: Memory of %d people recalled.",
                      len(self.episodes.keys()))

    def person_appeared(self, trk_id):
        '''
        A new person has appeared!
        '''
        if trk_id not in self.persons:
            self.persons[trk_id] = {}
            self.persons[trk_id]['start_time'] = time.time()

    def person_disappeared(self, trk_id):
        '''
        A person has disappeared!
        '''
        if trk_id in self.persons:
            self.persons[trk_id]['end_time'] = time.time()
            self.memorize_episode(trk_id)
        else:
            rospy.logerr("I DO NOT KNOW A PERSON WITH AN ID=%s", trk_id)

    def memorize_episode(self, trk_id):
        '''
        Store an episode with a person(trk_id).
        '''
        if trk_id not in self.persons:
            rospy.logerr("I DO NOT KNOW A PERSON WITH AN ID=%s", trk_id)
            return

        episode = self.persons[trk_id]

        # if the person has not been identified, no memory lives on.
        # if an episode ended in 10 seconds, no memory lives on.
        duration = episode['end_time'] - episode['start_time']
        if 'person_id' not in episode or duration < 10:
            rospy.loginfo('EPISODIC_MEMORY: No Memory with %s', trk_id)
            return

        rospy.loginfo("MEMORIZING EPISODE (%s):======\n%s\n=======", trk_id,
                      episode)
        self.graph.Create('Episode', 'human_id', trk_id)
        self.graph.Set('Episode', 'human_id', trk_id, 'person_id',
                       episode['person_id'])
        self.graph.Set('Episode', 'human_id', trk_id, 'confidence',
                       episode.get('confidence', 0))
        self.graph.Set('Episode', 'human_id', trk_id, 'name',
                       episode.get('name', ''))
        self.graph.Set('Episode', 'human_id', trk_id, 'gender',
                       episode.get('gender', 0))
        self.graph.Set('Episode', 'human_id', trk_id, 'eyeglasses',
                       episode.get('eyeglasses', 0))
        self.graph.Set('Episode', 'human_id', trk_id, 'cloth_color',
                       episode.get('cloth_color', ''))
        self.graph.Set('Episode', 'human_id', trk_id, 'hair_length',
                       episode.get('hair_length', ''))
        self.graph.Set('Episode', 'human_id', trk_id, 'start_time',
                       episode.get('start_time', 0))
        self.graph.Set('Episode', 'human_id', trk_id, 'end_time',
                       episode.get('end_time', 0))

        # On a first encounter, memorize the face.
        if trk_id == episode['person_id']:
            self.memorize_face(trk_id)
            self.face_memorized_pub.publish("A")
            rospy.set_param('/{0}/{1}'.format(trk_id, 'name'),
                            episode.get('name', ''))  # 170214

    def memorize_face(self, trk_id):
        '''
        Memorize the face.
        '''
        files = []
        home_dir = expanduser("~") + '/.ros'
        img_file_names = list_files(home_dir, '.jpg')
        for file_name in img_file_names:
            pid = file_name.split('/')[-1].split('_')[-1].split('.')[0]
            if pid == trk_id:
                files.append(file_name)
        rospy.loginfo(">>>>FILE NO: %s = %d", trk_id, len(files))
        if len(files) == 0:
            return
        elif len(files) < 3:
            count = len(files)
        else:
            count = 3
        for num in range(count):
            i = randint(0, len(files) - 1)
            file_name = files[i]
            rospy.loginfo('>>>>FILE: %s', file_name)
            copy(file_name, self.face_registry_path)
            files.remove(file_name)

    def identity_recognized(self, trk_id, person_id):
        '''
        Callback invoked when a face has been recognized.
        '''
        if trk_id not in self.persons:
            rospy.logerr("I DO NOT KNOW A PERSON WITH AN ID=%s", trk_id)
        else:
            if person_id == 'unknown':
                person_id = trk_id
            rospy.loginfo('>>> EPISODIC_MEMORY_NODE: person_id=%s', person_id)
            self.persons[trk_id]["person_id"] = person_id
            # Generate a social event based on the recalled memory
            if person_id in self.episodes:
                self.write_memory_recall_event()

    def write_memory_recall_event(self):
        '''
        Generate a social event by recalling episodic memory.
        '''
        wr_data = self.get_memory_data()

        rospy.loginfo("EPISODIC MEMORY_RECALLED: %s", wr_data)

        self.save_to_memory(list(self.conf_data.keys())[0], data=wr_data)
        self.raise_event(list(self.conf_data.keys())[0], 'memory_recalled')

    def get_memory_data(self):
        '''
        Convert retrieved memory into a JSON document.
        '''
        data = {}
        data['human_id'] = []
        data['person_id'] = []
        data['name'] = []
        data['gender'] = []
        data['eyeglasses'] = []
        data['cloth_color'] = []
        data['hair_length'] = []
        data['confidence'] = []
        for trk_id in self.persons:
            if 'person_id' not in self.persons[trk_id]:
                continue
            person_id = self.persons[trk_id]['person_id']
            if person_id in self.episodes:
                memory = self.episodes[person_id]
                data['human_id'].append(trk_id)
                data['person_id'].append(person_id)
                data['name'].append(memory['name'])
                data['gender'].append(memory['gender'])
                data['eyeglasses'].append(memory['eyeglasses'])
                data['cloth_color'].append(memory['cloth_color'])
                data['hair_length'].append(memory['hair_length'])
                data['confidence'].append(memory['confidence'])
        return json.dumps(data)

    def pi_callback(self, pi_msg):
        '''
        pi_msg structure:
            string human_id
            string category
            string value
            int i_value
            float f_value
        '''
        if pi_msg.human_id not in self.persons:
            rospy.logerr("I DO NOT KNOW A PERSON WITH AN ID=%s",
                         pi_msg.human_id)
            return
        rospy.logdebug(
            "EPISODIC_MEMORY: PI MSG = human_id=%s category=%s value=%s i_value=%i f_value=%f",
            pi_msg.human_id, pi_msg.category, pi_msg.value, pi_msg.i_value,
            pi_msg.f_value)
        if pi_msg.category == 'person_id':
            rospy.loginfo('EPISODIC_MEMORY_NODE: person id notified: %s',
                          pi_msg.value)
            self.identity_recognized(pi_msg.human_id, pi_msg.value)
        else:
            if pi_msg.i_value != 0:
                self.persons[pi_msg.human_id][pi_msg.category] = pi_msg.i_value
            elif pi_msg.f_value != 0:
                self.persons[pi_msg.human_id][pi_msg.category] = pi_msg.f_value
            else:
                self.persons[pi_msg.human_id][pi_msg.category] = pi_msg.value
            '''
            else:
                rospy.logerr("EPISODIC_MEMORY: Invalid Value = %s %s %s",
                             pi_msg.value, pi_msg.i_value, pi_msg.f_value)
            '''

    def pps_callback(self, pps_msg):
        '''
        pps_msg structure
        ------------
            Header header
            int16 count
            string[] appeared
            string[] disappeared
        '''
        for human_id in pps_msg.appeared:
            self.person_appeared(human_id)

        for human_id in pps_msg.disappeared:
            self.person_disappeared(human_id)

    def mset_callback(self, mset_msg):
        '''
        mset_msg structure
        -------------
            string key
            string value
        '''
        rospy.loginfo("mset_callback called: %s %s", mset_msg.key,
                      mset_msg.value)
        keys = mset_msg.key.split('/')
        tag = keys[0]
        human_id = keys[1]

        # TODO: a good place to plug in a morphological analyzer
        if tag == 'name':  # the user's name information
            if human_id in self.persons and 'person_id' in self.persons[human_id]:
                rospy.loginfo("mset_callback: name is going to be updated...")
                self.persons[human_id]['name'] = mset_msg.value
                person_id = self.persons[human_id]['person_id']
                self.save_person_name(person_id, mset_msg.value)
                self.update_memory_on_name(person_id, mset_msg.value)
            else:
                rospy.loginfo("mset_callback: name is not updated...")

    def update_memory_on_name(self, person_id, name):
        self.graph.Set('Episode', 'person_id', person_id, 'name', name)
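
The class has no entry point in this listing. A minimal sketch of how such a node is typically launched, assuming PerceptionBase does not already initialize and spin the node:

if __name__ == '__main__':
    rospy.init_node('episodic_memory_node')  # assumed; PerceptionBase may do this itself
    node = EpisodicMemoryNode()
    rospy.spin()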
Example #11
        if len(tuples) > MAX_CANDIDATE_TUPLES:
          tuples = set(random.sample(list(tuples), MAX_CANDIDATE_TUPLES))  # list() keeps this valid on Python 3.11+
        sources = extract_dimension_from_tuples_as_list(tuples, 0)
        relations = extract_dimension_from_tuples_as_list(tuples, 1)
        targets = extract_dimension_from_tuples_as_list(tuples, 2)
        output_row = {
          'question': question,
          'qn_entities': get_str_of_seq(qn_entities),
          'ans_entities': get_str_of_seq(ans_entities),
          'sources': get_str_of_seq(sources),
          'relations': get_str_of_seq(relations),
          'targets': get_str_of_seq(targets)
        }
        writer.writerow(output_row)


if __name__ == "__main__":
  parser = argparse.ArgumentParser(description='Specify arguments')
  parser.add_argument('--input_examples', help='the raw qa pairs', required=True)
  parser.add_argument('--input_graph', help='the graph file', required=True)
  parser.add_argument('--input_doc', help='the doc file', required=False)
  parser.add_argument('--stopwords', help='stopwords file', required=False)
  parser.add_argument('--output_examples', help='the processed output file', required=True)
  args = parser.parse_args()

  #global variables
  knowledge_base = KnowledgeGraph(args.input_graph, unidirectional=False)
  search_index = SearchIndex(args.input_doc, args.stopwords)
  stop_vocab = read_file_as_dict(args.stopwords)
  question_parser = QuestionParser(knowledge_base.get_entities(), stop_vocab)
  main(args)
Example #12
textExtractor4.extract()
textExtractorPipe = TextExtractorPipe()
textExtractorPipe.addTextExtractor(textExtractor1)
textExtractorPipe.addTextExtractor(textExtractor2)
textExtractorPipe.addTextExtractor(textExtractor3)
textExtractorPipe.addTextExtractor(textExtractor4)

nlp = spacy.load('en_core_web_sm')
nlp.add_pipe(nlp.create_pipe('sentencizer'))  # spaCy v2 API; see the v3 note below
doc = nlp(textExtractorPipe.extract())

andOtherPatternMatcher = AndOtherPatternMatcher(nlp)
suchAsMatcher = SuchAsPatternMatcher(nlp)
orOtherMatcher = OrOtherPatternMatcher(nlp)
includingPatternMatcher = IncludingPatternMatcher(nlp)
especiallyPatternMatcher = EspeciallyPatternMatcher(nlp)
matcherPipe = MatcherPipe()
matcherPipe.addMatcher(andOtherPatternMatcher)
matcherPipe.addMatcher(suchAsMatcher)
matcherPipe.addMatcher(orOtherMatcher)
matcherPipe.addMatcher(includingPatternMatcher)
matcherPipe.addMatcher(especiallyPatternMatcher)
relations = matcherPipe.extract(doc)

for relation in relations:
    print(relation.getHypernym(), relation.getHyponym())

knowledgeGraph = KnowledgeGraph(relations)
knowledgeGraph.build()
knowledgeGraph.show()
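
nlp.create_pipe is the spaCy v2 idiom; under spaCy v3 the sentencizer is added by name instead:

nlp.add_pipe('sentencizer')  # spaCy v3 equivalent of the create_pipe line above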
Example #13
def test(nlp,
         src,
         gen,
         bert=False,
         print_annotations=False,
         print_latex=False,
         verbose=False):
    if print_annotations:
        print("source:", src[:50])
        print("summary:", gen[:50])
    src = nlp(src)
    gen = nlp(gen)
    if verbose:
        print("clusters:", src._.coref_clusters, gen._.coref_clusters)
    ce = CompoundEquivalency()
    spe = SpeakerPronounEquivalency()
    spe.register(src)
    spe.register(gen)
    kg = KnowledgeGraph(nlp,
                        use_bert=bert,
                        equivalencies=[ce, spe],
                        verbose=verbose)
    if print_annotations:
        annotator = Annotator(src, gen, latex=print_latex)
    kg.add_document(src)
    contained = 0
    contained_bert = 0
    missing = 0
    missing_verb = 0
    missing_actors = 0
    missing_acteds = 0
    contradiction = 0
    contradiction_bert = 0
    invalid_simplification = 0
    total = 0
    for token in gen:
        if token.pos_ == "VERB":
            total += 1
            relation = kg.get_relation(token)
            r = kg.query_relation(relation)
            if r[0] == KnowledgeGraph.entailment:
                if print_annotations:
                    print(util.format("contained", "blue", latex=print_latex),
                          "|", relation, "|", r[1])
                contained += 1
            if r[0] == KnowledgeGraph.entailment_bert:
                if print_annotations:
                    print(
                        util.format("contained (BERT)",
                                    "blue",
                                    latex=print_latex), "|", relation, "|",
                        r[1])
                contained_bert += 1
            if r[0] == KnowledgeGraph.contradiction_bert:
                if print_annotations:
                    print(
                        util.format("contradiction (BERT)",
                                    "red",
                                    latex=print_latex), "|", relation, "|",
                        r[1])
                contradiction_bert += 1
            elif r[0] == KnowledgeGraph.missing_dependencies:
                missing += 1
                if print_annotations:
                    print(
                        util.format("generic missing dependency",
                                    "yellow",
                                    latex=print_latex), "|", relation, "|",
                        r[1])
            elif r[0] == KnowledgeGraph.missing_actors:
                missing_actors += 1
                if print_annotations:
                    print(
                        util.format("missing actors",
                                    "magenta",
                                    latex=print_latex), "|", relation, "|",
                        r[1])
            elif r[0] == KnowledgeGraph.missing_acteds:
                missing_acteds += 1
                if print_annotations:
                    print(
                        util.format("missing acteds",
                                    "magenta",
                                    latex=print_latex), "|", relation, "|",
                        r[1])
            elif r[0] == KnowledgeGraph.missing_verb:
                missing_verb += 1
                if print_annotations:
                    print(
                        util.format("missing verb",
                                    "magenta",
                                    latex=print_latex), "|", relation, "|",
                        r[1])
            elif r[0] == KnowledgeGraph.invalid_simplification:
                invalid_simplification += 1
                if print_annotations:
                    print(
                        util.format("invalid simplification",
                                    "magenta",
                                    latex=print_latex), "|", relation, "|",
                        r[1])
            elif r[0] == KnowledgeGraph.contradiction:
                contradiction += 1
                if print_annotations:
                    print(
                        util.format("contradiction", "red", latex=print_latex),
                        "|", relation, "|", r[1])
            if print_annotations:
                annotator.annotate(relation, r)
    if print_annotations:
        annotated_document, annotated_summary = annotator.annotated()
        print("Document:", " ".join(annotated_document))
        print("Summary:", " ".join(annotated_summary))
    if total == 0:
        return 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    return 100.0 * contained / total, \
            100.0 * contained_bert / total, \
            100.0 * missing / total, \
            100.0 * missing_verb / total, \
            100.0 * missing_actors / total, \
            100.0 * missing_acteds / total, \
            100.0 * contradiction / total, \
            100.0 * contradiction_bert / total, \
            100.0 * invalid_simplification / total
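
A driver sketch for test(); the model choice is an assumption, and the pipeline may additionally need a coreference component (the function reads ._.coref_clusters when verbose), which a stock spaCy model does not provide:

import spacy

nlp = spacy.load('en_core_web_sm')  # assumed model; a coref pipe (e.g. neuralcoref) may be required
scores = test(nlp, src='The cat sat on the mat.', gen='The cat sat.')
print('contained: %.1f%%' % scores[0])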
Example #14
from flask import Flask, render_template, request, jsonify
from knowledge_graph import KnowledgeGraph
from google_sheets import GoogleSheets

app = Flask(__name__)
kg = KnowledgeGraph()
gsheets = GoogleSheets()


@app.route("/", methods=['GET', 'POST'])
def welcome():
    data = request.get_json()
    #print(data)
    intent = data['queryResult']['intent']['displayName']
    #message_text = data['queryResult']['queryText']
    actual_value = data['queryResult']['parameters']['float_measure']
    #print(f'Intent: {intent}')
    #print(f'Message text: {message_text}')

    if intent == 'write_measurement':
        #print(type(actual_value))
        #print(f'Actual value: {actual_value}')
        #gdocs.insert_text(message_text)
        gsheets.write_actual_size(actual_value)

        response = {
            "fulfillmentMessages": [{
                "text": {
                    # str.format avoids a TypeError if the parameter arrives as a float
                    "text": ["Der Wert {} wurde eingetragen.".format(actual_value)]
                }
            }]
        }
        return jsonify(response)  # assumed ending: Dialogflow webhooks reply with JSON
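
A typical local entry point for the Flask app above (host and port are assumptions):

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)  # assumed local run configuration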
Example #15
def retrieve(seeds):
    crawled_one_hop = [crawl_one_hop(seed) for seed in seeds]
    crawled_two_hop = [crawl_two_hop(seed) for seed in seeds]
    kg = KnowledgeGraph()
    for crawled_from_seed, seed in zip(crawled_one_hop, seeds):
        kg.add_node(seed)
        kg.nodes[seed].add_candidates([seed])
        kg.nodes[seed].set_type(get_type(seed))
        for path in crawled_from_seed:
            x = kg.add_node()
            kg.add_edge(seed, x, path[0])
            kg.nodes[x].set_type(path[1])
            cands = get_candidates(seed, path[0:1])
            kg.nodes[x].add_candidates(cands)
    for crawled_from_seed, seed in zip(crawled_two_hop, seeds):
        for path in crawled_from_seed:
            predge = kg.find_edge(seed, None, path[0])[0]
            if predge:
                med = predge.split('--')[2]
                x = kg.add_node()
                kg.add_edge(med, x, path[1])
                kg.nodes[x].set_type(path[3])
                cands = get_candidates(seed, path[0:2])
                kg.nodes[x].add_candidates(cands)
            else:
                x1 = kg.add_node()
                x2 = kg.add_node()
                kg.add_edge(seed, x1, path[0])
                kg.add_edge(x1, x2, path[1])
                kg.nodes[x1].set_type(path[2])
                kg.nodes[x2].set_type(path[3])

    return kg
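
A usage sketch for retrieve(); the seed entities are placeholders, and crawl_one_hop, crawl_two_hop, get_type, and get_candidates are module-level helpers this snippet assumes exist:

kg = retrieve(['Q1', 'Q2'])  # hypothetical seed entity IDs
print(len(kg.nodes), 'nodes in the constructed query graph')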