Example #1
        def _trans(cur, *args):
            def _exec(data, record):
                # update the existing row when the SELECT returned data,
                # otherwise insert a new row for this dbnode
                if len(data):
                    sql = ",".join(["`%s`='%s'" % (k,v) for k,v in record.items()])
                    sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (DBConf.TABLE_DBINFO, sql, dbnode)
                    utils.log(utils.cur(), sql)
                    cur.execute(sql)
                else:
                    record["dbnode"] = dbnode
                    keys = ",".join(["`%s`" % k for k in record.keys()])
                    vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
                    vals = vals % record
                    sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (DBConf.TABLE_DBINFO, keys, vals)
                    utils.log(utils.cur(), sql)
                    cur.execute(sql)
            cur.execute("START TRANSACTION")
            for dbnode in dbnodes:
                if not dbnode: continue
                record = dict()
                for key, value in path.items():
                    node = os.path.join(root, dbnode, value)
                    if self.zk.exists(node, None):
                        (data, meta) = self.zk.get(node, None)
                        record[key] = data

                cur.execute("SELECT * FROM `%s` WHERE `dbnode` = '%s'" % (DBConf.TABLE_DBINFO, dbnode))
                dbinfo = cur.fetchall()
                _exec(dbinfo, record)
            # prune rows whose dbnode no longer exists in ZooKeeper
            sql = "DELETE FROM `%s` WHERE `dbnode` not in (%s) " % (DBConf.TABLE_DBINFO, ",".join(["'"+d+"'" for d in dbnodes]))
            utils.log(utils.cur(), sql)
            cur.execute(sql)
            cur.execute("COMMIT")
Example #2
from collections import deque

# `graph.find_neighbors` and `vector_to_int` are supplied by the caller.
def BFS(graph, startnode, endnode):
    q = deque()
    q.append(startnode)
    visitednodes = [startnode]
    # maps each reached node (keyed by vector_to_int) to the step taken
    # to get there; the start node has no incoming step
    path = {}
    path[vector_to_int(startnode)] = None

    while len(q) > 0:
        currentnode = q.popleft()
        if currentnode == endnode:
            break
        for nextnode in graph.find_neighbors(currentnode):
            if vector_to_int(nextnode) not in path:
                q.append(nextnode)
                visitednodes.append(nextnode)
                path[vector_to_int(nextnode)] = nextnode - currentnode
    print("********************VISITED***********************")
    print(visitednodes)
    print("************************PATH***********************")
    for key in path:
        print(key)
    return path
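
The function above records, for every visited node, the vector step that reached it, but never rebuilds a route from those steps. Below is a self-contained sketch of the same idea on plain coordinate tuples, including the missing backtracking pass; every name in it (bfs_steps, reconstruct, grid_neighbors) is illustrative rather than taken from the original:

from collections import deque

def bfs_steps(neighbors, start, goal):
    q = deque([start])
    steps = {start: None}  # node -> step taken to reach it
    while q:
        current = q.popleft()
        if current == goal:
            break
        for nxt in neighbors(current):
            if nxt not in steps:
                q.append(nxt)
                steps[nxt] = (nxt[0] - current[0], nxt[1] - current[1])
    return steps

def reconstruct(steps, goal):
    # walk back from the goal by subtracting each node's stored step
    # until reaching the start (whose entry is None)
    node, route = goal, [goal]
    while steps.get(node) is not None:
        dx, dy = steps[node]
        node = (node[0] - dx, node[1] - dy)
        route.append(node)
    return route[::-1]

def grid_neighbors(node, w=5, h=5):
    x, y = node
    return [(x + dx, y + dy) for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1))
            if 0 <= x + dx < w and 0 <= y + dy < h]

steps = bfs_steps(grid_neighbors, (0, 0), (4, 4))
print(reconstruct(steps, (4, 4)))  # shortest route from (0, 0) to (4, 4)
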
Example #3
    def view_drawpath(self):
        all_path = []
        path, root = self.mod.load_drawpath_db()

        for key, item in path.items():
            all_path.append((1, key, root[key], '', len(item)))
            for file in item:
                all_path.append((2, ) + file)
        if not all_path:
            # placeholder row when no directories have been added yet; the
            # text reads "right-click to add a file directory" (padded here
            # to the 5-tuple shape of the other level-1 rows)
            all_path = [(1, "右键添加文件目录", "", "", 0)]

        self.tree_out(all_path, type='PATH', unfold=0)
Example #4
        def _trans(cur, *args):
            def _exec(data, record):
                # update the existing row when the SELECT returned data,
                # otherwise insert a new row for this dbnode
                if len(data):
                    sql = ",".join(
                        ["`%s`='%s'" % (k, v) for k, v in record.items()])
                    sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (
                        DBConf.TABLE_DBINFO, sql, dbnode)
                    utils.log(utils.cur(), sql)
                    cur.execute(sql)
                else:
                    record["dbnode"] = dbnode
                    keys = ",".join(["`%s`" % k for k in record.keys()])
                    vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
                    vals = vals % record
                    sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (
                        DBConf.TABLE_DBINFO, keys, vals)
                    utils.log(utils.cur(), sql)
                    cur.execute(sql)

            cur.execute("START TRANSACTION")
            for dbnode in dbnodes:
                if not dbnode: continue
                record = dict()
                for key, value in path.items():
                    node = os.path.join(root, dbnode, value)
                    if self.zk.exists(node, None):
                        (data, meta) = self.zk.get(node, None)
                        record[key] = data

                cur.execute("SELECT * FROM `%s` WHERE `dbnode` = '%s'" %
                            (DBConf.TABLE_DBINFO, dbnode))
                dbinfo = cur.fetchall()
                _exec(dbinfo, record)
            # prune rows whose dbnode no longer exists in ZooKeeper
            sql = "DELETE FROM `%s` WHERE `dbnode` not in (%s) " % (
                DBConf.TABLE_DBINFO, ",".join(["'" + d + "'"
                                               for d in dbnodes]))
            utils.log(utils.cur(), sql)
            cur.execute(sql)
            cur.execute("COMMIT")
Example #5
    def _get_authz_info(self):
        if not self.authz_file:
            self.log.error("The [svn] authz_file configuration option in "
                           "trac.ini is empty or not defined")
            raise ConfigurationError()
        try:
            mtime = os.path.getmtime(self.authz_file)
        except OSError as e:
            self.log.error(
                "Error accessing svn authz permission policy "
                "file: %s", exception_to_unicode(e))
            raise ConfigurationError()
        if mtime != self._mtime:
            self._mtime = mtime
            rm = RepositoryManager(self.env)
            modules = set(repos.reponame
                          for repos in rm.get_real_repositories())
            if '' in modules and self.authz_module_name:
                modules.add(self.authz_module_name)
            modules.add('')
            self.log.info("Parsing authz file: %s", self.authz_file)
            try:
                self._authz = parse(self.authz_file, modules)
            except ParsingError as e:
                self.log.error(
                    "Error parsing svn authz permission policy "
                    "file: %s", exception_to_unicode(e))
                raise ConfigurationError()
            else:
                self._users = {
                    user
                    for paths in self._authz.values()
                    for path in paths.values()
                    for user, result in path.items() if result
                }
        return self._authz, self._users
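
This method re-parses the authz file only when its modification time changes. The guard generalizes well; here is a minimal sketch of the pattern with illustrative names (CachedFile is not Trac's):

import os

class CachedFile:
    """Re-read a file only when os.path.getmtime reports a change."""

    def __init__(self, path, parse):
        self.path = path
        self.parse = parse
        self._mtime = None
        self._value = None

    def get(self):
        mtime = os.path.getmtime(self.path)  # raises OSError if missing
        if mtime != self._mtime:
            # record the timestamp before parsing, as the Trac code does;
            # a failed parse is therefore not retried until the file changes
            self._mtime = mtime
            self._value = self.parse(self.path)
        return self._value
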
Example #6
        if event.type == pg.KEYDOWN:
            if event.key == pg.K_ESCAPE:
                running = False
            if event.key == pg.K_m:
                # dump the wall list for saving
                print([(int(loc.x), int(loc.y)) for loc in g.walls])
        if event.type == pg.MOUSEBUTTONDOWN:
            mpos = vec(pg.mouse.get_pos()) // TILESIZE
            if event.button == 1:
                if mpos in g.walls:
                    g.walls.remove(mpos)
                else:
                    g.walls.append(mpos)
            if event.button == 3:
                start = mpos
            path = breadth_first_search(g, start)

    pg.display.set_caption("{:.2f}".format(clock.get_fps()))
    screen.fill(DARKGRAY)
    draw_grid()
    g.draw()
    for node, dir in path.items():
        if dir:
            x, y = node
            x = x * TILESIZE + TILESIZE / 2
            y = y * TILESIZE + TILESIZE / 2
            img = arrows[vec2int(dir)]
            r = img.get_rect(center=(x, y))
            screen.blit(img, r)
    pg.display.flip()
Example #7
def process_features_stanford(stanfordf, naf_f, predicateMatrix,
                              wn_supersenses, outfile):

    doc = etree.parse(stanfordf, etree.XMLParser(remove_blank_text=True))
    root = doc.getroot()
    # per-token fields extracted below: sentence, token, lemma, pos, NER

    word_token = {}
    dependency_dict = {}
    timex_dict = collections.defaultdict(list)
    counter = 0

    for sent in root.iter('sentence'):

        for token in sent.iter('token'):
            counter += 1
            sentence_id = sent.attrib.get("id", "null")
            token_sentence_id = token.attrib.get("id", "null")

            children = token.getchildren()
            ner = children[5].text
            base = (str(counter), children[0].text, children[1].text,
                    children[4].text)
            if ner == "O":
                fields = base + ("O", "O")
            elif ner in ("DATE", "DURATION", "TIME", "SET"):
                # temporal labels are masked to "O" in the NER slot and
                # carried in the last slot; they are re-tagged in BIO
                # format further down
                fields = base + ("O", ner)
            else:
                fields = base + (ner, "O")
            word_token[sentence_id + "\t" + token_sentence_id] = fields

            for timex in token.findall("Timex"):
                timex_id = timex.attrib.get("tid", "null")
                timex_dict[timex_id].append(sentence_id + "\t" +
                                            token_sentence_id)

        #########################
        ## - dependencies
        #########################

        for dep in sent.iter('dependencies'):
            if dep.attrib.get("type", "null") == "basic-dependencies":
                for deprel in dep.iter('dep'):
                    for gov in deprel.iter('governor'):
                        # renamed from `dep` to avoid shadowing the outer
                        # loop variable of the same name
                        for dependent in deprel.iter('dependent'):
                            key = sentence_id + "\t" + dependent.attrib.get(
                                "idx", "null") + "\t" + deprel.attrib.get(
                                    "type", "null")
                            values = gov.attrib.get("idx", "null")
                            dependency_dict[key] = values

#########################
## - add dependency to token and POS
#########################

    token_dependency = {}

    for k, v in word_token.items():
        for k1, v1 in dependency_dict.items():
            k1_splitted = k1.split("\t")
            key_dep = k1_splitted[0] + "\t" + k1_splitted[1]
            if key_dep == k:
                new_v = v + (
                    k1_splitted[2],
                    v1,
                )
                token_dependency[k] = new_v

    for k, v in word_token.items():
        if k not in token_dependency:
            new_v = v + (
                "_",
                "_",
            )
            token_dependency[k] = new_v

#########################
## - solve timex to BIO-format
#########################

    for k, v in token_dependency.items():

        for k1, v1 in timex_dict.items():
            if len(v1) > 1:
                if k == v1[0]:
                    new_val = v[:5] + ("B-TIMEX", ) + v[6:]
                    token_dependency[k] = new_val

                for i in range(1, len(v1)):
                    val = v1[i]
                    if val == k:
                        new_val = v[:5] + ("I-TIMEX", ) + v[6:]
                        token_dependency[k] = new_val

            else:
                if k in v1:
                    new_val = v[:5] + ("B-TIMEX", ) + v[6:]
                    token_dependency[k] = new_val

######################
## -  path2root - dependencies
######################

    path = {}

    for k, v in token_dependency.items():
        k_splitted = k.split("\t")
        sentence_id = k_splitted[0]
        token_per_sentence = k_splitted[1]

        if v[7] != "_":
            path[sentence_id + "#" +
                 token_per_sentence] = sentence_id + "#" + v[7]

    path2root = {}
    path2root_solved = {}

    for k in path:
        path2root[k] = get_path2root(path, k)

    for k, v in path2root.items():
        k_splitted = k.split("#")
        sentence_id_path = k_splitted[0]

        for k1, v1 in token_dependency.items():
            k1_splitted = k1.split("\t")
            match = k1_splitted[0] + "#" + k1_splitted[1]

            if str(sentence_id_path) == str(k1_splitted[0]):
                for n, i in enumerate(v):
                    if str(i) == str(match):
                        match_full = v1[2] + "|" + v1[3] + "|" + v1[6]
                        v[n] = match_full
                        path2root_solved[k] = tuple(v)

    for k, v in path2root_solved.items():
        lemma_path = tuple(["_".join([item.split('|')[0] for item in v])])

        pos_path = tuple(["_".join([item.split('|')[1] for item in v])])

        dep_path = tuple(["_".join([item.split('|')[2] for item in v])])

        dep_pos_path = [item.split('|')[1:] for item in v]
        path_dep_pos_reverse = [sublist[::-1] for sublist in dep_pos_path]
        dep_pos_path_flat = tuple([
            "_".join(
                [item for sublist in path_dep_pos_reverse for item in sublist])
        ])

        full_path_partial = [item.split('|') for item in v]
        full_path = tuple([
            "_".join(
                [item for sublist in full_path_partial for item in sublist])
        ])

        new_val = full_path + pos_path + lemma_path + dep_path + dep_pos_path_flat
        path2root_solved[k] = new_val

################
## merge data VN
################

    vn_verb = predicate_verb_vn(predicateMatrix)
    for k, v in token_dependency.items():
        if v[3].startswith('V'):
            if v[2] in vn_verb:
                vn_values = "_".join(vn_verb[v[2]])
                new_val = v + (vn_values, )
                token_dependency[k] = new_val
            else:
                new_val = v + ("O", )
                token_dependency[k] = new_val
        else:
            new_val = v + ("O", )
            token_dependency[k] = new_val

################
# ## merge data FN
################

    fn_verb = predicate_verb_fn(predicateMatrix)
    for k, v in token_dependency.items():
        if v[3].startswith('V'):
            if v[2] in fn_verb:
                fn_values = "_".join(fn_verb[v[2]])
                new_val = v + (fn_values, )
                token_dependency[k] = new_val
            else:
                new_val = v + ("O", )
                token_dependency[k] = new_val

        else:
            new_val = v + ("O", )
            token_dependency[k] = new_val

################
## merge supersenses
################

    wn_data = {}
    noun_supersense = wn_supersense(wn_supersenses)
    for k, v in token_dependency.items():
        if v[3].startswith('N'):
            if v[2] in noun_supersense:
                wn_values = "_".join(noun_supersense[v[2]])
                new_val = v + (wn_values, )
                wn_data[k] = new_val

    verb_supersense = predicate_verb_wn(predicateMatrix)
    for k, v in token_dependency.items():
        if v[3].startswith('V'):
            if v[2] in verb_supersense:
                wn_values = "_".join(verb_supersense[v[2]])
                new_val = v + (wn_values, )
                wn_data[k] = new_val

####################
## add supersense - stanford data
####################

    for k, v in token_dependency.items():
        if k in wn_data:
            new_val = wn_data[k]
            token_dependency[k] = new_val
        else:
            new_val = v + ("O", )
            token_dependency[k] = new_val

####################
## add path2root - stanford data
####################

    for k, v in token_dependency.items():
        new_key = k.replace("\t", "#")
        if new_key in path2root_solved:
            new_val = v + path2root_solved[new_key]
            token_dependency[k] = new_val
        else:
            new_val = v + (
                "O",
                "O",
                "O",
                "O",
                "O",
            )
            token_dependency[k] = new_val

####################
## solve governor lemma and POS - stanford data
####################

    token_dependency_copy = token_dependency.copy()

    for k, v in token_dependency.items():
        k_splitted = k.split("\t")
        gov_key = k_splitted[0] + "\t" + v[7]
        if gov_key in token_dependency_copy:
            new_val = v[:7] + (
                token_dependency_copy[gov_key][2],
                token_dependency_copy[gov_key][3],
            ) + v[8:]
            token_dependency[k] = new_val
        else:
            new_val = v[:7] + (
                "O",
                "O",
            ) + v[8:]
            token_dependency[k] = new_val

#################
# semantic roles and predicates
#################

    sem_roles_naf = process_feature_naf(naf_f)
    sem_roles_token = {}

    for k, v in token_dependency.items():
        for k1, v1 in sem_roles_naf.items():
            k1_splitted = k1.split("\t")
            if v[0] in v1:
                sem_role = k1_splitted[1]

                if v[0] in sem_roles_token:
                    list_value_type = sem_roles_token[v[0]]
                    if sem_role not in list_value_type:
                        list_value_type.append(sem_role)
                else:
                    list_value_type = []
                    list_value_type.append(sem_role)
                sem_roles_token[v[0]] = sorted(list_value_type)

    for k, v in token_dependency.items():
        if v[0] in sem_roles_token:
            new_val = v + tuple(["_".join(sem_roles_token[v[0]])])
            token_dependency[k] = new_val
        else:
            new_val = v + ("O", )
            token_dependency[k] = new_val


####################
## final format
#####################

    final_test = {}

    for k, v in token_dependency.items():
        k_splitted = k.split("\t")
        new_sent_id = int(k_splitted[0]) - 2
        new_token_id = int(v[0]) - 1
        f = stanfordf.split("/")[-1]
        final_val = (f, ) + (str(new_sent_id), ) + (
            k_splitted[1], ) + v[1:] + (
                "O",
                "O",
                "O",
                "O",
                "O",
            )
        final_test[new_token_id] = final_val

    for k, v in final_test.items():
        if v[1] == "-1":
            new_val = (v[0], ) + ("-99", ) + v[2:]
            final_test[k] = new_val
        else:
            new_val = v
            final_test[k] = new_val

    for k, v in final_test.items():
        line = (v[0] + "\t" + str(k) + "\t" + '\t'.join(v[1:6]) +
                "\t" + "\t".join(v[8:11]) + "\t" +
                "\t".join(v[14:19]) + "\t" + v[6] + "\t" + v[7] +
                "\t" + "\t".join(v[11:14]) + "\t" +
                "\t".join(v[19:]) + "\tO" + "\n")
        # a token that starts a new sentence (id 1), other than the very
        # first token written, is preceded by a blank separator line
        if int(k) != 0 and int(v[2]) == 1:
            line = "\n" + line
        with open(outfile, 'a') as output:
            output.write(line)

    return final_test
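
get_path2root itself is not part of the snippet. Given how `path` is built in both of these functions (each "sentence#token" key maps to its governor's "sentence#index"), a plausible sketch is a walk up the governor links that collects the chain as a list; the cycle guard is an extra safety the original may not need:

def get_path2root(path, key):
    # follow child -> governor links until a node with no entry is found
    chain = [key]
    seen = {key}
    while key in path and path[key] not in seen:
        key = path[key]
        seen.add(key)
        chain.append(key)
    return chain
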
Example #8
def process_features_naf(naff, train_f, predicateMatrix, wn_supersenses,
                         outfile):

    doc = etree.parse(naff, etree.XMLParser(remove_blank_text=True))
    root = doc.getroot()
    # per-token fields extracted below: sentence, token, lemma, pos, NER

    token_per_sentence = collections.defaultdict(list)
    token_sentence_id = {}
    entity_dict = {}
    timex_dict = collections.defaultdict(list)
    dependency_dict = {}
    token_dependency = {}

    counter = 0
    for token in root.iter("wf"):
        token_id = token.attrib.get("id", "null")
        sentence_id = token.attrib.get("sent", "null")
        token_word = token.text

        token_per_sentence[sentence_id].append(token_id)
        token_term_map = int(token_id.replace('w', '')) + 1
        token_term = "t" + str(token_term_map)
        token_dependency[token_term] = (token_id, sentence_id, token_word)

    for k, v in token_per_sentence.items():
        for i in v:
            new_index = int(v.index(i)) + 1
            token_term_map = int(i.replace('w', '')) + 1
            key = "t" + str(token_term_map)
            token_sentence_id[key] = str(new_index)

    for k, v in token_dependency.items():
        if k in token_sentence_id:
            new_val = v[:-1] + (token_sentence_id[k], ) + (v[2], )
            token_dependency[k] = new_val

    for terms in root.iter("terms"):
        for term in terms.iter("term"):

            lemma_id = term.attrib.get("id", "null")
            lemma_token = term.attrib.get("lemma", "null")
            lemma_pos = term.attrib.get("morphofeat", "null")

            if lemma_id in token_dependency:
                new_val = token_dependency[lemma_id] + (
                    lemma_token,
                    lemma_pos,
                )
                token_dependency[lemma_id] = new_val

    for entities in root.iter("entities"):
        for entity in entities.iter("entity"):
            entity_type = entity.attrib.get("type", "null")
            for reference in entity.iter("references"):
                for token_span in reference.iter("span"):
                    for token_id in token_span.iter("target"):
                        token_match = token_id.attrib.get("id", "null")
                        entity_dict[token_match] = entity_type

    for k, v in token_dependency.items():
        if k in entity_dict:
            new_val = v + (
                entity_dict[k],
                "O",
            )
            token_dependency[k] = new_val
        else:
            new_val = v + (
                "O",
                "O",
            )
            token_dependency[k] = new_val

    for timexpression in root.iter("timeExpressions"):
        for timex in timexpression.iter("timex3"):
            timex_id = timex.attrib.get("id", "null")
            for timex_span in timex.iter("span"):
                for token_id in timex_span.iter("target"):
                    tokens = "t" + str(
                        int(
                            token_id.attrib.get("id", "null").replace('w', ''))
                        + 1)
                    timex_dict[timex_id].append(tokens)

    for k, v in token_dependency.items():

        for k1, v1 in timex_dict.items():
            if len(v1) > 1:
                if k == v1[0]:
                    new_val = v[:-1] + ("B-TIMEX", )
                    token_dependency[k] = new_val

                for i in range(1, len(v1)):
                    val = v1[i]
                    if val == k:
                        new_val = v[:-1] + ("I-TIMEX", )
                        token_dependency[k] = new_val

            else:
                if k in v1:
                    new_val = v[:-1] + ("B-TIMEX", )
                    token_dependency[k] = new_val

    for dependencies in root.iter("deps"):
        for dep in dependencies.iter("dep"):
            governor = dep.attrib.get("from", "null")
            dependent = dep.attrib.get("to", "null")
            dep_rel = dep.attrib.get("rfunc", "null")
            dependency_dict[dependent] = (dep_rel, governor)

    for k, v in token_dependency.items():
        if k in dependency_dict:
            new_val = v + dependency_dict[k]
            token_dependency[k] = new_val
        else:
            new_val = v + (
                "_",
                "_",
            )
            token_dependency[k] = new_val

#########################
# SRL naf
#########################

    predicate_term = {}
    predicate_roles = collections.defaultdict(list)

    for elem in root.iter("srl"):
        for srl in elem.findall("predicate"):
            predicate_id = srl.attrib.get("id", "null")
            for term in srl.findall("span"):
                for term_id in term.findall("target"):
                    predicate_term_id = term_id.attrib.get("id", "null")
                    predicate_term[predicate_id] = predicate_term_id

            for role in srl.findall("role"):
                role_id = role.attrib.get("id", "null")
                role_type = role.attrib.get("semRole", "null")
                for role_span in role.findall("span"):
                    for role_term in role_span.findall("target"):
                        role_span_id = role_term.attrib.get("id", "null")
                        predicate_roles[predicate_id + "\t" + role_type +
                                        "\t" + role_id].append(role_span_id)

    predicate_argument_final = {}
    for k, v in predicate_roles.items():
        k_splitted = k.split("\t")
        if k_splitted[0] in predicate_term:
            new_val = tuple(v)
            predicate_argument_final[predicate_term[k_splitted[0]] + "\t" +
                                     "\t".join(k_splitted[1:])] = new_val

######################
# ## -  path2root - dependencies
######################

    path = {}
    for k, v in token_dependency.items():
        sentence_id = v[1]
        path[sentence_id + "#" + k] = sentence_id + "#" + v[9]

    path2root = {}
    path2root_solved = {}

    for k in path:
        path2root[k] = get_path2root(path, k)

    for k, v in path2root.items():
        k_splitted = k.split("#")
        sentence_id_path = k_splitted[0]

        for k1, v1 in token_dependency.items():
            sentence_id = v1[1]
            term_id = k1

            match = sentence_id + "#" + term_id

            if str(sentence_id_path) == str(sentence_id):
                for n, i in enumerate(v):
                    if str(i) == str(match):
                        if v1[8] == "_":
                            match_full = v1[4] + "|" + v1[5] + "|root"
                            v[n] = match_full
                            path2root_solved[k_splitted[1]] = tuple(v)
                        else:
                            match_full = v1[4] + "|" + v1[5] + "|" + v1[8]
                            v[n] = match_full
                            path2root_solved[k_splitted[1]] = tuple(v)

    for k, v in path2root_solved.items():
        lemma_path = tuple(["_".join([item.split('|')[0] for item in v])])
        pos_path = tuple(["_".join([item.split('|')[1] for item in v])])
        dep_path = tuple(["_".join([item.split('|')[2] for item in v])])

        dep_pos_path = [item.split('|')[1:] for item in v]
        path_dep_pos_reverse = [sublist[::-1] for sublist in dep_pos_path]
        dep_pos_path_flat = tuple([
            "_".join(
                [item for sublist in path_dep_pos_reverse for item in sublist])
        ])

        full_path_partial = [item.split('|') for item in v]
        full_path = tuple([
            "_".join(
                [item for sublist in full_path_partial for item in sublist])
        ])

        new_val = full_path + pos_path + lemma_path + dep_path + dep_pos_path_flat
        path2root_solved[k] = new_val

# ################
# ## merge data VN
# ################

    vn_verb = predicate_verb_vn(predicateMatrix)
    for k, v in token_dependency.items():
        if v[5].startswith('V'):
            if v[4] in vn_verb:
                vn_values = "_".join(vn_verb[v[4]])
                new_val = v + (vn_values, )
                token_dependency[k] = new_val
            else:
                new_val = v + ("O", )
                token_dependency[k] = new_val
        else:
            new_val = v + ("O", )
            token_dependency[k] = new_val

# ################
# ## merge data FN
# ################

    fn_verb = predicate_verb_fn(predicateMatrix)
    for k, v in token_dependency.items():
        if v[5].startswith('V'):
            if v[4] in fn_verb:
                fn_values = "_".join(fn_verb[v[4]])
                new_val = v + (fn_values, )
                token_dependency[k] = new_val
            else:
                new_val = v + ("O", )
                token_dependency[k] = new_val
        else:
            new_val = v + ("O", )
            token_dependency[k] = new_val

# ################
# ## merge supersenses
# ################

    wn_data = {}
    noun_supersense = wn_supersense(wn_supersenses)
    for k, v in token_dependency.items():
        if v[5].startswith('N'):
            if v[4] in noun_supersense:
                wn_values = "_".join(noun_supersense[v[4]])
                new_val = v + (wn_values, )
                wn_data[k] = new_val

    verb_supersense = predicate_verb_wn(predicateMatrix)
    for k, v in token_dependency.items():
        if v[5].startswith('V'):
            if v[4] in verb_supersense:
                wn_values = "_".join(verb_supersense[v[4]])
                new_val = v + (wn_values, )
                wn_data[k] = new_val

# ####################
# ## add supersense - naf data
# ####################

    for k, v in token_dependency.items():
        if k in wn_data:
            new_val = wn_data[k]
            token_dependency[k] = new_val
        else:
            new_val = v + ("O", )
            token_dependency[k] = new_val

# ####################
# ## add path2root - naf data
# ####################

    for k, v in token_dependency.items():
        if k in path2root_solved:
            new_val = v + path2root_solved[k]
            token_dependency[k] = new_val
        else:
            new_val = v + (
                "O",
                "O",
                "O",
                "O",
                "O",
            )
            token_dependency[k] = new_val

# ####################
# ## solve governor lemma and POS - naf data
# ####################

    token_dependency_copy = token_dependency.copy()

    for k, v in token_dependency.items():
        gov_key = v[9]
        if gov_key in token_dependency_copy:
            new_val = v[:9] + (
                token_dependency_copy[gov_key][4],
                token_dependency_copy[gov_key][5],
            ) + v[10:]
            token_dependency[k] = new_val
        else:
            new_val = v[:9] + (
                "O",
                "O",
            ) + v[10:]
            token_dependency[k] = new_val

#################
# semantic roles and predicates
#################

    sem_roles_token = {}

    for k, v in token_dependency.items():

        for k1, v1 in predicate_argument_final.items():
            k1_splitted = k1.split("\t")

            if k in v1:
                sem_role = k1_splitted[1]

                if k in sem_roles_token:
                    list_value_type = sem_roles_token[k]
                    if sem_role not in list_value_type:
                        list_value_type.append(sem_role)
                else:
                    list_value_type = []
                    list_value_type.append(sem_role)
                sem_roles_token[k] = sorted(list_value_type)

    for k, v in token_dependency.items():
        if k in sem_roles_token:
            new_val = v + tuple(["_".join(sem_roles_token[k])])
            token_dependency[k] = new_val
        else:
            new_val = v + ("O", )
            token_dependency[k] = new_val

#################
# event class only
#################

    training = process_train(train_f)
    event_arguments = collections.defaultdict(list)

    for k, v in token_dependency.items():

        for k1, v1 in predicate_argument_final.items():
            k1_splitted = k1.split("\t")
            pred_id = k1_splitted[0]
            role_type = k1_splitted[1]

            for i in v1:
                if i == k:
                    new_val = int(v[0].replace('w', ''))
                    if new_val in training:  # match token id
                        if training[new_val][0] != "O":  # event_data
                            event_argument_label_pos_lemma = v[
                                4] + "|" + role_type + "|" + v[5]
                            event_arguments[pred_id].append(
                                event_argument_label_pos_lemma)

    for k, v in token_dependency.items():

        if k in event_arguments:
            values = event_arguments[k]

            arg_label = tuple(
                ["_".join([item.split('|')[1] for item in values])])

            label_lemma = [item.split('|')[0:2] for item in values]
            label_lemma_flat = tuple([
                "_".join([item for sublist in label_lemma for item in sublist])
            ])

            label_lemma_pos = [item.split('|') for item in values]
            label_lemma_pos_flat = tuple([
                "_".join(
                    [item for sublist in label_lemma_pos for item in sublist])
            ])

            new_val = v + label_lemma_pos_flat + label_lemma_flat + arg_label
            token_dependency[k] = new_val

        else:
            new_val = v + (
                "O",
                "O",
                "O",
            )
            token_dependency[k] = new_val


##############
# final format
##############

    final_class = {}
    for k, v in token_dependency.items():
        k_ord = int(k.replace('t', '')) - 1
        new_sent_id = int(v[1]) - 2

        f = naff.split("/")[-1]

        final_val_class = (f, ) + (v[0], ) + (str(new_sent_id), ) + v[2:]

        final_class[int(k_ord)] = final_val_class

    final = {}

    for k, v in final_class.items():
        if k in training:
            if training[k][-1] != "O":
                new_val = (v[0], ) + v[2:] + ("B-EVENT", ) + (
                    training[k][-1], )
                final[k] = new_val

    for k, v in final.items():
        line = (v[0] + "\t" + str(k) + "\t" + '\t'.join(v[1:6]) +
                "\t" + "\t".join(v[8:11]) + "\t" +
                "\t".join(v[14:19]) + "\t" + v[6] + "\t" + v[7] +
                "\t" + "\t".join(v[11:14]) + "\t" +
                "\t".join(v[19:]) + "\n")
        # a token that starts a new sentence (id 1), other than the very
        # first token written, is preceded by a blank separator line
        if int(k) != 0 and int(v[2]) == 1:
            line = "\n" + line
        with open(outfile, 'a') as output:
            output.write(line)

    return final
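
Both feature extractors reduce their timex handling to the same BIO rule: the first token of a time expression gets "B-TIMEX" and every later token "I-TIMEX". A compact stand-alone version of that rule (the span and token ids are made up):

def bio_tags(timex_spans, tokens):
    # start every token at "O", then overwrite tokens covered by a span
    tags = {tok: "O" for tok in tokens}
    for span in timex_spans.values():
        for i, tok in enumerate(span):
            if tok in tags:
                tags[tok] = "B-TIMEX" if i == 0 else "I-TIMEX"
    return tags

spans = {"tmx1": ["t3", "t4", "t5"]}
print(bio_tags(spans, ["t1", "t2", "t3", "t4", "t5"]))
# {'t1': 'O', 't2': 'O', 't3': 'B-TIMEX', 't4': 'I-TIMEX', 't5': 'I-TIMEX'}
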
Example #9
            if event.key == pg.K_ESCAPE:
                running = False
            if event.key == pg.K_m:
                # dump the wall list for saving
                print([(int(loc.x), int(loc.y)) for loc in g.walls])
        if event.type == pg.MOUSEBUTTONDOWN:
            mpos = vec(pg.mouse.get_pos()) // TILESIZE
            if event.button == 1:
                if mpos in g.walls:
                    g.walls.remove(mpos)
                else:
                    g.walls.append(mpos)
            if event.button == 3:
                start = mpos
            path = flow_field(g, start)

    pg.display.set_caption("{:.2f}".format(clock.get_fps()))
    screen.fill(DARKGRAY)
    draw_grid()
    g.draw()
    for n, d in path.items():
        if d:
            x, y = n
            x = x * TILESIZE + TILESIZE / 2
            y = y * TILESIZE + TILESIZE / 2
            img = arrows[vec2int(d)]
            r = img.get_rect(center=(x, y))
            screen.blit(img, r)

    pg.display.flip()
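
The dict that flow_field returns maps every reachable cell to a direction vector, so an agent anywhere on the grid can follow it home. A minimal sketch with plain tuples, assuming (as the arrows drawn above suggest) that each stored vector points one step from the cell toward `start`:

def follow_field(field, cell, start):
    # step along the stored directions until the start cell is reached
    route = [cell]
    while cell != start:
        d = field.get(cell)
        if d is None:  # cell was never reached by the search
            return None
        cell = (cell[0] + d[0], cell[1] + d[1])
        route.append(cell)
    return route
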
Example #10
    def update(self):
        # game loop - update

        self.all_sprites.update()
        self.camera.update(self.player)

        self.screen.fill(BLUE)

        keystate = pg.key.get_pressed()

        # enemy hits player

        for door in self.doors:
            dist = door.rect.center - self.player.pos
            if 0 < dist.length() < 100:
                self.draw_text("2(Q", 12, WHITE, door.rect.x, door.rect.y)
                #           self.draw_texty("2 (Q)", self.body_font, 12, WHITE, door.rect.x + 45 , door.rect.y +20, align="center")
                if 0 < dist.length() < 40 and self.score >= 2 and keystate[ord('q')]:
                    pg.mixer.Sound(path.join(self.sound_folder, 'door.wav')).play()
                    self.score -= 2
                    door.kill()

        for trapdoor in self.trapdoors:
            dist = trapdoor.rect.center - self.player.pos
            if 0 < dist.length() < 100:
                self.draw_texty("2 (Q)", self.body_font, 12, WHITE, trapdoor.rect.x + 20, trapdoor.rect.y + 30,
                                align="center")
                if self.score >= 2 and keystate[ord('q')]:
                    pg.mixer.Sound(path.join(self.sound_folder, 'door.wav')).play()
                    self.score -= 2
                    trapdoor.kill()

        hits = pg.sprite.spritecollide(self.player, self.enemy1s, False)
        for hit in hits:
            self.player.health -= ENEMY1_DAMAGE
            hit.vel = vector(0, 0)
            if self.player.health <= 0:
                self.playing = False
        if hits:
            self.player.vel += vector(KNOCKBACK, 0).rotate(-hits[0].rot)

        block_hit_list = pg.sprite.spritecollide(self.player, self.platforms, False, collide_hit_rect)
        for block in block_hit_list:
            if self.player.vel.y > 0:
                if self.player.pos.y < block.rect.centery:
                    self.player.pos.y = block.rect.top + 1
                    #
                    self.player.vel.y = 0
            if self.player.vel.x > 0 and self.player.vel.y != 0:
                if self.player.pos.x < block.rect.left:
                    self.player.pos.x = block.rect.left - self.player.hit_rect.width / 2
                    self.player.vel.x = 0
            if self.player.vel.x < 0 and self.player.vel.y != 0:
                if self.player.pos.x > block.rect.right:
                    self.player.pos.x = block.rect.right + self.player.hit_rect.width / 2
                    self.player.vel.x = 0
            if self.player.vel.y < 0:
                if self.player.pos.y - self.player.hit_rect.height > block.rect.bottom:
                    self.player.pos.y = block.rect.bottom + self.player.hit_rect.height
                #
                self.player.vel.y = 0

        block_hit_list = pg.sprite.spritecollide(self.player, self.trapdoors, False, collide_hit_rect)
        for block in block_hit_list:
            if self.player.vel.y > 0:
                if self.player.pos.y < block.rect.centery:
                    self.player.pos.y = block.rect.top + 1
                    #
                    self.player.vel.y = 0
            if self.player.vel.x > 0 and self.player.vel.y != 0:
                if self.player.pos.x < block.rect.left:
                    self.player.pos.x = block.rect.left - self.player.hit_rect.width / 2
                    self.player.vel.x = 0
            if self.player.vel.x < 0 and self.player.vel.y != 0:
                if self.player.pos.x > block.rect.right:
                    self.player.pos.x = block.rect.right + self.player.hit_rect.width / 2
                    self.player.vel.x = 0
            if self.player.vel.y < 0:
                if self.player.pos.y - self.player.hit_rect.height > block.rect.bottom:
                    self.player.pos.y = block.rect.bottom + self.player.hit_rect.height
                #
                self.player.vel.y = 0

        block_hit_list = pg.sprite.spritecollide(self.player, self.doors, False, collide_hit_rect)
        for block in block_hit_list:
            if self.player.vel.x > 0:
                if self.player.pos.x < block.rect.left:
                    self.player.pos.x = block.rect.left - self.player.hit_rect.width / 2
                    self.player.vel.x = 0

            if self.player.vel.y < 0:
                if self.player.pos.y - self.player.hit_rect.height > block.rect.bottom:
                    self.player.pos.y = block.rect.bottom + self.player.hit_rect.height
                #
                self.player.vel.y = 0

            if self.player.vel.x < 0:
                if self.player.pos.x > block.rect.right:
                    self.player.pos.x = block.rect.right + self.player.hit_rect.width / 2
                    self.player.vel.x = 0

        block_hit_list = pg.sprite.spritecollide(self.player, self.walls, False, collide_hit_rect)
        for block in block_hit_list:
            if self.player.vel.x > 0:
                if self.player.pos.x < block.rect.left:
                    self.player.pos.x = block.rect.left - self.player.hit_rect.width / 2
                    self.player.vel.x = 0

            if self.player.vel.y < 0:
                if self.player.pos.y - self.player.hit_rect.height > block.rect.bottom:
                    self.player.pos.y = block.rect.bottom + self.player.hit_rect.height
                #
                self.player.vel.y = 0

            if self.player.vel.x < 0:
                if self.player.pos.x > block.rect.right:
                    self.player.pos.x = block.rect.right + self.player.hit_rect.width / 2
                    self.player.vel.x = 0

        # check if player hits coins

        if not pg.mixer.Channel(0).get_busy():
            pg.mixer.music.set_volume(1)

        if len(self.enemy1s) == 0:
            pg.mixer.music.set_volume(0.6)
            self.channel1.play(self.bell_sound)
            self.round += 1
            self.player.health = 100
            self.spawn()

        # note: the arrow images and grid below are rebuilt on every
        # update() call; see the sketch after this example for building
        # them once up front
        arrows = {}

        self.arrow_img = pg.transform.scale(self.arrow_img, (41, 41))
        for direction in [(41, 0), (0, 41), (-41, 0), (0, -41)]:
            arrows[direction] = pg.transform.rotate(self.arrow_img, vector(direction).angle_to(vector(1, 0)))

        grid = SquareGrid(g, g.map_width, g.map_height)
        grid.draw_grid()

        path = breadth_first_search(grid, start)
        for node, direction in path.items():
            if direction:
                x, y = node
                img = arrows[vector_conv(direction)]
                r = img.get_rect(center=(x, y))
                self.screen.blit(img, r)

        grid.find_neighbours(start)
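
The update() method above rebuilds the scaled and rotated arrow images, the SquareGrid, and the breadth-first search once per frame. A hedged sketch of hoisting the image work into one-time setup; _build_arrows is an invented name, and it assumes the class has an __init__ or new() hook to call it from:

    def _build_arrows(self):
        # one-time setup: scale the base image once and cache a rotated
        # copy per direction, instead of redoing it every frame
        self.arrow_img = pg.transform.scale(self.arrow_img, (41, 41))
        self.arrows = {}
        for direction in [(41, 0), (0, 41), (-41, 0), (0, -41)]:
            self.arrows[direction] = pg.transform.rotate(
                self.arrow_img, vector(direction).angle_to(vector(1, 0)))

update() would then read from self.arrows instead of rebuilding the dict, and the grid and path could likewise be recomputed only when the walls change.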