Example #1
 def add_tokens(self, tokens):
     # Merge new tokens into the global table; on a name collision keep
     # the better-ranked definition via Tokenizer.best_match.
     for tokenname, token in tokens.items():
         if tokenname in self.tokens:
             self.tokens[tokenname] = Tokenizer.best_match([self.tokens[tokenname], token])
         else:
             self.tokens[tokenname] = token
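The merge pattern above is easier to see in isolation. A minimal sketch with Tokenizer.best_match stubbed out (the stub simply keeps the first candidate; the real ranking logic is not shown in these examples):

 def best_match(candidates):
     # Hypothetical stand-in for Tokenizer.best_match.
     return candidates[0]

 existing = {"foo": ("foo", "old")}
 incoming = {"foo": ("foo", "new"), "bar": ("bar", "fresh")}
 for name, token in incoming.items():
     if name in existing:
         existing[name] = best_match([existing[name], token])
     else:
         existing[name] = token
 print(existing)  # {'foo': ('foo', 'old'), 'bar': ('bar', 'fresh')}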
Example #2
 def add_tokens(self, tokens):
     # Same merge as above, but keys are normalized to lower case so
     # lookups are case-insensitive.
     for tokenname, token in tokens.items():
         lname = tokenname.lower()
         if lname in self.tokens:
             self.tokens[lname] = Tokenizer.best_match([self.tokens[lname], token])
         else:
             self.tokens[lname] = token
Example #3
 def traverse_members(self, view, pos, full = False):
     filename = self.currentfile
     # Take the current line, truncated at the cursor position.
     line = view.line(pos)
     line.b = pos
     line = view.substr(line)
     # Strip bracketed index expressions, innermost first, until the
     # line stops changing.
     oldline = ""
     while oldline != line:
         oldline = line
         line = re.sub(r'\[[^\[]*\]', '', line)
         self.debug(line)
     # Keep only the last expression on the line, then split it into a
     # member-access chain on ->, . and ::.
     line = re.split(r',|&|;|!|\+|\(|\[|\s+', line.strip())[-1].strip()
     self.debug(line)
     chain = [x.split("[", 1)[0] for x in re.split(r'->|\.|::', line.strip())]
     self.debug(chain)
     func = self.current_function(view)
     if filename not in self.cc.functiontokens or func not in self.cc.functiontokens[filename]:
         self.debug("Not in a filled function (%s, %s)" % (filename, func))
         return []
     # Resolve the first chain element: prefer a token local to the
     # current function, then fall back to the global table.
     tokens = [x for x in self.cc.functiontokens[filename][func] if x[Tokenizer.T_NAME] == chain[0]]
     token = None
     if len(tokens) > 0:
         token = tokens[0]
     else:
         token = self.cc.tokens.get(chain[0])
         if not token or token[Tokenizer.T_KIND] != Tokenizer.K_VARIABLE:
             return []
     typename = ""
     self.debug("Token: %s" % str(token))
     if token[Tokenizer.T_KIND] == Tokenizer.K_PARAM:
         typename = token[Tokenizer.T_EXTRA]["type"]
     elif 'typeref' in token[Tokenizer.T_EXTRA]:
         typename = token[Tokenizer.T_EXTRA]['typeref']
         if typename.startswith("struct:"):
             typename = typename[len("struct:"):]
         elif typename.startswith("union:"):
             typename = typename[len("union:"):]
     else:
         typename = Tokenizer.parsevariable(token[Tokenizer.T_SEARCH])[1]
     typename = self.get_base_type(typename)
     self.debug("type: %s" % str(typename))
     # Walk the rest of the chain, resolving each member's type in turn.
     pchain = chain[1:]
     if not full:
         pchain = pchain[:-1]
     for newtype in pchain:
         typename = typename + "::" + newtype
         typename = self.get_base_type(typename)
         self.debug("type: %s" % str(typename))
     members = self.cc.search_tokens(typename + "::")
     goodmembers = self.filter_members(members, typename)
     return goodmembers
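The two regex passes at the top of traverse_members can be tried on their own. A standalone sketch of how a source line is reduced to a member-access chain (the sample line is made up):

 import re

 line = "foo(bar[idx[0]]->baz.qux"
 # Strip bracketed index expressions, innermost first, until stable.
 old = ""
 while old != line:
     old = line
     line = re.sub(r'\[[^\[]*\]', '', line)
 # Keep the last expression, then split on the access operators.
 line = re.split(r',|&|;|!|\+|\(|\[|\s+', line.strip())[-1].strip()
 chain = [x.split("[", 1)[0] for x in re.split(r'->|\.|::', line)]
 print(chain)  # ['bar', 'baz', 'qux']

The loop is needed because the pattern only removes brackets that contain no further opening bracket, so nested indices disappear one layer per iteration.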
Example #4
 def traverse_members(self, view, pos, full = False):
     # Variant of the previous example that prints its debug output and
     # looks up global tokens by lower-cased name.
     filename = self.currentfile
     line = view.line(pos)
     line.b = pos
     line = view.substr(line)
     oldline = ""
     while oldline != line:
         oldline = line
         line = re.sub(r'\[[^\[]*\]', '', line)
         print(line)
     line = re.split(r',|;|\(|\[|\s+', line.strip())[-1].strip()
     print(line)
     chain = [x.split("[", 1)[0] for x in re.split(r'->|\.|::', line.strip())]
     print(chain)
     func = self.current_function(view)
     if filename not in self.cc.functiontokens or func not in self.cc.functiontokens[filename]:
         print("Not in a filled function (%s, %s)" % (filename, func))
         return []
     tokens = [x for x in self.cc.functiontokens[filename][func] if x[Tokenizer.T_NAME] == chain[0]]
     token = None
     if len(tokens) > 0:
         token = tokens[0]
     else:
         token = self.cc.tokens.get(chain[0].lower())
         if not token or token[Tokenizer.T_KIND] != Tokenizer.K_VARIABLE:
             return []
     typename = ""
     if token[Tokenizer.T_KIND] == Tokenizer.K_PARAM:
         typename = token[Tokenizer.T_EXTRA]["type"]
     else:
         typename = Tokenizer.parsevariable(token[Tokenizer.T_SEARCH])[1]
     typename = self.get_base_type(typename)
     pchain = chain[1:]
     if not full:
         pchain = pchain[:-1]
     for newtype in pchain:
         typename = typename + "::" + newtype
         typename = self.get_base_type(typename)
     members = self.cc.search_tokens(typename + "::")
     # Keep only direct members: names with no further "::" beyond the
     # "type::" prefix.
     goodmembers = [x for x in members if x[Tokenizer.T_NAME][len(typename) + 2:].find("::") == -1]
     return goodmembers
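The closing comprehension keeps only direct members of the resolved type. The same filter on plain strings, with made-up names instead of Tokenizer tuples:

 typename = "mystruct"
 names = ["mystruct::a", "mystruct::b", "mystruct::inner::c"]
 # Direct members have no further "::" after the "type::" prefix.
 direct = [n for n in names if n[len(typename) + 2:].find("::") == -1]
 print(direct)  # ['mystruct::a', 'mystruct::b']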
Example #5
    def get_sel_token(self, view):
        if len(view.sel()) < 1:
            return (None, None)
        selword = view.word(view.sel()[0].end())
        i = selword.begin()
        word = view.substr(selword)
        # If the word is preceded by ->, . or ::, it is a member access:
        # resolve it through traverse_members.
        prev2 = view.substr(sublime.Region(i - 2, i))
        prev1 = view.substr(sublime.Region(i - 1, i))
        if i > 2 and (prev2 == "->" or prev1 == "." or prev2 == "::"):
            members = self.traverse_members(view, selword.end())
            for m in members:
                if m[Tokenizer.T_NAME].endswith("::" + word):
                    return (word, m)
            return (word, None)

        # Otherwise prefer a token local to the current function, then
        # fall back to the global (lower-cased) token table.
        func = self.current_function(view)
        filename = self.currentfile
        if filename in self.cc.functiontokens and func in self.cc.functiontokens[filename] and self.cc.functiontokens[filename][func]:
            tokens = [x for x in self.cc.functiontokens[filename][func] if x[Tokenizer.T_NAME] == word]
            if len(tokens) > 0:
                return (word, Tokenizer.best_match(tokens))
        if word.lower() in self.cc.tokens:
            return (word, self.cc.tokens[word.lower()])
        return (word, None)
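The operator check at the top of get_sel_token translates to plain string slicing once the Sublime Text view is taken out of the picture. A sketch with a made-up buffer and word position:

 buf = "obj->field"
 i = buf.index("field")  # begin() of the word under the cursor
 is_member = i > 2 and (buf[i - 2:i] == "->" or buf[i - 1:i] == "." or buf[i - 2:i] == "::")
 print(is_member)  # True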
Example #6
 def __init__(self, cachesize = 500, cachepath = "/tmp"):
     self.cachesize = cachesize
     self.T = Tokenizer(cachepath, cachesize)
     self.I = IncludeScanner()
     self.tokens = {}
     self.functiontokens = {}
Example #7
class CComplete:
    def __init__(self, cachesize = 500, cachepath = "/tmp"):
        self.cachesize = cachesize
        self.T = Tokenizer(cachepath, cachesize)
        self.I = IncludeScanner()
        self.tokens = {}
        self.functiontokens = {}

    def add_tokens(self, tokens):
        # Merge into the global table, normalizing names to lower case;
        # collisions are resolved by Tokenizer.best_match.
        for tokenname, token in tokens.items():
            lname = tokenname.lower()
            if lname in self.tokens:
                self.tokens[lname] = Tokenizer.best_match([self.tokens[lname], token])
            else:
                self.tokens[lname] = token

    def is_valid(self, filename, basepaths = [], syspaths = [], extra_files=[]):
        files = self.I.scan_recursive(filename, basepaths, syspaths)
        for file in extra_files:
            if file not in files:
                files.append(file)
        return self.T.files_valid(files)

    def load_file(self, filename, basepaths = [], syspaths = [], extra_files=[], progress = None):
        # time.clock() was removed in Python 3.8; use perf_counter().
        t = time.perf_counter()
        self.files = self.I.scan_recursive(filename, basepaths, syspaths)
        t = time.perf_counter() - t
        print("Scanning for includes took: %fs" % t)
        for file in extra_files:
            if file not in self.files:
                self.files.append(file)
        self.T.set_cache_size(max(self.cachesize, len(self.files)))
        self.tokens = {}
        self.functiontokens = {}
        self.sortedtokens = []
        total = len(self.files)
        i = 1
        # Restart the timer; the original "t = clock() - t" here carried
        # the include-scan time into the next measurement.
        t = time.perf_counter()
        for file in self.files:
            if progress:
                progress(i, total)
                i += 1
            tokens, functokens = self.T.scan_file(file)
            self.add_tokens(tokens)
            self.functiontokens[file] = functokens
        t = time.perf_counter() - t
        print("Scanning for tokens took: %fs" % t)
        # Keys are already lower-cased by add_tokens; sort them for the
        # binary search in search_tokens.
        self.sortedtokens = sorted(self.tokens.keys())
        rem = self.T.clean_cache(set(self.files))
        print("Removed %d entries" % rem)
        print("Done loading, %d files" % len(self.files))

    def search_tokens(self, prefix):
        # Binary-search the sorted key list for the first key >= prefix,
        # then collect every consecutive key that starts with it.
        prefix = prefix.lower()
        pos = bisect.bisect_left(self.sortedtokens, prefix)
        results = []
        while pos < len(self.sortedtokens) and self.sortedtokens[pos].startswith(prefix):
            results.append(self.tokens[self.sortedtokens[pos]])
            pos += 1
        return results
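search_tokens works because the keys are sorted: bisect_left finds the first key that is >= the prefix, and every key matching the prefix is contiguous from there. The same scan over a plain sorted list (sample data made up):

 import bisect

 keys = sorted(["mystruct::a", "mystruct::b", "other::x"])
 prefix = "mystruct::"
 pos = bisect.bisect_left(keys, prefix)
 matches = []
 while pos < len(keys) and keys[pos].startswith(prefix):
     matches.append(keys[pos])
     pos += 1
 print(matches)  # ['mystruct::a', 'mystruct::b']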