def test_load_textfile(self):
    # load_textfile(filename)
    data = load_textfile(self.textfilename)
    text = data['content']
    assert isinstance(text, unicode)
    assert text == TEXT_DATA
    assert data['encoding'] == 'utf8'
    assert data['linesep'] == '\n'
def load_document(location, settings):
    textfile = load_textfile(location)
    kwargs = {
        'encoding': textfile['encoding'],
        'linesep': textfile['linesep'],
        'tab_size': settings.tab_size,  # TODO: try and detect first
        'location': location,
        'content': textfile['content'],
    }
    return Document(**kwargs)
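# A minimal usage sketch for load_document(), assuming only what the
# function above requires: a filename on disk and a settings object that
# exposes a ``tab_size`` attribute. The stand-in settings class and the
# path below are hypothetical, not part of the editor's real API.
class _StubSettings(object):
    tab_size = 4

def _example_open():
    # returns a Document built from the file's decoded text and metadata
    return load_document('/tmp/example.txt', _StubSettings())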
def run(self):
    search = self.search
    view = search.view
    workspace = search.workspace
    directory = search.directory
    search_type = search.search_type

    # build the list of files we need to search
    if search_type in (SEARCH_SELECTION, SEARCH_DOCUMENT):
        # a single document: use its location if it has been saved,
        # otherwise the Document object itself
        if view.document.location:
            search.files = set([view.document.location])
        else:
            search.files = set([view.document])
    elif search_type == SEARCH_DOCUMENTS:
        # all open documents
        search.files = set()
        for open_view in search.editor.views:
            if open_view.document.location:
                search.files.add(open_view.document.location)
            else:
                search.files.add(open_view.document)
    elif search_type == SEARCH_WORKSPACE:
        search.files = set()
        if search.workspace_filter_glob:
            pattern = search.workspace_filter_glob
            paths = workspace.filepaths(glob_filter=pattern,
                                        skip_hidden=search.skip_hidden)
        else:
            pattern = search.workspace_filter_regex
            paths = workspace.filepaths(re_filter=pattern,
                                        skip_hidden=search.skip_hidden)
        for filename in paths:
            if self.interrupted:
                return
            search.files.add(filename)
    elif search_type == SEARCH_DIRECTORY:
        search.files = set()
        # sanity checks
        if not os.path.exists(directory):
            return
        if not os.path.isdir(directory):
            return
        pattern = search.directory_filter
        if search.is_recursive:
            kwargs = {
                "rootpath": directory,
                "dirpath": directory,
                "exclude_globs": [],
                "exclude_regulars": [],
                "exclude_hidden": search.skip_hidden,
            }
            if pattern:
                kwargs["match_func"] = lambda s: glob_match(s, pattern)
            else:
                kwargs["match_func"] = lambda s: True
            for filename in filtered_files(**kwargs):
                if self.interrupted:
                    return
                search.files.add(filename)
        else:
            for filename in os.listdir(directory):
                fullpath = os.path.join(directory, filename)
                if os.path.isfile(fullpath):
                    if not pattern or glob_match(filename, pattern):
                        search.files.add(fullpath)

    print len(search.files), "files."

    # cache open files / views, so open documents are searched from memory
    open_files = {}
    for open_view in search.editor.views:
        if open_view.document.location:
            open_files[open_view.document.location] = open_view
        else:
            # files without locations haven't been saved yet
            open_files[open_view.document] = open_view

    if search_type == SEARCH_SELECTION:
        pass  # forget about this one for now
    else:
        # go through the files one by one and search each of them
        for fle in search.files:
            if self.interrupted:
                return
            if isinstance(fle, Document):
                # an unsaved document: only searchable if it is still open
                document = fle
                path = document.description
                if document in open_files:
                    content = document.content
                else:
                    continue
            else:
                # a document that has a location on disk
                filename = fle
                path = filename
                if filename in open_files:
                    # the file is open, so read its content from memory
                    doc = open_files[filename].document
                    content = doc.content
                else:
                    # we don't have the file open, so read it from disk
                    try:
                        data = load_textfile(filename)
                        content = data["content"]
                    except BinaryFile:
                        # the file appears to be binary, so skip it
                        continue
                    except Exception:
                        raise  # TODO: handle read errors nicely
            print path
            if search.use_regex:
                found_matches = self.search_regex(path, content)
            else:
                found_matches = self.search_simple(path, content)
            if found_matches:
                pass  # signal redraw
    search.notify_done()
def test_load_textfile_fail(self):
    # load_textfile() is expected to raise BinaryFile for binary input
    # (assumed from how the search code handles it); checked here without
    # relying on a particular test framework's raises helper
    try:
        load_textfile(self.binaryfilename)
    except BinaryFile:
        pass
    else:
        assert False, "expected BinaryFile to be raised"