def _file_name_set ( self, file_name ):
    """ Handles the 'file_name' facet being changed. """
    if file_name == '':
        return

    file_name  = abspath( file_name )
    text_files = self.text_files

    # Optionally close every unmodified file other than the one selected:
    if self.auto_close:
        for existing in text_files[:]:
            if (not existing.modified) and (existing.file_name != file_name):
                text_files.remove( existing )

    # Reuse the matching already open file if there is one; otherwise open
    # the requested file and add it to the list:
    for text_file in text_files:
        if text_file.file_name == file_name:
            break
    else:
        text_file = TextFile(
            file_name = file_name,
            text      = read_file( file_name ) or ''
        ).set( modified = False )
        text_files.append( text_file )

    self.text_file = text_file

    # Reset 'file_name' so the same file can be selected again later:
    do_later( self.set, file_name = '' )
def _get_md5 ( self ):
    """ Returns the md5 hash of the file's contents. """
    return (hashlib.md5( read_file( self.absolute_path ) ).hexdigest()
            if self.is_file else '')
def _data_default ( self ):
    """ Returns the default value for 'data': the contents of the file if
        it exists and is readable, and '' otherwise.
    """
    if not self.is_file:
        return ''

    data = read_file( self.absolute_path )

    return data if data is not None else ''
def _file_name_set ( self, file_name ):
    """ Handles the 'file_name' facet being changed.

        Reads the file's source, then derives the dotted Python module and
        package names for the file by locating it under one of the entries
        in 'self.path' and verifying that each intermediate directory is a
        package (contains an '__init__.py'). Updates the 'package', 'module'
        and 'source' facets. Does nothing if the file cannot be read.
    """
    source = read_file( file_name )
    if source is not None:
        # Strip the extension; the module name is the file's base name
        # unless a containing package path can be established below:
        file_name = splitext( abspath( file_name ) )[0]
        module    = basename( file_name )
        package   = ''
        for path in self.path:
            path = abspath( path )
            if file_name.startswith( path ):
                # Path-relative name, e.g. 'pkg/sub/mod':
                base = file_name[ len( path ): ].strip( os.sep )
                for path_element in base.split( os.sep )[:-1]:
                    path = join( path, path_element )
                    # Every intermediate directory must be a package:
                    if not isfile( join( path, '__init__.py' ) ):
                        break
                else:
                    # All intermediate directories are packages, so derive
                    # the dotted module/package names:
                    # NOTE(review): for a module directly under a path root
                    # (no '.' in 'module'), rsplit leaves 'package' equal to
                    # 'module' rather than '' -- confirm this is intended.
                    module  = base.replace( os.sep, '.' )
                    package = module.rsplit( '.', 1 )[0]

                    break

        self.package = package
        self.module  = module
        self.source  = source
def __init__(self, **facets):
    """ Initializes the object.

        Loads the source for 'file_name', optionally narrowing it to the
        'lines' line window starting at 'line', hooks the FBI module's
        breakpoint line notifications, and builds the viewer title.
    """
    super(AnFBIViewer, self).__init__(**facets)

    source = read_file(self.file_name)
    if source is not None:
        text = source.split("\n")
        nlines = self.lines
        if nlines >= 0:
            # Show only a window of 'nlines' lines starting at 'line';
            # 'starting_line' remembers the window's offset into the file:
            self.nlines = nlines
            line = self.line - 1
            source = "\n".join(text[line : line + nlines])
            self.line = 1
            self.starting_line = line
        else:
            # Show the entire file:
            self.nlines = len(text)

        self.selected_line = self.cursor = self.line
        self.source = source

    # Track breakpoint line changes for the module being viewed:
    self.module = self.fbi.get_module(self.file_name)
    self.module.on_facet_set(self.update_viewer, "bp_lines[]")

    # Build the viewer title, including the object (if any) being viewed:
    title = self.file_name
    object = self.object
    if object is not None:
        title += " Object: %s(0x%08X)" % (object.__class__.__name__, id(object))

    self.title = title
    self.update_viewer()
def _get_source ( self ):
    """ Returns the stripped text of source line 'self.line' within
        'self.file', caching the result in '_source'. Returns '???' if the
        file cannot be read or the line number is out of range.
    """
    if self._source is None:
        try:
            lines = read_file( self.file ).split( '\n' )

            # Guard against line numbers < 1; a negative Python index would
            # otherwise silently return a line counted from the end of the
            # file instead of the '???' placeholder:
            if self.line < 1:
                raise IndexError( self.line )

            self._source = lines[ self.line - 1 ].strip()
        except Exception:
            # Also covers read_file() returning None (AttributeError on
            # .split above). Narrowed from a bare 'except:' so that
            # KeyboardInterrupt/SystemExit are not swallowed:
            self._source = '???'

    return self._source
def image_data(self, image_name):
    """ Returns the image data (i.e. file contents) for the specified image
        name.
    """
    volume_name, file_name, encoded = split_image_name(image_name)

    if not self.is_zip_file:
        return read_file(join(self.path, file_name))

    return self.zip_file.read(file_name)
def _get_source(self):
    """ Returns the stripped source line at 'self.line' within
        'self.file_name', caching the result; returns '' if the file cannot
        be read or the line does not exist.
    """
    if self._source is not None:
        return self._source

    self._source = ""
    contents = read_file(self.file_name)
    if contents is not None:
        try:
            self._source = contents.split("\n")[self.line - 1].strip()
        except:
            pass

    return self._source
def _convert_markdown ( self, text ):
    """Returns the HTML representation of the markdown formatted text
       represented by *text*.
    """
    global MarkdownCSS, HTMLTemplate

    # Lazily load the default markdown style sheet the first time through:
    if MarkdownCSS == '':
        css_path    = join( dirname( markdown2.__file__ ), 'default.css' )
        MarkdownCSS = read_file( css_path )

    html = markdown2.markdown( text )

    return (HTMLTemplate % ( MarkdownCSS, html ))
def update(self, file_name):
    """ Refreshes the text file from disk if it has not been modified and is
        still the same file.
    """
    if file_name == self.file_name:
        if self.ignore_reload:
            # A reload we triggered ourselves; consume the flag and skip:
            self.ignore_reload = False
        elif not self.modified:
            text = read_file(file_name)
            # read_file() returns None if the file could not be read; do
            # not clobber the current buffer contents in that case:
            if text is not None:
                self.text = text
                self.status = "File reloaded"
                self.modified = False
    else:
        # The watched name no longer matches this file; stop watching it:
        file_watch.watch(self.update, file_name, remove=True)
def source_for ( self, line ):
    """ Returns the stripped text of the specified 1-based source file
        *line*, or '???' if the file cannot be read or the line number is
        out of range. The file contents are read once and cached.
    """
    if self._source_file is None:
        source = None
        try:
            source = read_file( self.file )
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit are not swallowed:
            pass

        # read_file() returns None on failure; cache an empty line list so
        # every lookup falls through to '???':
        self._source_file = (source.split( '\n' )
                             if source is not None else [])

    # Guard against line numbers < 1; a negative Python index would
    # otherwise silently return a line counted from the end of the file:
    if line < 1:
        return '???'

    try:
        return self._source_file[ line - 1 ].strip()
    except IndexError:
        return '???'
def _update_file ( self, file_name ):
    """ Updates the current presentation with the contents of the file
        specified by *file_name*.
    """
    text = read_file( file_name )

    # Reject unreadable files and files containing binary data markers:
    if (text is None) or ('\x00' in text) or ('\xFF' in text):
        return False

    self.file_base    = dirname( file_name )
    self.presentation = text

    return True
def _load_file_name ( self, file_name ):
    """ Loads a specified source file. """
    self._dont_update = True
    self.can_save     = True

    source = read_file( file_name )
    if source is None:
        source        = ''
        self.can_save = False
        self.error    = 'Error reading file'

    self.source       = source
    self._dont_update = False
    self.needs_save   = False
def _item_contents_default ( self ):
    """ Returns the default contents to display for the item. Binary data
        is summarized (and 'is_binary' set) rather than returned verbatim.
    """
    contents = read_file( self.item )
    if contents is None:
        return ''

    if ('\x00' in contents) or ('\xFF' in contents):
        self.is_binary = True
        if len( contents ) <= 20:
            return repr( contents )[1:-1]

        head = repr( contents[:10]  )[1:-1]
        tail = repr( contents[-10:] )[1:-1]

        return ('%s [...binary data...] %s' % ( head, tail ))

    return contents.rstrip()
def add ( self, document ): """ Adds the document whose file name is specified by *document* to the document index database. """ # Normalize the document path: document = abspath( document ) # Only index documents that we have not already indexed previously: if IndexDocument( document = document ).load() is not None: print ("'%s' has already been indexed and is being ignored." % document) return False # Read the contents of the document (if possible): text = read_file( document ) if text is None: print "'%s' could not be read and is being ignored." % document return False # Select the parsing method to use (Python or normal text): if splitext( document )[1] == '.py': next_word = self.parse_python( text ) else: next_word = self.parse_text( text ) # Parse the document into words and add each valid word to the document # index, creating new entries in the index for newly encountered words: words = 0 all_words = self.all_words for word in next_word(): index_word = all_words.get( word ) if index_word is None: all_words[ word ] = index_word = \ IndexWord( word = word ).load( add = True ) index_word.documents.add( document ) index_word.count += 1 words += 1 # Add a new entry for the document to the index: IndexDocument( document = document, words = words ).save() # Indicate that the document was processed successfully: print "'%s' has been added to the document index." % document return True
def _update ( self, file_name ):
    """ Updates the view with the contents of the specified file. """
    data = read_file( file_name )
    if data is None:
        return

    if not self.is_text( data ):
        # Binary data: render it as formatted rows of 16 bytes each:
        format    = self.format
        self.text = '\n'.join( [ format( i, data[ i: i + 16 ] )
                                 for i in range( 0, len( data ), 16 ) ] )
    elif self.lines > 0:
        # Text data restricted to a window of 'lines' lines:
        first     = max( 0, self.line - 1 )
        self.text = '\n'.join(
            data.split( '\n' )[ first: self.line + self.lines - 1 ] )
    else:
        # Text data shown in its entirety:
        self.text = data
def _get_body ( self ):
    """ Returns the body content. A single-line 'content' naming an existing
        file of a recognized type is replaced by that file's contents, and
        markdown content is converted to HTML. Also updates 'is_html'.
    """
    is_markdown = self.is_markdown
    content     = self.content

    # A single line of content may actually be a file name to load:
    if '\n' not in content:
        ext = splitext( content )[1].lower()
        if (ext in ContentTypes) and isfile( content ):
            content     = read_file( content )
            is_markdown = (ext == '.md')

    if is_markdown:
        content = self._convert_markdown( content )

    lowered      = content.lower()
    self.is_html = (('<html>' in lowered) and ('</html>' in lowered))

    return content
def invoke ( self, file_name, monitor ):
    """ Invokes the rule for a specified file name.

        Builds a substitution dictionary mapping '%x' style markers to
        repr-quoted values derived from *file_name* and *monitor*, then asks
        *monitor* to invoke the substituted rule text.
    """
    path, base = split( file_name )
    root, ext  = splitext( base )

    # Marker -> replacement mapping (values repr-quoted so they can be
    # spliced into the rule text as literals):
    dic = {
        '%n': repr( file_name ),            # Full file name
        '%p': repr( path ),                 # Directory portion
        '%b': repr( base ),                 # Base name with extension
        '%r': repr( root ),                 # Base name without extension
        '%x': repr( ext ),                  # Extension
        '%^': repr( monitor.path ),         # Monitor's root path
        '%.': repr( join( path, root ) ),   # Full name without extension
        '%m': 'monitor'
    }

    # Only read the (possibly large) file contents if the rule uses them.
    # NOTE(review): 'rule' is not defined locally; presumably a module-level
    # value or it should be 'self.rule' -- confirm against the rest of the
    # file before relying on this method.
    if '%c' in rule:
        dic[ '%c' ] = repr( read_file( file_name ) )

    monitor.invoke( file_name, self.substitute( rule, dic ) )
def restore ( self ):
    """ Restores the correct line value based on current source file
        contents.

        Searches outward (up to 100 lines in each direction) from the last
        known location for a line matching the saved source text. Returns
        True if found (updating 'line'/'end_line' and re-registering), and
        False otherwise. If the search fails, 'line' is set to 0.
    """
    self._file_set()
    try:
        lines = read_file( self.file ).split( '\n' )
    except:
        # Also covers read_file() returning None when the file is
        # unreadable:
        return False

    n      = len( lines )
    line   = self.line - 1              # Last known 0-based line index
    delta  = self.end_line - self.line  # Span between start and end lines
    source = self.source

    # Search outward from the last known location of the source for a
    # match:
    for i in range( 100 ):
        # Stop once both directions are exhausted:
        if ((line - i) < 0) and ((line + i) >= n):
            break

        # Try 'i' lines below the last known location:
        j = line + i
        if (j < n) and (source == lines[j].strip()):
            self.line     = j + 1
            self.end_line = delta + j + 1
            self.register()

            return True

        # Try 'i' lines above the last known location:
        j = line - i
        if (j >= 0) and (source == lines[j].strip()):
            self.line     = j + 1
            self.end_line = delta + j + 1
            self.register()

            return True

    # Indicate source line could not be found:
    self.line = 0

    return False
def add_path(self, volume_name, path=None): """ Adds the directory specified by **path** as a *virtual* volume called **volume_name**. All image files contained within path define the contents of the volume. If **path** is None, the *images* contained in the 'images' subdirectory of the same directory as the caller are is used as the path for the *virtual* volume.. """ # Make sure we don't already have a volume with that name: if volume_name in self.catalog: raise FacetError("The volume name '%s' is already in the image library." % volume_name) # If no path specified, derive one from the caller's source code # location: if path is None: path = join(get_resource_path(2), "images") # Make sure that the specified path is a directory: if not isdir(path): raise FacetError("The image volume path '%s' does not exist." % path) # Create the ImageVolume to describe the path's contents: image_volume_path = join(path, "image_volume.py") if exists(image_volume_path): volume = get_python_value(read_file(image_volume_path), "volume") else: volume = ImageVolume() # Set up the rest of the volume information: volume.set(name=volume_name, path=path, is_zip_file=False) # Try to bring the volume information up to date if necessary: volume.check_save() # Add the new volume to the library: self.catalog[volume_name] = volume self.volumes.append(volume)
def _item_set ( self, item ):
    """ Handles the 'item' facet being changed.

        Creates an appropriate inspector (image, theme, markdown,
        presentation, pickled object, file or generic object) for the new
        item and appends it to the 'inspectors' notebook list, then resets
        'item' back to None so the next drop can be processed.
    """
    # Check to see if it is a list of File objects, which represent files
    # dropped onto the view from an external source (like MS Explorer):
    if isinstance( item, list ) and (len( item ) > 0):
        for an_item in item:
            if not (isinstance( an_item, File ) or
                    (isinstance( an_item, basestring ) and
                     isfile( an_item ))):
                break
        else:
            # Every element is a file; process each one recursively:
            for an_item in item:
                self._item_set( an_item )

            return

    # Set up the default values:
    name = full_name = ''
    line  = 0
    lines = -1

    # Extract the file name from a File object:
    if isinstance( item, File ):
        item = item.absolute_path

    # Handle the case of an item which contains a payload:
    elif isinstance( item, HasPayload ):
        name      = item.payload_name
        full_name = item.payload_full_name
        item      = item.payload

    # Handle the case of a file position, which has a file name and a
    # possible starting line and range of lines:
    if isinstance( item, FilePosition ):
        name  = item.name
        line  = item.line
        lines = item.lines
        item  = item.file_name

    # Only create an inspector if there actually is a valid item:
    if item is not None:
        inspector = None

        # If it is an image, create an ImageInspector for it:
        if isinstance( item, AnImageResource ):
            inspector = ImageInspector( image = item )

        # If it is a theme, create a ThemeLayout tool to view it:
        elif isinstance( item, Theme ):
            from theme_layout import ThemeLayout

            inspector = ThemeLayout( theme = item )

        # If it is a string value, check to see if it is a valid file name:
        elif isinstance( item, basestring ) and isfile( item ):
            ext = splitext( item )[1].lower()
            if ext in ImageTypes:
                inspector = ImageInspector( image = item )
            elif ext == '.md':
                inspector = MarkdownInspector( file = item )
            elif ext == '.pres':
                inspector = PresentationInspector( file = item )
            else:
                data = read_file( item, 'r' )
                if data is not None:
                    if name == '':
                        name      = basename( item )
                        full_name = item

                    # First try treating the file as text-mode pickle data,
                    # then as binary pickle data, and finally fall back to
                    # a plain file inspector:
                    try:
                        inspector = ObjectInspector(
                            object    = self._object_from_pickle( data ),
                            name      = name,
                            full_name = full_name,
                            owner     = self )
                    except:
                        try:
                            inspector = ObjectInspector(
                                object    = self._object_from_pickle(
                                                read_file( item ) ),
                                name      = name,
                                full_name = full_name,
                                owner     = self )
                        except:
                            inspector = FileInspector(
                                name  = name,
                                line  = line,
                                lines = lines ).set(
                                file_name = item )

        # If it is not a file, then it must just be a generic object:
        if inspector is None:
            inspector = ObjectInspector(
                object    = item,
                name      = name,
                full_name = full_name,
                owner     = self )

        inspectors = self.inspectors

        # Make sure the # of inspectors doesn't exceed the maximum allowed:
        if len( inspectors ) >= self.max_inspectors:
            del inspectors[0]

        # Add the new inspector to the list of inspectors (which will
        # cause it to appear as a new notebook page):
        inspectors.append( inspector )

    # Reset the current item to None, so we are ready for a new item:
    do_later( self.set, item = None )
def _update_markdown_file ( self, markdown_file ):
    """ Handles the current markdown file being updated in some way. """
    text          = read_file( markdown_file )
    self.markdown = text if text else ''
def _update_css_file ( self, css_file ):
    """ Handles the current .css file being updated in some way. """
    css = read_file( css_file )
    if css is None:
        self.css = ''
    else:
        self.css = (CSSStyleTemplate % css)
def _execute_file ( self, file_name ):
    """ Attempts to load and execute the file specified by *file_name*. """
    text = read_file( self._file_name_for( file_name.strip() ) )
    if text is None:
        return 1

    return self._parse_slides( text )
def save(self):
    """ Saves the contents of the image volume using the current contents of
        the **ImageVolume**.

        Returns True if the save succeeded (or was not needed), and False if
        the target directory or zip file is not writable.
    """
    path = self.path
    if not self.is_zip_file:
        # Make sure the directory is writable:
        if not access(path, R_OK | W_OK | X_OK):
            return False
    # Make sure the directory and zip file are writable:
    elif (not access(dirname(path), R_OK | W_OK | X_OK)) or (exists(path) and (not access(path, W_OK))):
        return False

    # Pre-compute all of the file contents that need to be generated so that
    # we can check them against the existing files (if any) to see if an
    # update really needs to be made:
    image_volume_code = self.image_volume_code
    images_code = self.images_code
    license_text = self.license_text

    if not self.is_zip_file:
        # Get the file paths for all files that need to be updated:
        image_volume_py = join(path, "image_volume.py")
        image_info_py = join(path, "image_info.py")
        license_txt = join(path, "license.txt")

        modified = False

        # Write the image info source code to a file if it changed:
        if images_code != read_file(image_info_py):
            write_file(image_info_py, images_code)
            modified = True

        # Write a separate license file for human consumption if it changed:
        if license_text != read_file(license_txt):
            write_file(license_txt, license_text)
            modified = True

        # Write the volume manifest source code to a file if anything
        # changed:
        if modified or (image_volume_code != read_file(image_volume_py)):
            write_file(image_volume_py, image_volume_code)

        return True

    # Check to see if we really have to update the current zip file by
    # checking to see if any changes have actually occurred to any of the
    # files we need to update:
    try:
        cur_zf = self.zip_file
        if (
            (images_code == cur_zf.read("image_info.py"))
            and (license_text == cur_zf.read("license.txt"))
            and (image_volume_code == cur_zf.read("image_volume.py"))
        ):
            return True
    except:
        # A missing or unreadable zip entry just means an update is needed:
        pass

    # Create a temporary name for the new .zip file:
    file_name = path + ".###"

    # Create the new zip file:
    try:
        new_zf = ZipFile(file_name, "w", ZIP_DEFLATED)
    except IOError:
        # We catch this error because there are some cases under Windows
        # where it passes the preceding os.access tests incorrectly. Once
        # the bug in os.access has been fixed, this try block can be
        # removed...
        return False

    try:
        # Copy all of the image files from the current zip file to the new
        # zip file:
        for name in cur_zf.namelist():
            if name not in dont_copy_list:
                new_zf.writestr(name, cur_zf.read(name))

        # Temporarily close the current zip file while we replace it with
        # the new version:
        cur_zf.close()

        # Write the volume manifest source code to the zip file:
        new_zf.writestr("image_volume.py", image_volume_code)

        # Write the image info source code to the zip file:
        new_zf.writestr("image_info.py", images_code)

        # Write a separate license file for human consumption:
        new_zf.writestr("license.txt", license_text)

        # Done creating the new zip file:
        new_zf.close()
        new_zf = None

        # Rename the original file to a temporary name, so we can give the
        # new file the original name. Note that unlocking the original zip
        # file after the previous close sometimes seems to take a while,
        # which is why we repeatedly try the rename until it either succeeds
        # or takes so long that it must have failed for another reason:
        temp_name = path + ".$$$"
        for i in range(50):
            try:
                rename(path, temp_name)
                break
            except:
                sleep(0.1)

        try:
            rename(file_name, path)
            # Arrange for the old (renamed) zip file to be removed in the
            # 'finally' clause below:
            file_name = temp_name
        except:
            # Could not install the new file; restore the original and
            # re-raise (the partial new file is removed in 'finally'):
            rename(temp_name, path)
            raise
    finally:
        if new_zf is not None:
            new_zf.close()

        remove(file_name)

    return True
def _load_image_info(self): """ Returns the list of ImageInfo objects for the images in the volume. """ # If there is no current path, then return a default list of images: if self.path == "": return [] time_stamp = time_stamp_for(stat(self.path)[ST_MTIME]) volume_name = self.name old_images = [] cur_images = [] if self.is_zip_file: zf = self.zip_file # Get the names of all top-level entries in the zip file: names = zf.namelist() # Check to see if there is an image info manifest file: if "image_info.py" in names: # Load the manifest code and extract the images list: old_images = get_python_value(zf.read("image_info.py"), "images") # Check to see if our time stamp is up to date with the file: if self.time_stamp < time_stamp: # If not, create an ImageInfo object for all image files # contained in the .zip file: for name in names: root, ext = splitext(name) if ext in ImageFileExts: cur_images.append(ImageInfo(name=root, image_name=join_image_name(volume_name, name))) else: image_info_path = join(self.path, "image_info.py") if exists(image_info_path): # Load the manifest code and extract the images list: old_images = get_python_value(read_file(image_info_path), "images") # Check to see if our time stamp is up to date with the file: if self.time_stamp < time_stamp: # If not, create an ImageInfo object for each image file # contained in the path: for name in listdir(self.path): root, ext = splitext(name) if ext in ImageFileExts: cur_images.append(ImageInfo(name=root, image_name=join_image_name(volume_name, name))) # Merge the old and current images into a single up to date list: if len(cur_images) == 0: images = old_images else: cur_image_set = dict([(image.image_name, image) for image in cur_images]) for old_image in old_images: cur_image = cur_image_set.get(old_image.image_name) if cur_image is not None: cur_image_set[old_image.image_name] = old_image cur_image.volume = self old_image.width = cur_image.width old_image.height = cur_image.height cur_image.volume = None 
images = cur_image_set.values() # Set the new time stamp of the volume: self.time_stamp = time_stamp # Return the resulting sorted list as the default value: images.sort(key=lambda item: item.image_name) # Make sure all images reference this volume: for image in images: image.volume = self return images
def watched_file_name_updated ( self, file_name ):
    """ Handles the watched file being updated by forwarding its current
        contents to the data handler.
    """
    contents = read_file( file_name )
    self.watched_file_name_data( contents )
def _save_xref ( self, xref_file ):
    """ Build the cross-reference data for the current 'root' directory and
        save it in the file specified by *xref_file*.

        Walks every '.py' file under 'root' (skipping '.svn' directories),
        collecting for each symbol the locations where it is defined and
        where it is imported. Entries with no references are dropped, and
        the result is pickled to *xref_file* sorted case-insensitively by
        symbol name.
    """
    # Create the regex pattern used to match 'from ...' statements:
    package = self.package
    if package != '':
        # Escape the trailing '.' for use inside the regex template:
        package += '\.'

    regex = re.compile( ImportTemplate % package, re.MULTILINE | re.DOTALL )
    n     = len( self.root ) + 1
    xrefs = {}
    for path, dirs, files in walk( self.root ):
        # Don't process any .svn directories:
        for i, dir in enumerate( dirs ):
            if dir == '.svn':
                del dirs[ i ]

                break

        # Process each file in the directory:
        for file in files:
            file_name = join( path, file )

            # Only handle Python files:
            if splitext( file_name )[1] == '.py':
                # Create a location independent file name:
                short_file_name = file_name[ n: ]

                # Find each 'from package.xxx import ...' statement in file
                # (after normalizing all line endings to '\n'):
                source = read_file( file_name ).replace( '\r\n',
                                                         '\n' ).replace(
                                                         '\r', '\n' )
                names  = {}
                for match in regex.finditer( source ):
                    line = (len( source[ : match.start() ].split( '\n' ) )
                            + 1)
                    # Strip line continuations and trailing comments or
                    # statements from the imported name list:
                    text = match.group( 1 ).replace( '\\', '' )
                    for c in '#;':
                        text = text.split( c, 1 )[0]

                    for name in text.split( ',' ):
                        name = name.split()[0].strip()
                        names.setdefault( name, ( line, 1 ) )

                # For each unique symbol, add a cross-reference entry to
                # the current file:
                for name, location in names.iteritems():
                    entry = xrefs.get( name )
                    if entry is None:
                        xrefs[ name ] = entry = ( [], [] )

                    entry[1].append( ( short_file_name, ) + location )

                # Add all top-level definitions contained in the file:
                file_defs = self._find_definitions( source )
                for name, location in file_defs.iteritems():
                    entry = xrefs.get( name )
                    if entry is None:
                        xrefs[ name ] = entry = ( [], [] )

                    entry[0].append( ( short_file_name, ) + location )

    # Eliminate any entries which have no references (note: relies on
    # Python 2 'keys()' returning a list, since entries are deleted while
    # iterating):
    for name in xrefs.keys():
        if len( xrefs[ name ][1] ) == 0:
            del xrefs[ name ]

    # Sort all cross-reference symbols case insensitively (Python 2 only
    # 'cmp' based sort):
    names = xrefs.keys()
    names.sort( lambda l, r: cmp( l.lower(), r.lower() ) )

    # Pickle the cross-reference data to a save file:
    fh = open( xref_file, 'wb' )
    dump( [ ( name, ) + xrefs[ name ] for name in names ], fh, -1 )
    fh.close()
def _file_set ( self, file ): self.presentation = read_file( file ) self.file_base = dirname( file )
def _text_default ( self ):
    """ Returns the default value for the 'text' facet: the contents of the
        item's file, or '' if the file cannot be read.
    """
    # read_file() returns None on failure; fall back to '' so the facet
    # default is always a string:
    return read_file( self.item.path ) or ''