def handle_style(self, chunk):
    """Consume the indented body of a ``.. styles::`` directive and merge
    the parsed RSON data into self.styles['styles'].

    chunk -- any text after the ``::`` on the directive line; the
             directive takes no arguments, so non-empty text is an error.
    """
    self.changed = True
    if chunk:
        log.error(".. styles:: does not recognize string %s" % repr(chunk))
        return
    # Split the next source chunk into lines; consume the indented lines
    # (the style block) and push the remainder back onto self.source.
    source = self.source
    data = source and source.pop().splitlines() or []
    data.reverse()
    mystyles = []
    while data:
        myline = data.pop().rstrip()
        if not myline:
            continue
        if myline.lstrip() == myline:
            # First non-indented line terminates the block; keep it.
            data.append(myline)
            break
        mystyles.append(myline)
    data.reverse()
    data.append('')
    source.append('\n'.join(data))
    if not mystyles:
        # BUGFIX: return after logging -- the min() below raises an
        # uncaught ValueError on an empty sequence.
        log.error("Empty .. styles:: block found")
        return
    # Strip the common leading indentation before parsing.
    indent = min(len(x) - len(x.lstrip()) for x in mystyles)
    mystyles = [x[indent:] for x in mystyles]
    mystyles.append('')
    mystyles = '\n'.join(mystyles)
    try:
        styles = rson_loads(mystyles)
        self.styles.setdefault('styles', {}).update(styles)
    except ValueError as e:
        # Error parsing the JSON data
        # (BUGFIX: 'except X, e' is a syntax error on Python 3)
        log.critical('Error parsing stylesheet "%s": %s' %
                     (mystyles, str(e)))
def readStyle(self, ssname):
    """Resolve *ssname* to a stylesheet and return the parsed RSON data.

    If callables are used, they should probably be subclassed
    strings, or something else that will print nicely for errors.

    Returns None when the stylesheet cannot be found, opened, or parsed
    (open/parse failures are logged via log.critical).
    """
    if callable(ssname):
        return ssname()
    fname = self.findStyle(ssname)
    if fname:
        try:
            # BUGFIX: use a context manager so the file handle is closed
            # even when parsing fails (the original leaked it).
            with open(fname) as f:
                return rson_loads(f.read())
        except ValueError as e:
            # Error parsing the JSON data
            log.critical('Error parsing stylesheet "%s": %s' % (fname, str(e)))
        except IOError as e:
            # Error opening the ssheet
            log.critical('Error opening stylesheet "%s": %s' % (fname, str(e)))
def readStyle(self, ssname):
    """Resolve *ssname* to a stylesheet and return the parsed RSON data.

    If callables are used, they should probably be subclassed
    strings, or something else that will print nicely for errors.

    Returns None when the stylesheet cannot be found, opened, or parsed
    (open/parse failures are logged via log.critical).
    """
    if callable(ssname):
        return ssname()
    fname = self.findStyle(ssname)
    if fname:
        try:
            # BUGFIX: context manager closes the handle the original
            # leaked; 'except X, e' below was Python-2-only syntax.
            with open(fname) as f:
                return rson_loads(f.read())
        except ValueError as e:
            # Error parsing the JSON data
            log.critical('Error parsing stylesheet "%s": %s' % (fname, str(e)))
        except IOError as e:
            # Error opening the ssheet
            log.critical('Error opening stylesheet "%s": %s' % (fname, str(e)))
def handle_style(self, chunk):
    """
    Parse through the source until we find lines that are no longer
    indented, then pass our indented lines to the RSON parser.
    """
    self.changed = True
    if chunk:
        # The directive takes no arguments on its own line.
        log.error(".. style:: does not recognize string %s" % repr(chunk))
        return
    mystyles = "\n".join(self.read_indented())
    if not mystyles:
        log.error("Empty .. style:: block found")
    try:
        # NOTE(review): the parsed result is bound but never stored;
        # presumably it should be merged into self.styles -- confirm
        # against the callers before changing.
        styles = rson_loads(mystyles)
    except ValueError as e:
        # Error parsing the JSON data
        # (BUGFIX: 'except X, e' is a syntax error on Python 3)
        log.critical('Error parsing stylesheet "%s": %s' % (mystyles, str(e)))
def handle_style(self, chunk):
    '''
    Parse through the source until we find lines that are no longer
    indented, then pass our indented lines to the RSON parser.
    '''
    self.changed = True
    if chunk:
        # The directive takes no arguments on its own line.
        log.error(".. style:: does not recognize string %s" % repr(chunk))
        return
    mystyles = '\n'.join(self.read_indented())
    if not mystyles:
        log.error("Empty .. style:: block found")
    try:
        # NOTE(review): the parsed result is bound but never stored;
        # presumably it should be merged into self.styles -- confirm
        # against the callers before changing.
        styles = rson_loads(mystyles)
    except ValueError as e:
        # Error parsing the JSON data
        # (BUGFIX: 'except X, e' is a syntax error on Python 3)
        log.critical('Error parsing stylesheet "%s": %s' %
                     (mystyles, str(e)))
def readStyle(self, ssname):
    """Resolve *ssname* to a stylesheet and return the parsed RSON data.

    If callables are used, they should probably be subclassed
    strings, or something else that will print nicely for errors.

    Returns None when the stylesheet cannot be found, opened, or parsed
    (open/parse failures are logged via log.critical).
    """
    if callable(ssname):
        return ssname()
    fname = self.findStyle(ssname)
    if fname:
        try:
            # BUGFIX: open() must be inside the try block -- the
            # 'except IOError' handler below was unreachable while the
            # only call that can raise IOError sat outside it.
            with open(fname) as f:
                stylestr = f.read()
            return rson_loads(stylestr)
        except ValueError:
            # Error parsing the JSON data
            # (sys.exc_info keeps this body valid on both Python 2 and 3)
            _, e, _ = sys.exc_info()
            log.critical('Error parsing stylesheet "%s": %s\n', fname, str(e))
        except IOError:
            # Error opening the ssheet
            _, e, _ = sys.exc_info()
            log.critical('Error opening stylesheet "%s": %s', fname, str(e))
def __call__(self):
    """Parse self.value as RSON and return the resulting data."""
    raw_text = self.value
    return rson_loads(raw_text)
def __init__(self, sourcef, incfile=False):
    """Preprocess *sourcef*, leaving the (possibly rewritten) text in
    self.result and any extracted style data in self.styles.

    sourcef -- open file-like object with .name and .read()
    incfile -- True when this is an include file; it is then first
               tried as an RSON stylesheet.
    """
    name = sourcef.name
    # Normalize all line endings to '\n'.
    source = sourcef.read().replace('\r\n', '\n').replace('\r', '\n')
    if incfile:
        # An include file may be a stylesheet: if it parses as RSON,
        # record the styles and stop -- nothing to preprocess.
        try:
            self.styles = rson_loads(source)
        except:
            # NOTE(review): bare except deliberately treats any parse
            # failure as "not a stylesheet" and falls through to
            # ordinary text preprocessing below -- confirm before
            # narrowing.
            pass
        else:
            self.styles['styles'] = dict(self.styles['styles'])
            self.changed = True
            self.keep = False
            return
    # Wrap the raw text so downstream code can treat it like a file.
    self.sourcef = DummyFile(source)
    self.sourcef.name = name
    # Take the source apart with the class's splitter, dropping empties.
    self.source = source = [x for x in self.splitter(source) if x]
    self.result = result = []
    self.styles = {}
    self.widthcount = 0
    self.changed = False
    # Reverse so pop() takes chunks from the logical front cheaply.
    source.reverse()
    isblank = False
    while source:
        wasblank = isblank
        isblank = False
        chunk = source.pop()
        result.append(chunk)
        # Only process single lines
        if not chunk.endswith('\n'):
            continue
        result[-1] = chunk[:-1]
        if chunk.index('\n') != len(chunk) - 1:
            continue
        # A '.. keyword::' directive, or a lone alphabetic word right
        # after a blank chunk (the 'single' keyword), selects a handler.
        tokens = chunk.split()
        isblank = not tokens
        if len(tokens) >= 2 and tokens[0] == '..' and tokens[1].endswith(
                '::'):
            keyword = tokens[1][:-2]
            if keyword not in self.keywords:
                continue
            chunk = chunk.split('::', 1)[1]
        elif wasblank and len(tokens) == 1 and chunk[0].isalpha(
                ) and tokens[0].isalpha():
            keyword = 'single'
            chunk = tokens[0]
        else:
            continue
        # Recognized directive: drop it from the output and dispatch to
        # the matching handle_<keyword> method.
        result.pop()
        getattr(self, 'handle_' + keyword)(chunk.strip())
    if self.changed:
        # Something was rewritten: persist the result to disk (useful
        # for debugging) and hand back the temp file.
        result.append('')
        result = DummyFile('\n'.join(result))
        result.name = name + '.build_temp'
        self.keep = keep = len(result.strip())
        if keep:
            f = open(result.name, 'wb')
            f.write(result)
            f.close()
        self.result = result
    else:
        # Nothing changed -- just reuse the wrapped original source.
        self.result = self.sourcef
def __init__(self, sourcef, incfile=False, widthcount=0):
    '''
    Process a file and decorate the resultant Preprocess instance with
    self.result (the preprocessed file) and self.styles (extracted
    stylesheet information) for the caller.
    '''
    self.widthcount = widthcount
    name = sourcef.name
    # Normalize all line endings to '\n'.
    source = sourcef.read().replace('\r\n', '\n').replace('\r', '\n')

    # Make the determination if an include file is a stylesheet or
    # another restructured text file, and handle stylesheets appropriately.
    if incfile:
        try:
            self.styles = styles = rson_loads(source)
            substyles = styles.get('styles')
            if substyles is not None:
                styles['styles'] = dict(substyles)
        except:
            # NOTE(review): bare except deliberately treats any parse
            # failure as "not a stylesheet" and falls through to text
            # preprocessing -- confirm before narrowing.
            pass
        else:
            self.changed = True
            self.keep = False
            return

    # Read the whole file and wrap it in a DummyFile
    self.sourcef = DummyFile(source)
    self.sourcef.name = name

    # Use a regular expression on the source, to take it apart
    # and put it back together again.
    self.source = source = [x for x in self.splitter(source) if x]
    self.result = result = []
    self.styles = {}
    self.changed = False

    # More efficient to pop() a list than to keep taking tokens from [0]
    source.reverse()
    isblank = False
    keywords = self.keywords
    handle_single = keywords['single::']
    while source:
        wasblank = isblank
        isblank = False
        chunk = source.pop()
        result.append(chunk)
        # Only process single lines
        if not chunk.endswith('\n'):
            continue
        result[-1] = chunk[:-1]
        if chunk.index('\n') != len(chunk) - 1:
            continue

        # Parse the line to look for one of our keywords.
        tokens = chunk.split()
        isblank = not tokens
        if len(tokens) >= 2 and tokens[0] == '..' and tokens[1].endswith(
                '::'):
            func = keywords.get(tokens[1])
            if func is None:
                continue
            chunk = chunk.split('::', 1)[1]
        elif wasblank and len(tokens) == 1 and chunk[0].isalpha(
                ) and tokens[0].isalpha():
            func = handle_single
            chunk = tokens[0]
        else:
            continue
        # Recognized directive: drop it from the output and run the
        # handler (handlers are unbound, hence the explicit self).
        result.pop()
        func(self, chunk.strip())

    # Determine if we actually did anything or not.  Just use our source file
    # if not.  Otherwise, write the results to disk (so the user can use them
    # for debugging) and return them.
    if self.changed:
        result.append('')
        result = DummyFile('\n'.join(result))
        result.name = name + '.build_temp'
        self.keep = keep = len(result.strip())
        if keep:
            f = open(result.name, 'wb')
            f.write(result)
            f.close()
        self.result = result
    else:
        self.result = self.sourcef
def __init__(self, sourcef, incfile=False, widthcount=0):
    """
    Process a file and decorate the resultant Preprocess instance with
    self.result (the preprocessed file) and self.styles (extracted
    stylesheet information) for the caller.
    """
    self.widthcount = widthcount
    name = sourcef.name
    # Normalize all line endings to '\n'.
    source = sourcef.read().replace("\r\n", "\n").replace("\r", "\n")

    # Make the determination if an include file is a stylesheet or
    # another restructured text file, and handle stylesheets appropriately.
    if incfile:
        try:
            self.styles = styles = rson_loads(source)
            substyles = styles.get("styles")
            if substyles is not None:
                styles["styles"] = dict(substyles)
        except:
            # NOTE(review): bare except deliberately treats any parse
            # failure as "not a stylesheet" and falls through to text
            # preprocessing -- confirm before narrowing.
            pass
        else:
            self.changed = True
            self.keep = False
            return

    # Read the whole file and wrap it in a DummyFile
    self.sourcef = DummyFile(source)
    self.sourcef.name = name

    # Use a regular expression on the source, to take it apart
    # and put it back together again.
    self.source = source = [x for x in self.splitter(source) if x]
    self.result = result = []
    self.styles = {}
    self.changed = False

    # More efficient to pop() a list than to keep taking tokens from [0]
    source.reverse()
    isblank = False
    keywords = self.keywords
    handle_single = keywords["single::"]
    while source:
        wasblank = isblank
        isblank = False
        chunk = source.pop()
        result.append(chunk)
        # Only process single lines
        if not chunk.endswith("\n"):
            continue
        result[-1] = chunk[:-1]
        if chunk.index("\n") != len(chunk) - 1:
            continue

        # Parse the line to look for one of our keywords.
        tokens = chunk.split()
        isblank = not tokens
        if len(tokens) >= 2 and tokens[0] == ".." and tokens[1].endswith("::"):
            func = keywords.get(tokens[1])
            if func is None:
                continue
            chunk = chunk.split("::", 1)[1]
        elif wasblank and len(tokens) == 1 and chunk[0].isalpha() and tokens[0].isalpha():
            func = handle_single
            chunk = tokens[0]
        else:
            continue
        # Recognized directive: drop it from the output and run the
        # handler (handlers are unbound, hence the explicit self).
        result.pop()
        func(self, chunk.strip())

    # Determine if we actually did anything or not.  Just use our source file
    # if not.  Otherwise, write the results to disk (so the user can use them
    # for debugging) and return them.
    if self.changed:
        result.append("")
        result = DummyFile("\n".join(result))
        result.name = name + ".build_temp"
        self.keep = keep = len(result.strip())
        if keep:
            f = open(result.name, "wb")
            f.write(result)
            f.close()
        self.result = result
    else:
        self.result = self.sourcef