def _13SPLIT_GENERAL_imperechiere_intreb_rasp(lSeparEtic, text):
    dsort = odict.OrderedDict(lSeparEtic)
    lDelimCompleta = dsort.keys()
    lDelimEfectiva = []
    for d in lDelimCompleta:
        if d in text:  # does this question actually appear in the text?
            lDelimEfectiva.append(d)
    # --- collect the answers that are actually present
    lRaspEfective = split(lDelimEfectiva, text, maxsplit=0)
    # isolate the leading text; what remains is a list of question:answer pairs
    elem1_extraPerechi = lRaspEfective[0].strip()  # the leading fragment is not a question/answer pair; drop it so zip lines up
    if elem1_extraPerechi == '':
        elem1_extraPerechi = '-'
    # --- pair each question with its answer
    del lRaspEfective[0:1]  # so that zip lines up
    lIntrebRaspEfectiv = zip(lDelimEfectiva, lRaspEfective)
    # --- record the actual answers
    for intrebare, rasp in lIntrebRaspEfectiv:
        etic, val = dsort[intrebare]
        dsort[intrebare] = [etic, rasp.strip()]  # all other entries stay '-'
    return dsort, elem1_extraPerechi
def __init__(self, extensions=[], extension_configs={}, safe_mode=False):
    """
    Creates a new Markdown instance.

    Keyword arguments:

    * extensions: A list of extensions.
      If they are of type string, the module mdx_name.py will be loaded.
      If they are a subclass of markdown.Extension, they will be used as-is.
    * extension_configs: Configuration settings for extensions.
    * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".

    """
    self.safeMode = safe_mode
    self.registeredExtensions = []
    self.docType = ""
    self.stripTopLevelTags = True

    # Preprocessors
    self.preprocessors = odict.OrderedDict()
    self.preprocessors["html_block"] = preprocessors.HtmlBlockPreprocessor(self)
    self.preprocessors["reference"] = preprocessors.ReferencePreprocessor(self)
    # footnote preprocessor will be inserted with "<reference"

    # Block processors - run by the parser
    self.parser = blockparser.BlockParser()
    self.parser.blockprocessors['empty'] = blockprocessors.EmptyBlockProcessor(self.parser)
    self.parser.blockprocessors['indent'] = blockprocessors.ListIndentProcessor(self.parser)
    self.parser.blockprocessors['code'] = blockprocessors.CodeBlockProcessor(self.parser)
    self.parser.blockprocessors['hashheader'] = blockprocessors.HashHeaderProcessor(self.parser)
    self.parser.blockprocessors['setextheader'] = blockprocessors.SetextHeaderProcessor(self.parser)
    self.parser.blockprocessors['hr'] = blockprocessors.HRProcessor(self.parser)
    self.parser.blockprocessors['olist'] = blockprocessors.OListProcessor(self.parser)
    self.parser.blockprocessors['ulist'] = blockprocessors.UListProcessor(self.parser)
    self.parser.blockprocessors['quote'] = blockprocessors.BlockQuoteProcessor(self.parser)
    self.parser.blockprocessors['paragraph'] = blockprocessors.ParagraphProcessor(self.parser)

    #self.prePatterns = []

    # Inline patterns - Run on the tree
    self.inlinePatterns = odict.OrderedDict()
    self.inlinePatterns["backtick"] = inlinepatterns.BacktickPattern(inlinepatterns.BACKTICK_RE)
    self.inlinePatterns["escape"] = inlinepatterns.SimpleTextPattern(inlinepatterns.ESCAPE_RE)
    self.inlinePatterns["reference"] = inlinepatterns.ReferencePattern(inlinepatterns.REFERENCE_RE, self)
    self.inlinePatterns["link"] = inlinepatterns.LinkPattern(inlinepatterns.LINK_RE, self)
    self.inlinePatterns["image_link"] = inlinepatterns.ImagePattern(inlinepatterns.IMAGE_LINK_RE, self)
    self.inlinePatterns["image_reference"] = inlinepatterns.ImageReferencePattern(inlinepatterns.IMAGE_REFERENCE_RE, self)
    self.inlinePatterns["autolink"] = inlinepatterns.AutolinkPattern(inlinepatterns.AUTOLINK_RE, self)
    self.inlinePatterns["automail"] = inlinepatterns.AutomailPattern(inlinepatterns.AUTOMAIL_RE, self)
    self.inlinePatterns["linebreak2"] = inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_2_RE, 'br')
    self.inlinePatterns["linebreak"] = inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_RE, 'br')
    self.inlinePatterns["html"] = inlinepatterns.HtmlPattern(inlinepatterns.HTML_RE, self)
    self.inlinePatterns["entity"] = inlinepatterns.HtmlPattern(inlinepatterns.ENTITY_RE, self)
    self.inlinePatterns["not_strong"] = inlinepatterns.SimpleTextPattern(inlinepatterns.NOT_STRONG_RE)
    self.inlinePatterns["strong_em"] = inlinepatterns.DoubleTagPattern(inlinepatterns.STRONG_EM_RE, 'strong,em')
    self.inlinePatterns["strong"] = inlinepatterns.SimpleTagPattern(inlinepatterns.STRONG_RE, 'strong')
    self.inlinePatterns["emphasis"] = inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_RE, 'em')
    self.inlinePatterns["emphasis2"] = inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_2_RE, 'em')
    # The order of the handlers matters!!!

    # Tree processors - run once we have a basic parse.
    self.treeprocessors = odict.OrderedDict()
    self.treeprocessors["inline"] = treeprocessors.InlineProcessor(self)
    self.treeprocessors["prettify"] = treeprocessors.PrettifyTreeprocessor(self)

    # Postprocessors - finishing touches.
    self.postprocessors = odict.OrderedDict()
    self.postprocessors["raw_html"] = postprocessors.RawHtmlPostprocessor(self)
    self.postprocessors["amp_substitute"] = postprocessors.AndSubstitutePostprocessor()
    # footnote postprocessor will be inserted with ">amp_substitute"

    self.references = {}
    self.htmlStash = preprocessors.HtmlStash()
    self.registerExtensions(extensions=extensions, configs=extension_configs)
    self.reset()
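# A minimal sketch (not part of the original source) of how an extension hooks
# into the ordered dicts built above, using the position syntax the comments
# refer to ("<reference", ">amp_substitute").  It assumes the odict add()
# method accepts such a position string; extend_markdown_with_footnotes,
# FootnotePreprocessor and FootnotePostprocessor are hypothetical stand-ins
# for a real extension's names.
def extend_markdown_with_footnotes(md):
    # run the footnote preprocessor just before the "reference" preprocessor
    md.preprocessors.add('footnote', FootnotePreprocessor(md), '<reference')
    # run the footnote postprocessor just after "amp_substitute"
    md.postprocessors.add('footnote', FootnotePostprocessor(md), '>amp_substitute')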
def build_treeprocessors(md_instance, **kwargs):
    treeprocessors = odict.OrderedDict()
    treeprocessors['inline'] = InlineProcessor(md_instance)
    treeprocessors['prettify'] = PrettifyTreeprocessor(md_instance)
    return treeprocessors
def build_preprocessors(md_instance, **kwargs):
    preprocessors = odict.OrderedDict()
    if md_instance.safeMode != 'escape':
        preprocessors['html_block'] = HtmlBlockPreprocessor(md_instance)
    preprocessors['reference'] = ReferencePreprocessor(md_instance)
    return preprocessors
def build_postprocessors(md_instance, **kwargs):
    """ Build the default postprocessors for Markdown. """
    postprocessors = odict.OrderedDict()
    postprocessors["raw_html"] = RawHtmlPostprocessor(md_instance)
    postprocessors["amp_substitute"] = AndSubstitutePostprocessor()
    postprocessors["unescape"] = UnescapePostprocessor()
    return postprocessors
def build_preprocessors(md_instance, **kwargs):
    """ Build the default set of preprocessors used by Markdown. """
    preprocessors = odict.OrderedDict()
    if md_instance.safeMode != 'escape':
        preprocessors["html_block"] = HtmlBlockPreprocessor(md_instance)
    preprocessors["reference"] = ReferencePreprocessor(md_instance)
    return preprocessors
def _BuildMap(events):
    """Build an ordered dict from a yaml.events sequence describing a mapping."""
    m = odict.OrderedDict()
    for key in events:
        if isinstance(key, yaml.events.MappingEndEvent):
            return m
        # We have a key.  Grab the value by advancing the iterator,
        # build it, and assign.
        if not isinstance(key, yaml.events.ScalarEvent):
            raise YamlCssError('YAML CSS mapping keys must be scalars')
        key = unicode(key.value)
        value = events.next()
        if isinstance(value, yaml.events.MappingStartEvent):
            value = _BuildMap(events)
        elif isinstance(value, yaml.events.ScalarEvent):
            value = unicode(value.value)
        else:
            raise YamlCssError('Disallowed YAML type found: %r' % value)
        m[key] = value
    # We fell off the end of the event stream.  Malformed YAML CSS.
    raise YamlCssError('Truncated YAML CSS input')
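# A minimal usage sketch for _BuildMap (an assumption, not in the original
# module): it drives the function from PyYAML's low-level event stream.  The
# helper name parse_yaml_css is hypothetical.
import yaml

def parse_yaml_css(source):
    events = yaml.parse(source)  # yields StreamStart, DocumentStart, MappingStart, ...
    for event in events:
        if isinstance(event, yaml.events.MappingStartEvent):
            # _BuildMap consumes everything up to the matching MappingEndEvent
            return _BuildMap(events)
    raise YamlCssError('No top-level mapping found')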
def build_inlinepatterns(md_instance, **kwargs):
    """ Build the default set of inline patterns for Markdown. """
    inlinePatterns = odict.OrderedDict()
    inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
    inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
    inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
    inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
    inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
    inlinePatterns["image_reference"] = \
        ImageReferencePattern(IMAGE_REFERENCE_RE, md_instance)
    inlinePatterns["short_reference"] = \
        ReferencePattern(SHORT_REF_RE, md_instance)
    inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
    inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
    inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
    if md_instance.safeMode != 'escape':
        inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
    inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
    inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
    inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
    inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
    inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
    if md_instance.smart_emphasis:
        inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
    else:
        inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
    return inlinePatterns
def build_inlinepatterns(md_instance, **kwargs):
    inlinePatterns = odict.OrderedDict()
    inlinePatterns['backtick'] = BacktickPattern(BACKTICK_RE)
    inlinePatterns['escape'] = EscapePattern(ESCAPE_RE, md_instance)
    inlinePatterns['reference'] = ReferencePattern(REFERENCE_RE, md_instance)
    inlinePatterns['link'] = LinkPattern(LINK_RE, md_instance)
    inlinePatterns['image_link'] = ImagePattern(IMAGE_LINK_RE, md_instance)
    inlinePatterns['image_reference'] = ImageReferencePattern(
        IMAGE_REFERENCE_RE, md_instance)
    inlinePatterns['short_reference'] = ReferencePattern(
        SHORT_REF_RE, md_instance)
    inlinePatterns['autolink'] = AutolinkPattern(AUTOLINK_RE, md_instance)
    inlinePatterns['automail'] = AutomailPattern(AUTOMAIL_RE, md_instance)
    inlinePatterns['linebreak'] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
    if md_instance.safeMode != 'escape':
        inlinePatterns['html'] = HtmlPattern(HTML_RE, md_instance)
    inlinePatterns['entity'] = HtmlPattern(ENTITY_RE, md_instance)
    inlinePatterns['not_strong'] = SimpleTextPattern(NOT_STRONG_RE)
    inlinePatterns['strong_em'] = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
    inlinePatterns['strong'] = SimpleTagPattern(STRONG_RE, 'strong')
    inlinePatterns['emphasis'] = SimpleTagPattern(EMPHASIS_RE, 'em')
    if md_instance.smart_emphasis:
        inlinePatterns['emphasis2'] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
    else:
        inlinePatterns['emphasis2'] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
    return inlinePatterns
def __init__(self, markdown):
    self.blockprocessors = odict.OrderedDict()
    self.state = State()
    self.markdown = markdown
class State:
    states = odict.OrderedDict()

    @staticmethod
    def get_state(grid):
        gs = State.gridhash(grid)
        if gs in State.states:
            return State.states[gs]
        else:
            s = State(grid)
            State.states[gs] = s
            return s

    @staticmethod
    def saddle_energy(s1, s2):
        return max(s1.energy, s2.energy) + dEsaddle / (max(abs(s1.energy - s2.energy), 1))  #XXX: Ugly

    @staticmethod
    def gridhash(grid):
        #XXX: Hyper-lazy hash function. Still results in speed-up once things get all superbasiney.
        gs = ""
        for i in grid:
            for j in i:
                gs += "T" if j else "F"
        return gs

    def __init__(self, grid, energy=None):
        self.grid = grid
        self.w = len(self.grid[0])
        self.h = len(self.grid)
        if energy is None:  # "if not energy" would also discard a legitimate energy of 0
            self.energy = self.calc_energy()
        else:
            self.energy = energy
        self.rate_table = None

    def calc_energy(self):
        e = 0
        for i in range(self.h):
            for j in range(self.w):
                if self.grid[i][j]:
                    e += self.calc_energy_at(i, j)
        return e

    def calc_energy_at(self, i, j):
        e = 0
        for z in energy_neighbors:
            e -= self.grid[(i + z[0]) % self.h][(j + z[1]) % self.w]
        for z in energy_neighbors_2:
            e -= self.grid[(i + z[0]) % self.h][(j + z[1]) % self.w] * energy_2
        return e

    def __eq__(self, other):
        return self.grid == other.grid

    def get_rate_table(self):
        if self.rate_table:
            return self.rate_table
        self.rate_table = []
        for i in range(self.h):
            for j in range(self.w):
                if self.grid[i][j]:
                    for z in move_neighbors:
                        m, n = (i + z[0]) % self.h, (j + z[1]) % self.w
                        if not self.grid[m][n]:
                            newgrid = copy.deepcopy(self.grid)
                            newgrid[i][j] = False
                            newgrid[m][n] = True
                            dE = self.calc_energy_at(m, n) - self.calc_energy_at(i, j)
                            proc = {}
                            proc['product'] = State(newgrid, self.energy + 2 * dE)
                            proc['barrier'] = State.saddle_energy(self, proc['product']) - self.energy
                            proc['rate'] = exp(-proc['barrier'] / .01)
                            self.rate_table.append(proc)
        return self.rate_table

    def save(self, filename):
        f = open(filename, 'w')
        print >> f, self
        f.close()

    def __hash__(self):
        return hash(State.gridhash(self.grid))

    @staticmethod
    def load(filename):
        f = open(filename, 'r')
        grid = []
        for i in f:
            if i[0] == '+':
                continue
            gl = []
            for j in i[1:-2]:
                gl.append(False if j == ' ' else True)
            grid.append(gl)
        f.close()
        return State(grid)

    def __str__(self):
        out = ""
        out += "+"
        for i in range(self.w):
            out += "-"
        out += "+\n"
        for i in range(self.h):
            out += "|"
            for j in range(self.w):
                if self.grid[i][j]:
                    out += "O"
                else:
                    out += " "
            out += "|\n"
        out += "+"
        for i in range(self.w):
            out += "-"
        out += "+\n"
        return out
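# A minimal usage sketch for the State class (an assumption, not in the
# original module): it relies on the module-level constants the class
# references -- dEsaddle, energy_neighbors, energy_neighbors_2, energy_2,
# move_neighbors -- and on exp/copy being imported elsewhere in the file.
grid = [[False] * 8 for _ in range(8)]
grid[3][3] = True
grid[3][4] = True
s = State.get_state(grid)            # cached by gridhash in State.states
print s.energy
for proc in s.get_rate_table():      # one entry per possible single-site hop
    print proc['barrier'], proc['rate']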
    # (tail of the undeploy command handler; the enclosing def is not shown in this fragment)
    parser = optparse.OptionParser(usage=UNDEPLOY_USAGE,
                                   version=OPTPARSE_VERSION,
                                   add_help_option=False)
    (options, args) = parser.parse_args(cmdline)
    if len(args) != 3:
        parser.print_help()
        return 2
    deploy.Undeploy(args[0], args[1], args[2])
    return 0


COMMANDS = odict.OrderedDict((('startsite', startsite_cmd),
                              ('generate', generate_cmd),
                              ('vgenerate', vgenerate_cmd),
                              ('vcurrent', vcurrent_cmd),
                              ('vinfo', vinfo_cmd),
                              ('vgc', vgc_cmd),
                              ('deploy', deploy_cmd),
                              ('undeploy', undeploy_cmd)))


def main():
    USAGE = ('%prog <command> [options] <command args>\n\nCommands:\n ' +
             '\n '.join(COMMANDS.keys()))
    parser = optparse.OptionParser(usage=USAGE, version=OPTPARSE_VERSION)
    parser.disable_interspersed_args()
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return 2
def build_treeprocessors(md_instance, **kwargs):
    """ Build the default treeprocessors for Markdown. """
    treeprocessors = odict.OrderedDict()
    treeprocessors["inline"] = InlineProcessor(md_instance)
    treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
    return treeprocessors
def __init__(self, extensions=[], extension_configs={}, safe_mode=False,
             output_format=DEFAULT_OUTPUT_FORMAT):
    """
    Creates a new Markdown instance.

    Keyword arguments:

    * extensions: A list of extensions.
      If they are of type string, the module mdx_name.py will be loaded.
      If they are a subclass of markdown.Extension, they will be used as-is.
    * extension_configs: Configuration settings for extensions.
    * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
    * output_format: Format of output. Supported formats are:
        * "xhtml1": Outputs XHTML 1.x. Default.
        * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
        * "html4": Outputs HTML 4.
        * "html": Outputs latest supported version of HTML (currently HTML 4).
      Note that it is suggested that the more specific formats ("xhtml1"
      and "html4") be used as "xhtml" or "html" may change in the future
      if it makes sense at that time.

    """
    self.safeMode = safe_mode
    self.registeredExtensions = []
    self.docType = ""
    self.stripTopLevelTags = True

    # Preprocessors
    self.preprocessors = odict.OrderedDict()
    self.preprocessors["html_block"] = preprocessors.HtmlBlockPreprocessor(self)
    self.preprocessors["reference"] = preprocessors.ReferencePreprocessor(self)
    # footnote preprocessor will be inserted with "<reference"

    # Block processors - run by the parser
    self.parser = blockparser.BlockParser()
    self.parser.blockprocessors['empty'] = blockprocessors.EmptyBlockProcessor(self.parser)
    self.parser.blockprocessors['indent'] = blockprocessors.ListIndentProcessor(self.parser)
    self.parser.blockprocessors['code'] = blockprocessors.CodeBlockProcessor(self.parser)
    self.parser.blockprocessors['hashheader'] = blockprocessors.HashHeaderProcessor(self.parser)
    self.parser.blockprocessors['setextheader'] = blockprocessors.SetextHeaderProcessor(self.parser)
    self.parser.blockprocessors['hr'] = blockprocessors.HRProcessor(self.parser)
    self.parser.blockprocessors['olist'] = blockprocessors.OListProcessor(self.parser)
    self.parser.blockprocessors['ulist'] = blockprocessors.UListProcessor(self.parser)
    self.parser.blockprocessors['quote'] = blockprocessors.BlockQuoteProcessor(self.parser)
    self.parser.blockprocessors['paragraph'] = blockprocessors.ParagraphProcessor(self.parser)

    #self.prePatterns = []

    # Inline patterns - Run on the tree
    self.inlinePatterns = odict.OrderedDict()
    self.inlinePatterns["backtick"] = inlinepatterns.BacktickPattern(inlinepatterns.BACKTICK_RE)
    self.inlinePatterns["escape"] = inlinepatterns.SimpleTextPattern(inlinepatterns.ESCAPE_RE)
    self.inlinePatterns["reference"] = inlinepatterns.ReferencePattern(inlinepatterns.REFERENCE_RE, self)
    self.inlinePatterns["link"] = inlinepatterns.LinkPattern(inlinepatterns.LINK_RE, self)
    self.inlinePatterns["image_link"] = inlinepatterns.ImagePattern(inlinepatterns.IMAGE_LINK_RE, self)
    self.inlinePatterns["image_reference"] = inlinepatterns.ImageReferencePattern(inlinepatterns.IMAGE_REFERENCE_RE, self)
    self.inlinePatterns["autolink"] = inlinepatterns.AutolinkPattern(inlinepatterns.AUTOLINK_RE, self)
    self.inlinePatterns["automail"] = inlinepatterns.AutomailPattern(inlinepatterns.AUTOMAIL_RE, self)
    self.inlinePatterns["linebreak2"] = inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_2_RE, 'br')
    self.inlinePatterns["linebreak"] = inlinepatterns.SubstituteTagPattern(inlinepatterns.LINE_BREAK_RE, 'br')
    self.inlinePatterns["html"] = inlinepatterns.HtmlPattern(inlinepatterns.HTML_RE, self)
    self.inlinePatterns["entity"] = inlinepatterns.HtmlPattern(inlinepatterns.ENTITY_RE, self)
    self.inlinePatterns["not_strong"] = inlinepatterns.SimpleTextPattern(inlinepatterns.NOT_STRONG_RE)
    self.inlinePatterns["strong_em"] = inlinepatterns.DoubleTagPattern(inlinepatterns.STRONG_EM_RE, 'strong,em')
    self.inlinePatterns["strong"] = inlinepatterns.SimpleTagPattern(inlinepatterns.STRONG_RE, 'strong')
    self.inlinePatterns["emphasis"] = inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_RE, 'em')
    self.inlinePatterns["emphasis2"] = inlinepatterns.SimpleTagPattern(inlinepatterns.EMPHASIS_2_RE, 'em')
    # The order of the handlers matters!!!

    # Tree processors - run once we have a basic parse.
    self.treeprocessors = odict.OrderedDict()
    self.treeprocessors["inline"] = treeprocessors.InlineProcessor(self)
    self.treeprocessors["prettify"] = treeprocessors.PrettifyTreeprocessor(self)

    # Postprocessors - finishing touches.
    self.postprocessors = odict.OrderedDict()
    self.postprocessors["raw_html"] = postprocessors.RawHtmlPostprocessor(self)
    self.postprocessors["amp_substitute"] = postprocessors.AndSubstitutePostprocessor()
    # footnote postprocessor will be inserted with ">amp_substitute"

    # Map format keys to serializers
    self.output_formats = {
        'html':   html4.to_html_string,
        'html4':  html4.to_html_string,
        'xhtml':  etree.tostring,
        'xhtml1': etree.tostring,
    }

    self.references = {}
    self.htmlStash = preprocessors.HtmlStash()
    self.registerExtensions(extensions=extensions, configs=extension_configs)
    self.set_output_format(output_format)
    self.reset()
def build_postprocessors(md_instance, **kwargs):
    postprocessors = odict.OrderedDict()
    postprocessors['raw_html'] = RawHtmlPostprocessor(md_instance)
    postprocessors['amp_substitute'] = AndSubstitutePostprocessor()
    postprocessors['unescape'] = UnescapePostprocessor()
    return postprocessors
def __init__(self, name='', parent=None):
    self.parent = parent
    self.items = odict.OrderedDict()
    self.name = str(name)
    self.__modified = False
def _5identificare(antet, language):
    global lextins, aMcandidat
    gunoi, antet = antet.split('021.209.3401')
    lSeparEtic = copy.deepcopy(
        param['EJOBS'][language]['identification section separators'])
    lSiruriTest = copy.deepcopy(
        param['EJOBS'][language]['identification section test strings'])
    dsort = odict.OrderedDict(lSeparEtic)
    lDelimCompleta = dsort.keys()
    lDelimEfectiva = []
    for d in lDelimCompleta:
        if d in antet:  # does this question actually appear in the text?
            lDelimEfectiva.append(d)
    lRaspEfective = split(lDelimEfectiva, antet, maxsplit=0)
    # isolate the name; what remains is a list of question:answer pairs
    numepren = lRaspEfective[0].strip()  # the leading name is not a question/answer pair; drop it so zip lines up
    lnumepren = word_tokenize(numepren)
    nume = lnumepren[-1]
    pren = string.join(lnumepren[:-1], ' ')
    lextins.append(['1 Nume', string.upper(nume)])
    lextins.append(['2 Prenume', string.upper(pren)])
    del lRaspEfective[0:1]  # so that zip lines up
    #
    lIntrebRaspEfectiv = zip(lDelimEfectiva, lRaspEfective)
    lastQuestionWithresponse = ''
    for intrebare, rasp in lIntrebRaspEfectiv:
        lastQuestionWithresponse = intrebare
        etic, val = dsort[intrebare]
        dsort[intrebare] = [etic, rasp.strip()]  # all other entries stay '-'
    # check whether a personal statement is appended to the last answer
    declaratie = '-'
    [etic, rasp] = dsort[lastQuestionWithresponse]
    if lSiruriTest['ID'] in etic:
        if len(rasp) > 7:
            declaratie = rasp[8:].strip()  # iulia szente
            dsort[lastQuestionWithresponse] = [etic, rasp[:6]]
    if lSiruriTest['Permis'] in etic:
        xdataob = string.find(rasp, lSiruriTest['Data ob'])
        if xdataob != -1:
            if len(rasp[xdataob:]) > 26:
                declaratie = rasp[xdataob + 26:].strip()
                dsort[lastQuestionWithresponse] = [
                    etic, rasp[:xdataob + 26].strip()
                ]  # rebuild the answer without the statement, which really belongs to the next field
        else:  # did not also fill in the date obtained
            if len(rasp) > 5:  # Cat.B
                declaratie = rasp[6:].strip()
                dsort[lastQuestionWithresponse] = [
                    etic, rasp[:6]
                ]  # rebuild the answer without the statement, which really belongs to the next field
    if lSiruriTest['Stagiu'] in etic:
        if len(rasp) > 2:
            declaratie = rasp[2:].strip()
            dsort[lastQuestionWithresponse] = [
                etic, rasp[:2]
            ]  # rebuild the answer without the statement, which really belongs to the next field
    # ---------- personal statement -----------------------
    for intreb, [etic, rasp] in dsort.items():  # write the expanded field, with the 2 expansions
        if lSiruriTest['Data ultimei aplicari'] in intreb:
            adaug6_desfacut(rasp)
            continue
        xdataob = ''
        if lSiruriTest['Permis conducere'] in intreb:
            if rasp != '-':  # has a driving licence -- split the answer
                xdataob = string.find(rasp, lSiruriTest['Data ob'])
                if xdataob == -1:  # has a licence but did not fill in the date obtained (george teodor croitoru)
                    #xvirg = string.find(rasp, ',')
                    permis = rasp  #[:xvirg]
                    data_obt_permis = '-'
                else:  # normal case -- has a licence and filled in the date obtained
                    permis = rasp[:xdataob]
                    data_obt_permis = rasp[-10:]
                [etic, rasp] = dsort[intreb]  # keep the date obtained out of the licence field
                dsort[intreb] = [etic, permis]
                lextins.append(['14 Permis conducere', permis])
                lextins.append(['15 Data obtinerii', data_obt_permis])
            else:
                lextins.append(['14 Permis conducere', '-'])
                lextins.append(['15 Data obtinerii', '-'])
            continue
        lextins.append([etic, rasp])
    lextins.append(['18 Scrisoare intentie', declaratie])
    return numepren
def __init__(self, parent):
    self.parent = parent
    self.vars = odict.OrderedDict()
    self.exprs = {}
def _parse(obj, names):
    if isinstance(obj, basestring):
        schema = names.get(obj)
        if schema is not None:
            return schema
        else:
            raise SchemaParseException("Undefined name: " + obj.__str__())
    elif isinstance(obj, dict):
        type = obj.get("type")
        if type is None:
            raise SchemaParseException("No type: " + obj.__str__())
        if (type == "record" or type == "error"
                or type == "enum" or type == "fixed"):
            name = obj.get("name")
            space = obj.get("namespace")
            if name is None:
                raise SchemaParseException("No name in schema: " + obj.__str__())
            if type == "record" or type == "error":
                fields = odict.OrderedDict()
                schema = _RecordSchema(fields, name, space, type == "error")
                names[name] = schema
                fieldsnode = obj.get("fields")
                if fieldsnode is None:
                    raise SchemaParseException("Record has no fields: " + obj.__str__())
                for field in fieldsnode:
                    fieldname = field.get("name")
                    if fieldname is None:
                        raise SchemaParseException("No field name: " + field.__str__())
                    fieldtype = field.get("type")
                    if fieldtype is None:
                        raise SchemaParseException("No field type: " + field.__str__())
                    defaultval = field.get("default")
                    fields[fieldname] = Field(fieldname, _parse(fieldtype, names), defaultval)
                return schema
            elif type == "enum":
                symbolsnode = obj.get("symbols")
                if symbolsnode is None or not isinstance(symbolsnode, list):
                    raise SchemaParseException("Enum has no symbols: " + obj.__str__())
                symbols = list()
                for symbol in symbolsnode:
                    symbols.append(symbol)
                schema = _EnumSchema(name, space, symbols)
                names[name] = schema
                return schema
            elif type == "fixed":
                schema = _FixedSchema(name, space, obj.get("size"))
                names[name] = schema
                return schema
        elif type == "array":
            return _ArraySchema(_parse(obj.get("items"), names))
        elif type == "map":
            return _MapSchema(_parse(obj.get("values"), names))
        else:
            raise SchemaParseException("Type not yet supported: " + type.__str__())
    elif isinstance(obj, list):
        elemtypes = list()
        for elemtype in obj:
            elemtypes.append(_parse(elemtype, names))
        return _UnionSchema(elemtypes)
    else:
        raise SchemaParseException("Schema not yet supported:" + obj.__str__())
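# A minimal usage sketch for _parse (an assumption, not in the original
# module): it parses an Avro-style record declaration.  It assumes the names
# mapping has already been seeded with primitive type names such as "int",
# as the surrounding module presumably does before calling _parse.
point_decl = {
    "type": "record",
    "name": "Point",
    "fields": [
        {"name": "x", "type": "int"},
        {"name": "y", "type": "int", "default": 0},
    ],
}
schema = _parse(point_decl, names)  # also registers "Point" in names as a side effect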
def __init__(self):
    self.toc_data = odict.OrderedDict()
    self.last_prefix = ''
    self.heading_count = {}
    self.previous_level = self.current_level = 0