def try_metacommands(self, line):
    """Handle REPL metacommands and report whether one was executed.

    '%' toggles between 'speedy' and 'normal' modes; lines starting with
    '!' run one of the bang-commands below. Returns True when `line` was
    a metacommand (already handled), False when the caller should treat
    it as ordinary code.
    """
    if not line:
        return False
    # strip once; every command below compares against the stripped form
    cmd = line.strip()
    if cmd == '%':
        self.mode = 'speedy' if self.mode != 'speedy' else 'normal'
        self.update_banner()
        u.gray("metacommand registered")
        return True
    # handle metacommands
    if line[0] == '!':
        if cmd == '!print':
            u.y('\n'.join(self.code))
        elif cmd == '!debug':
            self.debug = not self.debug
        elif cmd == '!verbose_exc':
            self.verbose_exceptions = not self.verbose_exceptions
        elif cmd == '!reset':
            # re-init in place: drops accumulated code/state
            self.__init__(None)
        #elif cmd == '!cleanup': #clears the /repl-tmpfiles directory
            #u.clear_repl_tmpfiles()
            #os.makedirs(os.path.dirname(self.tmpfile))
        #elif cmd == '!which':
            #print(self.tmpfile)
        elif cmd == '!help':
            u.b('Currently implemented macros listing:')
            u.p('\n'.join(codegen.macro_argc.keys()))
        u.gray("metacommand registered")
        return True
    return False
def run_code(self, new_code):
    """Execute one logical line of translated code in the REPL's globals.

    `new_code` is a list of source lines forming a single Interactive
    statement (which may itself be a multiline construct). `use FILE`
    statements are intercepted and routed to add_context() instead of
    being executed. On success the lines are appended to self.code; on
    failure the exception is printed (and, in compile mode, the process
    exits with status 1).
    """
    # write to tmpfile
    #with open(self.tmpfile,'w') as f:
        #f.write('\n'.join(self.code))
    codestring = '\n'.join(new_code)
    u.b(codestring)

    # `use` statements
    if codestring.strip().startswith('use '):
        # NOTE(review): these asserts are input validation and vanish under
        # `python -O`; kept as asserts so callers catching AssertionError
        # still work — TODO consider raising a dedicated error instead.
        assert codestring.startswith('use '), "`use` statements can only be used at indentation level 0"
        items = codestring.strip().split(' ')
        assert len(items) == 2, "`use` syntax is: `use IDENTIFIER` where the identifier is a filename WITHOUT '.py' included and that file is in the current directory. These limitations will be relaxed in the future."
        filename = items[1] + '.py'
        self.add_context(filename)
        return

    try:
        # Note that this can only take one Interactive line at a time (which may
        # actually be a multiline for loop etc).
        # A version that compiled with 'exec' or 'eval' mode and thus could
        # handle multiple Interactive lines at once was used in an
        # earlier version, look back thru the git repository if you need it. However
        # really you should be able to just divide up your input and call run_code multiple
        # times with the pieces.
        """
        "Remember that at module level, globals and locals are the same dictionary.
        If exec gets two separate objects as globals and locals, the code will be
        executed as if it were embedded in a class definition."
        """
        as_ast = ast.parse(codestring, mode='single')  # parse into a python ast object
        as_ast = ast.fix_missing_locations(as_ast)
        code = compile(as_ast, '<ast>', 'single')
        # passing locals in causes it to only update locals and not globals,
        # when really we just want globals to be updated. Confirmed by looking
        # at the `code` module (pythons interactive interpreter emulator) that
        # this is valid (tho they name the thing they pass in locals not globals).
        exec(code, self.globs)
        #print(ast.dump(as_ast))
        self.code += new_code  # keep track of successfully executed code
    except u.VerbatimExc as e:
        print(e)
        if self.context.compile:
            sys.exit(1)
    except Exception as e:
        # This is where exceptions for the code go.
        # TODO make em look nicer by telling format_exception this is the special case of a repl error thrown by exec() or eval()
        # (for this you may wanna have ast.parse() in a separate try-except to differentiate. It'll catch syntax errors specifically.
        print(u.format_exception(e, ['<string>', u.src_path], verbose=self.verbose_exceptions))
        if self.context.compile:
            sys.exit(1)
def _parse_chunk_size(self, data):
    """Parse one chunked-encoding size line from `data`.

    Returns (size, remainder): (None, None) when the size line is not
    fully buffered yet, (0, None) for the terminating zero chunk (the
    remainder is then handed to _parse_trailers), otherwise the chunk
    size and the bytes following the size line.
    Raises InvalidChunkSize when the hex size field is malformed.
    """
    eol = data.find(b("\r\n"))
    if eol < 0:
        # incomplete size line — wait for more data
        return None, None
    size_line = data[:eol]
    remainder = data[eol + 2:]
    # chunk extensions after ';' are ignored
    raw_size = size_line.split(b(";"), 1)[0].strip()
    try:
        size = int(raw_size, 16)
    except ValueError:
        raise InvalidChunkSize(raw_size)
    if size == 0:
        # last chunk: optional trailers follow
        self._parse_trailers(remainder)
        return 0, None
    return size, remainder
def _parse_body(self):
    """Consume buffered data into self._body.

    Return protocol (consumed by execute()):
      * non-chunked path: implicit None return after appending whatever
        is buffered; message is marked complete once content-length is
        exhausted.
      * chunked path: None = need more data, -1 = protocol error
        (errno/errstr set), 0 = final zero chunk seen, >0 = bytes of
        `rest` handled this pass.
    """
    if not self._chunked:
        # identity transfer: everything buffered is body
        body_part = b("").join(self._buf)
        self._clen_rest -= len(body_part)

        # maybe decompress
        if self.__decompress_obj is not None:
            body_part = self.__decompress_obj.decompress(body_part)

        self._partial_body = True
        self._body.append(body_part)
        self._buf = []

        # content-length exhausted -> message complete
        if self._clen_rest <= 0:
            self.__on_message_complete = True
        return
    else:
        data = b("").join(self._buf)
        try:
            size, rest = self._parse_chunk_size(data)
        except InvalidChunkSize as e:
            self.errno = INVALID_CHUNK
            self.errstr = "invalid chunk size [%s]" % str(e)
            return -1

        # zero-size chunk terminates the chunked body
        if size == 0:
            return size

        # size line incomplete, or chunk payload not fully buffered yet
        if size is None or len(rest) < size:
            return None

        body_part, rest = rest[:size], rest[size:]
        # every chunk payload must be followed by CRLF
        if len(rest) < 2:
            self.errno = INVALID_CHUNK
            self.errstr = "chunk missing terminator [%s]" % data
            return -1

        # maybe decompress
        if self.__decompress_obj is not None:
            body_part = self.__decompress_obj.decompress(body_part)

        self._partial_body = True
        self._body.append(body_part)
        # keep what follows the CRLF terminator for the next chunk
        self._buf = [rest[2:]]

        return len(rest)
def recv_body_into(self, barray):
    """Copy as much parsed body as fits into `barray`.

    Fills the caller-supplied buffer in place rather than allocating a
    new bytes object; any bytes that do not fit stay queued for the next
    call. Returns the number of bytes written.
    """
    capacity = len(barray)
    body = b("").join(self._body)
    n = min(len(body), capacity)
    barray[0:n] = body[:n]
    leftover = body[n:]
    if leftover:
        self._body = [leftover]
    else:
        self._body = []
        self._partial_body = False
    return n
def prepare_cmds_for_coq_output(coqc_prog, coqc_prog_args, contents, cwd=None, timeout_val=0, **kwargs):
    """Build the command line (and optional stdin payload) for a coq run.

    Reuses the temp .v file recorded in COQ_OUTPUT when this exact
    invocation was prepared before; otherwise writes `contents` into a
    fresh NamedTemporaryFile. Returns (key, file_name, cmds, input_val)
    where `key` is the cache key and `input_val` is the text to feed on
    stdin, or None when the file is loaded directly.
    """
    key = (coqc_prog, tuple(coqc_prog_args), kwargs['pass_on_stdin'], contents, timeout_val, cwd)
    if key in COQ_OUTPUT:
        file_name = COQ_OUTPUT[key][0]
    else:
        with tempfile.NamedTemporaryFile(suffix='.v', delete=False, mode='wb') as f:
            f.write(util.b(contents))
        file_name = f.name
    file_name_root = os.path.splitext(file_name)[0]

    cmds = [coqc_prog] + list(coqc_prog_args)
    pseudocmds = ''   # display-only suffix for the logged command line
    input_val = None
    if kwargs['is_coqtop']:
        if kwargs['pass_on_stdin']:
            input_val = contents
            cmds.append('-q')
            pseudocmds = '" < "%s' % file_name
        else:
            cmds.extend(['-load-vernac-source', file_name_root, '-q'])
    else:
        cmds.extend([file_name, '-q'])

    if kwargs['verbose'] >= kwargs['verbose_base']:
        kwargs['log']('\nRunning command: "%s%s"' % ('" "'.join(cmds), pseudocmds))
        if kwargs['verbose'] >= kwargs['verbose_base'] + 1:
            kwargs['log']('\nContents:\n%s\n' % contents)
    return key, file_name, cmds, input_val
def _parse_trailers(self, data):
    # Parse optional trailer headers following the terminating zero-size
    # chunk; `data` is everything after that chunk's size line.
    idx = data.find(b("\r\n\r\n"))

    # NOTE(review): this guard looks suspicious — `data` starting with
    # "\r\n" means the trailer section is *empty*, yet that is the only
    # case in which trailers are parsed (and data[:idx] with idx == -1
    # would drop the last 4 bytes). Matches upstream behavior, so left
    # untouched — confirm against the chunked-encoding spec before use.
    if data[:2] == b("\r\n"):
        self._trailers = self._parse_headers(data[:idx])
def _parse_headers(self, data):
    """Parse the header section out of `data`.

    Returns False when the blank-line terminator ("\\r\\n\\r\\n") has not
    arrived yet; otherwise fills self._headers / self._environ, derives
    body-framing state (content-length vs chunked) and the decompressor,
    stores what follows the headers back into self._buf, and returns the
    length of that remainder.
    Raises InvalidHeader on a malformed header line or name.
    """
    idx = data.find(b("\r\n\r\n"))
    if idx < 0:
        # we don't have all headers
        return False

    # Split lines on \r\n keeping the \r\n on each line
    lines = [bytes_to_str(line) + "\r\n" for line in data[:idx].split(b("\r\n"))]

    # Parse headers into key/value pairs paying attention
    # to continuation lines.
    while len(lines):
        # Parse initial header name : value pair.
        curr = lines.pop(0)
        if curr.find(":") < 0:
            raise InvalidHeader("invalid line %s" % curr.strip())
        name, value = curr.split(":", 1)
        name = name.rstrip(" \t").upper()
        if HEADER_RE.search(name):
            raise InvalidHeader("invalid header name %s" % name)
        name, value = name.strip(), [value.lstrip()]

        # Consume value continuation lines
        while len(lines) and lines[0].startswith((" ", "\t")):
            value.append(lines.pop(0))
        value = ''.join(value).rstrip()

        # multiple headers
        if name in self._headers:
            value = "%s, %s" % (self._headers[name], value)

        # store new header value
        # NOTE(review): names are stored upper-cased but fetched with
        # lower-case keys below — presumably self._headers is a
        # case-insensitive mapping; confirm at its definition.
        self._headers[name] = value

        # update WSGI environ
        key = 'HTTP_%s' % name.upper().replace('-','_')
        self._environ[key] = value

    # detect now if body is sent by chunks.
    clen = self._headers.get('content-length')
    te = self._headers.get('transfer-encoding', '').lower()

    if clen is not None:
        try:
            self._clen_rest = self._clen = int(clen)
        except ValueError:
            # unparsable content-length is silently ignored (best-effort)
            pass
    else:
        self._chunked = (te == 'chunked')
        if not self._chunked:
            # no framing info: read until the connection closes
            self._clen_rest = MAXSIZE

    # detect encoding and set decompress object
    encoding = self._headers.get('content-encoding')
    if self.decompress:
        if encoding == "gzip":
            # 16 + MAX_WBITS tells zlib to expect a gzip header
            self.__decompress_obj = zlib.decompressobj(16+zlib.MAX_WBITS)
        elif encoding == "deflate":
            self.__decompress_obj = zlib.decompressobj()

    # everything after the blank line belongs to the body
    rest = data[idx+4:]
    self._buf = [rest]
    self.__on_headers_complete = True
    return len(rest)
def execute(self, data, length):
    """Feed `length` bytes of `data` into the parser state machine.

    Drives the three stages in order — request/status line, headers,
    body — buffering partial input between calls. Returns the number of
    bytes accepted (callers compare this to `length` to detect errors),
    or a negative body-parse error code.
    """
    # end of body can be passed manually by putting a length of 0
    if length == 0:
        self.__on_message_complete = True
        return length

    # start to parse
    nb_parsed = 0
    while True:
        if not self.__on_firstline:
            idx = data.find(b("\r\n"))
            if idx < 0:
                # first line incomplete: buffer and wait for more
                self._buf.append(data)
                return len(data)
            else:
                self.__on_firstline = True
                self._buf.append(data[:idx])
                first_line = bytes_to_str(b("").join(self._buf))
                nb_parsed = nb_parsed + idx + 2

                rest = data[idx+2:]
                data = b("")
                if self._parse_firstline(first_line):
                    self._buf = [rest]
                else:
                    # malformed first line: report how far we got
                    return nb_parsed
        elif not self.__on_headers_complete:
            if data:
                self._buf.append(data)
                data = b("")

            try:
                to_parse = b("").join(self._buf)
                ret = self._parse_headers(to_parse)
                if not ret:
                    # headers incomplete; everything consumed into _buf
                    return length
                nb_parsed = nb_parsed + (len(to_parse) - ret)
            except InvalidHeader as e:
                self.errno = INVALID_HEADER
                self.errstr = str(e)
                return nb_parsed
        elif not self.__on_message_complete:
            if not self.__on_message_begin:
                self.__on_message_begin = True

            if data:
                self._buf.append(data)
                data = b("")

            # _parse_body protocol: None=need more, <0=error, 0=done,
            # >0=progress this pass
            ret = self._parse_body()
            if ret is None:
                return length
            elif ret < 0:
                return ret
            elif ret == 0:
                self.__on_message_complete = True
                return length
            else:
                # NOTE(review): max(length, ret) looks odd for an
                # accumulator — matches upstream; confirm intent before
                # relying on nb_parsed here.
                nb_parsed = max(length, ret)
        else:
            return 0
def recv_body(self):
    """Drain and return everything parsed into the body so far."""
    chunk = b("").join(self._body)
    self._partial_body = False
    self._body = []
    return chunk
def split_statements_to_definitions(statements, verbose=DEFAULT_VERBOSITY, log=DEFAULT_LOG, coqtop='coqtop', coqtop_args=tuple(), **kwargs):
    """Splits a list of statements into chunks which make up
    independent definitions/hints/etc.

    Feeds all statements to a `coqtop -time` subprocess, uses the
    emitted character ranges and prompt contents to track which
    definition each statement belongs to, and returns a list of dicts
    with keys 'statements' (tuple of statement strings), 'statement'
    (the joined text) and 'terms_defined' (tuple of names defined).
    Falls back to the old splitter when coqtop lacks -time support.
    """
    def fallback():
        # old, more error-prone splitting path for coqtop without -time
        if verbose:
            log("Your version of coqtop doesn't support -time. Falling back to more error-prone method.")
        return split_definitions_old.split_statements_to_definitions(
            statements, verbose=verbose, log=log, coqtop=coqtop, coqtop_args=coqtop_args)

    # check for -time
    if not get_coq_accepts_time(coqtop, verbose=verbose, log=log):
        return fallback()

    if not get_proof_term_works_with_time(
            coqtop, is_coqtop=True, verbose=verbose, log=log, **kwargs):
        statements = postprocess_split_proof_term(statements, log=log, verbose=verbose, **kwargs)

    p = Popen([coqtop, '-q', '-emacs', '-time'] + list(coqtop_args),
              stdout=PIPE, stderr=STDOUT, stdin=PIPE)
    split_reg = re.compile(
        r'Chars ([0-9]+) - ([0-9]+) [^\s]+ (.*?)<prompt>([^<]*?) < ([0-9]+) ([^<]*?) ([0-9]+) < ([^<]*?)</prompt>'
        .replace(' ', r'\s*'), flags=re.DOTALL)
    defined_reg = re.compile(r'^([^\s]+) is (?:defined|assumed)$', re.MULTILINE)
    # goals and definitions are on stdout, prompts are on stderr
    statements_string = '\n'.join(statements) + '\n\n'
    if verbose:
        log('Sending statements to coqtop...')
    if verbose >= 3:
        log(statements_string)
    (stdout, stderr) = p.communicate(input=util.b(statements_string))
    stdout = util.s(stdout)
    if 'know what to do with -time' in stdout.strip().split('\n')[0]:
        # we're using a version of coqtop that doesn't support -time
        return fallback()
    if verbose:
        log('Done. Splitting to definitions...')

    rtn = []
    cur_definition = {}
    last_definitions = '||'
    cur_definition_names = '||'
    last_char_end = 0

    #if verbose >= 3: log('re.findall(' + repr(r'Chars ([0-9]+) - ([0-9]+) [^\s]+ (.*?)<prompt>([^<]*?) < ([0-9]+) ([^<]*?) ([0-9]+) < ([^<]*?)</prompt>'.replace(' ', r'\s*')) + ', ' + repr(stdout) + ', ' + 'flags=re.DOTALL)')
    responses = split_reg.findall(stdout)
    for char_start, char_end, response_text, cur_name, line_num1, cur_definition_names, line_num2, unknown in responses:
        char_start, char_end = int(char_start), int(char_end)
        statement = strip_newlines(
            slice_statements_string(statements_string, last_char_end, char_end))
        last_char_end = char_end
        terms_defined = defined_reg.findall(response_text)

        definitions_removed, definitions_shared, definitions_added = get_definitions_diff(
            last_definitions, cur_definition_names)

        # first, to be on the safe side, we add the new
        # definitions key to the dict, if it wasn't already there.
        if cur_definition_names.strip('|') and cur_definition_names not in cur_definition:
            cur_definition[cur_definition_names] = {'statements': [], 'terms_defined': []}

        if verbose >= 2:
            log((statement, (char_start, char_end), definitions_removed,
                 terms_defined, 'last_definitions:', last_definitions,
                 'cur_definition_names:', cur_definition_names,
                 cur_definition.get(last_definitions, []),
                 cur_definition.get(cur_definition_names, []), response_text))

        # first, we handle the case where we have just finished
        # defining something. This should correspond to
        # len(definitions_removed) > 0 and len(terms_defined) > 0.
        # If only len(definitions_removed) > 0, then we have
        # aborted something. If only len(terms_defined) > 0, then
        # we have defined something with a one-liner.
        if definitions_removed:
            cur_definition[last_definitions]['statements'].append(statement)
            cur_definition[last_definitions]['terms_defined'] += terms_defined
            if cur_definition_names.strip('|'):
                # we are still inside a definition. For now, we
                # flatten all definitions.
                #
                # TODO(jgross): Come up with a better story for
                # nested definitions.
                cur_definition[cur_definition_names]['statements'] += \
                    cur_definition[last_definitions]['statements']
                cur_definition[cur_definition_names]['terms_defined'] += \
                    cur_definition[last_definitions]['terms_defined']
                del cur_definition[last_definitions]
            else:
                # we're at top-level, so add this as a new
                # definition
                rtn.append({
                    'statements': tuple(cur_definition[last_definitions]['statements']),
                    'statement': '\n'.join(cur_definition[last_definitions]['statements']),
                    'terms_defined': tuple(cur_definition[last_definitions]['terms_defined'])
                })
                del cur_definition[last_definitions]
                # print('Adding:')
                # print(rtn[-1])
        elif terms_defined:
            if cur_definition_names.strip('|'):
                # we are still inside a definition. For now, we
                # flatten all definitions.
                #
                # TODO(jgross): Come up with a better story for
                # nested definitions.
                cur_definition[cur_definition_names]['statements'].append(statement)
                cur_definition[cur_definition_names]['terms_defined'] += terms_defined
            else:
                # we're at top level, so add this as a new
                # definition
                rtn.append({
                    'statements': (statement, ),
                    'statement': statement,
                    'terms_defined': tuple(terms_defined)
                })
        # now we handle the case where we have just opened a fresh
        # definition. We've already added the key to the
        # dictionary.
        elif definitions_added:
            # print(definitions_added)
            cur_definition[cur_definition_names]['statements'].append(statement)
        else:
            # if we're in a definition, append the statement to
            # the queue, otherwise, just add it as it's own
            # statement
            if cur_definition_names.strip('|'):
                cur_definition[cur_definition_names]['statements'].append(statement)
            else:
                rtn.append({
                    'statements': (statement, ),
                    'statement': statement,
                    'terms_defined': tuple()
                })
        last_definitions = cur_definition_names

    if verbose >= 2:
        log((last_definitions, cur_definition_names))
    if last_definitions.strip('||'):
        rtn.append({
            'statements': tuple(cur_definition[cur_definition_names]['statements']),
            'statement': '\n'.join(cur_definition[cur_definition_names]['statements']),
            'terms_defined': tuple(cur_definition[cur_definition_names]['terms_defined'])
        })
        del cur_definition[last_definitions]

    if last_char_end + 1 < len_statements_string(statements_string):
        if verbose >= 2:
            log('Appending end of code from %d to %d: %s'
                % (last_char_end, len_statements_string(statements_string),
                   slice_statements_string(statements_string, last_char_end, None)))
        last_statement = strip_newlines(
            slice_statements_string(statements_string, last_char_end, None))
        # BUGFIX: was tuple(last_statement, ), i.e. tuple(last_statement),
        # which explodes the string into a tuple of single characters.
        # Every other branch builds a 1-tuple (statement, ); do the same.
        rtn.append({
            'statements': (last_statement, ),
            'statement': last_statement,
            'terms_defined': tuple()
        })

    return rtn
def len_statements_string(statements_string):
    """Length of the statements string in *bytes* (not characters)."""
    encoded = util.b(statements_string)
    return len(encoded)
def slice_statements_string(statements_string, start=None, end=None):
    """Slice the statements string by *byte* offsets.

    Encodes to bytes, slices [start:end] (defaulting to the full range),
    and decodes the result back to str.
    """
    raw = util.b(statements_string)
    lo = 0 if start is None else start
    hi = len(raw) if end is None else end
    return util.s(raw[lo:hi])