def validate_file_for_import(csv_file: io.TextIOWrapper, logger, detailed=False, strict=False) -> int:
    num_errors = 0
    num_valid_lines = 0
    csv_file.seek(0)  # set to start of file in case it has been read earlier
    line: str = csv_file.readline()
    while line:
        try:
            printable_line = line
            if detailed:
                # do not print passwords
                printable_line = line.split(",")[0]
                UserCommand._validate_user_or_throw(line, logger)
            else:
                logger.debug("> username - {}".format(line))
                UserCommand._validate_username_or_throw(line)
            num_valid_lines += 1
        except Exception as exc:
            logger.info(_("importcsvsummary.error.line").format(printable_line, exc, ""))
            num_errors += 1
        line = csv_file.readline()
    if strict and num_errors > 0:
        Errors.exit_with_error(logger, _("importcsvsummary.error.too_many_errors"))
    return num_valid_lines
def get_comments(file_handler: io.TextIOWrapper) -> dict:
    comments_table = {}
    line = file_handler.readline()[:-1]
    comment = ""
    value = 0
    # Skip ahead to the "Genes (total)" line
    while line[12:25] != "Genes (total)":
        line = file_handler.readline()[:-1]
    comment = line[12:25]
    value = line.split()[3]
    comments_table.update({comment: value})
    line = file_handler.readline()[:-1]
    comment = line[12:24]
    value = line.split()[3]
    comments_table.update({comment: value})
    line = file_handler.readline()[:-1]
    comment = line[12:26]
    value = line.split()[3]
    comments_table.update({comment: value})
    line = file_handler.readline()[:-1]
    comment = line[12:15]
    comment_2 = line.split("(")[1]
    comment_3 = comment_2.split(")")[0]
    comment = comment + "(" + comment_3 + ")"
    value = line.split("::")[1]
    comments_table.update({comment: value})
    return comments_table
def _setup(self, stream: io.TextIOWrapper):
    assert len(self.files) == 3, \
        'received {} files, expected 3'.format(len(self.files))
    lines = super(Parser, self)._setup(stream)
    if not hasattr(self, '_records'):
        self._records = dict()
    if not hasattr(self, '_fileno'):
        self._fileno = 0
    if stream.name.endswith('names.txt'):
        content = stream.readline().strip()
        lines += 1
        logging.debug("file header:\n%s", content)
        self._parse = self._parseName
        self._fileno += 1
    elif stream.name.endswith('aliases.txt'):
        logging.debug('parsing aliases')
        content = stream.readline().strip()
        lines += 1
        logging.debug("file header:\n%s", content)
        self._parse = self._parseAlias
        self._fileno += 1
    elif stream.name.endswith('tair.txt'):
        logging.debug('parsing EntrezGene links')
        self._parse = self._parseEntrez
        self._fileno += 1
    else:
        raise RuntimeError('unknown TAIR file "{}"'.format(stream.name))
    return lines
def loadTRC(file):
    tfile = TextIOWrapper(file)
    # Skip first line and read metadata
    tfile.readline()
    reader = csv.DictReader(tfile, delimiter='\t')
    data = dict(
        map(lambda kv: kv if kv[0] == 'Units' else (kv[0], int(kv[1])),
            reader.__next__().items()))
    # Get marker names
    reader = csv.reader(tfile, delimiter='\t')
    data["Labels"] = list(
        filter(lambda x: x != '',
               map(lambda x: x.strip(), reader.__next__())))[2:]
    reader.__next__()
    # Read data (np.float was removed in NumPy 1.24; use the builtin float)
    data["Data"] = np.empty(shape=(data["NumMarkers"], data["NumFrames"], 3),
                            dtype=float)
    data["Timestamps"] = np.empty(shape=(data["OrigNumFrames"],), dtype=float)
    for row in reader:
        data["Timestamps"][int(row[0])] = float(row[1])
        for label in range(len(data["Labels"])):
            data["Data"][label][int(row[0])] = list(
                map(lambda x: float(x), row[2 + label * 3:2 + (label + 1) * 3]))
    return data
from typing import Optional

def _get_next_not_empty_line(file_handler: TextIOWrapper) -> Optional[str]:
    line = file_handler.readline()
    while line and line.isspace():
        line = file_handler.readline()
    if not line:
        return None
    return line
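# Usage sketch (not part of the original source): the parameter is annotated
# as TextIOWrapper, but any text stream works, so io.StringIO stands in here.
import io

_f = io.StringIO("\n   \nvalue\n")
print(repr(_get_next_not_empty_line(_f)))  # 'value\n'
print(_get_next_not_empty_line(_f))        # None, EOF reached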
def readCmd(sio: io.TextIOWrapper, cmd: str):
    dt_out = sio.write(cmd + "\n")
    sio.flush()
    if dt_out == 0:
        raise Exception("Unable to write to device.")
    if sio.readline().strip() != cmd:
        raise Exception("Device is not responding.")
    return sio.readline().strip()
def read_section_to_segment(text: io.TextIOWrapper) -> Dict[str, int]:
    """Read a section-to-segment map from readelf output."""
    section_to_segment = {}
    while line := text.readline().strip():
        s = line.split()
        segment = int(s[0], 10)
        for section in s[1:]:
            section_to_segment[section] = segment
    # The original snippet was missing the return of the built map
    return section_to_segment
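# Usage sketch (not part of the original source): feeds a hand-written sample
# of readelf's "Section to Segment mapping" block; assumes the imports the
# function needs (io, typing.Dict) are in scope.
import io

_text = io.StringIO(" 00 .text .rodata\n 01 .data .bss\n\n")
print(read_section_to_segment(_text))
# {'.text': 0, '.rodata': 0, '.data': 1, '.bss': 1}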
def get_problems_for_one(fin: io.TextIOWrapper):
    line = fin.readline().strip()
    while not line:
        line = fin.readline().strip()
    prob_list = line.split(' ')
    prob_list.pop(0)  # drop the first field
    return {int(c) for c in prob_list}
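# Usage sketch (not part of the original source): blank lines are skipped and
# the leading field is discarded before the remaining numbers become a set.
import io

_fin = io.StringIO("\n\n3 10 20 30\n")
print(get_problems_for_one(_fin))  # {10, 20, 30}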
def load_stations(self, subway: Subway, fp: TextIOWrapper) -> None:
    line = fp.readline().strip()
    while line:
        subway.add_station(line)
        line = fp.readline().strip()
    return None
def generic_parse(fname, **kwargs):
    warnings.filterwarnings("ignore")
    num_splits = kwargs.pop("num_splits", None)
    start = kwargs.pop("start", None)
    end = kwargs.pop("end", None)
    header_size = kwargs.pop("header_size", 0)
    encoding = kwargs.get("encoding", None)
    callback = kwargs.pop("callback")
    if start is None or end is None:
        # This only happens when we are reading with only one worker (default)
        return callback(fname, **kwargs)
    # pop "compression" from kwargs because bio is uncompressed
    with OpenFile(
        fname,
        "rb",
        kwargs.pop("compression", "infer"),
        **(kwargs.pop("storage_options", None) or {}),
    ) as bio:
        header = b""
        # In this case we beware that the first line can contain a BOM, so
        # we add this line to the `header` for reading and then skip it
        if encoding and (
            "utf" in encoding
            and "8" not in encoding
            or encoding == "unicode_escape"
            or encoding.replace("-", "_") == "utf_8_sig"
        ):
            # do not 'close' the wrapper - the underlying buffer is managed by the `bio` handle
            fio = TextIOWrapper(bio, encoding=encoding, newline="")
            if header_size == 0:
                header = fio.readline().encode(encoding)
                kwargs["skiprows"] = 1
            for _ in range(header_size):
                header += fio.readline().encode(encoding)
        elif encoding is not None:
            if header_size == 0:
                header = bio.readline()
                # `skiprows` can only be None here, so don't check its type
                # and just set it to 1
                kwargs["skiprows"] = 1
            for _ in range(header_size):
                header += bio.readline()
        else:
            for _ in range(header_size):
                header += bio.readline()
        bio.seek(start)
        to_read = header + bio.read(end - start)
    if "memory_map" in kwargs:
        kwargs = kwargs.copy()
        del kwargs["memory_map"]
    pandas_df = callback(BytesIO(to_read), **kwargs)
    index = (
        pandas_df.index
        if not isinstance(pandas_df.index, pandas.RangeIndex)
        else len(pandas_df)
    )
    return _split_result_for_readers(1, num_splits, pandas_df) + [
        index,
        pandas_df.dtypes,
    ]
def _setup(self, stream: io.TextIOWrapper):
    lines = super(Parser, self)._setup(stream)
    content = stream.readline().strip()
    lines += 1
    while content.startswith('#'):
        content = stream.readline().strip()
        lines += 1
    logging.debug("file header:\n%s", content)
    return lines
def read_options(f: TextIOWrapper) -> Dict[str, str]:
    next_option_line = f.tell()
    options: Dict[str, str] = {}
    next_line = f.readline()
    while next_line.startswith("#"):
        option_match = re.match("# (.*): (.*)", next_line)
        assert option_match
        key, value = option_match.group(1, 2)
        options[key] = value
        next_option_line = f.tell()
        next_line = f.readline()
    f.seek(next_option_line)
    return options
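# Usage sketch (not part of the original source): option headers of the form
# "# key: value" are consumed, and the stream is left positioned at the first
# non-option line; assumes re, typing.Dict and TextIOWrapper are in scope.
import io

_f = io.StringIO("# solver: cvc5\n# timeout: 60\n(set-logic QF_LIA)\n")
print(read_options(_f))        # {'solver': 'cvc5', 'timeout': '60'}
print(_f.readline().rstrip())  # (set-logic QF_LIA)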
def load_lines(self, subway: Subway, line_name: str, fp: TextIOWrapper) -> None:
    pre_station = ''
    cur_station = fp.readline().strip()
    while cur_station:
        if len(pre_station) > 0:
            subway.add_connection(pre_station, cur_station, line_name)
        pre_station = cur_station
        cur_station = fp.readline().strip()
    return None
def mid_bisect(self, f: io.TextIOWrapper, start: int, end: int) -> None:
    if f.tell() == self.prev_tell:
        f.readline()
        return
    self.prev_tell = f.tell()
    self.bisect_count += 1
    mid = (start + end) // 2
    line = LogBisect.find_line(f, mid)
    match = self.regex.search(line)
    p = parse(match.group())
    if self.dtarget < p:
        self.mid_bisect(f, start, mid)
    elif self.dtarget > p:
        self.mid_bisect(f, mid, end)
def _setup(self, stream: io.TextIOWrapper):
    assert len(self.files) == 2, \
        'received {} files, expected 2'.format(len(self.files))
    lines = super(Parser, self)._setup(stream)
    logging.debug("file header:\n%s", stream.readline().strip())
    if not hasattr(self, '_fileno'):
        self._fileno = 0
    idx = stream.name.rfind('/') + 1
    if stream.name.startswith('gene_info', idx):
        if not hasattr(self, '_pmidMapping'):
            raise RuntimeError('gene_info must be after gene2pubmed file')
        logging.debug("parsed PubMed mappings for %d genes",
                      len(self._pmidMapping))
        self._parse = self._parseMain
        self._generefs = set()
        self._fileno += 1
    elif stream.name.startswith('gene2pubmed', idx):
        if self._fileno != 0:
            raise RuntimeError('gene2pubmed file not parsed first')
        self._pmidMapping = defaultdict(set)
        self._parse = self._parsePubMed
        self._fileno += 1
    else:
        raise RuntimeError('unknown Entrez file "{}"'.format(stream.name))
    return lines + 1
def read_lines(path, n: int, stream: io.TextIOWrapper) -> typing.Iterable[dict]:
    for _ in tqdm.trange(n, desc=f"reading {path}"):
        line = stream.readline()
        try:
            yield orjson.loads(line)
        except orjson.JSONDecodeError:
            warnings.warn(f"Could not decode line:\n{line}")
class APSFileHandler:
    def __init__(self, zip_file):
        self.patents_file = TextIOWrapper(zip_file)

    def readline(self):
        return self.patents_file.readline()

    def list_xmls(self):
        output = StringIO()
        try:
            self.readline()
            line = self.readline()
            output.write(line)
            line = self.readline()
        except StopIteration:
            print('error')
            return
        # Identity comparison with a literal ("is not ''") is unreliable;
        # compare by value instead
        while line != '':
            if 'PATN' in line:
                output.seek(0)
                yield output
                output = StringIO()
                output.write(line)
            else:
                output.write(line)
            try:
                line = self.readline()
            except StopIteration:
                break
        output.seek(0)
        yield output
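# Usage sketch (not part of the original source): APSFileHandler only needs a
# binary stream, so io.BytesIO stands in for the zip member here. Each yielded
# StringIO holds one PATN-delimited document.
import io

_handler = APSFileHandler(io.BytesIO(b"HEADER\nPATN\nfield 1\nPATN\nfield 2\n"))
for _doc in _handler.list_xmls():
    print(repr(_doc.read()))  # 'PATN\nfield 1\n', then 'PATN\nfield 2\n'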
def _setup(self, stream: io.TextIOWrapper):
    assert len(self.files) == 3, \
        'received {} files, expected 3'.format(len(self.files))
    lines = super(Parser, self)._setup(stream)
    if not hasattr(self, '_records'):
        self._records = dict()
    if not hasattr(self, '_fileno'):
        self._fileno = 0
    if stream.name.endswith('List1.rpt'):
        content = stream.readline().strip()
        lines += 1
        logging.debug("file header:\n%s", content)
        self._parse = self._parseList1
        self._fileno += 1
    elif stream.name.endswith('SwissProt_TrEMBL.rpt'):
        logging.info('parsing UniProt links')
        self._parse = self._parseUniProt
        self._fileno += 1
    elif stream.name.endswith('EntrezGene.rpt'):
        logging.info('parsing EntrezGene links')
        self._parse = self._parseEntrez
        self._fileno += 1
    else:
        # original mixed a "%s" placeholder with str.format; use "{}"
        raise RuntimeError('unknown MGD file "{}"'.format(stream.name))
    return lines
def read_input(file: TextIOWrapper) -> Tuple[int, int]:
    # You can use 'input()' to get inputs
    input = lambda: file.readline().rstrip()
    solution_arg1 = int(input())
    solution_arg2 = int(input())
    return solution_arg1, solution_arg2
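# Usage sketch (not part of the original source): assumes typing.Tuple and
# TextIOWrapper are in scope for the annotations.
import io

print(read_input(io.StringIO("3\n7\n")))  # (3, 7)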
def _setup(self, stream: io.TextIOWrapper):
    lines = super(Parser, self)._setup(stream)
    logging.debug('correcting wrong links by Entrez')
    try:
        hgnc = self.session.query(GeneRef).filter(
            GeneRef.namespace == Namespace.hgnc,
            GeneRef.accession == '31739').one()
        entrez = self.session.query(GeneRef).filter(
            GeneRef.namespace == Namespace.entrez,
            GeneRef.accession == '648809').one()
        if hgnc.id != entrez.id:
            hgnc.id = entrez.id
            logging.info(
                'correcting wrong link of %s:31739 from '
                '%s:388159 to %s:648809',
                Namespace.hgnc, Namespace.entrez, Namespace.entrez)
            # commit, because otherwise multiple parsers would hang here
            # waiting for each other's commit
            self.session.commit()
    except NoResultFound:
        pass
    logging.debug("file header:\n%s", stream.readline().strip())
    return lines + 1
def _read_table(cls, tar_handle, filename, required_columns=()):
    required_columns = frozenset(required_columns) | frozenset(("ID",))
    handle = TextIOWrapper(tar_handle.extractfile(filename))
    result = {}
    try:
        header = handle.readline().rstrip("\r\n").split("\t")
        if len(header) != len(set(header)):
            raise ZonkeyDBError(
                "Table %r contains duplicate columns!" % (filename,))
        missing = required_columns - set(header)
        if missing:
            # original called ", ".join() without an argument
            raise ZonkeyDBError(
                "Required columns are missing in table %r: %s"
                % (filename, ", ".join(missing)))
        for linenum, line in enumerate(handle):
            fields = line.rstrip("\r\n").split("\t")
            if len(fields) != len(header):
                raise ZonkeyDBError(
                    "Error reading %r at line %i; "
                    "expected %i columns, found %i columns!"
                    % (filename, linenum, len(header), len(fields)))
            row = dict(zip(header, fields))
            if row["ID"] in result:
                raise ZonkeyDBError(
                    "Duplicate IDs in %r: %s" % (filename, row["ID"]))
            result[row["ID"]] = row
    finally:
        handle.close()
    return result
def get_users_from_file(csv_file: io.TextIOWrapper, logger=None) -> List[TSC.UserItem]:
    csv_file.seek(0)  # set to start of file in case it has been read earlier
    if logger:
        logger.debug("Reading from file {}".format(csv_file.name))
    user_list = []
    line = csv_file.readline()
    if logger:
        logger.debug("> {}".format(line))
    while line:
        user: Optional[TSC.UserItem] = UserCommand._parse_line(line)
        if user:
            user_list.append(user)
        line = csv_file.readline()
    return user_list
def parse_declare_saved_begin(file: io.TextIOWrapper, class_and_base: str,
                              filename: str) -> dict:
    class_and_base_array = class_and_base.split(',')
    class_name = class_and_base_array[0].strip()
    base_class_name = class_and_base_array[1].strip()
    line = file.readline()
    class_type = extract_macros_params(line, "REGISTER_CLASS_AS")
    if not class_type:
        raise Exception(
            "Next line after 'DECLARE_SAVEABLE' macro does not contain "
            "a 'REGISTER_CLASS_AS' macro")
    variables = []
    on_load_calls = []
    while True:
        line = file.readline()
        result = extract_macros_params(line, "KV_SAVEABLE")
        if result:
            variables.append(result.strip())
            continue
        result = extract_macros_params(line, "KV_ON_LOAD_CALL")
        if result:
            on_load_calls.append(result.strip())
            continue
        result = extract_macros_params(line, "END_DECLARE")
        if result:
            if result.strip() != class_name:
                raise Exception("'END_DECLARE' does not match 'DECLARE_SAVEABLE'")
            break
    subdirectory = "sources"
    header_index = filename.find(subdirectory) + len(subdirectory) + 1
    header = filename[header_index:]
    class_entry = {
        "class": class_name,
        "header": header,
        "base_class": base_class_name,
        "type": class_type,
        "variables": variables,
        "on_load_calls": on_load_calls,
    }
    return class_entry
def handle_stdin(s: io.TextIOWrapper, select_map: dict[int, socket], listening: int):
    """
    Handles stdin input and dispatches it to its corresponding function
    depending on the command
    """
    msg = s.readline()
    trimmed = msg.strip()
    command, args = input_parse(trimmed)
    # Text command is a simple message
    if command == "text":
        # Create the message to be sent
        wire_msg = msgs_capnp.PeerMsg.new_message()
        wire_msg.type = "text"
        wire_msg.content.text = args
        # Serialize message
        bytes_msg = wire_msg.to_bytes()
        size = len(bytes_msg)
        # Broadcast the message
        for p in select_map:
            if p != 0 and p != listening:
                sock = select_map[p]
                sock.send(size.to_bytes(4096, byteorder='big', signed=False))
                sock.send(bytes_msg)
    # File command is to send files
    elif command == "file":
        # Open file path
        try:
            f = open(args, 'rb')
        except OSError as e:
            print(f"[x] Error while opening file \"{e.strerror}\"")
            return
        # Read file content into variable
        file_content = f.read()
        # Craft message to be sent
        wire_msg = msgs_capnp.PeerMsg.new_message()
        wire_msg.type = "file"
        wire_msg.content.init('file')
        wire_msg.content.file.filename = args
        wire_msg.content.file.content = file_content
        # Serialize message
        bytes_msg = wire_msg.to_bytes()
        size = len(bytes_msg)
        # Broadcast the message
        for p in select_map:
            if p != 0 and p != listening:
                sock = select_map[p]
                sock.send(size.to_bytes(4096, byteorder='big', signed=False))
                sock.send(bytes_msg)
        print("[i] File sent correctly")
def read_symbols(text: io.TextIOWrapper) -> SymbolDF:
    """Read a symbol table from readelf output."""
    columns = ['symbol', 'address', 'size', 'type', 'bind', 'shndx']
    rows = []
    decoder = re.compile(
        r"""^(?P<number>\d+):
            \s+(?P<address>[0-9a-fA-F]+)
            \s+(?P<size>\d+)
            \s+(?P<type>\S+)
            \s+(?P<bind>\S+)
            \s+(?P<vis>\S+)
            \s+(?P<shndx>\S+)
            \s*(?P<symbol>\S*)
        """, re.VERBOSE)
    while line := text.readline():
        if not (match := decoder.match(line.strip())):
            break
        # Collect the matched fields in column order (the original snippet
        # built `rows` but never filled or returned it)
        rows.append([match.group(c) for c in columns])
    # Assumes SymbolDF accepts DataFrame-style (data, columns) construction
    return SymbolDF(rows, columns=columns)
def test_readline(rf, fn):
    f = rf.open(fn)
    tr = TextIOWrapper(BufferedReader(f))
    while True:
        ln = tr.readline()
        if not ln:
            break
    tr.close()
def readline_with_check(input: TextIOWrapper, EOF_line='') -> str:
    """Additionally check if EOF."""
    new_line = input.readline()
    # Print to stdout read lines from subprocess stdout or stderr.
    print(new_line, end='')
    if new_line == EOF_line:
        raise StopIteration
    return new_line
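# Usage sketch (not part of the original source): the line is echoed to stdout
# and returned; at EOF, readline() returns '' (the default EOF_line) and
# StopIteration is raised.
import io

_stream = io.StringIO("first\n")
print(repr(readline_with_check(_stream)))  # 'first\n'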
def searchback(self, f: io.TextIOWrapper, dtarget: datetime):
    linetime = dtarget
    while linetime == dtarget:
        LogBisect.readback(f)
        saved = f.tell()
        match = self.regex.search(f.readline())
        linetime = parse(match.group())
    # Restore the position of the last line read
    f.seek(saved, io.SEEK_SET)
def skip_header(rinex_file: TextIOWrapper) -> int:
    noLine = 0
    while True:
        noLine += 1
        if 'END' in rinex_file.readline():
            break
    return noLine
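# Usage sketch (not part of the original source): counts header lines up to
# and including the "END OF HEADER" marker of a RINEX file.
import io

_rinex = io.StringIO(
    "     2.11           OBSERVATION DATA                       RINEX VERSION / TYPE\n"
    "                                                            END OF HEADER\n"
    "observation records follow\n")
print(skip_header(_rinex))  # 2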
def read_segments(text: io.TextIOWrapper) -> SegmentDF:
    """Read a segment table from readelf output."""
    decoder = re.compile(
        r"""^(?P<type>\w+)
            \s+(?P<offset>0x[0-9a-fA-F]+)
            \s+(?P<vaddress>0x[0-9a-fA-F]+)
            \s+(?P<paddress>0x[0-9a-fA-F]+)
            \s+(?P<filesize>0x[0-9a-fA-F]+)
            \s+(?P<size>0x[0-9a-fA-F]+)
            \s(?P<flags>.*)
            \s+0x(?P<align>[0-9a-fA-F]+)
        """, re.VERBOSE)
    columns = ['type', 'vaddress', 'paddress', 'size', 'flags']
    rows = []
    while line := text.readline():
        if not (match := decoder.match(line.strip())):
            break
        # Collect the matched fields in column order (the original snippet
        # built `rows` but never filled or returned it)
        rows.append([match.group(c) for c in columns])
    # Assumes SegmentDF accepts DataFrame-style (data, columns) construction
    return SegmentDF(rows, columns=columns)
def _next_line(rinex_file: TextIOWrapper) -> list:
    line = rinex_file.readline()
    if not line or line.isspace():
        raise EndOfFile
    nums = [num for num in line.strip().replace('D', 'e').split(' ')
            if num != '']
    fixed_nums = _fix_negative_num(nums)
    return fixed_nums
def generateCatchStringSlice(self, fileHandle: TextIOWrapper):
    # The original wrapped this in a try/finally whose finally block was
    # empty; the no-op clause is dropped here
    text = fileHandle.readline()
    matcher = self.pattern.match(text)
    if matcher is None:
        raise PatternNotMatchError(text, self.pattern.pattern)
    return matcher.group(0)
def __convert_csv(in_f):
    f = TextIOWrapper(in_f, encoding='big5')
    # skip first 2 lines for account info and headers
    for _ in range(2):
        f.readline()
    reader = csv.reader(f)
    ret = defaultdict(dict)
    for row in reader:
        category = None
        if 'Y177748' in row[5]:
            category = '中華電信'  # Chunghwa Telecom
        if '北市水費' in row[4]:
            category = '水費'  # water bill
        if '台電電費' in row[4]:
            category = '電費'  # electricity bill
        if '信用卡款' in row[4] and '國泰世華卡' in row[5]:
            category = 'koko卡費'  # koko card payment
        if '604056495' in row[5]:
            category = '瓦斯'  # gas
        if not category:
            logging.info("{} no category".format(row))
            continue
        row_date = datetime.strptime(row[0], '%Y%m%d').date()
        spent = int(row[1])
        key = row_date.strftime("%Y/%m")
        if ret.get(key, {}).get(category):
            ret[key][category] += spent
            continue
        ret[key].update({category: spent})
    ret = OrderedDict(sorted(ret.items(), key=lambda t: t[0]))
    logging.info(ret)
    return ret
def parse_post_body(request):
    text_io = TextIOWrapper(request.rfile, encoding="UTF-8")
    splitted_data = text_io.readline().split("&")
    print("Splitted data:" + str(splitted_data))
    text_io.close()
    key_value = dict()
    for data in splitted_data:
        temp = data.split("=")
        key_value.update({temp[0]: temp[1]})
    return key_value
def line_iterator(stream):
    from io import TextIOWrapper
    text_stream = TextIOWrapper(stream)
    while text_stream.readable():
        line = text_stream.readline().strip()
        if len(line) == 0:
            break
        yield line
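# Usage sketch (not part of the original source): wraps a binary stream and
# yields stripped lines until the first empty line (or EOF, where readline()
# also returns an empty string).
import io

print(list(line_iterator(io.BytesIO(b"alpha\nbeta\n\nignored\n"))))
# ['alpha', 'beta']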
def build_fa(self, input_config: io.TextIOWrapper) -> FA.FA:
    """
    build FA from your input configuration file
    :param input_config: input configuration file
    :return: FA built from the given configuration file
    """
    input_config.readline()  # first line is consumed but its value is unused
    delta = int(input_config.readline())
    input_config.readline()
    transition_table = []
    for _ in range(delta):
        transition_table.append(
            [int(number) for number in
             input_config.readline().rstrip('\n').split(' ')])
    input_config.readline()
    final_states = {int(number) for number in
                    input_config.readline().rstrip('\n').split(' ')}
    input_config.close()
    del input_config
    return FA.FA(tuple(transition_table), final_states)
def parse_csv(data: TextIOWrapper):
    if 'Category' not in data.readline().split(','):
        raise SystemExit('CSV file does not contain Category section')
    result = defaultdict(list)
    for line in data:
        try:
            item = Record(*parse_csv_line(line))
        except Exception as _:
            pprint(line)
            raise
        if item.category in IGNORE_CATEGORIES or \
                item.id in IGNORE_ISSUES or \
                item.status in IGNORE_STATUSES or \
                item.tracker in IGNORE_TRACKER:
            continue
        result[item.category].append(item)
    return result
def read_source_lines(filename):
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines, cookie_present = detect_encoding_ex(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
    except:
        buffer.close()
        raise
    with text:
        if cookie_present:
            for i in lines:
                # so compile() won't complain about an encoding declaration
                # in a Unicode string, see 2.7/Python/ast.c:228
                yield text.readline().replace("coding", "Coding")
        for line in text:
            yield line
def get_reader(self):
    f = self.files['file']
    if f.name.endswith(".xlsx"):
        return namedtuple_xlsx_reader(f)
    enc = self.cleaned_data['encoding']
    if enc.lower() in ('', 'autodetect'):
        enc = chardet.detect(f.read(1024))["encoding"]
        log.info("Guessed encoding: {enc}".format(**locals()))
        f.seek(0)
    f = TextIOWrapper(f.file, encoding=enc)
    d = self.cleaned_data['dialect'] or 'autodetect'
    if d == 'autodetect':
        dialect = csv.Sniffer().sniff(f.readline())
        f.seek(0)
        if dialect.delimiter not in "\t,;":
            dialect = csv.get_dialect('excel')
    else:
        dialect = csv.get_dialect(d)
    return namedtuple_csv_reader(f, dialect=dialect)
class SmtLibSolver(Solver):
    """Wrapper for using a solver via textual SMT-LIB interface.

    The solver is launched in a subprocess using args as arguments of
    the executable. Interaction with the solver occurs via pipe.
    """

    def __init__(self, args, environment, logic, user_options=None,
                 LOGICS=None):
        Solver.__init__(self, environment, logic=logic,
                        user_options=user_options)
        # Flag used to debug interaction with the solver
        self.dbg = False
        if LOGICS is not None:
            self.LOGICS = LOGICS
        self.args = args
        self.declared_vars = set()
        self.solver = Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        self.parser = SmtLibParser(interactive=True)
        if PY2:
            self.solver_stdin = self.solver.stdin
            self.solver_stdout = self.solver.stdout
        else:
            self.solver_stdin = TextIOWrapper(self.solver.stdin)
            self.solver_stdout = TextIOWrapper(self.solver.stdout)
        # Initialize solver
        self.set_option(":print-success", "true")
        if self.options.generate_models:
            self.set_option(":produce-models", "true")
        # Redirect diagnostic output to stdout
        self.set_option(":diagnostic-output-channel", '"stdout"')
        if self.options is not None:
            for o, v in iteritems(self.options):
                self.set_option(o, v)
        self.set_logic(logic)

    def set_option(self, name, value):
        self._send_silent_command(
            SmtLibCommand(smtcmd.SET_OPTION, [name, value]))

    def set_logic(self, logic):
        self._send_silent_command(SmtLibCommand(smtcmd.SET_LOGIC, [logic]))

    def _send_command(self, cmd):
        """Sends a command to the STDIN pipe."""
        if self.dbg:
            print("Sending: " + cmd.serialize_to_string())
        cmd.serialize(self.solver_stdin, daggify=True)
        self.solver_stdin.write("\n")
        self.solver_stdin.flush()

    def _send_silent_command(self, cmd):
        """Sends a command to the STDIN pipe and awaits acknowledgment."""
        self._send_command(cmd)
        self._check_success()

    def _get_answer(self):
        """Reads a line from the STDOUT pipe."""
        res = self.solver_stdout.readline().strip()
        if self.dbg:
            print("Read: " + str(res))
        return res

    def _get_value_answer(self):
        """Reads and parses an assignment from the STDOUT pipe."""
        lst = self.parser.get_assignment_list(self.solver_stdout)
        if self.dbg:
            print("Read: " + str(lst))
        return lst

    def _declare_variable(self, symbol):
        cmd = SmtLibCommand(smtcmd.DECLARE_FUN, [symbol])
        self._send_silent_command(cmd)
        self.declared_vars.add(symbol)

    def _check_success(self):
        res = self._get_answer()
        if res != "success":
            raise UnknownSolverAnswerError("Solver returned: '%s'" % res)

    def solve(self, assumptions=None):
        assert assumptions is None
        self._send_command(SmtLibCommand(smtcmd.CHECK_SAT, []))
        ans = self._get_answer()
        if ans == "sat":
            return True
        elif ans == "unsat":
            return False
        elif ans == "unknown":
            raise SolverReturnedUnknownResultError
        else:
            raise UnknownSolverAnswerError("Solver returned: " + ans)

    def reset_assertions(self):
        self._send_silent_command(SmtLibCommand(smtcmd.RESET_ASSERTIONS, []))
        return

    def add_assertion(self, formula, named=None):
        deps = formula.get_free_variables()
        for d in deps:
            if d not in self.declared_vars:
                self._declare_variable(d)
        self._send_silent_command(SmtLibCommand(smtcmd.ASSERT, [formula]))

    def push(self, levels=1):
        self._send_silent_command(SmtLibCommand(smtcmd.PUSH, [levels]))

    def pop(self, levels=1):
        self._send_silent_command(SmtLibCommand(smtcmd.POP, [levels]))

    def get_value(self, item):
        self._send_command(SmtLibCommand(smtcmd.GET_VALUE, [item]))
        lst = self._get_value_answer()
        assert len(lst) == 1
        assert len(lst[0]) == 2
        return lst[0][1]

    def print_model(self, name_filter=None):
        if name_filter is not None:
            raise NotImplementedError
        for v in self.declared_vars:
            print("%s = %s" % (v, self.get_value(v)))

    def get_model(self):
        assignment = {}
        for s in self.environment.formula_manager.get_all_symbols():
            if s.is_term():
                v = self.get_value(s)
                assignment[s] = v
        return EagerModel(assignment=assignment,
                          environment=self.environment)

    def _exit(self):
        self._send_command(SmtLibCommand(smtcmd.EXIT, []))
        self.solver_stdin.close()
        self.solver_stdout.close()
        self.solver.stderr.close()
        self.solver.terminate()
        return
class MultiPageTextImporter:
    def __init__(self, mainControl):
        """
        mainControl -- Currently PersonalWikiFrame object
        """
        self.mainControl = mainControl

    def getImportTypes(self, guiparent):
        """
        Return sequence of tuples with the description of import types
        provided by this object. A tuple has the form
        (<imp. type>, <human readable description>,
         <panel for add. options or None>)
        If panels for additional options must be created, they should use
        guiparent as parent
        """
        if guiparent:
            res = wx.xrc.XmlResource.Get()
            mptPanel = res.LoadPanel(guiparent, "ImportSubMultipageText")
#             ctrls = XrcControls(htmlPanel)
#             config = self.mainControl.getConfig()
#
#             ctrls.cbPicsAsLinks.SetValue(config.getboolean("main",
#                     "html_export_pics_as_links"))
#             ctrls.chTableOfContents.SetSelection(config.getint("main",
#                     "export_table_of_contents"))
#             ctrls.tfHtmlTocTitle.SetValue(config.get("main",
#                     "html_toc_title"))
        else:
            mptPanel = None
        return (
                ("multipage_text", _("Multipage text"), mptPanel),
                )

    def getImportSourceWildcards(self, importType):
        """
        If an export type is intended to go to a file, this function
        returns a (possibly empty) sequence of tuples
        (wildcard description, wildcard filepattern).
        If an export type goes to a directory, None is returned
        """
        if importType == "multipage_text":
            return ((_("Multipage files (*.mpt)"), "*.mpt"),
                    (_("Text file (*.txt)"), "*.txt"))
        return None

    def getAddOptVersion(self):
        """
        Returns the version of the additional options information returned
        by getAddOpt(). If the return value is -1, the version info can't
        be stored between application sessions.
        Otherwise, the addopt information can be stored between sessions
        and can later be handed back to the doImport method of the object
        without previously showing the import dialog.
        """
        return 0

    def getAddOpt(self, addoptpanel):
        """
        Reads additional options from panel addoptpanel.
        If getAddOptVersion() > -1, the return value must be a sequence
        of simple string, unicode and/or numeric objects. Otherwise, any
        object can be returned (normally the addoptpanel itself)
        """
        if addoptpanel is None:
            return (0,)
        else:
            ctrls = XrcControls(addoptpanel)
            showImportTableAlways = boolToInt(
                    ctrls.cbShowImportTableAlways.GetValue())
            return (showImportTableAlways,)

    def _collectContent(self):
        """
        Collect lines from current position of importFile up to separator
        or file end, and return them joined to a single string.
        """
        content = []
        while True:
            # Read lines of wikiword
            line = self.importFile.readline()
            if line == "":
                # The last page in mpt file without separator
                # ends as the real wiki page
                break
            if line == self.separator:
                if len(content) > 0:
                    # If last line of mpt page is empty, the original
                    # page ended with a newline, so remove last
                    # character (=newline)
                    content[-1] = content[-1][:-1]
                break
            content.append(line)
        return "".join(content)

    def _skipContent(self):
        """
        Skip content until reaching next separator or end of file
        """
        while True:
            # Read lines of wikiword
            line = self.importFile.readline()
            if line == "":
                # The last page in mpt file without separator
                # ends as the real wiki page
                break
            if line == self.separator:
                break

    def doImport(self, wikiDocument, importType, importSrc,
            compatFilenames, addOpt, importData=None):
        """
        Run import operation.
        wikiDocument -- WikiDocument object
        importType -- string tag to identify how to import
        importSrc -- Path to source directory or file to import from
        compatFilenames -- Should the filenames be decoded from the lowest
                           level compatible?
        addOpt -- additional options returned by getAddOpt()
        importData -- if not None contains data to import as bytestring.
                importSrc is ignored in this case. Needed for trashcan.
        returns True if import was done (needed for trashcan)
        """
        if importData is not None:
            self.rawImportFile = BytesIO(importData)  # TODO bytes or string???
        else:
            try:
                self.rawImportFile = open(pathEnc(importSrc), "rb")
            except IOError:
                raise ImportException(_("Opening import file failed"))
        self.wikiDocument = wikiDocument
        self.tempDb = None
        showImportTableAlways = addOpt[0]
#         wikiData = self.wikiDocument.getWikiData()

        # TODO Do not stop on each import error, instead create error list and
        # continue
        try:
            try:
                # Wrap input file to convert format
                bom = self.rawImportFile.read(len(BOM_UTF8))
                if bom != BOM_UTF8:
                    self.rawImportFile.seek(0)
                    self.importFile = TextIOWrapper(self.rawImportFile,
                            MBCS_ENCODING, "replace")
                else:
                    self.importFile = TextIOWrapper(self.rawImportFile,
                            "utf-8", "replace")
                line = self.importFile.readline()
                if line.startswith("#!"):
                    # Skip initial line with #! to allow execution as shell script
                    line = self.importFile.readline()
                if not line.startswith("Multipage text format "):
                    raise ImportException(
                            _("Bad file format, header not detected"))
                # Following in the format identifier line is a version number
                # of the file format
                self.formatVer = int(line[22:-1])
                if self.formatVer > 1:
                    raise ImportException(
                            _("File format number %i is not supported") %
                            self.formatVer)
                # Next is the separator line
                line = self.importFile.readline()
                if not line.startswith("Separator: "):
                    raise ImportException(
                            _("Bad file format, header not detected"))
                self.separator = line[11:]
                startPos = self.importFile.tell()
                if self.formatVer == 0:
                    self._doImportVer0()
                elif self.formatVer == 1:
                    # Create temporary database. It is mainly filled during
                    # pass 1 to check for validity and other things before
                    # actual importing in pass 2
                    # TODO Respect settings for general temp location!!!
                    self.tempDb = ConnectWrapSyncCommit(sqlite3.connect(""))
                    try:
                        # TODO: Remove column "collisionWithPresent", seems to be unused
                        self.tempDb.execSql("create table entries("
                                "unifName text primary key not null, "  # Unified name in import file
                                "seen integer not null default 0, "  # data really exists
                                "dontImport integer not null default 0, "  # don't import this (set between pass 1 and 2)
                                "missingDep integer not null default 0, "  # missing dependency(ies)
                                "importVersionData integer not null default 0, "  # versioning data present
#                                 "neededBy text default '',"
#                                 "versionContentDifferencing text default '',"
                                "collisionWithPresent text not null default '',"  # Unif. name of present entry which collides with imported one (if any)
                                "renameImportTo text not null default '',"  # Rename imported element to (if at all)
                                "renamePresentTo text not null default ''"  # Rename present element in database to (if at all)
                                ");"
                                )
                        # Dependencies. If unifName isn't imported (or faulty),
                        # neededBy shouldn't be either
                        self.tempDb.execSql("create table depgraph("
                                "unifName text not null default '',"
                                "neededBy text not null default '',"
                                "constraint depgraphpk primary key (unifName, neededBy)"
                                ");"
                                )
                        # Recursive processing is not supported for this table
                        self.tempDb.execSql("create table renamegraph("
                                "unifName text not null default '',"
                                "dependent text not null default '',"
                                "constraint renamegraphpk primary key (unifName, dependent),"
                                "constraint renamegraphsingledep unique (dependent)"
                                ");"
                                )
                        # Collect some initial information into the temporary database
                        self._doImportVer1Pass1()
                        # Draw some logical conclusions on the temp db
                        self._markMissingDependencies()
                        self._markHasVersionData()
                        self._markCollision()
                        # Now ask user if necessary
                        if showImportTableAlways or self._isUserNeeded():
                            if not self._doUserDecision():
                                # Canceled by user
                                return False
                        # Further logical processing after possible user editing
                        self._markNonImportedVersionsData()
                        self._markNonImportedDependencies()
                        self._propagateRenames()
                        # TODO: Remove version data without ver. overview or main data

                        # Back to start of import file and import according to
                        # settings in temp db
                        self.importFile.seek(startPos)
                        self._doImportVer1Pass2()
                        return True
                    finally:
                        self.tempDb.close()
                        self.tempDb = None
            except ImportException:
                raise
            except Exception as e:
                traceback.print_exc()
                raise ImportException(str(e))
        finally:
            self.importFile.close()

    def _markMissingDependencies(self):
        """
        If a datablock wasn't present, all dependent data blocks are
        marked as not to import
        """
        while True:
            self.tempDb.execSql("""
                update entries set missingDep=1, dontImport=1 where
                    (not missingDep) and unifName in (select depgraph.neededBy
                    from depgraph inner join entries
                    on depgraph.unifName = entries.unifName
                    where (not entries.seen) or entries.missingDep);
                """)
            if self.tempDb.rowcount == 0:
                break

    def _markHasVersionData(self):
        """
        Mark if version data present
        """
        self.tempDb.execSql("""
            update entries set importVersionData=1 where
                (not importVersionData) and unifName in
                (select substr(unifName, 21) from entries where
                unifName glob 'versioning/overview/*' and not dontImport)
            """)  # TODO Take missing deps into account here?
#         self.tempDb.execSql("insert or replace into entries(unifName, importVersionData) "
#                 "values (?, 1)", (depunifName,))

    def _markCollision(self):
        """
        Mark collisions between existing data blocks and those to import
        """
        # First find collisions with wiki words
        for wikipageUnifName in self.tempDb.execSqlQuerySingleColumn(
                "select unifName from entries where unifName glob 'wikipage/*' "
                "and not dontImport"):
            wpName = wikipageUnifName[9:]
            if not self.wikiDocument.isDefinedWikiPageName(wpName):
                continue
            self.tempDb.execSql("update entries set collisionWithPresent = ? "
                    "where unifName = ?",
                    (wikipageUnifName, wikipageUnifName))
#                     (u"wikipage/" + collisionWithPresent, wikipageUnifName))
        # Then find other collisions (saved searches etc.)
        for unifName in self.tempDb.execSqlQuerySingleColumn(
                "select unifName from entries where (unifName glob 'savedsearch/*' "
                "or unifName glob 'savedpagesearch/*') and not dontImport"):
            if self.wikiDocument.hasDataBlock(unifName):
                self.tempDb.execSql("update entries set collisionWithPresent = ? "
                        "where unifName = ?", (unifName, unifName))

    def _markNonImportedVersionsData(self):
        """
        After user dialog: If importVersionData is false for some entries,
        the depending version data shouldn't be imported.
        Only the versioning overview is marked for not importing. The next
        step propagates this to the other data blocks
        """
        self.tempDb.execSql("""
            update entries set dontImport = 1 where
                unifName in (select 'versioning/overview/' || unifName
                from entries where not importVersionData)
            """)
#         # Vice versa the importVersionData column must be updated if
#         self.tempDb.execSql("""
#             update entries set importVersionData = 0 where importVersionData
#                 and ('versioning/overview/' || unifName) in (select unifName
#                 from entries where dontImport)
#             """)

    def _markNonImportedDependencies(self):
        """
        After user dialog: If some data blocks were chosen not to import,
        mark all dependent blocks to not import also (especially version data)
        """
        while True:
            self.tempDb.execSql("""
                update entries set dontImport=1 where
                    (not dontImport) and unifName in (select depgraph.neededBy
                    from depgraph inner join entries
                    on depgraph.unifName = entries.unifName
                    where entries.dontImport);
                """)
            if self.tempDb.rowcount == 0:
                break

    def _propagateRenames(self):
        """
        Write rename commands for imported items to all parts to import
        if some parts need renaming. Renaming of present items is not
        propagated.
        """
        for unifName, renImportTo in self.tempDb.execSqlQuery(
                "select unifName, renameImportTo from entries "
                "where renameImportTo != '' and not dontImport"):
            for depUnifName in self.tempDb.execSqlQuerySingleColumn(
                    "select dependent from renamegraph where unifName = ? and "
                    "dependent in (select unifName from entries where "
                    "not dontImport)", (unifName,)):
                if depUnifName.endswith(unifName):
                    newName = depUnifName[:-len(unifName)] + renImportTo
                    self.tempDb.execSql("""
                        update entries set renameImportTo=?
                            where unifName = ?
                        """, (newName, depUnifName))

    def _doUserDecision(self):
        """
        Called to present GUI to user for deciding what to do.
        This method is overwritten for trashcan GUI.
        Returns False if user canceled operation
        """
        return MultiPageTextImporterDialog.runModal(
                self.mainControl, self.tempDb, self.mainControl)

    def _isUserNeeded(self):
        """
        Decide if a dialog must be shown to ask user how to proceed.
        Under some circumstances the dialog may be shown regardless of
        the result.
        """
        if self.tempDb.execSqlQuerySingleItem("select missingDep from entries "
                "where missingDep limit 1", default=False):
            # Missing dependency
            return True
        if len(self.tempDb.execSqlQuerySingleItem("select collisionWithPresent "
                "from entries where collisionWithPresent != '' limit 1",
                default="")) > 0:
            # Name collision
            return True
        # No problems found
        return False

    def _doImportVer0(self):
        """
        Import wikiwords if format version is 0.
        """
        langHelper = wx.GetApp().createWikiLanguageHelper(
                self.wikiDocument.getWikiDefaultWikiLanguage())
        while True:
            # Read next wikiword
            line = self.importFile.readline()
            if line == "":
                break
            wikiWord = line[:-1]
            errMsg = langHelper.checkForInvalidWikiWord(wikiWord,
                    self.wikiDocument)
            if errMsg:
                raise ImportException(_("Bad wiki word: %s, %s") %
                        (wikiWord, errMsg))
            content = self._collectContent()
            page = self.wikiDocument.getWikiPageNoError(wikiWord)
            page.replaceLiveText(content)

    def _doImportVer1Pass1(self):
        while True:
            tag = self.importFile.readline()
            if tag == "":
                # End of file
                break
            tag = tag[:-1]
            if tag.startswith("funcpage/"):
                self._skipContent()
            elif tag.startswith("savedsearch/"):
                self._skipContent()
            elif tag.startswith("savedpagesearch/"):
                self._skipContent()
            elif tag.startswith("wikipage/"):
                self._skipContent()
            elif tag.startswith("versioning/overview/"):
                self._doImportItemVersioningOverviewVer1Pass1(tag[20:])
            elif tag.startswith("versioning/packet/versionNo/"):
                self._skipContent()
            else:
                # Unknown tag -> Ignore until separator
                self._skipContent()
                continue
            self.tempDb.execSql("insert or replace into entries(unifName, seen) "
                    "values (?, 1)", (tag,))

    def _readHintedDatablockVer1(self):
        """
        Reads datablock and preprocesses encoding if necessary.
        Returns either (hintStrings, content) or (None, None) if either
        an unknown important hint was found or if encoding had an error.

        hintStrings is a list of hints (as unistrings) which were not
        processed by the function (therefore the encoding hint is removed).
        content can be a bytestring or a unistring.

        If (None, None) is returned, the remaining content of the entry
        was skipped already by the function.
        """
        hintLine = self.importFile.readline()[:-1]
        hintStrings = hintLine.split(" ")
        resultHintStrings = []
        # Set default
        useB64 = False
        # Process hints
        for hint in hintStrings:
            if hint.startswith("important/encoding/"):
                if hint[19:] == "text":
                    useB64 = False
                elif hint[19:] == "base64":
                    useB64 = True
                else:
                    # Unknown encoding: don't read further
                    self._skipContent()
                    return None, None
            elif hint.startswith("important/"):
                # There is something important we do not understand
                self._skipContent()
                return None, None
            else:
                resultHintStrings.append(hint)
        content = self._collectContent()
        if useB64:
            try:
                content = base64BlockDecode(content)
            except TypeError:
                # base64 decoding failed
                self._skipContent()
                return None, None
        return (resultHintStrings, content)

    def _doImportItemVersioningOverviewVer1Pass1(self, subtag):
        hintStrings, content = self._readHintedDatablockVer1()
        if content is None:
            return
        # Always encode to UTF-8 no matter what the import file encoding is
        content = content.encode("utf-8")
        try:
            ovw = Versioning.VersionOverview(self.wikiDocument,
                    unifiedBasePageName=subtag)
            ovw.readOverviewFromBytes(content)
            ovwUnifName = ovw.getUnifiedName()
            self.tempDb.execSql("insert or replace into depgraph(unifName, neededBy) "
                    "values (?, ?)", (subtag, ovwUnifName))
            self.tempDb.execSql("insert or replace into renamegraph(unifName, dependent) "
                    "values (?, ?)", (subtag, ovwUnifName))
            for depUnifName in ovw.getDependentDataBlocks(omitSelf=True):
                # Mutual dependency between version overview and each version packet
                self.tempDb.execSql("insert or replace into depgraph(unifName, neededBy) "
                        "values (?, ?)", (depUnifName, ovwUnifName))
                self.tempDb.execSql("insert or replace into depgraph(unifName, neededBy) "
                        "values (?, ?)", (ovwUnifName, depUnifName))
                self.tempDb.execSql("insert or replace into renamegraph(unifName, dependent) "
                        "values (?, ?)", (subtag, depUnifName))
#                 self.tempDb.execSql("insert or replace into entries(unifName, needed) "
#                         "values (?, 1)", (depUnifName,))
        except VersioningException:
            return

    def _doImportVer1Pass2(self):
        wikiDoc = self.wikiDocument
        # We have to rename present items
        # First wikipages because this automatically renames depending version data
        for pageFrom, pageTo in self.tempDb.execSqlQuery("""
                select substr(unifName, 10), substr(renamePresentTo, 10)
                from entries where unifName glob 'wikipage/*' and
                renamePresentTo glob 'wikipage/*'
                """):
            if wikiDoc.isDefinedWikiPageName(pageFrom):
                wikiDoc.renameWikiWords({pageFrom: pageTo},
                        Consts.ModifyText.off)
                # TODO How to handle rename of home page?
        # Then remaining data blocks
        for oldUnifName, newUnifName in self.tempDb.execSqlQuery("""
                select unifName, renamePresentTo
                from entries where unifName not glob 'wikipage/*' and
                renamePresentTo != ''
                """):
            wikiDoc.renameDataBlock(oldUnifName, newUnifName)
        # For wiki pages with versions to import, existing versions must be
        # deleted
        for wikiWord in self.tempDb.execSqlQuerySingleColumn("""
                select substr(unifName, 10)
                from entries where unifName glob 'wikipage/*' and
                renameImportTo == '' and not dontImport and importVersionData
                union
                select substr(renameImportTo, 10)
                from entries where unifName glob 'wikipage/*' and
                renameImportTo glob 'wikipage/*' and not dontImport
                and importVersionData
                """):
            if not wikiDoc.isDefinedWikiPageName(wikiWord):
                continue
            page = wikiDoc.getWikiPage(wikiWord)
            versionOverview = page.getExistingVersionOverview()
            if versionOverview is not None:
                versionOverview.delete()
        while True:
            tag = self.importFile.readline()
            if tag == "":
                # End of file
                break
            tag = tag[:-1]  # Remove line end
            try:
                dontImport, renameImportTo = self.tempDb.execSqlQuery(
                        "select dontImport, renameImportTo from "
                        "entries where unifName = ?", (tag,))[0]
            except IndexError:
                # Maybe dangerous
                traceback.print_exc()
                self._skipContent()
                continue
            if dontImport:
                self._skipContent()
                continue
            if renameImportTo == "":
                renameImportTo = tag
            if tag.startswith("wikipage/"):
                self._importItemWikiPageVer1Pass2(renameImportTo[9:])
            elif tag.startswith("funcpage/"):
                self._importItemFuncPageVer1Pass2(tag[9:])
            elif tag.startswith("savedsearch/"):
                self._importB64DatablockVer1Pass2(renameImportTo)
            elif tag.startswith("savedpagesearch/"):
                self._importHintedDatablockVer1Pass2(renameImportTo)
            elif tag.startswith("versioning/"):
                self._importHintedDatablockVer1Pass2(renameImportTo)
            else:
                # Unknown tag -> Ignore until separator
                self._skipContent()
        for wikiWord in self.tempDb.execSqlQuerySingleColumn("""
                select substr(unifName, 10)
                from entries where unifName glob 'wikipage/*' and
                renamePresentTo == '' and importVersionData
                union
                select substr(renamePresentTo, 10)
                from entries where unifName glob 'wikipage/*' and
                renamePresentTo glob 'wikipage/*' and importVersionData
                """):
            if not wikiDoc.isDefinedWikiPageName(wikiWord):
                continue
            page = wikiDoc.getWikiPage(wikiWord)
            versionOverview = page.getExistingVersionOverview()
            if versionOverview is not None:
                versionOverview.readOverview()

    def _importItemWikiPageVer1Pass2(self, wikiWord):
        timeStampLine = self.importFile.readline()[:-1]
        timeStrings = timeStampLine.split(" ")
        if len(timeStrings) < 3:
            traceback.print_exc()
            self._skipContent()
            return  # TODO Report error
        timeStrings = timeStrings[:3]
        try:
            timeStrings = [str(ts) for ts in timeStrings]
        except UnicodeEncodeError:
            traceback.print_exc()
            self._skipContent()
            return  # TODO Report error
        try:
            timeStamps = [timegm(time.strptime(ts, "%Y-%m-%d/%H:%M:%S"))
                    for ts in timeStrings]
        except (ValueError, OverflowError):
            traceback.print_exc()
            self._skipContent()
            return  # TODO Report error
        content = self._collectContent()
        page = self.wikiDocument.getWikiPageNoError(wikiWord)
        # TODO How to handle versions here?
        page.replaceLiveText(content)
        if page.getTxtEditor() is not None:
            page.writeToDatabase()
        page.setTimestamps(timeStamps)

    def _importItemFuncPageVer1Pass2(self, subtag):
        # The subtag is functional page tag
        try:
            # subtag is unicode but func tags are bytestrings
            subtag = str(subtag)
        except UnicodeEncodeError:
            self._skipContent()
            return
        content = self._collectContent()
        try:
            page = self.wikiDocument.getFuncPage(subtag)
            page.replaceLiveText(content)
        except BadFuncPageTagException:
            # This function tag is bad or unknown -> ignore
            return  # TODO Report error

    def _importB64DatablockVer1Pass2(self, unifName):
        # Content is base64 encoded
        b64Content = self._collectContent()
        try:
            datablock = base64BlockDecode(b64Content)
            self.wikiDocument.getWikiData().storeDataBlock(unifName, datablock,
                    storeHint=Consts.DATABLOCK_STOREHINT_INTERN)
        except TypeError:
            # base64 decoding failed
            return  # TODO Report error

    def _importTextDatablockVer1Pass2(self, unifName):
        content = self._collectContent()
        try:
            self.wikiDocument.getWikiData().storeDataBlock(unifName, content,
                    storeHint=Consts.DATABLOCK_STOREHINT_INTERN)
        except TypeError:
            return  # TODO Report error

    def _importHintedDatablockVer1Pass2(self, unifName):
        """
        A hinted datablock starts with an extra line defining encoding
        (text or B64) and storage hint. It was introduced later, therefore
        only versioning packets use this while saved searches don't.
        """
        hintStrings, content = self._readHintedDatablockVer1()
        if hintStrings is None:
            return
        # Set defaults
        storeHint = Consts.DATABLOCK_STOREHINT_INTERN
        # Process hints
        for hint in hintStrings:
            if hint.startswith("storeHint/"):
                if hint[10:] == "extern":
                    storeHint = Consts.DATABLOCK_STOREHINT_EXTERN
                elif hint[10:] == "intern":
                    storeHint = Consts.DATABLOCK_STOREHINT_INTERN
                # No else. It is not vital to get the right storage hint
        try:
            if isinstance(content, str):
                content = BOM_UTF8 + content.encode("utf-8")
            self.wikiDocument.getWikiData().storeDataBlock(unifName, content,
                    storeHint=storeHint)
        except TypeError:
            traceback.print_exc()
            return  # TODO Report error
class SmtLibSolver(Solver):
    """Wrapper for using a solver via textual SMT-LIB interface.

    The solver is launched in a subprocess using args as arguments of
    the executable. Interaction with the solver occurs via pipe.
    """

    OptionsClass = SmtLibOptions

    def __init__(self, args, environment, logic, LOGICS=None, **options):
        Solver.__init__(self, environment, logic=logic, **options)
        self.to = self.environment.typeso
        if LOGICS is not None:
            self.LOGICS = LOGICS
        self.args = args
        self.declared_vars = set()
        self.declared_sorts = set()
        self.solver = Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE,
                            bufsize=-1)
        # Give time to the process to start-up
        time.sleep(0.01)
        self.parser = SmtLibParser(interactive=True)
        if PY2:
            self.solver_stdin = self.solver.stdin
            self.solver_stdout = self.solver.stdout
        else:
            self.solver_stdin = TextIOWrapper(self.solver.stdin)
            self.solver_stdout = TextIOWrapper(self.solver.stdout)
        # Initialize solver
        self.options(self)
        self.set_logic(logic)

    def set_option(self, name, value):
        self._send_silent_command(
            SmtLibCommand(smtcmd.SET_OPTION, [name, value]))

    def set_logic(self, logic):
        self._send_silent_command(SmtLibCommand(smtcmd.SET_LOGIC, [logic]))

    def _debug(self, msg, *format_args):
        if self.options.debug_interaction:
            print(msg % format_args)

    def _send_command(self, cmd):
        """Sends a command to the STDIN pipe."""
        self._debug("Sending: %s", cmd.serialize_to_string())
        cmd.serialize(self.solver_stdin, daggify=True)
        self.solver_stdin.write("\n")
        self.solver_stdin.flush()

    def _send_silent_command(self, cmd):
        """Sends a command to the STDIN pipe and awaits acknowledgment."""
        self._send_command(cmd)
        self._check_success()

    def _get_answer(self):
        """Reads a line from the STDOUT pipe."""
        res = self.solver_stdout.readline().strip()
        self._debug("Read: %s", res)
        return res

    def _get_value_answer(self):
        """Reads and parses an assignment from the STDOUT pipe."""
        lst = self.parser.get_assignment_list(self.solver_stdout)
        self._debug("Read: %s", lst)
        return lst

    def _declare_sort(self, sort):
        cmd = SmtLibCommand(smtcmd.DECLARE_SORT, [sort])
        self._send_silent_command(cmd)
        self.declared_sorts.add(sort)

    def _declare_variable(self, symbol):
        cmd = SmtLibCommand(smtcmd.DECLARE_FUN, [symbol])
        self._send_silent_command(cmd)
        self.declared_vars.add(symbol)

    def _check_success(self):
        res = self._get_answer()
        if res != "success":
            raise UnknownSolverAnswerError("Solver returned: '%s'" % res)

    def solve(self, assumptions=None):
        assert assumptions is None
        self._send_command(SmtLibCommand(smtcmd.CHECK_SAT, []))
        ans = self._get_answer()
        if ans == "sat":
            return True
        elif ans == "unsat":
            return False
        elif ans == "unknown":
            raise SolverReturnedUnknownResultError
        else:
            raise UnknownSolverAnswerError("Solver returned: " + ans)

    def reset_assertions(self):
        self._send_silent_command(SmtLibCommand(smtcmd.RESET_ASSERTIONS, []))
        return

    def add_assertion(self, formula, named=None):
        # This is needed because Z3 (and possibly other solvers) incorrectly
        # recognize N * M * x as a non-linear term
        formula = formula.simplify()
        sorts = self.to.get_types(formula, custom_only=True)
        for s in sorts:
            if s not in self.declared_sorts:
                self._declare_sort(s)
        deps = formula.get_free_variables()
        for d in deps:
            if d not in self.declared_vars:
                self._declare_variable(d)
        self._send_silent_command(SmtLibCommand(smtcmd.ASSERT, [formula]))

    def push(self, levels=1):
        self._send_silent_command(SmtLibCommand(smtcmd.PUSH, [levels]))

    def pop(self, levels=1):
        self._send_silent_command(SmtLibCommand(smtcmd.POP, [levels]))

    def get_value(self, item):
        self._send_command(SmtLibCommand(smtcmd.GET_VALUE, [item]))
        lst = self._get_value_answer()
        assert len(lst) == 1
        assert len(lst[0]) == 2
        return lst[0][1]

    def print_model(self, name_filter=None):
        if name_filter is not None:
            raise NotImplementedError
        for v in self.declared_vars:
            print("%s = %s" % (v, self.get_value(v)))

    def get_model(self):
        assignment = {}
        for s in self.environment.formula_manager.get_all_symbols():
            if s.is_term():
                v = self.get_value(s)
                assignment[s] = v
        return EagerModel(assignment=assignment,
                          environment=self.environment)

    def _exit(self):
        self._send_command(SmtLibCommand(smtcmd.EXIT, []))
        self.solver_stdin.close()
        self.solver_stdout.close()
        self.solver.stderr.close()
        self.solver.terminate()
        return
class FileObjectPosix(object):
    """
    A file-like object that operates on non-blocking files but
    provides a synchronous, cooperative interface.

    .. caution::
         This object is most effective wrapping files that can be used
         appropriately with :func:`select.select` such as sockets and pipes.

         In general, on most platforms, operations on regular files
         (e.g., ``open('/etc/hosts')``) are considered non-blocking
         already, even though they can take some time to complete as
         data is copied to the kernel and flushed to disk (this time
         is relatively bounded compared to sockets or pipes, though).
         A :func:`~os.read` or :func:`~os.write` call on such a file
         will still effectively block for some small period of time.
         Therefore, wrapping this class around a regular file is
         unlikely to make IO gevent-friendly: reading or writing large
         amounts of data could still block the event loop.

         If you'll be working with regular files and doing IO in large
         chunks, you may consider using
         :class:`~gevent.fileobject.FileObjectThread` or
         :func:`~gevent.os.tp_read` and :func:`~gevent.os.tp_write`
         to bypass this concern.

    .. note::
         Random read/write (e.g., ``mode='rwb'``) is not supported. For that,
         use :class:`io.BufferedRWPair` around two instances of this class.

    .. tip::
         Although this object provides a :meth:`fileno` method and
         so can itself be passed to :func:`fcntl.fcntl`, setting the
         :data:`os.O_NONBLOCK` flag will have no effect; however, removing
         that flag will cause this object to no longer be cooperative.

    .. versionchanged:: 1.1
       Now uses the :mod:`io` package internally. Under Python 2, previously
       used the undocumented class :class:`socket._fileobject`. This provides
       better file-like semantics (and portability to Python 3).
    """

    #: platform specific default for the *bufsize* parameter
    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        """
        :keyword fobj: Either an integer fileno, or an object supporting the
            usual :meth:`socket.fileno` method. The file *will* be
            put in non-blocking mode using :func:`gevent.os.make_nonblocking`.
        :keyword str mode: The manner of access to the file, one of "rb", "rU"
            or "wb" (where the "b" or "U" can be omitted).
            If "U" is part of the mode, IO will be done on text, otherwise
            bytes.
        :keyword int bufsize: If given, the size of the buffer to use. The
            default value means to use a platform-specific default, and a
            value of 0 is translated to a value of 1. Other values are
            interpreted as for the :mod:`io` package.
            Buffering is ignored in text mode.
        """
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        orig_mode = mode
        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False

        if len(mode) != 1 and mode not in 'rw':  # pragma: no cover
            # Python 3 builtin `open` raises a ValueError for invalid modes;
            # Python 2 ignores it. In the past, we raised an AssertionError,
            # if __debug__ was enabled (which it usually was). Match Python 3
            # because it makes more sense and because __debug__ may not be
            # enabled.
            # NOTE: This is preventing a mode like 'rwb' for binary random
            # access; that code was never tested and was explicitly marked
            # as "not used"
            raise ValueError('mode can only be [rb, rU, wb], not %r' % (orig_mode,))

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0 or bufsize == 1:
            bufsize = self.default_bufsize
        elif bufsize == 0:
            bufsize = 1

        if mode == 'r':
            self.io = BufferedReader(self.fileio, bufsize)
        else:
            assert mode == 'w'
            self.io = BufferedWriter(self.fileio, bufsize)
        #else: # QQQ: not used, not reachable
        #    self.io = BufferedRandom(self.fileio, bufsize)

        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is closed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, lines):
        self.io.writelines(lines)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def readable(self):
        """
        .. versionadded:: 1.1b2
        """
        return self.io.readable()

    def writable(self):
        """
        .. versionadded:: 1.1b2
        """
        return self.io.writable()

    def seek(self, *args, **kwargs):
        return self.io.seek(*args, **kwargs)

    def seekable(self):
        return self.io.seekable()

    def tell(self):
        return self.io.tell()

    def truncate(self, size=None):
        return self.io.truncate(size)

    def __iter__(self):
        return self.io

    def __getattr__(self, name):
        # XXX: Should this really be _fobj, or self.io?
        # _fobj can easily be None but io never is
        return getattr(self._fobj, name)
class FileObjectPosix(object):
    """
    A file-like object that operates on non-blocking files.

    .. seealso:: :func:`gevent.os.make_nonblocking`
    """

    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        """
        :param fobj: Either an integer fileno, or an object supporting the
            usual :meth:`socket.fileno` method. The file will be
            put in non-blocking mode.
        """
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False
        assert len(mode) == 1, 'mode can only be [rb, rU, wb]'

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0:
            bufsize = self.default_bufsize
        if mode == 'r':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedReader(self.fileio, bufsize)
        elif mode == 'w':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedWriter(self.fileio, bufsize)
        else:
            # QQQ: not used
            self.io = BufferedRandom(self.fileio, bufsize)
        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is closed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, lines):
        self.io.writelines(lines)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def seek(self, *args, **kwargs):
        return self.io.seek(*args, **kwargs)

    def seekable(self):
        return self.io.seekable()

    def tell(self):
        return self.io.tell()

    def truncate(self, size=None):
        return self.io.truncate(size)

    def __iter__(self):
        return self.io

    def __getattr__(self, name):
        return getattr(self._fobj, name)
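# A small sketch of the bufsize translation implemented in the 'r'/'w'
# branches above: -1 and 1 both resolve to the platform default, while 0
# ("unbuffered") is bumped to 1 because the io buffered classes reject a zero
# buffer size. resolve_bufsize is a hypothetical helper that only mirrors the
# constructor's logic; it is not part of the class.
import io

def resolve_bufsize(bufsize):
    # same mapping as __init__ above applies per mode
    if bufsize < 0:
        bufsize = io.DEFAULT_BUFFER_SIZE
    if bufsize == 0:
        bufsize = 1
    elif bufsize == 1:
        bufsize = io.DEFAULT_BUFFER_SIZE
    return bufsize

assert resolve_bufsize(-1) == io.DEFAULT_BUFFER_SIZE
assert resolve_bufsize(0) == 1
assert resolve_bufsize(1) == io.DEFAULT_BUFFER_SIZE
assert resolve_bufsize(4096) == 4096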
class FileObjectPosix(object):
    """
    A file-like object that operates on non-blocking files.

    .. seealso:: :func:`gevent.os.make_nonblocking`
    """

    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        """
        :param fobj: Either an integer fileno, or an object supporting the
            usual :meth:`socket.fileno` method. The file will be
            put in non-blocking mode.
        """
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        orig_mode = mode
        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False

        if len(mode) != 1:
            # Python 3 builtin `open` raises a ValueError for invalid modes;
            # Python 2 ignores it. In the past, we raised an AssertionError, if __debug__ was
            # enabled (which it usually was). Match Python 3 because it makes more sense
            # and because __debug__ may not be enabled.
            raise ValueError('mode can only be [rb, rU, wb], not %r' % (orig_mode,))

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0:
            bufsize = self.default_bufsize
        if mode == 'r':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedReader(self.fileio, bufsize)
        elif mode == 'w':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedWriter(self.fileio, bufsize)
        else:
            # QQQ: not used
            self.io = BufferedRandom(self.fileio, bufsize)
        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is closed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, lines):
        self.io.writelines(lines)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def readable(self):
        return self.io.readable()

    def writable(self):
        return self.io.writable()

    def seek(self, *args, **kwargs):
        return self.io.seek(*args, **kwargs)

    def seekable(self):
        return self.io.seekable()

    def tell(self):
        return self.io.tell()

    def truncate(self, size=None):
        return self.io.truncate(size)

    def __iter__(self):
        return self.io

    def __getattr__(self, name):
        # XXX: Should this really be _fobj, or self.io?
        # _fobj can easily be None but io never is
        return getattr(self._fobj, name)
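# A sketch of the rejection path above: with this revision's validation, any
# mode that does not reduce to a single letter raises ValueError before a
# GreenFileDescriptorIO is created. Note that other revisions shown in this
# document validate differently (e.g. via an assert), so the exception type
# depends on which version is running. Import location assumed as before.
import os

from gevent.fileobject import FileObjectPosix

r, w = os.pipe()
try:
    FileObjectPosix(r, 'rwb')       # 'rw' remains after stripping 'b'
except ValueError as exc:
    print(exc)                      # mode can only be [rb, rU, wb], not 'rwb'
finally:
    os.close(r)
    os.close(w)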
class FileObjectPosix:

    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False
        assert len(mode) == 1, 'mode can only be [rb, rU, wb]'

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0:
            bufsize = self.default_bufsize
        if mode == 'r':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedReader(self.fileio, bufsize)
        elif mode == 'w':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedWriter(self.fileio, bufsize)
        else:
            # QQQ: not used
            self.io = BufferedRandom(self.fileio, bufsize)
        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is closed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, lines):  # renamed from `list` to avoid shadowing the builtin
        self.io.writelines(lines)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def __iter__(self):
        return self.io
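# Because __iter__ above returns self.io, iterating the wrapper delegates
# line iteration to the underlying buffered object. A small sketch under the
# same assumed import location as the earlier examples:
import os

from gevent.fileobject import FileObjectPosix

r, w = os.pipe()
writer = FileObjectPosix(w, 'wb')
writer.writelines([b'alpha\n', b'beta\n'])
writer.close()                      # flushes and signals EOF to the read end

reader = FileObjectPosix(r, 'rb')
for line in reader:                 # iterates the BufferedReader directly
    print(line)                     # b'alpha\n', then b'beta\n'
reader.close()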
class FileObjectPosix(object):
    """
    A file-like object that operates on non-blocking files but
    provides a synchronous, cooperative interface.

    .. note::
         Random read/write (e.g., ``mode='rwb'``) is not supported.
         For that, use :class:`io.BufferedRWPair` around two instances of this class.

    .. tip::
         Although this object provides a :meth:`fileno` method and so
         can itself be passed to :func:`fcntl.fcntl`, setting the
         :data:`os.O_NONBLOCK` flag will have no effect; however,
         removing that flag will cause this object to no longer be
         cooperative.
    """

    #: platform specific default for the *bufsize* parameter
    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        """
        :keyword fobj: Either an integer fileno, or an object supporting the
            usual :meth:`socket.fileno` method. The file *will* be
            put in non-blocking mode using :func:`gevent.os.make_nonblocking`.
        :keyword str mode: The manner of access to the file, one of "rb", "rU" or "wb"
            (where the "b" or "U" can be omitted).
            If "U" is part of the mode, IO will be done on text, otherwise bytes.
        :keyword int bufsize: If given, the size of the buffer to use. The default
            value means to use a platform-specific default, and a value of 0 is
            translated to a value of 1. Other values are interpreted as for the
            :mod:`io` package. Buffering is ignored in text mode.
        """
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        orig_mode = mode
        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False

        # Only a single 'r' or 'w' may remain after stripping 'b'/'U'.
        if mode not in ('r', 'w'):  # pragma: no cover
            # Python 3 builtin `open` raises a ValueError for invalid modes;
            # Python 2 ignores it. In the past, we raised an AssertionError, if __debug__ was
            # enabled (which it usually was). Match Python 3 because it makes more sense
            # and because __debug__ may not be enabled.
            # NOTE: This is preventing a mode like 'rwb' for binary random access;
            # that code was never tested and was explicitly marked as "not used"
            raise ValueError('mode can only be [rb, rU, wb], not %r' % (orig_mode,))

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0 or bufsize == 1:
            bufsize = self.default_bufsize
        elif bufsize == 0:
            bufsize = 1

        if mode == 'r':
            self.io = BufferedReader(self.fileio, bufsize)
        else:
            assert mode == 'w'
            self.io = BufferedWriter(self.fileio, bufsize)
        #else: # QQQ: not used, not reachable
        #    self.io = BufferedRandom(self.fileio, bufsize)

        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is closed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, lines):
        self.io.writelines(lines)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def readable(self):
        return self.io.readable()

    def writable(self):
        return self.io.writable()

    def seek(self, *args, **kwargs):
        return self.io.seek(*args, **kwargs)

    def seekable(self):
        return self.io.seekable()

    def tell(self):
        return self.io.tell()

    def truncate(self, size=None):
        return self.io.truncate(size)

    def __iter__(self):
        return self.io

    def __getattr__(self, name):
        # XXX: Should this really be _fobj, or self.io?
        # _fobj can easily be None but io never is
        return getattr(self._fobj, name)
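# Sketch of the 'U' (translate) path above, as implemented in this revision:
# a mode containing 'U' wraps the buffered stream in io.TextIOWrapper, so
# reads return str with universal-newline translation. Same assumed import
# location as in the earlier examples.
import os

from gevent.fileobject import FileObjectPosix

r, w = os.pipe()
writer = FileObjectPosix(w, 'wb')
writer.write(b'one\r\ntwo\n')
writer.close()

reader = FileObjectPosix(r, 'rU')   # _translate=True -> TextIOWrapper
print(repr(reader.read()))          # 'one\ntwo\n' -- str, '\r\n' normalized
reader.close()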