def odps(self, line, cell):
    """Execute the SQL in *cell* against the ODPS client on ``self.odps``.

    Behavior depends on the statement's leading keyword:
      - DROP / CREATE / INSERT: return a status string after the instance
        finishes (success or error message).
      - DESC: parse the formatted, pipe-delimited table description emitted
        by ODPS into a single-row ``pd.DataFrame`` of metadata plus
        FieldName-i / FieldType-i column pairs.
      - anything else (assumed SELECT): return the result rows as a
        ``pd.DataFrame``.

    NOTE(review): `self.odps` shadows this method's name — presumably an
    attribute holding the ODPS client set up elsewhere; confirm against the
    enclosing class. *line* is accepted but unused (cell-magic signature).
    """
    # Normalize the cell body to a plain string.
    sql = StringIO(cell).getvalue()
    column = []
    fields = []
    instance = self.odps.execute_sql(sql)
    # DDL/DML statements have no result set; report success/failure only.
    if sql[0:4].upper() == "DROP" or sql[0:6].upper() == "CREATE" or sql[0:6].upper() == "INSERT":
        if instance.is_successful():
            return "successfully finished {}".format(sql.strip())
        else:
            return "Error Occured {}".format(sql.strip())
    with instance.open_reader() as reader:
        if sql[0:4].upper() == "DESC":
            # DESC output is an ASCII table: "+-..." border rows, then
            # "key: value" metadata rows, then — after a row whose first
            # cell is "Field" — "| name | type |" schema rows.
            field_flag = False  # True once the schema header row was seen
            idx = 1             # 1-based counter for FieldName-i/FieldType-i
            for record in reader:
                for field in record:
                    # Skip horizontal border rows like "+----+----+".
                    if field[1][0:2] == "+-":
                        continue
                    if field_flag:
                        # Schema section: "|name|type|" → strip outer bars,
                        # split the interior on "|".
                        x = field[1][1:-1].split("|")
                        column.append("FieldName-{}".format(idx))
                        fields.append(x[0].strip())
                        column.append("FieldType-{}".format(idx))
                        fields.append(x[1].strip())
                        idx += 1
                    else:
                        # Metadata section: cells hold "key: value" pairs.
                        for item in field[1][1:-1].strip().split("|"):
                            x = item.split(":", 1)
                            if x[0].strip() == "Field":
                                # Header row marks start of the schema section.
                                field_flag = True
                                break
                            if len(x) == 1:
                                # No ":" present — not a key/value cell.
                                continue
                            column.append(x[0].strip())
                            fields.append(x[1].strip() if len(x) == 2 else "")
            # All parsed values form one row of the resulting frame.
            return pd.DataFrame([fields], columns=column)
        else:  # SELECT
            for record in reader:
                c = []
                f = []
                for field in record:
                    c.append(field[0])  # column name
                    f.append(field[1])  # cell value
                # Column names are identical on every record; capture once.
                if len(column) == 0:
                    column = c
                fields.append(f)
            return pd.DataFrame(fields, columns=column)
def save(self, path, separator=",", encoder=lambda v: v, headers=False, password=None, **kwargs):
    """ Exports the table to a unicode text file at the given path.
        Rows in the file are separated with a newline.
        Columns in a row are separated with the given separator (by default, comma).
        For data types other than string, int, float, bool or None,
        a custom string encoder can be given.
    """
    # Optional parameters include all arguments for csv.writer(), see:
    # http://docs.python.org/library/csv.html#csv.writer
    kwargs.setdefault("delimiter", separator)
    kwargs.setdefault("quoting", csvlib.QUOTE_ALL)
    # csv.writer will handle str, int, float and bool:
    s = StringIO()
    w = csvlib.writer(s, **kwargs)
    if headers and self.fields is not None:
        w.writerows([[csv_header_encode(name, type) for name, type in self.fields]])
    w.writerows([[encoder(v) for v in row] for row in self])
    s = s.getvalue()
    s = s.strip()
    # Unquote bare None cells so they round-trip as None, not the string "None".
    s = re.sub("([^\"]|^)\"None\"", "\\1None", s)
    s = s if not password else encrypt_string(s, password)
    # Fix: use a context manager so the handle is closed even if write() raises
    # (the original open()/close() pair leaked the file on error).
    # NOTE(review): BOM_UTF8 is written through a text-mode handle, so it is
    # presumably a str BOM in this project — confirm against its definition.
    with open(path, "w", encoding="utf-8") as f:
        f.write(BOM_UTF8)
        f.write(s)
def exec_pdftk(filename):
    """Flatten *filename* via the ``pdftk`` CLI and return its stdout.

    Returns the captured standard output as a ``BytesIO`` buffer.
    Raises ``IOError`` if pdftk wrote anything to standard error.
    """
    out_buf = BytesIO()
    err_buf = StringIO()
    sh.pdftk(
        filename,
        'output', '-',
        'dont_ask',
        'flatten',
        _out=out_buf,
        _err=err_buf,
    )
    err_text = err_buf.getvalue()
    if err_text.strip():
        raise IOError(err_text)
    return out_buf
def _exec_pdftk(pdf, items, outfile):
    """Fill *pdf*'s form fields with *items* via ``pdftk`` and flatten to *outfile*.

    Raises ``IOError`` if pdftk wrote anything to standard error.
    """
    err_buf = StringIO()
    fdf_data = forge_fdf("", items, [], [], [])
    # pylint: disable=E1101
    sh.pdftk(
        pdf,
        "fill_form", "-",
        "output", outfile,
        "dont_ask",
        "flatten",
        _in=fdf_data,
        _err=err_buf,
    )
    err_text = err_buf.getvalue()
    if err_text.strip():
        raise IOError(err_text)
def test_read_properties(self):
    """Round-trip a .properties document through read_properties/to_properties."""
    raw = r"""
[section]
# a comment
! another comment
k\:e\\y = v:a\\lue
"""
    # Parsing: escaped key/value land in a two-column frame, key prefixed
    # with its section.
    df = UntypedDf.read_properties(StringIO(raw))
    assert df.column_names() == ["key", "value"]
    assert df.values.tolist() == [[r"section.k:e\y", r"v:a\lue"]]
    # Serializing: section header and re-escaped pair both appear.
    serialized: str = df.to_properties()
    stripped = [line.strip() for line in serialized.splitlines()]
    assert "[section]" in stripped
    assert r"k\:e\\y = v:a\\lue" in stripped
    # Re-parsing the serialized form yields the same data.
    df2 = UntypedDf.read_properties(StringIO(serialized))
    assert df2.values.tolist() == df.values.tolist()
def run_integration_test(input, args):
    """Run pawk with *args*, feeding *input* (stripped) on stdin.

    Returns pawk's stdout with surrounding whitespace stripped.
    """
    stdin = StringIO(input.strip())
    stdout = StringIO()
    run(['pawk'] + args, stdin, stdout)
    return stdout.getvalue().strip()
def parse_data(df):
    """Split df['Locations'] — a string of ')'-terminated tuples — into a
    list of per-tuple string-component lists.

    NOTE(review): assumes df['Locations'] looks like "(a, b),(c, d),…" —
    confirm against the producer of this column.
    """
    # Wrap the string so genfromtxt can consume it as a file-like object.
    x = StringIO(df['Locations'])
    # Split the single line at every ')'.
    # NOTE(review): dtype=tuple and the later .decode() imply genfromtxt
    # yields byte-string cells here — this is numpy-version-sensitive; verify
    # on the numpy release actually in use.
    y = np.genfromtxt(x, delimiter=')', dtype=tuple)
    elements = [ele.decode() for ele in y]
    # Strip tuple punctuation from each chunk, then split into components.
    # NOTE(review): `x` is rebound from the StringIO above to the loop
    # variable here — intentional-looking but worth renaming someday.
    return [x.strip(" (),{}()").split(',') for x in elements]