def summarize_records(recids, of, ln, searchpattern="", searchfield="",
                      req=None, collections=CFG_CITESUMMARY_COLLECTIONS):
    """Write summary report for records RECIDS in the format OF in language LN.

    SEARCHPATTERN and SEARCHFIELD are the search query that led to RECIDS,
    for instance p='Smith, Paul' and f='author'. They are used for links.
    REQ is the Apache/mod_python request object.
    """
    if of == 'xcs':
        # this is XML cite summary
        citedbylist = get_cited_by_list(recids)
        return render_citation_summary_xml(citedbylist)

    has_req = req is not None
    if not has_req:
        req = StringIO()

    if of == 'hcs':
        renderer = render_citation_summary
    else:
        renderer = render_extended_citation_summary

    renderer(req, ln, recids, collections, searchpattern, searchfield)

    req.write(websearch_templates.tmpl_citesummary_footer())

    if has_req:
        return ''
    else:
        return req.getvalue()
def write_css(context, folder, meta_bundle):
    registry = getUtility(IRegistry)
    resources = []

    bundles = registry.collectionOfInterface(
        IBundleRegistry, prefix='plone.bundles', check=False
    )
    for bundle in bundles.values():
        if bundle.merge_with == meta_bundle and bundle.csscompilation:
            css = get_resource(context, bundle.csscompilation)
            if not css:
                continue

            (path, sep, filename) = bundle.csscompilation.rpartition('/')
            # Process relative urls:
            # we prefix with current resource path any url not starting with
            # '/' or http: or data:
            css = re.sub(
                r"""(url\(['"]?(?!['"]?([a-z]+:|\/)))""",
                r'\1%s/' % path,
                css)
            resources.append(css)

    fi = StringIO()
    for script in resources:
        fi.write(script + '\n')
    folder.writeFile(meta_bundle + '.css', fi)
def make_word(self, seed=None, min=3, max=30, tries=100):
    if seed is not None or not self._seeded:
        self._random.seed(seed)
    out = StringIO()
    tail = CircularBuffer(self.tail)
    tail.append(WORD_START)
    while True:
        c = self.choose_transition(self.transitions(tail.tuple()),
                                   self._random.random())
        if c == WORD_STOP:
            break
        else:
            out.write(c)
            tail.append(c)
    result = out.getvalue()
    out.close()
    if min <= len(result) <= max:
        return result
    elif tries > 0:
        return self.make_word(seed, min, max, tries - 1)
    else:
        raise MatrixError
def __call__(self, environ, start_response):
    script_name = environ.get('SCRIPT_NAME', '')
    path_info = environ.get('PATH_INFO', '')

    sent = []
    written_response = StringIO()

    def replacement_start_response(status, headers, exc_info=None):
        if not self.should_filter(status, headers):
            return start_response(status, headers, exc_info)
        else:
            sent[:] = [status, headers, exc_info]
            return written_response.write

    app_iter = self.app(environ, replacement_start_response)
    if not sent:
        return app_iter
    status, headers, exc_info = sent
    try:
        for chunk in app_iter:
            written_response.write(chunk)
    finally:
        if hasattr(app_iter, 'close'):
            app_iter.close()
    body = written_response.getvalue()
    status, headers, body = self.filter(
        script_name, path_info, environ, status, headers, body)
    start_response(status, headers, exc_info)
    return [body]
def manage_test(self, query, REQUEST=None):
    """Executes the SQL in parameter 'query' and returns results"""
    dbc = self()  # get our connection
    res = dbc.query(query)

    if isinstance(res, type('')):
        f = StringIO()
        f.write(res)
        f.seek(0)
        result = RDB.File(f)
    else:
        result = Results(res)

    if REQUEST is None:
        return result  # return unadulterated result objects

    if result._searchable_result_columns():
        r = custom_default_report(self.id, result)
    else:
        r = 'This statement returned no results.'

    report = HTML(
        '<html><body bgcolor="#ffffff" link="#000099" vlink="#555555">\n'
        '<dtml-var name="manage_tabs">\n<hr>\n%s\n\n'
        '<hr><strong>SQL Used:</strong><br>\n<pre>\n%s\n</pre>\n<hr>\n'
        '</body></html>' % (r, query))

    report = report(*(self, REQUEST), **{self.id: result})
    return report
def _parse_config():
    """Parse the config file, set up defaults."""
    config = configparser.RawConfigParser(defaults=_defaults)

    if not os.path.exists(config_file):
        # Create an empty config file if there was none so far
        fh = open(config_file, "w")
        fh.close()
        logger.info("Could not find a configuration file at %s. Going to "
                    "create an empty file there." % config_file)

    try:
        # Cheat the ConfigParser module by adding a fake section header
        config_file_ = StringIO()
        config_file_.write("[FAKE_SECTION]\n")
        with open(config_file) as fh:
            for line in fh:
                config_file_.write(line)
        config_file_.seek(0)
        config.readfp(config_file_)
    except OSError as e:
        logging.info("Error opening file %s: %s", config_file, e.message)
    return config
def _parse_config():
    """Parse the config file, set up defaults."""
    defaults = {'apikey': apikey,
                'server': server,
                'verbosity': 0,
                'cachedir': os.path.expanduser('~/.openml/cache'),
                'avoid_duplicate_runs': 'True'}

    config_file = os.path.expanduser('~/.openml/config')
    config = configparser.RawConfigParser(defaults=defaults)

    if not os.path.exists(config_file):
        # Create an empty config file if there was none so far
        fh = open(config_file, "w")
        fh.close()
        logger.info("Could not find a configuration file at %s. Going to "
                    "create an empty file there." % config_file)

    try:
        # Cheat the ConfigParser module by adding a fake section header
        config_file_ = StringIO()
        config_file_.write("[FAKE_SECTION]\n")
        with open(config_file) as fh:
            for line in fh:
                config_file_.write(line)
        config_file_.seek(0)
        config.readfp(config_file_)
    except OSError as e:
        logging.info("Error opening file %s: %s", config_file, e.message)
    return config
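# A standalone sketch of the fake-section trick used in the two functions
# above: RawConfigParser refuses files that lack a section header, so one is
# prepended in memory before parsing. The path and option names below are
# illustrative only.
from six import StringIO
from six.moves import configparser


def read_sectionless(path):
    buf = StringIO()
    buf.write("[FAKE_SECTION]\n")
    with open(path) as fh:
        buf.write(fh.read())
    buf.seek(0)
    parser = configparser.RawConfigParser()
    parser.read_file(buf)  # the snippets above use the older readfp() spelling
    return parser

# e.g. read_sectionless(os.path.expanduser('~/.openml/config')).get('FAKE_SECTION', 'apikey')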
class _type_parser(object):

    NORMAL, IN_PAREN = 0, 1

    def __init__(self, value):
        self.value = value
        self.state = self.NORMAL
        self.buf = StringIO()
        self.types = []
        for c in value:
            self._step(c)
        self._push()

    def _push(self):
        val = self.buf.getvalue().strip()
        if val:
            self.types.append(val)
        self.buf = StringIO()

    def _step(self, c):
        if self.state == self.NORMAL:
            if c == '(':
                self.state = self.IN_PAREN
            elif c == ',':
                self._push()
                return
        elif self.state == self.IN_PAREN:
            if c == ')':
                self.state = self.NORMAL
        self.buf.write(c)
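# A quick usage sketch for the parser above: it splits a comma-separated
# type list while ignoring commas nested inside parentheses.
parsed = _type_parser('decimal(12, 2), string, array(int)')
print(parsed.types)  # ['decimal(12, 2)', 'string', 'array(int)']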
def __init__(self, settings=None, setup=True, check=False):
    if isinstance(settings, six.string_types):
        settingsfile = StringIO()
        settings = re.sub(r" *\\\n *", " ", settings)
        settingsfile.write(settings)
        settingsfile.seek(0)
        self.settings = configobj.ConfigObj(
            settingsfile,
            configspec=os.path.join(config.CONFIG_DIR, "settings.spec"))
        tools.cobj_check(self.settings, exception=PyfigError)
    elif check:
        self.settings = configobj.ConfigObj(
            settings,
            configspec=os.path.join(config.CONFIG_DIR, "settings.spec"))
        tools.cobj_check(self.settings, exception=PyfigError)
    else:
        self.settings = settings

    self.rows = []
    self.cols = []
    self.title = None
    self.repo = self._get_repo()
    self.plotlines = collections.defaultdict(list)
    self.labels = collections.defaultdict(list)
    self.style = collections.defaultdict(
        lambda: collections.defaultdict(dict))

    if setup:
        matplotlib.figure.Figure.__init__(
            self, figsize=self.settings["figsize"])
        self.setup()
def apply_locale(self, locale, out_fn=None):
    # Adjust the locale value to the new value
    newconf = StringIO()
    for line in util.load_file(self.login_conf_fn).splitlines():
        newconf.write(re.sub(r'^default:',
                             r'default:lang=%s:' % locale, line))
        newconf.write("\n")

    # Make a backup of login.conf.
    util.copy(self.login_conf_fn, self.login_conf_fn_bak)

    # And write the new login.conf.
    util.write_file(self.login_conf_fn, newconf.getvalue())

    try:
        LOG.debug("Running cap_mkdb for %s", locale)
        util.subp(['cap_mkdb', self.login_conf_fn])
    except util.ProcessExecutionError:
        # cap_mkdb failed, so restore the backup.
        util.logexc(LOG, "Failed to apply locale %s", locale)
        try:
            util.copy(self.login_conf_fn_bak, self.login_conf_fn)
        except IOError:
            util.logexc(LOG, "Failed to restore %s backup",
                        self.login_conf_fn)
def to_string(table):
    """
    Render ``table`` as a unicode string with a header of column indices,
    right-aligned cells, and numbered rows

    >>> type(to_string([['foo', 'goodbye'], ['llama', 'bar']]))
    <type 'unicode'>
    """
    result = StringIO()
    (columns, rows) = get_dimensions(table)
    result.write(" {} columns, {} rows\n".format(columns, rows))
    col_widths = find_column_widths(table)
    table_width = sum(col_widths) + len(col_widths) + 2
    hbar = ' {}\n'.format('-' * table_width)
    result.write(" {}\n".format(' '.join(
        [six.text_type(col_index).rjust(width, ' ')
         for (col_index, width) in enumerate(col_widths)])))
    result.write(hbar)
    for row_index, row in enumerate(table):
        cells = [cell.rjust(width, ' ')
                 for (cell, width) in zip(row, col_widths)]
        result.write("{:>3} | {}|\n".format(row_index, '|'.join(cells)))
    result.write(hbar)
    result.seek(0)
    return six.text_type(result.read())
def format_postamble(self):
    buf = StringIO()
    lines = 0

    if len(self.order_by) > 0:
        buf.write('ORDER BY ')
        formatted = []
        for expr in self.order_by:
            key = expr.op()
            translated = self._translate(key.expr)
            if not key.ascending:
                translated += ' DESC'
            formatted.append(translated)
        buf.write(', '.join(formatted))
        lines += 1

    if self.limit is not None:
        if lines:
            buf.write('\n')
        n, offset = self.limit['n'], self.limit['offset']
        buf.write('LIMIT {0}'.format(n))
        if offset is not None and offset != 0:
            buf.write(' OFFSET {0}'.format(offset))
        lines += 1

    if not lines:
        return None

    return buf.getvalue()
def report(result, csv_file):
    """generate report from backtest output file"""
    result_df = pd.read_pickle(result)

    csv_txt = StringIO()

    csv_txt.write("Trades\n")
    fieldnames = ['date', 'order_book_id', 'amount', 'price',
                  "commission", "tax"]
    writer = csv.DictWriter(csv_txt, fieldnames=fieldnames)
    writer.writeheader()
    for dt, trades in result_df.trades.iteritems():
        for t in trades:
            trade = dict(t.__dict__)
            trade.pop("order_id")
            trade["date"] = trade["date"].strftime("%Y-%m-%d %H:%M:%S")
            writer.writerow(trade)

    csv_txt.write("\nPositions\n")
    fieldnames = ['date', 'order_book_id', 'market_value', 'quantity']
    writer = csv.DictWriter(csv_txt, fieldnames=fieldnames)
    writer.writeheader()
    for _dt, positions in result_df.positions.iteritems():
        dt = _dt.strftime("%Y-%m-%d %H:%M:%S")
        for order_book_id, position in iteritems(positions):
            writer.writerow({
                "date": dt,
                "order_book_id": order_book_id,
                "market_value": position.market_value,
                "quantity": position.quantity,
            })

    with open(csv_file, 'w') as csvfile:
        csvfile.write(csv_txt.getvalue())
def getInnerHTML(self):
    html = StringIO()
    for tag in self.tag.contents:
        html.write(str(tag))
    return html.getvalue()
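# A hedged usage sketch, assuming self.tag is a BeautifulSoup tag (the
# wrapper class is not shown in the snippet above). Joining str(child) over
# tag.contents reproduces the inner markup:
from bs4 import BeautifulSoup

soup = BeautifulSoup('<div>Hello <b>world</b></div>', 'html.parser')
inner = ''.join(str(child) for child in soup.div.contents)
print(inner)  # Hello <b>world</b>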
def load_dataset(infile, selection, verbose=1, **kwargs):
    """
    Loads selected distribution from selected infile.

    Arguments:
      infile (str): Path to text input file
      selection (str): Start of lines containing desired dataset
      verbose (int): Level of verbose output

    Returns:
      dataset (DataFrame): Selected dataset
    """
    from six import StringIO
    import pandas

    if verbose >= 1:
        print("loading '{0}' from '{1}'".format(selection, infile))

    s = StringIO()
    with open(infile) as open_infile:
        for line in open_infile:
            if line.startswith(selection):
                s.write(line)
    s.seek(0)

    dataset = pandas.read_csv(s, delim_whitespace=True, header=None,
                              usecols=[3, 4, 5, 6],
                              names=["phi", "psi", "probability",
                                     "free energy"])

    return dataset
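# The same filter-into-StringIO pattern in isolation: keep only lines with a
# given prefix and hand the rewound buffer straight to pandas. The sample
# text and column names here are illustrative only.
from six import StringIO
import pandas

text = "data 0.0 0.0 0.5\ncomment ignore me\ndata 1.0 2.0 0.25\n"
buf = StringIO()
for line in text.splitlines(True):
    if line.startswith("data"):
        buf.write(line)
buf.seek(0)
df = pandas.read_csv(buf, delim_whitespace=True, header=None,
                     usecols=[1, 2, 3],
                     names=["phi", "psi", "probability"])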
def _build_sample_yaml(args, app_desc):
    schema = app_desc.schema
    f = StringIO()
    for key, value in UWSGI_OPTIONS.items():
        for field in ["desc", "default"]:
            if field not in value:
                continue
            field_value = value[field]
            if not isinstance(field_value, six.string_types):
                continue

            new_field_value = string.Template(field_value).safe_substitute(**{
                'default_port': str(app_desc.default_port),
                'app_name': app_desc.app_name,
                'uwsgi_module': app_desc.uwsgi_module,
            })
            value[field] = new_field_value
    description = getattr(schema, "description", None)
    if description:
        description = description.lstrip()
        as_comment = "\n".join(["# %s" % l for l in description.split("\n")]) + "\n"
        f.write(as_comment)
    _write_sample_section(args, f, 'uwsgi', Schema(UWSGI_OPTIONS),
                          as_comment=False, uwsgi_hack=True)
    _write_sample_section(args, f, app_desc.app_name, schema)
    destination = os.path.join(args.galaxy_root, app_desc.sample_destination)
    _write_to_file(args, f, destination)
def get_result(self):
    # Got to unravel the join stack; the nesting order could be
    # arbitrary, so we do a depth first search and push the join tokens
    # and predicates onto a flat list, then format them
    op = self.expr.op()

    if isinstance(op, ops.Join):
        self._walk_join_tree(op)
    else:
        self.join_tables.append(self._format_table(self.expr))

    # TODO: Now actually format the things
    buf = StringIO()
    buf.write(self.join_tables[0])
    for jtype, table, preds in zip(self.join_types, self.join_tables[1:],
                                   self.join_predicates):
        buf.write('\n')
        buf.write(util.indent('{0} {1}'.format(jtype, table), self.indent))

        if len(preds):
            buf.write('\n')
            fmt_preds = [self._translate(pred) for pred in preds]
            conj = ' AND\n{0}'.format(' ' * 3)
            fmt_preds = util.indent('ON ' + conj.join(fmt_preds),
                                    self.indent * 2)
            buf.write(fmt_preds)

    return buf.getvalue()
def _diff(self, baselinedir, outputdir, dc=None):
    if dc is None:
        dc = filecmp.dircmp(baselinedir, outputdir, ['.svn'])
    if dc.left_only:
        self.fail("Files or subdirectories missing from output: "
                  + str(dc.left_only))
    if dc.right_only:
        self.fail("Files or subdirectories missing from baseline: "
                  + str(dc.right_only))
    for name in dc.diff_files:
        fromfile = join(dc.left, name)
        tofile = join(dc.right, name)
        with open(fromfile, 'r') as f_from:
            fromlines = f_from.readlines()
        with open(tofile, 'r') as f_to:
            tolines = f_to.readlines()
        diff = difflib.context_diff(fromlines, tolines,
                                    fromfile + " (baseline)",
                                    tofile + " (output)")
        out = StringIO()
        out.write("Output file does not match baseline:\n")
        for line in diff:
            out.write(line)
        self.fail(out.getvalue())
    for subdir in dc.subdirs:
        self._diff(join(baselinedir, subdir), join(outputdir, subdir),
                   dc=dc.subdirs[subdir])
    shutil.rmtree(outputdir, ignore_errors=True)
def construct_cli_call(cli_target, params):
    cli_call = StringIO()
    cli_call.write(cli_target)
    params = OrderedDict(sorted(params.items(), key=lambda t: t[0]))
    for param in params:
        cli_call.write(" -" + param + " \"'" + str(params[param]) + "'\"")
    return cli_call.getvalue()
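# Usage sketch: parameters are emitted in sorted order and wrapped in the
# double-plus-single quoting seen above. The target path is illustrative.
print(construct_cli_call('./target_algorithm.sh', {'x0': 3.1, 'seed': 42}))
# ./target_algorithm.sh -seed "'42'" -x0 "'3.1'"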
def _build_attributes_table(tag, attributes, hide_attributes=False,
                            attribute_names=None, header_level=3):
    attribute_table = StringIO()
    attribute_table.write("\n\n")
    if attributes and not hide_attributes:
        header_prefix = '#' * header_level
        attribute_table.write("\n%s Attributes\n" % header_prefix)
        attribute_table.write("Attribute | Details | Required\n")
        attribute_table.write("--- | --- | ---\n")
        for attribute in attributes:
            name = attribute.attrib["name"]
            if attribute_names and name not in attribute_names:
                continue
            details = _doc_or_none(attribute)
            if details is None:
                type_el = _type_el(attribute)
                assert type_el is not None, "No details or type found for %s" % name
                details = _doc_or_none(type_el)
                annotation_el = type_el.find(
                    "{http://www.w3.org/2001/XMLSchema}annotation")
            else:
                annotation_el = attribute.find(
                    "{http://www.w3.org/2001/XMLSchema}annotation")

            use = attribute.attrib.get("use", "optional") == "required"
            if "|" in details:
                # This seems to work fine for now, but potentially can cause
                # problems.
                pass
            details = details.replace("\n", " ").strip()
            best_practices = _get_bp_link(annotation_el)
            if best_practices:
                details += """ Find the Intergalactic Utilities Commission suggested best practices for this element [here](%s).""" % best_practices

            attribute_table.write("``%s`` | %s | %s\n" % (name, details, use))
    return attribute_table.getvalue()
def fetch_data(self):
    # create a data frame directly from the full text of
    # the response from the returned file-descriptor.
    data = self.fetch_url(self.url)
    fd = StringIO()

    if isinstance(data, str):
        fd.write(data)
    else:
        for chunk in data:
            fd.write(chunk)

    self.fetch_size = fd.tell()

    fd.seek(0)

    try:
        # see if pandas can parse csv data
        frames = read_csv(fd, **self.pandas_kwargs)

        frames_hash = hashlib.md5(str(fd.getvalue()).encode('utf-8'))
        self.fetch_hash = frames_hash.hexdigest()
    except pd.parser.CParserError:
        # could not parse the data, raise exception
        raise Exception('Error parsing remote CSV data.')
    finally:
        fd.close()

    return frames
def basic_config(config_file):
    from six import StringIO
    import yaml

    buff = StringIO()
    buff.write(config_file)
    buff.seek(0)
    return Config.from_dict(yaml.load(buff))
def __str__(self):
    contents = self.write()
    out_contents = StringIO()
    if isinstance(contents, (list, tuple)):
        out_contents.write("\n".join(contents))
    else:
        out_contents.write(str(contents))
    return out_contents.getvalue()
def file_from_string(xml_str):
    """
    Create a fake file object with string's contents.
    """
    stringio = StringIO()
    stringio.write(xml_str)
    stringio.seek(0)
    return stringio
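# Usage sketch: the fake file can be fed to any reader that expects a
# file-like object, e.g. ElementTree.
import xml.etree.ElementTree as ET

tree = ET.parse(file_from_string('<root><child/></root>'))
print(tree.getroot().tag)  # root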
def dump_stats():
    s = StringIO()
    s.write('Frame Stats:\n')
    for k, v in stats.items():
        s.write('%20s: %d\n' % (k, v))
    print(s.getvalue())
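# A minimal sketch of the module-level `stats` mapping this helper assumes
# (the keys here are invented for illustration):
stats = {'frames_rendered': 1200, 'frames_dropped': 3}
dump_stats()
# Frame Stats:
#      frames_rendered: 1200
#       frames_dropped: 3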
def test_suppress_up_when_ignoring_colors():
    "file-like filter output: suppress #{up} when ignoring colors"
    io = StringIO()
    couleur.proxy(io).enable()
    couleur.proxy(io).ignore()
    io.write("This is visible#{up}but this is invisible\n")
    assert_equals('This is visible', io.getvalue())
def test_read_experiments_list(self):
    experiments_list_file = StringIO()
    experiments_list_file.write('a\nb\n\nc d\n')
    experiments_list_file.seek(0)
    experiments_list = self.meta_optimizer.read_experiments_list(
        experiments_list_file)
    self.assertEqual(4, len(experiments_list))
    self.assertEqual(2, len(experiments_list[3]))
def dump(self, data):
    io = StringIO()
    for obj in data:
        self._dump_object(io, obj, 0)
        io.write('\n')
    io.write('\n')
    result = io.getvalue()
    io.close()
    return result
def test_get_json_data(self):
    output = StringIO()
    output.write(self.data)
    json.cached_data = output
    data = json.get_json_data(self.base_dir)
    assert self.data in (data, )
    json.cached_data = None
    assert not json.get_json_data(self.base_dir)
def _handle_exit(signum, frame):
    (msg, rc) = EXIT_FOR[signum]
    msg = msg % ({'version': vr.version()})
    contents = StringIO()
    contents.write("%s\n" % (msg))
    _pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents)
    util.multi_log(contents.getvalue(), console=True, stderr=False, log=LOG)
    sys.exit(rc)
def render(self, mode='human'):
    outfile = StringIO() if mode == 'ansi' else sys.stdout

    row, col = self.s // self.ncol, self.s % self.ncol
    desc = self.desc.tolist()
    desc = [[c.decode('utf-8') for c in line] for line in desc]
    desc[row][col] = "X"
    # note: this does not work on all setups; you can try to uncomment and
    # see what happens (if it does work you'll see weird symbols)
    desc[row][col] = utils.colorize(
        desc[row][col], "red", highlight=True
    )
    if self.lastaction is not None:
        outfile.write(" ({})\n".format(["Left", "Down", "Right",
                                        "Up"][self.lastaction]))
    else:
        outfile.write("\n")
    outfile.write("\n".join(''.join(line) for line in desc) + "\n")

    if mode != 'human':
        return outfile
def render(self, mode='human'):
    outfile = StringIO() if mode == 'ansi' else sys.stdout

    out = self.desc.copy().tolist()
    out = [[c.decode('utf-8') for c in line] for line in out]
    taxi_row, taxi_col, pass_idx, dest_idx = self.decode(self.s)

    def ul(x):
        return "_" if x == " " else x

    if pass_idx < 4:
        out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
            out[1 + taxi_row][2 * taxi_col + 1], 'yellow', highlight=True)
        pi, pj = self.locs[pass_idx]
        out[1 + pi][2 * pj + 1] = utils.colorize(out[1 + pi][2 * pj + 1],
                                                 'blue', bold=True)
    else:  # passenger in taxi
        out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
            ul(out[1 + taxi_row][2 * taxi_col + 1]), 'green', highlight=True)

    di, dj = self.locs[dest_idx]
    out[1 + di][2 * dj + 1] = utils.colorize(out[1 + di][2 * dj + 1],
                                             'magenta')
    outfile.write("\n".join(["".join(row) for row in out]) + "\n")
    if self.lastaction is not None:
        outfile.write(" ({})\n".format(
            ["South", "North", "East", "West", "Pickup",
             "Dropoff"][self.lastaction]))
    else:
        outfile.write("\n")

    # No need to return anything for human
    if mode != 'human':
        with closing(outfile):
            return outfile.getvalue()
def bulk_write_datapoints(self, datapoints):
    """Perform a bulk write of a number of datapoints to this stream

    It is assumed that all datapoints here are to be written to this
    stream and the stream_id on each will be set by this method to
    this stream's id (regardless of whether it is set or not).  To write
    multiple datapoints which span multiple streams, use
    :meth:`~StreamsAPI.bulk_write_endpoints` instead.

    :param list datapoints: A list of datapoints to be written into this stream

    """
    datapoints = list(datapoints)  # effectively performs validation that we have the right type
    for dp in datapoints:
        if not isinstance(dp, DataPoint):
            raise TypeError("All items in the datapoints list must be DataPoints")
        dp.set_stream_id(self.get_stream_id())

    # One could argue that this should just call out to
    # StreamAPI.bulk_write_datapoints.  At the time of writing, the stream
    # has no reference back to StreamsAPI, so that is less simple.
    remaining_datapoints = datapoints
    while remaining_datapoints:
        # take up to 250 points and post them until complete
        this_chunk_of_datapoints = remaining_datapoints[:MAXIMUM_DATAPOINTS_PER_POST]
        remaining_datapoints = remaining_datapoints[MAXIMUM_DATAPOINTS_PER_POST:]

        # Build XML list containing data for all points
        datapoints_out = StringIO()
        datapoints_out.write("<list>")
        for dp in this_chunk_of_datapoints:
            datapoints_out.write(dp.to_xml())
        datapoints_out.write("</list>")

        # And send the HTTP Post
        self._conn.post("/ws/DataPoint/{}".format(self.get_stream_id()),
                        datapoints_out.getvalue())
        logger.info('DataPoint batch of %s datapoints written to stream %s',
                    len(this_chunk_of_datapoints), self.get_stream_id())
def convert_conf2smac_string(configuration):
    """Convert configuration to string for SMAC option --initialChallengers.

    The expected format looks like this:

    .. code:: bash

        --initialChallengers "-alpha 1 -rho 1 -ps 0.1 -wp 0.00"

    :param configuration:
    :return:
    """
    config_string = StringIO()
    config_string.write("--initial-challengers \"")

    for hp_name in sorted(configuration):
        value = configuration[hp_name]
        if value is None:
            continue
        config_string.write(" -%s '%s'" % (hp_name, value))

    config_string.write("\"")
    return config_string.getvalue()
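# Usage sketch with a plain dict standing in for the configuration object
# (anything supporting sorted iteration over names and [] lookup works):
print(convert_conf2smac_string({'alpha': 1, 'rho': 1, 'ps': 0.1, 'wp': 0.0}))
# --initial-challengers " -alpha '1' -ps '0.1' -rho '1' -wp '0.0'"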
def render(self, pause=0.00001, mode='rgb_array', close=False):
    if mode == 'human' or mode == 'ansi':
        outfile = StringIO() if mode == 'ansi' else sys.stdout
        desc = self.current_grid_map.tolist()
        desc = [[str_color(c) for c in line] for line in desc]
        if self.lastaction is not None:
            outfile.write(" ({})\n".format(
                ["South", "North", "West", "East"][self.lastaction]))
        else:
            outfile.write("\n")
        outfile.write("\n".join(''.join(line) for line in desc) + "\n")
        if mode != 'human':
            with closing(outfile):
                return outfile.getvalue()
        return

    img = self._gridmap_to_img(self.current_grid_map)
    fig = plt.figure(self.this_fig_num)
    plt.clf()
    plt.imshow(img)
    fig.canvas.draw()
    if pause > 0:
        plt.pause(pause)
    return img
def integrations(since, to, write, force):
    """
    Generates a markdown file containing the list of integrations shipped in a
    given Agent release. Agent version numbers are derived by inspecting tags
    on `integrations-core`, so running this tool might provide unexpected
    results if the repo is not up to date with the Agent release process.

    If neither `--since` nor `--to` is passed (the most common use case), the
    tool will generate the list for every Agent since version 6.3.0
    (before that point we don't have enough information to build the log).
    """
    agent_tags = get_agent_tags(since, to)
    # get the list of integrations shipped with the agent from the
    # requirements file
    req_file_name = os.path.basename(get_agent_release_requirements())

    integrations_contents = StringIO()
    for tag in agent_tags:
        integrations_contents.write(
            '## Datadog Agent version {}\n\n'.format(tag))
        # Requirements for current tag
        file_contents = git_show_file(req_file_name, tag)
        for name, ver in iteritems(parse_agent_req_file(file_contents)):
            integrations_contents.write('* {}: {}\n'.format(name, ver))
        integrations_contents.write('\n')

    # save the changelog on disk if --write was passed
    if write:
        dest = get_agent_integrations_file()
        # don't overwrite an existing file
        if os.path.exists(dest) and not force:
            msg = "Output file {} already exists, run the command again with --force to overwrite"
            abort(msg.format(dest))

        write_file(dest, integrations_contents.getvalue())
    else:
        echo_info(integrations_contents.getvalue())
def render(self, mode='human'):
    print(self.initial_loc)
    outfile = StringIO() if mode == 'ansi' else sys.stdout

    out = self.desc.copy().tolist()
    out = [[c.decode('utf-8') for c in line] for line in out]
    init, dly, robotrow, robotcol, row1, col1, row2, col2 = self.decode(
        self.s)

    def ul(x):
        return "_" if x == " " else x

    out[1 + robotrow][2 * robotcol + 1] = utils.colorize(
        ul(out[1 + robotrow][2 * robotcol + 1]), 'blue', highlight=True)

    self.fill_coverage(init, init, row1, col1, row2, col2)
    for row in range(nR):
        for col in range(nC):
            if (self.coverage[row][col] == True) and (row != robotrow or
                                                      col != robotcol):
                out[1 + row][2 * col + 1] = utils.colorize(
                    ul(out[1 + row][2 * col + 1]), 'yellow', highlight=True)

    outfile.write("\n".join(["".join(row) for row in out]) + "\n")
    if self.lastaction is not None:
        outfile.write(" ({})\n".format(
            ["South", "North", "East", "West", "Deploy"][self.lastaction]))
    else:
        outfile.write("\n")

    # No need to return anything for human
    if mode != 'human':
        return outfile
def render(self, mode='human'):
    outfile = StringIO() if mode == 'ansi' else sys.stdout

    currState = [list(s) for s in self.state_mapping[self.s]]
    ringN = np.max(self.initial_state)[0]
    poleN = len(self.initial_state)
    pole = ' ' * ringN + '||' + ' ' * ringN
    rings = []
    for r in range(1, ringN + 1):
        rings.append(' ' * (ringN - r) + '~' * r + '||' + '~' * r +
                     ' ' * (ringN - r))
    rings = np.array(rings)
    floor = '\u203e' * (ringN * 2 + 4)

    vis = []
    for p in currState:
        p = np.array(p) - 1
        if p.size == 0:
            vis.append([pole] * (ringN + 1))
        else:
            temp = rings[p[::-1]]
            vis.append([pole] * (ringN - temp.size + 1) + temp.tolist())
    vis = ''.join([''.join(i) + '\n'
                   for i in np.array(vis).T.tolist()]) + floor * poleN + '\n'

    if self.lastaction is not None:
        outfile.write('Pole {} to Pole {}\n'.format(
            *(self.action_list[self.lastaction]) + 1))
    else:
        outfile.write('\n')
    outfile.write(vis)

    if mode != 'human':
        with closing(outfile):
            return outfile.getvalue()
def render(self, mode='human'):
    outfile = StringIO() if mode == 'ansi' else sys.stdout

    row, col = self.s[0] // self.ncol, self.s[0] % self.ncol
    row_e, col_e = self.s[1] // self.ncol, self.s[1] % self.ncol
    desc = self.desc.tolist()
    desc = [[c.decode('utf-8') for c in line] for line in desc]
    desc[row][col] = utils.colorize(desc[row][col], "blue", highlight=True)
    try:
        desc[row_e][col_e] = utils.colorize(desc[row_e][col_e], "red",
                                            highlight=True)
    except IndexError:
        pass
    if self.lastaction is not None:
        outfile.write(" ({})\n".format(["Left", "Down", "Right",
                                        "Up"][self.lastaction]))
    else:
        outfile.write("\n")
    outfile.write("\n".join(''.join(line) for line in desc) + "\n")

    if mode != 'human':
        with closing(outfile):
            return outfile.getvalue()
def make_log_context(log_events):
    """Get error context from a log file.

    Args:
        log_events (list of LogEvent): list of events created by
            ``ctest_log_parser.parse()``

    Returns:
        str: context from the build log with errors highlighted

    Parses the log file for lines containing errors, and prints them out
    with line numbers and context.  Errors are highlighted with '>>' and
    with red highlighting (if color is enabled).
    """
    error_lines = set(e.line_no for e in log_events)

    out = StringIO()
    next_line = 1
    for event in log_events:
        start = event.start

        if start > next_line:
            out.write(' [ ... ]\n')

        if start < next_line:
            start = next_line

        for i in range(start, event.end):
            if i in error_lines:
                out.write(colorize(' @R{>> %-6d%s}\n' % (i, event[i])))
            else:
                out.write(' %-6d%s\n' % (i, event[i]))

        next_line = event.end

    return out.getvalue()
def __str__(self):
    # print keys in order
    sorted_keys = sorted(self.keys())

    # Separate boolean variants from key-value pairs as they print
    # differently. All booleans go first to avoid ' ~foo' strings that
    # break spec reuse in zsh.
    bool_keys = []
    kv_keys = []
    for key in sorted_keys:
        bool_keys.append(key) if isinstance(self[key].value, bool) \
            else kv_keys.append(key)

    # add spaces before and after key/value variants.
    string = StringIO()

    for key in bool_keys:
        string.write(str(self[key]))

    for key in kv_keys:
        string.write(' ')
        string.write(str(self[key]))

    return string.getvalue()
def render(self, mode='human'):
    outfile = StringIO() if mode == 'ansi' else sys.stdout

    out = self.desc.copy().tolist()
    out = [[c.decode('utf-8') for c in line] for line in out]
    player1_row, player1_column, player2_row, player2_column, \
        player1_possession = self.decode_state(self.s)
    print(player1_row, player1_column, player2_row, player2_column,
          player1_possession)
    out[player1_row + 1][player1_column + 1] = 'A' if player1_possession else 'a'
    out[player2_row + 1][player2_column + 1] = 'b' if player1_possession else 'B'

    outfile.write("\n".join(["".join(row) for row in out]) + "\n")
    if self.lastaction is not None:
        action1, action2 = SoccerEnv.decode_action(self.lastaction)
        outfile.write(" ({},{})\n".format(action1, action2))
    else:
        outfile.write("\n")

    # No need to return anything for human
    if mode != 'human':
        return outfile
class PSGC(basecore2d.GraphicsContextBase):

    def __init__(self, size, *args, **kwargs):
        super(PSGC, self).__init__(size, *args, **kwargs)
        self.size = size
        self._height = size[1]
        self.contents = StringIO()
        self._clipmap = {}
        self.clip_id = None

    def clear(self):
        self.contents = StringIO()

    def width(self):
        return self.size[0]

    def height(self):
        return self.size[1]

    def save(self, filename):
        f = open(filename, 'w')
        ext = os.path.splitext(filename)[1]
        if ext in ('.eps', '.epsf'):
            f.write("%!PS-Adobe-3.0 EPSF-3.0\n")
            f.write('%%%%BoundingBox: 0 0 %d %d\n' % self.size)
            f.write(self.contents.getvalue())
        elif ext == '.ps':
            f.write("%!PS-Adobe-2.0\n")
            f.write(self.contents.getvalue())
        else:
            raise ValueError("don't know how to write a %s file" % ext)

    # Text handling code

    def set_font(self, font):
        self.face_name = font_face_map.get(font.face_name, font.face_name)
        self.font = pdfmetrics.Font(self.face_name, self.face_name,
                                    pdfmetrics.defaultEncoding)
        self.font_size = font.size
        self.contents.write("""/%s findfont %3.3f scalefont setfont\n"""
                            % (self.face_name, self.font_size))

    def device_show_text(self, text):
        ttm = self.get_text_matrix()
        ctm = self.get_ctm()  # not device_ctm!!
        m = affine.concat(ctm, ttm)
        if self.state.clipping_path:
            self.contents.write('clipsave\n')
            self.contents.write('%3.3f %3.3f %3.3f %3.3f rectclip\n'
                                % self.state.clipping_path)
        self.contents.write('gsave\n')
        self.device_transform_device_ctm(LOAD_CTM, [m])
        self.contents.write('%3.3f %3.3f moveto\n' % (0, 0))
        r, g, b, a = self.state.line_color
        self.contents.write('%1.3f %1.3f %1.3f setrgbcolor\n' % (r, g, b))
        self.contents.write('(%s) show\n' % text)
        self.contents.write('grestore\n')
        if self.state.clipping_path:
            self.contents.write('cliprestore\n')

    def get_full_text_extent(self, text):
        ascent, descent = _fontdata.ascent_descent[self.face_name]
        descent = (-descent) * self.font_size / 1000.0
        ascent = ascent * self.font_size / 1000.0
        height = ascent + descent
        width = pdfmetrics.stringWidth(text, self.face_name, self.font_size)
        return width, height, descent, height * 1.2  # assume leading of 1.2*height

    # actual implementation =)

    def device_draw_image(self, img, rect):
        """
        draw_image(img_gc, rect=(x,y,w,h))

        Draws another gc into this one.  If 'rect' is not provided, then
        the image gc is drawn into this one, rooted at (0,0) and at full
        pixel size.  If 'rect' is provided, then the image is resized
        into the (w,h) given and drawn into this GC at point (x,y).

        img_gc is either a Numeric array (WxHx3 or WxHx4) or a GC from
        Kiva's Agg backend (kiva.agg.GraphicsContextArray).

        Requires the Python Imaging Library (PIL).
        """
        from kiva.compat import pilfromstring, piltostring

        if type(img) == type(array([])):
            # Numeric array
            converted_img = agg.GraphicsContextArray(img, pix_format='rgba32')
            format = 'RGBA'
        elif isinstance(img, agg.GraphicsContextArray):
            if img.format().startswith('RGBA'):
                converted_img = img
                format = 'RGBA'
            elif img.format().startswith('RGB'):
                converted_img = img
                format = 'RGB'
            else:
                converted_img = img.convert_pixel_format('rgba32', inplace=0)
                format = 'RGBA'
            # Should probably take this into account
            # interp = img.get_image_interpolation()
        else:
            warnings.warn("Cannot render image of type %r into EPS context."
                          % type(img))
            return

        # converted_img now holds an Agg graphics context with the image
        pil_img = pilfromstring(format,
                                (converted_img.width(),
                                 converted_img.height()),
                                piltostring(converted_img.bmp_array))

        if rect is None:
            rect = (0, 0, img.width(), img.height())

        # PIL PS output doesn't support alpha.
        if format != 'RGB':
            pil_img = pil_img.convert('RGB')

        left, top, width, height = rect
        if width != img.width() or height != img.height():
            # This is not strictly required.
            pil_img = pil_img.resize((int(width), int(height)),
                                     PilImage.NEAREST)

        self.contents.write('gsave\n')
        self.contents.write('initmatrix\n')
        m = self.get_ctm()
        self.contents.write('[%.3f %.3f %.3f %.3f %.3f %.3f] concat\n'
                            % affine.affine_params(m))
        self.contents.write('%.3f %.3f translate\n' % (left, top))
        # Rely on PIL's EpsImagePlugin to do the hard work here.
        pil_img.save(self.contents, 'eps', eps=0)
        self.contents.write('grestore\n')

    def device_transform_device_ctm(self, func, args):
        if func == LOAD_CTM:
            self.contents.write('initmatrix\n')
            func = CONCAT_CTM

        if func == SCALE_CTM:
            sx, sy = args
            self.contents.write('%.3f %.3f scale\n' % (sx, sy))
        elif func == ROTATE_CTM:
            r, = args
            self.contents.write('%.3f rotate\n' % r)
        elif func == TRANSLATE_CTM:
            tx, ty = args
            self.contents.write('%.3f %.3f translate\n' % (tx, ty))
        elif func == CONCAT_CTM:
            m, = args
            self.contents.write('[%.3f %.3f %.3f %.3f %.3f %.3f] concat\n'
                                % affine.affine_params(m))

    def device_fill_points(self, points, mode):
        if self.state.clipping_path:
            self.contents.write('clipsave\n')
            self.contents.write('%3.3f %3.3f %3.3f %3.3f rectclip\n'
                                % self.state.clipping_path)
        linecap = line_cap_map[self.state.line_cap]
        linejoin = line_join_map[self.state.line_join]
        dasharray = self._dasharray()
        if dasharray:
            self.contents.write('%s 0 setdash\n' % dasharray)
        self.contents.write('%3.3f setlinewidth\n' % self.state.line_width)
        self.contents.write('%d setlinecap\n' % linecap)
        self.contents.write('%d setlinejoin\n' % linejoin)
        self.contents.write('newpath\n')
        x, y = points[0]
        self.contents.write(' %3.3f %3.3f moveto\n' % (x, y))
        for (x, y) in points[1:]:
            self.contents.write(' %3.3f %3.3f lineto\n' % (x, y))

        first_pass, second_pass = fill_stroke_map[mode]

        if second_pass:
            if first_pass in ('fill', 'eofill'):
                r, g, b, a = self.state.fill_color
                self.contents.write('%1.3f %1.3f %1.3f setrgbcolor\n'
                                    % (r, g, b))
            else:
                r, g, b, a = self.state.line_color
                self.contents.write('%1.3f %1.3f %1.3f setrgbcolor\n'
                                    % (r, g, b))

            self.contents.write('gsave %s grestore %s\n'
                                % (first_pass, second_pass))
        else:
            if first_pass in ('fill', 'eofill'):
                r, g, b, a = self.state.fill_color
                self.contents.write('%1.3f %1.3f %1.3f setrgbcolor\n'
                                    % (r, g, b))
            else:
                r, g, b, a = self.state.line_color
                self.contents.write('%1.3f %1.3f %1.3f setrgbcolor\n'
                                    % (r, g, b))
            self.contents.write(first_pass + '\n')
        if self.state.clipping_path:
            self.contents.write('cliprestore\n')

    def device_stroke_points(self, points, mode):
        # handled by device_fill_points
        pass

    def device_set_clipping_path(self, x, y, width, height):
        pass

    def device_destroy_clipping_path(self):
        pass

    # utility routines

    def _color(self, color):
        r, g, b, a = color
        return '#%02x%02x%02x' % (r * 255, g * 255, b * 255)

    def _dasharray(self):
        dasharray = ''
        for x in self.state.line_dash:
            if type(x) == type(arange(3)):
                # why is this so hard?
                x = ravel(x)[0]
            dasharray += ' ' + '%3.2f' % x
        if not dasharray or dasharray == " 0.00 0.00":
            return '[]'
        return '[ ' + dasharray + ' ]'

    # noops which seem to be needed

    def device_update_line_state(self):
        pass

    def device_update_fill_state(self):
        pass
def _render(self, mode="human", close=False): if close: return outfile = StringIO() if mode == 'ansi' else sys.stdout outfile.write(repr(self.state) + '\n') return outfile
# Write it back out again line by line with substitutions for #includes.
output = StringIO() if args.dry_run else open(file_path, 'w')
includes = []
for line in lines:
    parts = line.replace('<', '"').replace('>', '"').split('"')
    if (len(parts) == 3
            and '#' in parts[0]
            and 'include' in parts[0]
            and os.path.basename(parts[1]) in headers):
        header = fix_path(os.path.relpath(headers[os.path.basename(parts[1])], '.'))
        includes.append(parts[0] + '"%s"' % header + parts[2])
    else:
        for inc in sorted(includes):
            output.write(inc.strip('\n') + '\n')
        includes = []
        output.write(line.strip('\n') + '\n')

if args.dry_run and output.getvalue() != open(file_path).read():
    need_rewriting.append(file_path)
    rc = 1
output.close()

if need_rewriting:
    print('Some files need rewritten #includes:')
    for path in need_rewriting:
        print('\t' + path)
    print('To do this automatically, run')
    print('python tools/rewrite_includes.py ' + ' '.join(need_rewriting))
    sys.exit(1)
def toXml(self, filename='', compress=False):
    """drawing.toXml()        ---->to the screen
    drawing.toXml(filename)---->to the file
    writes a svg drawing to the screen or to a file
    compresses if filename ends with svgz or if compress is true
    """
    doctype = implementation.createDocumentType(
        'svg', "-//W3C//DTD SVG 1.0//EN""",
        'http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd ')

    global root
    # root is defined global so it can be used by the appender. It's also
    # possible to use it as an argument but that is a bit messy.
    root = implementation.createDocument(None, None, doctype)
    # Create the xml document.

    global appender

    def appender(element, elementroot):
        """This recursive function appends elements to an element and sets
        the attributes and type. It stops when all elements have been
        appended"""
        if element.namespace:
            e = root.createElementNS(element.namespace, element.type)
        else:
            e = root.createElement(element.type)

        if element.text:
            textnode = root.createTextNode(element.text)
            e.appendChild(textnode)

        # in element.attributes is supported from python 2.2
        for attribute in list(element.attributes.keys()):
            e.setAttribute(attribute, str(element.attributes[attribute]))

        if element.elements:
            for el in element.elements:
                e = appender(el, e)
        elementroot.appendChild(e)
        return elementroot

    root = appender(self.svg, root)

    if not filename:
        xml = StringIO()
        PrettyPrint(root, xml)
        if compress:
            import gzip
            f = StringIO()
            zf = gzip.GzipFile(fileobj=f, mode='wb')
            zf.write(xml.getvalue())
            zf.close()
            f.seek(0)
            return f.read()
        else:
            return xml.getvalue()
    else:
        try:
            if filename[-4:] == 'svgz':
                import gzip
                xml = StringIO()
                PrettyPrint(root, xml)
                f = gzip.GzipFile(filename=filename, mode='wb',
                                  compresslevel=9)
                f.write(xml.getvalue())
                f.close()
            else:
                f = open(filename, 'w')
                PrettyPrint(root, f)
                f.close()
        except:
            print("Cannot write SVG file: " + filename)
def _encode(self, points, dists, abs_max_dist):
    encoded_levels = StringIO()
    encoded_points = StringIO()

    plat = 0
    plng = 0

    if (self._force_endpoints):
        encoded_levels.write(self._encode_number(self._num_levels - 1))
    else:
        encoded_levels.write(self._encode_number(
            self._num_levels - self._compute_level(abs_max_dist) - 1))

    n_points = len(points)
    for i, p in enumerate(points):
        if (i > 0) and (i < n_points - 1) and (i in dists):
            encoded_levels.write(self._encode_number(
                self._num_levels - self._compute_level(dists[i]) - 1))

        if (i in dists) or (i == 0) or (i == n_points - 1):
            late5 = int(math.floor(p[1] * 1E5))
            lnge5 = int(math.floor(p[0] * 1E5))

            dlat = late5 - plat
            dlng = lnge5 - plng

            plat = late5
            plng = lnge5

            encoded_points.write(self._encode_signed_number(dlat))
            encoded_points.write(self._encode_signed_number(dlng))

    if (self._force_endpoints):
        encoded_levels.write(self._encode_number(self._num_levels - 1))
    else:
        encoded_levels.write(self._encode_number(
            self._num_levels - self._compute_level(abs_max_dist) - 1))

    return (
        encoded_points.getvalue(),  # .replace("\\", "\\\\"),
        encoded_levels.getvalue())
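# The _encode_number/_encode_signed_number helpers used above are not defined
# in this snippet; a sketch of the standard Google encoded-polyline scheme
# they are assumed to implement (5-bit chunks, offset by 63, sign folded into
# the lowest bit), written as free functions here for brevity:
def _encode_number(num):
    encoded = StringIO()
    while num >= 0x20:
        encoded.write(chr((0x20 | (num & 0x1f)) + 63))
        num >>= 5
    encoded.write(chr(num + 63))
    return encoded.getvalue()


def _encode_signed_number(num):
    sgn_num = num << 1
    if num < 0:
        sgn_num = ~sgn_num
    return _encode_number(sgn_num)

# e.g. _encode_signed_number(-17998321) == '`~oia@' (delta for -179.98321)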
def run_process_with_timeout(args, filename_in=None, filename_out=None,
                             filename_err=None, cwd=None, timeout=None,
                             sudo=None):
    """Execute the specified process but within a certain timeout.

    @param args: the actual process. This should be a list of string as in:
        ['/usr/bin/foo', '--bar', 'baz']
    @type args: list of string

    @param filename_in: the path to a file that should be provided as standard
        input to the process. If None this will default to /dev/null
    @type filename_in: string

    @param filename_out: Desired filename for stdout output
        (optional; see below).
    @type filename_out: string

    @param filename_err: Desired filename for stderr output
        (optional; see below).
    @type filename_err: string

    @param cwd: the path from where to execute the process
    @type cwd: string

    @param timeout: the timeout in seconds after which to consider the
        process execution as failed. a Timeout exception will be raised
    @type timeout: int

    @param sudo: the optional name of the user under which to execute the
        process (by using sudo, without prompting for a password)
    @type sudo: string

    @return: Tuple (exit code, string containing stdout output buffer,
        string containing stderr output buffer).

        However, if either filename_out or filename_err are defined, then the
        output buffers are not passed back but rather written into
        filename_out/filename_err pathnames. This is useful for commands that
        produce big files, for which it is not practical to pass results back
        to the callers in a Python text buffer. Note that it is the client's
        responsibility to name these files in the proper fashion (e.g. to be
        unique) and to close these files after use.
    @rtype: (number, string, string)

    @raise Timeout: if the process does not terminate within the timeout
    """
    if timeout is None:
        from invenio.config import CFG_MISCUTIL_DEFAULT_PROCESS_TIMEOUT
        timeout = CFG_MISCUTIL_DEFAULT_PROCESS_TIMEOUT
    stdout = stderr = None
    if filename_in is not None:
        stdin = open(filename_in)
    else:
        ## FIXME: should use NUL on Windows
        stdin = open('/dev/null', 'r')
    if filename_out:
        stdout = open(filename_out, 'w')
    if filename_err:
        stderr = open(filename_err, 'w')
    tmp_stdout = StringIO()
    tmp_stderr = StringIO()
    if sudo is not None:
        args = ['sudo', '-u', sudo, '-S'] + list(args)
    ## See: <http://stackoverflow.com/questions/3876886/timeout-a-subprocess>
    process = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, close_fds=True,
                               cwd=cwd, preexec_fn=os.setpgrp)
    ## See: <http://stackoverflow.com/questions/375427/non-blocking-read-on-a-stream-in-python>
    fd = process.stdout.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    fd = process.stderr.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    fd_to_poll = [process.stdout, process.stderr]
    select_timeout = 0.5
    t1 = time.time()
    try:
        while process.poll() is None:
            if time.time() - t1 >= timeout:
                if process.stdin is not None:
                    process.stdin.close()
                time.sleep(1)
                if process.poll() is None:
                    ## See: <http://stackoverflow.com/questions/3876886/timeout-a-subprocess>
                    os.killpg(process.pid, signal.SIGTERM)
                    time.sleep(1)
                if process.poll() is None:
                    os.killpg(process.pid, signal.SIGKILL)
                try:
                    os.waitpid(process.pid, 0)
                except OSError:
                    pass
                raise Timeout()
            for fd in select.select(fd_to_poll, [], [], select_timeout)[0]:
                if fd == process.stdout:
                    buf = process.stdout.read(65536)
                    if stdout is None:
                        tmp_stdout.write(buf)
                    else:
                        stdout.write(buf)
                elif fd == process.stderr:
                    buf = process.stderr.read(65536)
                    if stderr is None:
                        tmp_stderr.write(buf)
                    else:
                        stderr.write(buf)
                else:
                    raise OSError("fd %s is not a valid file descriptor"
                                  % fd)
    finally:
        while True:
            ## Let's just read what is remaining to read.
            for fd in select.select(fd_to_poll, [], [], select_timeout)[0]:
                if fd == process.stdout:
                    buf = process.stdout.read(65536)
                    tmp_stdout.write(buf)
                    if stdout is not None:
                        stdout.write(buf)
                elif fd == process.stderr:
                    buf = process.stderr.read(65536)
                    tmp_stderr.write(buf)
                    if stderr is not None:
                        stderr.write(buf)
                else:
                    raise OSError("fd %s is not a valid file descriptor"
                                  % fd)
            else:
                break
        return process.poll(), tmp_stdout.getvalue(), tmp_stderr.getvalue()
class TextStream(object):
    def __init__(self):
        self.stream = StringIO()
        self._Line = 1
        self._Column = 1
        self._currentLine = 1
        self._currentColumn = 1

    @property
    def Line(self):
        return self._Line

    @property
    def Column(self):
        return self._Column

    @property
    def AtEndOfLine(self):
        sstream = self.stream.getvalue().split('\n')
        line = sstream[self._currentLine]
        if not line[self._currentColumn:]:
            return True
        return False

    @property
    def AtEndOfStream(self):
        if self._currentLine == self._Line and self._currentColumn == self._Column:
            return True
        return False

    def Read(self, characters):
        consume = characters
        sstream = self.stream.getvalue().split('\n')
        result = ""

        while consume > 0:
            line = sstream[self._currentLine]
            eline = line[self._currentColumn:]
            length = min(len(eline), consume)
            result += eline[:length]
            consume -= length

            if consume > 0:
                self._currentLine += 1
                self._currentColumn = 1
            else:
                self._currentColumn += length

        return result

    def ReadLine(self):
        sstream = self.stream.getvalue().split('\n')
        result = sstream[self._currentLine]
        self._currentLine += 1
        return result

    def ReadAll(self):
        return self.stream.getvalue()

    def Write(self, _string):
        _str_string = str(_string)
        sstring = _str_string.split('\n')
        if len(sstring) > 1:
            self._Line += len(sstring)
            self._Column = len(sstring[-1]) + 1
        else:
            self._Column += len(_str_string)

        self.stream.write(_str_string)

    def WriteLine(self, _string):
        self.Write(str(_string) + '\n')

    def WriteBlankLines(self, lines):
        self.Write(lines * '\n')

    def Skip(self, characters):
        skip = characters
        sstream = self.stream.getvalue().split('\n')

        while skip > 0:
            line = sstream[self._currentLine]
            eline = line[self._currentColumn:]

            if skip > len(eline):
                self._currentLine += 1
                self._currentColumn = 1
                skip -= len(eline)
            else:
                self._currentColumn += skip
                skip = 0  # done skipping within the current line

    def SkipLine(self):
        self._currentLine += 1

    def Close(self):
        content = self.stream.getvalue()
        log.info(content)

        data = {
            'content': content,
            'status': 200,
            'md5': hashlib.md5(content).hexdigest(),
            'sha256': hashlib.sha256(content).hexdigest(),
            'fsize': len(content),
            'ctype': 'textstream',
            'mtype': Magic(content).get_mime(),
        }

        log.ThugLogging.log_location(log.ThugLogging.url, data)
        log.TextClassifier.classify(log.ThugLogging.url, content)

        if not log.ThugOpts.file_logging:
            return

        log_dir = os.path.join(log.ThugLogging.baseDir, "analysis",
                               "textstream")

        try:
            os.makedirs(log_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                raise

        filename = self._filename.split('\\')[-1] if '\\' in self._filename else self._filename
        if not filename:
            filename = ''.join(random.choice(string.lowercase)
                               for i in range(8))

        log_file = os.path.join(log_dir, filename)
        with open(log_file, 'wb') as fd:
            fd.write(content)
def write_string(self, **kw):
    res = StringIO()
    res.write('# -*- coding: utf-8 -*-\n')
    super(INI, self).write(res, **kw)
    return res.getvalue()
class WSClient:
    def __init__(self, configuration, url, headers, capture_all):
        """A websocket client with support for channels.

        Exec command uses different channels for different streams. for
        example, 0 is stdin, 1 is stdout and 2 is stderr. Some other API calls
        like port forwarding can forward different pods' streams to different
        channels.
        """
        enableTrace(False)
        header = []
        self._connected = False
        self._channels = {}
        self._ordered_all = []
        if capture_all:
            self._all = StringIO()
        else:
            self._all = _IgnoredIO()

        # We just need to pass the Authorization, ignore all the other
        # http headers we get from the generated code
        if headers and 'authorization' in headers:
            header.append("authorization: %s" % headers['authorization'])

        if headers and 'sec-websocket-protocol' in headers:
            header.append("sec-websocket-protocol: %s" %
                          headers['sec-websocket-protocol'])
        else:
            header.append("sec-websocket-protocol: v4.channel.k8s.io")

        if url.startswith('wss://') and configuration.verify_ssl:
            ssl_opts = {
                'cert_reqs': ssl.CERT_REQUIRED,
                'ca_certs': configuration.ssl_ca_cert or certifi.where(),
            }
            if configuration.assert_hostname is not None:
                ssl_opts['check_hostname'] = configuration.assert_hostname
        else:
            ssl_opts = {'cert_reqs': ssl.CERT_NONE}

        if configuration.cert_file:
            ssl_opts['certfile'] = configuration.cert_file
        if configuration.key_file:
            ssl_opts['keyfile'] = configuration.key_file

        self.sock = WebSocket(sslopt=ssl_opts, skip_utf8_validation=False)
        if configuration.proxy:
            proxy_url = urlparse(configuration.proxy)
            self.sock.connect(url, header=header,
                              http_proxy_host=proxy_url.hostname,
                              http_proxy_port=proxy_url.port)
        else:
            self.sock.connect(url, header=header)
        self._connected = True

    def peek_channel(self, channel, timeout=0):
        """Peek a channel and return part of the input,
        empty string otherwise."""
        self.update(timeout=timeout)
        if channel in self._channels:
            return self._channels[channel]
        return ""

    def read_channel(self, channel, timeout=0):
        """Read data from a channel."""
        if channel not in self._channels:
            ret = self.peek_channel(channel, timeout)
        else:
            ret = self._channels[channel]
        if channel in self._channels:
            del self._channels[channel]
        return ret

    def readline_channel(self, channel, timeout=None):
        """Read a line from a channel."""
        if timeout is None:
            timeout = float("inf")
        start = time.time()
        while self.is_open() and time.time() - start < timeout:
            if channel in self._channels:
                data = self._channels[channel]
                if "\n" in data:
                    index = data.find("\n")
                    ret = data[:index]
                    data = data[index+1:]
                    if data:
                        self._channels[channel] = data
                    else:
                        del self._channels[channel]
                    return ret
            self.update(timeout=(timeout - time.time() + start))

    def readline_any(self, timeout=None):
        """Read a line from any output channel."""
        import logging
        from datetime import datetime
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger('kubernetes.client.rest')
        if timeout is None:
            timeout = float("inf")
        start = time.time()
        chunks = {STDOUT_CHANNEL: "", STDERR_CHANNEL: ""}
        while self.is_open() and time.time() - start < timeout:
            for position, entry in enumerate(self._ordered_all):
                index = entry["data"].find("\n")
                if index == -1:
                    chunks[entry["channel"]] += entry["data"]
                    del self._ordered_all[position]
                else:
                    chunks[entry["channel"]] += entry["data"][:index]
                    ret = {"channel": entry["channel"],
                           "data": chunks[entry["channel"]]}
                    entry["data"] = entry["data"][index+1:]
                    if not entry["data"]:
                        del self._ordered_all[position]
                    if ret["data"]:
                        chunks[entry["channel"]] = ""
                        return ret
            self.update(timeout=(timeout - time.time() + start))

    # def readline_any(self, channels=[STDOUT_CHANNEL, STDERR_CHANNEL],
    #                  timeout=None):
    #     """Read a line from any output channel."""
    #     if timeout is None:
    #         timeout = float("inf")
    #     start = time.time()
    #     while self.is_open() and time.time() - start < timeout:
    #         for channel in channels:
    #             if channel in self._channels:
    #                 data = self._channels[channel]
    #                 if not data:
    #                     continue
    #                 if "\n" in data:
    #                     index = data.find("\n")
    #                     ret = {"channel": channel, "data": data[:index]}
    #                     data = data[index+1:]
    #                     if data:
    #                         self._channels[channel] = data
    #                     else:
    #                         del self._channels[channel]
    #                     return ret
    #         self.update(timeout=(timeout - time.time() + start))

    def write_channel(self, channel, data):
        """Write data to a channel."""
        # check if we're writing binary data or not
        binary = six.PY3 and type(data) == six.binary_type
        opcode = ABNF.OPCODE_BINARY if binary else ABNF.OPCODE_TEXT

        channel_prefix = chr(channel)
        if binary:
            channel_prefix = six.binary_type(channel_prefix, "ascii")

        payload = channel_prefix + data
        self.sock.send(payload, opcode=opcode)

    def peek_stdout(self, timeout=0):
        """Same as peek_channel with channel=1."""
        return self.peek_channel(STDOUT_CHANNEL, timeout=timeout)

    def read_stdout(self, timeout=None):
        """Same as read_channel with channel=1."""
        return self.read_channel(STDOUT_CHANNEL, timeout=timeout)

    def readline_stdout(self, timeout=None):
        """Same as readline_channel with channel=1."""
        return self.readline_channel(STDOUT_CHANNEL, timeout=timeout)

    def peek_stderr(self, timeout=0):
        """Same as peek_channel with channel=2."""
        return self.peek_channel(STDERR_CHANNEL, timeout=timeout)

    def read_stderr(self, timeout=None):
        """Same as read_channel with channel=2."""
        return self.read_channel(STDERR_CHANNEL, timeout=timeout)

    def readline_stderr(self, timeout=None):
        """Same as readline_channel with channel=2."""
        return self.readline_channel(STDERR_CHANNEL, timeout=timeout)

    def read_all(self):
        """Return buffered data received on stdout and stderr channels.
        This is useful for non-interactive call where a set of command passed
        to the API call and their result is needed after the call is
        concluded. Should be called after run_forever() or update()

        TODO: Maybe we can process this and return a more meaningful map with
        channels mapped for each input.
        """
        out = self._all.getvalue()
        self._all = self._all.__class__()
        self._channels = {}
        return out

    def is_open(self):
        """True if the connection is still alive."""
        return self._connected

    def write_stdin(self, data):
        """The same as write_channel with channel=0."""
        self.write_channel(STDIN_CHANNEL, data)

    def update(self, timeout=0):
        """Update channel buffers with at most one complete frame of input."""
        if not self.is_open():
            return
        if not self.sock.connected:
            self._connected = False
            return
        r, _, _ = select.select(
            (self.sock.sock, ), (), (), timeout)
        if r:
            op_code, frame = self.sock.recv_data_frame(True)
            if op_code == ABNF.OPCODE_CLOSE:
                self._connected = False
                return
            elif op_code == ABNF.OPCODE_BINARY or op_code == ABNF.OPCODE_TEXT:
                data = frame.data
                if six.PY3:
                    data = data.decode("utf-8", "replace")
                if len(data) > 1:
                    channel = ord(data[0])
                    data = data[1:]
                    if data:
                        if channel in [STDOUT_CHANNEL, STDERR_CHANNEL]:
                            # keeping all messages in the order they received
                            # for non-blocking call.
                            self._all.write(data)
                            self._ordered_all.append({"channel": channel,
                                                      "data": data})
                        if channel not in self._channels:
                            self._channels[channel] = data
                        else:
                            self._channels[channel] += data

    def run_forever(self, timeout=None):
        """Wait till connection is closed or timeout reached. Buffer any input
        received during this time."""
        if timeout:
            start = time.time()
            while self.is_open() and time.time() - start < timeout:
                self.update(timeout=(timeout - time.time() + start))
        else:
            while self.is_open():
                self.update(timeout=None)

    @property
    def returncode(self):
        """
        The return code, A None value indicates that the process hasn't
        terminated yet.
        """
        if self.is_open():
            return None
        else:
            err = self.read_channel(ERROR_CHANNEL)
            err = yaml.safe_load(err)
            if err['status'] == "Success":
                return 0
            return int(err['details']['causes'][0]['message'])

    def close(self, **kwargs):
        """
        close websocket connection.
        """
        self._connected = False
        if self.sock:
            self.sock.close(**kwargs)
def to_xml(self):
    """Convert this datapoint into a form suitable for pushing to device cloud

    An XML string will be returned that will contain all pieces of
    information set on this datapoint.  Values not set (e.g. quality)
    will be omitted.
    """
    out = StringIO()
    out.write("<DataPoint>")
    out.write("<streamId>{}</streamId>".format(self.get_stream_id()))
    out.write("<data>{}</data>".format(self.get_data()))
    conditional_write(out, "<description>{}</description>",
                      self.get_description())
    if self.get_timestamp() is not None:
        out.write("<timestamp>{}</timestamp>".format(
            isoformat(self.get_timestamp())))
    conditional_write(out, "<quality>{}</quality>", self.get_quality())
    if self.get_location() is not None:
        out.write("<location>%s</location>"
                  % ",".join(map(str, self.get_location())))
    conditional_write(out, "<streamType>{}</streamType>",
                      self.get_data_type())
    conditional_write(out, "<streamUnits>{}</streamUnits>",
                      self.get_units())
    out.write("</DataPoint>")
    return out.getvalue()
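# `conditional_write` is referenced above but not defined in this snippet; a
# minimal sketch consistent with its use (emit the element only when the
# value is set) might look like:
def conditional_write(stream, fmt, value):
    if value is not None:
        stream.write(fmt.format(value))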
def changelog(since, to, write, force):
    """
    Generates a markdown file containing the list of checks that changed for a
    given Agent release. Agent version numbers are derived by inspecting tags
    on `integrations-core`, so running this tool might give unexpected results
    if the repo is not up to date with the Agent release process.

    If neither `--since` nor `--to` is passed (the most common use case), the
    tool will generate the whole changelog since Agent version 6.3.0
    (before that point we don't have enough information to build the log).
    """
    agent_tags = get_agent_tags(since, to)

    # store the changes in a mapping {agent_version --> {check_name --> current_version}}
    changes_per_agent = OrderedDict()

    # to keep indexing easy, we run the loop off-by-one
    for i in range(1, len(agent_tags)):
        req_file_name = os.path.basename(get_agent_release_requirements())
        current_tag = agent_tags[i - 1]

        # Requirements for current tag
        file_contents = git_show_file(req_file_name, current_tag)
        catalog_now = parse_agent_req_file(file_contents)

        # Requirements for previous tag
        file_contents = git_show_file(req_file_name, agent_tags[i])
        catalog_prev = parse_agent_req_file(file_contents)

        changes_per_agent[current_tag] = OrderedDict()

        for name, ver in iteritems(catalog_now):
            # at some point in the git history, the requirements file erroneously
            # contained the folder name instead of the package name for each
            # check; let's be resilient
            old_ver = catalog_prev.get(name) \
                or catalog_prev.get(get_folder_name(name)) \
                or catalog_prev.get(get_package_name(name))

            # normalize the package name to the check_name
            if name.startswith(DATADOG_PACKAGE_PREFIX):
                name = get_folder_name(name)

            if old_ver and old_ver != ver:
                # determine whether the major version changed; compare as
                # integers, since a string comparison misorders e.g. 10 and 9
                breaking = int(old_ver.split('.')[0]) < int(ver.split('.')[0])
                changes_per_agent[current_tag][name] = (ver, breaking)
            elif not old_ver:
                # New integration
                changes_per_agent[current_tag][name] = (ver, False)

    # store the changelog in memory
    changelog_contents = StringIO()

    # prepare the links
    agent_changelog_url = 'https://github.com/DataDog/datadog-agent/blob/master/CHANGELOG.rst#{}'
    check_changelog_url = 'https://github.com/DataDog/integrations-core/blob/master/{}/CHANGELOG.md'

    # go through all the agent releases
    for agent, version_changes in iteritems(changes_per_agent):
        url = agent_changelog_url.format(agent.replace('.', ''))  # GitHub removes dots from the anchor
        changelog_contents.write('## Datadog Agent version [{}]({})\n\n'.format(agent, url))

        if not version_changes:
            changelog_contents.write(
                '* There were no integration updates for this version of the Agent.\n\n'
            )
        else:
            for name, ver in iteritems(version_changes):
                # get the "display name" for the check
                manifest_file = os.path.join(get_root(), name, 'manifest.json')
                if os.path.exists(manifest_file):
                    decoded = json.loads(read_file(manifest_file).strip(),
                                         object_pairs_hook=OrderedDict)
                    display_name = decoded.get('display_name')
                else:
                    display_name = name

                breaking_notice = " **BREAKING CHANGE**" if ver[1] else ""
                changelog_url = check_changelog_url.format(name)
                changelog_contents.write('* {} [{}]({}){}\n'.format(
                    display_name, ver[0], changelog_url, breaking_notice))

            # add an extra line to separate the release block
            changelog_contents.write('\n')

    # save the changelog on disk if --write was passed
    if write:
        dest = get_agent_changelog()
        # don't overwrite an existing file
        if os.path.exists(dest) and not force:
            msg = "Output file {} already exists, run the command again with --force to overwrite"
            abort(msg.format(dest))
        write_file(dest, changelog_contents.getvalue())
    else:
        echo_info(changelog_contents.getvalue())
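The integer cast in the breaking-change check matters because a plain string comparison misorders multi-digit major versions. A quick illustration:

# Lexicographic comparison gets multi-digit majors wrong:
assert '10' < '9'               # True, but 10 is not an older major than 9
assert int('10') > int('9')     # the numeric comparison used above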
def make_file(txt):
    """Return a file-like object positioned at the start of TXT."""
    f = StringIO()
    f.write(txt)
    f.seek(0)
    return f
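A typical use is wrapping a string so code that expects a readable file object can consume it:

f = make_file('hello\nworld\n')
print(f.readline())   # -> 'hello\n'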
def _mol_writer(data,
                fmt='sdf',
                filepath_or_buffer=None,
                update_properties=True,
                molecule_column=None,
                columns=None):
    """Universal writing function for private use.

    .. versionadded:: 0.3

    Parameters
    ----------
    data : pd.DataFrame or pd.Series
        Data containing the molecules to write.

    fmt : string
        The format of molecular file

    filepath_or_buffer : string or None
        File path

    update_properties : bool, optional (default=True)
        Switch to update properties from the DataFrames to the molecules
        while writing.

    molecule_column : string or None, optional (default='mol')
        Name of molecule column. If None the molecules will be skipped.

    columns : list or None, optional (default=None)
        A list of columns to write to file. If None then all available
        fields are written.
    """
    if filepath_or_buffer is None:
        out = StringIO()
    elif hasattr(filepath_or_buffer, 'write'):
        out = filepath_or_buffer
    else:
        out = oddt.toolkit.Outputfile(fmt, filepath_or_buffer, overwrite=True)
    if isinstance(data, pd.DataFrame):
        molecule_column = molecule_column or data._molecule_column
        for ix, row in data.iterrows():
            mol = row[molecule_column].clone
            if update_properties:
                new_data = row.to_dict()
                del new_data[molecule_column]
                mol.data.update(new_data)
            if columns:
                # copy the keys first: deleting while iterating over the
                # live view would raise a RuntimeError on Python 3
                for k in list(mol.data.keys()):
                    if k not in columns:
                        del mol.data[k]
            if filepath_or_buffer is None or hasattr(filepath_or_buffer, 'write'):
                out.write(mol.write(fmt))
            else:
                out.write(mol)
    elif isinstance(data, pd.Series):
        for mol in data:
            if filepath_or_buffer is None or hasattr(filepath_or_buffer, 'write'):
                out.write(mol.write(fmt))
            else:
                out.write(mol)
    if filepath_or_buffer is None:
        return out.getvalue()
    elif not hasattr(filepath_or_buffer, 'write'):  # don't close a foreign buffer
        out.close()
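A hedged usage sketch, assuming an oddt-backed DataFrame whose molecule column is named 'mol' (the names below are illustrative, not from the source):

# Serialize to an in-memory SDF string (filepath_or_buffer=None):
sdf_text = _mol_writer(df, fmt='sdf', molecule_column='mol')

# Or stream to an already-open text buffer:
with open('out.sdf', 'w') as fh:
    _mol_writer(df, fmt='sdf', filepath_or_buffer=fh, molecule_column='mol')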
def _strpoints(points):
    # Render an iterable of (x, y) pairs as "x,y x,y ..." with two decimals.
    c = StringIO()
    for x, y in points:
        c.write('%3.2f,%3.2f ' % (x, y))
    return c.getvalue()
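For example, a point list renders as space-separated pairs (note the trailing space):

>>> _strpoints([(1, 2), (3.456, 7.891)])
'1.00,2.00 3.46,7.89 '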
def format_args(self):
    # type: () -> unicode
    args = []
    last_kind = None
    for i, param in enumerate(self.parameters.values()):
        # skip first argument if subject is bound method
        if self.skip_first_argument and i == 0:
            continue

        arg = StringIO()

        # insert '*' between POSITIONAL args and KEYWORD_ONLY args::
        #     func(a, b, *, c, d):
        if param.kind == param.KEYWORD_ONLY and last_kind in (param.POSITIONAL_OR_KEYWORD,
                                                              param.POSITIONAL_ONLY,
                                                              None):
            args.append('*')

        if param.kind in (param.POSITIONAL_ONLY,
                          param.POSITIONAL_OR_KEYWORD,
                          param.KEYWORD_ONLY):
            arg.write(param.name)
            if param.annotation is not param.empty:
                if isinstance(param.annotation, string_types) and \
                        param.name in self.annotations:
                    arg.write(': ')
                    arg.write(self.format_annotation(self.annotations[param.name]))
                else:
                    arg.write(': ')
                    arg.write(self.format_annotation(param.annotation))
            if param.default is not param.empty:
                if param.annotation is param.empty:
                    arg.write('=')
                    arg.write(object_description(param.default))  # type: ignore
                else:
                    arg.write(' = ')
                    arg.write(object_description(param.default))  # type: ignore
        elif param.kind == param.VAR_POSITIONAL:
            arg.write('*')
            arg.write(param.name)
        elif param.kind == param.VAR_KEYWORD:
            arg.write('**')
            arg.write(param.name)

        args.append(arg.getvalue())
        last_kind = param.kind

    if self.return_annotation is inspect.Parameter.empty:  # type: ignore
        return '(%s)' % ', '.join(args)
    else:
        if 'return' in self.annotations:
            annotation = self.format_annotation(self.annotations['return'])
        else:
            annotation = self.format_annotation(self.return_annotation)
        return '(%s) -> %s' % (', '.join(args), annotation)
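Roughly, for a signature such as the one below, the method emits the annotated, keyword-only form including the bare '*' separator (illustrative only; it assumes the surrounding Sphinx documenter machinery is in place):

def sample(a, b, *, c: int = 5, **kw) -> str:
    ...

# format_args() would return: '(a, b, *, c: int = 5, **kw) -> str'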
def _build_tag(tag, hide_attributes):
    tag_el = _find_tag_el(tag)
    attributes = _find_attributes(tag)
    tag_help = StringIO()
    annotation_el = tag_el.find("{http://www.w3.org/2001/XMLSchema}annotation")
    text = annotation_el.find("{http://www.w3.org/2001/XMLSchema}documentation").text
    for line in text.splitlines():
        if line.startswith("$attribute_list:"):
            attributes_str, header_level = line.split(":")[1:3]
            attribute_names = attributes_str.split(",")
            header_level = int(header_level)
            text = text.replace(line, _build_attributes_table(
                tag,
                attributes,
                attribute_names=attribute_names,
                header_level=header_level))
        if line.startswith("$assertions"):
            assertions_tag = xmlschema_doc.find(
                "//{http://www.w3.org/2001/XMLSchema}complexType[@name='TestAssertions']")
            assertion_tag = xmlschema_doc.find(
                "//{http://www.w3.org/2001/XMLSchema}group[@name='TestAssertion']")
            assertions_buffer = StringIO()
            assertions_buffer.write(_doc_or_none(assertions_tag))
            assertions_buffer.write("\n\n")
            assertions_buffer.write("Child Element/Assertion | Details \n")
            assertions_buffer.write("--- | ---\n")
            elements = assertion_tag.findall(
                "{http://www.w3.org/2001/XMLSchema}choice/{http://www.w3.org/2001/XMLSchema}element")
            for element in elements:
                doc = _doc_or_none(element).strip()
                assertions_buffer.write("``%s`` | %s\n" % (element.attrib["name"], doc))
            text = text.replace(line, assertions_buffer.getvalue())

    tag_help.write(text)

    best_practices = _get_bp_link(annotation_el)
    if best_practices:
        tag_help.write("\n\n### Best Practices\n")
        tag_help.write("""
Find the Intergalactic Utilities Commission suggested best practices for this
element [here](%s).""" % best_practices)

    tag_help.write(_build_attributes_table(tag, attributes, hide_attributes))

    return tag_help.getvalue()
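For reference, the two $-directives expanded above take the following forms, as assumed from the parsing logic (the concrete attribute names are illustrative):

# $attribute_list:name,optional:4
#     -> replaced with an attribute table for 'name' and 'optional',
#        rendered with level-4 headers
# $assertions
#     -> replaced with a markdown table listing each test assertion
#        child element alongside its schema documentation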
def render(self, mode='human', close=False):
    if close:
        return
    board = self.state
    outfile = StringIO() if mode == 'ansi' else sys.stdout

    # column headers (the board is assumed square, hence shape[1] throughout)
    outfile.write(colored(' ' * 7))
    for j in range(board.shape[1]):
        outfile.write(colored(' ' + str(j + 1) + ' | ', 'grey'))
    outfile.write('\n')
    outfile.write(' ' * 5)
    outfile.write(colored('-' * (board.shape[1] * 6 - 1), "grey"))
    outfile.write('\n')

    # one row per board line: plane 2 marks empty cells, plane 0 black stones
    for i in range(board.shape[1]):
        outfile.write(colored(' ' + str(i + 1) + ' |', "grey"))
        for j in range(board.shape[1]):
            if board[2, i, j] == 1:
                outfile.write(colored(' O ', "blue"))
            elif board[0, i, j] == 1:
                outfile.write(colored(' B ', "green"))
            else:
                outfile.write(colored(' W ', "red"))
            outfile.write(colored('|', "grey"))
        outfile.write('\n')
        outfile.write(' ')
        outfile.write(colored('-' * (board.shape[1] * 7 - 1), "grey"))
        outfile.write('\n')

    if mode != 'human':
        return outfile
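A hedged usage sketch for the ANSI mode, assuming a Gym-style environment instance exposing this method (the name env is illustrative):

out = env.render(mode='ansi')   # hypothetical env instance
print(out.getvalue())           # a StringIO is returned when mode != 'human'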
def __call__(self, REQUEST=None, __ick__=None, src__=0, test__=0, **kw):
    """Call the database method

    The arguments to the method should be passed via keyword
    arguments, or in a single mapping object. If no arguments are
    given, and if the method was invoked through the Web, then the
    method will try to acquire and use the Web REQUEST object as
    the argument mapping.

    The returned value is a sequence of record objects.
    """
    __traceback_supplement__ = (SQLMethodTracebackSupplement, self)

    if REQUEST is None:
        if kw:
            REQUEST = kw
        else:
            if hasattr(self, 'REQUEST'):
                REQUEST = self.REQUEST
            else:
                REQUEST = {}

    # connection hook
    c = self.connection_id
    # for backwards compatibility
    hk = self.connection_hook
    # go get the connection hook and call it
    if hk:
        c = getattr(self, hk)()

    try:
        dbc = getattr(self, c)
    except AttributeError:
        raise AttributeError(
            'The database connection <em>%s</em> cannot be found.' % c)

    try:
        DB__ = dbc()
    except Exception:
        raise DatabaseError('%s is not connected to a database' % self.id)

    if hasattr(self, 'aq_parent'):
        p = self.aq_parent
    else:
        p = None

    argdata = self._argdata(REQUEST)
    argdata['sql_delimiter'] = '\0'
    argdata['sql_quote__'] = dbc.sql_quote__

    security = getSecurityManager()
    security.addContext(self)
    try:
        try:
            query = self.template(p, **argdata)
        except TypeError as msg:
            msg = str(msg)
            if msg.find('client') >= 0:
                raise NameError('"client" may not be used as an '
                                'argument name in this context')
            else:
                raise
    finally:
        security.removeContext(self)

    if src__:
        return query

    if self.cache_time_ > 0 and self.max_cache_ > 0:
        result = self._cached_result(DB__, query, self.max_rows_, c)
    else:
        result = DB__.query(query, self.max_rows_)

    if hasattr(self, '_v_brain'):
        brain = self._v_brain
    else:
        brain = self._v_brain = getBrain(self.class_file_, self.class_name_)

    if isinstance(result, type('')):
        f = StringIO()
        f.write(result)
        f.seek(0)
        result = File(f, brain, p)
    else:
        result = Results(result, brain, p)

    columns = result._searchable_result_columns()
    if test__ and columns != self._col:
        self._col = columns

    # If run in test mode, return both the query and results so
    # that the template doesn't have to be rendered twice!
    if test__:
        return query, result

    return result
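Calls typically arrive through Zope acquisition; a hedged sketch, with hypothetical object names:

# Keyword arguments become the DTML argument mapping.
rows = container.my_sql_method(name='Smith')

# src__=1 returns the rendered SQL text instead of executing it.
sql_text = container.my_sql_method(name='Smith', src__=1)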