def safe_header(self, name, value):
    if name in SENSITIVE_HEADERS:
        value = hashlib.sha1(value.encode("utf-8")).hexdigest()
        return (encodeutils.safe_decode(name), "{SHA1}%s" % value)
    else:
        return (encodeutils.safe_decode(name),
                encodeutils.safe_decode(value))
def main():
    CONF.register_cli_opt(command_opt)
    try:
        cfg_files = cfg.find_config_files(project='glance',
                                          prog='glance-registry')
        cfg_files.extend(cfg.find_config_files(project='glance',
                                               prog='glance-api'))
        cfg_files.extend(cfg.find_config_files(project='glance',
                                               prog='glance-manage'))
        config.parse_args(default_config_files=cfg_files,
                          usage="%(prog)s [options] <cmd>")
        log.setup('glance')
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)

    try:
        if CONF.command.action.startswith('db'):
            return CONF.command.action_fn()
        else:
            func_kwargs = {}
            for k in CONF.command.action_kwargs:
                v = getattr(CONF.command, 'action_kwarg_' + k)
                if v is None:
                    continue
                func_kwargs[k] = encodeutils.safe_decode(v)
            func_args = [encodeutils.safe_decode(arg)
                         for arg in CONF.command.action_args]
            return CONF.command.action_fn(*func_args, **func_kwargs)
    except exception.GlanceException as e:
        sys.exit("ERROR: %s" % utils.exception_to_str(e))
def _hmap_to_msgenv_kwargs(hmap):
    claim_id = hmap[b'c']
    if claim_id:
        claim_id = encodeutils.safe_decode(claim_id)
    else:
        claim_id = None

    # NOTE(kgriffs): Under Py3K, redis-py converts all strings
    # into binary. Woohoo!
    res = {
        'id': encodeutils.safe_decode(hmap[b'id']),
        'ttl': int(hmap[b't']),
        'created': int(hmap[b'cr']),
        'expires': int(hmap[b'e']),
        'client_uuid': encodeutils.safe_decode(hmap[b'u']),
        'claim_id': claim_id,
        'claim_expires': int(hmap[b'c.e']),
        'claim_count': int(hmap[b'c.c']),
        'delay_expires': int(hmap.get(b'd', 0))
    }

    checksum = hmap.get(b'cs')
    if checksum:
        res['checksum'] = encodeutils.safe_decode(hmap[b'cs'])

    return res
def log_curl_request(self, method, url, kwargs):
    curl = ['curl -i -X %s' % method]

    for (key, value) in kwargs['headers'].items():
        if key in ('X-Auth-Token', 'X-Auth-Key'):
            value = '*****'
        header = '-H \'%s: %s\'' % (encodeutils.safe_decode(key),
                                    encodeutils.safe_decode(value))
        curl.append(header)

    conn_params_fmt = [
        ('key_file', '--key %s'),
        ('cert_file', '--cert %s'),
        ('os_cacert', '--cacert %s'),
    ]
    for (key, fmt) in conn_params_fmt:
        value = self.ssl_connection_params.get(key)
        if value:
            curl.append(fmt % value)

    if self.ssl_connection_params.get('insecure'):
        curl.append('-k')

    if 'data' in kwargs:
        curl.append('-d \'%s\'' % kwargs['data'])

    curl.append('%s%s' % (self.endpoint, url))
    LOG.debug(' '.join(curl))
def main():
    CONF.register_cli_opt(command_opt)
    try:
        logging.register_options(CONF)
        cfg_files = cfg.find_config_files(project='daisy',
                                          prog='daisy-registry')
        cfg_files.extend(cfg.find_config_files(project='daisy',
                                               prog='daisy-api'))
        cfg_files.extend(cfg.find_config_files(project='daisy',
                                               prog='daisy-manage'))
        cfg_files.extend(cfg.find_config_files(project='daisy',
                                               prog='daisy-orchestration'))
        config.parse_args(default_config_files=cfg_files,
                          usage="%(prog)s [options] <cmd>")
        logging.setup(CONF, 'daisy')
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)

    try:
        if CONF.command.action.startswith('db'):
            return CONF.command.action_fn()
        else:
            func_kwargs = {}
            for k in CONF.command.action_kwargs:
                v = getattr(CONF.command, 'action_kwarg_' + k)
                if v is None:
                    continue
                if isinstance(v, six.string_types):
                    v = encodeutils.safe_decode(v)
                func_kwargs[k] = v
            func_args = [encodeutils.safe_decode(arg)
                         for arg in CONF.command.action_args]
            return CONF.command.action_fn(*func_args, **func_kwargs)
    except exception.DaisyException as e:
        sys.exit("ERROR: %s" % utils.exception_to_str(e))
def main():
    CONF.register_cli_opt(command_opt)
    if len(sys.argv) < 2:
        script_name = sys.argv[0]
        print("%s command action [<args>]" % script_name)
        print(_("Available commands:"))
        for command in COMMANDS:
            print(_("\t%s") % command)
        sys.exit(2)

    try:
        logging.register_options(CONF)
        cfg_files = cfg.find_config_files(project='searchlight',
                                          prog='searchlight')
        config.parse_args(default_config_files=cfg_files)
        config.set_config_defaults()
        logging.setup(CONF, 'searchlight')

        func_kwargs = {}
        for k in CONF.command.action_kwargs:
            v = getattr(CONF.command, 'action_kwarg_' + k)
            if v is None:
                continue
            if isinstance(v, six.string_types):
                v = encodeutils.safe_decode(v)
            func_kwargs[k] = v
        func_args = [encodeutils.safe_decode(arg)
                     for arg in CONF.command.action_args]
        return CONF.command.action_fn(*func_args, **func_kwargs)
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)
def _safe_header(self, name, value):
    if name in HTTPClient.SENSITIVE_HEADERS:
        encoded = value.encode("utf-8")
        hashed = hashlib.sha1(encoded)
        digested = hashed.hexdigest()
        return encodeutils.safe_decode(name), "{SHA1}%s" % digested
    else:
        return (encodeutils.safe_decode(name),
                encodeutils.safe_decode(value))
def _safe_header(self, name, value):
    if name in SENSITIVE_HEADERS:
        # because in python3 byte string handling is ... ug
        v = value.encode("utf-8")
        h = hashlib.sha1(v)
        d = h.hexdigest()
        return encodeutils.safe_decode(name), "{SHA1}%s" % d
    else:
        return (encodeutils.safe_decode(name),
                encodeutils.safe_decode(value))
def _safe_header(self, name, value):
    if name in ["X-Auth-Token", "X-Subject-Token"]:
        # because in python3 byte string handling is ... ug
        v = value.encode("utf-8")
        h = hashlib.sha1(v)
        d = h.hexdigest()
        return encodeutils.safe_decode(name), "{SHA1}%s" % d
    else:
        return (encodeutils.safe_decode(name),
                encodeutils.safe_decode(value))
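# Illustrative note (not part of the snippets above): with any of the
# _safe_header variants, a sensitive header value is replaced by its SHA1
# digest so the raw token never reaches the logs, while non-sensitive
# headers pass through with both name and value safely decoded:
#   self._safe_header('X-Auth-Token', 'super-secret-token')
#   -> ('X-Auth-Token', '{SHA1}<40-char-hex-digest>')
#   self._safe_header('Accept', 'application/json')
#   -> ('Accept', 'application/json')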
def paginate_measurements(measurements, uri, limit):
    parsed_uri = urlparse.urlparse(uri)

    self_link = build_base_uri(parsed_uri)
    self_link = encodeutils.safe_decode(self_link, 'utf-8')

    old_query_params = _get_old_query_params(parsed_uri)
    if old_query_params:
        self_link += '?' + '&'.join(old_query_params)

    if measurements:
        measurement_elements = []
        resource = {u'links': [{u'rel': u'self',
                                u'href': self_link}, ]}
        for measurement in measurements:
            if len(measurement['measurements']) >= limit:
                new_offset = ('_').join([
                    measurement['id'],
                    measurement['measurements'][limit - 1][0]])

                next_link = build_base_uri(parsed_uri)
                next_link = encodeutils.safe_decode(next_link, 'utf-8')

                new_query_params = [u'offset' + '=' + urlparse.quote(
                    new_offset.encode('utf8'), safe='')]

                _get_old_query_params_except_offset(new_query_params,
                                                    parsed_uri)
                if new_query_params:
                    next_link += '?' + '&'.join(new_query_params)

                resource[u'links'].append({u'rel': u'next',
                                           u'href': next_link})

                truncated_measurement = {
                    u'dimensions': measurement['dimensions'],
                    u'measurements': measurement['measurements'][:limit],
                    u'name': measurement['name'],
                    u'columns': measurement['columns'],
                    u'id': measurement['id']}

                measurement_elements.append(truncated_measurement)
                break
            else:
                limit -= len(measurement['measurements'])
                measurement_elements.append(measurement)

        resource[u'elements'] = measurement_elements
    else:
        resource = {u'links': [{u'rel': u'self',
                                u'href': self_link}],
                    u'elements': []}

    return resource
def encrypt(msg):
    '''Encrypt message with random key.

    :param msg: message to be encrypted
    :returns: encrypted msg and key to decrypt
    '''
    password = fernet.Fernet.generate_key()
    f = fernet.Fernet(password)
    key = f.encrypt(encodeutils.safe_encode(msg))
    return encodeutils.safe_decode(password), encodeutils.safe_decode(key)
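# A minimal decrypt counterpart (hypothetical helper, not part of the
# snippet above): a sketch assuming the (password, key) pair returned by
# encrypt() is passed back unchanged, using the same Fernet API.
def decrypt(password, key):
    f = fernet.Fernet(encodeutils.safe_encode(password))
    # Fernet.decrypt() expects and returns bytes, hence the round-trip
    # through safe_encode/safe_decode.
    return encodeutils.safe_decode(f.decrypt(encodeutils.safe_encode(key)))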
def log_http_response(resp, body=None):
    status = (resp.raw.version / 10.0, resp.status_code, resp.reason)
    dump = ["\nHTTP/%.1f %s %s" % status]
    headers = resp.headers.items()
    dump.extend(["%s: %s" % safe_header(k, v) for k, v in headers])
    dump.append("")
    if body:
        body = encodeutils.safe_decode(body)
        dump.extend([body, ""])
    LOG.debug("\n".join([encodeutils.safe_decode(x, errors="ignore")
                         for x in dump]))
def check_output(*args, **kwargs):
    kwargs["stderr"] = subprocess.STDOUT
    try:
        output = costilius.sp_check_output(*args, **kwargs)
    except subprocess.CalledProcessError as e:
        LOG.debug("failed cmd: '%s'" % e.cmd)
        LOG.debug("error output: '%s'" % encodeutils.safe_decode(e.output))
        raise

    LOG.debug("subprocess output: '%s'" % encodeutils.safe_decode(output))
    return output
def __init__(self, member_id, parsed_url, options):
    super(ConsulDriver, self).__init__(member_id, parsed_url, options)
    options = utils.collapse(options)

    self._host = parsed_url.hostname
    self._port = parsed_url.port or self.DEFAULT_PORT
    self._session_id = None
    self._session_name = encodeutils.safe_decode(member_id)
    self._ttl = int(options.get('ttl', self.DEFAULT_TTL))
    namespace = options.get('namespace', self.TOOZ_NAMESPACE)
    self._namespace = encodeutils.safe_decode(namespace)
    self._client = None
def check_output(*args, **kwargs):
    debug = kwargs.pop("debug", True)
    kwargs["stderr"] = subprocess.STDOUT
    try:
        output = subprocess.check_output(*args, **kwargs)
    except subprocess.CalledProcessError as e:
        LOG.error("Failed cmd: '%s'" % e.cmd)
        LOG.error("Error output: '%s'" % encodeutils.safe_decode(e.output))
        raise

    if debug:
        LOG.debug("Subprocess output: '%s'"
                  % encodeutils.safe_decode(output))

    return output
def _alarm_definition_create(self, tenant_id, name, expression,
                             description, severity, match_by,
                             alarm_actions, undetermined_actions,
                             ok_actions):
    try:
        sub_expr_list = (
            monasca_api.expression_parser.alarm_expr_parser.
            AlarmExprParser(expression).sub_expr_list)
    except (pyparsing.ParseException,
            pyparsing.ParseFatalException) as ex:
        LOG.exception(ex)
        title = u"Invalid alarm expression"
        msg = u"parser failed on expression '{}' at column {}: {}".format(
            encodeutils.safe_decode(expression, 'utf-8'),
            encodeutils.safe_decode(str(ex.column), 'utf-8'),
            encodeutils.safe_decode(ex.msg, 'utf-8'))
        raise HTTPUnprocessableEntityError(title, msg)

    self._validate_name_not_conflicting(tenant_id, name)

    alarm_definition_id = (
        self._alarm_definitions_repo.
        create_alarm_definition(tenant_id, name, expression,
                                sub_expr_list, description, severity,
                                match_by, alarm_actions,
                                undetermined_actions, ok_actions))

    self._send_alarm_definition_created_event(tenant_id,
                                              alarm_definition_id,
                                              name, expression,
                                              sub_expr_list, description,
                                              match_by)
    result = (
        {u'alarm_actions': alarm_actions,
         u'ok_actions': ok_actions,
         u'description': description,
         u'match_by': match_by,
         u'severity': severity,
         u'actions_enabled': True,
         u'undetermined_actions': undetermined_actions,
         u'expression': expression,
         u'id': alarm_definition_id,
         u'deterministic': is_definition_deterministic(expression),
         u'name': name})

    return result
def check(self, instance):
    host = instance.get('host', 'localhost')
    port = int(instance.get('port', 2181))
    timeout = float(instance.get('timeout', 3.0))
    dimensions = self._set_dimensions(
        {'component': 'zookeeper', 'service': 'zookeeper'}, instance)

    sock = socket.socket()
    sock.settimeout(timeout)
    buf = StringIO()
    chunk_size = 1024
    # try-finally and try-except to stay compatible with python 2.4
    try:
        try:
            # Connect to the zk client port and send the stat command
            sock.connect((host, port))
            sock.sendall(b'stat')

            # Read the response into a StringIO buffer
            chunk = encodeutils.safe_decode(sock.recv(chunk_size), 'utf-8')
            buf.write(chunk)
            num_reads = 1
            max_reads = 10000
            while chunk:
                if num_reads > max_reads:
                    # Safeguard against an infinite loop
                    raise Exception(
                        "Read %s bytes before exceeding max reads of %s. "
                        % (buf.tell(), max_reads))
                chunk = encodeutils.safe_decode(sock.recv(chunk_size),
                                                'utf-8')
                buf.write(chunk)
                num_reads += 1
        except socket.timeout:
            buf = None
    finally:
        sock.close()

    if buf is not None:
        # Parse the response
        metrics, new_dimensions = self.parse_stat(buf)
        if new_dimensions is not None:
            dimensions.update(new_dimensions.copy())

        # Write the data
        for metric, value in metrics:
            self.gauge(metric, value, dimensions=dimensions)
    else:
        # Reading from the client port timed out, track it as a metric
        self.increment('zookeeper.timeouts', dimensions=dimensions)
def extract_angular(fileobj, keywords, comment_tags, options):
    """Extract messages from angular template (HTML) files.

    It extracts messages from angular template (HTML) files that use the
    angular-gettext translate directive as per
    https://angular-gettext.rocketeer.be/

    :param fileobj: the file-like object the messages should be extracted
                    from
    :param keywords: This is a standard parameter so it is accepted but
                     ignored.
    :param comment_tags: This is a standard parameter so it is accepted
                         but ignored.
    :param options: Another standard parameter that is accepted but
                    ignored.
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: ``iterator``
    """
    parser = AngularGettextHTMLParser()

    for line in fileobj:
        parser.feed(encodeutils.safe_decode(line))

    for string in parser.strings:
        yield(string)
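# Usage sketch (illustrative; the module path and file pattern below are
# assumptions, not from the source): extract_angular follows Babel's
# extractor protocol, so it can be wired up through a mapping file with an
# [extractors] alias, e.g. in babel.cfg:
#
#   [extractors]
#   angular = mymodule.extract:extract_angular
#
#   [angular: **/templates/**.html]
#
# after which `pybabel extract -F babel.cfg .` routes matching HTML files
# through this function.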
def _http_log_request(self, url, method=None, data=None,
                      headers=None, logger=_logger):
    if not logger.isEnabledFor(logging.DEBUG):
        # NOTE(morganfainberg): This whole debug section is expensive,
        # there is no need to do the work if we're not going to emit a
        # debug log.
        return

    string_parts = ["REQ: curl -g -i"]

    # NOTE(jamielennox): None means let requests do its default validation
    # so we need to actually check that this is False.
    if self.verify is False:
        string_parts.append("--insecure")
    elif isinstance(self.verify, six.string_types):
        string_parts.append('--cacert "%s"' % self.verify)

    if method:
        string_parts.extend(["-X", method])

    string_parts.append(url)

    if headers:
        for header in six.iteritems(headers):
            string_parts.append('-H "%s: %s"'
                                % self._process_header(header))
    if data:
        string_parts.append("-d '%s'" % data)
    try:
        logger.debug(" ".join(string_parts))
    except UnicodeDecodeError:
        logger.debug("Replaced characters that could not be decoded"
                     " in log output, original caused UnicodeDecodeError")
        string_parts = [
            encodeutils.safe_decode(part, errors="replace")
            for part in string_parts]
        logger.debug(" ".join(string_parts))
def test_call(self):
    conf = mock.Mock()
    controller = versions.Controller(conf)
    environ = {
        'REQUEST_METHOD': 'GET',
        'SERVER_NAME': 'host',
        'SERVER_PORT': 8778,
        'SCRIPT_NAME': '/v1',
        'PATH_INFO': '/tenant_id/versions',
        'wsgi.url_scheme': 'http',
    }
    req = wsgi.Request(environ)
    expected_dict = {
        'versions': [{
            'id': 'v1.0',
            'status': 'CURRENT',
            'links': [{
                'rel': 'self',
                'href': 'http://host:8778/v1/'
            }]
        }]
    }
    expected_body = json.dumps(expected_dict)

    resp = controller(req)

    self.assertIsInstance(resp, webob.Response)
    self.assertEqual(expected_body, encodeutils.safe_decode(resp.body))
    self.assertEqual(http_client.MULTIPLE_CHOICES, resp.status_code)
    self.assertEqual('application/json', resp.content_type)
def _post_parse(self):
    # parse unknown entities
    for test_id in self._unknown_entities:
        # NOTE(andreykurilin): When whole TestCase is marked as skipped,
        # there is only one event with reason and status, so we should
        # modify all tests of TestCase manually.
        matcher = lambda i: i == test_id or i.startswith("%s." % test_id)
        known_ids = filter(matcher, self._tests)
        for id_ in known_ids:
            if self._tests[id_]["status"] == "init":
                self._tests[id_]["status"] = (
                    self._unknown_entities[test_id]["status"])
            if self._unknown_entities[test_id].get("reason"):
                self._tests[id_]["reason"] = (
                    self._unknown_entities[test_id]["reason"])
            elif self._unknown_entities[test_id].get("traceback"):
                self._tests[id_]["traceback"] = (
                    self._unknown_entities[test_id]["traceback"])

    # decode data
    for test_id in self._tests:
        for file_name in ["traceback", "reason"]:
            # FIXME(andreykurilin): decode fields based on mime_type
            if file_name in self._tests[test_id]:
                self._tests[test_id][file_name] = (
                    encodeutils.safe_decode(
                        self._tests[test_id][file_name]))

    self._is_parsed = True
def __call__(self, cmd, getjson=False, report_path=None, raw=False,
             suffix=None, extension=None, keep_old=False,
             write_report=False):
    """Call yardstick in the shell.

    :param cmd: yardstick command
    :param getjson: when yardstick prints JSON, set this to catch the
                    output already deserialized
    :param report_path: TODO: if present, the yardstick command and its
                        output will be written to a file with the passed
                        file name
    :param raw: don't write the command itself to the report file; only
                the output will be written
    """
    if not isinstance(cmd, list):
        cmd = cmd.split(" ")
    try:
        output = encodeutils.safe_decode(subprocess.check_output(
            self.args + cmd, stderr=subprocess.STDOUT, env=self.env))

        if getjson:
            return json.loads(output)
        return output
    except subprocess.CalledProcessError as e:
        raise e
def __call__(self, cmd, getjson=False, report_path=None, raw=False,
             suffix=None, extension=None, keep_old=False,
             write_report=True):
    """Call rally in the shell.

    :param cmd: rally command
    :param getjson: when rally prints JSON, set this to catch the output
                    already deserialized
    :param report_path: if present, the rally command and its output will
                        be written to a file with the passed file name
    :param raw: don't write the command itself to the report file; only
                the output will be written
    """
    if not isinstance(cmd, list):
        cmd = cmd.split(" ")
    try:
        output = encodeutils.safe_decode(subprocess.check_output(
            self.args + cmd, stderr=subprocess.STDOUT))

        if write_report:
            if not report_path:
                report_path = self.gen_report_path(
                    suffix=suffix, extension=extension, keep_old=keep_old)
            with open(report_path, "a") as rep:
                if not raw:
                    rep.write("\n%s:\n" % " ".join(self.args + cmd))
                rep.write("%s\n" % output)

        if getjson:
            return json.loads(output)
        return output
    except subprocess.CalledProcessError as e:
        raise RallyCmdError(e.returncode, e.output)
def _cache_schemas(self, options, client, home_dir='~/.glanceclient'):
    homedir = os.path.expanduser(home_dir)
    path_prefix = homedir
    if options.os_auth_url:
        hash_host = hashlib.sha1(options.os_auth_url.encode('utf-8'))
        path_prefix = os.path.join(path_prefix, hash_host.hexdigest())
    if not os.path.exists(path_prefix):
        try:
            os.makedirs(path_prefix)
        except OSError as e:
            # This keeps glanceclient from crashing if it can't write to
            # ~/.glanceclient, which may happen on some env (for me,
            # it happens in Jenkins, as Glanceclient can't write to
            # /var/lib/jenkins).
            msg = '%s' % e
            print(encodeutils.safe_decode(msg), file=sys.stderr)
    resources = ['image', 'metadefs/namespace', 'metadefs/resource_type']
    schema_file_paths = [os.path.join(path_prefix, x + '_schema.json')
                         for x in ['image', 'namespace', 'resource_type']]

    failed_download_schema = 0
    for resource, schema_file_path in zip(resources, schema_file_paths):
        if (not os.path.exists(schema_file_path)) or options.get_schema:
            try:
                schema = client.schemas.get(resource)
                with open(schema_file_path, 'w') as f:
                    f.write(json.dumps(schema.raw()))
            except Exception:
                # NOTE(esheffield) do nothing here, we'll get a message
                # later if the schema is missing
                failed_download_schema += 1
                pass

    return failed_download_schema >= len(resources)
def print_list(objs, fields, formatters=None, field_settings=None):
    formatters = formatters or {}
    field_settings = field_settings or {}
    pt = prettytable.PrettyTable([f for f in fields], caching=False)
    pt.align = 'l'

    for o in objs:
        row = []
        for field in fields:
            if field in field_settings:
                for setting, value in six.iteritems(field_settings[field]):
                    setting_dict = getattr(pt, setting)
                    setting_dict[field] = value

            if field in formatters:
                row.append(formatters[field](o))
            else:
                field_name = field.lower().replace(' ', '_')
                if isinstance(o, dict):
                    data = o[field_name] if field_name in o else None
                else:
                    data = getattr(o, field_name, None) or ''
                row.append(data)
        pt.add_row(row)

    print(encodeutils.safe_decode(pt.get_string()))
def find_resource(manager, name_or_id):
    """Helper for the _find_* methods."""
    # first try to get entity as integer id
    try:
        if isinstance(name_or_id, int) or name_or_id.isdigit():
            return manager.get(int(name_or_id))
    except exc.NotFound:
        pass

    # now try to get entity as uuid
    try:
        # This must be unicode for Python 3 compatibility.
        # If you pass a bytestring to uuid.UUID, you will get a TypeError
        uuid.UUID(encodeutils.safe_decode(name_or_id))
        return manager.get(name_or_id)
    except (ValueError, exc.NotFound):
        pass

    # finally try to find entity by name
    matches = list(manager.list(filters={'name': name_or_id}))
    num_matches = len(matches)
    if num_matches == 0:
        msg = ("No %s with a name or ID of '%s' exists." %
               (manager.resource_class.__name__.lower(), name_or_id))
        raise exc.CommandError(msg)
    elif num_matches > 1:
        msg = ("Multiple %s matches found for '%s', use an ID to be more"
               " specific." % (manager.resource_class.__name__.lower(),
                               name_or_id))
        raise exc.CommandError(msg)
    else:
        return matches[0]
def _get_claim(self, message_id):
    """Gets minimal claim doc for a message.

    :returns: {'id': cid, 'expires': ts} IFF the message is claimed,
        and that claim has not expired.
    """
    claim = self._client.hmget(message_id, 'c', 'c.e')

    if claim == [None, None]:
        # NOTE(kgriffs): message_id was not found
        return None

    info = {
        # NOTE(kgriffs): A "None" claim is serialized as an empty str
        'id': encodeutils.safe_decode(claim[0]) or None,
        'expires': int(claim[1]),
    }

    # Is the message claimed?
    now = timeutils.utcnow_ts()
    if info['id'] and (now < info['expires']):
        return info

    # Not claimed
    return None
def log_curl_request(self, method, url, headers, data, kwargs):
    curl = ["curl -g -i -X %s" % method]

    headers = copy.deepcopy(headers)
    headers.update(self.session.headers)

    for (key, value) in six.iteritems(headers):
        header = "-H '%s: %s'" % safe_header(key, value)
        curl.append(header)

    if not self.session.verify:
        curl.append("-k")
    else:
        if isinstance(self.session.verify, six.string_types):
            curl.append(" --cacert %s" % self.session.verify)

    if self.session.cert:
        curl.append(" --cert %s --key %s" % self.session.cert)

    if data and isinstance(data, six.string_types):
        curl.append("-d '%s'" % data)

    curl.append(url)

    msg = " ".join([encodeutils.safe_decode(item, errors="ignore")
                    for item in curl])
    LOG.debug(msg)
def _cache_schemas(self, options, home_dir='~/.glanceclient'):
    homedir = expanduser(home_dir)
    if not os.path.exists(homedir):
        try:
            os.makedirs(homedir)
        except OSError as e:
            # This keeps glanceclient from crashing if it can't write to
            # ~/.glanceclient, which may happen on some env (for me,
            # it happens in Jenkins, as Glanceclient can't write to
            # /var/lib/jenkins).
            msg = '%s' % e
            print(encodeutils.safe_decode(msg), file=sys.stderr)

    resources = ['image', 'metadefs/namespace', 'metadefs/resource_type']
    schema_file_paths = [homedir + os.sep + x + '_schema.json'
                         for x in ['image', 'namespace', 'resource_type']]

    client = None
    for resource, schema_file_path in zip(resources, schema_file_paths):
        if (not os.path.exists(schema_file_path)) or options.get_schema:
            try:
                if not client:
                    client = self._get_versioned_client('2', options,
                                                        force_auth=True)
                schema = client.schemas.get(resource)
                with open(schema_file_path, 'w') as f:
                    f.write(json.dumps(schema.raw()))
            except Exception:
                # NOTE(esheffield) do nothing here, we'll get a message
                # later if the schema is missing
                pass
def decrypt(method, data, encryption_key=None):
    if method is None or data is None:
        return None

    decryptor = getattr(sys.modules[__name__], method)
    value = decryptor(data, encryption_key)
    if value is not None:
        return encodeutils.safe_decode(value, 'utf-8')
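# Usage sketch (illustrative; the method name below is hypothetical):
# `method` must name a decryption function defined in this same module,
# since it is resolved via getattr(sys.modules[__name__], method):
#   plaintext = decrypt('my_decrypt_v1', stored_blob, encryption_key)
# Passing None for either `method` or `data` short-circuits to None.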
def _http_request(self, url, method, **kwargs):
    """Send an http request with the specified characteristics.

    Wrapper around requests.request to handle tasks such as
    setting headers and error handling.
    """
    # Copy the kwargs so we can reuse the original in case of redirects
    kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
    kwargs['headers'].setdefault('User-Agent', USER_AGENT)
    if self.auth_token:
        kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)
    else:
        kwargs['headers'].update(self.credentials_headers())
    if self.auth_url:
        kwargs['headers'].setdefault('X-Auth-Url', self.auth_url)
    if self.region_name:
        kwargs['headers'].setdefault('X-Region-Name', self.region_name)
    if self.include_pass and 'X-Auth-Key' not in kwargs['headers']:
        kwargs['headers'].update(self.credentials_headers())
    if osprofiler_web:
        kwargs['headers'].update(osprofiler_web.get_trace_id_headers())

    self.log_curl_request(method, url, kwargs)

    if self.cert_file and self.key_file:
        kwargs['cert'] = (self.cert_file, self.key_file)

    if self.verify_cert is not None:
        kwargs['verify'] = self.verify_cert

    if self.timeout is not None:
        kwargs['timeout'] = float(self.timeout)

    # Allow caller to specify not to follow redirects, in which case we
    # just return the redirect response. Useful for using stacks:lookup.
    redirect = kwargs.pop('redirect', True)

    # requests does not follow the RFC for these redirects: on a 302 it
    # should replay a DELETE/POST/PUT with the same method, but it sends
    # a GET instead. We therefore bypass its redirect handling and follow
    # redirects ourselves. Hopefully this gets fixed in a future point
    # version, i.e. 3.x.
    # See issue: https://github.com/kennethreitz/requests/issues/1704
    allow_redirects = False

    # Use fully qualified URL from response header for redirects
    if not parse.urlparse(url).netloc:
        url = self.endpoint_url + url

    try:
        resp = requests.request(method, url,
                                allow_redirects=allow_redirects, **kwargs)
    except socket.gaierror as e:
        message = (_("Error finding address for %(url)s: %(e)s") %
                   {'url': self.endpoint_url + url, 'e': e})
        raise exc.InvalidEndpoint(message=message)
    except (socket.error, socket.timeout) as e:
        endpoint = self.endpoint
        message = (_("Error communicating with %(endpoint)s %(e)s") %
                   {'endpoint': endpoint, 'e': e})
        raise exc.CommunicationError(message=message)

    self.log_http_response(resp)

    txt_content = encodeutils.safe_decode(resp.content, 'utf-8')
    if not ('X-Auth-Key' in kwargs['headers']) and (
            resp.status_code == 401 or
            (resp.status_code == 500 and "(HTTP 401)" in txt_content)):
        raise exc.HTTPUnauthorized(_("Authentication failed: %s")
                                   % resp.content)
    elif 400 <= resp.status_code < 600:
        raise exc.from_response(resp)
    elif resp.status_code in (301, 302, 305):
        # Redirected. Reissue the request to the new location,
        # unless caller specified redirect=False
        if redirect:
            location = resp.headers.get('location')
            if not location:
                message = _("Location not returned with redirect")
                raise exc.InvalidEndpoint(message=message)
            resp = self._http_request(location, method, **kwargs)
    elif resp.status_code == 300:
        raise exc.from_response(resp)

    return resp
def next(self):
    curr = next(self.queue_iter)
    queue = self.client.hmget(curr, ['c', 'm'])
    return self.denormalizer(queue, encodeutils.safe_decode(curr))
def _encoded_xml(self):
    return encodeutils.safe_decode(self._domain.XMLDesc(0))
def safe_decode(string):
    return string and encodeutils.safe_decode(string, errors='ignore')
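# Behavior note (illustrative, not from the source): the `string and ...`
# guard passes falsy inputs straight through, so callers don't need to
# special-case None or empty values:
#   safe_decode(None)   -> None
#   safe_decode(b'')    -> b''   (falsy, returned as-is)
#   safe_decode(b'abc') -> u'abc'  (undecodable bytes dropped by
#                                   errors='ignore')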
def do_create(self, obj):
    """Create a server for the node object.

    :param obj: The node object for which a server will be created.
    """
    kwargs = {}
    for key in self.KEYS:
        if self.properties[key] is not None:
            kwargs[key] = self.properties[key]

    image_ident = self.properties[self.IMAGE]
    if image_ident is not None:
        image = self._validate_image(obj, image_ident, 'create')
        kwargs.pop(self.IMAGE)
        kwargs['imageRef'] = image.id

    flavor_ident = self.properties[self.FLAVOR]
    flavor = self._validate_flavor(obj, flavor_ident, 'create')
    kwargs.pop(self.FLAVOR)
    kwargs['flavorRef'] = flavor.id

    keypair_name = self.properties[self.KEY_NAME]
    if keypair_name:
        keypair = self._validate_keypair(obj, keypair_name, 'create')
        kwargs['key_name'] = keypair.name

    kwargs['name'] = obj.name

    metadata = self._build_metadata(obj, {})
    kwargs['metadata'] = metadata

    jj_vars = {}
    cluster_data = self._get_cluster_data(obj)
    kwargs['networks'] = [{'uuid': cluster_data[self.PRIVATE_NETWORK]}]

    # Get user_data parameters from metadata
    jj_vars['KUBETOKEN'] = cluster_data[self.KUBEADM_TOKEN]
    jj_vars['MASTER_FLOATINGIP'] = cluster_data[
        self.KUBE_MASTER_FLOATINGIP]

    block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2]
    if block_device_mapping_v2 is not None:
        kwargs['block_device_mapping_v2'] = self._resolve_bdm(
            obj, block_device_mapping_v2, 'create')

    # user_data = self.properties[self.USER_DATA]
    user_data = base.loadScript('./scripts/master.sh')
    if user_data is not None:
        # Use jinja2 to replace variables defined in user_data
        try:
            jj_t = jinja2.Template(user_data)
            user_data = jj_t.render(**jj_vars)
        except (jinja2.exceptions.UndefinedError, ValueError) as ex:
            # TODO(anyone) Handle jinja2 error
            pass
        ud = encodeutils.safe_encode(user_data)
        kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud))

    sgid = self._get_security_group(obj)
    kwargs['security_groups'] = [{'name': sgid}]

    server = None
    resource_id = None
    try:
        server = self.compute(obj).server_create(**kwargs)
        self.compute(obj).wait_for_server(server.id)
        server = self.compute(obj).server_get(server.id)
        self._update_master_ip(obj, server.addresses[''][0]['addr'])
        self._associate_floatingip(obj, server)
        LOG.info("Created master node: %s" % server.id)
        return server.id
    except exc.InternalError as ex:
        if server and server.id:
            resource_id = server.id
        raise exc.EResourceCreation(type='server',
                                    message=six.text_type(ex),
                                    resource_id=resource_id)
def make_string(self):
    result = ''
    for line in self.content:
        result += encodeutils.safe_decode(line, 'utf-8')
    return result
def _process_stack(self, request, action, action_args,
                   content_type, body, accept):
    """Implement the processing stack."""
    # Get the implementing method
    try:
        meth, extensions = self.get_method(request, action,
                                           content_type, body)
    except (AttributeError, TypeError):
        return Fault(webob.exc.HTTPNotFound())
    except KeyError as ex:
        msg = _("There is no such action: %s") % ex.args[0]
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    if body:
        msg = _("Action: '%(action)s', calling method: %(meth)s, body: "
                "%(body)s") % {'action': action,
                               'body': six.text_type(body, 'utf-8'),
                               'meth': str(meth)}
        LOG.debug(strutils.mask_password(msg))
    else:
        LOG.debug("Calling method '%(meth)s'", {'meth': str(meth)})

    # Now, deserialize the request body...
    try:
        contents = self._get_request_content(body, request)
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Update the action args
    action_args.update(contents)

    project_id = action_args.pop("project_id", None)
    context = request.environ.get('nova.context')
    if (context and project_id and (project_id != context.project_id)):
        msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
                " doesn't match Context's project_id"
                " '%(context_project_id)s'") % \
            {'project_id': project_id,
             'context_project_id': context.project_id}
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    response = None
    try:
        with ResourceExceptionHandler():
            action_result = self.dispatch(meth, request, action_args)
    except Fault as ex:
        response = ex

    if not response:
        # No exceptions; convert action_result into a ResponseObject
        resp_obj = None
        if type(action_result) is dict or action_result is None:
            resp_obj = ResponseObject(action_result)
        elif isinstance(action_result, ResponseObject):
            resp_obj = action_result
        else:
            response = action_result

        # Run post-processing extensions
        if resp_obj:
            # Do a preserialize to set up the response object
            if hasattr(meth, 'wsgi_code'):
                resp_obj._default_code = meth.wsgi_code
            # Process extensions
            response = self.process_extensions(extensions, resp_obj,
                                               request, action_args)

        if resp_obj and not response:
            response = resp_obj.serialize(request, accept)

    if hasattr(response, 'headers'):
        for hdr, val in list(response.headers.items()):
            if six.PY2:
                # In Py2.X Headers must be byte strings
                response.headers[hdr] = utils.utf8(val)
            else:
                # In Py3.X Headers must be utf-8 strings
                response.headers[hdr] = encodeutils.safe_decode(
                    utils.utf8(val))

        if not request.api_version_request.is_null():
            response.headers[API_VERSION_REQUEST_HEADER] = \
                'compute ' + request.api_version_request.get_string()
            response.headers[LEGACY_API_VERSION_REQUEST_HEADER] = \
                request.api_version_request.get_string()
            response.headers.add('Vary', API_VERSION_REQUEST_HEADER)
            response.headers.add('Vary', LEGACY_API_VERSION_REQUEST_HEADER)

    return response
def __init__(self, auth_token=None, user=None, project=None,
             domain=None, user_domain=None, project_domain=None,
             is_admin=None, read_only=False, show_deleted=False,
             request_id=None, auth_url=None, trusts=None,
             user_name=None, project_name=None, domain_name=None,
             user_domain_name=None, project_domain_name=None,
             auth_token_info=None, region_name=None, roles=None,
             password=None, **kwargs):
    '''Initializer of request context.'''
    # We still have 'tenant' param because oslo_context still uses it.
    super(RequestContext, self).__init__(
        auth_token=auth_token, user=user, tenant=project,
        domain=domain, user_domain=user_domain,
        project_domain=project_domain, roles=roles,
        read_only=read_only, show_deleted=show_deleted,
        request_id=request_id)

    # request_id might be a byte array
    self.request_id = encodeutils.safe_decode(self.request_id)

    # we save an additional 'project' internally for use
    self.project = project

    # Session for DB access
    self._session = None

    self.auth_url = auth_url
    self.trusts = trusts

    self.user_name = user_name
    self.project_name = project_name
    self.domain_name = domain_name
    self.user_domain_name = user_domain_name
    self.project_domain_name = project_domain_name

    self.auth_token_info = auth_token_info
    self.region_name = region_name
    self.roles = roles or []
    self.password = password

    # Check user is admin or not
    if is_admin is None:
        self.is_admin = policy.enforce(self, 'context_is_admin',
                                       target={'project': self.project},
                                       do_raise=False)
    else:
        self.is_admin = is_admin
def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
         raise_on_error=True, timeout=3600,
         keep_stdin_open=False, pty=False):
    transport = client.get_transport()
    session = transport.open_session()
    if pty:
        session.get_pty()
    session.exec_command(cmd)
    start_time = time.time()

    # encode on transmit, decode on receive
    data_to_send = encodeutils.safe_encode("", incoming='utf-8')
    stderr_data = None

    # If we have data to be sent to stdin then `select' should also
    # check for stdin availability.
    if stdin and not stdin.closed:
        writes = [session]
    else:
        writes = []

    while True:
        # Block until data can be read/write.
        e = select.select([session], writes, [session], 1)[2]

        if session.recv_ready():
            data = encodeutils.safe_decode(session.recv(4096), 'utf-8')
            self.log.debug("stdout: %r", data)
            if stdout is not None:
                stdout.write(data)
            continue

        if session.recv_stderr_ready():
            stderr_data = encodeutils.safe_decode(
                session.recv_stderr(4096), 'utf-8')
            self.log.debug("stderr: %r", stderr_data)
            if stderr is not None:
                stderr.write(stderr_data)
            continue

        if session.send_ready():
            if stdin is not None and not stdin.closed:
                if not data_to_send:
                    stdin_txt = stdin.read(4096)
                    if stdin_txt is None:
                        stdin_txt = ''
                    data_to_send = encodeutils.safe_encode(
                        stdin_txt, incoming='utf-8')
                    if not data_to_send:
                        # we may need to keep stdin open
                        if not keep_stdin_open:
                            stdin.close()
                            session.shutdown_write()
                            writes = []
                if data_to_send:
                    sent_bytes = session.send(data_to_send)
                    # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
                    data_to_send = data_to_send[sent_bytes:]

        if session.exit_status_ready():
            break

        if timeout and (time.time() - timeout) > start_time:
            message = ('Timeout executing command %(cmd)s on host '
                       '%(host)s' % {"cmd": cmd, "host": self.host})
            raise exceptions.SSHTimeout(error_msg=message)
        if e:
            raise exceptions.SSHError(error_msg='Socket error')

    exit_status = session.recv_exit_status()
    if exit_status != 0 and raise_on_error:
        fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
        details = fmt % {"cmd": cmd, "status": exit_status}
        if stderr_data:
            details += " Last stderr data: '%s'." % stderr_data
        raise exceptions.SSHError(error_msg=details)
    return exit_status
def paginate_statistics(statistics, uri, limit):
    parsed_uri = urlparse.urlparse(uri)

    self_link = build_base_uri(parsed_uri)

    old_query_params = _get_old_query_params(parsed_uri)
    if old_query_params:
        self_link += '?' + '&'.join(old_query_params)

    self_link = encodeutils.safe_decode(self_link, 'utf-8')

    if statistics:
        statistic_elements = []
        resource = {u'links': [{u'rel': u'self',
                                u'href': self_link}]}
        for statistic in statistics:
            stat_id = statistic['id']
            if len(statistic['statistics']) >= limit:
                # The cassandra impl uses both id and timestamp to
                # paginate in group by
                if 'end_time' in statistic:
                    new_offset = '_'.join([stat_id,
                                           statistic['end_time']])
                    del statistic['end_time']
                else:
                    new_offset = (
                        statistic['statistics'][limit - 1][0])

                next_link = build_base_uri(parsed_uri)

                new_query_params = [u'offset' + '=' + urlparse.quote(
                    new_offset.encode('utf8'), safe='')]

                _get_old_query_params_except_offset(new_query_params,
                                                    parsed_uri)
                if new_query_params:
                    next_link += '?' + '&'.join(new_query_params)

                next_link = encodeutils.safe_decode(next_link, 'utf-8')

                resource[u'links'].append({u'rel': u'next',
                                           u'href': next_link})

                truncated_statistic = {
                    u'dimensions': statistic['dimensions'],
                    u'statistics': statistic['statistics'][:limit],
                    u'name': statistic['name'],
                    u'columns': statistic['columns'],
                    u'id': statistic['id']}

                statistic_elements.append(truncated_statistic)
                break
            else:
                limit -= len(statistic['statistics'])
                if 'end_time' in statistic:
                    del statistic['end_time']
                statistic_elements.append(statistic)

        resource[u'elements'] = statistic_elements
    else:
        resource = {u'links': [{u'rel': u'self',
                                u'href': self_link}],
                    u'elements': []}

    return resource
def get_backup_args():
    defaults = DEFAULT_PARAMS.copy()

    class FreezerConfig(object):
        def __init__(self, args):
            self.__dict__.update(args)

    cli_options = dict([(x, y) for x, y in CONF.iteritems()
                        if y is not None])
    defaults.update(cli_options)

    conf = None
    if CONF.get('config'):
        conf = freezer_config.Config.parse(CONF.get('config'))
        # force log_config_append to always exist in defaults even if not
        # provided.
        defaults['log_config_append'] = None
        defaults.update(conf.default)

    if defaults['log_file']:
        CONF.set_override('log_file', defaults['log_file'])

    CONF.set_override('default_log_levels', _DEFAULT_LOG_LEVELS)

    if not CONF.get('log_file'):
        log_file = None
        for file_name in ['/var/log/freezer-agent/freezer-agent.log',
                          '/var/log/freezer.log']:
            try:
                log_file = prepare_logging(file_name)
            except IOError:
                pass

        if not log_file:
            # Set default working directory to ~/.freezer. If the
            # directory does not exist it is created
            work_dir = os.path.join(home, '.freezer')
            if not os.path.exists(work_dir):
                try:
                    os.makedirs(work_dir)
                    log_file = prepare_logging(
                        os.path.join(work_dir, 'freezer.log'))
                except (OSError, IOError) as err_msg:
                    # This keeps freezer-agent from crashing if it can't
                    # write to ~/.freezer, which may happen on some env
                    # (for me, it happens in Jenkins, as freezer-agent
                    # can't write to /var/lib/jenkins).
                    print(encodeutils.safe_decode('{}'.format(err_msg)),
                          file=sys.stderr)

        if log_file:
            CONF.set_default('log_file', log_file)
        else:
            LOG.warning("log file cannot be created. Freezer will proceed"
                        " with default stdout and stderr")

    backup_args = FreezerConfig(defaults)
    if CONF.get('config'):
        backup_args.__dict__['config'] = CONF.get('config')

    # Set default working directory to ~/.freezer. If the directory
    # does not exist it is created
    work_dir = os.path.join(home, '.freezer')
    backup_args.__dict__['work_dir'] = work_dir
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except (OSError, IOError) as err_msg:
            # This keeps freezer-agent from crashing if it can't write to
            # ~/.freezer, which may happen on some env (for me, it happens
            # in Jenkins, as freezer-agent can't write to
            # /var/lib/jenkins).
            print(encodeutils.safe_decode('{}'.format(err_msg)),
                  file=sys.stderr)

    # If hostname is not set, hostname of the current node will be used
    if not backup_args.hostname:
        backup_args.__dict__['hostname'] = socket.gethostname()

    # If we have provided --proxy then overwrite the system HTTP_PROXY
    # and HTTPS_PROXY
    if backup_args.proxy:
        utils.alter_proxy(backup_args.proxy)

    # MySQLdb object
    backup_args.__dict__['mysql_db_inst'] = ''
    backup_args.__dict__['storages'] = None
    if conf and conf.storages:
        backup_args.__dict__['storages'] = conf.storages

    # Windows volume
    backup_args.__dict__['shadow'] = ''
    backup_args.__dict__['shadow_path'] = ''
    backup_args.__dict__['file_name'] = ''
    if winutils.is_windows():
        if backup_args.path_to_backup:
            backup_args.__dict__['windows_volume'] = \
                backup_args.path_to_backup[:3]

    backup_media = 'fs'
    if backup_args.cinder_vol_id:
        backup_media = 'cinder'
    elif backup_args.cindernative_vol_id or \
            backup_args.cindernative_backup_id:
        backup_media = 'cindernative'
    elif backup_args.engine_name == 'nova' and (backup_args.project_id or
                                                backup_args.nova_inst_id):
        backup_media = 'nova'
    elif backup_args.cinderbrick_vol_id:
        backup_media = 'cinderbrick'

    backup_args.__dict__['backup_media'] = backup_media

    backup_args.__dict__['time_stamp'] = None

    if (backup_args.upload_limit != -1 or
            backup_args.download_limit != -1) and \
            not winutils.is_windows():
        # handle --config option with tmp config file
        if backup_args.config:
            conf_file = tempfile.NamedTemporaryFile(prefix='freezer_job_',
                                                    delete=False)
            # remove the limits from the new file
            if 'upload_limit' in conf.default:
                conf.default.pop('upload_limit')
            elif 'download_limit' in conf.default:
                conf.default.pop('download_limit')

            utils.save_config_to_file(conf.default, conf_file, 'default')
            # replace the original file with the tmp one
            conf_index = sys.argv.index('--config') + 1
            sys.argv[conf_index] = conf_file.name

        # if limits provided from cli remove it !
        if '--upload-limit' in sys.argv:
            index = sys.argv.index('--upload-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)
        if '--download-limit' in sys.argv:
            index = sys.argv.index('--download-limit')
            sys.argv.pop(index)
            sys.argv.pop(index)

        # locate trickle
        trickle_executable = distspawn.find_executable('trickle')
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(sys.path))
        if trickle_executable is None:
            trickle_executable = distspawn.find_executable(
                'trickle', path=":".join(os.environ.get('PATH')))

        if trickle_executable:
            LOG.info("Info: Starting trickle ...")
            trickle_command = '{0} -d {1} -u {2} '.\
                format(trickle_executable,
                       getattr(backup_args, 'download_limit') or -1,
                       getattr(backup_args, 'upload_limit') or -1)
            backup_args.__dict__['trickle_command'] = trickle_command
            if backup_args.config:
                backup_args.__dict__['tmp_file'] = conf_file.name

            # maintain env variable not to get into infinite loop
            if "tricklecount" in os.environ:
                tricklecount = int(os.environ.get("tricklecount", 1))
                tricklecount += 1
                os.environ["tricklecount"] = str(tricklecount)
            else:
                os.environ["tricklecount"] = str(1)
        else:
            LOG.warning("Trickle not found. Switching to normal mode"
                        " without limiting bandwidth")
            if backup_args.config:
                # remove index tmp_file from backup arguments dict
                utils.delete_file(conf_file.name)

    return backup_args
def get_fsid(self):
    with RADOSClient(self) as client:
        return encodeutils.safe_decode(client.cluster.get_fsid())
def _process_stack(self, request, action, action_args,
                   content_type, body, accept):
    """Implement the processing stack."""
    # Get the implementing method
    try:
        meth, extensions = self.get_method(request, action,
                                           content_type, body)
    except (AttributeError, TypeError):
        return Fault(webob.exc.HTTPNotFound())
    except KeyError as ex:
        msg = _("There is no such action: %s") % ex.args[0]
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    if body:
        decoded_body = encodeutils.safe_decode(body, errors='ignore')
        msg = ("Action: '%(action)s', calling method: %(meth)s, body: "
               "%(body)s") % {'action': action,
                              'body': six.text_type(decoded_body),
                              'meth': six.text_type(meth)}
        LOG.debug(strutils.mask_password(msg))
    else:
        LOG.debug("Calling method '%(meth)s'",
                  {'meth': six.text_type(meth)})

    # Now, deserialize the request body...
    try:
        if content_type:
            contents = self.deserialize(meth, content_type, body)
        else:
            contents = {}
    except exception.InvalidContentType:
        msg = _("Unsupported Content-Type")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Update the action args
    action_args.update(contents)

    project_id = action_args.pop("project_id", None)
    context = request.environ.get('cinder.context')
    if (context and project_id and (project_id != context.project_id)):
        msg = _("Malformed request url")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Run pre-processing extensions
    response, post = self.pre_process_extensions(extensions,
                                                 request, action_args)

    if not response:
        try:
            with ResourceExceptionHandler():
                action_result = self.dispatch(meth, request, action_args)
        except Fault as ex:
            response = ex

    if not response:
        # No exceptions; convert action_result into a ResponseObject
        resp_obj = None
        if isinstance(action_result, dict) or action_result is None:
            resp_obj = ResponseObject(action_result)
        elif isinstance(action_result, ResponseObject):
            resp_obj = action_result
        else:
            response = action_result

        # Run post-processing extensions
        if resp_obj:
            _set_request_id_header(request, resp_obj)
            # Do a preserialize to set up the response object
            serializers = getattr(meth, 'wsgi_serializers', {})
            resp_obj._bind_method_serializers(serializers)
            if hasattr(meth, 'wsgi_code'):
                resp_obj._default_code = meth.wsgi_code
            resp_obj.preserialize(accept, self.default_serializers)

            # Process post-processing extensions
            response = self.post_process_extensions(post, resp_obj,
                                                    request, action_args)

        if resp_obj and not response:
            response = resp_obj.serialize(request, accept,
                                          self.default_serializers)

    try:
        msg_dict = dict(url=request.url, status=response.status_int)
        msg = _LI("%(url)s returned with HTTP %(status)d")
    except AttributeError as e:
        msg_dict = dict(url=request.url, e=e)
        msg = _LI("%(url)s returned a fault: %(e)s")

    LOG.info(msg, msg_dict)

    if hasattr(response, 'headers'):
        for hdr, val in response.headers.items():
            # Headers must be utf-8 strings
            val = utils.convert_str(val)
            response.headers[hdr] = val

        if (not request.api_version_request.is_null() and
                not _is_legacy_endpoint(request)):
            response.headers[API_VERSION_REQUEST_HEADER] = (
                VOLUME_SERVICE + ' ' +
                request.api_version_request.get_string())
            response.headers['Vary'] = API_VERSION_REQUEST_HEADER

    return response
def print_err(msg):
    print(encodeutils.safe_decode(msg), file=sys.stderr)
def metrics_statistics(self, tenant_id, region, name, dimensions,
                       start_timestamp, end_timestamp, statistics,
                       period, offset, limit, merge_metrics_flag,
                       group_by):
    if not period:
        period = 300
    else:
        period = int(period)

    series_list = self.measurement_list(tenant_id, region, name,
                                        dimensions, start_timestamp,
                                        end_timestamp, offset, None,
                                        merge_metrics_flag, group_by)

    json_statistics_list = []

    if not series_list:
        return json_statistics_list

    statistics = [stat.lower() for stat in statistics]

    columns = [u'timestamp']
    columns.extend([x for x in ['avg', 'min', 'max', 'count', 'sum']
                    if x in statistics])

    start_time = datetime.utcfromtimestamp(start_timestamp)
    if end_timestamp:
        end_time = datetime.utcfromtimestamp(end_timestamp)
    else:
        end_time = datetime.utcnow()

    for series in series_list:
        if limit <= 0:
            break

        measurements = series['measurements']
        if not measurements:
            continue

        first_measure = measurements[0]
        first_measure_start_time = MetricsRepository._parse_time_string(
            first_measure[0])

        # Skip blank intervals at the beginning; find the start time of
        # the first stat period that is not empty.
        stat_start_time = start_time + timedelta(
            seconds=((first_measure_start_time - start_time).seconds /
                     period) * period)

        stats_list = []
        stats_count = 0
        stats_sum = 0
        stats_min = stats_max = first_measure[1]

        for measurement in series['measurements']:
            time_stamp = MetricsRepository._parse_time_string(
                measurement[0])
            value = measurement[1]

            if (time_stamp - stat_start_time).seconds >= period:
                stat = MetricsRepository._create_stat(
                    statistics, stat_start_time, stats_count,
                    stats_sum, stats_min, stats_max)
                stats_list.append(stat)
                limit -= 1
                if limit <= 0:
                    break

                # initialize the new stat period
                stats_sum = value
                stats_count = 1
                stats_min = value
                stats_max = value
                stat_start_time += timedelta(seconds=period)
            else:
                stats_min = min(stats_min, value)
                stats_max = max(stats_max, value)
                stats_count += 1
                stats_sum += value

        if stats_count:
            stat = MetricsRepository._create_stat(statistics,
                                                  stat_start_time,
                                                  stats_count, stats_sum,
                                                  stats_min, stats_max)
            stats_list.append(stat)
            limit -= 1

        stats_end_time = (stat_start_time + timedelta(seconds=period) -
                          timedelta(milliseconds=1))
        if stats_end_time > end_time:
            stats_end_time = end_time

        statistic = {u'name': encodeutils.safe_decode(name, 'utf-8'),
                     u'id': series['id'],
                     u'dimensions': series['dimensions'],
                     u'columns': columns,
                     u'statistics': stats_list,
                     u'end_time': self._isotime_msec(stats_end_time)}

        json_statistics_list.append(statistic)

    return json_statistics_list
def test_do_create(self):
    cc = mock.Mock()
    nc = mock.Mock()
    profile = server.ServerProfile('t', self.spec)
    profile._computeclient = cc
    profile._networkclient = nc
    self._stubout_profile(profile, mock_image=True, mock_flavor=True,
                          mock_keypair=True, mock_net=True)
    node_obj = mock.Mock(id='FAKE_NODE_ID', index=123,
                         cluster_id='FAKE_CLUSTER_ID',
                         data={
                             'placement': {
                                 'zone': 'AZ1',
                                 'servergroup': 'SERVER_GROUP_1'
                             }
                         })
    node_obj.name = 'TEST_SERVER'
    cc.server_create.return_value = mock.Mock(id='FAKE_ID')

    server_id = profile.do_create(node_obj)

    attrs = dict(
        adminPass='******',
        availability_zone='AZ1',
        config_drive=False,
        flavorRef='FAKE_FLAVOR_ID',
        imageRef='FAKE_IMAGE_ID',
        key_name='FAKE_KEYNAME',
        metadata={
            'cluster_id': 'FAKE_CLUSTER_ID',
            'cluster_node_id': 'FAKE_NODE_ID',
            'cluster_node_index': '123',
            'meta var': 'meta val'
        },
        name='FAKE_SERVER_NAME',
        networks=[{
            'fixed_ip': 'FAKE_IP',
            'port': 'FAKE_PORT',
            'uuid': 'FAKE_NETWORK_ID',
        }],
        personality=[{
            'path': '/etc/motd',
            'contents': 'foo'
        }],
        scheduler_hints={
            'same_host': 'HOST_ID',
            'group': 'SERVER_GROUP_1',
        },
        security_groups=[{'name': 'HIGH_SECURITY_GROUP'}],
        user_data='FAKE_USER_DATA',
    )

    ud = encodeutils.safe_encode('FAKE_USER_DATA')
    attrs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud))
    attrs['OS-DCF:diskConfig'] = 'AUTO'

    cc.server_create.assert_called_once_with(**attrs)
    self.assertEqual('FAKE_ID', server_id)
def run(argv, categories):
    parser = lambda subparsers: _add_command_parsers(categories,
                                                     subparsers)
    category_opt = cfg.SubCommandOpt("category",
                                     title="Command categories",
                                     help="Available categories",
                                     handler=parser)

    CONF.register_cli_opt(category_opt)

    try:
        CONF(argv[1:], project="rally", version=version.version_string())
        logging.setup("rally")
        if not CONF.get("log_config_append"):
            # The below lines disable noise from the requests module. The
            # standard way would be to make such settings on the root
            # rally logger, but the current oslo code doesn't support
            # such an interface. So we use a 'hacking' way to avoid INFO
            # logs from the requests module when the user didn't give a
            # specific log configuration. We can remove this hack once
            # oslo.log grows such an interface.
            LOG.debug("INFO logs from urllib3 and requests module are "
                      "hidden.")
            requests_log = logging.getLogger("requests").logger
            requests_log.setLevel(logging.WARNING)
            urllib3_log = logging.getLogger("urllib3").logger
            urllib3_log.setLevel(logging.WARNING)

            # NOTE(wtakase): This is for suppressing boto error logging.
            LOG.debug("ERROR log from boto module is hidden.")
            boto_log = logging.getLogger("boto").logger
            boto_log.setLevel(logging.CRITICAL)
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid]
                          + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))

        print(_("Please re-run %s as root.") % argv[0])
        return (2)

    if CONF.category.name == "version":
        print(version.version_string())
        return (0)

    if CONF.category.name == "bash-completion":
        print(_generate_bash_completion_script())
        return (0)

    fn = CONF.category.action_fn
    fn_args = [encodeutils.safe_decode(arg)
               for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, "action_kwarg_" + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = encodeutils.safe_decode(v)
        fn_kwargs[k] = v

    # call the action with the remaining arguments
    # check arguments
    try:
        validate_args(fn, *fn_args, **fn_kwargs)
    except MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever.
        # It is long, and tells you a lot of things you probably don't
        # want to know if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print("Missing arguments:")
        for missing in e.missing:
            for arg in fn.args:
                if arg[1].get("dest", "").endswith(missing):
                    print(" " + arg[0][0])
                    break
        return (1)

    try:
        utils.load_plugins("/opt/rally/plugins/")
        utils.load_plugins(os.path.expanduser("~/.rally/plugins/"))
        validate_deprecated_args(argv, fn)

        if getattr(fn, "_suppress_warnings", False):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ret = fn(*fn_args, **fn_kwargs)
        else:
            ret = fn(*fn_args, **fn_kwargs)
        return (ret)
    except (IOError, TypeError, ValueError,
            exceptions.DeploymentNotFound, exceptions.TaskNotFound,
            jsonschema.ValidationError) as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        return 1
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
def __str__(self):
    if self.hidden():
        return super(JsonParam, self).__str__()
    return encodeutils.safe_decode(jsonutils.dumps(self.value()))
def _check_no_zwsp(self, tpl, raw):
    matches = encodeutils.safe_decode(raw).find(u"\u200B")
    self.assertEqual(
        matches, -1,
        "Found zero width space characters in file %s" % (tpl))
def _process_stack(self, request, action, action_args,
                   content_type, body, accept):
    """Implement the processing stack."""
    # Get the implementing method
    try:
        meth, extensions = self.get_method(request, action,
                                           content_type, body)
    except (AttributeError, TypeError):
        ex = exception.ConvertedException(exception.NotFound())
        return Fault(ex)
    except KeyError as ex:
        ex = exception.ConvertedException(
            exception.NoSuchAction(ex.args[0]))
        return Fault(ex)
    except exception.MalformedRequestBody as ex:
        ex = exception.ConvertedException(ex)
        return Fault(ex)

    try:
        method_name = meth.__qualname__
    except AttributeError:
        method_name = 'Controller: %s Method: %s' % (
            six.text_type(self.controller), meth.__name__)

    if body:
        decoded_body = encodeutils.safe_decode(body, errors='ignore')
        msg = ("Action: '%(action)s', calling method: %(meth)s, body: "
               "%(body)s") % {'action': action,
                              'body': decoded_body,
                              'meth': method_name}
        LOG.debug(strutils.mask_password(msg))
    else:
        LOG.debug("Calling method '%(meth)s'", {'meth': method_name})

    # Now, deserialize the request body...
    try:
        if content_type:
            contents = self.deserialize(meth, content_type, body)
        else:
            contents = {}
    except exception.InvalidContentType as ex:
        ex = exception.ConvertedException(ex)
        return Fault(ex)
    except exception.MalformedRequestBody as ex:
        ex = exception.ConvertedException(ex)
        return Fault(ex)

    # Update the action args
    action_args.update(contents)

    project_id = action_args.pop("project_id", None)
    context = request.environ.get('delfin.context')
    if (context and project_id and (project_id != context.project_id)):
        ex = exception.ConvertedException(
            exception.MalformedRequestUrl())
        return Fault(ex)

    # Run pre-processing extensions
    response, post = self.pre_process_extensions(extensions,
                                                 request, action_args)

    if not response:
        try:
            with ResourceExceptionHandler():
                action_result = self.dispatch(meth, request, action_args)
        except Fault as ex:
            response = ex

    if not response:
        # No exceptions; convert action_result into a ResponseObject
        resp_obj = None
        if type(action_result) is dict or action_result is None:
            resp_obj = ResponseObject(action_result)
        elif isinstance(action_result, ResponseObject):
            resp_obj = action_result
        else:
            response = action_result

        # Run post-processing extensions
        if resp_obj:
            _set_request_id_header(request, resp_obj)
            # Do a preserialize to set up the response object
            serializers = getattr(meth, 'wsgi_serializers', {})
            resp_obj._bind_method_serializers(serializers)
            if hasattr(meth, 'wsgi_code'):
                resp_obj._default_code = meth.wsgi_code
            resp_obj.preserialize(accept, self.default_serializers)

            # Process post-processing extensions
            response = self.post_process_extensions(post, resp_obj,
                                                    request, action_args)

        if resp_obj and not response:
            response = resp_obj.serialize(request, accept,
                                          self.default_serializers)

    try:
        msg_dict = dict(url=request.url, status=response.status_int)
        msg = _("%(url)s returned with HTTP %(status)s") % msg_dict
    except AttributeError as e:
        msg_dict = dict(url=request.url, e=e)
        msg = _("%(url)s returned a fault: %(e)s") % msg_dict

    LOG.info(msg)

    return response
def find_resource(manager, name_or_id, **find_args):
    """Look for resource in a given manager.

    Used as a helper for the _find_* methods.
    Example:

    .. code-block:: python

        def _find_hypervisor(cs, hypervisor):
            # Get a hypervisor by name or ID.
            return cliutils.find_resource(cs.hypervisors, hypervisor)
    """
    # first try to get entity as integer id
    try:
        return manager.get(int(name_or_id))
    except (TypeError, ValueError, exceptions.NotFound):
        pass

    # now try to get entity as uuid
    try:
        if six.PY2:
            tmp_id = encodeutils.safe_encode(name_or_id)
        else:
            tmp_id = encodeutils.safe_decode(name_or_id)

        if uuidutils.is_uuid_like(tmp_id):
            return manager.get(tmp_id)
    except (TypeError, ValueError, exceptions.NotFound):
        pass

    # for str id which is not uuid
    if getattr(manager, 'is_alphanum_id_allowed', False):
        try:
            return manager.get(name_or_id)
        except exceptions.NotFound:
            pass

    try:
        try:
            return manager.find(human_id=name_or_id, **find_args)
        except exceptions.NotFound:
            pass

        # finally try to find entity by name
        try:
            resource = getattr(manager, 'resource_class', None)
            name_attr = resource.NAME_ATTR if resource else 'name'
            kwargs = {name_attr: name_or_id}
            kwargs.update(find_args)
            return manager.find(**kwargs)
        except exceptions.NotFound:
            msg = _("No %(name)s with a name or "
                    "ID of '%(name_or_id)s' exists.") % {
                "name": manager.resource_class.__name__.lower(),
                "name_or_id": name_or_id
            }
            raise exceptions.CommandError(msg)
    except exceptions.NoUniqueMatch:
        msg = _("Multiple %(name)s matches found for "
                "'%(name_or_id)s', use an ID to be more specific.") % {
            "name": manager.resource_class.__name__.lower(),
            "name_or_id": name_or_id
        }
        raise exceptions.CommandError(msg)
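Callers usually wrap find_resource in small per-resource helpers, exactly as the docstring example shows. Another hedged illustration with hypothetical names (cs and its flavors manager stand in for a real client object):

def _find_flavor(cs, flavor):
    # Get a flavor by name or ID; falls through the integer-id, uuid,
    # human_id, and name lookups inside find_resource.
    return find_resource(cs.flavors, flavor)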
def gc(self):
    """Garbage-collect expired message data.

    Not all message data can be automatically expired. This method
    cleans up the remainder.

    :returns: Number of messages removed
    """
    claim_ctrl = self.driver.claim_controller
    client = self._client

    num_removed = 0
    offset_msgsets = 0

    while True:
        # NOTE(kgriffs): Iterate across all message sets; there will
        # be one set of message IDs per queue.
        msgset_keys = client.zrange(MSGSET_INDEX_KEY,
                                    offset_msgsets,
                                    offset_msgsets + GC_BATCH_SIZE - 1)
        if not msgset_keys:
            break

        offset_msgsets += len(msgset_keys)

        for msgset_key in msgset_keys:
            msgset_key = encodeutils.safe_decode(msgset_key)

            # NOTE(kgriffs): Drive the claim controller GC from
            # here, because we already know the queue and project
            # scope.
            queue, project = utils.descope_message_ids_set(msgset_key)
            claim_ctrl._gc(queue, project)

            offset_mids = 0

            while True:
                # NOTE(kgriffs): Look up each message in the message
                # set, see if it has expired, and if so, remove it
                # from msgset.
                mids = client.zrange(msgset_key, offset_mids,
                                     offset_mids + GC_BATCH_SIZE - 1)

                if not mids:
                    break

                offset_mids += len(mids)

                # NOTE(kgriffs): If redis expired the message, it will
                # not exist, so all we have to do is remove mid from
                # the msgset collection.
                with client.pipeline() as pipe:
                    for mid in mids:
                        pipe.exists(mid)

                    mid_exists_flags = pipe.execute()

                with client.pipeline() as pipe:
                    for mid, exists in zip(mids, mid_exists_flags):
                        if not exists:
                            pipe.zrem(msgset_key, mid)
                            num_removed += 1

                    pipe.execute()

    return num_removed
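The two-pipeline pattern in the inner loop (one round trip to probe key existence, a second to prune dead members) is useful on its own. A minimal standalone sketch against redis-py, assuming a reachable Redis and that sorted-set members double as key names, as the message IDs do above:

import redis


def prune_dead_members(client, zset_key, batch=100):
    """Remove sorted-set members whose backing keys have expired."""
    removed = 0
    offset = 0

    while True:
        members = client.zrange(zset_key, offset, offset + batch - 1)
        if not members:
            return removed

        offset += len(members)

        # First round trip: test existence of every member's key.
        with client.pipeline() as pipe:
            for member in members:
                pipe.exists(member)
            flags = pipe.execute()

        # Second round trip: drop the members whose keys are gone.
        with client.pipeline() as pipe:
            for member, exists in zip(members, flags):
                if not exists:
                    pipe.zrem(zset_key, member)
                    removed += 1
            pipe.execute()

# Usage against a local Redis ('some_msgset_key' is illustrative):
# client = redis.StrictRedis()
# prune_dead_members(client, some_msgset_key)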
def run(argv, categories):
    parser = lambda subparsers: _add_command_parsers(categories, subparsers)
    category_opt = cfg.SubCommandOpt("category",
                                     title="Command categories",
                                     help="Available categories",
                                     handler=parser)
    CONF.register_cli_opt(category_opt)
    help_msg = ("Additional custom plugin locations. Multiple files or "
                "directories may be specified. All plugins in the specified"
                " directories and subdirectories will be imported. Plugins in"
                " /opt/rally/plugins and ~/.rally/plugins will always be "
                "imported.")
    CONF.register_cli_opt(
        cfg.ListOpt("plugin-paths",
                    default=os.environ.get("RALLY_PLUGIN_PATHS"),
                    help=help_msg))

    try:
        rapi = api.API(config_args=argv[1:], skip_db_check=True)
    except exceptions.RallyException as e:
        print(e)
        return 2

    if CONF.category.name == "version":
        print(CONF.version)
        return 0

    if CONF.category.name == "bash-completion":
        print(_generate_bash_completion_script())
        return 0

    fn = CONF.category.action_fn
    fn_args = [encodeutils.safe_decode(arg)
               for arg in CONF.category.action_args]
    # The api instance is always the first argument.
    fn_args.insert(0, rapi)
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, "action_kwarg_" + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = encodeutils.safe_decode(v)
        fn_kwargs[k] = v

    # Call the action with the remaining arguments, checking them first.
    try:
        validate_args(fn, *fn_args, **fn_kwargs)
    except MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print("Missing arguments:")
        for missing in e.missing:
            for arg in fn.args:
                if arg[1].get("dest", "").endswith(missing):
                    print(" " + arg[0][0])
                    break
        return 1

    try:
        validate_deprecated_args(argv, fn)

        # Skip the db check for db and plugin commands.
        if CONF.category.name not in ("db", "plugin"):
            rapi.check_db_revision()

        if getattr(fn, "_suppress_warnings", False):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ret = fn(*fn_args, **fn_kwargs)
        else:
            ret = fn(*fn_args, **fn_kwargs)
        return ret
    except (IOError, TypeError, ValueError,
            exceptions.RallyException, jsonschema.ValidationError) as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        return 1
    except sqlalchemy.exc.OperationalError as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        print("Looks like Rally can't connect to its DB.")
        print("Make sure that the connection string in rally.conf "
              "is correct:")
        print(CONF.database.connection)
        return 1
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
def _paths_join(*args):
    pieces = []
    for arg in args:
        pieces.append(encodeutils.safe_decode(arg))

    return "/".join(pieces)
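Behavior at a glance: bytes and text arguments both come out as text, joined with forward slashes:

>>> _paths_join(b"v2", "queues", u"my-queue")
'v2/queues/my-queue'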
def resolve_static_data(self, snippet):
    try:
        return self.t.parse(self, snippet)
    except Exception as ex:
        raise exception.StackValidationFailed(
            message=encodeutils.safe_decode(six.text_type(ex)))
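The six.text_type(ex) / safe_decode pairing is a common idiom for guaranteeing a text error message on both Python 2 and 3. In isolation:

>>> try:
...     raise ValueError("snippet is not valid")
... except Exception as ex:
...     encodeutils.safe_decode(six.text_type(ex))
'snippet is not valid'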
def _value_as_text(cls, value):
    return encodeutils.safe_decode(jsonutils.dumps(value))
def exit(msg='', exit_code=1):
    if msg:
        print(encodeutils.safe_decode(msg), file=sys.stderr)
    sys.exit(exit_code)
def get_comma_separated_str_as_list(comma_separated_str):
    if not comma_separated_str:
        return []
    else:
        return encodeutils.safe_decode(comma_separated_str,
                                       'utf-8').split(',')
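Examples of the round trip (note the helper only splits; it does not strip whitespace around items):

>>> get_comma_separated_str_as_list("eth0,eth1")
['eth0', 'eth1']
>>> get_comma_separated_str_as_list(b"eth0,eth1")
['eth0', 'eth1']
>>> get_comma_separated_str_as_list("")
[]
>>> get_comma_separated_str_as_list(None)
[]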
def __init__(self, cmd, code, output):
    self.command = cmd
    self.code = code
    self.output = encodeutils.safe_decode(output)
    self.msg = "Command: %s Code: %d Output: %s\n" % (
        self.command, self.code, self.output)
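For context, a hedged sketch of the full exception class this initializer could belong to; the class name ProcessExecutionError and the Exception base are assumptions for illustration, not taken from the source:

from oslo_utils import encodeutils


class ProcessExecutionError(Exception):  # name assumed for illustration
    def __init__(self, cmd, code, output):
        self.command = cmd
        self.code = code
        # Decode so the message stays text even when the command
        # emitted bytes on stdout/stderr.
        self.output = encodeutils.safe_decode(output)
        self.msg = "Command: %s Code: %d Output: %s\n" % (
            self.command, self.code, self.output)
        super(ProcessExecutionError, self).__init__(self.msg)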