def __call__(self, target, creds, enforcer):
    """Check http: rules by calling to a remote server.

    This example implementation simply verifies that the response
    is exactly 'True'.
    """
    url = ("http:" + self.match) % target
    data = {"target": jsonutils.dumps(target),
            "credentials": jsonutils.dumps(creds)}
    post_data = urlparse.urlencode(data)
    f = urlrequest.urlopen(url, post_data)
    return f.read() == "True"
def __call__(self, target, creds, enforcer):
    """Check http: rules by calling to a remote server.

    This example implementation simply verifies that the response
    is exactly 'True'.
    """
    url = ('http:' + self.match) % target
    data = {'target': jsonutils.dumps(target),
            'credentials': jsonutils.dumps(creds)}
    post_data = urllib.urlencode(data)
    f = urllib2.urlopen(url, post_data)
    return f.read() == "True"
def _check_http(brain, match_kind, match, target_dict, cred_dict):
    """Check http: rules by calling to a remote server.

    This example implementation simply verifies that the response is
    exactly 'True'. A custom brain using response codes could easily
    be implemented.
    """
    url = 'http:' + (match % target_dict)
    data = {'target': jsonutils.dumps(target_dict),
            'credentials': jsonutils.dumps(cred_dict)}
    post_data = urllib.urlencode(data)
    f = urllib2.urlopen(url, post_data)
    return f.read() == "True"
def __call__(self, target, creds):
    """Check http: rules by calling to a remote server.

    This example implementation simply verifies that the response
    is exactly 'True'.
    """
    url = ('http:' + self.match) % target
    data = {'target': jsonutils.dumps(target),
            'credentials': jsonutils.dumps(creds)}
    post_data = urllib.urlencode(data)
    f = urllib2.urlopen(url, post_data)
    return f.read() == "True"
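# A minimal, hypothetical sketch of the http: check protocol used by the
# variants above (the URL template and dict keys are illustrative, not from a
# real deployment): the enforcer POSTs JSON-encoded 'target' and 'credentials'
# form fields and grants access only when the response body is exactly "True".
import json
from urllib import parse, request

def http_check(url_template, target, creds):
    url = url_template % target
    post_data = parse.urlencode({'target': json.dumps(target),
                                 'credentials': json.dumps(creds)}).encode()
    # assumption: a policy endpoint actually listens at this URL
    with request.urlopen(url, post_data) as f:
        return f.read() == b"True"

# http_check('http://policy.example.com/%(tenant_id)s',
#            {'tenant_id': 'demo'}, {'roles': ['admin']})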
def _pack_json_msg(self, msg):
    """Qpid cannot serialize dicts containing strings longer than 65535
    characters.  This function dumps the message content to a JSON
    string, which Qpid is able to handle.

    :param msg: May be either a Qpid Message object or a bare dict.
    :returns: A Qpid Message with its content field JSON encoded.
    """
    try:
        msg.content = jsonutils.dumps(msg.content)
    except AttributeError:
        # Need to have a Qpid message so we can set the content_type.
        msg = qpid_messaging.Message(jsonutils.dumps(msg))
    msg.content_type = JSON_CONTENT_TYPE
    return msg
def serialize_msg(raw_msg):
    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
    # information about this format.
    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}
    return msg
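# Hypothetical sketch of the envelope shape serialize_msg() builds. The key
# names and version string below are assumptions for illustration; they are
# not defined anywhere in this snippet.
import json

_VERSION_KEY = 'oslo.version'      # assumed envelope key
_MESSAGE_KEY = 'oslo.message'      # assumed envelope key
_RPC_ENVELOPE_VERSION = '2.0'      # assumed version string

raw_msg = {'method': 'echo', 'args': {'value': 42}}
envelope = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
            _MESSAGE_KEY: json.dumps(raw_msg)}
# The payload travels as an opaque JSON string inside the envelope:
assert json.loads(envelope[_MESSAGE_KEY]) == raw_msg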
def format(self, record):
    message = {"message": record.getMessage(),
               "asctime": self.formatTime(record, self.datefmt),
               "name": record.name,
               "msg": record.msg,
               "args": record.args,
               "levelname": record.levelname,
               "levelno": record.levelno,
               "pathname": record.pathname,
               "filename": record.filename,
               "module": record.module,
               "lineno": record.lineno,
               "funcname": record.funcName,
               "created": record.created,
               "msecs": record.msecs,
               "relative_created": record.relativeCreated,
               "thread": record.thread,
               "thread_name": record.threadName,
               "process_name": record.processName,
               "process": record.process,
               "traceback": None}

    if hasattr(record, "extra"):
        message["extra"] = record.extra

    if record.exc_info:
        message["traceback"] = self.formatException(record.exc_info)

    return jsonutils.dumps(message)
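# Hypothetical wiring sketch for a JSON log formatter like format() above,
# using only the stdlib (json stands in for jsonutils; the subset of record
# fields is illustrative).
import json
import logging

class DemoJSONFormatter(logging.Formatter):
    def format(self, record):
        return json.dumps({"message": record.getMessage(),
                           "name": record.name,
                           "levelname": record.levelname})

handler = logging.StreamHandler()
handler.setFormatter(DemoJSONFormatter())
log = logging.getLogger("demo")
log.addHandler(handler)
log.warning("structured %s", "output")  # emits one JSON object per record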
def encode(self, version, target, json_msg):
    """This is the main encoding function.

    It takes a target and a message and returns a tuple consisting of a
    JSON serialized metadata object, a JSON serialized (and optionally
    encrypted) message, and a signature.

    :param version: the current envelope version
    :param target: The name of the target service (usually with hostname)
    :param json_msg: a serialized json message object
    """
    ticket = self._get_ticket(target)

    metadata = jsonutils.dumps({'source': self._name,
                                'destination': target,
                                'timestamp': time.time(),
                                'nonce': _get_nonce(),
                                'esek': ticket.esek,
                                'encryption': self._encrypt})

    message = json_msg
    if self._encrypt:
        message = self._crypto.encrypt(ticket.ekey, message)

    signature = self._crypto.sign(ticket.skey,
                                  version + metadata + message)

    return (metadata, message, signature)
def get_ticket(self, source, target, crypto, key):
    # prepare metadata
    md = {'requestor': source,
          'target': target,
          'timestamp': time.time(),
          'nonce': struct.unpack('Q', os.urandom(8))[0]}
    metadata = base64.b64encode(jsonutils.dumps(md))

    # sign metadata
    signature = crypto.sign(key, metadata)

    # HTTP request
    reply = self._get_ticket({'metadata': metadata,
                              'signature': signature})

    # verify reply
    signature = crypto.sign(key, (reply['metadata'] + reply['ticket']))
    if signature != reply['signature']:
        # the request metadata has no 'source'/'destination' keys at this
        # point, so report the requestor/target pair instead
        raise InvalidEncryptedTicket(source, target)
    md = jsonutils.loads(base64.b64decode(reply['metadata']))
    if ((md['source'] != source or
         md['destination'] != target or
         md['expiration'] < time.time())):
        raise InvalidEncryptedTicket(md['source'], md['destination'])

    # return ticket data
    tkt = jsonutils.loads(crypto.decrypt(key, reply['ticket']))

    return tkt, md['expiration']
def __init__(self, session, node_name, node_opts=None):
    """Init the Publisher class with the exchange_name, routing_key,
    and other options
    """
    self.sender = None
    self.session = session

    addr_opts = {
        "create": "always",
        "node": {
            "type": "topic",
            "x-declare": {
                "durable": False,
                # auto-delete isn't implemented for exchanges in qpid,
                # but put in here anyway
                "auto-delete": True,
            },
        },
    }
    if node_opts:
        addr_opts["node"]["x-declare"].update(node_opts)

    self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

    self.reconnect(session)
def __init__(self, session, callback, node_name, node_opts,
             link_name, link_opts):
    """Declare a queue on an amqp session.

    'session' is the amqp session to use
    'callback' is the callback to call when messages are received
    'node_name' is the first part of the Qpid address string, before ';'
    'node_opts' will be applied to the "x-declare" section of "node"
                in the address string.
    'link_name' goes into the "name" field of the "link" in the address
                string
    'link_opts' will be applied to the "x-declare" section of "link"
                in the address string.
    """
    self.callback = callback
    self.receiver = None
    self.session = None

    addr_opts = {
        "create": "always",
        "node": {
            "type": "topic",
            "x-declare": {
                "durable": True,
                "auto-delete": True,
            },
        },
        "link": {
            "name": link_name,
            "durable": True,
            "x-declare": {
                "durable": False,
                "auto-delete": True,
                "exclusive": False,
            },
        },
    }
    addr_opts["node"]["x-declare"].update(node_opts)
    addr_opts["link"]["x-declare"].update(link_opts)

    self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

    self.reconnect(session)
def format(self, record):
    message = {'message': record.getMessage(),
               'asctime': self.formatTime(record, self.datefmt),
               'name': record.name,
               'msg': record.msg,
               'args': record.args,
               'levelname': record.levelname,
               'levelno': record.levelno,
               'pathname': record.pathname,
               'filename': record.filename,
               'module': record.module,
               'lineno': record.lineno,
               'funcname': record.funcName,
               'created': record.created,
               'msecs': record.msecs,
               'relative_created': record.relativeCreated,
               'thread': record.thread,
               'thread_name': record.threadName,
               'process_name': record.processName,
               'process': record.process,
               'traceback': None}

    if hasattr(record, 'extra'):
        message['extra'] = record.extra

    if record.exc_info:
        message['traceback'] = self.formatException(record.exc_info)

    return jsonutils.dumps(message)
def serialize_remote_exception(failure_info):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.

    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    LOG.error(_("Returning exception %s to caller"), unicode(failure))
    LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    data = {
        'class': str(failure.__class__.__name__),
        'module': str(failure.__class__.__module__),
        'message': unicode(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data
def __init__(self, conf, session, node_name, node_opts=None):
    """Init the Publisher class with the exchange_name, routing_key,
    and other options
    """
    self.sender = None
    self.session = session

    if conf.qpid_topology_version == 1:
        addr_opts = {
            "create": "always",
            "node": {
                "type": "topic",
                "x-declare": {
                    "durable": False,
                    # auto-delete isn't implemented for exchanges in qpid,
                    # but put in here anyway
                    "auto-delete": True,
                },
            },
        }
        if node_opts:
            addr_opts["node"]["x-declare"].update(node_opts)

        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
    elif conf.qpid_topology_version == 2:
        self.address = node_name
    else:
        raise_invalid_topology_version()

    self.reconnect(session)
def notify(action, alarm_id, previous, current, reason, reason_data):
    LOG.info(_(
        "Notifying alarm %(alarm_id)s from %(previous)s "
        "to %(current)s with action %(action)s because "
        "%(reason)s") % ({'alarm_id': alarm_id, 'previous': previous,
                          'current': current, 'action': action,
                          'reason': reason}))
    body = {'alarm_id': alarm_id, 'previous': previous,
            'current': current, 'reason': reason,
            'reason_data': reason_data}
    kwargs = {'data': jsonutils.dumps(body)}

    if action.scheme == 'https':
        default_verify = int(cfg.CONF.alarm.rest_notifier_ssl_verify)
        options = urlparse.parse_qs(action.query)
        verify = bool(int(options.get('ceilometer-alarm-ssl-verify',
                                      [default_verify])[-1]))
        kwargs['verify'] = verify

        cert = cfg.CONF.alarm.rest_notifier_certificate_file
        key = cfg.CONF.alarm.rest_notifier_certificate_key
        if cert:
            kwargs['cert'] = (cert, key) if key else cert

    eventlet.spawn_n(requests.post, action.geturl(), **kwargs)
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.

    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, "kwargs"):
        kwargs = failure.kwargs

    data = {
        "class": str(failure.__class__.__name__),
        "module": str(failure.__class__.__module__),
        "message": six.text_type(failure),
        "tb": tb,
        "args": failure.args,
        "kwargs": kwargs,
    }

    json_data = jsonutils.dumps(data)

    return json_data
def notify(action, alarm_id, previous, current, reason, reason_data,
           headers=None):
    LOG.info(_(
        "Notifying alarm %(alarm_id)s from %(previous)s "
        "to %(current)s with action %(action)s because "
        "%(reason)s") % ({'alarm_id': alarm_id, 'previous': previous,
                          'current': current, 'action': action,
                          'reason': reason}))
    body = {'alarm_id': alarm_id, 'previous': previous,
            'current': current, 'reason': reason,
            'reason_data': reason_data}
    kwargs = {'data': jsonutils.dumps(body)}

    if headers:
        kwargs['headers'] = headers

    if action.scheme == 'https':
        default_verify = int(cfg.CONF.alarm.rest_notifier_ssl_verify)
        options = urlparse.parse_qs(action.query)
        verify = bool(int(options.get('ceilometer-alarm-ssl-verify',
                                      [default_verify])[-1]))
        kwargs['verify'] = verify

        cert = cfg.CONF.alarm.rest_notifier_certificate_file
        key = cfg.CONF.alarm.rest_notifier_certificate_key
        if cert:
            kwargs['cert'] = (cert, key) if key else cert

    eventlet.spawn_n(requests.post, action.geturl(), **kwargs)
def default(self, data):
    def sanitizer(obj):
        if isinstance(obj, datetime.datetime):
            _dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
            return _dtime.isoformat()
        return six.text_type(obj)
    return jsonutils.dumps(data, default=sanitizer)
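# Stdlib sketch of the sanitizer above: datetimes lose sub-second precision
# before isoformat(), and anything else json cannot handle falls back to str().
import datetime
import json

def sanitizer(obj):
    if isinstance(obj, datetime.datetime):
        return (obj -
                datetime.timedelta(microseconds=obj.microsecond)).isoformat()
    return str(obj)

print(json.dumps({'at': datetime.datetime(2014, 1, 2, 3, 4, 5, 678901)},
                 default=sanitizer))
# {"at": "2014-01-02T03:04:05"}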
def notify(_context, message):
    """Notifies the recipient of the desired event given the model.

    Log notifications using OpenStack's default logging system.
    """
    priority = message.get("priority", CONF.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger(
        "ceilometer.openstack.common.notification.%s" %
        message["event_type"])
    getattr(logger, priority)(jsonutils.dumps(message))
def __init__(self, conf, session, callback, node_name, node_opts,
             link_name, link_opts):
    """Declare a queue on an amqp session.

    'session' is the amqp session to use
    'callback' is the callback to call when messages are received
    'node_name' is the first part of the Qpid address string, before ';'
    'node_opts' will be applied to the "x-declare" section of "node"
                in the address string.
    'link_name' goes into the "name" field of the "link" in the address
                string
    'link_opts' will be applied to the "x-declare" section of "link"
                in the address string.
    """
    self.callback = callback
    self.receiver = None
    self.session = None

    if conf.qpid_topology_version == 1:
        addr_opts = {
            "create": "always",
            "node": {
                "type": "topic",
                "x-declare": {
                    "durable": True,
                    "auto-delete": True,
                },
            },
            "link": {
                "durable": True,
                "x-declare": {
                    "durable": False,
                    "auto-delete": True,
                    "exclusive": False,
                },
            },
        }
        addr_opts["node"]["x-declare"].update(node_opts)
    elif conf.qpid_topology_version == 2:
        addr_opts = {
            "link": {
                "x-declare": {
                    "auto-delete": True,
                    "exclusive": False,
                },
            },
        }
    else:
        raise_invalid_topology_version()

    addr_opts["link"]["x-declare"].update(link_opts)
    if link_name:
        addr_opts["link"]["name"] = link_name

    self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

    self.connect(session)
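# Sketch of the qpid address string these publisher/consumer classes build:
# the node name, a semicolon, then the options dict rendered as JSON (the
# topic name below is invented for illustration).
import json

node_name = 'ceilometer/notifications.info'   # hypothetical topic
addr_opts = {'create': 'always',
             'node': {'type': 'topic',
                      'x-declare': {'durable': True, 'auto-delete': True}}}
address = '%s ; %s' % (node_name, json.dumps(addr_opts))
# 'ceilometer/notifications.info ; {"create": "always", "node": {...}}'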
def notify(_context, message):
    """Notifies the recipient of the desired event given the model.

    Log notifications using OpenStack's default logging system.
    """
    priority = message.get('priority', CONF.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger(
        'ceilometer.openstack.common.notification.%s' %
        message['event_type'])
    getattr(logger, priority)(jsonutils.dumps(message))
def serialize_msg(raw_msg, force_envelope=False):
    if not _SEND_RPC_ENVELOPE and not force_envelope:
        return raw_msg

    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
    # information about this format.
    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}

    return msg
def _serialize(data):
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        return jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        with excutils.save_and_reraise_exception():
            LOG.error(_("JSON serialization failed."))
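# Sketch of the failure path _serialize() guards: types json cannot encode
# raise TypeError, which is logged and then re-raised to the caller.
import json

try:
    json.dumps({'bad': {1, 2}}, ensure_ascii=True)  # sets are not JSON-encodable
except TypeError as e:
    print('JSON serialization failed: %s' % e)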
def _create_resource(conn, res_id, user_id, project_id, source_id, rmeta):
    # TODO(gordc): implement lru_cache to improve performance
    try:
        res = models.Resource.__table__
        m_hash = hashlib.md5(jsonutils.dumps(rmeta,
                                             sort_keys=True)).hexdigest()
        trans = conn.begin_nested()
        if conn.dialect.name == 'sqlite':
            trans = conn.begin()
        with trans:
            res_row = conn.execute(
                sa.select([res.c.internal_id])
                .where(sa.and_(res.c.resource_id == res_id,
                               res.c.user_id == user_id,
                               res.c.project_id == project_id,
                               res.c.source_id == source_id,
                               res.c.metadata_hash == m_hash))).first()
            internal_id = res_row[0] if res_row else None
            if internal_id is None:
                result = conn.execute(res.insert(),
                                      resource_id=res_id,
                                      user_id=user_id,
                                      project_id=project_id,
                                      source_id=source_id,
                                      resource_metadata=rmeta,
                                      metadata_hash=m_hash)
                internal_id = result.inserted_primary_key[0]
                if rmeta and isinstance(rmeta, dict):
                    meta_map = {}
                    for key, v in utils.dict_to_keyval(rmeta):
                        try:
                            _model = sql_utils.META_TYPE_MAP[type(v)]
                            if meta_map.get(_model) is None:
                                meta_map[_model] = []
                            meta_map[_model].append({'id': internal_id,
                                                     'meta_key': key,
                                                     'value': v})
                        except KeyError:
                            LOG.warn(_("Unknown metadata type. Key (%s) "
                                       "will not be queryable."), key)
                    for _model in meta_map.keys():
                        conn.execute(_model.__table__.insert(),
                                     meta_map[_model])
    except dbexc.DBDuplicateEntry:
        # retry function to pick up duplicate committed object
        internal_id = Connection._create_resource(conn, res_id, user_id,
                                                  project_id, source_id,
                                                  rmeta)

    return internal_id
def test_verify_signature_nested_json():
    data = {'a': 'A',
            'b': 'B',
            'nested': {'a': 'A',
                       'b': 'B',
                       'c': ('c',),
                       'd': ['d'],
                       },
            }
    data['message_signature'] = meter.compute_signature(data,
                                                        'not-so-secret')
    jsondata = jsonutils.loads(jsonutils.dumps(data))
    assert meter.verify_signature(jsondata, 'not-so-secret')
def notify(self, action, alarm, state, reason):
    LOG.info("Notifying alarm %s in state %s with action %s because %s",
             alarm, state, action, reason)
    body = {'state': state, 'reason': reason}
    kwargs = {'data': jsonutils.dumps(body)}

    cert = cfg.CONF.alarm.rest_notifier_certificate_file
    key = cfg.CONF.alarm.rest_notifier_certificate_key
    if action.scheme == 'https' and cert:
        kwargs['cert'] = (cert, key) if key else cert

    eventlet.spawn_n(requests.post, action.geturl(), **kwargs)
def __str__(self):
    """Dumps a string representation of the rules."""

    # Start by building the canonical strings for the rules
    out_rules = {}
    for key, value in self.items():
        # Use empty string for singleton TrueCheck instances
        if isinstance(value, TrueCheck):
            out_rules[key] = ''
        else:
            out_rules[key] = str(value)

    # Dump a pretty-printed JSON representation
    return jsonutils.dumps(out_rules, indent=4)
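# Sketch of the pretty-printed output __str__ produces: rule names map to
# their string form, with always-true rules collapsed to "" (the rule names
# and values below are invented).
import json

out_rules = {'default': '', 'admin_required': 'role:admin'}
print(json.dumps(out_rules, indent=4))
# {
#     "default": "",
#     "admin_required": "role:admin"
# }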
def test_verify_signature_nested_json(self):
    data = {'a': 'A',
            'b': 'B',
            'nested': {'a': 'A',
                       'b': 'B',
                       'c': ('c',),
                       'd': ['d'],
                       },
            }
    data['message_signature'] = utils.compute_signature(
        data,
        'not-so-secret')
    jsondata = jsonutils.loads(jsonutils.dumps(data))
    self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret'))
def _do_get(self, url, request):
    req_kwargs = dict()
    req_kwargs['headers'] = dict()
    req_kwargs['headers']['User-Agent'] = self.USER_AGENT
    req_kwargs['headers']['Content-Type'] = 'application/json'
    req_kwargs['data'] = jsonutils.dumps({'request': request})
    if self.timeout is not None:
        req_kwargs['timeout'] = self.timeout

    try:
        resp = requests.get(url, **req_kwargs)
    except requests.ConnectionError as e:
        err = "Unable to establish connection. %s" % e
        raise CommunicationError(url, err)

    return resp
def _create_resource(session, res_id, user_id, project_id, source_id,
                     rmeta):
    # TODO(gordc): implement lru_cache to improve performance
    try:
        nested = session.connection().dialect.name != 'sqlite'
        m_hash = jsonutils.dumps(rmeta, sort_keys=True)
        with session.begin(nested=nested,
                           subtransactions=not nested):
            obj = (session.query(models.Resource.internal_id)
                   .filter(models.Resource.resource_id == res_id)
                   .filter(models.Resource.user_id == user_id)
                   .filter(models.Resource.project_id == project_id)
                   .filter(models.Resource.source_id == source_id)
                   .filter(models.Resource.metadata_hash ==
                           hashlib.md5(m_hash).hexdigest()).first())
            obj_id = obj[0] if obj else None
            if obj_id is None:
                obj = models.Resource(resource_id=res_id,
                                      user_id=user_id,
                                      project_id=project_id,
                                      source_id=source_id,
                                      resource_metadata=rmeta)
                session.add(obj)
                session.flush()
                obj_id = obj.internal_id
                if rmeta and isinstance(rmeta, dict):
                    meta_map = {}
                    for key, v in utils.dict_to_keyval(rmeta):
                        try:
                            _model = sql_utils.META_TYPE_MAP[type(v)]
                            if meta_map.get(_model) is None:
                                meta_map[_model] = []
                            meta_map[_model].append({'id': obj_id,
                                                     'meta_key': key,
                                                     'value': v})
                        except KeyError:
                            LOG.warn(_("Unknown metadata type. Key (%s) "
                                       "will not be queryable."), key)
                    for _model in meta_map.keys():
                        session.execute(_model.__table__.insert(),
                                        meta_map[_model])
    except dbexc.DBDuplicateEntry:
        # retry function to pick up duplicate committed object
        obj_id = Connection._create_resource(session, res_id, user_id,
                                             project_id, source_id, rmeta)

    return obj_id
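# Sketch of the metadata_hash computed in both _create_resource variants: a
# stable digest of the resource metadata via sorted-key JSON. The .encode()
# is needed on Python 3; the originals target Python 2, where md5 takes str.
import hashlib
import json

rmeta = {'instance_type': 'm1.small', 'image': {'id': 'abc'}}
m_hash = hashlib.md5(
    json.dumps(rmeta, sort_keys=True).encode()).hexdigest()
# Equal metadata always yields the same hash, so resources deduplicate.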
def notify(action, alarm, state, reason):
    LOG.info("Notifying alarm %s in state %s with action %s because %s",
             alarm, state, action, reason)
    body = {'alarm': alarm, 'state': state, 'reason': reason}
    kwargs = {'data': jsonutils.dumps(body)}

    if action.scheme == 'https':
        default_verify = int(cfg.CONF.alarm.rest_notifier_ssl_verify)
        options = urlparse.parse_qs(action.query)
        verify = bool(int(options.get('ceilometer-alarm-ssl-verify',
                                      [default_verify])[-1]))
        kwargs['verify'] = verify

        cert = cfg.CONF.alarm.rest_notifier_certificate_file
        key = cfg.CONF.alarm.rest_notifier_certificate_key
        if cert:
            kwargs['cert'] = (cert, key) if key else cert

    eventlet.spawn_n(requests.post, action.geturl(), **kwargs)
def notify(action, alarm_id, previous, current, reason, reason_data,
           headers=None):
    headers = headers or {}
    if not headers.get('x-openstack-request-id'):
        headers['x-openstack-request-id'] = context.generate_request_id()

    LOG.info(_(
        "Notifying alarm %(alarm_id)s from %(previous)s "
        "to %(current)s with action %(action)s because "
        "%(reason)s. request-id: %(request_id)s") %
        ({'alarm_id': alarm_id, 'previous': previous,
          'current': current, 'action': action, 'reason': reason,
          'request_id': headers['x-openstack-request-id']}))
    body = {'alarm_id': alarm_id, 'previous': previous,
            'current': current, 'reason': reason,
            'reason_data': reason_data}
    headers['content-type'] = 'application/json'
    kwargs = {'data': jsonutils.dumps(body),
              'headers': headers}

    if action.scheme == 'https':
        default_verify = int(cfg.CONF.alarm.rest_notifier_ssl_verify)
        options = urlparse.parse_qs(action.query)
        verify = bool(int(options.get('ceilometer-alarm-ssl-verify',
                                      [default_verify])[-1]))
        kwargs['verify'] = verify

        cert = cfg.CONF.alarm.rest_notifier_certificate_file
        key = cfg.CONF.alarm.rest_notifier_certificate_key
        if cert:
            kwargs['cert'] = (cert, key) if key else cert

    # FIXME(rhonjo): Retries are automatically done by urllib3 in requests
    # library. However, there's no interval between retries in urllib3
    # implementation. It will be better to put some interval between
    # retries (future work).
    max_retries = cfg.CONF.alarm.rest_notifier_max_retries
    session = requests.Session()
    session.mount(action.geturl(),
                  requests.adapters.HTTPAdapter(max_retries=max_retries))
    eventlet.spawn_n(session.post, action.geturl(), **kwargs)
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.

    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
    cls_name = str(failure.__class__.__name__)
    mod_name = str(failure.__class__.__module__)
    if (cls_name.endswith(_REMOTE_POSTFIX) and
            mod_name.endswith(_REMOTE_POSTFIX)):
        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]

    data = {
        'class': cls_name,
        'module': mod_name,
        'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data
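# Self-contained sketch of the wire format serialize_remote_exception()
# produces, with stdlib json standing in for jsonutils:
import json
import sys
import traceback

try:
    raise ValueError('boom')
except ValueError:
    failure_info = sys.exc_info()

tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
wire = json.dumps({'class': failure.__class__.__name__,
                   'module': failure.__class__.__module__,
                   'message': str(failure),
                   'tb': tb,
                   'args': failure.args,
                   'kwargs': {}})
# The receiving side can rebuild the exception type from 'module' and 'class'.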
def check_serialize(msg):
    """Make sure a message intended for rpc can be serialized."""
    jsonutils.dumps(msg)
def upgrade(migrate_engine):
    meta = sa.MetaData(bind=migrate_engine)

    resource = sa.Table(
        'resource', meta,
        sa.Column('internal_id', sa.Integer, primary_key=True),
        sa.Column('resource_id', sa.String(255)),
        sa.Column('user_id', sa.String(255)),
        sa.Column('project_id', sa.String(255)),
        sa.Column('source_id', sa.String(255)),
        sa.Column('resource_metadata', sa.Text),
        sa.Column('metadata_hash', sa.String(32)),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    resource.create()

    # copy resource data in to resource table
    sample = sa.Table('sample', meta, autoload=True)
    sa.Column('metadata_hash', sa.String(32)).create(sample)
    for row in sa.select([sample.c.id,
                          sample.c.resource_metadata]).execute():
        sample.update().where(sample.c.id == row['id']).values(
            {sample.c.metadata_hash:
             hashlib.md5(jsonutils.dumps(
                 row['resource_metadata'],
                 sort_keys=True)).hexdigest()}).execute()
    query = sa.select([sample.c.resource_id, sample.c.user_id,
                       sample.c.project_id, sample.c.source_id,
                       sample.c.resource_metadata,
                       sample.c.metadata_hash]).distinct()
    for row in query.execute():
        resource.insert().values(
            resource_id=row['resource_id'],
            user_id=row['user_id'],
            project_id=row['project_id'],
            source_id=row['source_id'],
            resource_metadata=row['resource_metadata'],
            metadata_hash=row['metadata_hash']).execute()

    # link sample records to new resource records
    sa.Column('resource_id_new', sa.Integer).create(sample)
    for row in sa.select([resource]).execute():
        (sample.update().
         where(sa.and_(
             sample.c.resource_id == row['resource_id'],
             sample.c.user_id == row['user_id'],
             sample.c.project_id == row['project_id'],
             sample.c.source_id == row['source_id'],
             sample.c.metadata_hash == row['metadata_hash'])).
         values({sample.c.resource_id_new: row['internal_id']}).execute())

    sample.c.resource_id.drop()
    sample.c.metadata_hash.drop()
    sample.c.resource_id_new.alter(name='resource_id')
    # re-bind metadata to pick up alter name change
    meta = sa.MetaData(bind=migrate_engine)
    sample = sa.Table('sample', meta, autoload=True)
    resource = sa.Table('resource', meta, autoload=True)

    if migrate_engine.name != 'sqlite':
        sa.Index('ix_resource_resource_id',
                 resource.c.resource_id).create()
        sa.Index('ix_sample_user_id', sample.c.user_id).drop()
        sa.Index('ix_sample_project_id', sample.c.project_id).drop()
        sa.Index('ix_sample_resource_id', sample.c.resource_id).create()
        sa.Index('ix_sample_meter_id_resource_id',
                 sample.c.meter_id, sample.c.resource_id).create()

        params = {'columns': [sample.c.resource_id],
                  'refcolumns': [resource.c.internal_id]}
        if migrate_engine.name == 'mysql':
            params['name'] = 'fk_sample_resource_internal_id'
        migrate.ForeignKeyConstraint(**params).create()

    sample.c.user_id.drop()
    sample.c.project_id.drop()
    sample.c.source_id.drop()
    sample.c.resource_metadata.drop()

    _migrate_meta_tables(meta, sample.c.id, sample.c.resource_id,
                         'resource.internal_id')
def test_verify_signature_nested_json(self):
    data = {"a": "A",
            "b": "B",
            "nested": {"a": "A",
                       "b": "B",
                       "c": ("c",),
                       "d": ["d"]}}
    data["message_signature"] = rpc.compute_signature(data,
                                                      "not-so-secret")
    jsondata = jsonutils.loads(jsonutils.dumps(data))
    self.assertTrue(rpc.verify_signature(jsondata, "not-so-secret"))
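# Why these tests round-trip through JSON before verifying: serialization
# normalizes Python types (tuples become lists), so the signature check must
# hold on the post-round-trip form a receiver would actually see.
import json

data = {'c': ('c',), 'd': ['d']}
assert json.loads(json.dumps(data)) == {'c': ['c'], 'd': ['d']}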