def _pack_json_msg(self, msg):
    """Qpid cannot serialize dicts containing strings longer than 65535
    characters.  This function dumps the message content to a JSON string,
    which Qpid is able to handle.

    :param msg: May be either a Qpid Message object or a bare dict.
    :returns: A Qpid Message with its content field JSON encoded.
    """
    try:
        msg.content = jsonutils.dumps(msg.content)
    except AttributeError:
        # Need to have a Qpid message so we can set the content_type.
        msg = qpid_messaging.Message(jsonutils.dumps(msg))
    msg.content_type = JSON_CONTENT_TYPE
    return msg
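# Hedged usage sketch of the workaround above: the payload travels as one
# JSON string, and the content_type tells the receiver to decode it back
# into a dict.  Plain 'json' stands in for openstack.common.jsonutils,
# which behaves the same for this case.
import json

packed_content = json.dumps({'method': 'ping',
                             'args': {'payload': 'x' * 70000}})
# ... transported through Qpid as a single string ...
unpacked = json.loads(packed_content)
assert unpacked['args']['payload'].startswith('x')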
def encode(self, version, target, json_msg):
    """This is the main encoding function.

    It takes a target and a message and returns a tuple consisting of a
    JSON serialized metadata object, a JSON serialized (and optionally
    encrypted) message, and a signature.

    :param version: the current envelope version
    :param target: The name of the target service (usually with hostname)
    :param json_msg: a serialized json message object
    """
    ticket = self._get_ticket(target)

    metadata = jsonutils.dumps({'source': self._name,
                                'destination': target,
                                'timestamp': time.time(),
                                'nonce': _get_nonce(),
                                'esek': ticket.esek,
                                'encryption': self._encrypt})

    message = json_msg
    if self._encrypt:
        message = self._crypto.encrypt(ticket.ekey, message)

    signature = self._crypto.sign(ticket.skey,
                                  version + metadata + message)

    return (metadata, message, signature)
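# Hedged sketch of the check a receiving side implied by encode() would
# perform: recompute the MAC over version + metadata + message and compare
# it to the transmitted signature.  HMAC-SHA256 is an assumption here; the
# concrete algorithm lives behind self._crypto and is not shown above.
import hashlib
import hmac


def verify_envelope(skey, version, metadata, message, signature):
    # skey is assumed to be the shared signing key (bytes) from the ticket.
    expected = hmac.new(skey, (version + metadata + message).encode('utf-8'),
                        hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, signature)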
def __init__(self, conf, session, node_name, node_opts=None):
    """Init the Publisher class with the exchange_name, routing_key,
    and other options
    """
    self.sender = None
    self.session = session

    if conf.qpid_topology_version == 1:
        addr_opts = {
            "create": "always",
            "node": {
                "type": "topic",
                "x-declare": {
                    "durable": False,
                    # auto-delete isn't implemented for exchanges in qpid,
                    # but put in here anyway
                    "auto-delete": True,
                },
            },
        }
        if node_opts:
            addr_opts["node"]["x-declare"].update(node_opts)

        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
    elif conf.qpid_topology_version == 2:
        self.address = node_name
    else:
        raise_invalid_topology_version()

    self.reconnect(session)
def scale_cluster(self, cluster, instances):
    processor = self._get_blueprint_processor(cluster)
    cluster_spec = clusterspec.ClusterSpec(
        json.dumps(processor.blueprint), cluster=cluster)
    rpm = self._get_rpm_uri(cluster_spec)

    servers = []
    for instance in instances:
        host_role = utils.get_host_role(instance)
        servers.append(h.HadoopServer(instance,
                                      cluster_spec.node_groups[host_role],
                                      ambari_rpm=rpm))

    ambari_info = self.get_ambari_info(cluster_spec,
                                       self._get_servers(cluster))

    for server in servers:
        self._spawn('Ambari provisioning thread',
                    server.provision_ambari, ambari_info)

    self._wait_for_host_registrations(self._get_num_hosts(cluster),
                                      ambari_info)

    # now add the hosts and the component
    self._add_hosts_and_components(cluster_spec, servers,
                                   ambari_info, cluster.name)

    self._install_and_start_components(cluster.name, servers, ambari_info)
def format(self, record):
    message = {'message': record.getMessage(),
               'asctime': self.formatTime(record, self.datefmt),
               'name': record.name,
               'msg': record.msg,
               'args': record.args,
               'levelname': record.levelname,
               'levelno': record.levelno,
               'pathname': record.pathname,
               'filename': record.filename,
               'module': record.module,
               'lineno': record.lineno,
               'funcname': record.funcName,
               'created': record.created,
               'msecs': record.msecs,
               'relative_created': record.relativeCreated,
               'thread': record.thread,
               'thread_name': record.threadName,
               'process_name': record.processName,
               'process': record.process,
               'traceback': None}

    if hasattr(record, 'extra'):
        message['extra'] = record.extra

    if record.exc_info:
        message['traceback'] = self.formatException(record.exc_info)

    return jsonutils.dumps(message)
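# Roughly what the formatter above emits: one JSON object per log record.
# Sketch using the stdlib logging module, with plain json in place of
# jsonutils and only a few of the fields.
import json
import logging

record = logging.LogRecord('demo', logging.INFO, __file__, 42,
                           'hello %s', ('world',), None)
line = json.dumps({'message': record.getMessage(),
                   'levelname': record.levelname,
                   'name': record.name})
assert json.loads(line)['message'] == 'hello world'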
def default(self, data):
    def sanitizer(obj):
        if isinstance(obj, datetime.datetime):
            _dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
            return _dtime.isoformat()
        return unicode(obj)

    return jsonutils.dumps(data, default=sanitizer)
def _add_configurations_to_cluster(self, cluster_spec, ambari_public_ip,
                                   name):
    configs = cluster_spec.configurations
    config_url = 'http://{0}:8080/api/v1/clusters/{1}'.format(
        ambari_public_ip, name)

    body = {}
    clusters = {}
    body['Clusters'] = clusters
    for config_name in configs:
        config_body = {}
        clusters['desired_config'] = config_body
        config_body['type'] = config_name
        #TODO(jspeidel): hard coding for now
        config_body['tag'] = 'v1'
        config_body['properties'] = configs[config_name]
        result = requests.put(config_url, data=json.dumps(body),
                              auth=(self.ambari_user, self.ambari_password))
        if result.status_code != 200:
            LOG.warning('Set configuration command failed. {0}'.format(
                result.text))
            return False

    return True
def _add_configurations_to_cluster(
        self, cluster_spec, ambari_info, name):
    configs = cluster_spec.configurations
    config_url = 'http://{0}/api/v1/clusters/{1}'.format(
        ambari_info.get_address(), name)

    body = {}
    clusters = {}
    body['Clusters'] = clusters
    for config_name in configs:
        if config_name == 'ambari':
            continue
        config_body = {}
        clusters['desired_config'] = config_body
        config_body['type'] = config_name
        #TODO(jspeidel): hard coding for now
        config_body['tag'] = 'v1'
        config_body['properties'] = configs[config_name]
        result = requests.put(config_url, data=json.dumps(body), auth=(
            ambari_info.user, ambari_info.password))
        if result.status_code != 200:
            LOG.warning(
                'Set configuration command failed. {0}'.format(
                    result.text))
            return False

    return True
def scale_cluster(self, cluster, instances):
    processor = self._get_blueprint_processor(cluster)
    cluster_spec = clusterspec.ClusterSpec(
        json.dumps(processor.blueprint), cluster=cluster)
    rpm = self._get_rpm_uri(cluster_spec)

    servers = []
    for instance in instances:
        host_role = utils.get_host_role(instance)
        servers.append(h.HadoopServer(instance,
                                      cluster_spec.node_groups[host_role],
                                      ambari_rpm=rpm))

    ambari_info = self.get_ambari_info(cluster_spec)
    self._update_ambari_info_credentials(cluster_spec, ambari_info)

    for server in servers:
        self._spawn('Ambari provisioning thread',
                    server.provision_ambari, ambari_info)

    self._wait_for_host_registrations(self._get_num_hosts(cluster),
                                      ambari_info)

    # now add the hosts and the component
    self._add_hosts_and_components(cluster_spec, servers,
                                   ambari_info, cluster.name)

    self._install_and_start_components(cluster.name, servers, ambari_info)
def get_ticket(self, source, target, crypto, key):
    # prepare metadata
    md = {'requestor': source,
          'target': target,
          'timestamp': time.time(),
          'nonce': struct.unpack('Q', os.urandom(8))[0]}
    metadata = base64.b64encode(jsonutils.dumps(md))

    # sign metadata
    signature = crypto.sign(key, metadata)

    # HTTP request
    reply = self._get_ticket({'metadata': metadata,
                              'signature': signature})

    # verify reply
    signature = crypto.sign(key, (reply['metadata'] + reply['ticket']))
    if signature != reply['signature']:
        # At this point md still holds the request metadata, which has no
        # 'source'/'destination' keys, so raise with the known values
        # instead of triggering a KeyError.
        raise InvalidEncryptedTicket(source, target)
    md = jsonutils.loads(base64.b64decode(reply['metadata']))
    if ((md['source'] != source or
         md['destination'] != target or
         md['expiration'] < time.time())):
        raise InvalidEncryptedTicket(md['source'], md['destination'])

    # return ticket data
    tkt = jsonutils.loads(crypto.decrypt(key, reply['ticket']))

    return tkt, md['expiration']
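# Minimal round-trip of the metadata encoding used in get_ticket(): JSON,
# then base64, then back again.  Standard library only.
import base64
import json
import time

md = {'requestor': 'api-host', 'target': 'scheduler',
      'timestamp': time.time()}
blob = base64.b64encode(json.dumps(md).encode('utf-8'))
assert json.loads(base64.b64decode(blob).decode('utf-8'))['target'] == 'scheduler'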
def __init__(self, session, node_name, node_opts=None):
    """Init the Publisher class with the exchange_name, routing_key,
    and other options
    """
    self.sender = None
    self.session = session

    addr_opts = {
        "create": "always",
        "node": {
            "type": "topic",
            "x-declare": {
                "durable": False,
                # auto-delete isn't implemented for exchanges in qpid,
                # but put in here anyway
                "auto-delete": True,
            },
        },
    }
    if node_opts:
        addr_opts["node"]["x-declare"].update(node_opts)

    self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

    self.reconnect(session)
def serialize_msg(raw_msg):
    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
    # information about this format.
    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}

    return msg
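# Hedged counterpart to serialize_msg(): unpacking the envelope.  The key
# names and version mirror oslo's rpc envelope constants; the literal
# values below are assumptions for the sake of a self-contained example.
import json

_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
_RPC_ENVELOPE_VERSION = '2.0'

envelope = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
            _MESSAGE_KEY: json.dumps({'method': 'ping'})}
assert json.loads(envelope[_MESSAGE_KEY])['method'] == 'ping'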
def notify(_context, message):
    """Notifies the recipient of the desired event given the model.

    Log notifications using OpenStack's default logging system.
    """
    priority = message.get("priority", CONF.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger(
        "savanna.openstack.common.notification.%s" % message["event_type"])
    getattr(logger, priority)(jsonutils.dumps(message))
def __init__(self, conf, session, callback, node_name, node_opts,
             link_name, link_opts):
    """Declare a queue on an amqp session.

    'session' is the amqp session to use
    'callback' is the callback to call when messages are received
    'node_name' is the first part of the Qpid address string, before ';'
    'node_opts' will be applied to the "x-declare" section of "node"
                in the address string.
    'link_name' goes into the "name" field of the "link" in the address
                string
    'link_opts' will be applied to the "x-declare" section of "link"
                in the address string.
    """
    self.callback = callback
    self.receiver = None
    self.session = None

    if conf.qpid_topology_version == 1:
        addr_opts = {
            "create": "always",
            "node": {
                "type": "topic",
                "x-declare": {
                    "durable": True,
                    "auto-delete": True,
                },
            },
            "link": {
                "name": link_name,
                "durable": True,
                "x-declare": {
                    "durable": False,
                    "auto-delete": True,
                    "exclusive": False,
                },
            },
        }
        addr_opts["node"]["x-declare"].update(node_opts)
    elif conf.qpid_topology_version == 2:
        addr_opts = {
            "link": {
                "x-declare": {
                    "auto-delete": True,
                },
            },
        }
    else:
        raise_invalid_topology_version()

    addr_opts["link"]["x-declare"].update(link_opts)
    self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

    self.connect(session)
def notify(_context, message):
    """Notifies the recipient of the desired event given the model.

    Log notifications using OpenStack's default logging system.
    """
    priority = message.get('priority', CONF.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger(
        'savanna.openstack.common.notification.%s' % message['event_type'])
    getattr(logger, priority)(jsonutils.dumps(message))
def _serialize(data):
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        return jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        with excutils.save_and_reraise_exception():
            LOG.error(_("JSON serialization failed."))
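# Quick illustration of the failure mode _serialize() guards against:
# json cannot encode arbitrary objects, so a TypeError surfaces, which the
# save_and_reraise_exception block logs and then re-raises.
import json

try:
    json.dumps({'when': object()})
except TypeError:
    pass  # the branch _serialize() logs before re-raising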
def _do_get(self, url, request):
    req_kwargs = dict()
    req_kwargs['headers'] = dict()
    req_kwargs['headers']['User-Agent'] = self.USER_AGENT
    req_kwargs['headers']['Content-Type'] = 'application/json'
    req_kwargs['data'] = jsonutils.dumps({'request': request})
    if self.timeout is not None:
        req_kwargs['timeout'] = self.timeout

    try:
        resp = requests.get(url, **req_kwargs)
    except requests.ConnectionError as e:
        err = "Unable to establish connection. %s" % e
        raise CommunicationError(url, err)

    return resp
def configure_cluster(self, cluster):
    # take the user inputs from the cluster and node groups and convert
    # to an Ambari blueprint
    processor = self._get_blueprint_processor(cluster)
    # NOTE: for the time being we are going to ignore the node group
    # level configurations.  we are not currently
    # defining node level configuration items (i.e. scope='cluster' in
    # all cases for returned configs)

    #create a cloud context
    #TODO(jmaron): is base host name really necessary any longer?
    #cloud_ctx = ClusterContext(None, LOG)
    #self._add_instances_to_cluster_context(cloud_ctx, cluster)

    self.create_cluster(cluster, json.dumps(processor.blueprint))
def _generate_host_manifest(self, servers):
    host_manifest = {}
    hosts = []
    host_id = 1

    for server in servers:
        hosts.append({'host_id': host_id,
                      'hostname': server.hostname,
                      'role': server.role,
                      'vm_image': server.nova_info.image,
                      'vm_flavor': server.nova_info.flavor,
                      'public_ip': server.management_ip,
                      'private_ip': server.internal_ip})
        host_id += 1

    host_manifest['hosts'] = hosts
    return json.dumps(host_manifest).strip('{}')
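# The trailing .strip('{}') removes the outer braces so the serialized
# hosts list can be spliced into a larger JSON document (an assumption
# about the caller, but consistent with returning a brace-less fragment).
import json

fragment = json.dumps({'hosts': [{'host_id': 1}]}).strip('{}')
combined = '{%s, "cluster": "demo"}' % fragment
assert json.loads(combined)['cluster'] == 'demo'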
def _generate_host_manifest(self):
    host_manifest = {}
    hosts = []
    host_id = 1

    for server in self.servers:
        instance_info = n_helper.get_instance_info(server)
        hosts.append({'host_id': host_id,
                      'hostname': server.hostname,
                      'role': utils.get_host_role(server),
                      'vm_image': instance_info.image,
                      'vm_flavor': instance_info.flavor,
                      'public_ip': server.management_ip,
                      'private_ip': server.internal_ip})
        host_id += 1

    host_manifest['hosts'] = hosts
    return json.dumps(host_manifest).strip('{}')
def _generate_host_manifest(self, servers):
    host_manifest = {}
    hosts = []
    host_id = 1

    for server in servers:
        instance_info = n_helper.get_instance_info(server)
        hosts.append({
            'host_id': host_id,
            'hostname': server.hostname,
            'role': utils.get_host_role(server),
            'vm_image': instance_info.image,
            'vm_flavor': instance_info.flavor,
            'public_ip': server.management_ip,
            'private_ip': server.internal_ip
        })
        host_id += 1

    host_manifest['hosts'] = hosts
    return json.dumps(host_manifest).strip('{}')
def configure_cluster(self, cluster):
    # take the user inputs from the cluster and node groups and convert
    # to an Ambari blueprint
    processor = bp.BlueprintProcessor(json.load(
        open(os.path.join(os.path.dirname(__file__), 'resources',
                          'default-cluster.template'), "r")))
    processor.process_user_inputs(cluster.cluster_configs)
    processor.process_node_groups(cluster.node_groups)
    # NOTE: for the time being we are going to ignore the node group
    # level configurations.  we are not currently
    # defining node level configuration items (i.e. scope='cluster' in
    # all cases for returned configs)

    #create a cloud context
    #TODO(jmaron): is base host name really necessary any longer?
    #cloud_ctx = ClusterContext(None, LOG)
    #self._add_instances_to_cluster_context(cloud_ctx, cluster)

    self.create_cluster(cluster, json.dumps(processor.blueprint))
def _add_configurations_to_cluster(self, cluster_spec, ambari_public_ip,
                                   name):
    configs = cluster_spec.configurations
    config_url = "http://{0}:8080/api/v1/clusters/{1}".format(
        ambari_public_ip, name)

    body = {}
    clusters = {}
    body["Clusters"] = clusters
    for config_name in configs:
        config_body = {}
        clusters["desired_config"] = config_body
        config_body["type"] = config_name
        # TODO(jspeidel): hard coding for now
        config_body["tag"] = "v1"
        config_body["properties"] = configs[config_name]
        result = requests.put(config_url, data=json.dumps(body),
                              auth=(self.ambari_user, self.ambari_password))
        if result.status_code != 200:
            LOG.warning("Set configuration command failed. {0}".format(
                result.text))
            return False

    return True
def _generate_host_manifest(self, servers):
    host_manifest = {}
    hosts = []
    host_id = 1

    for server in servers:
        instance_info = n_helper.get_instance_info(server)
        hosts.append(
            {
                "host_id": host_id,
                "hostname": server.hostname,
                "role": utils.get_host_role(server),
                "vm_image": instance_info.image,
                "vm_flavor": instance_info.flavor,
                "public_ip": server.management_ip,
                "private_ip": server.internal_ip,
            }
        )
        host_id += 1

    host_manifest["hosts"] = hosts
    return json.dumps(host_manifest).strip("{}")
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.
    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
    cls_name = str(failure.__class__.__name__)
    mod_name = str(failure.__class__.__module__)
    if (cls_name.endswith(_REMOTE_POSTFIX) and
            mod_name.endswith(_REMOTE_POSTFIX)):
        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]

    data = {
        'class': cls_name,
        'module': mod_name,
        'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data
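# Small demonstration of the _REMOTE_POSTFIX stripping above.  '_Remote'
# is the conventional oslo value, assumed here: a re-raised remote class
# name like 'InstanceNotFound_Remote' maps back to its original name.
_REMOTE_POSTFIX = '_Remote'

cls_name = 'InstanceNotFound_Remote'
if cls_name.endswith(_REMOTE_POSTFIX):
    cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
assert cls_name == 'InstanceNotFound'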
def _add_configurations_to_cluster(self, cluster_spec, ambari_public_ip,
                                   name):
    configs = cluster_spec.configurations
    add_configuration_url = 'http://{0}:8080/api/v1/clusters/{1}'.format(
        ambari_public_ip, name)

    body = {}
    clusters = {}
    body['Clusters'] = clusters
    for config_name in configs:
        config_body = {}
        clusters['desired_config'] = config_body
        config_body['type'] = config_name
        #TODO(jspeidel): hard coding for now
        config_body['tag'] = 'v1'
        config_body['properties'] = configs[config_name]
        result = requests.put(add_configuration_url, data=json.dumps(body),
                              auth=('admin', 'admin'))
        if result.status_code != 200:
            LOG.warning(
                'Set configuration command failed. {0}'.format(
                    result.text))
            return False

    return True
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.
    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, "kwargs"):
        kwargs = failure.kwargs

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
    cls_name = str(failure.__class__.__name__)
    mod_name = str(failure.__class__.__module__)
    if cls_name.endswith(_REMOTE_POSTFIX) and mod_name.endswith(_REMOTE_POSTFIX):
        cls_name = cls_name[: -len(_REMOTE_POSTFIX)]
        mod_name = mod_name[: -len(_REMOTE_POSTFIX)]

    data = {
        "class": cls_name,
        "module": mod_name,
        "message": six.text_type(failure),
        "tb": tb,
        "args": failure.args,
        "kwargs": kwargs,
    }

    json_data = jsonutils.dumps(data)

    return json_data
def configure_cluster(self, cluster):
    # take the user inputs from the cluster and node groups and convert
    # to an Ambari blueprint
    processor = bp.BlueprintProcessor(json.load(
        open(os.path.join(os.path.dirname(__file__), 'resources',
                          'default-cluster.template'), "r")))
    processor.process_user_inputs(
        self._map_to_user_inputs('1.3.0', cluster.cluster_configs))
    processor.process_node_groups(cluster.node_groups)
    # NOTE: for the time being we are going to ignore the node group
    # level configurations.  we are not currently
    # defining node level configuration items (i.e. scope='cluster' in
    # all cases for returned configs)

    #create a cloud context
    #TODO(jmaron): is base host name really necessary any longer?
    #cloud_ctx = ClusterContext(None, LOG)
    #self._add_instances_to_cluster_context(cloud_ctx, cluster)

    self.create_cluster(cluster, json.dumps(processor.blueprint))
def serialize_msg(raw_msg):
    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
    # information about this format.
    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}
    return msg
def process_bind_param(self, value, dialect):
    if value is not None:
        value = jsonutils.dumps(value)
    return value
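# process_bind_param() above is one half of a SQLAlchemy TypeDecorator.
# A hedged sketch of the full JSON column type it plausibly belongs to,
# including the inverse process_result_value(); plain json stands in for
# jsonutils.
import json

from sqlalchemy.types import Text, TypeDecorator


class JsonEncoded(TypeDecorator):
    """Stores a Python structure as a JSON-encoded string in a Text column."""
    impl = Text

    def process_bind_param(self, value, dialect):
        if value is not None:
            value = json.dumps(value)
        return value

    def process_result_value(self, value, dialect):
        if value is not None:
            value = json.loads(value)
        return value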