def get_act_target_count(node):
    """Return the actual scaling target count for *node*.

    Falls back to the node's configured scaling minimum when the UDS
    holds no explicit target count for this infrastructure/node pair.
    """
    name, infra = node['name'], node['infra_id']
    stored = main_uds.get_scaling_target_count(infra, name)
    return int(util.coalesce(stored, node['scaling']['min']))
def perform(self, cloud_handler):
    """Return the node's address, preferring the external IP over the internal.

    Both addresses are extracted from the instance's XML description.
    """
    inst = get_instance(cloud_handler, self.instance_data['instance_id'])
    internal = getTagText(
        inst.getElementsByTagName('internal-ip-address').item(0).childNodes)
    external = getTagText(
        inst.getElementsByTagName('external-ip-address').item(0).childNodes)
    log.debug("[%s] Internal IP is: %s, External IP is: %s",
              cloud_handler.name, internal, external)
    return coalesce(external, internal)
def perform(self, cloud_handler):
    """Return the network address of the container.

    The instance id is stored as a stringified dict; its ``Id`` entry is
    the Docker container id used for inspection.
    """
    container_id = ast.literal_eval(self.instance_data['instance_id'])['Id']
    details = cloud_handler.cli.inspect_container(container=container_id)
    return coalesce(details['NetworkSettings']['IPAddress'])
def report(instances):
    """Report actual vs. target instance counts for a scaled node.

    The target count stored in the UDS is adjusted by pending create and
    destroy requests, then clamped into the node's scaling limits.
    """
    if not instances:
        raise Exception("Internal error: instances not found!")
    # Any instance will do as a sample; they all belong to the same node.
    sample = next(iter(instances.values()))
    infra = sample['infra_id']
    name = sample['resolved_node_definition']['name']
    actual = len(instances)
    target = int(util.coalesce(
        main_uds.get_scaling_target_count(infra, name), actual))
    target += len(main_uds.get_scaling_createnode(infra, name))
    target -= len(main_uds.get_scaling_destroynode(infra, name))
    target_min, target_max = get_scaling_limits(sample['node_description'])
    target = keep_limits_for_scaling(target, sample['node_description'])
    return dict(actual=actual, target=target, min=target_min, max=target_max)
def perform(self, resource_handler):
    """Return the node's address, preferring the external IP over the internal.

    Both addresses are extracted from the instance's XML description.
    """
    inst = get_instance(resource_handler, self.instance_data['instance_id'])
    internal = getTagText(
        inst.getElementsByTagName('internal-ip-address').item(0).childNodes)
    external = getTagText(
        inst.getElementsByTagName('external-ip-address').item(0).childNodes)
    log.debug("[%s] Internal IP is: %s, External IP is: %s",
              resource_handler.name, internal, external)
    return coalesce(external, internal)
def perform(self, cloud_handler):
    """Return the first available address of the instance.

    Preference order: public DNS name, then public IP, then private IP.
    """
    log.debug("[%s] Acquiring address for %r",
              cloud_handler.name, self.instance_data['node_id'])
    instance = get_instance(self.conn, self.instance_data['instance_id'])
    return coalesce(instance.public_dns_name,
                    instance.ip_address,
                    instance.private_ip_address)
def perform(self, cloud_handler):
    """Return the instance's IP address, preferring the public address.

    Empty-string addresses reported by the backend are treated as missing
    so that ``coalesce`` can fall through to the next candidate.
    """
    log.debug("[%s] Acquiring IP address for %r",
              cloud_handler.name, self.instance_data['node_id'])
    inst = get_instance(self.conn, self.instance_data['instance_id'])
    # BUG FIX: the original used `is ''` — an identity comparison with a
    # string literal, which is implementation-dependent and not guaranteed
    # to be True for equal strings (and a SyntaxWarning on modern Python).
    # `or None` maps both '' and None to None, which is the intended effect.
    ip_address = inst.ip_address or None
    private_ip_address = inst.private_ip_address or None
    return coalesce(ip_address, private_ip_address)
def perform(self, resource_handler):
    """Return the first available address of the instance.

    Preference order: public DNS name, then public IP, then private IP.
    Empty-string values reported by the backend are treated as missing.
    """
    log.debug("[%s] Acquiring address for %r",
              resource_handler.name, self.instance_data['node_id'])
    inst = get_instance(self.conn, self.instance_data['instance_id'])
    # BUG FIX: the original used `is ''` — an identity comparison with a
    # string literal, which is implementation-dependent and not guaranteed
    # to be True for equal strings (and a SyntaxWarning on modern Python).
    # `or None` maps both '' and None to None, which is the intended effect.
    public_dns_name = inst.public_dns_name or None
    ip_address = inst.ip_address or None
    private_ip_address = inst.private_ip_address or None
    return coalesce(public_dns_name, ip_address, private_ip_address)
def get_act_target_count(node):
    """Return the actual scaling target count for *node*.

    Falls back to the node's scaling minimum (as reported by
    ``get_scaling_limits``) when the UDS holds no explicit target count.
    """
    name, infra = node['name'], node['infra_id']
    minimum, _ = get_scaling_limits(node)
    stored = main_uds.get_scaling_target_count(infra, name)
    return int(util.coalesce(stored, minimum))
def effective_exchange(self, override=None):
    """Selects the exchange in effect.

    The effective value is determined in the following order:

    1. The ``override`` argument, if specified.
    2. This object's default exchange
       (``MQHandler.__init__(exchange=...)``).
    3. The empty string ``''`` as a last resort.
    """
    fallback_chain = (override, self.default_exchange, '')
    return util.coalesce(*fallback_chain)
def __init__(self, host='localhost', port='6379', db=0, altdbs=None,
             serialize=yaml.dump, deserialize=yaml.load,
             **kwargs):
    """Initialize a Redis-backed key-value store.

    :param str host: Redis server host.
    :param str port: Redis server port.
    :param int db: Default Redis database index.
    :param dict altdbs: Optional mapping of logical names to alternate
        database indexes; must be a bijection.
    :param serialize: Callable used to serialize stored values.
    :param deserialize: Callable used to deserialize stored values.
    :raises exc.ConfigurationError: if ``altdbs`` is not a bijection.

    .. note:: (review) the default ``deserialize=yaml.load`` is unsafe on
       untrusted data; prefer ``yaml.safe_load`` if the stored values can
       come from outside — confirm before changing the default.
    """
    super(RedisKVStore, self).__init__(**kwargs)
    self.host, self.port, self.default_db = host, port, db
    # Coalesce to a fresh dict so a mutable default is never shared.
    self.altdbs = util.coalesce(altdbs, dict())
    # BUG FIX / consistency: `iteritems()` is Python-2-only; the Python-3
    # variant of this class uses `.items()`, which works on both versions.
    self.inverse_altdbs = dict((v, k) for k, v in self.altdbs.items())
    if len(self.altdbs) != len(self.inverse_altdbs):
        raise exc.ConfigurationError('The specified altdbs is not a bijection',
                                     self.altdbs)
    self.serialize = serialize
    self.deserialize = deserialize
def effective_routing_key(self, override=None):
    """Selects the routing key in effect.

    The effective value is determined in the following order:

    1. The ``override`` argument, if specified.
    2. This object's default routing key
       (``MQHandler.__init__(routing_key=...)``).

    :raises ValueError: if no routing key is in effect (a routing key is
        assumed to be mandatory).
    """
    # A trailing exception instance makes util.coalesce raise it when all
    # preceding candidates are None.
    mandatory = ValueError('publish_message: Routing key is mandatory')
    return util.coalesce(override, self.default_routing_key, mandatory)
def __init__(self, host='localhost', port='6379', db=0, altdbs=None,
             serialize=yaml.dump, deserialize=yaml.load,
             **kwargs):
    """Initialize a Redis-backed key-value store.

    :param str host: Redis server host.
    :param str port: Redis server port.
    :param int db: Default Redis database index.
    :param dict altdbs: Optional mapping of logical names to alternate
        database indexes; must be a bijection.
    :param serialize: Callable used to serialize stored values.
    :param deserialize: Callable used to deserialize stored values.
    :raises exc.ConfigurationError: if ``altdbs`` is not a bijection.
    """
    super(RedisKVStore, self).__init__(**kwargs)
    self.host, self.port, self.default_db = host, port, db
    # Coalesce to a fresh dict so a mutable default is never shared.
    self.altdbs = util.coalesce(altdbs, dict())
    self.inverse_altdbs = {v: k for k, v in self.altdbs.items()}
    # A bijection has exactly as many inverse entries as forward entries.
    if len(self.altdbs) != len(self.inverse_altdbs):
        raise exc.ConfigurationError(
            'The specified altdbs is not a bijection', self.altdbs)
    self.serialize = serialize
    self.deserialize = deserialize
def report(instances):
    """Report actual vs. target instance counts for a scaled node.

    The target count stored in the UDS is adjusted by pending create and
    destroy requests, then clamped into the node's configured scaling
    limits (``scaling.min`` / ``scaling.max``, both defaulting to 1).

    :raises Exception: if *instances* is empty.
    """
    if not instances:
        raise Exception("Internal error: instances not found!")
    # BUG FIX: `instances.keys()[0]` only works on Python 2, where keys()
    # returns a list; `next(iter(...))` works on both Python 2 and 3.
    oneinstance = instances[next(iter(instances))]
    infraid = oneinstance['infra_id']
    nodename = oneinstance['resolved_node_definition']['name']
    count = len(instances)
    target_count = int(util.coalesce(
        main_uds.get_scaling_target_count(infraid, nodename), count))
    target_count += len(main_uds.get_scaling_createnode(infraid, nodename))
    target_count -= len(main_uds.get_scaling_destroynode(infraid, nodename))
    scaling = oneinstance['node_description'].get('scaling', dict())
    target_min = scaling.get('min', 1)
    target_max = scaling.get('max', 1)
    # Clamp the computed target into the configured [min, max] interval.
    target_count = min(max(target_count, target_min), target_max)
    return dict(actual=count, target=target_count,
                min=target_min, max=target_max)
def test_first(self):
    """coalesce must return the first non-None argument."""
    result = util.coalesce('first', None, 'third')
    self.assertEqual(result, 'first')
def test_empty(self):
    """coalesce with no arguments yields None."""
    outcome = util.coalesce()
    self.assertIsNone(outcome)
def query_item(self, key, default=None):
    """Look up *key* in the backend, returning *default* when missing.

    The key is first mapped to the proper backend and backend-local key;
    stored data is deserialized before being returned.
    """
    log.debug('Querying %r', key)
    backend, key = self.transform_key(key)
    raw = backend.get(key)
    value = self.deserialize(raw) if raw else None
    return util.coalesce(value, default)
def setup(setup_args=None, cfg_path=None, auth_data_path=None):
    """
    Build an OCCO application from configuration.

    :param function setup_args: A function that accepts an
        :class:`argparse.ArgumentParser` object. This function can set up
        the argument parser as needed (mainly: add command line arguments).
    :param str cfg_path: Optional. The path of the configuration file. If
        unspecified, other sources will be used (see
        :func:`occo.util.config.config` for details).

    **OCCO Configuration**

    OCCO uses YAML as a configuration language, mainly for its dynamic
    properties, and its human readability. The parsed configuration is a
    dictionary, containing both static parameters and objects already
    instantiated (or executed, sometimes!) by the YAML parser.

    The configuration must contain the following items.

    ``logging``
        The :mod:`logging` configuration dictionary that will be used with
        :func:`logging.config.dictConfig` to setup logging.

    ``components``
        The components of the OCCO architecture that's need to be built.

        ``resourcehandler``
            *The* ``ResourceHandler`` instance (singleton) to be used by
            other components (e.g. the
            :class:`~occo.infraprocessor.InfraProcessor`. Multiple backends
            can be supported by using a basic
            :class:`occo.resourcehandler.ResourceHandler` instance here
            configured with multiple backend clouds/resources.

        ``configmanager``
            *The* ``ConfigManager`` instance (singleton) to be used by
            other components (e.g. the
            :class:`~occo.infraprocessor.InfraProcessor`. Multiple backends
            can be supported by using a basic
            :class:`occo.resourcehandler.ConfigManager` instance here
            configured with multiple backend service composers [#f1]_.

        ``uds``
            The storage used by this OCCO application.

    .. [#f1] This feature is not yet implemented at the time of writing.

    .. todo:: Change conditionals and scattered error handling in this
        function to preliminary schema-checking (when the schema has been
        finalized).
    """
    import occo.exceptions as exc
    import occo.util as util
    import occo.util.config as config
    import occo.infobroker as ib
    import logging
    import os

    # Parse configuration (and command line arguments, if setup_args is given).
    cfg = config.config(setup_args=setup_args, cfg_path=cfg_path,
                        auth_data_path=auth_data_path)

    log = logging.getLogger('occo')

    # This is shorter and faster than setting all variables through
    # `globals()`, and much shorter than listing all variables as "global"
    modvars = globals()
    # Publish the parsed configuration as module-level globals.
    modvars['args'] = cfg
    modvars['configuration'] = cfg.configuration
    try:
        occo_infra = cfg.configuration['components']
        modvars['components'] = occo_infra
        # Wire the configured singletons into the infobroker module so other
        # components can reach them.
        ib.real_main_info_broker = occo_infra['infobroker']
        ib.real_main_uds = occo_infra['uds']
        ib.real_main_resourcehandler = occo_infra['resourcehandler']
        ib.real_main_configmanager = occo_infra['configmanager']
        ib.configured_auth_data_path = cfg.auth_data_path
        # dry_run defaults to False when not present in the configuration.
        util.global_dry_run_set(util.coalesce(occo_infra.get('dry_run'), False))
    except KeyError as ex:
        # A missing mandatory component is a configuration error, not a bug.
        raise exc.MissingConfigurationError(ex.args[0])
def test_third(self):
    """coalesce skips leading Nones and returns the first real value."""
    outcome = util.coalesce(None, None, 'third')
    self.assertEqual(outcome, 'third')
def test_error(self):
    """A trailing exception instance is raised when all values are None."""
    sentinel = DummyException(':P')
    with self.assertRaises(DummyException):
        util.coalesce(None, None, None, sentinel)
def query_item(self, key, default=None):
    """Look up *key* in the backend, returning *default* when missing.

    The key is first mapped to the proper backend and backend-local key;
    stored data is deserialized (with an explicit YAML Loader) before
    being returned.
    """
    log.debug('Querying %r', key)
    backend, key = self.transform_key(key)
    raw = backend.get(key)
    value = (self.deserialize(raw, Loader=yaml.Loader)
             if raw else None)
    return util.coalesce(value, default)