def execute(self, provider_id, generic_filters=None, meta_filters=None,
            skip_store=False):
    """Return information on existing machine(s).

    :param provider_id: Provider's Id.
    :param generic_filters: Filter returned machines by some properties
                            other than metadata properties.
    :param meta_filters: Filter returned machines by metadata properties.
    :param skip_store: Proceed anyway if there is no information on the
                       machine in the state store. Default is False.
    """
    rset = ResultSet(
        names=('uuid', 'provider_id', 'av_zone', 'addresses'),
        types=(str, str, str, str)
    )

    if skip_store:
        # Query the provider directly, applying any filters.
        generic_filters, meta_filters = \
            _preprocess_filters(generic_filters, meta_filters)
        manager = _retrieve_manager(provider_id)
        machines = manager.search_machines(generic_filters, meta_filters)
    else:
        # Read from the state store; filters are not supported here.
        if generic_filters or meta_filters:
            raise _errors.ConfigurationError(
                "Filters are only supported when the 'skip_store' option "
                "is set.")
        provider = _retrieve_provider(provider_id)
        machines = Machine.machines(provider.provider_id)

    for machine in machines:
        rset.append_row(
            (str(machine.uuid), machine.provider_id, machine.av_zone,
             machine.addresses)
        )

    return CommandResult(None, results=rset)
def check_number_threads(increasing=0):
    """Check the number of threads that are running and whether the
    maximum number of connections in the state store is configured
    accordingly.

    :param increasing: Whether you want to increase the number of threads
                       and how many threads. Default is zero.

    It raises a ConfigurationError exception if the number of connections
    is too small.
    """
    # Imported here rather than at module level, presumably to avoid
    # circular imports at load time — keep local.
    from mysql.fabric import (
        errors as _errors,
        executor as _executor,
        persistence as _persistence,
        services as _services,
        server as _server,
    )

    n_sessions = _services.ServiceManager().get_number_sessions()
    n_executors = _executor.Executor().get_number_executors()
    n_failure_detectors = \
        len(_server.Group.groups_by_status(_server.Group.ACTIVE))
    n_controls = 1

    persister = _persistence.current_persister()
    # NOTE(review): the "- 1" presumably reserves one connection for
    # administrative use — confirm against the persistence layer.
    limit = persister.max_allowed_connections() - 1
    requested = (n_sessions + n_executors + n_controls +
                 n_failure_detectors + increasing)

    if requested > limit:
        raise _errors.ConfigurationError(
            "Too many threads requested. Session threads (%s), Executor "
            "threads (%s), Control threads (%s) and Failure Detector threads "
            "(%s). The maximum number of threads allowed is (%s). Increase "
            "the maximum number of connections in the state store in order "
            "to increase this limit." % (n_sessions, n_executors, n_controls,
            n_failure_detectors, limit))
def _check_extra_parameter(extra): """Check whether the extra parameters can be parsed. """ if extra: if not isinstance(extra, list): _errors.ConfigurationError( "Extra parameters must be provided as a list: %s." % (extra, ) ) kv_to_dict(extra)
def find_resource(meta, finder):
    """Find a resource based on some meta information.

    :param meta: Meta information used to search for a resource.
    :param finder: Callable that performs the search and returns a
                   sequence of matching resources.
    :return: The first matching resource.
    :raises ConfigurationError: If no resource matches the properties.
    """
    proc_meta = preprocess_meta(meta)
    resources = finder(**proc_meta)
    if not resources:
        # BUG FIX: the message said "image" although this helper searches
        # for any kind of resource (see the log messages below).
        raise _errors.ConfigurationError(
            "There is no resource with the requested properties: %s" %
            (proc_meta, )
        )
    elif len(resources) > 1:
        # BUG FIX: same "image" wording fixed in the warning as well.
        _LOGGER.warning(
            "There are more than one resource with the requested properties: "
            "(%s). Using (%s).", proc_meta, resources[0]
        )
    _LOGGER.info("Using resource (%s).", resources[0])
    return resources[0]
def _create_syslog_handler(config, info): """Define a syslog handler where logging information will be sent to. """ from logging.handlers import SYSLOG_UDP_PORT, SysLogHandler assert info.scheme == 'syslog' if info.netloc and info.path: raise _errors.ConfigurationError("Malformed syslog URL '%s'" % (info.geturl(), )) if info.netloc: assert not info.path address = info.netloc.split(':') if len(address) == 1: address.append(SYSLOG_UDP_PORT) elif info.path: assert not info.netloc address = info.path return SysLogHandler(address=address)
def _create_file_handler(config, info, delay=0): """Define a file handler where logging information will be sent to. """ from logging.handlers import RotatingFileHandler assert info.scheme == 'file' if info.netloc: raise _errors.ConfigurationError("Malformed file URL '%s'" % (info.geturl(), )) if os.path.isabs(info.path): path = info.path else: # Relative path, fetch the logdir from the configuration. # Using 'logging' section instead of 'DEFAULT' to allow # configuration parameters to be overridden in the logging # section. logdir = config.get('logging', 'logdir') path = os.path.join(logdir, info.path) return RotatingFileHandler(path, delay=delay)
def read_config_value(config, config_group, config_name):
    """Read the value of the configuration option from the config files.

    :param config: The config class that encapsulates the config parsing
                   logic.
    :param config_group: The configuration group to which the configuration
                         belongs.
    :param config_name: The name of the configuration that needs to be read.
    :return: The configured value.
    :raises ConfigurationError: If the option cannot be found.
    """
    try:
        value = config.get(config_group, config_name)
    except AttributeError:
        # NOTE(review): presumably covers a config object that is not
        # fully initialized; treated the same as a missing option —
        # confirm against the config class.
        value = None
    if value is None:
        raise _errors.ConfigurationError(CONFIG_NOT_FOUND %
                                         (config_group, config_name))
    return value
def _preprocess_filters(generic_filters, meta_filters):
    """Process filters.

    Convert both filter lists into dictionaries, reject reserved key
    words and constrain the metadata filters to machines created by this
    Fabric group.

    :param generic_filters: Key/value list of generic filters or None.
    :param meta_filters: Key/value list of metadata filters or None.
    :return: Tuple (generic_filters, meta_filters) as dictionaries.
    :raises ConfigurationError: If a filter uses a reserved key word.
    """
    generic_filters = _kv_to_dict(generic_filters) if generic_filters else {}
    meta_filters = _kv_to_dict(meta_filters) if meta_filters else {}

    for filters in (generic_filters, meta_filters):
        for key in filters.iterkeys():
            if key in reserved_meta:
                raise _errors.ConfigurationError(
                    "The filters option cannot have key words in the following "
                    "list: %s. They are reserved for internal use." %
                    (str(reserved_meta), ))

    # Restrict the search to machines owned by this Fabric group.
    meta_filters['mysql-fabric-group'] = str(FabricNode().group_uuid)
    return generic_filters, meta_filters
def _preprocess_paramaters(parameters, machine_group_uuid, provider):
    """Process paramaters.

    Normalize and validate, in place, the parameters used to create
    machine(s).

    :param parameters: Dictionary with the machine creation parameters;
                       updated in place.
    :param machine_group_uuid: Uuid of the group the machine(s) belong to.
    :param provider: Provider object supplying default image and flavor.
    :raises MachineError: If no valid image or flavor can be determined or
                          the number of machines is not positive.
    :raises ConfigurationError: If mutually exclusive or unsupported
                                parameters are provided or the userdata
                                file cannot be opened.
    """
    # 1. Put image parameter in the appropriate format.
    if parameters['image']:
        parameters['image'] = _kv_to_dict(parameters['image'])
    elif provider.default_image:
        parameters['image'] = {'name': provider.default_image}
    if not parameters['image']:
        # BUG FIX: the message was a double negative ("No valid image
        # hasn't been found.") which stated the opposite of the problem.
        raise _errors.MachineError("No valid image has been found.")

    # 2. Put flavor parameter in the appropriate format.
    if parameters['flavor']:
        parameters['flavor'] = _kv_to_dict(parameters['flavor'])
    elif provider.default_flavor:
        parameters['flavor'] = {'name': provider.default_flavor}
    if not parameters['flavor']:
        # BUG FIX: double negative fixed here as well.
        raise _errors.MachineError("No valid flavor has been found.")

    # 3. Check the parameter number_machines.
    number_machines = parameters['number_machines']
    try:
        number_machines = int(number_machines)
    except TypeError:
        # NOTE(review): only TypeError (e.g. None) defaults to one
        # machine; a non-numeric string raises ValueError and propagates
        # to the caller — confirm whether that is intended.
        number_machines = 1
    parameters['number_machines'] = number_machines
    if number_machines <= 0:
        raise _errors.MachineError(
            "Number of machines must be greater than zero (%s)." %
            (number_machines, ))

    # 4. We don't need to check the availability_zone parameter.

    # 5. We don't need to check the key_name parameter.

    # 6. Put the security_groups parameter in the appropriate format.
    if parameters['security_groups']:
        parameters['security_groups'] = \
            parameters['security_groups'].split(',')

    # 7. Check the private_network parameter.
    private_nics = parameters['private_nics']
    private_network = parameters['private_network']
    if private_network and private_nics:
        raise _errors.ConfigurationError(
            "Can't define both private_network (%s) and private_nics "
            "parameters (%s)." % (private_network, private_nics))

    # 8. Check the public_network parameter.
    public_nics = parameters['public_nics']
    public_network = parameters['public_network']
    if public_network and public_nics:
        raise _errors.ConfigurationError(
            "Can't define both public_network (%s) and public_nics "
            "parameters (%s)." % (public_network, public_nics))

    # 9. Read userdata parameter which must be a path to a file.
    if parameters['userdata']:
        src = parameters['userdata']
        try:
            # The open file object replaces the path; presumably the
            # provider layer consumes (and closes) it — confirm.
            userdata = open(src)
        except IOError as error:
            raise _errors.ConfigurationError("Can't open '%(src)s': %(exc)s" % {
                'src': src, 'exc': error
            })
        parameters['userdata'] = userdata

    # 10. We don't need to check the swap parameter.

    # 11. Put the block_device parameter in the appropriate format.
    if parameters['block_device']:
        raise _errors.ConfigurationError(
            "Parameter block_device is not supported yet.")

    # 12. Put the scheduler_hints parameter in the appropriate format.
    if parameters['scheduler_hints']:
        parameters['scheduler_hints'] = \
            _kv_to_dict(parameters['scheduler_hints'])

    # 13. Put the private_nics parameter in the appropriate format.
    if parameters['private_nics']:
        raise _errors.ConfigurationError(
            "Parameter private_nics is not supported yet.")

    # 14. Put the public_nics parameter in the appropriate format.
    if parameters['public_nics']:
        raise _errors.ConfigurationError(
            "Parameter public_nics is not supported yet.")

    # 15. Put meta parameter in the appropriate format.
    reserved_value = (
        'True', str(FabricNode().version), str(FabricNode().uuid),
        str(FabricNode().group_uuid), machine_group_uuid
    )
    assert len(reserved_meta) == len(reserved_value)
    if parameters['meta']:
        parameters['meta'] = _kv_to_dict(parameters['meta'])
        if any([key in reserved_meta
                for key in parameters['meta'].iterkeys()]):
            raise _errors.ConfigurationError(
                "The meta parameter cannot have key words in the following "
                "list: %s. They are reserved for internal use." %
                (str(reserved_meta), ))
    else:
        parameters['meta'] = {}
    # Reserved metadata always wins over user-supplied entries.
    parameters['meta'].update(dict(zip(reserved_meta, reserved_value)))
def _configure_connections(config):
    """Configure information on database connection and remote servers.

    :param config: Configuration object with the Fabric settings.
    """
    # Configure the number of concurrent executors.
    try:
        number_executors = int(config.get('executor', "executors"))
    except (_config.NoOptionError, _config.NoSectionError, ValueError):
        number_executors = DEFAULT_N_EXECUTORS
    _executor.Executor().set_number_executors(number_executors)

    services = {}
    ssl_config = {}

    # XML-RPC service (mandatory).
    try:
        services['protocol.xmlrpc'] = config.get('protocol.xmlrpc', "address")
        try:
            number_threads = int(config.get('protocol.xmlrpc', "threads"))
        except (_config.NoOptionError, ValueError):
            number_threads = DEFAULT_N_THREADS
        try:
            for option in ('ssl_ca', 'ssl_key', 'ssl_cert'):
                ssl_config[option] = config.get('protocol.xmlrpc', option)
        except _config.NoOptionError:
            # SSL is all-or-nothing: any missing option disables it.
            ssl_config = {}
    except _config.NoSectionError:
        raise _errors.ConfigurationError(
            'Configuration for protocol.xmlrpc is required')

    # MySQL-RPC service (optional).
    try:
        services['protocol.mysql'] = config.get('protocol.mysql', "address")
    except _config.NoSectionError:
        # No MySQL-RPC configured.
        pass

    # Define service configuration.
    _services.ServiceManager(services, number_threads, ssl_config)

    # Fetch options to configure the state store.
    address = config.get('storage', 'address')
    try:
        host, port = address.split(':')
        port = int(port)
    except ValueError:
        # No (usable) port in the address: use the default MySQL port.
        host = address
        port = _MYSQL_PORT
    user = config.get('storage', 'user')
    database = config.get('storage', 'database')
    try:
        password = config.get('storage', 'password')
    except _config.NoOptionError:
        # No password configured: ask for it interactively.
        password = getpass.getpass()

    def _optional(option, convert):
        """Return the converted 'storage' option or None when missing
        or unparsable."""
        try:
            return convert(config.get("storage", option))
        except (_config.NoOptionError, _config.NoSectionError, ValueError):
            return None

    connection_timeout = _optional("connection_timeout", float)
    connection_attempts = _optional("connection_attempts", int)
    connection_delay = _optional("connection_delay", int)
    auth_plugin = _optional("auth_plugin", str)

    # Define state store configuration.
    _persistence.init(
        host=host, port=port, user=user, password=password,
        database=database, connection_timeout=connection_timeout,
        connection_attempts=connection_attempts,
        connection_delay=connection_delay, auth_plugin=auth_plugin
    )