def _start(options, config):
    """Start the Fabric server.
    """
    # Remove temporary defaults files that might have been left behind
    # by previous runs of Fabric.
    _backup.cleanup_temp_defaults_files()

    # Configure TTL.
    _setup_ttl(config)

    # Configure modules that are not dynamically loaded.
    _server.configure(config)
    _error_log.configure(config)
    _failure_detector.configure(config)

    # Load information on all providers.
    providers.find_providers()

    # Load all services into the service manager.
    _services.ServiceManager().load_services(options, config)

    # Initialize the state store.
    _persistence.init_thread()

    # Check the maximum number of threads.
    _utils.check_number_threads()

    # Configure the Fabric node.
    fabric = FabricNode()
    reported = _utils.get_time()
    _LOGGER.info(
        "Fabric node version (%s) started.", fabric.version,
        extra={
            'subject': str(fabric.uuid),
            'category': MySQLHandler.NODE,
            'type': MySQLHandler.START,
            'reported': reported
        }
    )
    fabric.startup = reported

    # Start the executor, failure detector and then the service manager. In
    # this scenario, recovery is executed sequentially after starting the
    # executor and before starting the service manager.
    _events.Handler().start()
    _recovery.recovery()
    _failure_detector.FailureDetector.register_groups()
    _services.ServiceManager().start()
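The extra mapping passed to _LOGGER.info() relies on standard logging behaviour: each key becomes an attribute of the emitted LogRecord, which a custom handler such as MySQLHandler can then persist. A minimal sketch of that mechanism, using a hypothetical AuditHandler and placeholder values in place of the real node UUID and the MySQLHandler constants:

    import logging
    import time

    class AuditHandler(logging.Handler):
        """Stand-in for a handler like MySQLHandler: it reads the custom
        attributes attached via the ``extra`` argument off the LogRecord."""

        def emit(self, record):
            # Keys passed through ``extra`` become plain record attributes.
            row = (
                getattr(record, 'subject', None),
                getattr(record, 'category', None),
                getattr(record, 'type', None),
                getattr(record, 'reported', None),
                record.getMessage(),
            )
            print("audit row: %s" % (row, ))

    logger = logging.getLogger("fabric.demo")
    logger.addHandler(AuditHandler())
    logger.setLevel(logging.INFO)

    logger.info(
        "Fabric node version (%s) started.", "1.5.0",
        extra={
            'subject': 'NODE-UUID',    # str(fabric.uuid) in the real code
            'category': 'NODE',        # MySQLHandler.NODE
            'type': 'START',           # MySQLHandler.START
            'reported': time.time(),   # _utils.get_time() in the real code
        }
    )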
def test_statistics_node(self):
    """Test node view.
    """
    # Check statistics on the Fabric node.
    res = self.proxy.statistics.node()
    fabric = FabricNode()
    self.check_xmlrpc_simple(res, {
        'node_startup': str(fabric.startup),
    }, rowcount=1)
def _start(options, config):
    """Start the Fabric server.
    """
    # Configure modules that are not dynamically loaded.
    _server.configure(config)
    _error_log.configure(config)
    _failure_detector.configure(config)

    # Load all services into the service manager.
    _services.ServiceManager().load_services(options, config)

    # Initialize the state store.
    _persistence.init_thread()

    # Check the maximum number of threads.
    _utils.check_number_threads()

    # Configure the Fabric node.
    fabric = FabricNode()
    reported = _utils.get_time()
    _LOGGER.info(
        "Fabric node starting.",
        extra={
            'subject': str(fabric.uuid),
            'category': MySQLHandler.NODE,
            'type': MySQLHandler.START,
            'reported': reported
        }
    )
    fabric.startup = reported

    # Start the executor, failure detector and then the service manager. In
    # this scenario, recovery is executed sequentially after starting the
    # executor and before starting the service manager.
    _events.Handler().start()
    _recovery.recovery()
    _failure_detector.FailureDetector.register_groups()
    _services.ServiceManager().start()
def execute(self):
    """Statistics on the Fabric node.

    It returns information on the Fabric node, specifically a list with
    the following fields: node identification, how long it has been
    running and when it was started.
    """
    fabric = FabricNode()
    node_id = fabric.uuid
    node_startup = fabric.startup
    node_uptime = _utils.get_time() - node_startup

    rset = ResultSet(
        names=('node_id', 'node_uptime', 'node_startup'),
        types=(str, str, str)
    )
    rset.append_row([node_id, node_uptime, node_startup])
    return CommandResult(None, results=rset)
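Assuming _utils.get_time() returns a datetime.datetime (the result set declares all three columns as strings), the uptime is simply the difference between the current time and the recorded startup time. A standalone sketch with placeholder values:

    import datetime
    import uuid

    # Hypothetical stand-ins for FabricNode().uuid, FabricNode().startup and
    # _utils.get_time(); the real values come from the running node.
    node_id = uuid.uuid4()
    node_startup = datetime.datetime.utcnow() - datetime.timedelta(minutes=42)

    node_uptime = datetime.datetime.utcnow() - node_startup   # a timedelta

    # All three columns are typed as str, so the row is rendered textually,
    # e.g. "0:42:00.000123" for the uptime.
    row = [str(node_id), str(node_uptime), str(node_startup)]
    print(row)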
def _preprocess_filters(generic_filters, meta_filters):
    """Process filters.
    """
    if generic_filters:
        generic_filters = _kv_to_dict(generic_filters)
    else:
        generic_filters = {}

    if meta_filters:
        meta_filters = _kv_to_dict(meta_filters)
    else:
        meta_filters = {}

    for filters in (generic_filters, meta_filters):
        if any(key in reserved_meta for key in filters.iterkeys()):
            raise _errors.ConfigurationError(
                "The filters option cannot have keywords in the following "
                "list: %s. They are reserved for internal use." %
                (str(reserved_meta), )
            )

    meta_filters['mysql-fabric-group'] = str(FabricNode().group_uuid)

    return generic_filters, meta_filters
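The _kv_to_dict helper is defined elsewhere in the module. Assuming each filter entry is a "key=value" string, a hypothetical sketch of the conversion, followed by the reserved group tag added the same way _preprocess_filters does:

    def _kv_to_dict_sketch(entries):
        """Illustrative only: convert ['key=value', ...] entries into a dict.
        The real _kv_to_dict helper is not shown in these snippets."""
        result = {}
        for entry in entries:
            key, sep, value = entry.partition('=')
            if not sep or not key:
                raise ValueError("Malformed entry: %r" % (entry, ))
            result[key.strip()] = value.strip()
        return result

    # Example: user-supplied meta filters, then the Fabric group tag.
    meta_filters = _kv_to_dict_sketch(['zone=eu-west', 'tier=gold'])
    meta_filters['mysql-fabric-group'] = 'GROUP-UUID'   # str(FabricNode().group_uuid)
    print(meta_filters)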
def _preprocess_parameters(parameters, machine_group_uuid, provider):
    """Process parameters.
    """
    # Check whether all parameters are expected.
    for key, value in parameters.items():
        if key not in VALID_PARAMETERS and (value is not None and value):
            raise MachineError(
                "Parameter (%s) is not in the set of possible parameters: "
                "%s." % (key, VALID_PARAMETERS)
            )
        elif key not in VALID_PARAMETERS:
            del parameters[key]

    # 1. Put the image parameter in the appropriate format.
    if parameters['image']:
        parameters['image'] = kv_to_dict(parameters['image'])
    elif provider.default_image:
        parameters['image'] = {'name': provider.default_image}
    if not parameters['image']:
        raise MachineError("No valid image has been found.")

    # 2. Put the flavor parameter in the appropriate format.
    if parameters['flavor']:
        parameters['flavor'] = kv_to_dict(parameters['flavor'])
    elif provider.default_flavor:
        parameters['flavor'] = {'name': provider.default_flavor}
    if not parameters['flavor']:
        raise MachineError("No valid flavor has been found.")

    # 3. Check the number_machines parameter.
    number_machines = parameters['number_machines']
    try:
        number_machines = int(number_machines)
        parameters['number_machines'] = number_machines
    except TypeError:
        number_machines = 1
        parameters['number_machines'] = number_machines
    if number_machines <= 0:
        raise MachineError(
            "Number of machines must be greater than zero (%s)." %
            (number_machines, )
        )

    # 4. We don't need to check the availability_zone parameter.

    # 5. We don't need to check the key_name parameter.

    # 6. Put the security_groups parameter in the appropriate format.
    if parameters['security_groups']:
        security_groups = parameters['security_groups'].split(',')
        parameters['security_groups'] = security_groups

    # 7. Check the private_network parameter.
    private_nics = parameters['private_nics']
    private_network = parameters['private_network']
    if private_network and private_nics:
        raise ConfigurationError(
            "Can't define both private_network (%s) and private_nics "
            "parameters (%s)." % (private_network, private_nics)
        )

    # 8. Check the public_network parameter.
    public_nics = parameters['public_nics']
    public_network = parameters['public_network']
    if public_network and public_nics:
        raise ConfigurationError(
            "Can't define both public_network (%s) and public_nics "
            "parameters (%s)." % (public_network, public_nics)
        )

    # 9. Read the userdata parameter, which must be a path to a file.
    if parameters['userdata']:
        try:
            src = parameters['userdata']
            userdata = open(src)
        except IOError as error:
            raise ConfigurationError(
                "Can't open '%(src)s': %(exc)s" % {'src': src, 'exc': error}
            )
        parameters['userdata'] = userdata

    # 10. We don't need to check the swap parameter.

    # 11. Put the block_device parameter in the appropriate format.
    if parameters['block_device']:
        raise ConfigurationError(
            "Parameter block_device is not supported yet."
        )

    # 12. Put the scheduler_hints parameter in the appropriate format.
    if parameters['scheduler_hints']:
        parameters['scheduler_hints'] = \
            kv_to_dict(parameters['scheduler_hints'])

    # 13. Put the private_nics parameter in the appropriate format.
    if parameters['private_nics']:
        raise ConfigurationError(
            "Parameter private_nics is not supported yet."
        )

    # 14. Put the public_nics parameter in the appropriate format.
    if parameters['public_nics']:
        raise ConfigurationError(
            "Parameter public_nics is not supported yet."
        )

    # 15. Put the meta parameter in the appropriate format.
    reserved_value = (
        'True', str(FabricNode().version), str(FabricNode().uuid),
        str(FabricNode().group_uuid), machine_group_uuid
    )
    assert len(reserved_meta) == len(reserved_value)
    if parameters['meta']:
        parameters['meta'] = kv_to_dict(parameters['meta'])
        if any(key in reserved_meta for key in parameters['meta'].iterkeys()):
            raise ConfigurationError(
                "The meta parameter cannot have keywords in the following "
                "list: %s. They are reserved for internal use." %
                (str(reserved_meta), )
            )
    else:
        parameters['meta'] = {}
    meta = dict(zip(reserved_meta, reserved_value))
    parameters['meta'].update(meta)
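The final step pairs the reserved keys with their values and merges them over whatever the caller supplied, so the Fabric-owned entries always win. A sketch of that merge with hypothetical key names and values (only 'mysql-fabric-group' is confirmed by the surrounding snippets; the real reserved_meta constant is defined elsewhere):

    # Hypothetical stand-ins for the module-level reserved_meta constant and
    # for the values drawn from FabricNode().
    reserved_meta = (
        'mysql-fabric', 'mysql-fabric-version', 'mysql-fabric-uuid',
        'mysql-fabric-group', 'mysql-fabric-machine-group-uuid',
    )
    reserved_value = ('True', '1.5.0', 'NODE-UUID', 'GROUP-UUID', 'MACHINE-GROUP-UUID')

    user_meta = {'purpose': 'benchmarking'}          # what the caller asked for
    meta = dict(zip(reserved_meta, reserved_value))  # pair keys with values

    # update() runs last, so the reserved entries override any user input.
    user_meta.update(meta)
    print(user_meta)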