def go(self, engine):
    """
    Starts the prepare process

    Fetches shared credentials from the engine, and optionally adds a
    shared key declared in the settings to the list of key files.

    :param engine: access to global parameters and functions
    :type engine: :class:`plumbery.PlumberyEngine`
    """
    super(PreparePolisher, self).go(engine)

    self.report = []

    self.user = engine.get_shared_user()
    self.secret = engine.get_shared_secret()
    self.key_files = engine.get_shared_key_files()

    if 'key' not in self.settings:
        return

    key = self.settings['key']
    expanded = os.path.expanduser(key)
    if not os.path.isfile(expanded):
        plogging.error("Error: missing file {}".format(key))
        return

    plogging.debug("- using shared key {}".format(key))
    if self.key_files is None:
        self.key_files = [key]
    else:
        # shared key takes precedence over keys already known
        self.key_files.insert(0, key)
def destroy(self, settings, safe=True):
    """
    Destroys a terraform deployment described in the fittings plan.

    :param settings: the fittings plan for this deployment
    :type settings: ``dict``

    :param safe: when True, only compute a destroy plan (dry-run)
    :type safe: ``bool``
    """
    directory = settings.get('tf_path', None)
    if directory is None:
        # default back to the directory of the fittings file.
        directory = self.working_directory

    # materialize plan parameters as terraform variables
    var_file = os.path.join(directory, '.tfvars')
    with open(var_file, 'w') as handle:
        for (name, content) in settings.get('parameters', {}).items():
            handle.write('%s = "%s"\n' % (name, content))

    if safe:
        # dry-run: compute the destroy plan without applying it
        _, out, _ = self._run_tf(
            'plan', directory,
            var_file=var_file,
            input=False,
            detailed_exitcode=True,
            destroy=True)
    else:
        _, out, _ = self._run_tf(
            'destroy', directory,
            var_file=var_file,
            input=False,
            force=True)
    plogging.debug("STDOUT from terraform %s", out)
def configure(self, node, settings):
    """
    Extracts the amount of memory, in GB, from node settings.

    :param node: the node being configured
    :param settings: the fittings plan for this node
    :type settings: ``dict``

    :return: memory in GB, or False when not specified
    """
    if self._element_name_ not in settings:
        return False

    memory = int(settings[self._element_name_])
    plogging.debug("- setting {} GB of memory".format(
        memory))
    return memory
def __init__(self, working_directory):
    """
    Remembers the working directory and locates the terraform binary.

    :param working_directory: directory of the fittings file
    :type working_directory: ``str``
    """
    self.working_directory = working_directory

    # TERRAFORM_PATH points to the terraform executable, if any
    self.tf_path = os.getenv('TERRAFORM_PATH')
    binary_missing = (self.tf_path is None
                      or not os.path.exists(self.tf_path))
    if binary_missing:
        plogging.debug("Could not locate terraform binary. "
                       "Please check TERRAFORM_PATH ENV var."
                       "Ignore if no multicloud fittings are present")
def build(self, settings):
    """
    Builds a terraform deployment from the fittings plan.

    Writes plan parameters to a '.tfvars' file, computes a terraform
    plan, and applies it only when terraform reports pending changes
    (detailed exit code 2). Temporary files are removed afterwards.

    :param settings: the fittings plan for this deployment
    :type settings: ``dict``
    """
    tf_path = settings.get('tf_path', None)
    if tf_path is None:
        # default back to the directory of the fittings file.
        tf_path = self.working_directory

    var_file = os.path.join(tf_path, '.tfvars')
    state_file = os.path.join(tf_path, '.tfstate')

    parameters = settings.get('parameters', {})
    with open(var_file, 'w') as tf_vars:
        for (key, value) in parameters.items():
            tf_vars.write('%s = "%s"\n' % (key, value))

    ret, o, err = self._run_tf(
        'plan', tf_path,
        var_file=var_file,
        input=False,
        detailed_exitcode=True,
        out=state_file)
    plogging.debug("STDOUT from terraform plan %s", o)
    # fixed: 'if err != "" or None:' — the 'or None' was dead, and the
    # condition also logged when stderr was None; test truthiness instead
    if err:
        plogging.error(err)

    if ret == 2:  # detailed exit code 2: changes are pending, apply them
        _, o, err = self._run_tf('apply', state_file)
        plogging.debug("STDOUT from terraform apply %s", o)
        if err:
            plogging.error(err)

    # clean up the temporary plan and variable files
    if os.path.isfile(state_file):
        os.remove(state_file)
    if os.path.isfile(var_file):
        os.remove(var_file)
def destroy_all_blueprints(self):
    """
    Destroys all blueprints at this facility

    Blueprints in the basement are destroyed last, since other
    blueprints may rely on the resources they provide.
    """
    self.power_on()

    nodes = PlumberyNodes(self)
    infrastructure = PlumberyInfrastructure(self)

    basement = self.list_basement()

    # regular blueprints first, then those in the basement
    sequence = [name for name in self.expand_blueprint('*')
                if name not in basement]
    sequence.extend(basement)

    for name in sequence:
        blueprint = self.get_blueprint(name)
        plogging.debug("Destroying blueprint '{}'".format(name))
        nodes.destroy_blueprint(blueprint)
        infrastructure.destroy_blueprint(blueprint)
def load_all(cls, settings=None):
    """
    Loads all actions found in the actions/ sub-directory.

    :param settings: optional per-action parameters, keyed by label
    :type settings: ``dict``

    :return: loaded actions, keyed by label
    :rtype: ``dict``
    """
    if settings is None:  # avoid a shared mutable default argument
        settings = {}

    actions = {}
    there = os.path.abspath(os.path.dirname(__file__))+'/actions'
    # hoisted out of the walk loop: it logged the same path repeatedly
    plogging.debug("Loading actions from %s", there)
    for unused1, unused2, files in os.walk(there):
        for file in files:
            # fixed: split('.') raised ValueError for files with no
            # extension, e.g. a stray README — skip them instead
            if '.' not in file:
                continue
            label, extension = file.split('.', 1)
            if len(label) < 2:
                continue
            if label[0] in ('-', '_', '~'):
                continue
            if extension == 'py':
                plogging.debug("Loading action %s", file)
                parameters = settings.get(label, {})
                actions[label] = PlumberyActionLoader.load(label, parameters)
    return actions
def upgrade_vmware_tools(self, node):
    """
    Upgrade VMware tools on target node

    :param node: the node to be polished
    :type node: :class:`libcloud.compute.base.Node`

    :return: True on success or when tools are current, False otherwise
    :rtype: ``bool``
    """
    if self.engine.safeMode:
        return True

    while True:
        try:
            self.region.ex_update_vm_tools(node=node)
            plogging.info("- upgrading vmware tools")
            return True

        except Exception as feedback:
            message = str(feedback)

            # transient conditions: back off and retry
            if ('RESOURCE_BUSY' in message
                    or 'Please try again later' in message):
                time.sleep(10)
                continue

            if 'NO_CHANGE' in message:
                plogging.debug("- vmware tools is already up-to-date")
                return True

            plogging.warning("- unable to upgrade vmware tools")
            plogging.warning(message)
            return False
def configure(self, node, settings):
    """
    Builds a CPU specification from node settings.

    Accepts up to three whitespace-separated tokens: cpu count,
    cores per socket (default '1'), and performance (default 'standard').

    :return: a cpu specification, or False when not specified
    """
    if self._element_name_ not in settings:
        return False

    tokens = str(settings[self._element_name_]).split(' ')
    # pad missing optional tokens with their defaults
    defaults = ['1', 'standard']
    while len(tokens) < 3:
        tokens.append(defaults[len(tokens) - 1])

    plogging.debug("- setting compute {}".format(' '.join(tokens)))
    return DimensionDataServerCpuSpecification(
        cpu_count=tokens[0],
        cores_per_socket=tokens[1],
        performance=tokens[2].upper())
def _run_tf(self, command, state_directory, **kwargs):
    """
    Runs the terraform binary and captures its output.

    :param command: terraform sub-command, e.g., 'plan' or 'apply'
    :type command: ``str``

    :param state_directory: directory appended as the final argument
    :type state_directory: ``str``

    :param kwargs: rendered as '-key=value' flags ('_' becomes '-')

    :return: (return code, stdout, stderr)
    :rtype: ``tuple``

    :raises RuntimeError: when the terraform binary cannot be found
    """
    if self.tf_path is None:
        plogging.error("Could not locate terraform binary. "
                       "Please check TERRAFORM_PATH ENV var.")
        raise RuntimeError("Missing terraform binary")

    params = [self.tf_path, command]
    params.extend("-%s=%s" % (key.replace('_', '-'), value)
                  for (key, value) in kwargs.items())
    params.append(state_directory)
    plogging.debug(params)

    process = subprocess.Popen(params,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    return (process.returncode, stdout, stderr)
def shine_node(self, node, settings, container):
    """
    Shows information attached to node, if any

    :param node: the node to be polished
    :type node: :class:`libcloud.compute.base.Node`

    :param settings: the fittings plan for this node
    :type settings: ``dict``

    :param container: the container of this node
    :type container: :class:`plumbery.PlumberyInfrastructure`
    """
    plogging.info("- examinating node '{}'".format(settings['name']))

    lines = []

    # surface any '#plumbery'-tagged description carried by the node
    if (node is not None and 'description' in node.extra):
        description = node.extra['description'].replace(
            '#plumbery', '').strip()
        if len(description) > 0:
            lines.append(description)

    # one line describing the power state of the node
    if node is None:
        plogging.debug("- not found")
        status = "node is unknown"
    elif node.state == NodeState.RUNNING:
        status = "node is up and running"
    elif node.state in (NodeState.TERMINATED,
                        NodeState.STOPPED,
                        NodeState.SUSPENDED):
        status = "node has been stopped"
    else:
        status = "state: {}".format(node.state)
    lines.append(status)

    if node is not None:
        lines += self.list_information(
            node=node,
            settings=settings,
            container=container)

    if len(lines) < 1:
        return

    self.information.append("About '{}':".format(settings['name']))
    for line in lines:
        self.information.append("- {}".format(line))
def shine_container(self, container):
    """
    Configures a container

    Waits until every node of the blueprint has completed deployment,
    then builds firewall rules and the load balancer.

    :param container: the container to be polished
    :type container: :class:`plumbery.PlumberyInfrastructure`
    """
    plogging.info("Configuring blueprint '{}'".format(
        container.blueprint['target']))

    if container.network is None:
        plogging.error("- aborted - no network here")
        return

    self.container = container

    plogging.info("- waiting for nodes to be deployed")

    for name in sorted(self.nodes.list_nodes(container.blueprint)):
        while True:
            node = self.nodes.get_node(name)
            if node is None:
                plogging.error("- aborted - missing node '{}'".format(name))
                return

            # deployment is complete when no action remains pending
            if node.extra['status'].action is None:
                plogging.debug("- {} is ready".format(node.name))
                break

            if (node is not None and
                    node.extra['status'].failure_reason is not None):
                plogging.error("- aborted - failed deployment "
                               "of node '{}'".format(name))
                return

            time.sleep(20)

    plogging.info("- nodes have been deployed")

    container._build_firewall_rules()
    container._build_balancer()
def test_direct(self):
    """Exercises plogging handlers, levels and error accounting."""

    class DullHandler(logging.NullHandler):
        # accept every record so format() is exercised
        level = logging.DEBUG

        def emit(self, record):
            log_entry = self.format(record)

    plogging.addHandler(DullHandler())
    plogging.setLevel(logging.DEBUG)
    self.assertEqual(plogging.getEffectiveLevel(), logging.DEBUG)

    # emit one record per severity; messages match the originals
    for severity in ('debug', 'info', 'warning', 'error', 'critical'):
        getattr(plogging, severity)("hello world -- {}".format(severity))

    self.assertEqual(plogging.foundErrors(), True)

    plogging.reset()
    self.assertEqual(plogging.foundErrors(), False)
def polish_all_blueprints(self, polishers):
    """
    Walks all resources at this facility and polish them

    Basement blueprints are processed first, then all remaining
    blueprints of the fittings plan.

    :param polishers: polishers to be applied
    :type polishers: list of :class:`plumbery.PlumberyPolisher`
    """
    basement = self.list_basement()

    sequence = list(basement)
    sequence.extend(name for name in self.expand_blueprint('*')
                    if name not in basement)

    for name in sequence:
        plogging.debug("Processing blueprint '{}'".format(name))
        self.polish_blueprint(name, polishers)
def power_on(self):
    """
    Switches electricity on

    Lazily creates the compute and backup drivers for this facility,
    honours the LIBCLOUD_HTTP_PROXY environment variable, and resolves
    the configured location id against the region's catalog.

    :raises PlumberyException: when the region, the location, or the
        API endpoint cannot be reached
    """
    regionId = self.get_setting('regionId')
    host = self.get_setting('apiHost')
    locationId = self.get_setting('locationId')

    try:
        if self.region is None:
            plogging.debug("Getting driver for '%s / %s'", regionId, host)
            self.region = self.plumbery.get_compute_driver(
                region=regionId, host=host)
            self.backup = self.plumbery.get_backup_driver(
                region=regionId, host=host)

            if os.getenv('LIBCLOUD_HTTP_PROXY') is not None:
                plogging.debug('Setting proxy to %s' % (
                    os.getenv('LIBCLOUD_HTTP_PROXY')))
                self.region.connection.set_http_proxy(
                    proxy_url=os.getenv('LIBCLOUD_HTTP_PROXY'))
                self.backup.connection.set_http_proxy(
                    proxy_url=os.getenv('LIBCLOUD_HTTP_PROXY'))

                # NOTE(review): SSL verification is disabled whenever a
                # proxy is configured — confirm this is intentional
                plogging.debug('Disabling SSL verification')
                import libcloud.security
                libcloud.security.VERIFY_SSL_CERT = False

        if self.location is None:
            plogging.debug("Getting location '{}'".format(locationId))
            locations = []
            # scan the whole catalog so 'Known locations' can be reported
            for location in self.region.list_locations():
                locations.append(location.id)
                if location.id == locationId:
                    self.location = location
            if self.location is None:
                plogging.info("Known locations: {}".format(locations))
                raise PlumberyException("Unknown location '{}' in '{}'"
                                        .format(locationId, regionId))

    except ValueError:
        raise PlumberyException("Unknown region '{}'"
                                .format(regionId))

    except socket.gaierror:
        raise PlumberyException("Cannot communicate with the API endpoint")
def configure(self, node, settings):
    """
    Configures the virtual disks of a node.

    Each disk is described by whitespace-separated attributes: disk id,
    size in GB, and an optional speed (default 'standard').

    :param node: the node being configured
    :param settings: the fittings plan for this node
    :type settings: ``dict``

    :return: True when disk settings were processed, False otherwise
    """
    if self._element_name_ not in settings:
        return False

    for item in settings['disks']:
        plogging.debug("- setting disk {}".format(item))
        attributes = item.split()
        if len(attributes) < 2:
            plogging.info("- malformed disk attributes;"
                          " provide disk id and size in GB, e.g., 1 50;"
                          " add disk type if needed, e.g., economy")
            # fixed: previously fell through to set_node_disk() with
            # undefined id/size/speed, raising NameError
            continue
        id = int(attributes[0])
        size = int(attributes[1])
        speed = attributes[2] if len(attributes) > 2 else 'standard'
        self.set_node_disk(node, id, size, speed)
    return True
def _setup_winrm(self, node):
    """
    Setup WinRM on a remote node

    :param node: the node to be polished
    :type node: :class:`libcloud.compute.base.Node`
    """
    ip = node.private_ips[0]

    def remote(command):
        # run one command on the target host with shared credentials
        return run_cmd(command,
                       args=[],
                       user=self.username,
                       password=self.secret,
                       host=ip)

    plogging.debug("Testing out quick function on %s", ip)
    plogging.info(remote('echo hello'))

    plogging.debug("Running winexe to remotely configure %s", ip)
    for cmd in ("winrm quickconfig -quiet",
                "winrm set winrm/config/service/auth @{Basic=\"true\"}",
                "winrm set winrm/config/service @{AllowUnencrypted=\"true\"}"):
        plogging.debug('Running command "%s"', cmd)
        plogging.info(remote(cmd))
def focus(self):
    """
    Where are we plumbing?

    Logs the current location, the available blueprints and the
    basement blueprints, if any.
    """
    self.power_on()
    plogging.info("Plumbing at '{}' {} ({})".format(
        self.location.id,
        self.location.name,
        self.location.country))

    def quoted(names):
        # render names as "'a', 'b', 'c'" for the log lines below
        return "'"+"', '".join(names)+"'"

    blueprints = self.list_blueprints()
    if len(blueprints) < 1:
        plogging.warning("- no blueprint has been found")
    else:
        plogging.debug("- available blueprints: {}".format(
            quoted(blueprints)))

    basement = self.list_basement()
    if len(basement) > 0:
        plogging.debug("- basement: {}".format(quoted(basement)))
def from_shelf(cls, polishId, settings=None):
    """
    Picks up a polisher from the shelf

    :param polishId: name of the polisher to use, e.g., ``inventory``
    :type polishId: ``str``

    :param settings: specific settings for this polisher
    :type settings: ``dict``

    :return: instance of a polisher ready to use
    :rtype: :class:`plumbery.PlumberyPolisher`
    """
    moduleName = 'plumbery.polishers.' + polishId
    polisherName = polishId.capitalize() + 'Polisher'

    try:
        plogging.debug("Importing '{}'".format(moduleName))
        polisherModule = __import__(
            moduleName,
            globals(),
            locals(),
            [polisherName])

        plogging.debug("Instantiating '{}'".format(polisherName))
        polisherClass = getattr(polisherModule, polisherName)

        # fixed: the mutable default 'settings={}' was mutated below,
        # leaking the 'name' key across unrelated calls
        if settings is None:
            settings = {}
        settings['name'] = polishId
        return polisherClass(settings)

    except ImportError:
        plogging.debug("Unable to find module '{}'".format(moduleName))
        raise

    except Exception:
        plogging.debug("Unable to import '{}' from '{}'".format(
            polisherName, moduleName))
        raise
def from_shelf(cls, polishId, settings=None):
    """
    Picks up a polisher from the shelf

    :param polishId: name of the polisher to use, e.g., ``inventory``
    :type polishId: ``str``

    :param settings: specific settings for this polisher
    :type settings: ``dict``

    :return: instance of a polisher ready to use
    :rtype: :class:`plumbery.PlumberyPolisher`
    """
    moduleName = 'plumbery.polishers.' + polishId
    polisherName = polishId.capitalize() + 'Polisher'

    try:
        plogging.debug("Importing '{}'".format(moduleName))
        polisherModule = __import__(moduleName,
                                    globals(),
                                    locals(),
                                    [polisherName])

        plogging.debug("Instantiating '{}'".format(polisherName))
        polisherClass = getattr(polisherModule, polisherName)

        # fixed: the mutable default 'settings={}' was mutated below,
        # leaking the 'name' key across unrelated calls
        if settings is None:
            settings = {}
        settings['name'] = polishId
        return polisherClass(settings)

    except ImportError:
        plogging.debug("Unable to find module '{}'".format(moduleName))
        raise

    except Exception:
        plogging.debug("Unable to import '{}' from '{}'".format(
            polisherName, moduleName))
        raise
def load(cls, label, parameters=None):
    """
    Picks up an action from the shelf

    :param label: name of the action to use, e.g., ``inventory``
    :type label: ``str``

    :param parameters: specific parameters for this action
    :type parameters: ``dict``

    :return: instance of a action ready to use
    :rtype: :class:`plumbery.PlumberyAction`
    """
    moduleName = 'plumbery.actions.' + label
    actionName = label.capitalize() + 'Action'
    try:
        plogging.debug("Importing '{}'".format(moduleName))
        actionModule = __import__(
            moduleName,
            globals(),
            locals(),
            [actionName])

        plogging.debug("Instantiating '{}'".format(actionName))
        actionClass = getattr(actionModule, actionName)

        # fixed: the default used to be a single PlumberyParameters()
        # instance created at definition time and shared across calls
        if parameters is None:
            parameters = PlumberyParameters()
        elif isinstance(parameters, dict):
            parameters = PlumberyParameters(parameters)

        action = actionClass(parameters)
        action.label = label
        return action

    except ImportError:
        plogging.debug("Unable to find module '{}'".format(moduleName))
        raise

    except TypeError:
        plogging.debug("Invalid parameters for '{}'".format(moduleName))
        raise

    except ValueError:
        plogging.debug("Invalid parameters for '{}'".format(moduleName))
        raise

    except Exception:
        plogging.debug("Unable to import '{}' from '{}'".format(
            actionName, moduleName))
        raise
def from_shelf(cls, label, engine, facility, settings=None):
    """
    Picks up fitting from the shelf

    :param label: name of fitting to use, e.g., ``domain``
    :type label: ``str``

    :param settings: specific settings for this fitting
    :type settings: ``dict``

    :param engine: the automate that is coordinating
        plumbing activities at multiple facilities
    :type engine: :class:`plumbery.PlumberyEngine`

    :param facility: the place that is making a context for this fitting
    :type facility: :class:`plumbery.PlumberyFacility`

    :return: instance of fitting ready to use
    :rtype: :class:`plumbery.PlumberyFitting`
    """
    moduleName = 'plumbery.fittings.' + label
    fittingsName = label.capitalize() + 'Fitting'

    try:
        plogging.debug("Importing '{}'".format(moduleName))
        fittingsModule = __import__(
            moduleName,
            globals(),
            locals(),
            [fittingsName])

        plogging.debug("Instantiating '{}'".format(fittingsName))
        fittingsClass = getattr(fittingsModule, fittingsName)

        # fixed: 'settings={}' shared one dict across every call;
        # allocate a fresh one instead
        if settings is None:
            settings = {}
        fitting = fittingsClass(engine, facility)
        fitting.parse(settings)
        fitting.label = label
        return fitting

    except ImportError:
        plogging.debug("Unable to find module '{}'".format(moduleName))
        raise

    except TypeError:
        plogging.debug("Invalid settings for '{}'".format(moduleName))
        raise

    except ValueError:
        plogging.debug("Invalid settings for '{}'".format(moduleName))
        raise

    except Exception:
        plogging.debug("Unable to import '{}' from '{}'".format(
            fittingsName, moduleName))
        raise
def from_shelf(cls, label, engine, facility, settings=None):
    """
    Picks up fitting from the shelf

    :param label: name of fitting to use, e.g., ``domain``
    :type label: ``str``

    :param settings: specific settings for this fitting
    :type settings: ``dict``

    :param engine: the automate that is coordinating
        plumbing activities at multiple facilities
    :type engine: :class:`plumbery.PlumberyEngine`

    :param facility: the place that is making a context for this fitting
    :type facility: :class:`plumbery.PlumberyFacility`

    :return: instance of fitting ready to use
    :rtype: :class:`plumbery.PlumberyFitting`
    """
    moduleName = 'plumbery.fittings.' + label
    fittingsName = label.capitalize() + 'Fitting'

    try:
        plogging.debug("Importing '{}'".format(moduleName))
        fittingsModule = __import__(moduleName,
                                    globals(),
                                    locals(),
                                    [fittingsName])

        plogging.debug("Instantiating '{}'".format(fittingsName))
        fittingsClass = getattr(fittingsModule, fittingsName)

        # fixed: 'settings={}' shared one dict across every call;
        # allocate a fresh one instead
        if settings is None:
            settings = {}
        fitting = fittingsClass(engine, facility)
        fitting.parse(settings)
        fitting.label = label
        return fitting

    except ImportError:
        plogging.debug("Unable to find module '{}'".format(moduleName))
        raise

    except TypeError:
        plogging.debug("Invalid settings for '{}'".format(moduleName))
        raise

    except ValueError:
        plogging.debug("Invalid settings for '{}'".format(moduleName))
        raise

    except Exception:
        plogging.debug("Unable to import '{}' from '{}'".format(
            fittingsName, moduleName))
        raise
def attach_node(self, node, networks):
    """
    Glues a node to multiple networks

    :param node: the target node
    :type node: :class:`libcloud.compute.base.Node`

    :param networks: a list of networks to connect, and ``internet``
    :type networks: list of ``str``

    This function adds network interfaces to a node, or adds address
    translation to the public Internet.

    Example in the fittings plan::

      - web:
          domain:
            ipv4: 6
          ethernet:
            name: gigafox.data
          nodes:
            - web[10..12]:
                glue:
                  - gigafox.control
                  - internet 80 443

    In this example, another network interface is added to each node for
    connection to the Ethernet network ``gigafox.control``.

    Also, public IPv4 addresses are mapped on private addresses, so that
    each node web10, web11 and web12 is reachable from the internet.
    Public IPv4 addresses are taken from pool declared at the domain
    level, with the attribute ``ipv4``. In the example above, 6
    addresses are assigned to the network domain, of which 3 are given
    to web nodes.

    If one or multiple numbers are mentioned after the keyword
    `internet`, they are used to configure the firewall appropriately.
    """
    hasChanged = False

    if node is None:
        return hasChanged

    for line in networks:
        tokens = line.strip(' ').split(' ')
        token = tokens.pop(0)

        # 'internet' entries configure NAT, not an extra interface
        if token.lower() == 'internet':
            self.attach_node_to_internet(node, tokens)
            continue

        # the primary network is attached at node creation; skip it
        if token == self.container.blueprint['ethernet']['name']:
            continue

        if token.lower() == 'primary':
            continue

        plogging.info("Glueing node '{}' to network '{}'"
                      .format(node.name, token))
        vlan = self.container.get_ethernet(token.split('::'))
        if vlan is None:
            plogging.info("- network '{}' is unknown".format(token))
            continue

        kwargs = {}
        if len(tokens) > 0:
            # a trailing number selects a fixed private address inside
            # the vlan subnet; missing octets come from the subnet base
            numbers = tokens.pop(0).strip('.').split('.')
            subnet = vlan.private_ipv4_range_address.split('.')
            while len(numbers) < 4:
                numbers.insert(0, subnet[3-len(numbers)])
            private_ipv4 = '.'.join(numbers)
            plogging.debug("- using address '{}'".format(private_ipv4))
            kwargs['private_ipv4'] = private_ipv4

        if self.engine.safeMode:
            plogging.info("- skipped - safe mode")
            continue

        # without an explicit address, let the driver pick one in vlan
        if 'private_ipv4' not in kwargs:
            kwargs['vlan'] = vlan

        while True:
            try:
                self.region.ex_attach_node_to_vlan(node, **kwargs)
                plogging.info("- in progress")
                hasChanged = True

            except Exception as feedback:
                # busy resources are retried; other errors end the loop
                if 'RESOURCE_BUSY' in str(feedback):
                    time.sleep(10)
                    continue

                elif 'RESOURCE_LOCKED' in str(feedback):
                    plogging.info("- not now - locked")

                elif 'INVALID_INPUT_DATA' in str(feedback):
                    plogging.info("- already there")

                else:
                    plogging.info("- unable to glue node")
                    plogging.error(str(feedback))

            break

    return hasChanged
def expand_parameters(cls, text, context):
    """
    Binds parameters and produces a string

    :param text: the text to be expanded
    :type text: ``str``

    :param context: context for lookup of parameters
    :type context: :class:`PlumberyContext`

    :return: the expanded text
    :rtype: ``str``

    This function provides low-level binding of parameters.
    """
    opening = '{{'
    closing = '}}'

    if not isinstance(text, string_types):
        raise TypeError("Parameters expansion requires textual input")

    expanded = ''
    index = 0
    debugged = []  # tokens already logged, to avoid repeated debug lines

    while index < len(text):

        head = text.find(opening, index)
        if head < 0:
            # no more markers: copy the tail verbatim
            expanded += text[index:]
            break

        tail = text.find(closing, head + len(opening))
        if tail < 0:
            # unterminated marker: copy the tail verbatim
            expanded += text[index:]
            break

        # when openings nest before the closing, bind to the innermost
        while True:
            head_next = text.find(opening, head + len(opening), tail)
            if head_next > head:
                head = head_next
            else:
                break

        token = text[head + len(opening):tail].strip(' \\\t')
        if len(token) < 1:
            # empty token: keep the marker as-is
            expanded += text[index:tail + len(closing)]
            index = tail + len(closing)
            continue

        # only 'parameter.*' tokens are bound here; others are preserved
        if not token.startswith('parameter.'):
            expanded += text[index:tail + len(closing)]
            index = tail + len(closing)
            continue

        replacement = context.lookup(token)
        if replacement is None:
            raise KeyError("Missing parameter '{}'".format(token))

        if token not in debugged:
            plogging.debug("- '{}' -> '{}'".format(token, replacement))
            debugged.append(token)

        expanded += text[index:head] + str(replacement)
        index = tail + len(closing)

    return expanded
def build_blueprint(self, blueprint, container):
    """
    Create missing nodes

    :param blueprint: the blueprint to build
    :type blueprint: ``dict``

    :param container: the container where nodes will be built
    :type container: :class:`plumbery.PlumberyInfrastructure`
    """
    plogging.debug("Building nodes of blueprint '{}'".format(
        blueprint['target']))

    self.facility.power_on()

    if ('nodes' not in blueprint
            or not isinstance(blueprint['nodes'], list)):
        plogging.debug("No nodes have been defined in '{}'".format(
            blueprint['target']))
        blueprint['nodes'] = []

    for item in blueprint['nodes']:

        # each item is either a bare label or {label: settings}
        if type(item) is dict:
            label = list(item.keys())[0]
            settings = list(item.values())[0]
        else:
            label = item
            settings = {}

        # e.g. 'web[10..12]' expands to web10, web11, web12
        for label in self.expand_labels(label):

            plogging.info("Creating node '{}'".format(label))

            if self.get_node(label):
                plogging.info("- already there")
                continue

            # nodes created by plumbery are tagged with '#plumbery'
            description = '#plumbery'
            if 'description' in settings:
                description = settings['description'] + ' #plumbery'

            if 'appliance' in settings:
                imageName = settings['appliance']
            else:
                imageName = None

            image = self.facility.get_image(imageName)
            if image is None:
                raise PlumberyException("Error: unable to find image "
                                        "for '{}'!".format(imageName))
            plogging.debug("- using image '{}'".format(image.name))

            # optional cpu settings: 'count [cores-per-socket [speed]]'
            cpu = None
            if 'cpu' in settings:
                tokens = str(settings['cpu']).split(' ')
                if len(tokens) < 2:
                    tokens.append('1')
                if len(tokens) < 3:
                    tokens.append('standard')

                if (int(tokens[0]) < 1
                        or int(tokens[0]) > 32):
                    plogging.info("- cpu should be between 1 and 32")

                elif (int(tokens[1]) < 1
                        or int(tokens[1]) > 2):
                    plogging.info("- core per cpu should be either 1 or 2")

                elif tokens[2].upper() not in ('STANDARD',
                                               'HIGHPERFORMANCE'):
                    plogging.info("- cpu speed should be either 'standard'"
                                  " or 'highspeed'")

                else:
                    cpu = DimensionDataServerCpuSpecification(
                        cpu_count=tokens[0],
                        cores_per_socket=tokens[1],
                        performance=tokens[2].upper())
                    plogging.debug("- assigning {} cpus".format(
                        cpu.cpu_count))
                    plogging.debug("- core per cpu: {}".format(
                        cpu.cores_per_socket))
                    plogging.debug("- cpu performance: {}".format(
                        cpu.performance.lower()))

            # optional memory setting, in GB, within driver limits
            memory = None
            if 'memory' in settings:
                memory = int(settings['memory'])
                if memory < 1 or memory > 256:
                    plogging.info("- memory should be between 1 and 256")
                    memory = None
                else:
                    plogging.debug("- assigning {} GB of memory".format(
                        memory))

            if self.plumbery.safeMode:
                plogging.info("- skipped - safe mode")
                continue

            if container.domain is None:
                plogging.info("- missing network domain")
                continue

            if container.network is None:
                plogging.info("- missing Ethernet network")
                continue

            # a 'glue' entry matching the primary network may pin the
            # primary private IPv4 address of the node
            primary_ipv4 = None
            if 'glue' in settings:
                for line in settings['glue']:
                    tokens = line.strip(' ').split(' ')
                    token = tokens.pop(0)
                    if token.lower() == 'primary':
                        token = container.network.name
                    if token != container.network.name:
                        continue
                    if len(tokens) < 1:
                        break
                    plogging.info("Glueing node '{}' to network '{}'"
                                  .format(label, token))
                    # missing octets are completed from the subnet base
                    numbers = tokens.pop(0).strip('.').split('.')
                    subnet = container.network.private_ipv4_range_address.split('.')
                    while len(numbers) < 4:
                        numbers.insert(0, subnet[3-len(numbers)])
                    primary_ipv4 = '.'.join(numbers)
                    plogging.debug("- using address '{}'"
                                   .format(primary_ipv4))
                    break

            retries = 2
            should_start = False
            while True:
                try:
                    if primary_ipv4 is not None:
                        self.region.create_node(
                            name=label,
                            image=image,
                            auth=NodeAuthPassword(
                                self.plumbery.get_shared_secret()),
                            ex_network_domain=container.domain,
                            ex_primary_ipv4=primary_ipv4,
                            ex_cpu_specification=cpu,
                            ex_memory_gb=memory,
                            ex_is_started=should_start,
                            ex_description=description)
                    else:
                        self.region.create_node(
                            name=label,
                            image=image,
                            auth=NodeAuthPassword(
                                self.plumbery.get_shared_secret()),
                            ex_network_domain=container.domain,
                            ex_vlan=container.network,
                            ex_cpu_specification=cpu,
                            ex_memory_gb=memory,
                            ex_is_started=should_start,
                            ex_description=description)
                    plogging.info("- in progress")

                    if should_start:
                        # stop the node after start
                        plogging.info("- waiting for node to be deployed")
                        node = None
                        while True:
                            node = self.get_node(label)
                            if node is None:
                                plogging.error(
                                    "- aborted - missing node '{}'".format(
                                        label))
                                return
                            # deployment complete when no action pending
                            if node.extra['status'].action is None:
                                break
                            if (node is not None
                                    and node.extra['status'].failure_reason
                                    is not None):
                                plogging.error(
                                    "- aborted - failed deployment "
                                    "of node '{}'".format(label))
                                return
                            time.sleep(20)
                        if node is not None:
                            self.region.ex_shutdown_graceful(node)
                            plogging.info(
                                "- shutting down after deployment")

                except SocketError as feedback:
                    # retry a couple of times on connection resets
                    if feedback.errno == errno.ECONNRESET and retries > 0:
                        retries -= 1
                        time.sleep(10)
                        continue
                    else:
                        plogging.info("- unable to create node")
                        plogging.error(str(feedback))

                except Exception as feedback:
                    if 'RESOURCE_BUSY' in str(feedback):
                        time.sleep(10)
                        continue

                    elif 'RESOURCE_NOT_FOUND' in str(feedback):
                        plogging.info("- not now")
                        plogging.error(str(feedback))

                    elif 'RESOURCE_LOCKED' in str(feedback):
                        plogging.info("- not now - locked")
                        plogging.error(str(feedback))

                    # some images require a started deployment; retry
                    # once with ex_is_started=True, then stop the node
                    elif ('INVALID_INPUT_DATA: Cannot deploy server '
                          'with Software Labels in the "Stopped" state.'
                            in str(feedback)):
                        should_start = True
                        continue

                    else:
                        plogging.info("- unable to create node")
                        plogging.error(str(feedback))

                break
def get_node(self, path):
    """
    Retrieves a node by name

    :param path: the name of the target node, or its location
    :type path: ``str`` or ``list``of ``str``

    :return: the target node, or None
    :rtype: :class:`libcloud.compute.base.Node`

    This function always make a real API call to get fresh state of the
    target node. Therefore, it can be used in loops where you monitor
    the evolution of the node during build or other change operation.

    This function searches firstly at the current facility. If the name
    is a complete path to a remote node, then plumbery looks there. If a
    different region is provided, then authentication is done against
    the related endpoint.

    For example if ``MyServer`` has been defined in a data centre in
    Europe::

        >>>infrastructure.get_ethernet('MyServer')

        >>>infrastructure.get_ethernet(['EU6', 'MyServer'])
        Looking for remote node 'EU6::MyServer'
        - found it

        >>>infrastructure.get_ethernet(['dd-eu', 'EU6', 'MyServer'])
        Looking for offshore node 'dd-eu::EU6::MyServer'
        - found it

    """
    if isinstance(path, str):
        path = path.split('::')

    node = None

    if len(path) == 2:  # force offshore lookup if needed
        target_region = self.facility.get_region(path[0])
        if target_region != self.facility.get_region():
            path.insert(0, target_region)

    if len(path) == 1:  # local name

        self.facility.power_on()
        for node in self.region.list_nodes():

            # skip nodes from other data centres in the same region
            if node.extra['datacenterId'] != self.facility.get_location_id():
                continue

            if node.name == path[0]:
                self._enrich_node(node)
                return node

    elif len(path) == 2:  # different location, same region

        self.facility.power_on()

        try:
            self.region.ex_get_location_by_id(path[0])
        except IndexError:
            plogging.warning("'{}' is unknown".format(path[0]))
            return None

        plogging.debug("Looking for remote node '{}'"
                       .format('::'.join(path)))

        for node in self.region.list_nodes():

            if node.extra['datacenterId'] != path[0]:
                continue

            if node.name == path[1]:
                plogging.debug("- found it")
                self._enrich_node(node)
                return node

    elif len(path) == 3:  # other region

        # authenticate against the remote region's endpoint
        offshore = self.plumbery.get_compute_driver(region=path[0])

        try:
            remoteLocation = offshore.ex_get_location_by_id(path[1])
        except IndexError:
            plogging.warning("'{}' is unknown".format(path[1]))
            return None

        plogging.debug("Looking for offshore node '{}'"
                       .format('::'.join(path)))

        for node in offshore.list_nodes():

            if node.extra['datacenterId'] != path[1]:
                continue

            if node.name == path[2]:
                plogging.debug("- found it")
                self._enrich_node(node, region=offshore)
                return node

    return None
def expand_string(cls, text, context):
    """
    Binds variables and produces a string

    :param text: the text or the structure to be expanded
    :type text: ``str`` or ``dict``

    :param context: context for lookup of tokens
    :type context: :class:`PlumberyContext`

    :return: the expanded text
    :rtype: ``str``

    This function allows for dynamic binding of data known by plumbery.
    Tokens look like ``{{ name }}``; unmatched tokens are preserved
    verbatim so that downstream tooling can still see them.
    """

    opening = '{{'
    closing = '}}'

    serialized = False
    if not isinstance(text, string_types):  # serialize python object
        plogging.debug("- serializing object before expansion")
        # fixed: the previous `if six.PY2: ... else: ...` had identical
        # branches -- str() behaves suitably on both interpreters here
        text = str(text)
        serialized = True

    expanded = ''
    index = 0
    debugged = []  # log each distinct token only once

    while index < len(text):

        # locate the next '{{ ... }}' pair; copy any tail verbatim
        head = text.find(opening, index)
        if head < 0:
            expanded += text[index:]
            break

        tail = text.find(closing, head + len(opening))
        if tail < 0:
            expanded += text[index:]
            break

        token = text[head + len(opening):tail].strip(' \\\t')
        if len(token) < 1:  # empty token: keep the literal braces
            expanded += text[index:tail + len(closing)]
            index = tail + len(closing)
            continue

        replacement = context.lookup(token)
        if replacement is None:  # preserve unmatched tag
            if token not in debugged:
                plogging.debug("- no match for '{}'".format(token))
                debugged.append(token)
            expanded += text[index:tail + len(closing)]
            index = tail + len(closing)

        else:  # actual expansion
            if token not in debugged:
                plogging.debug("- '{}' -> '{}'".format(token, replacement))
                debugged.append(token)
            if serialized:  # preserve line breaks
                replacement = ensure_string(replacement).replace(
                    '\n', '\\' + 'n')
            expanded += text[index:head] + str(replacement)
            index = tail + len(closing)

    if serialized:  # from serialized python to yaml representation

        # protect \ followed by \
        watermark1 = '-=_+*=-'
        expanded = expanded.replace('\\' + '\\',
                                    watermark1 + '|' + watermark1)

        # protect \ followed by '
        watermark2 = '-=*+_=-'
        expanded = expanded.replace('\\' + "'",
                                    watermark2 + '|' + watermark2)

        # protect None
        watermark3 = '=-_+*-='
        expanded = expanded.replace('None',
                                    watermark3 + 'None' + watermark3)

        # SECURITY: yaml.load() without an explicit Loader can build
        # arbitrary python objects; the expanded text originates from the
        # fittings plan, so confirm that plans are trusted input before
        # keeping this as-is (yaml.safe_load would be the hardened form)
        instanciated = yaml.load(expanded)
        expanded = PlumberyText.dump(instanciated)

        # restore the protected sequences
        expanded = expanded.replace(watermark1 + '|' + watermark1, '\\')
        expanded = expanded.replace(watermark2 + '|' + watermark2, "'")
        expanded = expanded.replace(watermark3 + 'None' + watermark3,
                                    'None')

    return expanded
def expand_string(cls, text, context):
    """
    Binds variables and produces a string

    :param text: the text or the structure to be expanded
    :type text: ``str`` or ``dict``

    :param context: context for lookup of tokens
    :type context: :class:`PlumberyContext`

    :return: the expanded text
    :rtype: ``str``

    This function allows for dynamic binding of data known by plumbery.
    Tokens look like ``{{ name }}``; unmatched tokens are preserved
    verbatim so that downstream tooling can still see them.
    """

    opening = "{{"
    closing = "}}"

    serialized = False
    if not isinstance(text, string_types):  # serialize python object
        plogging.debug("- serializing object before expansion")
        # fixed: the previous `if six.PY2: ... else: ...` had identical
        # branches -- str() behaves suitably on both interpreters here
        text = str(text)
        serialized = True

    expanded = ""
    index = 0
    debugged = []  # log each distinct token only once

    while index < len(text):

        # locate the next '{{ ... }}' pair; copy any tail verbatim
        head = text.find(opening, index)
        if head < 0:
            expanded += text[index:]
            break

        tail = text.find(closing, head + len(opening))
        if tail < 0:
            expanded += text[index:]
            break

        token = text[head + len(opening) : tail].strip(" \\\t")
        if len(token) < 1:  # empty token: keep the literal braces
            expanded += text[index : tail + len(closing)]
            index = tail + len(closing)
            continue

        replacement = context.lookup(token)
        if replacement is None:  # preserve unmatched tag
            if token not in debugged:
                plogging.debug("- no match for '{}'".format(token))
                debugged.append(token)
            expanded += text[index : tail + len(closing)]
            index = tail + len(closing)

        else:  # actual expansion
            if token not in debugged:
                plogging.debug("- '{}' -> '{}'".format(token, replacement))
                debugged.append(token)
            if serialized:  # preserve line breaks
                replacement = ensure_string(replacement).replace("\n", "\\" + "n")
            expanded += text[index:head] + str(replacement)
            index = tail + len(closing)

    if serialized:  # from serialized python to yaml representation

        # protect \ followed by \
        watermark1 = "-=_+*=-"
        expanded = expanded.replace("\\" + "\\", watermark1 + "|" + watermark1)

        # protect \ followed by '
        watermark2 = "-=*+_=-"
        expanded = expanded.replace("\\" + "'", watermark2 + "|" + watermark2)

        # protect None
        watermark3 = "=-_+*-="
        expanded = expanded.replace("None", watermark3 + "None" + watermark3)

        # SECURITY: yaml.load() without an explicit Loader can build
        # arbitrary python objects; the expanded text originates from the
        # fittings plan, so confirm that plans are trusted input before
        # keeping this as-is (yaml.safe_load would be the hardened form)
        instanciated = yaml.load(expanded)
        expanded = PlumberyText.dump(instanciated)

        # restore the protected sequences
        expanded = expanded.replace(watermark1 + "|" + watermark1, "\\")
        expanded = expanded.replace(watermark2 + "|" + watermark2, "'")
        expanded = expanded.replace(watermark3 + "None" + watermark3, "None")

    return expanded
def attach_node(self, node, networks):
    """
    Glues a node to multiple networks

    :param node: the target node
    :type node: :class:`libcloud.compute.base.Node`

    :param networks: a list of networks to connect, and ``internet``
    :type networks: list of ``str``

    This function adds network interfaces to a node, or adds address
    translation to the public Internet.

    Example in the fittings plan::

        - web:
            domain:
              ipv4: 6
            ethernet:
              name: gigafox.data
            nodes:
              - web[10..12]:
                  glue:
                    - gigafox.control
                    - internet 80 443

    In this example, another network interface is added to each node for
    connection to the Ethernet network ``gigafox.control``.

    Also, public IPv4 addresses are mapped on private addresses, so that
    each node web10, web11 and web12 is reachable from the internet.
    Public IPv4 addresses are taken from pool declared at the domain
    level, with the attribute ``ipv4``. In the example above, 6 addresses
    are assigned to the network domain, of which 3 are given to web nodes.

    If one or multiple numbers are mentioned after the keyword `internet`,
    they are used to configure the firewall appropriately.

    :return: ``True`` if at least one attachment was submitted
    :rtype: ``bool``
    """

    hasChanged = False

    if node is None:
        return hasChanged

    for line in networks:

        # each line is '<network-name> [<address-suffix>]'
        tokens = line.strip(' ').split(' ')
        token = tokens.pop(0)

        if token.lower() == 'internet':
            # delegate NAT / firewall work to the dedicated helper
            self.attach_node_to_internet(node, tokens)
            continue

        # the primary network is attached at node creation, skip it here
        if token == self.container.blueprint['ethernet']['name']:
            continue

        if token.lower() == 'primary':
            continue

        plogging.info("Glueing node '{}' to network '{}'".format(
            node.name, token))
        vlan = self.container.get_ethernet(token.split('::'))
        if vlan is None:
            plogging.info("- network '{}' is unknown".format(token))
            continue

        kwargs = {}
        if len(tokens) > 0:

            # complete a partial address (e.g. '.11') from the vlan subnet
            numbers = tokens.pop(0).strip('.').split('.')
            subnet = vlan.private_ipv4_range_address.split('.')
            while len(numbers) < 4:
                numbers.insert(0, subnet[3 - len(numbers)])
            private_ipv4 = '.'.join(numbers)

            plogging.debug("- using address '{}'".format(private_ipv4))
            kwargs['private_ipv4'] = private_ipv4

        if self.engine.safeMode:
            plogging.info("- skipped - safe mode")
            continue

        # either a fixed address or the vlan itself, never both
        if 'private_ipv4' not in kwargs:
            kwargs['vlan'] = vlan

        # retry while the remote resource is busy; other errors end the loop
        while True:
            try:
                self.region.ex_attach_node_to_vlan(node, **kwargs)
                plogging.info("- in progress")
                hasChanged = True

            except Exception as feedback:
                if 'RESOURCE_BUSY' in str(feedback):
                    time.sleep(10)
                    continue

                elif 'RESOURCE_LOCKED' in str(feedback):
                    plogging.info("- not now - locked")

                elif 'INVALID_INPUT_DATA' in str(feedback):
                    # interface already exists -- treat as success-ish
                    plogging.info("- already there")

                else:
                    plogging.info("- unable to glue node")
                    plogging.error(str(feedback))

            break

    return hasChanged
def build_blueprint(self, blueprint, container):
    """
    Create missing nodes

    :param blueprint: the blueprint to build
    :type blueprint: ``dict``

    :param container: the container where nodes will be built
    :type container: :class:`plumbery.PlumberyInfrastructure`

    :raises PlumberyException: when the requested image cannot be found

    Nodes already deployed are left untouched. Creation is retried on
    transient API errors ('RESOURCE_BUSY', connection resets), and the
    node is created started then shut down when the image carries
    software labels that forbid a 'Stopped' deployment.
    """

    plogging.debug("Building nodes of blueprint '{}'".format(
        blueprint['target']))

    self.facility.power_on()

    if ('nodes' not in blueprint
            or not isinstance(blueprint['nodes'], list)):
        plogging.debug("No nodes have been defined in '{}'".format(
            blueprint['target']))
        blueprint['nodes'] = []

    for item in blueprint['nodes']:

        # items are either plain labels or {label: settings} mappings
        if type(item) is dict:
            label = list(item.keys())[0]
            settings = list(item.values())[0]
        else:
            label = item
            settings = {}

        # 'web[10..12]' expands to web10, web11, web12
        for label in self.expand_labels(label):

            plogging.info("Creating node '{}'".format(label))

            if self.get_node(label):
                plogging.info("- already there")
                continue

            description = '#plumbery'
            if 'description' in settings:
                description = settings['description'] + ' #plumbery'

            if 'appliance' in settings:
                imageName = settings['appliance']
            else:
                imageName = None

            image = self.facility.get_image(imageName)
            if image is None:
                raise PlumberyException("Error: unable to find image "
                                        "for '{}'!".format(imageName))
            plogging.debug("- using image '{}'".format(image.name))

            # cpu setting is '<count> [<cores-per-socket> [<performance>]]'
            cpu = None
            if 'cpu' in settings:
                tokens = str(settings['cpu']).split(' ')
                if len(tokens) < 2:
                    tokens.append('1')
                if len(tokens) < 3:
                    tokens.append('standard')

                if (int(tokens[0]) < 1
                        or int(tokens[0]) > 32):
                    plogging.info("- cpu should be between 1 and 32")

                elif (int(tokens[1]) < 1
                        or int(tokens[1]) > 2):
                    plogging.info("- core per cpu should be either 1 or 2")

                elif tokens[2].upper() not in ('STANDARD',
                                               'HIGHPERFORMANCE'):
                    # fixed: message used to say 'highspeed' although the
                    # accepted token is 'highperformance'
                    plogging.info("- cpu speed should be either 'standard'"
                                  " or 'highperformance'")

                else:
                    cpu = DimensionDataServerCpuSpecification(
                        cpu_count=tokens[0],
                        cores_per_socket=tokens[1],
                        performance=tokens[2].upper())
                    plogging.debug("- assigning {} cpus".format(
                        cpu.cpu_count))
                    plogging.debug("- core per cpu: {}".format(
                        cpu.cores_per_socket))
                    plogging.debug("- cpu performance: {}".format(
                        cpu.performance.lower()))

            memory = None
            if 'memory' in settings:
                memory = int(settings['memory'])
                if memory < 1 or memory > 256:
                    plogging.info("- memory should be between 1 and 256")
                    memory = None
                else:
                    plogging.debug(
                        "- assigning {} GB of memory".format(memory))

            if self.plumbery.safeMode:
                plogging.info("- skipped - safe mode")
                continue

            if container.domain is None:
                plogging.info("- missing network domain")
                continue

            if container.network is None:
                plogging.info("- missing Ethernet network")
                continue

            # a 'glue' entry on the primary network may pin the address
            primary_ipv4 = None
            if 'glue' in settings:
                for line in settings['glue']:

                    tokens = line.strip(' ').split(' ')
                    token = tokens.pop(0)

                    if token.lower() == 'primary':
                        token = container.network.name

                    if token != container.network.name:
                        continue

                    if len(tokens) < 1:
                        break

                    plogging.info(
                        "Glueing node '{}' to network '{}'".format(
                            label, token))

                    # complete a partial address from the network subnet
                    numbers = tokens.pop(0).strip('.').split('.')
                    subnet = container.network.private_ipv4_range_address.split(
                        '.')
                    while len(numbers) < 4:
                        numbers.insert(0, subnet[3 - len(numbers)])
                    primary_ipv4 = '.'.join(numbers)

                    plogging.debug(
                        "- using address '{}'".format(primary_ipv4))

                    break

            retries = 2
            should_start = False
            while True:
                try:
                    if primary_ipv4 is not None:
                        self.region.create_node(
                            name=label,
                            image=image,
                            auth=NodeAuthPassword(
                                self.plumbery.get_shared_secret()),
                            ex_network_domain=container.domain,
                            ex_primary_ipv4=primary_ipv4,
                            ex_cpu_specification=cpu,
                            ex_memory_gb=memory,
                            ex_is_started=should_start,
                            ex_description=description)
                    else:
                        self.region.create_node(
                            name=label,
                            image=image,
                            auth=NodeAuthPassword(
                                self.plumbery.get_shared_secret()),
                            ex_network_domain=container.domain,
                            ex_vlan=container.network,
                            ex_cpu_specification=cpu,
                            ex_memory_gb=memory,
                            ex_is_started=should_start,
                            ex_description=description)
                    plogging.info("- in progress")

                    if should_start:  # stop the node after start

                        plogging.info("- waiting for node to be deployed")

                        node = None
                        while True:
                            node = self.get_node(label)
                            if node is None:
                                plogging.error(
                                    "- aborted - missing node '{}'".format(
                                        label))
                                return

                            if node.extra['status'].action is None:
                                break

                            if (node is not None
                                    and node.extra['status'].failure_reason
                                    is not None):
                                plogging.error(
                                    "- aborted - failed deployment "
                                    "of node '{}'".format(label))
                                return

                            time.sleep(20)

                        if node is not None:
                            self.region.ex_shutdown_graceful(node)
                            plogging.info(
                                "- shutting down after deployment")

                except SocketError as feedback:
                    # transient network issue: retry a bounded number of times
                    if feedback.errno == errno.ECONNRESET and retries > 0:
                        retries -= 1
                        time.sleep(10)
                        continue
                    else:
                        plogging.info("- unable to create node")
                        plogging.error(str(feedback))

                except Exception as feedback:
                    if 'RESOURCE_BUSY' in str(feedback):
                        time.sleep(10)
                        continue

                    elif 'RESOURCE_NOT_FOUND' in str(feedback):
                        plogging.info("- not now")
                        plogging.error(str(feedback))

                    elif 'RESOURCE_LOCKED' in str(feedback):
                        plogging.info("- not now - locked")
                        plogging.error(str(feedback))

                    elif ('INVALID_INPUT_DATA: Cannot deploy server '
                          'with Software Labels in the "Stopped" state.'
                          in str(feedback)):
                        # image requires a started deployment; retry started
                        should_start = True
                        continue

                    else:
                        plogging.info("- unable to create node")
                        plogging.error(str(feedback))

                break
def set_node_disk(self, node, id, size, speed='standard'): """ Sets a virtual disk :param node: the node to be polished :type node: :class:`libcloud.compute.base.Node` :param id: the disk id, starting at 0 and growing :type id: ``int`` :param size: the disk size, expressed in Giga bytes :type size: ``int`` :param speed: storage type, either 'standard', 'highperformance' or 'economy' :type speed: ``str`` """ if size < 1: plogging.info("- minimum disk size is 1 GB") return if size > 1000: plogging.info("- disk size cannot exceed 1000 GB") return if speed not in ('standard', 'highperformance', 'economy'): plogging.info("- disk speed should be either 'standard' " "or 'highperformance' or 'economy'") return if 'disks' in node.extra: for disk in node.extra['disks']: if disk['scsiId'] == id: changed = False if disk['size'] > size: plogging.info("- disk shrinking could break the node") plogging.info( "- skipped - disk {} will not be reduced".format( id)) if disk['size'] < size: plogging.info("- expanding disk {} to {} GB".format( id, size)) self.change_node_disk_size(node, disk['id'], size) changed = True if disk['speed'].lower() != speed.lower(): plogging.info("- changing disk {} to '{}'".format( id, speed)) self.change_node_disk_speed(node, disk['id'], speed) changed = True if not changed: plogging.debug("- no change in disk {}".format(id)) return plogging.info("- adding {} GB '{}' disk".format(size, speed)) # if self.engine.safeMode: # plogging.info("- skipped - safe mode") # return while True: try: self.facility.region.ex_add_storage_to_node( node=node, amount=size, speed=speed.upper()) plogging.info("- in progress") except Exception as feedback: if 'RESOURCE_BUSY' in str(feedback): time.sleep(10) continue if 'Please try again later' in str(feedback): time.sleep(10) continue plogging.info("- unable to add disk {} GB '{}'".format( size, speed)) plogging.error(str(feedback)) break
def set_node_compute(self, node, cpu, memory): """ Sets compute capability :param node: the node to be polished :type node: :class:`libcloud.compute.base.Node` :param cpu: the cpu specification :type cpu: ``DimensionDataServerCpuSpecification`` :param memory: the memory size, expressed in Giga bytes :type memory: ``int`` """ changed = False if cpu is not None and 'cpu' in node.extra: if int(cpu.cpu_count) != int(node.extra['cpu'].cpu_count): plogging.info("- changing to {} cpu".format( cpu.cpu_count)) changed = True if (int(cpu.cores_per_socket) != int(node.extra['cpu'].cores_per_socket)): plogging.info("- changing to {} core(s) per socket".format( cpu.cores_per_socket)) changed = True if cpu.performance != node.extra['cpu'].performance: plogging.info("- changing to '{}' cpu performance".format( cpu.performance.lower())) changed = True if memory is not None and 'memoryMb' in node.extra: if memory != int(node.extra['memoryMb']/1024): plogging.info("- changing to {} GB memory".format( memory)) changed = True if not changed: plogging.debug("- no change in compute") return if self.engine.safeMode: plogging.info("- skipped - safe mode") return while True: try: self.region.ex_reconfigure_node( node=node, memory_gb=memory, cpu_count=cpu.cpu_count, cores_per_socket=cpu.cores_per_socket, cpu_performance=cpu.performance) plogging.info("- in progress") except Exception as feedback: if 'RESOURCE_BUSY' in str(feedback): time.sleep(10) continue if 'Please try again later' in str(feedback): time.sleep(10) continue plogging.info("- unable to reconfigure node") plogging.error(str(feedback)) break
def reap(self, *args): plogging.debug('Reap for windows polisher (noop)') return
def _get_prepares(self, node, settings, container):
    """
    Defines the set of actions to be done on a node

    :param node: the node to be polished
    :type node: :class:`libcloud.compute.base.Node`

    :param settings: the fittings plan for this node
    :type settings: ``dict``

    :param container: the container of this node
    :type container: :class:`plumbery.PlumberyInfrastructure`

    :return: a list of actions to be performed, and related descriptions
    :rtype: a ``list`` of `{ 'description': ..., 'genius': ... }``
    """

    if not isinstance(settings, dict):
        return []

    # token lookup context used when expanding scripts and files
    environment = PlumberyNodeContext(node=node,
                                      container=container,
                                      context=self.facility)

    prepares = []

    if self.key is not None:
        prepares.append({
            'description': 'deploy SSH public key',
            'genius': SSHKeyDeployment(self.key)})

    if ('prepare' in settings
            and isinstance(settings['prepare'], list)
            and len(settings['prepare']) > 0):

        plogging.info('- using prepare commands')

        for script in settings['prepare']:

            # each entry is '<verb> <target> [<args>...]';
            # a bare entry defaults to the 'run' verb
            tokens = script.split(' ')
            if len(tokens) == 1:
                tokens.insert(0, 'run')

            if tokens[0] in ['run', 'run_raw']:  # send and run a script

                script = tokens[1]
                if len(tokens) > 2:
                    args = tokens[2:]
                else:
                    args = []

                plogging.debug("- {} {} {}".format(
                    tokens[0], script, ' '.join(args)))

                try:
                    with open(script) as stream:
                        text = stream.read()

                    # 'run' expands {{ tokens }}, 'run_raw' does not
                    if(tokens[0] == 'run'
                            and PlumberyText.could_expand(text)):

                        plogging.debug("- expanding script '{}'"
                                       .format(script))
                        text = PlumberyText.expand_string(
                            text, environment)

                    if len(text) > 0:

                        plogging.info("- running '{}'"
                                      .format(script))

                        prepares.append({
                            'description': ' '.join(tokens),
                            'genius': ScriptDeployment(
                                script=text,
                                args=args,
                                name=script)})

                    else:
                        plogging.error("- script '{}' is empty"
                                       .format(script))

                except IOError:
                    plogging.error("- unable to read script '{}'"
                                   .format(script))

            elif tokens[0] in ['put', 'put_raw']:  # send a file

                file = tokens[1]
                if len(tokens) > 2:
                    destination = tokens[2]
                else:
                    destination = './'+file

                plogging.debug("- {} {} {}".format(
                    tokens[0], file, destination))

                try:
                    with open(file) as stream:
                        content = stream.read()

                    # 'put' expands {{ tokens }}, 'put_raw' does not
                    if(tokens[0] == 'put'
                            and PlumberyText.could_expand(content)):

                        plogging.debug("- expanding file '{}'"
                                       .format(file))
                        content = PlumberyText.expand_string(
                            content, environment)

                    plogging.info("- putting file '{}'"
                                  .format(file))
                    prepares.append({
                        'description': ' '.join(tokens),
                        'genius': FileContentDeployment(
                            content=content,
                            target=destination)})

                except IOError:
                    plogging.error("- unable to read file '{}'"
                                   .format(file))

            else:  # echo a sensible message eventually

                if tokens[0] == 'echo':
                    tokens.pop(0)
                message = ' '.join(tokens)
                message = PlumberyText.expand_string(
                    message, environment)
                plogging.info("- {}".format(message))

    if ('cloud-config' in settings
            and isinstance(settings['cloud-config'], dict)
            and len(settings['cloud-config']) > 0):

        plogging.info('- using cloud-config')

        # mandatory, else cloud-init will not consider user-data
        plogging.debug('- preparing meta-data')
        meta_data = 'instance_id: dummy\n'

        destination = '/var/lib/cloud/seed/nocloud-net/meta-data'
        prepares.append({
            'description': 'put meta-data',
            'genius': FileContentDeployment(
                content=meta_data,
                target=destination)})

        plogging.debug('- preparing user-data')

        expanded = PlumberyText.expand_string(
            settings['cloud-config'], environment)

        user_data = '#cloud-config\n'+expanded
        plogging.debug(user_data)

        destination = '/var/lib/cloud/seed/nocloud-net/user-data'
        prepares.append({
            'description': 'put user-data',
            'genius': FileContentDeployment(
                content=user_data,
                target=destination)})

        plogging.debug('- preparing remote install of cloud-init')

        # installer script shipped alongside this module
        script = 'prepare.cloud-init.sh'
        try:
            path = os.path.dirname(__file__)+'/'+script
            with open(path) as stream:
                text = stream.read()
            if text:
                prepares.append({
                    'description': 'run '+script,
                    'genius': ScriptDeployment(
                        script=text,
                        name=script)})
        except IOError:
            raise PlumberyException("Error: cannot read '{}'"
                                    .format(script))

        plogging.debug('- preparing reboot to trigger cloud-init')

        prepares.append({
            'description': 'reboot node',
            'genius': RebootDeployment(
                container=container)})

    return prepares
def move_to(self, facility): """ Checks if we can beachhead at this facility :param facility: access to local parameters and functions :type facility: :class:`plumbery.PlumberyFacility` This function lists all addresses of the computer that is running plumbery. If there is at least one routable IPv6 address, then it assumes that communication with nodes is possible. If no suitable IPv6 address can be found, then plumbery falls back to IPv4. Beachheading is granted only if the address of the computer running plumbery matches the fitting parameter ``beachhead``. """ self.facility = facility self.region = facility.region self.nodes = PlumberyNodes(facility) self.beachheading = False try: self.addresses = [] for interface in netifaces.interfaces(): addresses = netifaces.ifaddresses(interface) if netifaces.AF_INET in addresses.keys(): for address in addresses[netifaces.AF_INET]: # strip local loop if address['addr'].startswith('127.0.0.1'): continue self.addresses.append(address['addr']) if netifaces.AF_INET6 in addresses.keys(): for address in addresses[netifaces.AF_INET6]: # strip local loop if address['addr'].startswith('::1'): continue # strip local link addresses if address['addr'].startswith('fe80::'): continue # we have a routable ipv6, so let's go self.beachheading = True except Exception as feedback: plogging.error(str(feedback)) for item in self.facility.get_setting('prepare', []): if not isinstance(item, dict): continue if 'beachhead' not in item.keys(): continue if item['beachhead'] in self.addresses: self.beachheading = True break if self.beachheading: plogging.debug("- beachheading at '{}'".format( self.facility.get_setting('locationId'))) else: plogging.debug("- not beachheading at '{}'".format( self.facility.get_setting('locationId')))
def configure(self, node, settings): """ prepares a node :param node: the node to be polished :type node: :class:`libcloud.compute.base.Node` :param settings: the fittings plan for this node :type settings: ``dict`` :param container: the container of this node :type container: :class:`plumbery.PlumberyInfrastructure` """ if self._element_name_ in settings: plogging.info("preparing node '{}'".format(settings['name'])) if node is None: plogging.info("- not found") return timeout = 300 tick = 6 while node.extra['status'].action == 'START_SERVER': time.sleep(tick) node = self.nodes.get_node(node.name) timeout -= tick if timeout < 0: break if node.state != NodeState.RUNNING: plogging.info("- skipped - node is not running") return ipv6 = node.extra['ipv6'] ip = node.private_ips[0] if ipv6 is None: plogging.error('No ipv6 address for node, cannot configure') return # Check to see if WinRM works.. try: self._try_winrm(node) except winrm.exceptions.InvalidCredentialsError: plogging.warn('initial login to %s failed, trying to setup winrm remotely', ip) self._setup_winrm(node) self._try_winrm(node) except requests.exceptions.ConnectionError: plogging.warn('initial connection to %s failed, trying to setup winrm remotely', ip) self._setup_winrm(node) self._try_winrm(node) # OK, we're all ready. Let's look at the node config and start commands cmds = [] hostname = settings[self._element_name_].get('hostname', None) if hostname is not None and isinstance(hostname, str): cmds.append(('powershell.exe', ['Rename-Computer', '-NewName', hostname])) extra_cmds = settings[self._element_name_].get('cmds', []) for command in extra_cmds: command = command.rstrip() command_parts = command.split(' ') cmds.append((command_parts[0], command_parts[1:])) out, err = self._winrm_commands(node, cmds) plogging.info(out) plogging.warning(err) plogging.debug('locking down winrm') self._lockdown_winrm(node) else: return False
def get_node(self, path):
    """
    Retrieves a node by name

    :param path: the name of the target node, or its location
    :type path: ``str`` or ``list`` of ``str``

    :return: the target node, or None
    :rtype: :class:`libcloud.compute.base.Node`

    This function always makes a real API call to get fresh state of the
    target node. Therefore, it can be used in loops where you monitor
    the evolution of the node during build or other change operation.

    This function searches firstly at the current facility. If the name
    is a complete path to a remote node, then plumbery looks there. If a
    different region is provided, then authentication is done against the
    related endpoint.

    For example if ``MyServer`` has been defined in a data centre in
    Europe::

        >>>infrastructure.get_ethernet('MyServer')

        >>>infrastructure.get_ethernet(['EU6', 'MyServer'])
        Looking for remote node 'EU6::MyServer'
        - found it

        >>>infrastructure.get_ethernet(['dd-eu', 'EU6', 'MyServer'])
        Looking for offshore node 'dd-eu::EU6::MyServer'
        - found it

    """

    # accept 'EU6::MyServer' as well as ['EU6', 'MyServer']
    if isinstance(path, str):
        path = path.split('::')

    node = None

    if len(path) == 2:  # force offshore lookup if needed
        # if the location belongs to another region, prepend that region
        # so the 3-element branch below authenticates against it
        target_region = self.facility.get_region(path[0])
        if target_region != self.facility.get_region():
            path.insert(0, target_region)

    if len(path) == 1:  # local name
        self.facility.power_on()
        for node in self.region.list_nodes():
            # keep only nodes of the current data centre
            if node.extra['datacenterId'] != self.facility.get_location_id(
                    ):
                continue
            if node.name == path[0]:
                self._enrich_node(node)
                return node

    elif len(path) == 2:  # different location, same region
        self.facility.power_on()
        try:
            # validate the location id before scanning nodes
            self.region.ex_get_location_by_id(path[0])
        except IndexError:
            plogging.warning("'{}' is unknown".format(path[0]))
            return None
        plogging.debug("Looking for remote node '{}'".format(
            '::'.join(path)))
        for node in self.region.list_nodes():
            if node.extra['datacenterId'] != path[0]:
                continue
            if node.name == path[1]:
                plogging.debug("- found it")
                self._enrich_node(node)
                return node

    elif len(path) == 3:  # other region
        # authenticate against the remote regional endpoint
        offshore = self.plumbery.get_compute_driver(region=path[0])
        try:
            # NOTE(review): remoteLocation is never used afterwards; the
            # call only validates that the location id exists
            remoteLocation = offshore.ex_get_location_by_id(path[1])
        except IndexError:
            plogging.warning("'{}' is unknown".format(path[1]))
            return None
        plogging.debug("Looking for offshore node '{}'".format(
            '::'.join(path)))
        for node in offshore.list_nodes():
            if node.extra['datacenterId'] != path[1]:
                continue
            if node.name == path[2]:
                plogging.debug("- found it")
                # remember the offshore driver for later API calls
                self._enrich_node(node, region=offshore)
                return node

    return None
def __init__(self, engine, facility): self.secret = engine.get_shared_secret() # todo: provide a fittings-wide override. self.username = '******' plogging.debug('Loading windows polisher')
def set_node_compute(self, node, cpu, memory): """ Sets compute capability :param node: the node to be polished :type node: :class:`libcloud.compute.base.Node` :param cpu: the cpu specification :type cpu: ``DimensionDataServerCpuSpecification`` :param memory: the memory size, expressed in Giga bytes :type memory: ``int`` """ changed = False if cpu is not None and 'cpu' in node.extra: if int(cpu.cpu_count) != int(node.extra['cpu'].cpu_count): plogging.info("- changing to {} cpu".format(cpu.cpu_count)) changed = True if (int(cpu.cores_per_socket) != int( node.extra['cpu'].cores_per_socket)): plogging.info("- changing to {} core(s) per socket".format( cpu.cores_per_socket)) changed = True if cpu.performance != node.extra['cpu'].performance: plogging.info("- changing to '{}' cpu performance".format( cpu.performance.lower())) changed = True if memory is not None and 'memoryMb' in node.extra: if memory != int(node.extra['memoryMb'] / 1024): plogging.info("- changing to {} GB memory".format(memory)) changed = True if not changed: plogging.debug("- no change in compute") return if self.engine.safeMode: plogging.info("- skipped - safe mode") return while True: try: self.region.ex_reconfigure_node( node=node, memory_gb=memory, cpu_count=cpu.cpu_count, cores_per_socket=cpu.cores_per_socket, cpu_performance=cpu.performance) plogging.info("- in progress") except Exception as feedback: if 'RESOURCE_BUSY' in str(feedback): time.sleep(10) continue if 'Please try again later' in str(feedback): time.sleep(10) continue plogging.info("- unable to reconfigure node") plogging.error(str(feedback)) break
def _apply_prepares(self, node, steps): """ Does the actual job over SSH :param node: the node to be polished :type node: :class:`libcloud.compute.base.Node` :param steps: the various steps of the preparing :type steps: ``list`` of ``dict`` :return: ``True`` if everything went fine, ``False`` otherwise :rtype: ``bool`` """ if node is None or node.state != NodeState.RUNNING: plogging.warning("- skipped - node is not running") return False # select the address to use if len(node.public_ips) > 0: target_ip = node.public_ips[0] elif node.extra['ipv6']: target_ip = node.extra['ipv6'] else: target_ip = node.private_ips[0] # use libcloud to communicate with remote nodes session = SSHClient(hostname=target_ip, port=22, username=self.user, password=self.secret, key_files=self.key_files, timeout=10) repeats = 0 while True: try: session.connect() break except Exception as feedback: repeats += 1 if repeats > 5: plogging.error( "Error: can not connect to '{}'!".format(target_ip)) plogging.error("- failed to connect") return False plogging.debug(str(feedback)) plogging.debug( "- connection {} failed, retrying".format(repeats)) time.sleep(10) continue while True: try: if self.engine.safeMode: plogging.info( "- skipped - no ssh interaction in safe mode") else: for step in steps: plogging.info('- {}'.format(step['description'])) step['genius'].run(node, session) except Exception as feedback: if 'RESOURCE_BUSY' in str(feedback): time.sleep(10) continue plogging.error("Error: unable to prepare '{}' at '{}'!".format( node.name, target_ip)) plogging.error(str(feedback)) plogging.error("- failed") result = False else: result = True break try: session.close() except: pass return result
def parse_args(args=None):
    """
    Guesses the intention of the runner of this program

    :param args: arguments to be considered for this invocation
    :type args: a list of ``str``, or None for an empty invocation

    :return: the parsed namespace, enriched with ``blueprints`` and
        ``facilities`` lists (either may be None when unspecified)

    You have to run the following command to know more::

        $ python -m plumbery fittings.yaml -h

    """

    # fixed: a mutable default argument (args=[]) is shared across calls;
    # use the None sentinel instead
    if args is None:
        args = []

    parser = argparse.ArgumentParser(
        prog='python -m plumbery',
        description='Plumbing infrastructure with Apache Libcloud.',
        epilog='example: python -m plumbery fittings.yaml build')

    parser.add_argument(
        'fittings',
        nargs=1,
        help="File that is containing fittings plan, or '-' to read stdin")

    # fixed: help text used to read "dispose'" with a missing opening quote
    parser.add_argument(
        'action',
        nargs=1,
        help="An action, or a polisher: 'deploy', 'refresh', 'dispose', "
             "'secrets', 'build', 'configure', 'start', 'prepare', "
             "'information', 'ping', 'inventory', 'ansible', "
             "'stop', 'wipe', 'destroy'")

    parser.add_argument(
        'tokens',
        nargs='*',
        help="One blueprint, or several, e.g., 'web' or 'web sql'."
             "If omitted, all blueprints will be considered. "
             "Zero or more locations, e.g., '@NA12'. "
             "If omitted, all locations will be considered.",
        default=None)

    parser.add_argument('-p', '--parameters',
                        nargs='*',
                        help='Parameters for this fittings plan')

    parser.add_argument(
        '-s', '--safe',
        help='Safe mode, no actual change is made to the infrastructure',
        action='store_true')

    group = parser.add_mutually_exclusive_group()

    group.add_argument('-d', '--debug',
                       help='Log as much information as possible',
                       action='store_true')

    group.add_argument('-q', '--quiet',
                       help='Silent mode, log only warnings and errors',
                       action='store_true')

    parser.add_argument('-v', '--version',
                        help='Print version of this software',
                        action='version',
                        version='plumbery ' + __version__)

    args = parser.parse_args(args)

    if args.debug:
        plogging.setLevel(logging.DEBUG)

    elif args.quiet:
        plogging.setLevel(logging.WARNING)

    else:
        plogging.setLevel(logging.INFO)

    # removed dead code: the 'version' action prints and exits inside
    # parse_args() and stores no attribute, so the former
    # "if 'version' in args: print(args.version)" branch could never run

    args.fittings = args.fittings[0]
    plogging.debug("- loading '{}'".format(args.fittings))

    args.action = args.action[0].lower()

    # split positional tokens into blueprints and '@'-prefixed facilities
    args.blueprints = []
    args.facilities = []
    for token in args.tokens:
        if token[0] == '@':
            if token == '@':
                raise ValueError("Missing location after @. "
                                 "Correct example: '@AU11'")
            args.facilities.append(token[1:])
        else:
            args.blueprints.append(token)

    # None means 'all blueprints' / 'all locations' downstream
    if len(args.blueprints) < 1:
        args.blueprints = None
    else:
        plogging.debug('blueprints: ' + ' '.join(args.blueprints))

    if len(args.facilities) < 1:
        args.facilities = None
    else:
        plogging.debug('facilities: ' + ' '.join(args.facilities))

    return args
def _get_prepares(self, node, settings, container):
    """
    Defines the set of actions to be done on a node

    :param node: the node to be polished
    :type node: :class:`libcloud.compute.base.Node`

    :param settings: the fittings plan for this node
    :type settings: ``dict``

    :param container: the container of this node
    :type container: :class:`plumbery.PlumberyInfrastructure`

    :return: a list of actions to be performed, and related descriptions
    :rtype: a ``list`` of ``{ 'description': ..., 'genius': ... }``

    """

    if not isinstance(settings, dict):
        return []

    environment = PlumberyNodeContext(node=node,
                                      container=container,
                                      context=self.facility)

    prepares = []

    # self.key_files may still be None when the engine has no shared key
    # and the fittings plan provides no 'key' setting -- see go()
    for key_file in self.key_files or []:
        try:
            path = os.path.expanduser(key_file)
            with open(path) as stream:
                key = stream.read()

            prepares.append({
                'description': 'deploy SSH public key',
                'genius': SSHKeyDeployment(key=key)})

        except IOError:
            plogging.warning("no ssh key in {}".format(key_file))

    if ('prepare' in settings
            and isinstance(settings['prepare'], list)
            and len(settings['prepare']) > 0):

        plogging.info('- using prepare commands')

        for script in settings['prepare']:

            # a bare token defaults to the 'run' directive
            tokens = script.split(' ')
            if len(tokens) == 1:
                tokens.insert(0, 'run')

            if tokens[0] in ['run', 'run_raw']:  # send and run a script

                script = tokens[1]
                if len(tokens) > 2:
                    args = tokens[2:]
                else:
                    args = []

                plogging.debug("- {} {} {}".format(
                    tokens[0], script, ' '.join(args)))

                try:
                    with open(script) as stream:
                        text = stream.read()

                        # 'run' expands templating variables,
                        # 'run_raw' sends the script untouched
                        if (tokens[0] == 'run'
                                and PlumberyText.could_expand(text)):

                            plogging.debug(
                                "- expanding script '{}'".format(script))
                            text = PlumberyText.expand_string(
                                text, environment)

                        if len(text) > 0:

                            plogging.info(
                                "- running '{}'".format(script))

                            prepares.append({
                                'description': ' '.join(tokens),
                                'genius': ScriptDeployment(
                                    script=text,
                                    args=args,
                                    name=script)})

                        else:
                            plogging.error(
                                "- script '{}' is empty".format(script))

                except IOError:
                    plogging.error(
                        "- unable to read script '{}'".format(script))

            elif tokens[0] in ['put', 'put_raw']:  # send a file

                file = tokens[1]
                if len(tokens) > 2:
                    destination = tokens[2]
                else:
                    destination = './' + file

                plogging.debug("- {} {} {}".format(
                    tokens[0], file, destination))

                try:
                    with open(file) as stream:
                        content = stream.read()

                        # 'put' expands templating variables,
                        # 'put_raw' sends the file untouched
                        if (tokens[0] == 'put'
                                and PlumberyText.could_expand(content)):

                            plogging.debug(
                                "- expanding file '{}'".format(file))
                            content = PlumberyText.expand_string(
                                content, environment)

                        plogging.info(
                            "- putting file '{}'".format(file))

                        prepares.append({
                            'description': ' '.join(tokens),
                            'genius': FileContentDeployment(
                                content=content,
                                target=destination)})

                except IOError:
                    plogging.error(
                        "- unable to read file '{}'".format(file))

            else:  # echo a sensible message eventually

                if tokens[0] == 'echo':
                    tokens.pop(0)

                message = ' '.join(tokens)
                message = PlumberyText.expand_string(message,
                                                     environment)
                plogging.info("- {}".format(message))

    if ('cloud-config' in settings
            and isinstance(settings['cloud-config'], dict)
            and len(settings['cloud-config']) > 0):

        plogging.info('- using cloud-config')

        # mandatory, else cloud-init will not consider user-data
        plogging.debug('- preparing meta-data')
        meta_data = 'instance_id: dummy\n'

        destination = '/var/lib/cloud/seed/nocloud-net/meta-data'
        prepares.append({
            'description': 'put meta-data',
            'genius': FileContentDeployment(
                content=meta_data,
                target=destination)})

        plogging.debug('- preparing user-data')

        expanded = PlumberyText.expand_string(settings['cloud-config'],
                                              environment)

        user_data = '#cloud-config\n' + expanded
        plogging.debug(user_data)

        destination = '/var/lib/cloud/seed/nocloud-net/user-data'
        prepares.append({
            'description': 'put user-data',
            'genius': FileContentDeployment(
                content=user_data,
                target=destination)})

        plogging.debug('- preparing remote install of cloud-init')

        # helper script shipped alongside this module
        script = 'prepare.cloud-init.sh'
        try:
            path = os.path.dirname(__file__) + '/' + script
            with open(path) as stream:
                text = stream.read()
            if text:
                prepares.append({
                    'description': 'run ' + script,
                    'genius': ScriptDeployment(
                        script=text,
                        name=script)})

        except IOError:
            raise PlumberyException(
                "Error: cannot read '{}'".format(script))

        plogging.debug('- preparing reboot to trigger cloud-init')

        prepares.append({
            'description': 'reboot node',
            'genius': RebootDeployment(container=container)})

    return prepares
def expand_parameters(cls, text, context): """ Binds parameters and produces a string :param text: the text to be expanded :type text: ``str`` :param context: context for lookup of parameters :type context: :class:`PlumberyContext` :return: the expanded text :rtype: ``str`` This function provides low-level binding of parameters. """ opening = "{{" closing = "}}" if not isinstance(text, string_types): raise TypeError("Parameters expansion requires textual input") expanded = "" index = 0 debugged = [] while index < len(text): head = text.find(opening, index) if head < 0: expanded += text[index:] break tail = text.find(closing, head + len(opening)) if tail < 0: expanded += text[index:] break while True: head_next = text.find(opening, head + len(opening), tail) if head_next > head: head = head_next else: break token = text[head + len(opening) : tail].strip(" \\\t") if len(token) < 1: expanded += text[index : tail + len(closing)] index = tail + len(closing) continue if not token.startswith("parameter."): expanded += text[index : tail + len(closing)] index = tail + len(closing) continue replacement = context.lookup(token) if replacement is None: raise KeyError("Missing parameter '{}'".format(token)) if token not in debugged: plogging.debug("- '{}' -> '{}'".format(token, replacement)) debugged.append(token) expanded += text[index:head] + str(replacement) index = tail + len(closing) return expanded
def move_to(self, facility): """ Checks if we can beachhead at this facility :param facility: access to local parameters and functions :type facility: :class:`plumbery.PlumberyFacility` This function lists all addresses of the computer that is running plumbery. If there is at least one routable IPv6 address, then it assumes that communication with nodes is possible. If no suitable IPv6 address can be found, then plumbery falls back to IPv4. Beachheading is granted only if the address of the computer running plumbery matches the fitting parameter ``beachhead``. """ self.facility = facility self.region = facility.region self.nodes = PlumberyNodes(facility) self.beachheading = False try: self.addresses = [] for interface in netifaces.interfaces(): addresses = netifaces.ifaddresses(interface) if netifaces.AF_INET in addresses.keys(): for address in addresses[netifaces.AF_INET]: # strip local loop if address['addr'].startswith('127.0.0.1'): continue self.addresses.append(address['addr']) if netifaces.AF_INET6 in addresses.keys(): for address in addresses[netifaces.AF_INET6]: # strip local loop if address['addr'].startswith('::1'): continue # strip local link addresses if address['addr'].startswith('fe80::'): continue # we have a routable ipv6, so let's go self.beachheading = True except Exception as feedback: plogging.error(str(feedback)) for item in self.facility.get_setting('prepare', []): if not isinstance(item, dict): continue if 'beachhead' not in item.keys(): continue if item['beachhead'] in self.addresses: self.beachheading = True break if self.beachheading: plogging.debug("- beachheading at '{}'".format( self.facility.get_setting('locationId'))) else: plogging.debug("- not beachheading at '{}'".format( self.facility.get_setting('locationId')))
def set_node_disk(self, node, id, size, speed='standard'): """ Sets a virtual disk :param node: the node to be polished :type node: :class:`libcloud.compute.base.Node` :param id: the disk id, starting at 0 and growing :type id: ``int`` :param size: the disk size, expressed in Giga bytes :type size: ``int`` :param speed: storage type, either 'standard', 'highperformance' or 'economy' :type speed: ``str`` """ if size < 1: plogging.info("- minimum disk size is 1 GB") return if size > 1000: plogging.info("- disk size cannot exceed 1000 GB") return if speed not in ('standard', 'highperformance', 'economy'): plogging.info("- disk speed should be either 'standard' " "or 'highperformance' or 'economy'") return if 'disks' in node.extra: for disk in node.extra['disks']: if disk['scsiId'] == id: changed = False if disk['size'] > size: plogging.info("- disk shrinking could break the node") plogging.info("- skipped - disk {} will not be reduced" .format(id)) if disk['size'] < size: plogging.info("- expanding disk {} to {} GB" .format(id, size)) self.change_node_disk_size(node, disk['id'], size) changed = True if disk['speed'].lower() != speed.lower(): plogging.info("- changing disk {} to '{}'" .format(id, speed)) self.change_node_disk_speed(node, disk['id'], speed) changed = True if not changed: plogging.debug("- no change in disk {}".format(id)) return plogging.info("- adding {} GB '{}' disk".format( size, speed)) # if self.engine.safeMode: # plogging.info("- skipped - safe mode") # return while True: try: self.facility.region.ex_add_storage_to_node( node=node, amount=size, speed=speed.upper()) plogging.info("- in progress") except Exception as feedback: if 'RESOURCE_BUSY' in str(feedback): time.sleep(10) continue if 'Please try again later' in str(feedback): time.sleep(10) continue plogging.info("- unable to add disk {} GB '{}'" .format(size, speed)) plogging.error(str(feedback)) break
def configure(self, node, settings): if self._element_name_ in settings: memory = int(settings[self._element_name_]) plogging.debug("- setting {} GB of memory".format(memory)) return memory return False