def process_nodes(cluster_location, config, method, method_args):
    """Invoke `method` on a ProcessNode for every node at `cluster_location`.

    The clustering results are threaded through the iterations: each
    ProcessNode is constructed with the results accumulated so far, and
    the final accumulation is pretty-printed at the end.

    :param cluster_location: key/prefix identifying the cluster in the node store
    :param config: configuration handed straight through to ``ProcessNode``
    :param method: name of the ``ProcessNode`` method to invoke per node
    :param method_args: extra positional arguments for that method
    """
    accumulated = []
    for node_record in list_nodes(cluster_location, marshall=json):
        worker = ProcessNode(config, node_record, accumulated)
        bound = getattr(worker, method)
        bound(cluster_location, *method_args)
        accumulated = worker.previous_clustering_results
    pp(accumulated)
def process_nodes(cluster_location, config, method, method_args):
    """Invoke `method` on a ProcessNode for every node at `cluster_location`.

    Falls back to ``fetch_node`` (exact match) when the prefix listing
    comes back empty. A per-node ``CalledProcessError`` is logged and the
    remaining nodes are still processed. Clustering results are threaded
    through successive ``ProcessNode`` instances and pretty-printed at
    the end.

    :param cluster_location: key/prefix identifying the cluster in the node store
    :param config: configuration handed straight through to ``ProcessNode``
    :param method: name of the ``ProcessNode`` method to invoke per node
    :param method_args: extra positional arguments for that method
    :raises AssertionError: when no node can be found at `cluster_location`
    """
    clustering_results = []
    nodes = list_nodes(cluster_location, marshall=json)
    if not nodes:  # idiomatic truthiness instead of `len(nodes) == 0`
        try:
            nodes = (fetch_node(cluster_location), )  # try exact match
        except StopIteration:
            raise AssertionError(
                "No node found at {!r}".format(cluster_location))
    if not nodes:
        # Explicit raise instead of `assert`: asserts vanish under -O,
        # but the exception type callers may catch is kept identical.
        raise AssertionError("No nodes found at {!r}".format(cluster_location))
    for node_res in nodes:
        try:
            process_node_obj = ProcessNode(config, node_res,
                                           clustering_results)
            getattr(process_node_obj, method)(cluster_location, *method_args)
            clustering_results = process_node_obj.previous_clustering_results
        except CalledProcessError as e:
            # Best-effort: log the failed node and continue with the rest.
            root_logger.exception(e)
    pprint_OrderedDict(clustering_results)
def setUpClass(cls):
    # NOTE(review): confirm this carries a `@classmethod` decorator in the
    # class body — unittest requires it for setUpClass.
    """Cache the node listing once for the whole TestCase.

    :raises AssertionError: when the node store yields no nodes
    """
    cls.nodes = list_nodes()
    if not cls.nodes:  # idiomatic truthiness instead of `not len(...)`
        raise AssertionError("No nodes found to process")
def __init__(
    self,
    process_filename,
    node=None,
    previous_clustering_results=None,
    redis_client_kwargs=None,
):
    """Bind a cluster node (from the K/V store) to its libcloud driver.

    Loads the process spec from ``process_filename``, selects the
    provider config that matches the node's driver, instantiates the
    driver (best-effort), and normalises ``self.node`` to a libcloud
    ``Node``.

    :param process_filename: path to the JSON(-ref) process spec
    :param node: K/V record of the node; when falsy, the first
        non-``"global::"`` record from ``list_nodes`` is used
    :param previous_clustering_results: results threaded from prior runs
    :param redis_client_kwargs: stored verbatim on the instance
    """
    self.env = Env()
    self.previous_clustering_results = previous_clustering_results
    nodes = None
    if not node:
        # No explicit node given: query the K/V store and pick one.
        nodes = list_nodes(marshall=json)
        if not len(nodes):
            raise etcd3.exceptions.Etcd3Exception(
                "No nodes found matching query")
        # Prefer a non-"global::" record when more than one exists.
        node = (next(
            n for n in nodes
            if "global::" not in n.key) if len(nodes) > 1 else nodes[0])
    with open(process_filename) as f:
        self.process_dict = jsonref.load(f)
    driver_cls = node.value.driver
    # e.g. "AzureNodeDriver" -> "Azure".
    self.driver_name = driver_cls.__name__[:-len("NodeDriver")]
    # Map the driver name onto a libcloud Provider constant: either the
    # upper-cased attribute directly, or by scanning Provider's members.
    driver_to_find = (self.driver_name.upper() if hasattr(
        Provider, self.driver_name.upper()) else next(
            prov_name for prov_name in dir(Provider)
            if getattr(Provider, prov_name) == self.driver_name))
    self.config_provider = next(
        provider for provider in self.process_dict["provider"]["options"]
        if provider["provider"]["name"] == driver_to_find)
    self.redis_client_kwargs = redis_client_kwargs
    self.driver_name = self.driver_name.lower()
    try:
        driver = driver_cls(
            **self.config_provider.get("auth", {"cred": {}})["cred"])
    except requests.exceptions.ConnectionError:
        # Best-effort: carry on offline; `driver` stays unbound below.
        # NOTE(review): `logger.warn` is deprecated in favour of `warning`.
        logger.warn(
            "Connection failed, continuing without connecting to cloud provider's API"
        )
    # Strip the leading "<prefix>/" from the K/V key to get the node name.
    self.node_name = node.key[node.key.find("/", 1) + 1:]
    if nodes:
        pass
    elif self.driver_name in ("azure", ):  # ('azure', 'azure_arm'):
        if "ex_cloud_service_name" not in self.config_provider["auth"].get(
                "create_with", {}):
            raise KeyError(
                "`ex_cloud_service_name` must be defined. "
                "See: http://libcloud.readthedocs.org/en/latest/compute/drivers/azure.html"
                "#libcloud.compute.drivers.azure.AzureNodeDriver.create_node"
            )
        # `driver` may be unbound if the ConnectionError above fired.
        if "driver" in locals():
            nodes = (
                driver.list_nodes()
            )  # (self.config_provider['create_with']['ex_cloud_service_name'])
        else:
            nodes = tuple()
    elif self.driver_name == "azure_arm":
        from libcloud.compute.drivers.azure_arm import AzureNodeDriver

        # Rebuild a libcloud Node straight from the K/V payload.
        self.node = Node(
            node.value["uuid"],
            node.value["name"],
            node.value["state"],
            node.value["public_ips"],
            node.value["private_ips"],
            driver=AzureNodeDriver,
            extra=node.value["extra"],
        )
        nodes = None
    else:
        nodes = driver.list_nodes()

    def ensure_node(n, skip_assert=False):
        # Coerce a K/V record or a plain dict into a libcloud Node.
        if isinstance(n, KeyVal):
            n = dict_to_node(n.value) if isinstance(n.value, dict) else n.value
        elif isinstance(n, dict):
            n = dict_to_node(n)
        if not skip_assert:
            assert isinstance(n, Node)
        return n

    # NOTE(review): `self.node` is only assigned in the azure_arm branch
    # above — presumably a class attribute defaults it elsewhere; confirm,
    # otherwise this read raises AttributeError for the other providers.
    if self.node:
        self.node = ensure_node(self.node)
    else:
        # Match the live provider listing against the K/V record's uuid.
        self.node = ensure_node(
            next(
                (_node
                 for _node in nodes if _node.value.uuid == node.value.uuid),
                None,
            ),
            skip_assert=True,
        )
        if not self.node:
            logger.warning(
                "node not found, maybe the cloud provider is still provisioning? "
                "K/V version will be used in the meantime.")
            self.node = node.value
    assert isinstance(self.node, Node), "Expected Node got {!r}".format(
        type(self.node))
    if self.node.extra is not None:
        if "ssh_config" in self.node.extra:
            if "IdentityFile" in self.node.extra["ssh_config"]:
                self.config_provider["ssh"] = {
                    "private_key_path":
                    self.node.extra["ssh_config"]["IdentityFile"]
                }
                # NOTE(review): this updates the dict just assigned with
                # itself — the pre-existing "ssh" entry was overwritten on
                # the previous line, so any prior ssh settings are lost;
                # the reverse merge was likely intended. Confirm.
                self.config_provider["ssh"].update(
                    self.config_provider.get("ssh", {}))
            self.env.ssh_config = self.node.extra["ssh_config"]
    # if 'password' in self.node.extra:
    # pp({"node.extra": self.node.extra})
    # pp(node_to_dict(self.node))
    self.dns_name = self.node.extra.get("dns_name")
        # --- fragment: tail of a method dispatching on cluster["type"]; its
        #     beginning (and enclosing loop, presumably) is outside this view.
        else:
            raise NotImplementedError("{}".format(cluster["type"]))
        offregister = offregisterC(self.env, self.node, self.node_name,
                                   self.dns_name)
        add_cluster_ret = offregister.prepare_cluster_obj(cluster, res)
        offregister.run_tasks(**add_cluster_ret._asdict())
        # offregister.run_tasks(cluster_path, cluster_type, res, tag, args, kwargs)
        # Persist the (possibly updated) node record under the cluster path.
        save_node_info(
            self.node_name,
            node_to_dict(self.node),
            folder=add_cluster_ret.cluster_path,
            marshall=json,
        )

    def guess_os_username(self, hint=None):
        # Delegate to the module-level helper of the same name.
        return guess_os_username(node=self.node, hint=hint)

    def guess_os(self, hint=None):
        # Delegate to the module-level helper of the same name.
        return guess_os(node=self.node, hint=hint)


# Build a ProcessNode (sample register config) for every known node.
handle_unprocessed = lambda: tuple(
    ProcessNode(resource_filename("config", "register.sample.json"), node)
    for node in list_nodes(marshall=json))

if __name__ == "__main__":
    unprocessed_handler = handle_unprocessed()
    for handler in unprocessed_handler:
        pp(handler.set_clusters("/unclustered"))
        # --- fragment: tail of a method (presumably guess_os_username);
        #     its beginning is outside this view.
        return 'user'

    def guess_os(self, hint=None):
        """Best-effort OS guess from the node name; defaults to 'ubuntu'."""
        node_name = self.node.name.lower()
        if 'ubuntu' in node_name:
            return 'ubuntu'
        elif 'core' in node_name:
            return 'core'
        return hint or 'ubuntu'

    def tail(self, within, *method_args):
        """Run the `<os>_tail_<cluster>` task for each registered cluster.

        NOTE(review): task callables are looked up in this module's
        globals(); confirm they are star-imported from the task modules.
        """
        method_args = ''.join(method_args)
        self.setup_connection_meta(within)
        directory = self.get_directory(self.process_dict, within)
        for cluster_type in self.process_dict['register'][directory]:
            # Strip a trailing ':master' qualifier, if present.
            cluster_type = cluster_type[:-len(':master')] if cluster_type.endswith(':master') else cluster_type
            execute(
                globals()[
                    '{os}_tail_{cluster_name}'.format(os=self.guess_os_username(),
                                                      cluster_name=cluster_type)],
                method_args
            )


# Build a ProcessNode (sample register config) for every known node.
handle_unprocessed = lambda: tuple(ProcessNode(resource_filename('config', 'register.sample.json'), node)
                                   for node in list_nodes(marshall=json))

if __name__ == '__main__':
    unprocessed_handler = handle_unprocessed()
    for handler in unprocessed_handler:
        pp(handler.set_clusters())
def setUpClass(cls):
    # NOTE(review): confirm this carries a `@classmethod` decorator in the
    # class body — unittest requires it for setUpClass.
    """Cache the node listing once for the whole TestCase.

    :raises AssertionError: when the node store yields no nodes
    """
    cls.nodes = list_nodes()
    if not cls.nodes:  # idiomatic truthiness instead of `not len(...)`
        raise AssertionError('No nodes found to process')