def get_ips_for_feature(self, component_key):
    """Return AnsibleHostModel entries for running EC2 instances that belong
    to the given component of this cluster.

    Instances are matched by VPC, a ``tag:<component_key>`` tag with an empty
    value, and the ``cluster_name`` tag. Public or private DNS name / IP is
    chosen according to the cluster's ``cloud.use_public_ips`` setting.
    """
    cluster_name = self.cluster_model.specification.name.lower()
    use_public_ips = self.cluster_model.specification.cloud.use_public_ips
    vpc_id = self.get_vpc_id()
    ec2 = self.session.resource('ec2')

    # Component tags carry an empty value; presence of the tag is what matters.
    instance_filters = [
        {'Name': 'instance-state-name', 'Values': ['running']},
        {'Name': 'vpc-id', 'Values': [vpc_id]},
        {'Name': 'tag:' + component_key, 'Values': ['']},
        {'Name': 'tag:cluster_name', 'Values': [cluster_name]},
    ]

    hosts = []
    for instance in ec2.instances.filter(Filters=instance_filters):
        if use_public_ips:
            hosts.append(AnsibleHostModel(instance.public_dns_name,
                                          instance.public_ip_address))
        else:
            hosts.append(AnsibleHostModel(instance.private_dns_name,
                                          instance.private_ip_address))
    return hosts
def get_ips_for_feature(self, component_key):
    """Return AnsibleHostModel entries for the machines statically assigned
    to the given component in the cluster specification.

    Each machine name listed under the component is resolved to its
    ``infrastructure/machine`` document in ``self.config_docs`` to obtain
    the hostname and IP. Components without a ``machines`` attribute yield
    an empty list.
    """
    component_config = self.cluster_model.specification.components[component_key]
    hosts = []
    # getattr with a default mirrors the hasattr guard: no machines -> no hosts.
    for machine_name in getattr(component_config, 'machines', []):
        machine_doc = select_first(
            self.config_docs,
            lambda doc: doc.kind == 'infrastructure/machine' and doc.name == machine_name)
        hosts.append(AnsibleHostModel(machine_doc.specification.hostname,
                                      machine_doc.specification.ip))
    return hosts
def get_ips_for_feature(self, component_key):
    """Return AnsibleHostModel entries for Azure VMs tagged for the given
    component of this cluster.

    VM IDs are gathered with ``az resource list`` (filtered by the component
    tag and the cluster tag), then resolved to IP addresses with
    ``az vm list-ip-addresses``. Public or private address is chosen
    according to the cluster's ``cloud.use_public_ips`` setting.
    """
    use_public_ips = self.cluster_model.specification.cloud.use_public_ips
    cluster = cluster_tag(self.cluster_prefix, self.cluster_name)

    # NOTE(review): self.run is called with the instance passed explicitly;
    # presumably `run` is defined as a plain/static helper — confirm.
    vm_entries = self.run(self, f'az vm list-ip-addresses --ids $(az resource list --query "[?type==\'Microsoft.Compute/virtualMachines\' && tags.{component_key} == \'\' && tags.cluster == \'{cluster}\'].id" --output tsv)')

    hosts = []
    for entry in vm_entries:
        # Some az outputs wrap each VM record in a single-element list.
        if isinstance(entry, list):
            entry = entry[0]
        vm = entry['virtualMachine']
        if use_public_ips:
            address = vm['network']['publicIpAddresses'][0]['ipAddress']
        else:
            address = vm['network']['privateIpAddresses'][0]
        hosts.append(AnsibleHostModel(vm['name'], address))
    return hosts
def upgrade(self):
    """Upgrade a backed-up build's Ansible inventory to the current layout.

    Loads the inventory from the backup build directory, converts it to the
    templating structure (a list of AnsibleInventoryItem), then:

    - for legacy builds (< 0.3.0, which shipped no manifest): synthesizes a
      bare-minimum cluster model from inventory vars, remaps old role names
      to their current equivalents, and drops obsolete roles;
    - for newer builds: loads the cluster model from the manifest and merges
      it over the current defaults;

    ensures ``image_registry`` and ``repository`` roles exist (placed on the
    first ``kubernetes_master`` host), and saves the new inventory to
    ``self.build_dir``.

    Returns:
        0 on success.

    Raises:
        Exception: when no ``kubernetes_master`` group exists to host the
            repository roles.
    """
    inventory_path = get_inventory_path_for_build(self.backup_build_dir)
    build_version = check_build_output_version(self.backup_build_dir)

    self.logger.info(f'Loading backup Ansible inventory: {inventory_path}')
    loaded_inventory = InventoryManager(loader=DataLoader(), sources=inventory_path)

    # Move loaded inventory to the templating structure.
    new_inventory = []
    for key in loaded_inventory.groups:
        if key not in ('all', 'ungrouped'):
            new_hosts = [AnsibleHostModel(host.address, host.vars['ansible_host'])
                         for host in loaded_inventory.groups[key].hosts]
            new_inventory.append(AnsibleInventoryItem(key, new_hosts))

    if build_version == BUILD_LEGACY:
        self.logger.info('Upgrading Ansible inventory Epiphany < 0.3.0')
        # Epiphany < 0.3.0 did not have a manifest file in the build folder,
        # so create a bare-minimum cluster model from the inventory vars.
        all_vars = loaded_inventory.groups['all'].vars
        self.cluster_model = dict_to_objdict({
            'provider': 'any',
            'specification': {
                'admin_user': {
                    'name': all_vars['ansible_user'],
                    'key_path': all_vars['ansible_ssh_private_key_file']
                }
            }
        })

        # Remap legacy role names to their current equivalents.
        self.rename_role(new_inventory, 'master', 'kubernetes_master')
        self.rename_role(new_inventory, 'worker', 'kubernetes_node')
        self.rename_role(new_inventory, 'deployments', 'applications')
        self.rename_role(new_inventory, 'elasticsearch-curator', 'elasticsearch_curator')
        self.rename_role(new_inventory, 'jmx-exporter', 'jmx_exporter')
        self.rename_role(new_inventory, 'kafka-exporter', 'kafka_exporter')
        self.rename_role(new_inventory, 'haproxy_tls_termination', 'haproxy')

        # Remove linux and reboot roles if present.
        self.delete_role(new_inventory, 'linux')
        self.delete_role(new_inventory, 'reboot')
    else:
        self.logger.info('Upgrading Ansible inventory Epiphany => 0.3.0')
        # Load the cluster model from the manifest shipped with the build.
        self.manifest_docs = load_manifest_docs(self.backup_build_dir)
        self.cluster_model = select_single(
            self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')

        # Merge manifest cluster config with newer defaults so new fields
        # get their default values while manifest values win on conflicts.
        default_cluster_model = load_yaml_obj(data_types.DEFAULT, 'common', 'epiphany-cluster')
        merge_objdict(default_cluster_model, self.cluster_model)
        self.cluster_model = default_cluster_model

    # Check if repo roles are present and, if not, add them on the first
    # kubernetes_master host.
    master = self.get_role(new_inventory, 'kubernetes_master')
    if master is None:
        raise Exception('No kubernetes_master to use as repository')
    master_node = master.hosts[0]

    # Add image_registry if missing.
    if self.get_role(new_inventory, 'image_registry') is None:
        new_inventory.append(AnsibleInventoryItem(
            'image_registry', [AnsibleHostModel(master_node.name, master_node.ip)]))

    # Add repository if missing.
    if self.get_role(new_inventory, 'repository') is None:
        new_inventory.append(AnsibleInventoryItem(
            'repository', [AnsibleHostModel(master_node.name, master_node.ip)]))

    # Save the upgraded inventory into the new build directory.
    save_inventory(new_inventory, self.cluster_model, self.build_dir)
    return 0