def prepare_auth_file(self):
    """ Prepare the Occopus auth file """
    # Pull the auth data out of the secret
    changes = {}
    try:
        auth_secret = self.load_auth_data_secret()
    except FileNotFoundError:
        logger.error("Auth data not found")
        raise AdaptorCritical
    auth_data = auth_secret.obj["data"]
    auth_file = auth_data.get("auth_data.yaml", "")
    auth_file = base64.decodebytes(auth_file.encode())
    auth_file = utils.get_yaml_data(auth_file, stream=True)

    # Modify the auth data
    for resource in auth_file.get("resource", []):
        if resource.get("type") == "nova":
            auth = resource.get("auth_data", {})
            self.modify_openstack_authentication(auth, changes)

    # Update the secret with the modified auth data
    if changes:
        new_auth_data = utils.dump_order_yaml(auth_file).encode()
        new_auth_data = base64.encodebytes(new_auth_data)
        auth_data["auth_data.yaml"] = new_auth_data.decode()
        auth_secret.update()
        self.wait_for_volume_update(changes)
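# A minimal, self-contained sketch of the base64 round-trip performed in
# prepare_auth_file. Kubernetes stores secret values base64-encoded, so the
# YAML must be decoded before editing and re-encoded before the secret is
# updated. The payload here is invented for illustration, and PyYAML stands
# in for the utils helpers.
import base64

import yaml

def roundtrip_secret_value(encoded):
    """Decode a secret value, edit it in dict form, and re-encode it."""
    raw = base64.decodebytes(encoded.encode())
    data = yaml.safe_load(raw)            # the auth file as a dict
    data.setdefault("resource", [])       # edits would happen here
    return base64.encodebytes(yaml.safe_dump(data).encode()).decode()

sample = base64.encodebytes(b"resource: []\n").decode()
print(roundtrip_secret_value(sample))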
def _get_infra_def(self, tmp):
    """Read the infra definition and modify the min/max instances
    according to the TOSCA policies. If the template has no policy
    section, or the section is invalid, the adaptor sets the default
    values.
    """
    node_infra = {}
    node_infra['name'] = self.node_name
    node_infra['type'] = self.node_name
    node_infra.setdefault('scaling', {})['min'] = self.min_instances
    node_infra.setdefault('scaling', {})['max'] = self.max_instances

    if not tmp and os.path.isfile(self.infra_def_path_output):
        path = self.infra_def_path_output
    elif tmp and os.path.isfile(self.infra_def_path_output_tmp):
        path = self.infra_def_path_output_tmp
    else:
        path = self.infra_def_path_input

    if self.validate is False or tmp:
        try:
            infra_def = {}
            infra_def = utils.get_yaml_data(path)
            infra_def.setdefault('nodes', [])
            infra_def["nodes"].append(node_infra)
        except OSError as e:
            logger.error(e)

        if tmp:
            utils.dump_order_yaml(infra_def, self.infra_def_path_output_tmp)
        elif self.validate is False:
            utils.dump_order_yaml(infra_def, self.infra_def_path_output)
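# Illustrative only: the shape of the node entry that _get_infra_def appends
# under "nodes" in the infra definition. The names and scaling values below
# are assumed, not read from a real TOSCA template.
import yaml

node_infra = {
    'name': 'worker',
    'type': 'worker',
    'scaling': {'min': 1, 'max': 5},
}
infra_def = {'nodes': []}
infra_def['nodes'].append(node_infra)
print(yaml.safe_dump(infra_def, default_flow_style=False))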
def _translate_node_monitoring_policy(self):
    try:
        nodex = utils.get_yaml_data(self.nodex_manifest_path)
        nodex["metadata"]["labels"][
            "app.kubernetes.io/instance"] = self.short_id
        self.manifests.append(nodex)
    except FileNotFoundError:
        logger.warning("Could not find NodeExporter manifest"
                       f" at {self.nodex_manifest_path}")
def _translate_container_monitoring_policy(self):
    try:
        cadvisor = utils.get_yaml_data(self.cadvisor_manifest_path)
        cadvisor["metadata"]["labels"][
            "app.kubernetes.io/instance"] = self.short_id
        self.manifests.append(cadvisor)
    except FileNotFoundError:
        logger.warning("Could not find cAdvisor manifest"
                       f" at {self.cadvisor_manifest_path}")
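# The two monitoring translators above share one pattern: load a static
# manifest and stamp it with the deployment's short ID so its resources can
# be tracked per application. A hedged sketch with a made-up manifest:
manifest = {
    'kind': 'DaemonSet',
    'metadata': {'name': 'cadvisor', 'labels': {}},
}
manifest['metadata']['labels']['app.kubernetes.io/instance'] = 'abc123'
print(manifest['metadata']['labels'])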
def _reading_config(self):
    """Read the config file and return its contents as a dictionary"""
    logger.debug("reading config file")
    dic_types = dict()
    with open(self.config_path, "r") as stream:
        try:
            dic_types = utils.get_yaml_data(stream.read(), stream=True)
        except OSError as exc:
            logger.error("Error while reading file, error: %s" % exc)
    logger.debug("return dictionary of types from config file")
    return dic_types
def _get_cloud_init(self, tosca_cloud_config, base_cloud_init,
                    insert_mode=None):
    """ Get cloud-config from MiCADO cloud-init template """
    default_cloud_config = {}
    try:
        with open(base_cloud_init, 'r') as f:
            template = jinja2.Template(f.read())
            rendered = template.render(worker_name=self.node_name)
            default_cloud_config = utils.get_yaml_data(rendered, stream=True)
    except OSError as e:
        logger.error(e)
    if not tosca_cloud_config:
        return default_cloud_config
    tosca_cloud_config = utils.get_yaml_data(tosca_cloud_config, stream=True)
    return utils.get_cloud_config(
        insert_mode, RUNCMD_PLACEHOLDER,
        default_cloud_config, tosca_cloud_config)
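# A hedged sketch of the Jinja2 step in _get_cloud_init: render a cloud-init
# template (here from an inline string rather than a file) and parse the
# result as YAML. The template text and worker name are invented, and
# yaml.safe_load stands in for utils.get_yaml_data.
import jinja2
import yaml

template = jinja2.Template('#cloud-config\nhostname: {{ worker_name }}\n')
rendered = template.render(worker_name='micado-worker-1')
print(yaml.safe_load(rendered))   # {'hostname': 'micado-worker-1'}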
def wait_for_volume_update(self, changes):
    """ Wait for update changes to be reflected in the volume """
    wait_timer = 100
    logger.debug("Waiting for authentication data to update...")
    while wait_timer > 0:
        # Read the file in the submitter's auth volume
        try:
            auth_data = utils.get_yaml_data(self.auth_data_submitter)
        except FileNotFoundError:
            logger.error("Credential file missing...")
            raise AdaptorCritical

        # Check to see if the necessary changes have been reflected
        for cloud in auth_data.get("resource", []):
            cloud_type = cloud["type"]
            auth_type = cloud["auth_data"].get("type", "")
            if cloud_type in changes and auth_type == changes[cloud_type]:
                return
        time.sleep(5)
        wait_timer -= 5
    logger.warning(
        "Got timeout while waiting for secret volume to update...")
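# wait_for_volume_update is an instance of a generic poll-until-true loop:
# check a condition, sleep, and give up after a deadline. A standalone
# sketch of that pattern (names and defaults invented here):
import time

def poll_until(condition, timeout=100, interval=5):
    """Return True once condition() holds, or False on timeout."""
    while timeout > 0:
        if condition():
            return True
        time.sleep(interval)
        timeout -= interval
    return False

# e.g. poll_until(lambda: os.path.isfile(auth_path)) would wait for a
# mounted secret file to appear, mirroring the 100 s / 5 s values above.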
def _save_file(self, id_app, path):
    """Called by the engine to dump the template currently being
    handled to the files/templates directory, named after the ID of
    the app.
    """
    data = utils.get_yaml_data(path)
    utils.dump_order_yaml(data, "files/templates/{}.yaml".format(id_app))