def get_pvc_tasks(self, tags=None):
    """
    Build Ansible tasks that create a PVC for each template, and remove
    any PVCs whose volume config requests state 'absent'.

    :param tags: optional list of tags attached to each generated task
    :return: CommentedSeq of task dicts
    """
    tags = tags or []  # None default avoids a shared mutable argument
    module_name = 'k8s_v1_persistent_volume_claim'
    tasks = CommentedSeq()
    for template in self.get_pvc_templates():
        task = CommentedMap()
        task['name'] = 'Create PVC'
        task[module_name] = CommentedMap()
        task[module_name]['state'] = 'present'
        if self._auth:
            for key in self._auth:
                task[module_name][key] = self._auth[key]
        task[module_name]['force'] = template.pop('force', False)
        task[module_name]['resource_definition'] = template
        if tags:
            task['tags'] = copy.copy(tags)
        tasks.append(task)
    if self._volumes:
        # Remove any volumes where state is 'absent'
        for volname, vol_config in self._volumes.items():
            if self.CONFIG_KEY in vol_config:
                if vol_config[self.CONFIG_KEY].get('state', 'present') == 'absent':
                    task = CommentedMap()
                    task['name'] = 'Remove PVC'
                    task[module_name] = CommentedMap()
                    task[module_name]['name'] = volname
                    task[module_name]['namespace'] = self._namespace_name
                    task[module_name]['state'] = 'absent'
                    if self._auth:
                        for key in self._auth:
                            task[module_name][key] = self._auth[key]
                    if tags:
                        task['tags'] = copy.copy(tags)
                    tasks.append(task)
    return tasks
def get_service_tasks(self, tags=None):
    """
    Build Ansible tasks that create a Service for each template, and remove
    any services whose config requests state 'absent'.

    :param tags: optional list of tags attached to each generated task
    :return: CommentedSeq of task dicts
    """
    tags = tags or []  # None default avoids a shared mutable argument
    module_name = 'k8s_v1_service'
    tasks = CommentedSeq()
    for template in self.get_services_templates():
        task = CommentedMap()
        task['name'] = 'Create service'
        task[module_name] = CommentedMap()
        task[module_name]['state'] = 'present'
        if self._auth:
            for key in self._auth:
                task[module_name][key] = self._auth[key]
        task[module_name]['force'] = template.pop('force', False)
        task[module_name]['resource_definition'] = template
        if tags:
            task['tags'] = copy.copy(tags)
        tasks.append(task)
    if self._services:
        # Remove any services where state is 'absent'
        for name, service in iteritems(self._services):
            if service.get(self.CONFIG_KEY, {}).get('state', 'present') == 'absent':
                task = CommentedMap()
                task['name'] = 'Remove service'
                task[module_name] = CommentedMap()
                task[module_name]['state'] = 'absent'
                task[module_name]['name'] = name
                task[module_name]['namespace'] = self._namespace_name
                if self._auth:
                    for key in self._auth:
                        task[module_name][key] = self._auth[key]
                if tags:
                    task['tags'] = copy.copy(tags)
                tasks.append(task)
    return tasks
def get_service_tasks(self, tags=None):
    """
    Build Ansible tasks that create a Service for each template, and remove
    any services whose config requests state 'absent'.

    :param tags: optional list of tags attached to each generated task
    :return: CommentedSeq of task dicts
    """
    tags = tags or []  # None default avoids a shared mutable argument
    module_name = 'k8s_v1_service'
    tasks = CommentedSeq()
    for template in self.get_services_templates():
        task = CommentedMap()
        task['name'] = 'Create service'
        task[module_name] = CommentedMap()
        task[module_name]['state'] = 'present'
        if self._auth:
            for key in self._auth:
                task[module_name][key] = self._auth[key]
        task[module_name]['force'] = template.pop('force', False)
        task[module_name]['resource_definition'] = template
        if tags:
            task['tags'] = copy.copy(tags)
        tasks.append(task)
    if self._services:
        # Remove any services where state is 'absent'
        for name, service in self._services.items():
            if service.get(self.CONFIG_KEY, {}).get('state', 'present') == 'absent':
                task = CommentedMap()
                task['name'] = 'Remove service'
                task[module_name] = CommentedMap()
                task[module_name]['state'] = 'absent'
                task[module_name]['name'] = name
                task[module_name]['namespace'] = self._namespace_name
                if self._auth:
                    for key in self._auth:
                        task[module_name][key] = self._auth[key]
                if tags:
                    task['tags'] = copy.copy(tags)
                tasks.append(task)
    return tasks
def get_pvc_tasks(self, tags=None):
    """
    Build Ansible tasks that create a PVC for each template, and remove
    any PVCs whose volume config requests state 'absent'.

    :param tags: optional list of tags attached to each generated task
    :return: CommentedSeq of task dicts
    """
    tags = tags or []  # None default avoids a shared mutable argument
    module_name = 'k8s_v1_persistent_volume_claim'
    tasks = CommentedSeq()
    for template in self.get_pvc_templates():
        task = CommentedMap()
        task['name'] = 'Create PVC'
        task[module_name] = CommentedMap()
        task[module_name]['state'] = 'present'
        if self._auth:
            for key in self._auth:
                task[module_name][key] = self._auth[key]
        task[module_name]['force'] = template.pop('force', False)
        task[module_name]['resource_definition'] = template
        if tags:
            task['tags'] = copy.copy(tags)
        tasks.append(task)
    if self._volumes:
        # Remove any volumes where state is 'absent'
        for volname, vol_config in iteritems(self._volumes):
            if self.CONFIG_KEY in vol_config:
                if vol_config[self.CONFIG_KEY].get('state', 'present') == 'absent':
                    task = CommentedMap()
                    task['name'] = 'Remove PVC'
                    task[module_name] = CommentedMap()
                    task[module_name]['name'] = volname
                    task[module_name]['namespace'] = self._namespace_name
                    task[module_name]['state'] = 'absent'
                    if self._auth:
                        for key in self._auth:
                            task[module_name][key] = self._auth[key]
                    if tags:
                        task['tags'] = copy.copy(tags)
                    tasks.append(task)
    return tasks
def get_secret_templates(self):
    """
    Build a Secret resource template for each configured secret.

    :return: CommentedSeq of Secret templates
    """
    def _secret(secret_name, secret):
        # Build one template. Previously a second `CommentedMap()` assignment
        # discarded the map carrying 'force', so get_secret_tasks() always
        # popped the default False instead of the configured value.
        template = CommentedMap()
        template['force'] = secret.get('force', False)
        template['apiVersion'] = self.DEFAULT_API_VERSION
        template['kind'] = "Secret"
        template['metadata'] = CommentedMap([
            ('name', secret_name),
            ('namespace', self._namespace_name)
        ])
        template['type'] = 'Opaque'
        template['data'] = {}
        # Each entry renders a Jinja b64encode expression over the vault var.
        for key, vault_variable in iteritems(secret):
            template['data'][key] = "{{ %s | b64encode }}" % vault_variable
        return template

    templates = CommentedSeq()
    if self._secrets:
        for secret_name, secret_config in iteritems(self._secrets):
            templates.append(_secret(secret_name, secret_config))
    return templates
def get_secret_templates(self):
    """
    Build a Secret resource template for each configured secret.

    :return: CommentedSeq of Secret templates
    """
    def _secret(secret_name, secret):
        # Build one template. Previously a second `CommentedMap()` assignment
        # discarded the map carrying 'force', so the task builder always
        # popped the default False instead of the configured value.
        template = CommentedMap()
        template['force'] = secret.get('force', False)
        template['apiVersion'] = self.DEFAULT_API_VERSION
        template['kind'] = "Secret"
        template['metadata'] = CommentedMap([
            ('name', secret_name),
            ('namespace', self._namespace_name)
        ])
        template['type'] = 'Opaque'
        template['data'] = {}
        # Each entry renders a Jinja b64encode expression over the vault var.
        for key, vault_variable in iteritems(secret):
            template['data'][key] = "{{ %s | b64encode }}" % vault_variable
        return template

    templates = CommentedSeq()
    if self._secrets:
        for secret_name, secret_config in iteritems(self._secrets):
            templates.append(_secret(secret_name, secret_config))
    return templates
def get_deployment_tasks(self, module_name=None, engine_state=None, tags=None):
    """
    Build deployment create/scale tasks, plus removal tasks for services
    whose config requests state 'absent'.

    :param module_name: Ansible module to invoke for each task
    :param engine_state: None to create/scale up; 'stop' to scale down
    :param tags: optional list of tags attached to each generated task
    :return: CommentedSeq of task dicts
    """
    tags = tags or []  # None default avoids a shared mutable argument
    tasks = CommentedSeq()
    for template in self.get_deployment_templates(engine_state=engine_state):
        task = CommentedMap()
        if engine_state is None:
            task_name = 'Create deployment, and scale replicas up'
        else:
            task_name = 'Stop running containers by scaling replicas down to 0'
        task['name'] = task_name
        task[module_name] = CommentedMap()
        task[module_name]['state'] = 'present'
        if self._auth:
            for key in self._auth:
                task[module_name][key] = self._auth[key]
        task[module_name]['force'] = template.pop('force', False)
        task[module_name]['resource_definition'] = template
        if tags:
            task['tags'] = copy.copy(tags)
        tasks.append(task)
    if engine_state != 'stop' and self._services:
        for name, service_config in self._services.items():
            # Remove deployment for any services where state is 'absent'
            if service_config.get(self.CONFIG_KEY, {}).get('state', 'present') == 'absent':
                # Start a fresh task dict here. Previously the last appended
                # create-task was reused and mutated in place, corrupting it.
                task = CommentedMap()
                task['name'] = 'Remove deployment'
                task[module_name] = CommentedMap()
                task[module_name]['state'] = 'absent'
                if self._auth:
                    for key in self._auth:
                        task[module_name][key] = self._auth[key]
                task[module_name]['name'] = name
                task[module_name]['namespace'] = self._namespace_name
                if tags:
                    task['tags'] = copy.copy(tags)
                tasks.append(task)
    return tasks
def _set_subcomponents(self, match_categories):
    """Set subcomponents for the top component from the match categories."""
    # Serialize the top component and attach the file-level banner comment.
    data = CommentedMap(self.top_component.as_yaml())
    data.yaml_set_start_comment(TOP_LEVEL_COMMENT)
    temp_list = CommentedSeq()
    # Set the subcomponents and comments
    for top_comment, start_index, matches in match_categories:
        components = self._matches_to_components(matches)
        for subcomponent in components:
            try:
                # Extract inline comment before it's removed
                inline_comment = subcomponent.inline_comment
            except AttributeError:
                inline_comment = None
            d2 = CommentedMap(subcomponent.as_yaml())
            if inline_comment:
                # Apply inline comment to data
                d2.yaml_add_eol_comment(comment=inline_comment, key='name')
            temp_list.append(d2)
        # Place this category's heading comment before its first entry
        # (start_index indexes into the accumulated subcomponent list).
        temp_list.yaml_set_comment_before_after_key(key=start_index, before=top_comment, indent=OFFSET)
    data['subcomponents'] = temp_list
    return data
def _merge_simple_lists(self, lhs: CommentedSeq, rhs: CommentedSeq, path: YAMLPath, node_coord: NodeCoords) -> CommentedSeq:
    """
    Merge two CommentedSeq-wrapped lists of Scalars or CommentedSeqs.

    Parameters:
    1. lhs (CommentedSeq) The merge target.
    2. rhs (CommentedSeq) The merge source.
    3. path (YAMLPath) Location within the DOM where this merge is taking place.
    4. node_coord (NodeCoords) The RHS root node, its parent, and reference within its parent; used for config lookups.

    Returns: (list) The merged result.

    Raises:
    - `MergeException` when a clean merge is impossible.
    """
    if not isinstance(lhs, CommentedSeq):
        raise MergeException(
            "Impossible to add Array data to non-Array destination.", path)

    merge_mode = self.config.array_merge_mode(node_coord)
    # LEFT/RIGHT modes short-circuit: one side wins wholesale.
    if merge_mode is ArrayMergeOpts.LEFT:
        return lhs
    if merge_mode is ArrayMergeOpts.RIGHT:
        return rhs

    # Tag-stripped copy of LHS so a TaggedScalar and a plain scalar with the
    # same value compare as duplicates in UNIQUE mode.
    tagless_lhs = Nodes.tagless_elements(lhs)
    for idx, ele in enumerate(rhs):
        path_next = path + "[{}]".format(idx)
        self.logger.debug(
            "Processing element {} at {}.".format(idx, path_next),
            prefix="Merger::_merge_simple_lists: ", data=ele)

        if merge_mode is ArrayMergeOpts.UNIQUE:
            cmp_val = ele
            if isinstance(ele, TaggedScalar):
                cmp_val = ele.value

            self.logger.debug(
                "Looking for comparison value, {}, in:".format(cmp_val),
                prefix="Merger::_merge_simple_lists: ", data=tagless_lhs)

            if cmp_val in tagless_lhs:
                # Duplicate found: replace matching LHS element(s) with the
                # RHS element so RHS tags/metadata win, preserving position.
                lhs = CommentedSeq([
                    ele
                    if (e == cmp_val
                        or (isinstance(e, TaggedScalar)
                            and e.value == cmp_val))
                    else e
                    for e in lhs
                ])
            else:
                lhs.append(ele)
            continue
        # Non-UNIQUE (append) mode: keep everything from both sides.
        lhs.append(ele)
    return lhs
def get_services_templates(self):
    """ Generate a service configuration """
    def _service_template(svc_name, svc_config):
        # Returns a populated Service template, or None when the service is
        # absent or exposes no ports (callers treat falsy as "skip").
        state = svc_config.get(self.CONFIG_KEY, {}).get('state', 'present')
        if state != 'present':
            return None
        exposed_ports = self.get_service_ports(svc_config)
        if not exposed_ports:
            return None
        template = CommentedMap()
        template['apiVersion'] = self.DEFAULT_API_VERSION
        template['kind'] = 'Service'
        template['force'] = svc_config.get(self.CONFIG_KEY, {}).get('service', {}).get('force', False)
        labels = CommentedMap([('app', self._namespace_name), ('service', svc_name)])
        template['metadata'] = CommentedMap([
            ('name', svc_name),
            ('namespace', self._namespace_name),
            ('labels', copy.deepcopy(labels))
        ])
        template['spec'] = CommentedMap([
            ('selector', copy.deepcopy(labels)),
            ('ports', exposed_ports)
        ])
        # Translate options:
        config = svc_config.get(self.CONFIG_KEY)
        if config:
            for key, value in config.items():
                if key != 'service':
                    continue
                for opt, opt_value in value.items():
                    if opt == 'force':
                        continue
                    if opt == 'metadata':
                        self.copy_attribute(template, opt, opt_value)
                    else:
                        self.copy_attribute(template['spec'], opt, opt_value)
        return template

    templates = CommentedSeq()
    if self._services:
        for svc_name, svc_config in self._services.items():
            rendered = _service_template(svc_name, svc_config)
            if rendered:
                templates.append(rendered)
            if svc_config.get('links'):
                # create services for aliased links
                for link in svc_config['links']:
                    if ':' not in link:
                        continue
                    linked_name, alias = link.split(':')
                    linked_config = self._services.get(linked_name)
                    if linked_config:
                        aliased = _service_template(alias, linked_config)
                        if aliased:
                            templates.append(aliased)
    return templates
def cmap(
    d,  # type: Union[int, float, str, Text, Dict[Text, Any], List[Dict[Text, Any]]]
    lc=None,  # type: Optional[List[int]]
    fn=None,  # type: Optional[Text]
):  # type: (...) -> Union[int, float, str, Text, CommentedMap, CommentedSeq]
    """Recursively convert d into CommentedMap/CommentedSeq nodes.

    Plain mappings/sequences are rewrapped; existing commented nodes are
    visited in place. Every node is annotated with line/column info (`lc`,
    a 4-int [line, col, line, col] list) and a filename (`fn`), inheriting
    the parent's values when the child has none of its own.
    """
    if lc is None:
        lc = [0, 0, 0, 0]
    if fn is None:
        fn = "test"
    if isinstance(d, CommentedMap):
        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
        for k, v in six.iteritems(d):
            # Use per-key position data when the map carries any.
            if d.lc.data is not None and k in d.lc.data:
                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
            else:
                d[k] = cmap(v, lc, fn=fn)
        return d
    if isinstance(d, CommentedSeq):
        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
        for k2, v2 in enumerate(d):
            # Use per-index position data when the seq carries any.
            if d.lc.data is not None and k2 in d.lc.data:
                d[k2] = cmap(v2, lc=d.lc.data[k2], fn=fn)
            else:
                d[k2] = cmap(v2, lc, fn=fn)
        return d
    if isinstance(d, MutableMapping):
        cm = CommentedMap()
        # Sorted keys give deterministic output ordering.
        for k in sorted(d.keys()):
            v = d[k]
            if isinstance(v, CommentedBase):
                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
            else:
                uselc = lc
                vfn = fn
            cm[k] = cmap(v, lc=uselc, fn=vfn)
            cm.lc.add_kv_line_col(k, uselc)
            cm.lc.filename = fn
        return cm
    if isinstance(d, MutableSequence):
        cs = CommentedSeq()
        for k3, v3 in enumerate(d):
            if isinstance(v3, CommentedBase):
                uselc = [v3.lc.line, v3.lc.col, v3.lc.line, v3.lc.col]
                vfn = v3.lc.filename if hasattr(v3.lc, "filename") else fn
            else:
                uselc = lc
                vfn = fn
            cs.append(cmap(v3, lc=uselc, fn=vfn))
            cs.lc.add_kv_line_col(k3, uselc)
            cs.lc.filename = fn
        return cs
    else:
        # Scalars pass through untouched.
        return d
def cmap(
    d, lc=None, fn=None
):  # type: (Union[int, float, str, Text, Dict, List], List[int], Text) -> Union[int, float, str, Text, CommentedMap, CommentedSeq]
    """Recursively convert d into CommentedMap/CommentedSeq nodes.

    Plain dicts/lists are rewrapped; existing commented nodes are visited in
    place. Every node is annotated with line/column info (`lc`) and a
    filename (`fn`), inheriting the parent's values when the child has none.
    """
    if lc is None:
        lc = [0, 0, 0, 0]
    if fn is None:
        fn = "test"
    if isinstance(d, CommentedMap):
        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
        for k, v in six.iteritems(d):
            # Guard: lc.data may be None for nodes without position data;
            # an unguarded `k in d.lc.data` raised TypeError.
            if d.lc.data is not None and k in d.lc.data:
                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
            else:
                d[k] = cmap(v, lc, fn=fn)
        return d
    if isinstance(d, CommentedSeq):
        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
        for k, v in enumerate(d):
            if d.lc.data is not None and k in d.lc.data:
                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
            else:
                d[k] = cmap(v, lc, fn=fn)
        return d
    if isinstance(d, dict):
        cm = CommentedMap()
        # Sorted keys give deterministic output ordering.
        for k in sorted(d.keys()):
            v = d[k]
            if isinstance(v, CommentedBase):
                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
            else:
                uselc = lc
                vfn = fn
            cm[k] = cmap(v, lc=uselc, fn=vfn)
            cm.lc.add_kv_line_col(k, uselc)
            cm.lc.filename = fn
        return cm
    if isinstance(d, list):
        cs = CommentedSeq()
        for k, v in enumerate(d):
            if isinstance(v, CommentedBase):
                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
            else:
                uselc = lc
                vfn = fn
            cs.append(cmap(v, lc=uselc, fn=vfn))
            cs.lc.add_kv_line_col(k, uselc)
            cs.lc.filename = fn
        return cs
    else:
        # Scalars pass through untouched.
        return d
def get_services_templates(self):
    """ Generate a service configuration """
    def _create_service(name, service):
        # Build a Service template; an empty/None result means "skip".
        config = service.get(self.CONFIG_KEY, {})
        if config.get('state', 'present') != 'present':
            return None
        ports = self.get_service_ports(service)
        if not ports:
            return None
        common_labels = CommentedMap([
            ('app', self._namespace_name),
            ('service', name)
        ])
        template = CommentedMap()
        template['apiVersion'] = self.DEFAULT_API_VERSION
        template['kind'] = 'Service'
        template['force'] = config.get('service', {}).get('force', False)
        template['metadata'] = CommentedMap([
            ('name', name),
            ('namespace', self._namespace_name),
            ('labels', copy.deepcopy(common_labels))
        ])
        template['spec'] = CommentedMap([
            ('selector', copy.deepcopy(common_labels)),
            ('ports', ports)
        ])
        # Translate remaining 'service' options onto metadata or spec.
        if service.get(self.CONFIG_KEY):
            for key, value in service[self.CONFIG_KEY].items():
                if key != 'service':
                    continue
                for service_key, service_value in value.items():
                    if service_key == 'force':
                        continue
                    target = template if service_key == 'metadata' else template['spec']
                    self.copy_attribute(target, service_key, service_value)
        return template

    templates = CommentedSeq()
    if self._services:
        for name, service in self._services.items():
            built = _create_service(name, service)
            if built:
                templates.append(built)
            # create services for aliased links
            for link in service.get('links') or []:
                if ':' not in link:
                    continue
                service_name, alias = link.split(':')
                alias_config = self._services.get(service_name)
                if alias_config:
                    aliased = _create_service(alias, alias_config)
                    if aliased:
                        templates.append(aliased)
    return templates
def split_container_config(input_data):
    """Expand each component into one single-container copy per requested container."""
    expanded = CommentedSeq()
    for component in sort_components_based_on_execution_id(input_data):
        for _ in range(component['deploy']['container_count']):
            clone = copy.deepcopy(component)
            clone['container_count'] = 1
            # NOTE(review): '+ 0' gives every clone the same id; if per-container
            # unique ids were intended this may be a latent bug -- confirm.
            clone['id'] = component['execution_id'] + 0
            del clone['execution_id']
            expanded.append(clone)
    input_data['lightweight_components'] = expanded
    return input_data
def cmap(d, lc=None, fn=None):
    # type: (Union[int, float, str, Text, Dict, List], List[int], Text) -> Union[int, float, str, Text, CommentedMap, CommentedSeq]
    """Recursively convert d into CommentedMap/CommentedSeq nodes.

    Plain dicts/lists are rewrapped; existing commented nodes are visited in
    place. Every node is annotated with line/column info (`lc`) and a
    filename (`fn`), inheriting the parent's values when the child has none.
    """
    if lc is None:
        lc = [0, 0, 0, 0]
    if fn is None:
        fn = "test"
    if isinstance(d, CommentedMap):
        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
        for k, v in six.iteritems(d):
            # Guard: lc.data may be None for nodes without position data;
            # an unguarded `k in d.lc.data` raised TypeError.
            if d.lc.data is not None and k in d.lc.data:
                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
            else:
                d[k] = cmap(v, lc, fn=fn)
        return d
    if isinstance(d, CommentedSeq):
        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
        for k, v in enumerate(d):
            if d.lc.data is not None and k in d.lc.data:
                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
            else:
                d[k] = cmap(v, lc, fn=fn)
        return d
    if isinstance(d, dict):
        cm = CommentedMap()
        # Sorted keys give deterministic output ordering.
        for k in sorted(d.keys()):
            v = d[k]
            if isinstance(v, CommentedBase):
                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
            else:
                uselc = lc
                vfn = fn
            cm[k] = cmap(v, lc=uselc, fn=vfn)
            cm.lc.add_kv_line_col(k, uselc)
            cm.lc.filename = fn
        return cm
    if isinstance(d, list):
        cs = CommentedSeq()
        for k, v in enumerate(d):
            if isinstance(v, CommentedBase):
                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
            else:
                uselc = lc
                vfn = fn
            cs.append(cmap(v, lc=uselc, fn=vfn))
            cs.lc.add_kv_line_col(k, uselc)
            cs.lc.filename = fn
        return cs
    else:
        # Scalars pass through untouched.
        return d
def test_before_top_seq_from_scratch(self):
    # Verify that a start comment set on a top-level sequence is emitted
    # before its entries when round-tripped.
    from ruamel.yaml.comments import CommentedSeq
    data = CommentedSeq()
    data.append('a')
    data.append('b')
    data.yaml_set_start_comment('Hello\nWorld\n')
    print(round_trip_dump(data))
    # NOTE(review): the expected-output literal below appears whitespace-
    # mangled (newlines collapsed); confirm exact layout against VCS history.
    compare(
        data,
        """ {comment} Hello {comment} World - a - b """.format(comment='#'))
def _resolve_idmap(
    self,
    document: CommentedMap,
    loader: "Loader",
) -> None:
    """Convert mapping-form idmap fields into their canonical list form.

    Each key of the mapping becomes the mapSubject field of one list entry;
    non-mapping values are wrapped under the field's mapPredicate.  Source
    line/column/filename info is carried onto every new node so later error
    messages still point at the original document.
    """
    # Convert fields with mapSubject into lists
    # use mapPredicate if the mapped value isn't a dict.
    for idmapField in loader.idmap:
        if idmapField in document:
            idmapFieldValue = document[idmapField]
            # $import/$include directives are resolved elsewhere; skip them.
            if (
                isinstance(idmapFieldValue, MutableMapping)
                and "$import" not in idmapFieldValue
                and "$include" not in idmapFieldValue
            ):
                ls = CommentedSeq()
                # Sorted keys give a deterministic list ordering.
                for k in sorted(idmapFieldValue.keys()):
                    val = idmapFieldValue[k]
                    v = None  # type: Optional[CommentedMap]
                    if not isinstance(val, CommentedMap):
                        if idmapField in loader.mapPredicate:
                            # Wrap the scalar value under the mapPredicate key.
                            v = CommentedMap(
                                ((loader.mapPredicate[idmapField], val),)
                            )
                            v.lc.add_kv_line_col(
                                loader.mapPredicate[idmapField],
                                document[idmapField].lc.data[k],
                            )
                            v.lc.filename = document.lc.filename
                        else:
                            raise ValidationException(
                                "mapSubject '{}' value '{}' is not a dict "
                                "and does not have a mapPredicate.".format(k, v)
                            )
                    else:
                        v = val
                    # The mapping key becomes the entry's mapSubject value.
                    v[loader.idmap[idmapField]] = k
                    v.lc.add_kv_line_col(
                        loader.idmap[idmapField], document[idmapField].lc.data[k]
                    )
                    v.lc.filename = document.lc.filename
                    ls.lc.add_kv_line_col(len(ls), document[idmapField].lc.data[k])
                    ls.lc.filename = document.lc.filename
                    ls.append(v)
                document[idmapField] = ls
def generate_orchestration_playbook(self, url=None, namespace=None, local_images=True, **kwargs):
    """
    Generate an Ansible playbook to orchestrate services.
    :param url: registry URL where images will be pulled from
    :param namespace: registry namespace
    :param local_images: bypass pulling images, and use local copies
    :return: playbook dict
    """
    for service_name in self.services:
        image = self.get_latest_image_for_service(service_name)
        if local_images:
            self.services[service_name]['image'] = image.tags[0]
        else:
            # Append a trailing '/' so the namespace is not replaced by the
            # image path when joining.
            if namespace is not None:
                image_url = urljoin('{}/'.format(urljoin(url, namespace)), image.tags[0])
            else:
                image_url = urljoin(url, image.tags[0])
            self.services[service_name]['image'] = image_url

    if kwargs.get('k8s_auth'):
        # Bug fix: previously read kwargs['auth'], raising KeyError when the
        # caller supplied only 'k8s_auth'.
        self.k8s_client.set_authorization(kwargs['k8s_auth'])

    play = CommentedMap()
    play['name'] = u'Manage the lifecycle of {} on {}'.format(self.project_name, self.display_name)
    play['hosts'] = 'localhost'
    play['gather_facts'] = 'no'
    play['connection'] = 'local'
    play['roles'] = CommentedSeq()
    play['tasks'] = CommentedSeq()
    role = CommentedMap([
        ('role', 'kubernetes-modules')
    ])
    play['roles'].append(role)
    play.yaml_set_comment_before_after_key(
        'roles', before='Include Ansible Kubernetes and OpenShift modules', indent=4)
    play.yaml_set_comment_before_after_key(
        'tasks',
        before='Tasks for setting the application state. '
               'Valid tags include: start, stop, restart, destroy',
        indent=4)
    play['tasks'].append(self.deploy.get_namespace_task(state='present', tags=['start']))
    play['tasks'].append(self.deploy.get_namespace_task(state='absent', tags=['destroy']))
    play['tasks'].extend(self.deploy.get_service_tasks(tags=['start']))
    play['tasks'].extend(self.deploy.get_deployment_tasks(engine_state='stop', tags=['stop', 'restart']))
    play['tasks'].extend(self.deploy.get_deployment_tasks(tags=['start', 'restart']))
    play['tasks'].extend(self.deploy.get_pvc_tasks(tags=['start']))

    playbook = CommentedSeq()
    playbook.append(play)
    logger.debug(u'Created playbook to run project', playbook=playbook)
    return playbook
def split_component_config(input_data):
    """Flatten components so each copy carries exactly one deploy entry."""
    expanded = CommentedSeq()
    for component in input_data['lightweight_components']:
        for deploy_entry in component['deploy']:
            clone = copy.deepcopy(component)
            clone['deploy'] = copy.deepcopy(deploy_entry)
            expanded.append(clone)
    input_data['lightweight_components'] = expanded
    return input_data
def get_pvc_templates(self):
    """
    Build a PersistentVolumeClaim template for each volume whose config
    requests state 'present'.

    :return: CommentedSeq of PVC templates
    """
    def _volume_to_pvc(claim_name, claim):
        # Build one template. Previously a second `CommentedMap()` assignment
        # discarded the map carrying 'force', which get_pvc_tasks() pops.
        template = CommentedMap()
        template['force'] = claim.get('force', False)
        template['apiVersion'] = self.DEFAULT_API_VERSION
        template['kind'] = "PersistentVolumeClaim"
        template['metadata'] = CommentedMap([
            ('name', claim_name),
            ('namespace', self._namespace_name)
        ])
        template['spec'] = CommentedMap()
        template['spec']['resources'] = {'requests': {'storage': '1Gi'}}
        if claim.get('volume_name'):
            template['spec']['volumeName'] = claim['volume_name']
        if claim.get('access_modes'):
            template['spec']['accessModes'] = claim['access_modes']
        if claim.get('requested_storage'):
            template['spec']['resources']['requests']['storage'] = claim['requested_storage']
        if claim.get('storage_class'):
            if not template['metadata'].get('annotations'):
                template['metadata']['annotations'] = {}
            # TODO verify this annotation syntax
            template['metadata']['annotations']['storageClass'] = claim['storage_class']
        if claim.get('selector'):
            # Bug fix: match_labels/match_expressions live under
            # claim['selector'], not at the top level of claim (the old code
            # raised KeyError).
            if claim['selector'].get('match_labels'):
                if not template['spec'].get('selector'):
                    template['spec']['selector'] = dict()
                template['spec']['selector']['matchLabels'] = claim['selector']['match_labels']
            if claim['selector'].get('match_expressions'):
                if not template['spec'].get('selector'):
                    template['spec']['selector'] = dict()
                template['spec']['selector']['matchExpressions'] = claim['selector']['match_expressions']
        return template

    templates = CommentedSeq()
    if self._volumes:
        for volname, vol_config in iteritems(self._volumes):
            if self.CONFIG_KEY in vol_config:
                if vol_config[self.CONFIG_KEY].get('state', 'present') == 'present':
                    templates.append(_volume_to_pvc(volname, vol_config[self.CONFIG_KEY]))
    return templates
def test_datatype_is_CommentedSeq(self):
    # A CommentedSeq containing a CommentedMap with a '__from__' key should
    # flatten to the mapped value.
    nested = CommentedMap()
    nested.insert(0, "to", "from")
    nested.insert(1, "__from__", "to")
    given = CommentedSeq()
    given.insert(0, "key")
    given.insert(1, "to")
    given.insert(2, nested)
    expected = CommentedSeq()
    for item in ("key", "to", "to"):
        expected.append(item)
    self.assertEqual(expected, parse_for_variable_hierarchies(given, "__from__"))
def generate_orchestration_playbook(self, url=None, namespace=None, local_images=True, **kwargs):
    """
    Generate an Ansible playbook to orchestrate services.
    :param url: registry URL where images will be pulled from
    :param namespace: registry namespace
    :param local_images: bypass pulling images, and use local copies
    :return: playbook dict
    """
    for service_name in self.services:
        image = self.get_latest_image_for_service(service_name)
        if local_images:
            self.services[service_name]['image'] = image.tags[0]
        else:
            self.services[service_name]['image'] = urljoin(urljoin(url, namespace), image.tags[0])

    if kwargs.get('k8s_auth'):
        # Bug fix: previously read kwargs['auth'], raising KeyError when the
        # caller supplied only 'k8s_auth'.
        self.k8s_client.set_authorization(kwargs['k8s_auth'])

    play = CommentedMap()
    play['name'] = 'Manage the lifecycle of {} on {}'.format(self.project_name, self.display_name)
    play['hosts'] = 'localhost'
    play['gather_facts'] = 'no'
    play['connection'] = 'local'
    play['roles'] = CommentedSeq()
    play['tasks'] = CommentedSeq()
    role = CommentedMap([
        ('role', 'kubernetes-modules')
    ])
    play['roles'].append(role)
    play.yaml_set_comment_before_after_key(
        'roles', before='Include Ansible Kubernetes and OpenShift modules', indent=4)
    play.yaml_set_comment_before_after_key(
        'tasks',
        before='Tasks for setting the application state. '
               'Valid tags include: start, stop, restart, destroy',
        indent=4)
    play['tasks'].append(self.deploy.get_namespace_task(state='present', tags=['start']))
    play['tasks'].append(self.deploy.get_namespace_task(state='absent', tags=['destroy']))
    play['tasks'].extend(self.deploy.get_service_tasks(tags=['start']))
    play['tasks'].extend(self.deploy.get_deployment_tasks(engine_state='stop', tags=['stop', 'restart']))
    play['tasks'].extend(self.deploy.get_deployment_tasks(tags=['start', 'restart']))
    play['tasks'].extend(self.deploy.get_pvc_tasks(tags=['start']))

    playbook = CommentedSeq()
    playbook.append(play)
    logger.debug(u'Created playbook to run project', playbook=playbook)
    return playbook
def _resolve_idmap(
        self,
        document,  # type: CommentedMap
        loader  # type: Loader
):
    # type: (...) -> None
    """Convert mapping-form idmap fields into their canonical list form.

    Each key of the mapping becomes the mapSubject field of one list entry;
    non-dict values are wrapped under the field's mapPredicate.  Source
    line/column/filename info is carried onto every new node.
    """
    # Convert fields with mapSubject into lists
    # use mapPredicate if the mapped value isn't a dict.
    for idmapField in loader.idmap:
        if idmapField in document:
            idmapFieldValue = document[idmapField]
            if (isinstance(idmapFieldValue, dict)
                    and "$import" not in idmapFieldValue
                    and "$include" not in idmapFieldValue):
                ls = CommentedSeq()
                for k in sorted(idmapFieldValue.keys()):
                    val = idmapFieldValue[k]
                    v = None  # type: Optional[CommentedMap]
                    if not isinstance(val, CommentedMap):
                        if idmapField in loader.mapPredicate:
                            v = CommentedMap(
                                ((loader.mapPredicate[idmapField], val), ))
                            v.lc.add_kv_line_col(
                                loader.mapPredicate[idmapField],
                                document[idmapField].lc.data[k])
                            v.lc.filename = document.lc.filename
                        else:
                            # Bug fix: the implicitly concatenated literals
                            # ran together ("dictand") and the %s args were
                            # never substituted into the message.
                            raise validate.ValidationException(
                                "mapSubject '{}' value '{}' is not a dict "
                                "and does not have a mapPredicate".format(k, v))
                    else:
                        v = val
                    # The mapping key becomes the entry's mapSubject value.
                    v[loader.idmap[idmapField]] = k
                    v.lc.add_kv_line_col(loader.idmap[idmapField],
                                         document[idmapField].lc.data[k])
                    v.lc.filename = document.lc.filename
                    ls.lc.add_kv_line_col(len(ls), document[idmapField].lc.data[k])
                    ls.lc.filename = document.lc.filename
                    ls.append(v)
                document[idmapField] = ls
def test_before_nested_seq_from_scratch(self):
    # Verify that a start comment set on a nested sequence is emitted
    # before its entries, indented under the parent key.
    from ruamel.yaml.comments import CommentedMap, CommentedSeq
    data = CommentedMap()
    datab = CommentedSeq()
    data['a'] = 1
    data['b'] = datab
    datab.append('c')
    datab.append('d')
    data['b'].yaml_set_start_comment('Hello\nWorld\n', indent=2)
    # NOTE(review): the expected-output literal below appears whitespace-
    # mangled (newlines collapsed); confirm exact layout against VCS history.
    compare(
        data,
        """ a: 1 b: {comment} Hello {comment} World - c - d """.format(comment='#'))
def _resolve_idmap(self,
                   document,  # type: CommentedMap
                   loader  # type: Loader
                   ):
    # type: (...) -> None
    """Convert mapping-form idmap fields into their canonical list form.

    Each key of the mapping becomes the mapSubject field of one list entry;
    non-dict values are wrapped under the field's mapPredicate.  Source
    line/column/filename info is carried onto every new node.
    """
    # Convert fields with mapSubject into lists
    # use mapPredicate if the mapped value isn't a dict.
    for idmapField in loader.idmap:
        if idmapField in document:
            idmapFieldValue = document[idmapField]
            if (isinstance(idmapFieldValue, dict)
                    and "$import" not in idmapFieldValue
                    and "$include" not in idmapFieldValue):
                ls = CommentedSeq()
                for k in sorted(idmapFieldValue.keys()):
                    val = idmapFieldValue[k]
                    v = None  # type: Optional[CommentedMap]
                    if not isinstance(val, CommentedMap):
                        if idmapField in loader.mapPredicate:
                            v = CommentedMap(
                                ((loader.mapPredicate[idmapField], val),))
                            v.lc.add_kv_line_col(
                                loader.mapPredicate[idmapField],
                                document[idmapField].lc.data[k])
                            v.lc.filename = document.lc.filename
                        else:
                            # Bug fix: the implicitly concatenated literals
                            # ran together ("dictand") and the %s args were
                            # never substituted into the message.
                            raise validate.ValidationException(
                                "mapSubject '{}' value '{}' is not a dict "
                                "and does not have a mapPredicate".format(k, v))
                    else:
                        v = val
                    # The mapping key becomes the entry's mapSubject value.
                    v[loader.idmap[idmapField]] = k
                    v.lc.add_kv_line_col(loader.idmap[idmapField],
                                         document[idmapField].lc.data[k])
                    v.lc.filename = document.lc.filename
                    ls.lc.add_kv_line_col(
                        len(ls), document[idmapField].lc.data[k])
                    ls.lc.filename = document.lc.filename
                    ls.append(v)
                document[idmapField] = ls
def _resolve_dsl(
    self,
    document: CommentedMap,
    loader: "Loader",
) -> None:
    """Expand type-DSL and secondaryFile-DSL shorthand in the document.

    String values are expanded via _apply_dsl; list values are expanded
    element-wise, then flattened one level and de-duplicated while keeping
    first-occurrence order. Line/column info is propagated onto every new
    sequence element.
    """
    fields = list(loader.type_dsl_fields)
    fields.extend(loader.secondaryFile_dsl_fields)
    for d in fields:
        if d in document:
            datum2 = datum = document[d]
            if isinstance(datum, str):
                datum2 = self._apply_dsl(
                    datum,
                    d,
                    loader,
                    # Fall back to the map's own lc when no per-key data exists.
                    document.lc.data[d] if document.lc.data else document.lc,
                    getattr(document.lc, "filename", ""),
                )
            elif isinstance(datum, CommentedSeq):
                datum2 = CommentedSeq()
                for n, t in enumerate(datum):
                    if datum.lc and datum.lc.data:
                        datum2.lc.add_kv_line_col(len(datum2), datum.lc.data[n])
                        datum2.append(
                            self._apply_dsl(
                                t, d, loader, datum.lc.data[n], document.lc.filename
                            )
                        )
                    else:
                        # No position data available; use an empty LineCol.
                        datum2.append(self._apply_dsl(t, d, loader, LineCol(), ""))
            if isinstance(datum2, CommentedSeq):
                # Flatten nested sequences one level and drop duplicates,
                # preserving the first occurrence (and its lc info).
                datum3 = CommentedSeq()
                seen = []  # type: List[str]
                for i, item in enumerate(datum2):
                    if isinstance(item, CommentedSeq):
                        for j, v in enumerate(item):
                            if v not in seen:
                                datum3.lc.add_kv_line_col(
                                    len(datum3), item.lc.data[j]
                                )
                                datum3.append(v)
                                seen.append(v)
                    else:
                        if item not in seen:
                            if datum2.lc and datum2.lc.data:
                                datum3.lc.add_kv_line_col(
                                    len(datum3), datum2.lc.data[i]
                                )
                            datum3.append(item)
                            seen.append(item)
                document[d] = datum3
            else:
                document[d] = datum2
def keymap_to_easy(keymap):
    """Convert a verbose keymap into the compact 'easy' representation."""

    def _condition_text(condition):
        # Conditions collapse into a single "key operator operand" string,
        # optionally prefixed with 'all '.
        prefix = 'all ' if condition.get('match_all', False) else ''
        return "{quantifier}{key} {operator} {operand}".format(
            quantifier=prefix,
            key=condition['key'],
            operator=condition['operator'],
            operand=yaml_dumps(condition['operand']),
        )

    def _easy_entry(entry):
        easy = {}
        keys = entry['keys']
        # A single key collapses from a one-element list to a scalar.
        easy['keys'] = keys[0] if len(keys) == 1 else keys
        if 'args' not in entry:
            easy['command'] = entry['command']
        elif entry['command'] == 'chain':
            # Chains flatten to a list of command names / {name: args} pairs.
            easy['command'] = [
                cmd[0] if len(cmd) == 1 else {cmd[0]: cmd[1]}
                for cmd in entry['args']['commands']
            ]
        else:
            easy['command'] = {entry['command']: entry['args']}
        if 'context' in entry:
            easy['context'] = [
                _condition_text(condition) for condition in entry['context']
            ]
        return sort_dict(easy, KEY_ORDER)

    result = CommentedSeq()
    for index, entry in enumerate(keymap):
        result.append(_easy_entry(entry))
        if index > 0:
            # Blank lines between entries for readability in the dump.
            result.yaml_set_comment_before_after_key(index, before='\n\n')
    return result
def get_pvc_templates(self):
    """
    Build a PersistentVolumeClaim template for each volume whose config
    requests state 'present'.

    :return: CommentedSeq of PVC templates
    """
    def _volume_to_pvc(claim_name, claim):
        # Build one template. Previously a second `CommentedMap()` assignment
        # discarded the map carrying 'force', which get_pvc_tasks() pops.
        template = CommentedMap()
        template['force'] = claim.get('force', False)
        template['apiVersion'] = self.DEFAULT_API_VERSION
        template['kind'] = "PersistentVolumeClaim"
        template['metadata'] = CommentedMap([
            ('name', claim_name),
            ('namespace', self._namespace_name)
        ])
        template['spec'] = CommentedMap()
        template['spec']['resources'] = {'requests': {'storage': '1Gi'}}
        if claim.get('volume_name'):
            template['spec']['volumeName'] = claim['volume_name']
        if claim.get('access_modes'):
            template['spec']['accessModes'] = claim['access_modes']
        if claim.get('requested_storage'):
            template['spec']['resources']['requests']['storage'] = claim['requested_storage']
        if claim.get('storage_class'):
            if not template['metadata'].get('annotations'):
                template['metadata']['annotations'] = {}
            # TODO verify this annotation syntax
            template['metadata']['annotations']['storageClass'] = claim['storage_class']
        if claim.get('selector'):
            # Bug fix: match_labels/match_expressions live under
            # claim['selector'], not at the top level of claim (the old code
            # raised KeyError).
            if claim['selector'].get('match_labels'):
                if not template['spec'].get('selector'):
                    template['spec']['selector'] = dict()
                template['spec']['selector']['matchLabels'] = claim['selector']['match_labels']
            if claim['selector'].get('match_expressions'):
                if not template['spec'].get('selector'):
                    template['spec']['selector'] = dict()
                template['spec']['selector']['matchExpressions'] = claim['selector']['match_expressions']
        return template

    templates = CommentedSeq()
    if self._volumes:
        for volname, vol_config in self._volumes.items():
            if self.CONFIG_KEY in vol_config:
                if vol_config[self.CONFIG_KEY].get('state', 'present') == 'present':
                    templates.append(_volume_to_pvc(volname, vol_config[self.CONFIG_KEY]))
    return templates
def test_before_nested_seq_from_scratch_block_seq_indent(self):
    from ruamel.yaml.comments import CommentedMap, CommentedSeq
    # Build the mapping and the nested sequence separately, then attach.
    data = CommentedMap()
    datab = CommentedSeq()
    for item in ('c', 'd'):
        datab.append(item)
    data['a'] = 1
    data['b'] = datab
    # The start comment is attached to the sequence node, indented two
    # columns so it lines up under the 'b:' key.
    data['b'].yaml_set_start_comment('Hello\nWorld\n', indent=2)
    exp = """
a: 1
b:
  # Hello
  # World
  - c
  - d
"""
    compare(data, exp.format(comment='#'), indent=4, block_seq_indent=2)
def get_secret_tasks(self, tags=[]):
    """Build one 'Create Secret' Ansible task per secret template."""
    module_name = 'k8s_v1_secret'
    tasks = CommentedSeq()
    for template in self.get_secret_templates():
        # Assemble the module arguments first, then attach them to the task.
        module_args = CommentedMap()
        module_args['state'] = 'present'
        if self._auth:
            # Propagate every configured auth parameter to the module.
            for auth_key, auth_value in self._auth.items():
                module_args[auth_key] = auth_value
        # 'force' rides along on the template and is not part of the
        # resource definition itself, so it is popped off here.
        module_args['force'] = template.pop('force', False)
        module_args['resource_definition'] = template
        task = CommentedMap()
        task['name'] = 'Create Secret'
        task[module_name] = module_args
        if tags:
            task['tags'] = copy.copy(tags)
        tasks.append(task)
    return tasks
def get_secret_tasks(self, tags=[]):
    """Return the task list that creates every secret template."""
    module_name = 'k8s_v1_secret'
    tasks = CommentedSeq()
    for template in self.get_secret_templates():
        task = CommentedMap()
        task['name'] = 'Create Secret'
        task[module_name] = CommentedMap()
        task[module_name]['state'] = 'present'
        # Copy any authentication settings into the module arguments.
        for auth_key in (self._auth or {}):
            task[module_name][auth_key] = self._auth[auth_key]
        # 'force' is task metadata carried on the template, not part of
        # the k8s resource definition.
        task[module_name]['force'] = template.pop('force', False)
        task[module_name]['resource_definition'] = template
        if tags:
            task['tags'] = copy.copy(tags)
        tasks.append(task)
    return tasks
def get_deployment_tasks(self, module_name=None, engine_state=None, tags=[]):
    """Build deployment create/scale tasks, plus removal tasks for services marked absent."""
    tasks = CommentedSeq()
    for template in self.get_deployment_templates(engine_state=engine_state):
        task = CommentedMap()
        # A non-None engine_state means the templates were generated with
        # replicas scaled to 0 (a stop request).
        task['name'] = ('Create deployment, and scale replicas up'
                        if engine_state is None else
                        'Stop running containers by scaling replicas down to 0')
        module_args = CommentedMap()
        module_args['state'] = 'present'
        if self._auth:
            for auth_key, auth_value in self._auth.items():
                module_args[auth_key] = auth_value
        # 'force' is carried on the template, not the resource definition.
        module_args['force'] = template.pop('force', False)
        module_args['resource_definition'] = template
        task[module_name] = module_args
        if tags:
            task['tags'] = copy.copy(tags)
        tasks.append(task)
    if engine_state != 'stop':
        for name, service_config in iteritems(self._services):
            # Only services explicitly marked 'absent' get a removal task.
            if service_config.get(self.CONFIG_KEY, {}).get('state', 'present') != 'absent':
                continue
            task = CommentedMap()
            task['name'] = 'Remove deployment'
            module_args = CommentedMap()
            module_args['state'] = 'absent'
            if self._auth:
                for auth_key, auth_value in self._auth.items():
                    module_args[auth_key] = auth_value
            module_args['name'] = name
            module_args['namespace'] = self._namespace_name
            task[module_name] = module_args
            if tags:
                task['tags'] = copy.copy(tags)
            tasks.append(task)
    return tasks
def parse_for_variable_hierarchies(data, keyword):
    """Recursively strip ``keyword`` wrapper keys from a parsed YAML tree.

    Whenever a mapping contains ``keyword``, the mapping is replaced by
    the value stored under that key (repeatedly, until no wrapper
    remains).  Sequences and mappings are rebuilt with every child
    processed the same way; any other value is returned unchanged.
    """
    data_type = type(data)
    if data_type is not CommentedSeq and data_type is not CommentedMap:
        return data
    # BUGFIX: the wrapper test must only apply to mappings.  For a
    # CommentedSeq that merely *contains* the keyword string,
    # `keyword in data` is also true, and `data[keyword]` would then
    # raise TypeError (list indices must be integers).
    if data_type is CommentedMap and keyword in data:
        return parse_for_variable_hierarchies(data[keyword], keyword)
    if data_type is CommentedSeq:
        data_updated = CommentedSeq()
        for sub_data in data:
            sub_data_updated = parse_for_variable_hierarchies(
                sub_data, keyword)
            data_updated.append(sub_data_updated)
        return data_updated
    # data_type is CommentedMap with no wrapper key present.
    data_updated = CommentedMap()
    for sub_data_key in data:
        sub_data_updated = parse_for_variable_hierarchies(
            data[sub_data_key], keyword)
        data_updated[sub_data_key] = sub_data_updated
    return data_updated
def _resolve_type_dsl(
        self,
        document,  # type: CommentedMap
        loader  # type: Loader
):
    # type: (...) -> None
    """Expand the type DSL for every type-DSL field of *document*, in place.

    String expressions are rewritten via ``self._type_dsl``; lists of
    expressions are expanded element-wise, then flattened and
    de-duplicated.  Line/column info is copied along so later error
    messages still point at the original YAML source.
    """
    for d in loader.type_dsl_fields:
        if d in document:
            datum2 = datum = document[d]
            if isinstance(datum, (str, six.text_type)):
                # Single string type expression.
                datum2 = self._type_dsl(datum, document.lc.data[d],
                                        document.lc.filename)
            elif isinstance(datum, CommentedSeq):
                # List of type expressions: expand each element while
                # preserving its original line/column record.
                datum2 = CommentedSeq()
                for n, t in enumerate(datum):
                    datum2.lc.add_kv_line_col(len(datum2), datum.lc.data[n])
                    datum2.append(
                        self._type_dsl(t, datum.lc.data[n],
                                       document.lc.filename))
            if isinstance(datum2, CommentedSeq):
                # Flatten one level of nesting introduced by expansion and
                # drop duplicates, keeping first-seen order.
                datum3 = CommentedSeq()
                seen = []  # type: List[Text]
                for i, item in enumerate(datum2):
                    if isinstance(item, CommentedSeq):
                        for j, v in enumerate(item):
                            if v not in seen:
                                datum3.lc.add_kv_line_col(
                                    len(datum3), item.lc.data[j])
                                datum3.append(v)
                                seen.append(v)
                    else:
                        if item not in seen:
                            datum3.lc.add_kv_line_col(
                                len(datum3), datum2.lc.data[i])
                            datum3.append(item)
                            seen.append(item)
                document[d] = datum3
            else:
                document[d] = datum2
def _resolve_type_dsl(self,
                      document,  # type: CommentedMap
                      loader  # type: Loader
                      ):
    # type: (...) -> None
    """Rewrite type-DSL shorthand in *document* (mutates it in place).

    For each field named in ``loader.type_dsl_fields``: a plain string is
    expanded via ``self._type_dsl``; a sequence is expanded per element.
    Resulting sequences are flattened and de-duplicated, with ruamel
    line/column metadata carried over for error reporting.
    """
    for d in loader.type_dsl_fields:
        if d in document:
            datum2 = datum = document[d]
            if isinstance(datum, (str, six.text_type)):
                # Scalar type expression.
                datum2 = self._type_dsl(datum, document.lc.data[
                    d], document.lc.filename)
            elif isinstance(datum, CommentedSeq):
                # Expand each list element, copying its line/column info.
                datum2 = CommentedSeq()
                for n, t in enumerate(datum):
                    datum2.lc.add_kv_line_col(
                        len(datum2), datum.lc.data[n])
                    datum2.append(self._type_dsl(
                        t, datum.lc.data[n], document.lc.filename))
            if isinstance(datum2, CommentedSeq):
                # Flatten nested lists and drop duplicates while keeping
                # first-seen order.
                datum3 = CommentedSeq()
                seen = []  # type: List[Text]
                for i, item in enumerate(datum2):
                    if isinstance(item, CommentedSeq):
                        for j, v in enumerate(item):
                            if v not in seen:
                                datum3.lc.add_kv_line_col(
                                    len(datum3), item.lc.data[j])
                                datum3.append(v)
                                seen.append(v)
                    else:
                        if item not in seen:
                            datum3.lc.add_kv_line_col(
                                len(datum3), datum2.lc.data[i])
                            datum3.append(item)
                            seen.append(item)
                document[d] = datum3
            else:
                document[d] = datum2
def generate_example_input(
    inptype: Optional[CWLOutputType],
    default: Optional[CWLOutputType],
) -> Tuple[Any, str]:
    """Convert a single input schema into an example.

    Returns an (example value, comment) pair; the comment describes the
    type so it can be attached next to the value in a generated YAML
    job template.
    """
    example = None
    comment = ""
    # Canned placeholder values keyed by primitive CWL type name.
    defaults = {
        "null": "null",
        "Any": "null",
        "boolean": False,
        "int": 0,
        "long": 0,
        "float": 0.1,
        "double": 0.1,
        "string": "a_string",
        "File": yaml.comments.CommentedMap([("class", "File"),
                                            ("path", "a/file/path")]),
        "Directory": yaml.comments.CommentedMap([("class", "Directory"),
                                                 ("path", "a/directory/path")]),
    }  # type: CWLObjectType
    if isinstance(inptype, MutableSequence):
        # A list of types is a union; "null" as a member means optional.
        optional = False
        if "null" in inptype:
            inptype.remove("null")
            optional = True
        if len(inptype) == 1:
            # Union collapsed to a single type after removing "null".
            example, comment = generate_example_input(inptype[0], default)
            if optional:
                if comment:
                    comment = "{} (optional)".format(comment)
                else:
                    comment = "optional"
        else:
            # Emit one example per union member, each with its own comment.
            example = CommentedSeq()
            for index, entry in enumerate(inptype):
                value, e_comment = generate_example_input(entry, default)
                example.append(value)
                example.yaml_add_eol_comment(e_comment, index)
            if optional:
                comment = "optional"
    elif isinstance(inptype, Mapping) and "type" in inptype:
        if inptype["type"] == "array":
            first_item = cast(MutableSequence[CWLObjectType],
                              inptype["items"])[0]
            items_len = len(cast(Sized, inptype["items"]))
            if items_len == 1 and "type" in first_item and first_item[
                    "type"] == "enum":
                # array of just an enum then list all the options
                example = first_item["symbols"]
                if "name" in first_item:
                    comment = u'array of type "{}".'.format(first_item["name"])
            else:
                # Recurse into the item schema and wrap as "array of ...".
                value, comment = generate_example_input(inptype["items"], None)
                comment = "array of " + comment
                if items_len == 1:
                    example = [value]
                else:
                    example = value
            if default is not None:
                example = default
        elif inptype["type"] == "enum":
            symbols = cast(List[str], inptype["symbols"])
            # Prefer explicit defaults; otherwise pick or fabricate a symbol.
            if default is not None:
                example = default
            elif "default" in inptype:
                example = inptype["default"]
            elif len(cast(Sized, inptype["symbols"])) == 1:
                example = symbols[0]
            else:
                example = "{}_enum_value".format(inptype.get("name", "valid"))
            comment = u'enum; valid values: "{}"'.format('", "'.join(symbols))
        elif inptype["type"] == "record":
            example = yaml.comments.CommentedMap()
            if "name" in inptype:
                comment = u'"{}" record type.'.format(inptype["name"])
            # Recurse into each record field, commenting every entry.
            for field in cast(List[CWLObjectType], inptype["fields"]):
                value, f_comment = generate_example_input(field["type"], None)
                example.insert(0, shortname(cast(str, field["name"])),
                               value, f_comment)
        elif "default" in inptype:
            example = inptype["default"]
            comment = u'default value of type "{}".'.format(inptype["type"])
        else:
            example = defaults.get(cast(str, inptype["type"]), str(inptype))
            comment = u'type "{}".'.format(inptype["type"])
    else:
        # Bare type name (or anything unrecognized): use the canned default
        # unless the caller supplied one.
        if not default:
            example = defaults.get(str(inptype), str(inptype))
            comment = u'type "{}"'.format(inptype)
        else:
            example = default
            comment = u'default value of type "{}".'.format(inptype)
    return example, comment
def _init_job(self, joborder, runtime_context):
    # type: (Mapping[str, str], RuntimeContext) -> Builder
    """Validate the job order and build a Builder with bound command-line args.

    Deep-copies *joborder*, fills in defaults and validates it against
    the input record schema, chooses output/tmp/stage directories
    (container or local), then binds inputs, baseCommand and arguments
    into ``bindings`` sorted python2-style.
    """
    if self.metadata.get("cwlVersion") != INTERNAL_VERSION:
        # Documents must have been upgraded to the internal CWL version
        # before execution.
        raise WorkflowException(
            "Process object loaded with version '%s', must update to '%s' in order to execute."
            % (self.metadata.get("cwlVersion"), INTERNAL_VERSION))

    job = cast(Dict[str, expression.JSON], copy.deepcopy(joborder))

    make_fs_access = getdefault(runtime_context.make_fs_access, StdFsAccess)
    fs_access = make_fs_access(runtime_context.basedir)

    load_listing_req, _ = self.get_requirement("LoadListingRequirement")

    if load_listing_req is not None:
        load_listing = load_listing_req.get("loadListing")
    else:
        load_listing = "no_listing"

    # Validate job order
    try:
        fill_in_defaults(self.tool["inputs"], job, fs_access)
        normalizeFilesDirs(job)
        schema = self.names.get_name("input_record_schema", "")
        if schema is None:
            raise WorkflowException("Missing input record schema: "
                                    "{}".format(self.names))
        validate.validate_ex(schema, job, strict=False,
                             logger=_logger_validation_warnings)

        if load_listing and load_listing != "no_listing":
            # Populate Directory listings (recursively for deep_listing).
            get_listing(fs_access, job,
                        recursive=(load_listing == "deep_listing"))

        visit_class(job, ("File", ), functools.partial(add_sizes, fs_access))

        if load_listing == "deep_listing":
            # Warn when a recursive listing produced a huge number of File
            # objects for any single input parameter.
            for i, inparm in enumerate(self.tool["inputs"]):
                k = shortname(inparm["id"])
                if k not in job:
                    continue
                v = job[k]
                dircount = [0]

                def inc(d):  # type: (List[int]) -> None
                    d[0] += 1

                visit_class(v, ("Directory", ), lambda x: inc(dircount))
                if dircount[0] == 0:
                    continue
                filecount = [0]
                visit_class(v, ("File", ), lambda x: inc(filecount))
                if filecount[0] > FILE_COUNT_WARNING:
                    # Long lines in this message are okay, will be reflowed based on terminal columns.
                    _logger.warning(
                        strip_dup_lineno(
                            SourceLine(self.tool["inputs"], i, str).
                            makeError(
                                """Recursive directory listing has resulted in a large number of File objects (%s) passed to the input parameter '%s'.  This may negatively affect workflow performance and memory use.

If this is a problem, use the hint 'cwltool:LoadListingRequirement' with "shallow_listing" or "no_listing" to change the directory listing behavior:

$namespaces:
  cwltool: "http://commonwl.org/cwltool#"
hints:
  cwltool:LoadListingRequirement:
    loadListing: shallow_listing

""" % (filecount[0], k))))

    except (validate.ValidationException, WorkflowException) as err:
        raise WorkflowException("Invalid job input record:\n" + str(err)) from err

    files = []  # type: List[Dict[str, str]]
    bindings = CommentedSeq()
    tmpdir = ""
    stagedir = ""

    docker_req, _ = self.get_requirement("DockerRequirement")
    default_docker = None

    if docker_req is None and runtime_context.default_container:
        default_docker = runtime_context.default_container

    if (docker_req or default_docker) and runtime_context.use_container:
        if docker_req is not None:
            # Check if docker output directory is absolute
            if docker_req.get("dockerOutputDirectory") and docker_req.get(
                    "dockerOutputDirectory").startswith("/"):
                outdir = docker_req.get("dockerOutputDirectory")
            else:
                outdir = (docker_req.get("dockerOutputDirectory")
                          or runtime_context.docker_outdir
                          or random_outdir())
        elif default_docker is not None:
            outdir = runtime_context.docker_outdir or random_outdir()
        tmpdir = runtime_context.docker_tmpdir or "/tmp"  # nosec
        stagedir = runtime_context.docker_stagedir or "/var/lib/cwl"
    else:
        # Local (non-container) execution: resolve real paths on disk.
        outdir = fs_access.realpath(
            runtime_context.outdir or tempfile.mkdtemp(prefix=getdefault(
                runtime_context.tmp_outdir_prefix, DEFAULT_TMP_PREFIX)))
        if self.tool["class"] != "Workflow":
            tmpdir = fs_access.realpath(runtime_context.tmpdir
                                        or tempfile.mkdtemp())
            stagedir = fs_access.realpath(runtime_context.stagedir
                                          or tempfile.mkdtemp())

    builder = Builder(
        job,
        files,
        bindings,
        self.schemaDefs,
        self.names,
        self.requirements,
        self.hints,
        {},
        runtime_context.mutation_manager,
        self.formatgraph,
        make_fs_access,
        fs_access,
        runtime_context.job_script_provider,
        runtime_context.eval_timeout,
        runtime_context.debug,
        runtime_context.js_console,
        runtime_context.force_docker_pull,
        load_listing,
        outdir,
        tmpdir,
        stagedir,
    )

    # Bind the validated inputs first; they extend `bindings` in place.
    bindings.extend(
        builder.bind_input(
            self.inputs_record_schema,
            job,
            discover_secondaryFiles=getdefault(runtime_context.toplevel,
                                               False),
        ))

    if self.tool.get("baseCommand"):
        # baseCommand entries always sort before everything else.
        for index, command in enumerate(aslist(self.tool["baseCommand"])):
            bindings.append({
                "position": [-1000000, index],
                "datum": command
            })

    if self.tool.get("arguments"):
        for i, arg in enumerate(self.tool["arguments"]):
            lc = self.tool["arguments"].lc.data[i]
            filename = self.tool["arguments"].lc.filename
            bindings.lc.add_kv_line_col(len(bindings), lc)
            if isinstance(arg, MutableMapping):
                arg = copy.deepcopy(arg)
                if arg.get("position"):
                    position = arg.get("position")
                    if isinstance(position, str):
                        # no need to test the
                        # CWLVersion as the v1.0
                        # schema only allows ints
                        position = builder.do_eval(position)
                        if position is None:
                            position = 0
                    arg["position"] = [position, i]
                else:
                    arg["position"] = [0, i]
                bindings.append(arg)
            elif ("$(" in arg) or ("${" in arg):
                # Bare expression string: becomes a valueFrom binding.
                cm = CommentedMap((("position", [0, i]),
                                   ("valueFrom", arg)))
                cm.lc.add_kv_line_col("valueFrom", lc)
                cm.lc.filename = filename
                bindings.append(cm)
            else:
                # Plain literal argument.
                cm = CommentedMap((("position", [0, i]),
                                   ("datum", arg)))
                cm.lc.add_kv_line_col("datum", lc)
                cm.lc.filename = filename
                bindings.append(cm)

    # use python2 like sorting of heterogeneous lists
    # (containing str and int types),
    key = functools.cmp_to_key(cmp_like_py2)

    # This awkward construction replaces the contents of
    # "bindings" in place (because Builder expects it to be
    # mutated in place, sigh, I'm sorry) with its contents sorted,
    # supporting different versions of Python and ruamel.yaml with
    # different behaviors/bugs in CommentedSeq.
    bindings_copy = copy.deepcopy(bindings)
    del bindings[:]
    bindings.extend(sorted(bindings_copy, key=key))

    if self.tool["class"] != "Workflow":
        builder.resources = self.evalResources(builder, runtime_context)
    return builder
def _init_job(self, joborder, runtimeContext):
    # type: (Dict[Text, Text], RuntimeContext) -> Builder
    """Validate the job order and construct the command-line Builder.

    Deep-copies *joborder*, validates it, selects output/tmp/stage
    directories (container or local), then binds inputs, baseCommand and
    arguments into ``bindings`` and sorts them python2-style.
    """
    job = cast(Dict[Text, Union[Dict[Text, Any], List[Any], Text, None]],
               copy.deepcopy(joborder))

    make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
    fs_access = make_fs_access(runtimeContext.basedir)

    # Validate job order
    try:
        fill_in_defaults(self.tool[u"inputs"], job, fs_access)
        normalizeFilesDirs(job)
        validate.validate_ex(self.names.get_name("input_record_schema", ""),
                             job, strict=False,
                             logger=_logger_validation_warnings)
    except (validate.ValidationException, WorkflowException) as e:
        raise WorkflowException("Invalid job input record:\n" + Text(e))

    files = []  # type: List[Dict[Text, Text]]
    bindings = CommentedSeq()
    tmpdir = u""
    stagedir = u""

    loadListingReq, _ = self.get_requirement(
        "http://commonwl.org/cwltool#LoadListingRequirement")
    if loadListingReq:
        loadListing = loadListingReq.get("loadListing")
    else:
        loadListing = "deep_listing"  # will default to "no_listing" in CWL v1.1

    dockerReq, _ = self.get_requirement("DockerRequirement")
    defaultDocker = None

    if dockerReq is None and runtimeContext.default_container:
        defaultDocker = runtimeContext.default_container

    if (dockerReq or defaultDocker) and runtimeContext.use_container:
        if dockerReq:
            # Check if docker output directory is absolute
            if dockerReq.get("dockerOutputDirectory") and \
                    dockerReq.get("dockerOutputDirectory").startswith('/'):
                outdir = dockerReq.get("dockerOutputDirectory")
            else:
                outdir = dockerReq.get("dockerOutputDirectory") or \
                    runtimeContext.docker_outdir or "/var/spool/cwl"
        elif defaultDocker:
            outdir = runtimeContext.docker_outdir or "/var/spool/cwl"
        tmpdir = runtimeContext.docker_tmpdir or "/tmp"
        stagedir = runtimeContext.docker_stagedir or "/var/lib/cwl"
    else:
        # Local execution: resolve real paths on the host filesystem.
        outdir = fs_access.realpath(
            runtimeContext.outdir or tempfile.mkdtemp(
                prefix=getdefault(runtimeContext.tmp_outdir_prefix,
                                  DEFAULT_TMP_PREFIX)))
        if self.tool[u"class"] != 'Workflow':
            tmpdir = fs_access.realpath(runtimeContext.tmpdir
                                        or tempfile.mkdtemp())
            stagedir = fs_access.realpath(runtimeContext.stagedir
                                          or tempfile.mkdtemp())

    builder = Builder(job, files, bindings, self.schemaDefs, self.names,
                      self.requirements, self.hints,
                      runtimeContext.eval_timeout, runtimeContext.debug,
                      {}, runtimeContext.js_console,
                      runtimeContext.mutation_manager, self.formatgraph,
                      make_fs_access, fs_access,
                      runtimeContext.force_docker_pull, loadListing,
                      outdir, tmpdir, stagedir,
                      runtimeContext.job_script_provider)

    # Bind the validated inputs first; they extend `bindings` in place.
    bindings.extend(builder.bind_input(
        self.inputs_record_schema, job,
        discover_secondaryFiles=getdefault(runtimeContext.toplevel, False)))

    if self.tool.get("baseCommand"):
        # baseCommand entries always sort before everything else.
        for n, b in enumerate(aslist(self.tool["baseCommand"])):
            bindings.append({
                "position": [-1000000, n],
                "datum": b
            })

    if self.tool.get("arguments"):
        for i, a in enumerate(self.tool["arguments"]):
            lc = self.tool["arguments"].lc.data[i]
            fn = self.tool["arguments"].lc.filename
            bindings.lc.add_kv_line_col(len(bindings), lc)
            if isinstance(a, dict):
                a = copy.copy(a)
                if a.get("position"):
                    a["position"] = [a["position"], i]
                else:
                    a["position"] = [0, i]
                bindings.append(a)
            elif ("$(" in a) or ("${" in a):
                # Bare expression string: becomes a valueFrom binding.
                cm = CommentedMap((
                    ("position", [0, i]),
                    ("valueFrom", a)
                ))
                cm.lc.add_kv_line_col("valueFrom", lc)
                cm.lc.filename = fn
                bindings.append(cm)
            else:
                # Plain literal argument.
                cm = CommentedMap((
                    ("position", [0, i]),
                    ("datum", a)
                ))
                cm.lc.add_kv_line_col("datum", lc)
                cm.lc.filename = fn
                bindings.append(cm)

    # use python2 like sorting of heterogeneous lists
    # (containing str and int types),
    # TODO: unify for both runtime
    if six.PY3:
        key = cmp_to_key(cmp_like_py2)
    else:  # PY2
        # NOTE: `dict` here shadows the builtin; kept as-is.
        key = lambda dict: dict["position"]
    bindings.sort(key=key)

    if self.tool[u"class"] != 'Workflow':
        builder.resources = self.evalResources(builder, runtimeContext)
    return builder
def get_deployment_templates(self, default_api=None, default_kind=None,
                             default_strategy=None, engine_state=None):
    """Build a k8s deployment template for each service marked 'present'.

    :param default_api: apiVersion stamped on each template.
    :param default_kind: resource kind for each template.
    :param default_strategy: default spec.strategy.type value.
    :param engine_state: when 'stop', replicas is forced to 0 so all
        containers are stopped.
    """

    def _service_to_k8s_container(name, config, container_name=None):
        # Translate one compose-style service config into a k8s container
        # spec; returns (container, volumes the container requires).
        container = CommentedMap()
        if container_name:
            container['name'] = container_name
        else:
            # BUGFIX: this previously read container['name'] (not yet set)
            # when 'container_name' was configured, raising KeyError; use
            # the configured value instead (the loop below only `pass`es
            # on the 'container_name' key).
            container['name'] = config['container_name'] if config.get('container_name') else name
        container['securityContext'] = CommentedMap()
        container['state'] = 'present'
        volumes = []
        for key, value in iteritems(config):
            if key in self.IGNORE_DIRECTIVES:
                pass
            elif key == 'cap_add':
                # Docker capability names map onto k8s securityContext adds.
                if not container['securityContext'].get('Capabilities'):
                    container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                for cap in value:
                    if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                        container['securityContext']['Capabilities']['add'].append(
                            self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
            elif key == 'cap_drop':
                if not container['securityContext'].get('Capabilities'):
                    container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                for cap in value:
                    if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                        container['securityContext']['Capabilities']['drop'].append(
                            self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
            elif key == 'command':
                # compose 'command' corresponds to k8s container 'args'.
                if isinstance(value, string_types):
                    container['args'] = shlex.split(value)
                else:
                    container['args'] = copy.copy(value)
            elif key == 'container_name':
                pass  # already consumed when choosing the container name
            elif key == 'entrypoint':
                # compose 'entrypoint' corresponds to k8s 'command'.
                if isinstance(value, string_types):
                    container['command'] = shlex.split(value)
                else:
                    container['command'] = copy.copy(value)
            elif key == 'environment':
                expanded_vars = self.expand_env_vars(value)
                if expanded_vars:
                    if 'env' not in container:
                        container['env'] = []
                    container['env'].extend(expanded_vars)
            elif key in ('ports', 'expose'):
                if not container.get('ports'):
                    container['ports'] = []
                self.add_container_ports(value, container['ports'])
            elif key == 'privileged':
                container['securityContext']['privileged'] = value
            elif key == 'read_only':
                container['securityContext']['readOnlyRootFileSystem'] = value
            elif key == 'stdin_open':
                container['stdin'] = value
            elif key == 'volumes':
                vols, vol_mounts = self.get_k8s_volumes(value)
                if vol_mounts:
                    if 'volumeMounts' not in container:
                        container['volumeMounts'] = []
                    container['volumeMounts'].extend(vol_mounts)
                if vols:
                    volumes += vols
            elif key == 'secrets':
                for secret, secret_config in iteritems(value):
                    if self.CONFIG_KEY in secret_config:
                        vols, vol_mounts, env_variables = self.get_k8s_secrets(
                            secret, secret_config[self.CONFIG_KEY])
                        if vol_mounts:
                            if 'volumeMounts' not in container:
                                container['volumeMounts'] = []
                            container['volumeMounts'].extend(vol_mounts)
                        if vols:
                            volumes += vols
                        if env_variables:
                            if 'env' not in container:
                                container['env'] = []
                            container['env'].extend(env_variables)
            elif key == 'working_dir':
                container['workingDir'] = value
            else:
                # Unknown directives pass straight through to the spec.
                container[key] = value
        return container, volumes

    def _update_volumes(existing_volumes, new_volumes):
        # Merge new volumes in by name, skipping duplicates.
        existing_names = {}
        for vol in existing_volumes:
            existing_names[vol['name']] = 1
        for vol in new_volumes:
            if vol['name'] not in existing_names:
                existing_volumes.append(vol)

    templates = CommentedSeq()
    for name, service_config in iteritems(self._services):
        containers = []
        volumes = []
        pod = {}
        if service_config.get('containers'):
            # Multi-container pod: one k8s container per configured entry.
            for c in service_config['containers']:
                cname = "{}-{}".format(name, c['container_name'])
                k8s_container, k8s_volumes = _service_to_k8s_container(
                    name, c, container_name=cname)
                containers.append(k8s_container)
                _update_volumes(volumes, k8s_volumes)
        else:
            k8s_container, k8s_volumes = _service_to_k8s_container(name, service_config)
            containers.append(k8s_container)
            volumes += k8s_volumes
        if service_config.get(self.CONFIG_KEY):
            # Copy deployment-level overrides (except 'force') into the pod.
            for key, value in iteritems(service_config[self.CONFIG_KEY]):
                if key == 'deployment':
                    for deployment_key, deployment_value in iteritems(value):
                        if deployment_key != 'force':
                            self.copy_attribute(pod, deployment_key, deployment_value)
        labels = CommentedMap([
            ('app', self._namespace_name),
            ('service', name)
        ])
        state = service_config.get(self.CONFIG_KEY, {}).get('state', 'present')
        if state == 'present':
            template = CommentedMap()
            template['apiVersion'] = default_api
            template['kind'] = default_kind
            template['force'] = service_config.get(self.CONFIG_KEY, {}).get(
                'deployment', {}).get('force', False)
            template['metadata'] = CommentedMap([
                ('name', name),
                ('labels', copy.deepcopy(labels)),
                ('namespace', self._namespace_name)
            ])
            template['spec'] = CommentedMap()
            template['spec']['template'] = CommentedMap()
            template['spec']['template']['metadata'] = CommentedMap([
                ('labels', copy.deepcopy(labels))])
            template['spec']['template']['spec'] = CommentedMap([
                ('containers', containers)
            ])
            # When the engine requests a 'stop', set replicas to 0, stopping all containers
            template['spec']['replicas'] = 1 if not engine_state == 'stop' else 0
            template['spec']['strategy'] = CommentedMap([('type', default_strategy)])
            if volumes:
                template['spec']['template']['spec']['volumes'] = volumes
            if pod:
                for key, value in iteritems(pod):
                    if key == 'securityContext':
                        template['spec']['template']['spec'][key] = value
                    elif key != 'replicas' or (key == 'replicas' and engine_state != 'stop'):
                        # Leave replicas at 0 when engine_state is 'stop'
                        template['spec'][key] = value
            templates.append(template)
    return templates
def _init_job(self, joborder, runtime_context):
    # type: (Mapping[Text, Text], RuntimeContext) -> Builder
    """Validate the job order and build the command-line Builder.

    Deep-copies *joborder*, validates it (warning on very large deep
    directory listings), selects output/tmp/stage directories, then
    binds inputs, baseCommand and arguments into ``bindings`` sorted
    python2-style.
    """
    job = cast(Dict[Text, Union[Dict[Text, Any], List[Any], Text, None]],
               copy.deepcopy(joborder))

    make_fs_access = getdefault(runtime_context.make_fs_access, StdFsAccess)
    fs_access = make_fs_access(runtime_context.basedir)

    load_listing_req, _ = self.get_requirement(
        "LoadListingRequirement")

    if load_listing_req is not None:
        load_listing = load_listing_req.get("loadListing")
    else:
        load_listing = "no_listing"

    # Validate job order
    try:
        fill_in_defaults(self.tool[u"inputs"], job, fs_access)
        normalizeFilesDirs(job)
        schema = self.names.get_name("input_record_schema", "")
        if schema is None:
            raise WorkflowException("Missing input record schema: "
                                    "{}".format(self.names))
        validate.validate_ex(schema, job, strict=False,
                             logger=_logger_validation_warnings)

        if load_listing and load_listing != "no_listing":
            # Populate Directory listings (recursively for deep_listing).
            get_listing(fs_access, job,
                        recursive=(load_listing == "deep_listing"))

        visit_class(job, ("File",), functools.partial(add_sizes, fs_access))

        if load_listing == "deep_listing":
            # Warn when a recursive listing produced a huge number of File
            # objects for any single input parameter.
            for i, inparm in enumerate(self.tool["inputs"]):
                k = shortname(inparm["id"])
                if k not in job:
                    continue
                v = job[k]
                dircount = [0]

                def inc(d):  # type: (List[int]) -> None
                    d[0] += 1

                visit_class(v, ("Directory",), lambda x: inc(dircount))
                if dircount[0] == 0:
                    continue
                filecount = [0]
                visit_class(v, ("File",), lambda x: inc(filecount))
                if filecount[0] > FILE_COUNT_WARNING:
                    # Long lines in this message are okay, will be reflowed based on terminal columns.
                    _logger.warning(strip_dup_lineno(SourceLine(
                        self.tool["inputs"], i, Text).makeError(
                            """Recursive directory listing has resulted in a large number of File objects (%s) passed to the input parameter '%s'.  This may negatively affect workflow performance and memory use.

If this is a problem, use the hint 'cwltool:LoadListingRequirement' with "shallow_listing" or "no_listing" to change the directory listing behavior:

$namespaces:
  cwltool: "http://commonwl.org/cwltool#"
hints:
  cwltool:LoadListingRequirement:
    loadListing: shallow_listing

""" % (filecount[0], k))))
    except (validate.ValidationException, WorkflowException) as err:
        raise WorkflowException("Invalid job input record:\n" + Text(err))

    files = []  # type: List[Dict[Text, Text]]
    bindings = CommentedSeq()
    tmpdir = u""
    stagedir = u""

    docker_req, _ = self.get_requirement("DockerRequirement")
    default_docker = None

    if docker_req is None and runtime_context.default_container:
        default_docker = runtime_context.default_container

    if (docker_req or default_docker) and runtime_context.use_container:
        if docker_req is not None:
            # Check if docker output directory is absolute
            if docker_req.get("dockerOutputDirectory") and \
                    docker_req.get("dockerOutputDirectory").startswith('/'):
                outdir = docker_req.get("dockerOutputDirectory")
            else:
                outdir = docker_req.get("dockerOutputDirectory") or \
                    runtime_context.docker_outdir or random_outdir()
        elif default_docker is not None:
            outdir = runtime_context.docker_outdir or random_outdir()
        tmpdir = runtime_context.docker_tmpdir or "/tmp"
        stagedir = runtime_context.docker_stagedir or "/var/lib/cwl"
    else:
        # Local execution: resolve real paths on the host filesystem.
        outdir = fs_access.realpath(
            runtime_context.outdir or tempfile.mkdtemp(
                prefix=getdefault(runtime_context.tmp_outdir_prefix,
                                  DEFAULT_TMP_PREFIX)))
        if self.tool[u"class"] != 'Workflow':
            tmpdir = fs_access.realpath(runtime_context.tmpdir
                                        or tempfile.mkdtemp())
            stagedir = fs_access.realpath(runtime_context.stagedir
                                          or tempfile.mkdtemp())

    builder = Builder(job, files, bindings, self.schemaDefs, self.names,
                      self.requirements, self.hints, {},
                      runtime_context.mutation_manager, self.formatgraph,
                      make_fs_access, fs_access,
                      runtime_context.job_script_provider,
                      runtime_context.eval_timeout,
                      runtime_context.debug,
                      runtime_context.js_console,
                      runtime_context.force_docker_pull,
                      load_listing, outdir, tmpdir, stagedir)

    # Bind the validated inputs first; they extend `bindings` in place.
    bindings.extend(builder.bind_input(
        self.inputs_record_schema, job,
        discover_secondaryFiles=getdefault(runtime_context.toplevel, False)))

    if self.tool.get("baseCommand"):
        # baseCommand entries always sort before everything else.
        for index, command in enumerate(aslist(self.tool["baseCommand"])):
            bindings.append({
                "position": [-1000000, index],
                "datum": command
            })

    if self.tool.get("arguments"):
        for i, arg in enumerate(self.tool["arguments"]):
            lc = self.tool["arguments"].lc.data[i]
            filename = self.tool["arguments"].lc.filename
            bindings.lc.add_kv_line_col(len(bindings), lc)
            if isinstance(arg, MutableMapping):
                arg = copy.deepcopy(arg)
                if arg.get("position"):
                    arg["position"] = [arg["position"], i]
                else:
                    arg["position"] = [0, i]
                bindings.append(arg)
            elif ("$(" in arg) or ("${" in arg):
                # Bare expression string: becomes a valueFrom binding.
                cm = CommentedMap((
                    ("position", [0, i]),
                    ("valueFrom", arg)
                ))
                cm.lc.add_kv_line_col("valueFrom", lc)
                cm.lc.filename = filename
                bindings.append(cm)
            else:
                # Plain literal argument.
                cm = CommentedMap((
                    ("position", [0, i]),
                    ("datum", arg)
                ))
                cm.lc.add_kv_line_col("datum", lc)
                cm.lc.filename = filename
                bindings.append(cm)

    # use python2 like sorting of heterogeneous lists
    # (containing str and int types),
    if PY3:
        key = functools.cmp_to_key(cmp_like_py2)
    else:  # PY2
        key = lambda d: d["position"]

    # This awkward construction replaces the contents of
    # "bindings" in place (because Builder expects it to be
    # mutated in place, sigh, I'm sorry) with its contents sorted,
    # supporting different versions of Python and ruamel.yaml with
    # different behaviors/bugs in CommentedSeq.
    bindings_copy = copy.deepcopy(bindings)
    del bindings[:]
    bindings.extend(sorted(bindings_copy, key=key))

    if self.tool[u"class"] != 'Workflow':
        builder.resources = self.evalResources(builder, runtime_context)
    return builder
def _init_job(self, joborder, runtimeContext):
    # type: (MutableMapping[Text, Text], RuntimeContext) -> Builder
    """Validate the job order and build the command-line Builder.

    Deep-copies *joborder*, validates it, selects output/tmp/stage
    directories (container or local), then binds inputs, baseCommand
    and arguments into ``bindings`` and sorts them python2-style.
    """
    job = cast(Dict[Text, Union[Dict[Text, Any], List[Any], Text, None]],
               copy.deepcopy(joborder))

    make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
    fs_access = make_fs_access(runtimeContext.basedir)

    # Validate job order
    try:
        fill_in_defaults(self.tool[u"inputs"], job, fs_access)
        normalizeFilesDirs(job)
        validate.validate_ex(self.names.get_name("input_record_schema", ""),
                             job, strict=False,
                             logger=_logger_validation_warnings)
    except (validate.ValidationException, WorkflowException) as e:
        raise WorkflowException("Invalid job input record:\n" + Text(e))

    files = []  # type: List[Dict[Text, Text]]
    bindings = CommentedSeq()
    tmpdir = u""
    stagedir = u""

    loadListingReq, _ = self.get_requirement(
        "http://commonwl.org/cwltool#LoadListingRequirement")
    if loadListingReq is not None:
        loadListing = loadListingReq.get("loadListing")
    else:
        loadListing = "deep_listing"  # will default to "no_listing" in CWL v1.1

    dockerReq, _ = self.get_requirement("DockerRequirement")
    defaultDocker = None

    if dockerReq is None and runtimeContext.default_container:
        defaultDocker = runtimeContext.default_container

    if (dockerReq or defaultDocker) and runtimeContext.use_container:
        if dockerReq is not None:
            # Check if docker output directory is absolute
            if dockerReq.get("dockerOutputDirectory") and \
                    dockerReq.get("dockerOutputDirectory").startswith('/'):
                outdir = dockerReq.get("dockerOutputDirectory")
            else:
                outdir = dockerReq.get("dockerOutputDirectory") or \
                    runtimeContext.docker_outdir or random_outdir()
        elif defaultDocker is not None:
            outdir = runtimeContext.docker_outdir or random_outdir()
        tmpdir = runtimeContext.docker_tmpdir or "/tmp"
        stagedir = runtimeContext.docker_stagedir or "/var/lib/cwl"
    else:
        # Local execution: resolve real paths on the host filesystem.
        outdir = fs_access.realpath(
            runtimeContext.outdir or tempfile.mkdtemp(
                prefix=getdefault(runtimeContext.tmp_outdir_prefix,
                                  DEFAULT_TMP_PREFIX)))
        if self.tool[u"class"] != 'Workflow':
            tmpdir = fs_access.realpath(runtimeContext.tmpdir
                                        or tempfile.mkdtemp())
            stagedir = fs_access.realpath(runtimeContext.stagedir
                                          or tempfile.mkdtemp())

    builder = Builder(job, files, bindings, self.schemaDefs, self.names,
                      self.requirements, self.hints, {},
                      runtimeContext.mutation_manager, self.formatgraph,
                      make_fs_access, fs_access,
                      runtimeContext.job_script_provider,
                      runtimeContext.eval_timeout, runtimeContext.debug,
                      runtimeContext.js_console,
                      runtimeContext.force_docker_pull, loadListing,
                      outdir, tmpdir, stagedir)

    # Bind the validated inputs first; they extend `bindings` in place.
    bindings.extend(builder.bind_input(
        self.inputs_record_schema, job,
        discover_secondaryFiles=getdefault(runtimeContext.toplevel, False)))

    if self.tool.get("baseCommand"):
        # baseCommand entries always sort before everything else.
        for n, b in enumerate(aslist(self.tool["baseCommand"])):
            bindings.append({
                "position": [-1000000, n],
                "datum": b
            })

    if self.tool.get("arguments"):
        for i, a in enumerate(self.tool["arguments"]):
            lc = self.tool["arguments"].lc.data[i]
            fn = self.tool["arguments"].lc.filename
            bindings.lc.add_kv_line_col(len(bindings), lc)
            if isinstance(a, MutableMapping):
                a = copy.deepcopy(a)
                if a.get("position"):
                    a["position"] = [a["position"], i]
                else:
                    a["position"] = [0, i]
                bindings.append(a)
            elif ("$(" in a) or ("${" in a):
                # Bare expression string: becomes a valueFrom binding.
                cm = CommentedMap((
                    ("position", [0, i]),
                    ("valueFrom", a)
                ))
                cm.lc.add_kv_line_col("valueFrom", lc)
                cm.lc.filename = fn
                bindings.append(cm)
            else:
                # Plain literal argument.
                cm = CommentedMap((
                    ("position", [0, i]),
                    ("datum", a)
                ))
                cm.lc.add_kv_line_col("datum", lc)
                cm.lc.filename = fn
                bindings.append(cm)

    # use python2 like sorting of heterogeneous lists
    # (containing str and int types),
    # TODO: unify for both runtime
    if PY3:
        key = functools.cmp_to_key(cmp_like_py2)
    else:  # PY2
        key = lambda d: d["position"]

    # This awkward construction replaces the contents of
    # "bindings" in place (because Builder expects it to be
    # mutated in place, sigh, I'm sorry) with its contents sorted,
    # supporting different versions of Python and ruamel.yaml with
    # different behaviors/bugs in CommentedSeq.
    bd = copy.deepcopy(bindings)
    del bindings[:]
    bindings.extend(sorted(bd, key=key))

    if self.tool[u"class"] != 'Workflow':
        builder.resources = self.evalResources(builder, runtimeContext)
    return builder
def get_deployment_templates(self, default_api=None, default_kind=None, default_strategy=None, engine_state=None):
    """
    Build one deployment template per service in ``self._services``.

    :param default_api: value stamped into each template's ``apiVersion``.
    :param default_kind: resource kind stamped into each template's ``kind``.
    :param default_strategy: value used for ``spec.strategy.type``.
    :param engine_state: when ``'stop'``, replicas is forced to 0 so all
        containers are stopped.
    :return: CommentedSeq of templates; services whose configured state is
        not ``'present'`` are skipped.
    """
    def _service_to_container(name, service):
        # Translate one compose-style service definition into a k8s container
        # spec, collecting any volumes it needs and pod-level options.
        container = CommentedMap()
        container['name'] = name
        container['securityContext'] = CommentedMap()
        container['state'] = 'present'
        volumes = []
        pod = {}
        for key, value in service.items():
            if key in self.IGNORE_DIRECTIVES:
                pass
            elif key == 'cap_add':
                if not container['securityContext'].get('Capabilities'):
                    container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                for cap in value:
                    # Only capabilities that have a docker -> k8s mapping are carried over
                    if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                        container['securityContext']['Capabilities']['add'].append(
                            self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
            elif key == 'cap_drop':
                if not container['securityContext'].get('Capabilities'):
                    container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                for cap in value:
                    if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                        container['securityContext']['Capabilities']['drop'].append(
                            self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
            elif key == 'command':
                # Compose 'command' maps to the k8s container's 'args'
                if isinstance(value, string_types):
                    container['args'] = shlex.split(value)
                else:
                    container['args'] = value
            elif key == 'container_name':
                container['name'] = value
            elif key == 'entrypoint':
                # Compose 'entrypoint' maps to the k8s container's 'command'
                if isinstance(value, string_types):
                    container['command'] = shlex.split(value)
                else:
                    container['command'] = copy.copy(value)
            elif key == 'environment':
                expanded_vars = self.expand_env_vars(value)
                if expanded_vars:
                    container['env'] = expanded_vars
            elif key in ('ports', 'expose'):
                if not container.get('ports'):
                    container['ports'] = []
                self.add_container_ports(value, container['ports'])
            elif key == 'privileged':
                container['securityContext']['privileged'] = value
            elif key == 'read_only':
                container['securityContext']['readOnlyRootFileSystem'] = value
            elif key == 'stdin_open':
                container['stdin'] = value
            elif key == 'volumes':
                vols, vol_mounts = self.get_k8s_volumes(value)
                if vol_mounts:
                    container['volumeMounts'] = vol_mounts
                if vols:
                    volumes += vols
            elif key == 'working_dir':
                container['workingDir'] = value
            else:
                # Unrecognized directives are passed straight through
                container[key] = value
        # Translate options: pod-level settings come from the 'deployment'
        # section of the service's config, excluding 'force' (handled later).
        if service.get(self.CONFIG_KEY):
            for key, value in service[self.CONFIG_KEY].items():
                if key == 'deployment':
                    for deployment_key, deployment_value in value.items():
                        if deployment_key != 'force':
                            self.copy_attribute(pod, deployment_key, deployment_value)
        return container, volumes, pod

    templates = CommentedSeq()
    for name, service_config in self._services.items():
        container, volumes, pod = _service_to_container(name, service_config)
        labels = CommentedMap([
            ('app', self._namespace_name),
            ('service', name)
        ])
        state = service_config.get(self.CONFIG_KEY, {}).get('state', 'present')
        if state == 'present':
            template = CommentedMap()
            template['apiVersion'] = default_api
            template['kind'] = default_kind
            template['force'] = service_config.get(self.CONFIG_KEY, {}).get('deployment', {}).get('force', False)
            template['metadata'] = CommentedMap([
                ('name', name),
                ('labels', copy.deepcopy(labels)),
                ('namespace', self._namespace_name)
            ])
            template['spec'] = CommentedMap()
            template['spec']['template'] = CommentedMap()
            template['spec']['template']['metadata'] = CommentedMap([('labels', copy.deepcopy(labels))])
            template['spec']['template']['spec'] = CommentedMap([
                ('containers', [container])  # TODO: allow multiple pods in a container
            ])
            # When the engine requests a 'stop', set replicas to 0, stopping all containers
            template['spec']['replicas'] = 1 if not engine_state == 'stop' else 0
            template['spec']['strategy'] = CommentedMap([('type', default_strategy)])
            if volumes:
                template['spec']['template']['spec']['volumes'] = volumes
            if pod:
                for key, value in pod.items():
                    if key == 'securityContext':
                        template['spec']['template']['spec'][key] = value
                    elif key != 'replicas' or (key == 'replicas' and engine_state != 'stop'):
                        # Leave replicas at 0 when engine_state is 'stop'
                        template['spec'][key] = value
            templates.append(template)
    return templates
def get_deployment_templates(self, default_api=None, default_kind=None, default_strategy=None, engine_state=None):
    """
    Build one deployment template per service in ``self._services``,
    supporting multi-container pods via a service's ``containers`` list.

    :param default_api: value stamped into each template's ``apiVersion``.
    :param default_kind: resource kind stamped into each template's ``kind``.
    :param default_strategy: optional mapping copied into ``spec.strategy``.
    :param engine_state: when ``'stop'``, replicas is forced to 0 so all
        containers are stopped.
    :return: CommentedSeq of templates; services whose configured state is
        not ``'present'`` are skipped.
    """
    def _service_to_k8s_container(name, config, container_name=None):
        # Translate one compose-style service definition into a k8s container
        # spec, collecting any volumes it needs.
        container = CommentedMap()
        if container_name:
            container['name'] = container_name
        else:
            # Bug fix: previously this read container['name'] (which was
            # never assigned) instead of config['container_name'], raising
            # KeyError whenever a service declared 'container_name'.
            container['name'] = config['container_name'] if config.get('container_name') else name
        container['securityContext'] = CommentedMap()
        container['state'] = 'present'
        volumes = []
        for key, value in iteritems(config):
            if key in self.IGNORE_DIRECTIVES:
                pass
            elif key == 'cap_add':
                if not container['securityContext'].get('Capabilities'):
                    container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                for cap in value:
                    # Only capabilities that have a docker -> k8s mapping are carried over
                    if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                        container['securityContext']['Capabilities']['add'].append(
                            self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
            elif key == 'cap_drop':
                if not container['securityContext'].get('Capabilities'):
                    container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                for cap in value:
                    if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                        container['securityContext']['Capabilities']['drop'].append(
                            self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
            elif key == 'command':
                # Compose 'command' maps to the k8s container's 'args'
                if isinstance(value, string_types):
                    container['args'] = shlex.split(value)
                else:
                    container['args'] = copy.copy(value)
            elif key == 'container_name':
                pass  # already consumed when choosing the container name above
            elif key == 'entrypoint':
                # Compose 'entrypoint' maps to the k8s container's 'command'
                if isinstance(value, string_types):
                    container['command'] = shlex.split(value)
                else:
                    container['command'] = copy.copy(value)
            elif key == 'environment':
                expanded_vars = self.expand_env_vars(value)
                if expanded_vars:
                    if 'env' not in container:
                        container['env'] = []
                    container['env'].extend(expanded_vars)
            elif key in ('ports', 'expose'):
                if not container.get('ports'):
                    container['ports'] = []
                self.add_container_ports(value, container['ports'])
            elif key == 'privileged':
                container['securityContext']['privileged'] = value
            elif key == 'read_only':
                container['securityContext']['readOnlyRootFileSystem'] = value
            elif key == 'stdin_open':
                container['stdin'] = value
            elif key == 'volumes':
                vols, vol_mounts = self.get_k8s_volumes(value)
                if vol_mounts:
                    if 'volumeMounts' not in container:
                        container['volumeMounts'] = []
                    container['volumeMounts'].extend(vol_mounts)
                if vols:
                    volumes += vols
            elif key == 'secrets':
                # Secrets can contribute volume mounts, volumes and env vars
                for secret, secret_config in iteritems(value):
                    if self.CONFIG_KEY in secret_config:
                        vols, vol_mounts, env_variables = self.get_k8s_secrets(
                            secret, secret_config[self.CONFIG_KEY])
                        if vol_mounts:
                            if 'volumeMounts' not in container:
                                container['volumeMounts'] = []
                            container['volumeMounts'].extend(vol_mounts)
                        if vols:
                            volumes += vols
                        if env_variables:
                            if 'env' not in container:
                                container['env'] = []
                            container['env'].extend(env_variables)
            elif key == 'working_dir':
                container['workingDir'] = value
            else:
                # Unrecognized directives are passed straight through
                container[key] = value
        return container, volumes

    def _update_volumes(existing_volumes, new_volumes):
        # Merge new_volumes into existing_volumes, de-duplicating by volume name.
        existing_names = {}
        for vol in existing_volumes:
            existing_names[vol['name']] = 1
        for vol in new_volumes:
            if vol['name'] not in existing_names:
                existing_volumes.append(vol)

    templates = CommentedSeq()
    for name, service_config in iteritems(self._services):
        containers = []
        volumes = []
        pod = {}
        if service_config.get('containers'):
            # Multi-container pod: one k8s container per entry, with the
            # container names prefixed by the service name; volumes merged.
            for c in service_config['containers']:
                cname = "{}-{}".format(name, c['container_name'])
                k8s_container, k8s_volumes = _service_to_k8s_container(
                    name, c, container_name=cname)
                containers.append(k8s_container)
                _update_volumes(volumes, k8s_volumes)
        else:
            k8s_container, k8s_volumes = _service_to_k8s_container(
                name, service_config)
            containers.append(k8s_container)
            volumes += k8s_volumes

        # Pod-level options come from the 'deployment' section of the
        # service's config, excluding 'force' (handled on the template).
        if service_config.get(self.CONFIG_KEY):
            for key, value in iteritems(service_config[self.CONFIG_KEY]):
                if key == 'deployment':
                    for deployment_key, deployment_value in iteritems(value):
                        if deployment_key != 'force':
                            self.copy_attribute(pod, deployment_key, deployment_value)

        labels = CommentedMap([('app', self._namespace_name), ('service', name)])
        state = service_config.get(self.CONFIG_KEY, {}).get('state', 'present')
        if state == 'present':
            template = CommentedMap()
            template['apiVersion'] = default_api
            template['kind'] = default_kind
            template['force'] = service_config.get(
                self.CONFIG_KEY, {}).get('deployment', {}).get('force', False)
            template['metadata'] = CommentedMap([
                ('name', name),
                ('labels', copy.deepcopy(labels)),
                ('namespace', self._namespace_name)
            ])
            template['spec'] = CommentedMap()
            template['spec']['template'] = CommentedMap()
            template['spec']['template']['metadata'] = CommentedMap([
                ('labels', copy.deepcopy(labels))
            ])
            template['spec']['template']['spec'] = CommentedMap([
                ('containers', containers)
            ])
            # When the engine requests a 'stop', set replicas to 0, stopping all containers
            template['spec']['replicas'] = 1 if not engine_state == 'stop' else 0
            if default_strategy:
                template['spec']['strategy'] = {}
                for service_key, service_value in iteritems(default_strategy):
                    self.copy_attribute(template['spec']['strategy'],
                                        service_key, service_value)
            if volumes:
                template['spec']['template']['spec']['volumes'] = volumes
            if pod:
                for key, value in iteritems(pod):
                    if key == 'securityContext':
                        template['spec']['template']['spec'][key] = value
                    elif key != 'replicas' or (key == 'replicas' and engine_state != 'stop'):
                        # Leave replicas at 0 when engine_state is 'stop'
                        template['spec'][key] = value
            templates.append(template)
    return templates
def upload_dependencies(arvrunner, name, document_loader,
                        workflowobj, uri, loadref_run,
                        include_primary=True, discovered_secondaryfiles=None):
    """Upload the dependencies of the workflowobj document to Keep.

    Returns a pathmapper object mapping local paths to keep references.  Also
    does an in-place update of references in "workflowobj".

    Use scandeps to find $import, $include, $schemas, run, File and Directory
    fields that represent external references.

    If workflowobj has an "id" field, this will reload the document to ensure
    it is scanning the raw document prior to preprocessing.

    :param arvrunner: runner providing the Arvados API client
        (``arvrunner.api``), ``num_retries`` and ``fs_access``.
    :param name: name passed through to ArvPathMapper for the upload.
    :param document_loader: loader used to fetch and resolve the document.
    :param workflowobj: parsed CWL document; updated in place.
    :param uri: base URI of the document.
    :param loadref_run: when True, also follow "run" references.
    :param include_primary: when True, the primary document itself is
        included in the upload.
    :param discovered_secondaryfiles: optional dict; filled with discovered
        secondaryFiles keyed by their mapped (uploaded) location.
    """
    loaded = set()

    def loadref(b, u):
        # Fetch and parse the raw document at (base, url), caching by
        # de-fragmented URI so each document is loaded at most once.
        joined = document_loader.fetcher.urljoin(b, u)
        defrg, _ = urllib.parse.urldefrag(joined)
        if defrg not in loaded:
            loaded.add(defrg)
            # Use fetch_text to get raw file (before preprocessing).
            text = document_loader.fetch_text(defrg)
            if isinstance(text, bytes):
                textIO = StringIO(text.decode('utf-8'))
            else:
                textIO = StringIO(text)
            return yaml.safe_load(textIO)
        else:
            return {}

    if loadref_run:
        loadref_fields = set(("$import", "run"))
    else:
        loadref_fields = set(("$import",))

    scanobj = workflowobj
    if "id" in workflowobj and not workflowobj["id"].startswith("_:"):
        # Need raw file content (before preprocessing) to ensure
        # that external references in $include and $mixin are captured.
        scanobj = loadref("", workflowobj["id"])

    metadata = scanobj

    sc_result = scandeps(uri, scanobj,
                         loadref_fields,
                         set(("$include", "$schemas", "location")),
                         loadref, urljoin=document_loader.fetcher.urljoin)

    sc = []      # File/Directory entries that need uploading
    uuids = {}   # collection UUID -> referencing object

    def collect_uuids(obj):
        loc = obj.get("location", "")
        sp = loc.split(":")
        if sp[0] == "keep":
            # Collect collection uuids that need to be resolved to
            # portable data hashes
            gp = collection_uuid_pattern.match(loc)
            if gp:
                uuids[gp.groups()[0]] = obj
            if collectionUUID in obj:
                uuids[obj[collectionUUID]] = obj

    def collect_uploads(obj):
        loc = obj.get("location", "")
        sp = loc.split(":")
        if len(sp) < 1:
            return
        if sp[0] in ("file", "http", "https"):
            # Record local files that need to be uploaded,
            # don't include file literals, keep references, etc.
            sc.append(obj)
        collect_uuids(obj)

    visit_class(workflowobj, ("File", "Directory"), collect_uuids)
    visit_class(sc_result, ("File", "Directory"), collect_uploads)

    # Resolve any collection uuids we found to portable data hashes
    # and assign them to uuid_map
    uuid_map = {}
    fetch_uuids = list(uuids.keys())
    while fetch_uuids:
        # For a large number of fetch_uuids, API server may limit
        # response size, so keep fetching until the API server has
        # nothing more to give us.
        lookups = arvrunner.api.collections().list(
            filters=[["uuid", "in", fetch_uuids]],
            count="none",
            select=["uuid", "portable_data_hash"]).execute(
                num_retries=arvrunner.num_retries)

        if not lookups["items"]:
            break

        for l in lookups["items"]:
            uuid_map[l["uuid"]] = l["portable_data_hash"]

        fetch_uuids = [u for u in fetch_uuids if u not in uuid_map]

    normalizeFilesDirs(sc)

    # Optionally include the primary document itself in the upload set.
    if include_primary and "id" in workflowobj:
        sc.append({"class": "File", "location": workflowobj["id"]})

    if "$schemas" in workflowobj:
        for s in workflowobj["$schemas"]:
            sc.append({"class": "File", "location": s})

    def visit_default(obj):
        # Drop "default" values that point at nonexistent files, and drop
        # those files from the upload set.
        remove = [False]
        def ensure_default_location(f):
            if "location" not in f and "path" in f:
                f["location"] = f["path"]
                del f["path"]
            if "location" in f and not arvrunner.fs_access.exists(f["location"]):
                # Doesn't exist, remove from list of dependencies to upload
                sc[:] = [x for x in sc if x["location"] != f["location"]]
                # Delete "default" from workflowobj
                remove[0] = True
        visit_class(obj["default"], ("File", "Directory"), ensure_default_location)
        if remove[0]:
            del obj["default"]

    find_defaults(workflowobj, visit_default)

    discovered = {}

    def discover_default_secondary_files(obj):
        builder_job_order = {}
        for t in obj["inputs"]:
            builder_job_order[shortname(t["id"])] = t["default"] if "default" in t else None
        # Need to create a builder object to evaluate expressions.
        builder = make_builder(builder_job_order,
                               obj.get("hints", []),
                               obj.get("requirements", []),
                               ArvRuntimeContext(),
                               metadata)
        discover_secondary_files(arvrunner.fs_access,
                                 builder,
                                 obj["inputs"],
                                 builder_job_order,
                                 discovered)

    # Resolve a deep copy so discovery doesn't mutate workflowobj itself.
    copied, _ = document_loader.resolve_all(copy.deepcopy(cmap(workflowobj)), base_url=uri, checklinks=False)
    visit_class(copied, ("CommandLineTool", "Workflow"), discover_default_secondary_files)

    for d in list(discovered):
        # Only interested in discovered secondaryFiles which are local
        # files that need to be uploaded.
        if d.startswith("file:"):
            sc.extend(discovered[d])
        else:
            del discovered[d]

    mapper = ArvPathMapper(arvrunner, sc, "",
                           "keep:%s",
                           "keep:%s/%s",
                           name=name,
                           single_collection=True)

    def setloc(p):
        # Rewrite a File/Directory location to its uploaded keep reference,
        # resolving collection UUIDs to portable data hashes where needed.
        loc = p.get("location")
        if loc and (not loc.startswith("_:")) and (not loc.startswith("keep:")):
            p["location"] = mapper.mapper(p["location"]).resolved
            return

        if not loc:
            return

        if collectionUUID in p:
            uuid = p[collectionUUID]
            if uuid not in uuid_map:
                raise SourceLine(p, collectionUUID, validate.ValidationException).makeError(
                    "Collection uuid %s not found" % uuid)
            gp = collection_pdh_pattern.match(loc)
            if gp and uuid_map[uuid] != gp.groups()[0]:
                # This file entry has both collectionUUID and a PDH
                # location. If the PDH doesn't match the one returned
                # the API server, raise an error.
                raise SourceLine(p, "location", validate.ValidationException).makeError(
                    "Expected collection uuid %s to be %s but API server reported %s" % (
                        uuid, gp.groups()[0], uuid_map[p[collectionUUID]]))

        gp = collection_uuid_pattern.match(loc)
        if not gp:
            return
        uuid = gp.groups()[0]
        if uuid not in uuid_map:
            raise SourceLine(p, "location", validate.ValidationException).makeError(
                "Collection uuid %s not found" % uuid)
        p["location"] = "keep:%s%s" % (uuid_map[uuid], gp.groups()[1] if gp.groups()[1] else "")
        p[collectionUUID] = uuid

    visit_class(workflowobj, ("File", "Directory"), setloc)
    visit_class(discovered, ("File", "Directory"), setloc)

    if discovered_secondaryfiles is not None:
        for d in discovered:
            discovered_secondaryfiles[mapper.mapper(d).resolved] = discovered[d]

    if "$schemas" in workflowobj:
        sch = CommentedSeq()
        for s in workflowobj["$schemas"]:
            if s in mapper:
                sch.append(mapper.mapper(s).resolved)
        workflowobj["$schemas"] = sch

    return mapper
def __init__(self, position: int, yaml: CommentedSeq, item: Any = None):
    """Anchor this object at *position* inside *yaml*.

    :param position: index within the sequence this object represents.
    :param yaml: the backing CommentedSeq; held via a weak reference so
        this object does not keep the sequence alive.
    :param item: initial value stored on ``self.value``.
    """
    # Weakly reference the backing sequence to avoid a reference cycle.
    self.__yaml = weakref.ref(yaml)
    self.__pos = position
    # Ensure the slot exists: grow the sequence with a placeholder when
    # the requested position is past its current end.
    if len(yaml) <= position:
        yaml.append(None)
    self.value = item
def generate_orchestration_playbook(self, url=None, namespace=None, settings=None, repository_prefix=None,
                                    pull_from_url=None, tag=None, vault_files=None, **kwargs):
    """
    Generate an Ansible playbook to orchestrate services.
    :param url: registry URL where images were pushed.
    :param namespace: registry namespace
    :param repository_prefix: prefix to use for the image name
    :param settings: settings dict from container.yml
    :param pull_from_url: if url to pull from is different than url
    :param tag: image tag to use; defaults to the image's build stamp
    :param vault_files: optional list of vault files appended to vars_files
    :return: playbook dict
    """
    def _update_service(service_name, service_config):
        # Point service_config['image'] at either the pushed registry image
        # (when url and namespace are given) or the latest local build.
        if url and namespace:
            # Reference previously pushed image
            image_id = self.get_latest_image_id_for_service(service_name)
            if not image_id:
                raise exceptions.AnsibleContainerConductorException(
                    u"Unable to get image ID for service {}. Did you forget to run "
                    u"`ansible-container build`?".format(service_name)
                )
            image_tag = tag or self.get_build_stamp_for_image(image_id)
            # repository_prefix == '' (falsy but not None) means "no prefix"
            if repository_prefix:
                image_name = "{}-{}".format(repository_prefix, service_name)
            elif repository_prefix is None:
                image_name = "{}-{}".format(self.project_name, service_name)
            else:
                image_name = service_name
            repository = "{}/{}".format(namespace, image_name)
            image_name = "{}:{}".format(repository, image_tag)
            pull_url = pull_from_url if pull_from_url else url
            service_config['image'] = "{}/{}".format(pull_url.rstrip('/'), image_name)
        else:
            # We're using a local image, so check that the image was built
            image = self.get_latest_image_for_service(service_name)
            if image is None:
                raise exceptions.AnsibleContainerConductorException(
                    u"No image found for service {}, make sure you've run `ansible-container "
                    u"build`".format(service_name)
                )
            service_config['image'] = image.tags[0]

    for service_name, service in iteritems(self.services):
        # set the image property of each container
        if service.get('containers'):
            for container in service['containers']:
                # Containers with roles get a derived "<service>-<container>"
                # image; role-less containers use their 'from' image as-is.
                if container.get('roles'):
                    container_service_name = "{}-{}".format(service_name, container['container_name'])
                    _update_service(container_service_name, container)
                else:
                    container['image'] = container['from']
        elif service.get('roles'):
            _update_service(service_name, service)
        else:
            service['image'] = service['from']

    # Build the play; CommentedMap preserves key order in the emitted YAML.
    play = CommentedMap()
    play['name'] = u'Manage the lifecycle of {} on {}'.format(self.project_name, self.display_name)
    play['hosts'] = 'localhost'
    play['gather_facts'] = 'no'
    play['connection'] = 'local'
    play['roles'] = CommentedSeq()
    play['vars_files'] = CommentedSeq()
    play['tasks'] = CommentedSeq()
    role = CommentedMap([
        ('role', 'ansible.kubernetes-modules')
    ])
    if vault_files:
        play['vars_files'].extend(vault_files)
    play['roles'].append(role)
    # Attach YAML comments that will appear above the keys in the output.
    play.yaml_set_comment_before_after_key(
        'roles', before='Include Ansible Kubernetes and OpenShift modules', indent=4)
    play.yaml_set_comment_before_after_key('tasks', before='Tasks for setting the application state. '
                                                           'Valid tags include: start, stop, restart, destroy',
                                           indent=4)
    # Task order matters: namespace first, then secrets/services, then
    # deployments (stop before start so 'restart' cycles them), then PVCs.
    play['tasks'].append(self.deploy.get_namespace_task(state='present', tags=['start']))
    play['tasks'].append(self.deploy.get_namespace_task(state='absent', tags=['destroy']))
    play['tasks'].extend(self.deploy.get_secret_tasks(tags=['start']))
    play['tasks'].extend(self.deploy.get_service_tasks(tags=['start']))
    play['tasks'].extend(self.deploy.get_deployment_tasks(engine_state='stop', tags=['stop', 'restart']))
    play['tasks'].extend(self.deploy.get_deployment_tasks(tags=['start', 'restart']))
    play['tasks'].extend(self.deploy.get_pvc_tasks(tags=['start']))
    playbook = CommentedSeq()
    playbook.append(play)
    logger.debug(u'Created playbook to run project', playbook=playbook)
    return playbook