def loadAnswers(answers_file, format=None):
    if not os.path.isfile(answers_file):
        raise AtomicAppUtilsException(
            "Provided answers file does not exist: %s" % answers_file)

    logger.debug("Loading answers from file: %s", answers_file)
    try:
        # Try to load the answers file with the specified answers file
        # format or the default format.
        result = anymarkup.parse_file(answers_file, format=format)
    except anymarkup.AnyMarkupError:
        # If parsing with the given format fails, fall back to loading
        # the file with anymarkup's generic format auto-detection.
        result = anymarkup.parse_file(answers_file)
    return result

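# A minimal usage sketch for the loader above (paths are hypothetical;
# when no format is given, anymarkup auto-detects it from the content):
#
#   answers = loadAnswers("/path/to/answers.conf", format="json")
#   answers = loadAnswers("/path/to/answers.yaml")  # format auto-detected
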
def read_cccp_index(self):
    if self.cccp_index_uri.startswith('file://'):
        file_to_read = self.cccp_index_uri[len('file://'):]
        self.cccp_index = anymarkup.parse_file(file_to_read)
    else:
        fetched = requests.get(self.cccp_index_uri)
        self.cccp_index = anymarkup.parse(fetched.text)

def _resetReplicas(self, path):
    data = anymarkup.parse_file(path)
    name = data["metadata"]["name"]
    cmd = [self.kubectl, "resize", "rc", name, "--replicas=4"]
    logger.info("Calling: %s" % " ".join(cmd))
    if not self.dryrun:
        subprocess.check_call(cmd)

def loadAnswers(self, data={}):
    if not data:
        raise Exception("No answers data given")

    if type(data) == dict:
        logger.debug("Data given %s" % data)
    elif os.path.exists(data):
        logger.debug("Path to answers file given, loading %s" % data)
        if os.path.isdir(data):
            if os.path.isfile(os.path.join(data, ANSWERS_FILE)):
                data = os.path.join(data, ANSWERS_FILE)
            else:
                logger.warning("No answers file found.")
                data = DEFAULT_ANSWERS
        if os.path.isfile(data):
            data = anymarkup.parse_file(data)
    else:
        logger.warning("No answers file found.")
        data = DEFAULT_ANSWERS

    if self.answers_data:
        self.answers_data = self._update(self.answers_data, data)
    else:
        self.answers_data = data

    return self.answers_data

def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF,
                   nodeps=False, dryrun=False, update=False):
    """
    Load a Nulecule application from a path in the source path itself, or
    in the specified destination path.

    Args:
        src (str): Path to load Nulecule application from.
        config (dict): Config data for Nulecule application.
        namespace (str): Namespace for Nulecule application.
        nodeps (bool): Do not pull external applications if True.
        dryrun (bool): Do not make any change to the underlying host.
        update (bool): Update existing application if True, else reuse it.

    Returns:
        A Nulecule instance or None in case of some dry run (installing
        from image).
    """
    nulecule_path = os.path.join(src, MAIN_FILE)
    if dryrun and not os.path.exists(nulecule_path):
        raise NuleculeException(
            "Installed Nulecule components are required to initiate dry-run")
    nulecule_data = anymarkup.parse_file(nulecule_path)
    nulecule = Nulecule(config=config, basepath=src, namespace=namespace,
                        **nulecule_data)
    nulecule.load_components(nodeps, dryrun)
    return nulecule

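# A hedged usage sketch for load_from_path (the directory is hypothetical;
# MAIN_FILE is the Nulecule file expected inside `src`):
#
#   app = Nulecule.load_from_path("/path/to/app", config={}, dryrun=True)
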
def getAppId(path):
    # obsolete
    if not os.path.isfile(path):
        return None
    data = anymarkup.parse_file(path)
    return data.get("id")

def loadAnswers(self, data=None):
    if not data:
        logger.info("No answers data given")

    if type(data) == dict:
        logger.debug("Data given %s", data)
    elif os.path.exists(data):
        logger.debug("Path to answers file given, loading %s", data)
        if os.path.isdir(data):
            if os.path.isfile(os.path.join(data, ANSWERS_FILE)):
                data = os.path.join(data, ANSWERS_FILE)
            else:
                self.write_sample_answers = True
        if os.path.isfile(data):
            data = anymarkup.parse_file(data)
    else:
        self.write_sample_answers = True

    if self.write_sample_answers:
        data = copy.deepcopy(DEFAULT_ANSWERS)

    if self.answers_data:
        self.answers_data = Utils.update(self.answers_data, data)
    else:
        self.answers_data = data

    return self.answers_data

def loadAnswers(answers_file):
    if not os.path.isfile(answers_file):
        raise AtomicAppUtilsException(
            "Provided answers file does not exist: %s" % answers_file)

    logger.debug("Loading answers from file: %s", answers_file)
    return anymarkup.parse_file(answers_file)

def from_file(filename):
    '''
    Load a file using anymarkup.

    Params:
        filename (str): File location
    '''
    return anymarkup.parse_file(filename)

def _resetReplicas(self, path):
    data = anymarkup.parse_file(path)
    name = data["id"]
    cmd = [self.kubectl, "resize", "rc", name, "--replicas=0",
           "--namespace=%s" % self.namespace]
    if self.dryrun:
        logger.info("DRY-RUN: %s", " ".join(cmd))
    else:
        subprocess.check_call(cmd)

def test_template_parameters(self):
    image = "some_image"
    se = SaasHerder(temp_path, None)
    se.template("tag", "redirector", output_dir, local=True)
    data = anymarkup.parse_file(os.path.join(output_dir, "redirector.yaml"))
    for item in data["items"]:
        if item["kind"] == "DeploymentConfig":
            assert item["spec"]["template"]["spec"]["containers"][0][
                "image"].startswith(image)

def test_template_environment_no_parameters(self):
    environment = "no_params"
    output_dir = tempfile.mkdtemp()
    se = SaasHerder(temp_path, None, environment)
    se.template("tag", "hash_length", output_dir, local=True)
    data = anymarkup.parse_file(
        os.path.join(output_dir, "hash_length.yaml"))
    for item in data["items"]:
        if item["kind"] == "DeploymentConfig":
            assert item["spec"]["replicas"] == 200

def test_sh_update(self):
    sh = SaasHerder(temp_path, None)
    output_file = os.path.join(temp_dir, "multiple_services.yaml")
    sh.update("hash", "multiple_services", "master", output_file=output_file)
    data = anymarkup.parse_file(output_file)
    assert len(data["services"]) == 2
    assert data["services"][1]["hash"] == "master"

def _load_index_file(self, index_file=INDEX_LOCATION):
    """
    Load the index file. If it does not exist, fetch it.
    """
    # If the file/path does not exist, retrieve the index yaml
    if not os.path.exists(index_file):
        logger.warning("Couldn't load index file: %s", index_file)
        logger.info("Retrieving index...")
        self._fetch_index_container()
    self.index = anymarkup.parse_file(index_file)

def test_template_hash_length(self):
    hash = "abcdef7"
    se = SaasHerder(temp_path, None)
    se.template("tag", "hash_length", output_dir, local=True)
    data = anymarkup.parse_file(
        os.path.join(output_dir, "hash_length.yaml"))
    for item in data["items"]:
        if item["kind"] == "DeploymentConfig":
            assert item["spec"]["template"]["spec"]["containers"][0][
                "image"].endswith(hash)

def aggregate(input_keywords_file, no_synonyms=None, use_progressbar=False,
              occurrence_count_filter=None):  # pylint: disable=too-many-branches  # noqa
    """Aggregate available topics.

    :param input_keywords_file: a list/tuple of input keywords files to process
    :param no_synonyms: do not compute synonyms for keywords
    :param use_progressbar: use a progressbar to report progress
    :param occurrence_count_filter: filter out keywords with a low occurrence count
    :return: a dict of aggregated keywords
    """
    if not input_keywords_file:
        raise ValueError('No input keywords files provided')

    occurrence_count_filter = occurrence_count_filter or 0
    all_keywords = {}
    for input_file in progressbarize(input_keywords_file or [], use_progressbar):
        input_content = anymarkup.parse_file(input_file)

        for keyword, value in input_content.items():
            keyword = str(keyword)
            if not KeywordsChief.matches_keyword_pattern(keyword):
                _logger.debug("Dropping keyword '%s' as it does not match keyword pattern.",
                              keyword)
                continue

            if keyword in all_keywords.keys() and value is not None and \
                    all_keywords[keyword] is not None:
                all_keywords[keyword]['occurrence_count'] = \
                    value.pop('occurrence_count', 0) + \
                    all_keywords[keyword].get('occurrence_count', 0)
                for conf, items in value.items():
                    all_keywords[keyword][str(conf)] = list(
                        set(items or []) | set(all_keywords[keyword][str(conf)] or []))
            else:
                all_keywords[keyword] = value if value is not None else {}

            if not no_synonyms:
                synonyms = list(set(all_keywords[keyword].get('synonyms') or []) |
                                set(KeywordsChief.compute_synonyms(keyword)))
                if synonyms:
                    if all_keywords[str(keyword)] is None:
                        all_keywords[str(keyword)] = {}
                    all_keywords[str(keyword)]['synonyms'] = synonyms

    # filter out keywords with a low occurrence count
    if occurrence_count_filter > 1:
        result = {}
        for keyword, value in all_keywords.items():
            if value.get('occurrence_count', 1) > occurrence_count_filter:
                result[keyword] = value
        return result

    return all_keywords

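# For illustration, a hypothetical input keywords file consistent with the
# keys aggregate() reads ('occurrence_count', 'synonyms', 'regexp'):
#
#   machine-learning:
#     occurrence_count: 42
#     synonyms:
#       - machine_learning
#     regexp:
#       - '^ml$'
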
def cli_diff(keywords1_file_path, keywords2_file_path, synonyms_only=False,
             keywords_only=False, regexp_only=False):
    """Retrieve diff on keyword files."""
    # pylint: disable=too-many-locals
    if synonyms_only and keywords_only:
        raise ValueError(
            'Cannot use --synonyms-only and --keywords-only at the same time')

    keywords1 = anymarkup.parse_file(keywords1_file_path)
    keywords2 = anymarkup.parse_file(keywords2_file_path)

    differ = find_diffs(keywords1, keywords2, keywords1_file_path,
                        keywords2_file_path, synonyms_only, keywords_only,
                        regexp_only)

    if not differ:
        print("Files '%s' and '%s' do not differ"
              % (keywords1_file_path, keywords2_file_path))

def get_vos():
    to_output = {
        "VOSummary": {
            "@xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
            "@xsi:schemaLocation": VO_SCHEMA_LOCATION,
            "VO": []
        }
    }
    vos = []
    reportinggroups_data = anymarkup.parse_file(
        "virtual-organizations/REPORTING_GROUPS.yaml")
    for file in os.listdir("virtual-organizations"):
        if file == "REPORTING_GROUPS.yaml":
            continue
        vo = anymarkup.parse_file("virtual-organizations/{0}".format(file))
        try:
            vos.append(expand_vo(vo, reportinggroups_data))
        except Exception:
            pprint.pprint(vo)
            raise
    to_output["VOSummary"]["VO"] = vos
    return to_output

def _resetReplicas(self, path):
    data = anymarkup.parse_file(path)
    name = data["id"]
    cmd = [
        self.kubectl, "resize", "rc", name,
        "--replicas=0",
        "--namespace=%s" % self.namespace
    ]
    if self.dryrun:
        logger.info("DRY-RUN: %s", " ".join(cmd))
    else:
        subprocess.check_call(cmd)

def get_projects():
    to_output = {"Projects": {"Project": []}}
    projects = []
    for file in os.listdir("projects"):
        project = anymarkup.parse_file("projects/{0}".format(file))
        projects.append(project)
    to_output["Projects"]["Project"] = projects
    return to_output

def echo_xml(use_yaml):
    file = '/tmp/output.xml'
    if use_yaml:
        data = anymarkup.parse_file(file)
        data = anymarkup.serialize(data, 'yaml')
        print('[COUT] CO_YAML_CONTENT {}'.format(str(data)[1:]))
        return
    with open(file, 'rb') as f:
        data = f.read()
        print('[COUT] CO_XML_CONTENT {}'.format(str(data)[1:]))
    return True

def get_topology(indir="../topology", contacts_data=None):
    root = Path(indir)
    support_centers = anymarkup.parse_file(root / "support-centers.yaml")
    service_types = anymarkup.parse_file(root / "services.yaml")
    tables = CommonData(contacts=contacts_data, service_types=service_types,
                        support_centers=support_centers)
    topology = Topology(tables)

    for facility_path in root.glob("*/FACILITY.yaml"):
        name = facility_path.parts[-2]
        id_ = anymarkup.parse_file(facility_path)["ID"]
        topology.add_facility(name, id_)
    for site_path in root.glob("*/*/SITE.yaml"):
        facility, name = site_path.parts[-3:-1]
        site_info = anymarkup.parse_file(site_path)
        id_ = site_info["ID"]
        topology.add_site(facility, name, id_, site_info)
    for yaml_path in root.glob("*/*/*.yaml"):
        facility, site, name = yaml_path.parts[-3:]
        if name == "SITE.yaml":
            continue
        if name.endswith("_downtime.yaml"):
            continue

        name = name.replace(".yaml", "")
        rg = anymarkup.parse_file(yaml_path)
        downtime_yaml_path = yaml_path.with_name(name + "_downtime.yaml")
        downtimes = None
        if downtime_yaml_path.exists():
            downtimes = ensure_list(anymarkup.parse_file(downtime_yaml_path))
        topology.add_rg(facility, site, name, rg)
        if downtimes:
            for downtime in downtimes:
                topology.add_downtime(site, name, downtime)

    return topology

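# The glob patterns above imply a directory layout roughly like this
# (names in angle brackets are illustrative):
#
#   <indir>/
#     support-centers.yaml
#     services.yaml
#     <facility>/FACILITY.yaml
#     <facility>/<site>/SITE.yaml
#     <facility>/<site>/<resource-group>.yaml
#     <facility>/<site>/<resource-group>_downtime.yaml
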
def bundle_datafile_spec(spec):
    work_dir = spec['work_dir']
    root = spec['root']
    name = spec['name']

    if not re.search(r'\.(ya?ml|json)$', name):
        return None

    path = os.path.join(root, name)
    rel_abs_path = path[len(work_dir):]

    logging.info("Processing: {}\n".format(rel_abs_path))

    return rel_abs_path, anymarkup.parse_file(path, force_types=None)

def bundle_datafiles(data_dir):
    bundle = {}
    for root, dirs, files in os.walk(data_dir, topdown=False):
        for name in files:
            if re.search(r'\.(ya?ml|json)$', name):
                path = os.path.join(root, name)
                rel_abs_path = path[len(data_dir):]
                sys.stderr.write("Processing: {}\n".format(rel_abs_path))
                bundle[rel_abs_path] = anymarkup.parse_file(path, force_types=None)
    return bundle

def test_template_filter_route(self):
    output_dir = tempfile.mkdtemp()
    se = SaasHerder(temp_path, None)
    se.template("tag", "hash_length", output_dir, local=True,
                template_filter=["Route"])
    data = anymarkup.parse_file(
        os.path.join(output_dir, "hash_length.yaml"))
    for item in data["items"]:
        if item["kind"] == "Route":
            assert False

def read_xml_parameters(self, xml_filename):
    xmldict = anymarkup.parse_file(xml_filename)
    geoms = xmldict['mujoco']['worldbody']['geom']
    bodies = xmldict['mujoco']['worldbody']['body']

    table_top_name = 'tableTop'
    table_roof_name = 'tableRoof'
    table_wall_name = 'table_wall_1'
    table_top_geom = None
    table_roof_geom = None
    table_wall_geom = None
    for geom in geoms:
        if geom['@name'] == table_top_name:
            table_top_geom = copy.deepcopy(geom)
        if geom['@name'] == table_roof_name:
            table_roof_geom = copy.deepcopy(geom)
        if geom['@name'] == table_wall_name:
            table_wall_geom = copy.deepcopy(geom)
    assert table_top_geom is not None
    assert table_roof_geom is not None

    table_top_size = [float(x) for x in table_top_geom['@size'].split(' ')]
    table_roof_pos = [float(x) for x in table_roof_geom['@pos'].split(' ')]
    table_wall_size = [float(x) for x in table_wall_geom['@size'].split(' ')]

    space_size = copy.deepcopy(table_top_size)
    # Position of the table roof defines the max vertical size of the space.
    space_size[2] = table_roof_pos[2]
    table_wall_width = table_wall_size[2]

    block_body_name = 'block'
    goal_body_name = 'goal'
    body_goal = None
    body_block = None
    for body in bodies:
        if body['@name'] == block_body_name:
            body_block = copy.deepcopy(body)
        if body['@name'] == goal_body_name:
            body_goal = copy.deepcopy(body)
    assert body_block is not None
    assert body_goal is not None

    goal_radius = float(body_goal['geom']['@size'])

    params = {
        'space_size': space_size,
        'table_wall_width': table_wall_width,
        'goal_radius': goal_radius
    }
    print(params)
    return params

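# anymarkup maps XML attributes to '@'-prefixed keys, so the lookups above
# assume a MuJoCo model roughly like this hypothetical fragment:
#
#   <mujoco>
#     <worldbody>
#       <geom name="tableTop" size="0.5 0.5 0.02"/>
#       <geom name="tableRoof" pos="0 0 0.4"/>
#       <geom name="table_wall_1" size="0.5 0.02 0.1"/>
#       <body name="block">...</body>
#       <body name="goal"><geom size="0.03"/></body>
#     </worldbody>
#   </mujoco>
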
def get_rgsummary(indir="topology"):
    topology = Topology()
    root = Path(indir)
    support_center_name_to_id = anymarkup.parse_file(root / "support-centers.yaml")
    service_name_to_id = anymarkup.parse_file(root / "services.yaml")

    for facility_path in root.glob("*/FACILITY.yaml"):
        name = facility_path.parts[-2]
        id_ = anymarkup.parse_file(facility_path)["ID"]
        topology.add_facility(name, id_)
    for site_path in root.glob("*/*/SITE.yaml"):
        facility, name = site_path.parts[-3:-1]
        id_ = anymarkup.parse_file(site_path)["ID"]
        topology.add_site(facility, name, id_)
    for yaml_path in root.glob("*/*/*.yaml"):
        facility, site, name = yaml_path.parts[-3:]
        if name == "SITE.yaml":
            continue

        name = name.replace(".yaml", "")
        rg = anymarkup.parse_file(yaml_path)
        try:
            facility_id = topology.data[facility]["ID"]
            site_id = topology.data[facility][site]["ID"]

            rg["Facility"] = OrderedDict([("ID", facility_id), ("Name", facility)])
            rg["Site"] = OrderedDict([("ID", site_id), ("Name", site)])
            rg["GroupName"] = name

            topology.add_rg(facility, site, name,
                            expand_resourcegroup(rg, service_name_to_id,
                                                 support_center_name_to_id))
        except Exception as e:
            if not isinstance(e, RGError):
                raise RGError(rg) from e

    return topology.get_resource_summary()

def cli_diff(keywords1_file_path, keywords2_file_path, synonyms_only=False,
             keywords_only=False, regexp_only=False):
    """Compute diff on keyword files."""
    # pylint: disable=too-many-locals
    if synonyms_only and keywords_only:
        raise ValueError('Cannot use --synonyms-only and --keywords-only at the same time')

    keywords1 = anymarkup.parse_file(keywords1_file_path)
    keywords2 = anymarkup.parse_file(keywords2_file_path)

    differ = False
    for action, keywords_a, keywords_b, file_path in (
            ('Removed', keywords1, keywords2, keywords1_file_path),
            ('Added', keywords2, keywords1, keywords2_file_path)):
        for keyword, value in keywords_a.items():
            if not synonyms_only and not regexp_only and keyword not in keywords_b.keys():
                print("%s keyword '%s' in file '%s'" % (action, keyword, file_path))
                differ = True
                continue

            if not keywords_only and not regexp_only and value is not None:
                for synonym in (value.get('synonyms') or []):  # pylint: disable=superfluous-parens
                    if synonym not in keywords_b[keyword].get('synonyms', []):
                        print("%s synonym '%s' for keyword '%s' in file '%s'"
                              % (action, synonym, keyword, file_path))
                        differ = True

            if not keywords_only and not synonyms_only and value is not None:
                for regexp in (value.get('regexp') or []):  # pylint: disable=superfluous-parens
                    if regexp not in keywords_b[keyword].get('regexp', []):
                        print("%s regexp '%s' for keyword '%s' in file '%s'"
                              % (action, regexp, keyword, file_path))
                        differ = True

    if not differ:
        print("Files '%s' and '%s' do not differ"
              % (keywords1_file_path, keywords2_file_path))

def get_projects_xml():
    """Returns the serialized XML (as a string)."""
    to_output = {"Projects": {"Project": []}}
    projects = []
    for file in os.listdir("projects"):
        project = anymarkup.parse_file("projects/{0}".format(file))
        projects.append(project)
    to_output["Projects"]["Project"] = projects
    return anymarkup.serialize(to_output, 'xml').decode()

def get_vos_xml():
    """Returns the serialized XML (as a string)."""
    to_output = {"VOSummary": {"VO": []}}
    vos = []
    for file in os.listdir("virtual-organizations"):
        vo = anymarkup.parse_file("virtual-organizations/{0}".format(file))
        vos.append(vo)
    to_output["VOSummary"]["VO"] = vos
    return anymarkup.serialize(to_output, 'xml').decode()

def get_projects(indir="../projects"):
    to_output = {"Projects": {"Project": []}}
    projects = []
    for file in os.listdir(indir):
        project = OrderedDict.fromkeys([
            "ID", "Name", "Description", "PIName", "Organization",
            "Department", "FieldOfScience", "Sponsor"
        ])
        project.update(anymarkup.parse_file(os.path.join(indir, file)))
        projects.append(project)
    to_output["Projects"]["Project"] = projects
    return to_output

def main(argv=sys.argv):
    try:
        infile, outdir = argv[1:3]
    except ValueError:
        print("Usage: %s <input xml> <output dir>" % argv[0], file=sys.stderr)
        return 2
    if os.path.exists(outdir):
        print("Warning: %s already exists" % outdir, file=sys.stderr)

    parsed = anymarkup.parse_file(infile)['ResourceSummary']
    topology = topology_from_parsed_xml(parsed)
    write_topology_to_yamls(topology, outdir)
    print("Topology written to", outdir)
    return 0

def echo_xml(use_yaml):
    for root, dirs, files in os.walk('/tmp/output'):
        for file_name in files:
            file = os.path.join(root, file_name)
            if use_yaml:
                data = anymarkup.parse_file(file)
                data = anymarkup.serialize(data, 'yaml')
                print('[COUT] CO_YAML_CONTENT {}'.format(str(data)[1:]))
                continue
            if file_name.endswith('.xml'):
                with open(file, 'rb') as f:
                    data = f.read()
                    print('[COUT] CO_XML_CONTENT {}'.format(str(data)[1:]))
    return True

def loadMainfile(self, path=None):
    if not os.path.exists(path):
        raise Exception("%s not found: %s" % (MAIN_FILE, path))

    self.mainfile_data = anymarkup.parse_file(path)
    if "id" in self.mainfile_data:
        self.app_id = self.mainfile_data["id"]
        logger.debug("Setting app id to %s", self.mainfile_data["id"])
    else:
        raise Exception("Missing ID in %s" % self.mainfile_data)
    if PARAMS_KEY in self.mainfile_data:
        logger.debug("Loading params")
        self.loadParams(self.mainfile_data)

    return self.mainfile_data

def services(self):
    """
    Loads and returns all the services in the services dir.
    """
    if not self._services:
        self._services = {}
        self._service_files = {}
        for f in os.listdir(self.services_dir):
            service_file = os.path.join(self.services_dir, f)
            service = anymarkup.parse_file(service_file)
            for s in service["services"]:
                s["file"] = f
                self._services[s["name"]] = s
                if not self._service_files.get(f):
                    self._service_files[f] = []
                self._service_files[f].append(s["name"])
    return self._services

def get_data_file(file_path):
    """Retrieve data from a local file.

    :param file_path: path to file to read data from
    :type file_path: str
    :return: tuple, x and y values of function to be interpolated
    :rtype: tuple
    """
    _logger.info("Collecting data from local file '%s'...", file_path)
    content = anymarkup.parse_file(file_path)
    x_values, y_values = content['x'], content['y']
    if len(x_values) != len(y_values):
        raise InputError(
            "x and y vectors should be of same shape, got %d and %d (x and y) instead"
            % (len(x_values), len(y_values)))
    return x_values, y_values

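# A hypothetical data file accepted by get_data_file() (x and y must have
# the same length), followed by a usage sketch:
#
#   x: [0.0, 1.0, 2.0]
#   y: [1.0, 2.7, 7.4]
#
#   x_values, y_values = get_data_file("data.yaml")
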
def read_nulecule(self, path):
    """Return the id of this app and its graph.

    The returned graph is in the form {<name>: <other_graph_object_attrs>}.
    """
    # TODO: can be named differently; inspect .cccp.yaml to find out
    nlc_path = os.path.join(path, 'Nulecule')
    logger.debug('Reading Nulecule from: %s', nlc_path)
    nlc_content = anymarkup.parse_file(nlc_path)
    # TODO: we want to implement the graph as an object and hide details and
    # potential differences in Nulecule spec versions behind it
    appid = nlc_content['id']
    graph = nlc_content['graph']
    appgraph = {}
    for item in graph:
        key = item.pop('name')
        appgraph[key] = item
    return appid, appgraph

def echo_xml(dir_name, use_yaml):
    if dir_name and dir_name != '.':
        dir_name = '{}/{}'.format(REPO_PATH, dir_name)
    else:
        dir_name = REPO_PATH

    for root, dirs, files in os.walk('{}/target'.format(dir_name)):
        for file_name in files:
            if file_name.endswith('.xml'):
                if use_yaml:
                    data = anymarkup.parse_file(os.path.join(root, file_name))
                    data = anymarkup.serialize(data, 'yaml')
                    print('[COUT] CO_YAML_CONTENT {}'.format(str(data)[1:]))
                    continue
                with open(os.path.join(root, file_name), 'rb') as f:
                    data = f.read()
                    print('[COUT] CO_XML_CONTENT {}'.format(str(data)[1:]))
    return True

def _resource_identity(self, path):
    """Finds the Kubernetes resource name / identity from resource manifest
    and raises if the manifest is not supported.

    :arg path: Absolute path to Kubernetes resource manifest
    :return: str -- Resource name / identity
    :raises: ProviderFailedException
    """
    data = anymarkup.parse_file(path)
    if data["apiVersion"] == "v1":
        return data["metadata"]["name"]
    elif data["apiVersion"] in ["v1beta3", "v1beta2", "v1beta1"]:
        msg = ("%s is not supported API version, update Kubernetes "
               "artifacts to v1 API version. Error in processing "
               "%s manifest." % (data["apiVersion"], path))
        raise ProviderFailedException(msg)
    else:
        raise ProviderFailedException("Malformed kube file: %s" % path)

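# For reference, a hypothetical v1 manifest that _resource_identity() would
# accept, returning "my-app":
#
#   apiVersion: v1
#   kind: ReplicationController
#   metadata:
#     name: my-app
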
def validate_file(schemas_root, filename):
    kind = ValidatedFileKind.DATA_FILE

    logging.info('validating file: {}'.format(filename))

    try:
        data = anymarkup.parse_file(filename, force_types=None)
    except anymarkup.AnyMarkupError as e:
        return ValidationError(kind, filename, "FILE_PARSE_ERROR", e)

    try:
        schema_url = data[u'$schema']
    except KeyError as e:
        return ValidationError(kind, filename, "MISSING_SCHEMA_URL", e)

    try:
        schema = fetch_schema(schemas_root, schema_url)
    except MissingSchemaFile as e:
        return ValidationError(kind, filename, "MISSING_SCHEMA_FILE", e, schema_url)
    except requests.HTTPError as e:
        return ValidationError(kind, filename, "HTTP_ERROR", e, schema_url)
    except anymarkup.AnyMarkupError as e:
        return ValidationError(kind, filename, "SCHEMA_PARSE_ERROR", e, schema_url)

    try:
        resolver = get_resolver(schemas_root, schema)
        jsonschema.Draft4Validator(schema, resolver=resolver).validate(data)
    except jsonschema.ValidationError as e:
        return ValidationError(kind, filename, "VALIDATION_ERROR", e, schema_url)
    except jsonschema.SchemaError as e:
        return ValidationError(kind, filename, "SCHEMA_ERROR", e, schema_url)
    except TypeError as e:
        return ValidationError(kind, filename, "SCHEMA_TYPE_ERROR", e, schema_url)

    return ValidationOK(kind, filename, schema_url)

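# A minimal driving sketch (paths are hypothetical; the data file must carry
# a top-level '$schema' key pointing at its schema):
#
#   result = validate_file("/path/to/schemas", "data/app.yaml")
#   if isinstance(result, ValidationError):
#       print(result)
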
def load_services(self):
    """
    Returns two dictionaries that contain all the services:
    1. Services indexed by service name
    2. Lists of services indexed by service file

    Note that there may be multiple services defined in a single
    service file.
    """
    _services = {}
    _service_files = {}
    for f in os.listdir(self.services_dir):
        service_file = os.path.join(self.services_dir, f)
        service = anymarkup.parse_file(service_file)
        for s in service["services"]:
            s["file"] = f
            self.apply_environment_config(s)
            _services[s["name"]] = s
            _service_files.setdefault(f, []).append(s["name"])
    return _services, _service_files

def loadParams(self, data=None):
    if type(data) == dict:
        logger.debug("Data given: %s", data)
    elif os.path.exists(data):
        logger.debug("Path given, loading %s", data)
        data = anymarkup.parse_file(data)
    else:
        raise Exception("Given params are broken: %s" % data)

    if "specversion" in data:
        logger.debug("Params part of %s", MAIN_FILE)
        tmp = {}
        tmp[GLOBAL_CONF] = self.fromListToDict(data[PARAMS_KEY])
        data = tmp
    else:
        logger.debug("Params in separate file")

    if self.params_data:
        self.params_data = Utils.update(self.params_data, data)
    else:
        self.params_data = data

    return self.params_data

def setup_method(self, method):
    self.cccp_index = anymarkup.parse_file(CCCP_INDEX)

def cli_build(args):
    if os.path.isfile(os.path.join(os.getcwd(), MAIN_FILE)):
        data = anymarkup.parse_file(os.path.join(os.getcwd(), MAIN_FILE))
        ac = Create(data["id"], args.dryrun)
        ac.build(args.TAG)

def _loadCredentials(self, path=DEFAULT_CREDS):
    credentials = anymarkup.parse_file(path)
    return (credentials["credentials"]["user"],
            credentials["credentials"]["password"])