Example #1
def get_slack_usernames_from_owners(owners_raw_urls, owners_aliases_raw_url,
                                    users, usergroup, user_key,
                                    ssl_verify=True,
                                    missing_user_log_method=logging.warning):
    all_slack_usernames = []
    all_username_keys = [u[user_key] for u in users]

    owners_aliases = {}
    if owners_aliases_raw_url:
        r = get_raw_owners_content(owners_aliases_raw_url,
                                   ssl_verify=ssl_verify)
        try:
            content = anymarkup.parse(r.content, force_types=None)
            owners_aliases = content['aliases']
        except (anymarkup.AnyMarkupError, KeyError):
            msg = "Could not parse data. Skipping owners_aliases file: {}"
            logging.warning(msg.format(owners_aliases_raw_url))

    for owners_raw_url in owners_raw_urls or []:
        r = get_raw_owners_content(owners_raw_url, ssl_verify=ssl_verify)
        try:
            content = anymarkup.parse(r.content, force_types=None)
            owners = set()
            for value in content.values():
                for user in value:
                    if user in owners_aliases:
                        for alias_user in owners_aliases[user]:
                            owners.add(alias_user)
                    else:
                        owners.add(user)
        except (anymarkup.AnyMarkupError, KeyError):
            msg = "Could not parse data. Skipping owners file: {}"
            logging.warning(msg.format(owners_raw_url))
            continue

        if not owners:
            continue

        slack_usernames = [get_slack_username(u)
                           for u in users
                           if u[user_key] in owners]
        not_found_users = [owner for owner in owners
                           if owner not in all_username_keys]
        if not_found_users:
            msg = f'[{usergroup}] {user_key} not found in app-interface: ' + \
                f'{not_found_users}'
            missing_user_log_method(msg)
        all_slack_usernames.extend(slack_usernames)

    return all_slack_usernames
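
All of these examples lean on the same small anymarkup surface: parse a string or file into Python data, optionally with force_types=None to keep the types the backend parser produced, and catch anymarkup.AnyMarkupError on bad input. A minimal, self-contained sketch of that pattern (the manifest string here is hypothetical):

import anymarkup

raw = '{"kind": "Service", "metadata": {"name": "web"}}'  # hypothetical input

try:
    # force_types=None keeps values exactly as the backend parser produced
    # them, instead of anymarkup's default coercion of int/bool-like strings
    data = anymarkup.parse(raw, force_types=None)
except anymarkup.AnyMarkupError as e:
    raise SystemExit("Could not parse data: {}".format(e))

# re-serialize the same structure in another supported format
as_yaml = anymarkup.serialize(data, format="yaml")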
Example #2
    def export_all(self):
        """
        only kubernetes things for now
        """
        # Resources to export.
        # Don't export Pods for now.
        # Exporting ReplicationControllers should be enough.
        # Ideally this should detect Pods that are not created by
        # ReplicationController and only export those.
        resources = ["replicationcontrollers", "persistentvolumeclaims",
                     "services"]

        # output of this export is kind List
        args = ["export", ",".join(resources), "-o", "json"]
        ec, stdout, stderr = self._call_oc(args)
        objects = anymarkup.parse(stdout, format="json", force_types=None)

        image_infos = []

        for o in objects["items"]:
            if o["kind"] == "ReplicationController":
                image_infos.extend(self.get_image_info(o))

        for ii in image_infos:
            if ii["private"]:
                logger.warning("{kind} {name} has image that appears to be "
                               "from local OpenShift registry!!".format(**ii))
        return objects
Example #3
    def has_image(self, imageinfo):
        # TODO: it seems that the best non-auth way is to look at repository's tags,
        #  e.g. registry.com/v1/repositories/<image>/tags
        url = ('http://' if self.insecure else 'https://') + self.url
        url = url + '/v1/repositories/{0}/tags'.format(
            imageinfo.name_str(registry=False, tag=False))
        ret = False
        try:
            logger.debug('Polling %s to see if image %s exists', url, imageinfo.name_str())
            r = requests.get(url)
            if r.status_code == 200:
                tags = anymarkup.parse(r.text)
                look_for_tag = imageinfo.imagename.tag or 'latest'
                if look_for_tag not in tags:
                    raise AtomicappBuilderException(
                        'Image "{0}" exists, but it doesn\'t have tag "{1}"'.format(
                            imageinfo.name_str(), look_for_tag)
                    )
                logger.debug('Image %s exists', imageinfo.name_str())
                ret = True
            else:
                logger.debug('Image %s does not exist', imageinfo.name_str())
        except requests.exceptions.SSLError as e:
            raise AtomicappBuilderException('SSL error while polling registry: {0}'.format(e))
        except Exception as e:
            logger.debug(
                'Image %s does not seem to exist, exception was: %s',
                imageinfo.name_str(), e)

        return ret
Example #4
    def _add_mvn_results(self, result_summary, anitya_mvn_names, version):
        def _compare_version(downstream, upstream):
            dv = downstream
            if 'redhat' in dv:
                # remove ".redhat-X" or "-redhat-X" suffix
                dv = dv[:dv.find('redhat')-1]
            if dv == upstream:
                return True
            else:
                return False

        downstream_rebuilds = []

        for name in anitya_mvn_names:
            metadata_url = '{repo}/{pkg}/maven-metadata.xml'.format(repo=RH_MVN_GA_REPO,
                                                                    pkg=mvn_pkg_to_repo_path(name))
            res = requests.get(metadata_url)
            if res.status_code != 200:
                self.log.info('Metadata for package {pkg} not found in {repo} (status {code})'.
                              format(pkg=name, repo=RH_MVN_GA_REPO, code=res.status_code))
                continue
            versions = anymarkup.parse(res.text)['metadata']['versioning']['versions']['version']
            # make sure 'versions' is a list (it's a string if there is just one version)
            if not isinstance(versions, list):
                versions = [versions]
            self.log.info('Found versions {v} for package {p}'.format(v=versions, p=name))
            for v in versions:
                if _compare_version(v, version):
                    downstream_rebuilds.append(v)

        result_summary['rh_mvn_matched_versions'] = downstream_rebuilds
        if downstream_rebuilds:
            # For now, we don't distinguish products, we just use general "Middleware"
            #  for all Maven artifacts
            result_summary['all_rhsm_product_names'].append('Middleware')
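
The list-or-string normalization in _add_mvn_results exists because of how anymarkup maps XML to Python data: a repeated element parses to a list, while a single occurrence parses to a plain string. A short sketch of that behavior (the XML snippets are illustrative):

import anymarkup

single = anymarkup.parse('<versions><version>1.0</version></versions>')
repeated = anymarkup.parse(
    '<versions><version>1.0</version><version>2.0</version></versions>')

# single['versions']['version'] is the string '1.0', while
# repeated['versions']['version'] is the list ['1.0', '2.0']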
Example #5
    def export_all(self):
        """
        only kubernetes things for now
        """
        # Resources to export.
        # Don't export Pods for now.
        # Exporting ReplicationControllers should be enough.
        # Ideally this should detect Pods that are not created by
        # ReplicationController and only export those.
        resources = [
            "replicationcontrollers", "persistentvolumeclaims", "services"
        ]

        # output of this export is kind List
        args = ["export", ",".join(resources), "-o", "json"]
        ec, stdout, stderr = self._call_oc(args)
        objects = anymarkup.parse(stdout, format="json", force_types=None)

        image_infos = []

        for o in objects["items"]:
            if o["kind"] == "ReplicationController":
                image_infos.extend(self.get_image_info(o))

        for ii in image_infos:
            if ii["private"]:
                logger.warning("{kind} {name} has image that appears to be "
                               "from local OpenShift registry!!".format(**ii))
        return objects
Example #6
 def read_cccp_index(self):
     if self.cccp_index_uri.startswith('file://'):
         file_to_read = self.cccp_index_uri[len('file://'):]
         self.cccp_index = anymarkup.parse_file(file_to_read)
     else:
         fetched = requests.get(self.cccp_index_uri)
         self.cccp_index = anymarkup.parse(fetched.text)
Example #7
def fetch_provider_resource(path):
    gqlapi = gql.get_api()

    # get resource data
    try:
        resource = gqlapi.get_resource(path)
    except gql.GqlApiError as e:
        raise FetchResourceError(e.message)

    try:
        resource['body'] = anymarkup.parse(resource['content'],
                                           force_types=None)
    except anymarkup.AnyMarkupError:
        e_msg = "Could not parse data. Skipping resource: {}"
        raise FetchResourceError(e_msg.format(path))

    openshift_resource = OR(resource['body'])

    try:
        openshift_resource.verify_valid_k8s_object()
    except (KeyError, TypeError) as e:
        k = e.__class__.__name__
        e_msg = "Invalid data ({}). Skipping resource: {}"
        raise FetchResourceError(e_msg.format(k, path))

    return openshift_resource
Example #8
File: config.py Project: eliskasl/packit
def get_packit_config_from_repo(sourcegit_project: GitProject,
                                ref: str) -> Optional[PackageConfig]:
    for config_file_name in CONFIG_FILE_NAMES:
        try:
            config_file = sourcegit_project.get_file_content(
                path=config_file_name, ref=ref)
            logger.debug(
                f"Found a config file '{config_file_name}' "
                f"on ref '{ref}' "
                f"of the {sourcegit_project.full_repo_name} repository.")
        except FileNotFoundError:
            logger.debug(
                f"The config file '{config_file_name}' "
                f"not found on ref '{ref}' "
                f"of the {sourcegit_project.full_repo_name} repository.")
            continue

        try:
            loaded_config = anymarkup.parse(config_file)
        except Exception as ex:
            logger.error(f"Cannot load package config '{config_file_name}'.")
            raise Exception(f"Cannot load package config: {ex}.")

        return parse_loaded_config(loaded_config=loaded_config)

    return None
Example #9
    def get_data(self, exclude_urls=()):
        response = requests.get(self.url, headers=self.request_headers)
        entries = anymarkup.parse(response.text)
        data_list = []
        for entry in entries['source']['vacancies']['vacancy']:
            if entry['url'] in exclude_urls:
                continue
            if not words_in_string(
                    self.keywords,
                    entry['description']
            ) and not words_in_string(
                self.keywords,
                entry['job-name']
            ):
                continue

            data_list.append({
                'url': entry['url'],
                'source_datetime': datetime.strptime(
                    entry['creation-date'][:19],
                    self.pub_date_format
                ),
                'text': entry['description'],
                'title': entry['job-name'],
            })

        return data_list
Example #10
def get_slack_usernames_from_github_owners(github_owners, users):
    all_slack_usernames = []
    for owners_file in github_owners or []:
        r = requests.get(owners_file)
        try:
            owners_file = anymarkup.parse(
                r.content,
                force_types=None
            )
            github_users = [u for l in owners_file.values() for u in l]
        except (anymarkup.AnyMarkupError, KeyError):
            msg = "Could not parse data. Skipping owners file: {}"
            logging.warning(msg.format(owners_file))
            continue

        if not github_users:
            continue

        slack_usernames = [get_slack_username(u)
                           for u in users
                           if u['github_username'] in github_users]
        if len(set(slack_usernames)) != len(set(github_users)):
            msg = (
                'found Slack usernames {} '
                'do not match all github usernames: {} '
                '(hint: user is missing from app-interface)'
            ).format(slack_usernames, github_users)
            logging.warning(msg)
        else:
            all_slack_usernames.extend(slack_usernames)

    return all_slack_usernames
Example #11
def fetch_openshift_resource(resource, parent):
    global _log_lock

    provider = resource['provider']
    path = resource['path']
    msg = "Fetching {}: {}".format(provider, path)
    _log_lock.acquire()
    logging.debug(msg)
    _log_lock.release()

    if provider == 'resource':
        validate_json = resource.get('validate_json') or False
        openshift_resource = \
            fetch_provider_resource(path, validate_json=validate_json)
    elif provider == 'resource-template':
        tv = {}
        if resource['variables']:
            tv = anymarkup.parse(resource['variables'], force_types=None)
        tv['resource'] = resource
        tv['resource']['namespace'] = parent
        tt = resource['type']
        tt = 'jinja2' if tt is None else tt
        if tt == 'jinja2':
            tfunc = process_jinja2_template
        elif tt == 'extracurlyjinja2':
            tfunc = process_extracurlyjinja2_template
        else:
            raise UnknownTemplateTypeError(tt)
        try:
            openshift_resource = fetch_provider_resource(path,
                                                         tfunc=tfunc,
                                                         tvars=tv)
        except Exception as e:
            msg = "could not render template at path {}\n{}".format(path, e)
            raise ResourceTemplateRenderError(msg)
    elif provider == 'vault-secret':
        version = resource['version']
        rn = resource['name']
        name = path.split('/')[-1] if rn is None else rn
        rl = resource['labels']
        labels = {} if rl is None else json.loads(rl)
        ra = resource['annotations']
        annotations = {} if ra is None else json.loads(ra)
        rt = resource['type']
        type = 'Opaque' if rt is None else rt
        try:
            openshift_resource = \
                fetch_provider_vault_secret(path, version, name,
                                            labels, annotations, type)
        except vault_client.SecretVersionNotFound as e:
            raise FetchVaultSecretError(e)
    elif provider == 'route':
        tls_path = resource['vault_tls_secret_path']
        tls_version = resource['vault_tls_secret_version']
        openshift_resource = fetch_provider_route(path, tls_path, tls_version)
    else:
        raise UnknownProviderError(provider)

    return openshift_resource
Example #12
def fetch_schema(schema_url):
    if schema_url.startswith('http'):
        r = requests.get(schema_url)
        r.raise_for_status()
        schema = r.text
        return anymarkup.parse(schema, force_types=None)
    else:
        raise MissingSchemaFile(schema_url)
Example #13
    def loadArtifact(self, path):
        data = super(self.__class__, self).loadArtifact(path)
        self.template_data = anymarkup.parse(data, force_types=None)
        if "kind" in self.template_data and self.template_data["kind"].lower() == "template":
            if "parameters" in self.template_data:
                return anymarkup.serialize(self.template_data["parameters"], format="json")

        return data
Example #14
def process_template(template, values):
    try:
        manifest = template % values
        return OR(anymarkup.parse(manifest, force_types=None),
                  QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION)
    except KeyError as e:
        raise ConstructResourceError(
            'could not process template: missing key {}'.format(e))
Example #15
def fetch_schema(schemas_root, schema_url):
    if schema_url.startswith('http'):
        r = requests.get(schema_url)
        r.raise_for_status()
        schema = r.text
    else:
        schema = fetch_schema_file(schemas_root, schema_url)

    return anymarkup.parse(schema, force_types=None)
Example #16
    def __init__(self, keyword_file=None, lemmatizer=False, stemmer=None):  # pylint: disable=too-many-branches
        """Construct.

        :param keyword_file: a path to keyword file
        :param lemmatizer: lematizer instance to be used
        :param stemmer: stemmer instance to be used
        """
        self._stemmer = stemmer or defaults.DEFAULT_STEMMER
        self._lemmatizer = lemmatizer or defaults.DEFAULT_LEMMATIZER

        if isinstance(keyword_file, str) or keyword_file is None:
            with open(keyword_file or self._DEFAULT_KEYWORD_FILE_PATH, 'r') as f:
                content = f.read()
        elif isinstance(keyword_file, io.TextIOBase):
            content = keyword_file.read()
        else:
            raise InvalidInputError("Unknown keyword file provided - %s" % (type(keyword_file)))

        self._keywords = anymarkup.parse(content)
        del content

        # make sure keywords are strings
        self._keywords = dict((str(keyword), val) for keyword, val in self._keywords.items())

        # add missing default values
        for keyword in self._keywords.keys():
            if self._keywords[keyword] is None:
                self._keywords[keyword] = {'synonyms': [], 'regexp': []}

            if self._keywords[keyword].get('synonyms') is None:
                self._keywords[keyword]['synonyms'] = []

            # make sure synonyms are strings
            self._keywords[keyword]['synonyms'] = list(map(str, self._keywords[keyword]['synonyms']))

            if self._keywords[keyword].get('regexp') is None:
                self._keywords[keyword]['regexp'] = []

        for keyword, entry in self._keywords.items():
            for idx, regexp in enumerate(entry['regexp']):
                entry['regexp'][idx] = re.compile(regexp)

            entry['synonyms'].append(keyword)

            for idx, synonym in enumerate(entry['synonyms']):
                for delim in [' ', '_', '-']:
                    synonyms = synonym.split(delim)
                    if self._lemmatizer:
                        synonyms = [self._lemmatizer.lemmatize(t) for t in synonyms]
                    if self._stemmer:
                        synonyms = [self._stemmer.stem(t) for t in synonyms]
                    new_synonym = delim.join(synonyms)

                    if new_synonym != synonym:
                        _logger.debug("Stemmed and lemmatized keyword synonym from '%s' to '%s' for keyword '%s'",
                                      synonym, new_synonym, keyword)
                        entry['synonyms'][idx] = new_synonym
Example #17
    def saveArtifact(self, path, data):
        if self.template_data:
            if "kind" in self.template_data and self.template_data["kind"].lower() == "template":
                if "parameters" in self.template_data:
                    passed_data = anymarkup.parse(data, force_types=None)
                    self.template_data["parameters"] = passed_data
                    data = anymarkup.serialize(self.template_data, format=os.path.splitext(path)[1].strip(".")) #FIXME

        super(self.__class__, self).saveArtifact(path, data)
Example #18
 def prepareOrder(self):
     for artifact in self.artifacts:
         data = None
         with open(os.path.join(self.path, artifact), "r") as fp:
             logger.debug(os.path.join(self.path, artifact))
             data = anymarkup.parse(fp)
         if "kind" in data:
             self.kube_order[data["kind"].lower()] = artifact
         else:
             raise ProviderFailedException("Malformed kube file")
Example #19
    def loadArtifact(self, path):
        data = super(self.__class__, self).loadArtifact(path)
        self.template_data = anymarkup.parse(data, force_types=None)
        if "kind" in self.template_data and self.template_data["kind"].lower(
        ) == "template":
            if "parameters" in self.template_data:
                return anymarkup.serialize(self.template_data["parameters"],
                                           format="json")

        return data
Example #20
    def call(self, method, kwargs):
        method_handler = getattr(self.core, method)

        if kwargs:
            kwargs = anymarkup.parse(kwargs)
        else:
            kwargs = {}
        if method_handler:
            logger.debug("Calling method %s with args\n%s" % (method, kwargs))
            method_handler(**kwargs)
Example #21
File: api.py Project: mjisyang/atomicapp
    def call(self, method, kwargs):
        method_handler = getattr(self.core, method)

        if kwargs:
            kwargs = anymarkup.parse(kwargs)
        else:
            kwargs = {}
        if method_handler:
            logger.debug("Calling method %s with args\n%s", method, kwargs)
            method_handler(**kwargs)
Example #23
    def load_from_path(cls,
                       src,
                       config=None,
                       namespace=GLOBAL_CONF,
                       nodeps=False,
                       dryrun=False,
                       update=False):
        """
        Load a Nulecule application from a path in the source path itself, or
        in the specified destination path.

        Args:
            src (str): Path to load Nulecule application from.
            config (dict): Config data for Nulecule application.
            namespace (str): Namespace for Nulecule application.
            nodeps (bool): Do not pull external applications if True.
            dryrun (bool): Do not make any change to underlying host.
            update (bool): Update existing application if True, else reuse it.

        Returns:
            A Nulecule instance or None in case of some dry run (fetching
            an image).
        """
        nulecule_path = os.path.join(src, MAIN_FILE)

        if os.path.exists(nulecule_path):
            with open(nulecule_path, 'r') as f:
                nulecule_data = f.read()
        else:
            raise NuleculeException(
                "No Nulecule file exists in directory: %s" % src)

        if dryrun and not os.path.exists(nulecule_path):
            raise NuleculeException(
                "Fetched Nulecule components are required to initiate dry-run. "
                "Please specify your app via atomicapp --dry-run /path/to/your-app"
            )

        # By default, AnyMarkup converts all formats to YAML when parsing.
        # Thus the rescue works either on JSON or YAML.
        try:
            nulecule_data = anymarkup.parse(nulecule_data)
        except (yaml.parser.ParserError, AnyMarkupError) as e:
            line = re.search(r'line (\d+)', str(e)).group(1)
            column = re.search(r'column (\d+)', str(e)).group(1)

            output = ""
            for i, l in enumerate(nulecule_data.splitlines()):
                if (i == int(line) -
                        1) or (i == int(line)) or (i == int(line) + 1):
                    output += "%s %s\n" % (str(i), str(l))

            raise NuleculeException(
                "Failure parsing %s file. Validation error on line %s, column %s:\n%s"
                % (nulecule_path, line, column, output))
Example #24
    def apply_pointers(self, content, params):
        """
        Let's apply all the json pointers!
        Valid params in Nulecule:

            param1:
                - /spec/containers/0/ports/0/hostPort
                - /spec/containers/0/ports/0/hostPort2
            or
            param1:
                - /spec/containers/0/ports/0/hostPort, /spec/containers/0/ports/0/hostPort2

        Args:
            content (str): content of artifact file
            params (dict): list of params with pointers to replace in content

        Returns:
            str: content with replaced pointers

        Todo:
            In the future we need to change this to detect haml, yaml, etc as we add more providers
            Blocked by: github.com/bkabrda/anymarkup-core/blob/master/anymarkup_core/__init__.py#L393
        """
        obj = anymarkup.parse(content)

        if type(obj) != dict:
            logger.debug(
                "Artifact file not json/haml, assuming it's $VARIABLE substitution"
            )
            return content

        if params is None:
            # Nothing to do here!
            return content

        for name, pointers in params.items():

            if not pointers:
                logger.warning("Could not find pointer for %s" % name)
                continue

            for pointer in pointers:
                try:
                    resolve_pointer(obj, pointer)
                    set_pointer(obj, pointer, name)
                    logger.debug("Replaced %s pointer with %s param" %
                                 (pointer, name))
                except JsonPointerException:
                    logger.debug("Error replacing %s with %s" %
                                 (pointer, name))
                    logger.debug("Artifact content: %s", obj)
                    raise NuleculeException(
                        "Error replacing pointer %s with %s." %
                        (pointer, name))
        return anymarkup.serialize(obj, format="json")
Example #25
    def export_project(self):
        """
        Export configuration from Openshift for various providers

        Returns:
            A dict with keys as provider and value as artifacts corresponding
            to that provider.
        """
        # Resources to export.
        # Don't export Pods for now.
        # Exporting ReplicationControllers should be enough.
        # Ideally this should detect Pods that are not created by
        # ReplicationController and only export those.
        # Order in resource list is significant! Objects are exported in the
        # same order as they are specified on the command line, and they will
        # have the same order in the Nulecule file as well.
        # ImageStream is first as workaround for this https://github.com/openshift/origin/issues/4518
        # But this workaround is going to work only after resolving
        # https://github.com/projectatomic/atomicapp/issues/669
        all_artifacts = {}
        for provider in NULECULE_PROVIDERS:
            if provider == "kubernetes":
                resources = ["persistentVolumeClaim",
                             "service",
                             "replicationController"]
            elif provider == "openshift":
                resources = ["imageStream",
                             "service",
                             "persistentVolumeClaim",
                             "replicationController",
                             "deploymentConfig",
                             "buildConfig"]

            # output of this export is kind List
            args = ["export", ",".join(resources), "-o", "json"]
            # if the user has specified a selector, append it to the command
            if self.selector:
                args.extend(["-l", self.selector])

            ec, stdout, stderr = self._call_oc(args)
            objects = anymarkup.parse(stdout, format="json", force_types=None)

            # convert OpenShift List to array
            if objects["kind"] == "List":
                artifacts = objects["items"]
            else:
                msg = "Output of `oc export` command is of diferent kind than 'List'"
                logger.critical(msg)
                raise Exception(msg)

            all_artifacts[provider] = artifacts

        ep = ExportedProject(artifacts=all_artifacts)

        return ep
Example #26
    def get_j_config(self):
        """ Retrieve a job config from Jenkins. """
        logger.info("Getting %s job config", self.job_name)
        job_config = self.server.get_job_config(self.job_name)
        # Convert to YAML
        job_config_yaml = anymarkup.serialize(anymarkup.parse(job_config), 'yaml')

        with open(self.config_path, 'w') as f:
            f.write(job_config_yaml)

        logger.info(" %s job config has been saved to %s", self.job_name, self.config_path)
Example #27
def convert(filename, from_format, to_format, interpolate):
    """Parses stdin or a file and converts to the specified format"""

    # Try to parse the file and output in the specified format
    data = anymarkup.parse(filename,
                           format=from_format,
                           interpolate=interpolate)
    serialized = anymarkup.serialize(data, format=to_format)
    click.echo(serialized)

    # Exit if all done
    sys.exit(0)
Example #28
    def saveArtifact(self, path, data):
        if self.template_data:
            if "kind" in self.template_data and self.template_data[
                    "kind"].lower() == "template":
                if "parameters" in self.template_data:
                    passed_data = anymarkup.parse(data, force_types=None)
                    self.template_data["parameters"] = passed_data
                    data = anymarkup.serialize(
                        self.template_data,
                        format=os.path.splitext(path)[1].strip("."))  #FIXME

        super(self.__class__, self).saveArtifact(path, data)
Example #29
 def get_values(self, path):
     gqlapi = gql.get_api()
     try:
         raw_values = gqlapi.get_resource(path)
     except gql.GqlApiError as e:
         raise FetchResourceError(e.message)
     try:
         values = anymarkup.parse(raw_values['content'], force_types=None)
     except anymarkup.AnyMarkupError:
         e_msg = "Could not parse data. Skipping resource: {}"
         raise FetchResourceError(e_msg.format(path))
     return values
Example #30
    def export_project(self):
        """
        Export configuration from Openshift for various providers

        Returns:
            A dict with keys as provider and value as artifacts corresponding
            to that provider.
        """
        # Resources to export.
        # Don't export Pods for now.
        # Exporting ReplicationControllers should be enough.
        # Ideally this should detect Pods that are not created by
        # ReplicationController and only export those.
        # Order in resource list is significant! Objects are exported in the
        # same order as they are specified on the command line, and they will
        # have the same order in the Nulecule file as well.
        # ImageStream is first as workaround for this https://github.com/openshift/origin/issues/4518
        # But this workaround is going to work only after resolving
        # https://github.com/projectatomic/atomicapp/issues/669
        all_artifacts = {}
        for provider in NULECULE_PROVIDERS:
            if provider == "kubernetes":
                resources = [
                    "persistentVolumeClaim", "service", "replicationController"
                ]
            elif provider == "openshift":
                resources = [
                    "imageStream", "service", "persistentVolumeClaim",
                    "replicationController", "deploymentConfig", "buildConfig"
                ]

            # output of this export is kind List
            args = ["export", ",".join(resources), "-o", "json"]
            # if the user has specified a selector, append it to the command
            if self.selector:
                args.extend(["-l", self.selector])

            ec, stdout, stderr = self._call_oc(args)
            objects = anymarkup.parse(stdout, format="json", force_types=None)

            # convert OpenShift List to array
            if objects["kind"] == "List":
                artifacts = objects["items"]
            else:
                msg = "Output of `oc export` command is of diferent kind than 'List'"
                logger.critical(msg)
                raise Exception(msg)

            all_artifacts[provider] = artifacts

        ep = ExportedProject(artifacts=all_artifacts)

        return ep
Example #31
    def _convert_config_file_to_json_dict(filename):
        """
        Parses the provided file into a dictionary, converts it to JSON, and
        parses that back into a dictionary. The round trip is needed because
        the anymarkup module auto-converts booleans and integers.

        :param filename: filename to convert
        :return: JSON data as a dictionary
        """
        config = ConfigObj(filename, interpolation=False, list_values=False)
        dict_data = config.dict()
        json_data = json.dumps(dict_data)
        json_dict = anymarkup.parse(json_data)
        return json_dict
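
The JSON round trip above is needed because anymarkup's default force_types=True coerces recognizable scalars. A hedged sketch of the difference (the config snippet is made up):

import anymarkup

source = 'port: "8080"\nverbose: "true"\n'  # hypothetical config snippet

coerced = anymarkup.parse(source)    # values become int 8080 and bool True
preserved = anymarkup.parse(source, force_types=None)    # values stay strings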
Example #32
    def _process_artifacts(self):
        """
        Parse OpenShift manifests files and checks if manifest under
        process is valid. Reads self.artifacts and saves parsed artifacts
        to self.openshift_artifacts
        """
        for artifact in self.artifacts:
            logger.debug("Processing artifact: %s", artifact)
            data = None
            with open(os.path.join(self.path, artifact), "r") as fp:
                data = anymarkup.parse(fp, force_types=None)

            self._process_artifact_data(artifact, data)
Example #33
    def pull_images(self, registry, username, password, only_internal=True):
        """
        This pulls all images that are mentioned in artifact.

        Args:
            registry (str): url of exposed OpenShift Docker registry
            username (str): username for the OpenShift Docker registry
            password (str): password for OpenShift Docker registry
            only_internal (bool): if True, pull only images that are in the
                                  internal OpenShift Docker registry; otherwise
                                  pull all images (default is True)

        """
        logger.debug("Pulling images (only_internal: {}, registry:{},"
                     " login:{}:{})".format(only_internal, registry,
                                            username, password))

        docker_client = docker.Client(base_url='unix://var/run/docker.sock', version='auto')

        try:
            login_response = docker_client.login(username=username,
                                                 password=password,
                                                 registry=registry)
            logger.debug(login_response)
        except docker.errors.APIError as e:
            logger.critical(e)
            raise Exception(e)

        for image_info in self.images:
            if image_info["internal"]:
                image_info["image"] = utils.replace_registry_host(
                    image_info["image"], registry)
            else:
                if only_internal:
                    # we are exporting only internal images, skip this
                    continue
            image = image_info["image"]
            logger.info("Pulling image {}".format(image))
            for line in docker_client.pull(image, stream=True, insecure_registry=True):
                line_info = anymarkup.parse(line)
                if "progress" in line_info:
                    # don't print progress information
                    # showing status is enough for now
                    continue
                elif "status" in line_info:
                    logger.info(line_info["status"])
                elif "errorDetail" in line_info:
                    msg = line_info["errorDetail"]["message"]
                    logger.critical(msg)
                    raise Exception(msg)
Example #34
File: base.py Project: schmerk/atomicapp
    def apply_pointers(self, content, params):
        """
        Let's apply all the json pointers!
        Valid params in Nulecule:

            param1:
                - /spec/containers/0/ports/0/hostPort
                - /spec/containers/0/ports/0/hostPort2
            or
            param1:
                - /spec/containers/0/ports/0/hostPort, /spec/containers/0/ports/0/hostPort2

        Args:
            content (str): content of artifact file
            params (dict): list of params with pointers to replace in content

        Returns:
            str: content with replaced pointers

        Todo:
            In the future we need to change this to detect haml, yaml, etc as we add more providers
            Blocked by: github.com/bkabrda/anymarkup-core/blob/master/anymarkup_core/__init__.py#L393
        """
        obj = anymarkup.parse(content)

        if type(obj) != dict:
            logger.debug("Artifact file not json/haml, assuming it's $VARIABLE substitution")
            return content

        if params is None:
            # Nothing to do here!
            return content

        for name, pointers in params.items():

            if not pointers:
                logger.warning("Could not find pointer for %s" % name)
                continue

            for pointer in pointers:
                try:
                    resolve_pointer(obj, pointer)
                    set_pointer(obj, pointer, name)
                    logger.debug("Replaced %s pointer with %s param" % (pointer, name))
                except JsonPointerException:
                    logger.debug("Error replacing %s with %s" % (pointer, name))
                    logger.debug("Artifact content: %s", obj)
                    raise NuleculeException("Error replacing pointer %s with %s." % (pointer, name))
        return anymarkup.serialize(obj, format="json")
Example #35
 def _dependencies_from_pom_xml():
     """
     Extract dependencies from pom.xml in current directory
     :return: {"groupId:artifactId": "version"}
     """
     solved = {}
     with open('pom.xml') as r:
         pom_dict = anymarkup.parse(r.read())
     dependencies = pom_dict.get('project', {}).get('dependencies', {}).get('dependency', [])
     if not isinstance(dependencies, list):
         dependencies = [dependencies]
     for dependency in dependencies:
         name = "{}:{}".format(dependency['groupId'], dependency['artifactId'])
         solved[name] = dependency['version']
     return solved
Example #36
    def _process_artifacts(self):
        """
        Parse each Kubernetes file and convert said format into an Object for
        deployment.
        """
        for artifact in self.artifacts:
            logger.debug("Processing artifact: %s", artifact)
            data = None

            # Open and parse the artifact data
            with open(os.path.join(self.path, artifact), "r") as fp:
                data = anymarkup.parse(fp, force_types=None)

            # Process said artifacts
            self._process_artifact_data(artifact, data)
Example #38
def convert_to_yaml_without_none(txt):
    """
    Convert any object to an OrderedDict without None values
    """

    raw = anymarkup.parse(txt)
    raw = remove_none(raw)
    represent_dict_order = lambda self, data: self.represent_mapping(
        'tag:yaml.org,2002:map', data.items())  # noqa
    yaml.add_representer(OrderedDict, represent_dict_order)
    txt = yaml.dump(raw,
                    default_flow_style=False,
                    width=10000,
                    allow_unicode=True)
    return txt
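
remove_none is not shown in this example; a minimal sketch of a recursive helper with the behavior the function relies on (hypothetical, not the original project's implementation):

def remove_none(obj):
    # recursively drop None values from parsed markup
    if isinstance(obj, dict):
        return type(obj)(
            (k, remove_none(v)) for k, v in obj.items() if v is not None)
    if isinstance(obj, list):
        return [remove_none(v) for v in obj if v is not None]
    return obj

Preserving the mapping type via type(obj) matters here, because the caller registers a YAML representer for OrderedDict.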
Example #39
    def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF,
                       nodeps=False, dryrun=False, update=False):
        """
        Load a Nulecule application from a path in the source path itself, or
        in the specified destination path.

        Args:
            src (str): Path to load Nulecule application from.
            config (atomicapp.nulecule.config.Config): Config data for
                Nulecule application.
            namespace (str): Namespace for Nulecule application.
            nodeps (bool): Do not pull external applications if True.
            dryrun (bool): Do not make any change to underlying host.
            update (bool): Update existing application if True, else reuse it.

        Returns:
            A Nulecule instance or None in case of some dry run (fetching
            an image).
        """
        nulecule_path = os.path.join(src, MAIN_FILE)

        if os.path.exists(nulecule_path):
            with open(nulecule_path, 'r') as f:
                nulecule_data = f.read()
        else:
            raise NuleculeException("No Nulecule file exists in directory: %s" % src)

        if dryrun and not os.path.exists(nulecule_path):
            raise NuleculeException("Fetched Nulecule components are required to initiate dry-run. "
                                    "Please specify your app via atomicapp --dry-run /path/to/your-app")

        # By default, AnyMarkup converts all formats to YAML when parsing.
        # Thus the rescue works either on JSON or YAML.
        try:
            nulecule_data = anymarkup.parse(nulecule_data)
        except (yaml.parser.ParserError, AnyMarkupError) as e:
            line = re.search(r'line (\d+)', str(e)).group(1)
            column = re.search(r'column (\d+)', str(e)).group(1)

            output = ""
            for i, l in enumerate(nulecule_data.splitlines()):
                if (i == int(line) - 1) or (i == int(line)) or (i == int(line) + 1):
                    output += "%s %s\n" % (str(i), str(l))

            raise NuleculeException("Failure parsing %s file. Validation error on line %s, column %s:\n%s"
                                    % (nulecule_path, line, column, output))
Example #40
 def get_data(self, exclude_urls=()):
     response = requests.get(self.url, headers=self.request_headers)
     entries = anymarkup.parse(response.text)
     data_list = []
     for entry in entries['rss']['channel']['item']:
         if entry['link'] in exclude_urls:
             continue
         data_list.append({
             'url': entry['link'],
             'source_datetime': datetime.strptime(
                 entry['pubDate'], self.pub_date_format
             ),
             'text': entry['description'],
             'title': entry['title'],
             'icon_url': entry.get('image', None)
         })
     return data_list
Example #41
    def deploy(self):
        kube_order = OrderedDict([("service", None), ("rc", None), ("pod", None)]) #FIXME
        for artifact in self.artifacts:
            data = None
            with open(os.path.join(self.path, artifact), "r") as fp:
                logger.debug(os.path.join(self.path, artifact))
                data = anymarkup.parse(fp)
            if "kind" in data:
                kube_order[data["kind"].lower()] = artifact
            else:
                raise ProviderFailedException("Malformed kube file")

        for artifact in kube_order:
            if not kube_order[artifact]:
                continue

            k8s_file = os.path.join(self.path, kube_order[artifact])
            self._callK8s(k8s_file)
Example #42
    def export_project(self):
        """
        only kubernetes things for now
        """
        # Resources to export.
        # Don't export Pods for now.
        # Exporting ReplicationControllers should be enough.
        # Ideally this should detect Pods that are not created by
        # ReplicationController and only export those.
        resources = ["replicationcontrollers", "persistentvolumeclaims",
                     "services"]

        # output of this export is kind List
        args = ["export", ",".join(resources), "-o", "json"]
        ec, stdout, stderr = self._call_oc(args)
        objects = anymarkup.parse(stdout, format="json", force_types=None)

        ep = ExportedProject(artifacts=objects)
        return ep
Example #43
    def javancss(self, source_path):
        """Run JavaNCSS tool http://www.kclee.de/clemens/java/javancss

        :param source_path: path to source codes
        :return normalized output
        """
        javancss_path = os.path.join(os.environ['JAVANCSS_PATH'], 'bin',
                                     'javancss')
        command = [javancss_path, '-all', '-xml', source_path]
        status, output, error = self._run_analyzer(command, json_output=False)

        if status != 0:
            self.log.warning("JavaNCSS tool reported some errors: %s", error)

        if output:
            output = anymarkup.parse("".join(output))
            output = self._normalize_javancss_output(output)

        return output
Example #44
def read_manifest(manifest_file):
    if manifest_file == '-':
        manifest_raw = sys.stdin.read()
        if not manifest_raw:
            raise argparse.ArgumentTypeError("Received empty data from STDIN")
    else:
        try:
            with open(manifest_file) as f:
                manifest_raw = f.read()
        except FileNotFoundError:
            raise argparse.ArgumentTypeError(
                "File not found: {}".format(manifest_file))

    try:
        manifest = anymarkup.parse(manifest_raw, force_types=None)
    except anymarkup.AnyMarkupError:
        raise argparse.ArgumentTypeError(
            "Could not parse file: {}".format(manifest_file))

    return manifest
Example #45
 def process_k8s_artifacts(self):
     """Processes Kubernetes manifests files and checks if manifest under
     process is valid.
     """
     for artifact in self.artifacts:
         data = None
         with open(os.path.join(self.path, artifact), "r") as fp:
             logger.debug(os.path.join(self.path, artifact))
             try:
                 data = anymarkup.parse(fp)
             except Exception:
                 msg = "Error processing %s artifcats, Error:" % os.path.join(
                     self.path, artifact)
                 printErrorStatus(msg)
                 raise
         if "kind" in data:
             self.k8s_manifests.append((data["kind"].lower(), artifact))
         else:
             apath = os.path.join(self.path, artifact)
             raise ProviderFailedException("Malformed kube file: %s" % apath)
Example #46
    def _parse_kubeconf(self, filename):
        """"
        Parse kubectl config file

        Args:
            filename (string): path to configuration file (e.g. ./kube/config)

        Returns:
            dict of parsed values from config

        Example of expected file format:
            apiVersion: v1
            clusters:
            - cluster:
                server: https://10.1.2.2:8443
                certificate-authority: path-to-ca.cert
                insecure-skip-tls-verify: false
              name: 10-1-2-2:8443
            contexts:
            - context:
                cluster: 10-1-2-2:8443
                namespace: test
                user: test-admin/10-1-2-2:8443
              name: test/10-1-2-2:8443/test-admin
            current-context: test/10-1-2-2:8443/test-admin
            kind: Config
            preferences: {}
            users:
            - name: test-admin/10-1-2-2:8443
              user:
                token: abcdefghijklmnopqrstuvwxyz0123456789ABCDEF
        """
        logger.debug("Parsing %s", filename)

        with open(filename, 'r') as fp:
            kubecfg = anymarkup.parse(fp.read())

        try:
            return self._parse_kubeconf_data(kubecfg)
        except ProviderFailedException:
            raise ProviderFailedException('Invalid %s' % filename)
Example #47
    def deploy(self):
        kube_order = OrderedDict([("service", None), ("rc", None), ("pod", None)]) #FIXME
        for artifact in self.artifacts:
            data = None
            artifact_path = os.path.join(self.path, artifact)
            with open(artifact_path, "r") as fp:
                data = anymarkup.parse(fp, force_types=None)
            if "kind" in data:
                if data["kind"].lower() == "template":
                    logger.info("Processing template")
                    artifact = self._processTemplate(artifact_path)
                kube_order[data["kind"].lower()] = artifact
            else:
                raise ProviderFailedException("Malformed artifact file")

        for artifact in kube_order:
            if not kube_order[artifact]:
                continue

            k8s_file = os.path.join(self.path, kube_order[artifact])
            self._callCli(k8s_file)
Example #48
 def _process_artifacts(self):
     """ Parse and validate Marathon artifacts
     Parsed artifacts are saved to self.marathon_artifacts
     """
     for artifact in self.artifacts:
         logger.debug("Processing artifact: %s", artifact)
         data = None
         with open(os.path.join(self.path, artifact), "r") as fp:
             try:
                 data = anymarkup.parse(fp)
                 logger.debug("Parsed artifact %s", data)
                 # every marathon app has to have an id; the 'id' key is also used for showing messages
                 if "id" not in data.keys():
                     msg = "Error processing %s artifact. There is no id" % artifact
                     printErrorStatus(msg)
                     raise ProviderFailedException(msg)
             except anymarkup.AnyMarkupError as e:
                 msg = "Error processing artifact - %s" % e
                 printErrorStatus(msg)
                 raise ProviderFailedException(msg)
             self.marathon_artifacts.append(data)
Example #49
 def _process_artifacts(self):
     """ Parse and validate Marathon artifacts
     Parsed artifacts are saved to self.marathon_artifacts
     """
     for artifact in self.artifacts:
         logger.debug("Processing artifact: %s", artifact)
         data = None
         with open(os.path.join(self.path, artifact), "r") as fp:
             try:
                 # env variables in marathon artifacts have to be string:string
                 # force_types=None respects types from json file
                 data = anymarkup.parse(fp, force_types=None)
                 logger.debug("Parsed artifact %s", data)
                 # every marathon app has to have an id; the 'id' key is also used for showing messages
                 if "id" not in data.keys():
                     msg = "Error processing %s artifact. There is no id" % artifact
                     cockpit_logger.error(msg)
                     raise ProviderFailedException(msg)
             except anymarkup.AnyMarkupError as e:
                 msg = "Error processing artifact - %s" % e
                 cockpit_logger.error(msg)
                 raise ProviderFailedException(msg)
             self.marathon_artifacts.append(data)
Example #50
    def push_images(self, registry, username, password, only_internal=True):
        """
        This pushes all images that are mentioned in artifact.

        Args:
            registry (str): url of registry
            username (str): username for the docker registry; if None, do not
                            authenticate to the registry
            password (str): password for docker registry
            only_internal (bool): if True, push only images that are in the
                                  internal OpenShift Docker registry; otherwise
                                  push all images (default is True)

        """
        logger.debug("pushing images to registry only_internal: {}, "
                     "registry:{}, login:{}:{}".format(only_internal, registry,
                                                       username, password))

        docker_client = docker.Client(base_url='unix://var/run/docker.sock', version='auto')

        if username and password:
            try:
                login_response = docker_client.login(username=username,
                                                     password=password,
                                                     registry=registry)
                logger.debug(login_response)
            except docker.errors.APIError as e:
                logger.critical(e)
                raise Exception(e)

        for image_info in self.images:
            if only_internal and not image_info["internal"]:
                # skip this image
                continue
            image = image_info["image"]

            # new name of image (only replace registry part)
            name_new_registry = utils.replace_registry_host(image, registry)

            (new_name, new_name_tag, new_name_digest) = utils.parse_image_name(
                name_new_registry)

            if new_name_digest:
                # if this image was specified with a digest, use the digest
                # as the tag: docker cannot push an image without a tag, and
                # an image pulled by digest has no tag specified

                # if this is going to be used as tag, it cannot contain ':'
                tag = new_name_digest.replace(":", "")
            else:
                tag = new_name_tag

            new_full_name = "{}:{}".format(new_name, tag)
            image_info["image"] = new_full_name

            logger.info("Tagging image {} as {}".format(image, new_full_name))
            try:
                tag_response = docker_client.tag(image, new_name, tag,
                                                 force=True)
                if not tag_response:
                    msg = "Error while tagging image"
                    logger.critical(msg)
                    raise Exception(msg)

            except docker.errors.APIError as e:
                logger.critical(e)
                raise Exception(e)

            logger.info("Pushing image {}".format(new_full_name))
            for line in docker_client.push(new_full_name, stream=True):
                line_info = anymarkup.parse(line)
                if "progress" in line_info:
                    # don't print progress information
                    # showing status is enough for now
                    continue
                elif "status" in line_info:
                    logger.info(line_info["status"])
                elif "errorDetail" in line_info:
                    msg = line_info["errorDetail"]["message"]
                    logger.critical(msg)
                    raise Exception(msg)