Example #1
# Orphan initializer fragment from the scraped example, wrapped in a stub
# class (hypothetical name; the class definition itself was not captured)
# so that the snippet parses. The imports it relies on follow below.
class Task:

    def __init__(self, config):
        self.backend_section = None
        self.config = config
        self.conf = config.get_conf()
        # SortingHat database connection settings from the config
        self.db_sh = self.conf['sortinghat']['database']
        self.db_user = self.conf['sortinghat']['user']
        self.db_password = self.conf['sortinghat']['password']
        self.db_host = self.conf['sortinghat']['host']
        self.grimoire_con = grimoire_con(conn_retries=12)  # 30m retry
import copy
import json
import logging

import os
import os.path
import pkgutil
import sys

from grimoire_elk.elk.elastic import ElasticSearch
from grimoire_elk.elk.utils import grimoire_con

logger = logging.getLogger(__name__)

requests_ses = grimoire_con()

ES_VER = None
HEADERS_JSON = {"Content-Type": "application/json"}


def find_elasticsearch_version(elastic):
    """Return (and cache) the major version of the Elasticsearch server."""
    global ES_VER
    if not ES_VER:
        res = requests_ses.get(elastic.url)
        main_ver = res.json()['version']['number'].split(".")[0]
        ES_VER = int(main_ver)
    return ES_VER
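
# Usage sketch (not part of the original example; the URL and index name are
# placeholders): the Elasticsearch root endpoint returns JSON such as
# {"version": {"number": "6.8.23", ...}}, so against a 6.x cluster this
# returns 6.
#
#   es = ElasticSearch("http://localhost:9200", "demo_index")
#   find_elasticsearch_version(es)  # -> 6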


def find_item_json(elastic, type_, item_id):
    # The function body is cut off in the original example
    ...
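
A minimal sketch of what a helper with this signature could do, assuming the
item is looked up by document type and id in the index behind the elastic
object (the helper name, the URL layout, and the index_url attribute are
assumptions, not the original implementation):

def find_item_json_sketch(elastic, type_, item_id):
    # Hypothetical: GET the raw document by type and id and return its
    # '_source' payload, or None when it does not exist.
    url = elastic.index_url + "/" + type_ + "/" + str(item_id)
    res = requests_ses.get(url)
    if res.status_code != 200:
        return None
    return res.json()['_source']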
Example #3
import json
import logging
import os
import tempfile

import requests

from grimoire_elk.arthur import load_identities
from grimoire_elk.elk.gerrit import GerritEnrich
from grimoire_elk.elk.git import GitEnrich
from grimoire_elk.elk.utils import grimoire_con

logger = logging.getLogger(__name__)

requests_ses = grimoire_con()


def fetch_track_items(upstream_file_url, data_source):
    """ The file format is:

    # Upstream contributions, bitergia will crawl this and extract the relevant information
    # system is one of Gerrit, Bugzilla, Launchpad (insert more)
    ---
    -
      url: https://review.openstack.org/169836
      system: Gerrit
    """

    track_uris = []
    req = requests_ses.get(upstream_file_url)
    # The rest of the function body is cut off in the original example
    ...

# Orphan initializer fragment from the scraped example, wrapped in a stub
# class (hypothetical name) so that it parses.
class Task:

    def __init__(self, config):
        """ config is a Config object """
        self.config = config
        self.conf = config.get_conf()
        self.grimoire_con = grimoire_con(conn_retries=12)  # 30m retry
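
The body of fetch_track_items is cut off above, but its docstring shows the
tracking file is YAML. A minimal sketch of extracting the tracked URIs for one
system from such a file, assuming PyYAML is available (the function name and
its parsing strategy are illustrative, not the original implementation):

import yaml

def parse_track_uris(raw_text, data_source):
    # Each YAML entry carries a 'url' and a 'system' field; keep the URLs
    # whose system matches the requested data source (e.g. "Gerrit").
    entries = yaml.safe_load(raw_text) or []
    return [entry['url'] for entry in entries
            if entry.get('system', '').lower() == data_source.lower()]

For the sample file in the docstring, parse_track_uris(req.text, "Gerrit")
would yield ["https://review.openstack.org/169836"].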
Example #5
# The scraped example omits its imports; these are the ones the class uses.
import json
import logging

from grimoire_elk.elk.utils import grimoire_con

logger = logging.getLogger(__name__)


class ConfOcean(object):

    conf_index = "conf"
    conf_repos = conf_index + "/repos"
    elastic = None
    requests_ses = grimoire_con()

    @classmethod
    def get_index(cls):
        return cls.conf_index

    @classmethod
    def set_elastic(cls, elastic):
        cls.elastic = elastic

        # Check conf index
        url = elastic.url + "/" + cls.conf_index
        r = cls.requests_ses.get(url)
        if r.status_code != 200:
            cls.requests_ses.post(url)
            logger.info("Creating OceanConf index " + url)

    @classmethod
    def add_repo(cls, unique_id, repo):
        ''' Add a new perceval repository with its arguments '''

        if cls.elastic is None:
            logger.error(
                "Can't add repo to conf. Ocean elastic is not configured")
            return

        url = cls.elastic.url + "/" + cls.conf_repos + "/"
        url += cls.elastic.safe_index(unique_id)

        logger.debug("Adding repo to Ocean %s %s" % (url, repo))

        cls.requests_ses.post(url, data=json.dumps(repo))

    @classmethod
    def get_repos(cls):
        ''' List of repos data in Ocean '''

        repos = []

        if cls.elastic is None:
            logger.error("Can't get repos. Ocean elastic is not configured")
            return repos

        # TODO: use scrolling API for getting all repos
        url = cls.elastic.url + "/" + cls.conf_repos + "/_search?size=9999"

        r = cls.requests_ses.get(url).json()

        if 'hits' in r:
            repos_raw = r['hits']['hits']  # Already existing items
            repos = [rep['_source'] for rep in repos_raw]

        return repos

    @classmethod
    def get_repos_ids(cls):
        ''' List of the elastic ids of the repos in Ocean '''

        repos_ids = []

        if cls.elastic is None:
            logger.error("Can't get repos. Ocean elastic is not configured")
            return repos_ids

        url = cls.elastic.url + "/" + cls.conf_repos + "/_search"

        r = cls.requests_ses.get(url).json()

        if 'hits' in r:
            repos_raw = r['hits']['hits']  # Already existing items
            repos_ids = [rep['_id'] for rep in repos_raw]

        return repos_ids
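
A usage sketch for ConfOcean, assuming the ElasticSearch class from the
earlier examples (the URL, index name, and repo payload below are
placeholders):

from grimoire_elk.elk.elastic import ElasticSearch

es = ElasticSearch("http://localhost:9200", "demo_index")
ConfOcean.set_elastic(es)   # creates the "conf" index if it does not exist
ConfOcean.add_repo("git_https://example.org/repo.git",
                   {"backend": "git", "uri": "https://example.org/repo.git"})
print(ConfOcean.get_repos())      # -> list of stored repo documents
print(ConfOcean.get_repos_ids())  # -> list of their elastic ids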