Example #1
    def __init__(self, *, jenkins_host, job_name, jmdb,
                          aggregator_plugins=None,
                          postprocessor_plugins=None):
        """
        Initializer.

        Required parameters:
            jenkins_host: Jenkins host name
            job_name:   Jenkins job name
            jmdb:       JenkinsMongoDB.jenkins_db() instance

        Optional parameters:
            aggregator_plugins: custom aggregator plug-in classes
            postprocessor_plugins: custom post-process plug-in classes
        """
        self.logger = logging.getLogger(__name__)
        self.jenkins_host = jenkins_host
        self.job_name = job_name
        self.aggregator_plugins = aggregator_plugins
        self.postprocessor_plugins = postprocessor_plugins

        cfg = EnvConfiguration(JenkinsJobAggregators.ENV_PARAMS)
        self.builds_max = cfg.get('JENKINS_AGGREGATOR_UPDATE_BUILDS_MAX')

        # XXXrs - This is presently unused.  Want to stash the time of
        #         last update, and refuse to run again until sufficient
        #         time has passed.
        self.freq_sec = cfg.get('JENKINS_AGGREGATOR_UPDATE_FREQ_SEC')

        self.job_data_coll = JenkinsJobDataCollection(job_name=job_name, jmdb=jmdb)
        self.job_meta_coll = JenkinsJobMetaCollection(job_name=job_name, jmdb=jmdb)
        self.alljob_idx = JenkinsAllJobIndex(jmdb=jmdb)
        self.japi = JenkinsApi(host=self.jenkins_host)
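The XXXrs note above wants the last update time stashed so runs can be refused until enough time has passed. A minimal sketch of that gate, assuming a hypothetical last_update_ts document in the job meta collection and that time is imported (names are illustrative, not from the source):

    def _update_allowed(self):
        # Hypothetical gate: skip this run unless freq_sec has elapsed since
        # the last recorded update; the timestamp lives in the meta collection.
        doc = self.job_meta_coll.coll.find_one({'_id': 'last_update_ts'})
        if doc and (time.time() - doc.get('ts', 0)) < self.freq_sec:
            return False
        self.job_meta_coll.coll.update_one(
            {'_id': 'last_update_ts'},
            {'$set': {'ts': int(time.time())}}, upsert=True)
        return True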
Example #2
    def __init__(self, *, job_name, jmdb):
        self.db = jmdb.jenkins_db()
        self.logger = logging.getLogger(__name__)
        self.job_name = job_name
        self.coll = self.db.collection("job_{}_meta".format(job_name))
        cfg = EnvConfiguration(JenkinsJobMetaCollection.ENV_PARAMS)
        self.retry_max = cfg.get('JENKINS_AGGREGATOR_UPDATE_RETRY_MAX')
Example #3
    def __init__(self, *, job_name):
        self.logger = logging.getLogger(__name__)
        cfg = EnvConfiguration(XDUnitTestCoverageAggregator.ENV_PARAMS)
        self.coverage_file_name = cfg.get("XD_UNIT_TEST_COVERAGE_FILE_NAME")
        self.artifacts_root = cfg.get("XD_UNIT_TEST_ARTIFACTS_ROOT")
        super().__init__(job_name=job_name,
                         agg_name=self.__class__.__name__)
Example #4
    def __init__(self, *, job_name, file_pats):

        self.logger = logging.getLogger(__name__)
        cfg = EnvConfiguration(SqlPerfResultsAggregator.ENV_PARAMS)
        self.artifacts_root = cfg.get('SQL_PERF_ARTIFACTS_ROOT')
        self.file_pats = file_pats
        super().__init__(job_name=job_name, agg_name=self.__class__.__name__)
Example #5
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.path_cache = {}
        cfg = EnvConfiguration(SqlPerfComparisonPdf.ENV_PARAMS)
        self.pdf_pat = cfg.get('SQL_PERF_COMPARISON_PDF_PAT')
        self.artifacts_root = cfg.get('SQL_PERF_ARTIFACTS_ROOT')
        self.job_name = cfg.get('SQL_PERF_JOB_NAME')
        self.input_root = os.path.join(self.artifacts_root, self.job_name)
Example #6
    def __init__(self, *, db, name):
        self.logger = logging.getLogger(__name__)
        self.cfg = EnvConfiguration(MongoDBKeepAliveLock.ENV_CONFIG)
        self.id = str(uuid.uuid4())  # Unique instance identifier
        self.db = db
        self.name = name
        self.collection = self.db.collection(
            self.cfg.get('MONGO_DB_KALOCK_COLLECTION_NAME'))
        self.timeout = self.cfg.get('MONGO_DB_KALOCK_TIMEOUT')
        self.freq = self.cfg.get('MONGO_DB_KALOCK_UPDATE_FREQUENCY')
        self.locked = False
        self.ka_event = threading.Event()
        self.ka_thread = None
Example #7
    def __init__(self):
        """
        Initializer

        Environment parameters:
            UBM_PERF_JOB_NAME:  Jenkins job name.
        """
        self.logger = logging.getLogger(__name__)
        cfg = EnvConfiguration(UbmPerfResultsData.ENV_PARAMS)
        self.job_name = cfg.get("UBM_PERF_JOB_NAME")
        jmdb = JenkinsMongoDB()
        self.data = JenkinsJobDataCollection(job_name=self.job_name, jmdb=jmdb)
        self.meta = JenkinsJobMetaCollection(job_name=self.job_name, jmdb=jmdb)
        self.results_cache = {}
        self.jresults_cache = {}
Example #8
    def __init__(self, *, db_name):
        self.logger = logging.getLogger(__name__)
        self.cfg = EnvConfiguration(MongoDB.ENV_CONFIG)
        self.url = "mongodb://{}:{}@{}:{}/"\
                   .format(self.cfg.get('MONGO_DB_USER'),
                           self.cfg.get('MONGO_DB_PASS'),
                           self.cfg.get('MONGO_DB_HOST'),
                           self.cfg.get('MONGO_DB_PORT'))
        self.client = MongoClient(self.url, connect=False)
        if self.cfg.get('MONGO_DB_USE_PROXY'):
            self.client = MongoProxy(self.client)
        # Quick connectivity check...
        # The ismaster command is cheap and does not require auth.
        self.client.admin.command('ismaster')
        self.db = self.client[db_name]
        self.logger.info(self.db)
Example #9
    def __init__(self):
        cfg = EnvConfiguration(JenkinsMongoDB.ENV_CONFIG)
        # Explicitly pass a DB name to override the default based
        # on the Jenkins hostname.  Useful for test/debug.
        db_name = cfg.get('JENKINS_DB_NAME')
        if db_name is None:
            # Default DB name is the Jenkins host name
            db_name = cfg.get('JENKINS_HOST')
        if db_name is None:
            raise JenkinsMongoDBMissingNameError(
                "no JENKINS_DB_NAME or JENKINS_HOST")

        # If we're using a host name, it will contain dots, so
        # replace them with underscores to make a safe MongoDB name.
        self._db_name = "{}".format(db_name).replace('.', '_')
        self._db = None
Example #10
    def __init__(self,
                 *,
                 coverage_dir,
                 bin_name="usrnode",
                 profdata_file_name="usrnode.profdata",
                 json_file_name="coverage.json"):

        cfg = EnvConfiguration(ClangCoverageDir.ENV_PARAMS)

        self.logger = logging.getLogger(__name__)
        self.coverage_dir = coverage_dir
        self.bin_name = bin_name
        self.profdata_file_name = profdata_file_name
        self.json_file_name = json_file_name
        self.rawprof_dir_name = cfg.get("ARTIFACTS_RAWPROF_DIR_NAME")
        self.bin_dir_name = cfg.get("ARTIFACTS_BIN_DIR_NAME")
        self.src_dir_name = cfg.get("ARTIFACTS_SRC_DIR_NAME")
        self.clang_bin_path = cfg.get("CLANG_BIN_PATH")
Example #11
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        cfg = EnvConfiguration(XCEFuncTestCoverageData.ENV_PARAMS)
        job_name = cfg.get("XCE_FUNC_TEST_JOB_NAME")

        # XXXrs - This should NOT communicate directly with the DB, but
        #         should go through a REST client.
        jmdb = JenkinsMongoDB()
        self.data = JenkinsJobDataCollection(job_name=job_name, jmdb=jmdb)
        self.meta = JenkinsJobMetaCollection(job_name=job_name, jmdb=jmdb)

        # XXXrs - TEMPORARY (!?!) initialize every time with static configuration.
        #         Eventually, this configuration should be managed elsewhere.

        self.file_groups = FileGroups(meta=self.meta.coll)
        self.file_groups.reset()
        for name, files in XCEFuncTestCoverageData.XCE_FUNC_TEST_FILE_GROUPS.items():
            self.file_groups.append_group(name=name, files=files)
Example #12
    def __init__(self, *, job_name):
        pp_name = "{}_postprocessor".format(UbmTestGroupName)
        super().__init__(name=pp_name, job_name=job_name)
        self.logger = logging.getLogger(__name__)
        self.jmdb = JenkinsMongoDB()
        self.ubm_perf_results_data = UbmPerfResultsData()
        self.tgroups = self.ubm_perf_results_data.test_groups()
        # XXX: Restrict the To: address for now; replace with the appropriate
        # email alias when confident of results.
        cfg = EnvConfiguration(UbmPerfPostprocessor.ENV_PARAMS)
        self.urlprefix = "https://{}/job/{}/".format(cfg.get('JENKINS_HOST'),
                                                     self.job_name)
        self.regr_file = "{}/{}/{}".\
            format(cfg.get('UBM_PERF_ARTIFACTS_ROOT'), self.job_name,
                   RegressionAlertFileName)
        self.alert_template =\
            "Regression(s) detected in XCE benchmarks in build {}:\n{}\n\n" \
            "Please see console output at {} for more details"
        self.alert_email_subject = "Regression in XCE benchmarks!!"
        self.alljob = JenkinsAllJobIndex(jmdb=self.jmdb)
        self.ubm_num_prev_runs = int(cfg.get('UBM_PERF_REGR_NRUNS'))
Example #13
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        cfg = EnvConfiguration(ClangCoverageTools.ENV_PARAMS)

        # If explicit paths are passed, use those.
        self.llvm_cov_path = cfg.get("CLANG_LLVM_COV_PATH")
        self.llvm_profdata_path = cfg.get("CLANG_LLVM_PROFDATA_PATH")

        if self.llvm_cov_path is None or self.llvm_profdata_path is None:

            # If no explicit paths are given, find the "clang" binary.

            cargs = ["which", "clang"]
            cp = subprocess.run(cargs, stdout=subprocess.PIPE)
            clang_bin_path = cp.stdout.decode('utf-8').strip()
            if not clang_bin_path:
                raise ClangExecutableNotFound("no clang path found")
            if not os.path.exists(clang_bin_path):
                raise ClangExecutableNotFound(
                    "clang path {} does not exist".format(clang_bin_path))

            # Assumption is that the llvm tools co-reside with the clang binary.

            self.clang_bin_dir = os.path.dirname(clang_bin_path)
            self.llvm_cov_path = os.path.join(self.clang_bin_dir, "llvm-cov")
            self.llvm_profdata_path = os.path.join(self.clang_bin_dir,
                                                   "llvm-profdata")

        if not os.path.exists(self.llvm_cov_path):
            raise ClangExecutableNotFound(
                "llvm-cov path {} does not exist".format(self.llvm_cov_path))
        if not os.path.exists(self.llvm_profdata_path):
            raise ClangExecutableNotFound(
                "llvm-profdata path {} does not exist".format(
                    self.llvm_profdata_path))
Example #14
class MongoDBKeepAliveLock(object):

    ENV_CONFIG = {
        'MONGO_DB_KALOCK_COLLECTION_NAME': {
            'required': True,
            'default': '_ka_locks'
        },
        'MONGO_DB_KALOCK_TIMEOUT': {
            'required': True,
            'type': EnvConfiguration.NUMBER,
            'default': 10
        },
        'MONGO_DB_KALOCK_UPDATE_FREQUENCY': {
            'required': True,
            'type': EnvConfiguration.NUMBER,
            'default': 5
        },
    }

    def __init__(self, *, db, name):
        self.logger = logging.getLogger(__name__)
        self.cfg = EnvConfiguration(MongoDBKeepAliveLock.ENV_CONFIG)
        self.id = str(uuid.uuid4())  # Unique instance identifier
        self.db = db
        self.name = name
        self.collection = self.db.collection(
            self.cfg.get('MONGO_DB_KALOCK_COLLECTION_NAME'))
        self.timeout = self.cfg.get('MONGO_DB_KALOCK_TIMEOUT')
        self.freq = self.cfg.get('MONGO_DB_KALOCK_UPDATE_FREQUENCY')
        self.locked = False
        self.ka_event = threading.Event()
        self.ka_thread = None

    def _ka_loop(self):
        self.logger.debug("start")
        first = True
        while self.locked:
            self.logger.debug("wait on ka_event")
            if first or not self.ka_event.wait(self.freq):
                first = False
                doc = self.collection.find_one_and_update(
                    {
                        '_id': self.name,
                        'locker_id': self.id
                    }, {'$set': {
                        'timeout': int(time.time()) + self.timeout
                    }})
                if not doc:
                    self.locked = False
                    err = "failed to update _id: {} locker_id: {}"\
                          .format(self.name, self.id)
                    self.logger.error(err)
                    raise MongoDBKALockUpdateFail(err)

                if not self.locked:
                    break
        self.logger.info("stop")

    def _stop_ka(self):
        self.logger.info("start")
        if not self.ka_thread:
            self.logger.error("stopping keep alive thread without starting")
            return
        self.ka_event.set()
        self.ka_thread.join(timeout=10)  # XXXrs arbitrary timeout
        if self.ka_thread.is_alive():
            self.logger.error("timeout joining keep alive thread")
        self.ka_thread = None
        self.logger.info("end")

    def _start_ka(self):
        self.logger.info("start")
        if self.ka_thread:
            self.logger.error("keep alive thread already running")
            return
        self.ka_thread = threading.Thread(target=self._ka_loop)
        self.ka_thread.daemon = True
        self.ka_thread.start()
        self.logger.info("end")

    def _try_lock(self, *, meta=None):
        """
        Try to obtain the keep-alive lock.
        """
        self.logger.debug("start")

        now = int(time.time())
        ourlock = {
            '_id': self.name,
            'locker_id': self.id,
            'timeout': now + self.timeout,
            'meta': meta
        }

        # Try to create lock in the locks collection.
        try:
            self.collection.insert_one(ourlock)
            self.locked = True

        except DuplicateKeyError as e:
            self.logger.debug("check old")
            # Replace if not us and too old
            doc = self.collection.find_one_and_replace(
                {
                    '_id': self.name,
                    'locker_id': {
                        '$ne': self.id
                    },
                    'timeout': {
                        '$lt': now
                    }
                }, ourlock)
            self.locked = doc is not None
        self.logger.debug("locked: {}".format(self.locked))
        return self.locked

    def lock(self, *, meta=None, timeout=None):
        """
        Try to acquire the lock, waiting as needed.
        """
        if timeout is None:
            timeout = self.timeout
        until = int(time.time()) + timeout
        while not self._try_lock(meta=meta):
            self.logger.debug("lock sleep...")
            time.sleep(1)
            if int(time.time()) >= until:
                err = "timeout: {} _id: {} locker_id: {}"\
                      .format(timeout, self.name, self.id)
                self.logger.error(err)
                raise MongoDBKALockTimeout(err)
        self._start_ka()
        return True

    def unlock(self):
        """
        Release the lock and stop the keep-alive thread.
        """
        self.locked = False
        self._stop_ka()
        result = self.collection.delete_one({
            '_id': self.name,
            'locker_id': self.id
        })
        if result.deleted_count != 1:
            err = "failed to delete _id: {} locker_id: {}"\
                  .format(self.name, self.id)
            self.logger.error(err)
            raise MongoDBKALockUnlockFail(err)
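
A minimal usage sketch for the keep-alive lock above, assuming the MongoDB wrapper from Example #25 (the database and lock names are illustrative):

db = MongoDB(db_name='jenkins_test_db')
kalock = MongoDBKeepAliveLock(db=db, name='aggregator_update')
kalock.lock(meta={'owner': 'example'})  # waits up to the configured timeout
try:
    pass  # ... critical section; a daemon thread refreshes the lock timeout ...
finally:
    kalock.unlock()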
Example #15
import time

from py_common.env_configuration import EnvConfiguration

# Quiet some annoying warnings :)
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

ENV_CONFIG = {
    'PROMETHEUS_QUERY_URL': {
        'required': True,
        'default': "https://prometheus.service.consul/api/v1/query"
    },
    'DEFAULT_DOMAIN': {
        'default': 'int.xcalar.com'
    }
}
CFG = EnvConfiguration(ENV_CONFIG)


class PrometheusAPI(object):
    def __init__(self):
        pass

    def host_metrics(self, *, host, start_time_s, end_time_s):

        period_s = end_time_s - start_time_s
        if period_s < 60:
            raise ValueError(
                "Period {} to {} too short.  Must be >= 60s".format(
                    start_time_s, end_time_s))

        now_s = int(time.time())
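
The rest of host_metrics is truncated above. A hedged sketch of how a query against PROMETHEUS_QUERY_URL might be issued with requests (the helper and query expression are illustrative, not from the source; verify=False matches the disabled urllib3 warnings above):

import requests

def prometheus_query(expr):
    # Issue an instant query against the configured Prometheus endpoint.
    resp = requests.get(CFG.get('PROMETHEUS_QUERY_URL'),
                        params={'query': expr}, verify=False)
    resp.raise_for_status()
    return resp.json().get('data', {}).get('result', [])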
Example #16
# Copyright 2019 Xcalar, Inc. All rights reserved.
#
# No use, or distribution, of this source code is permitted in any form or
# means without a valid, written license agreement with Xcalar, Inc.
# Please refer to the included "COPYING" file for terms and conditions
# regarding the use and redistribution of this software.

import logging
import os
import sys

if __name__ == '__main__':
    sys.path.append(os.environ.get('XLRINFRADIR', ''))

from py_common.env_configuration import EnvConfiguration
config = EnvConfiguration({'LOG_LEVEL': {'default': logging.DEBUG}})

from flask import Flask, request
from flask import render_template, make_response
from flask_cors import CORS, cross_origin

# It's log, it's log... :)
logging.basicConfig(
    level=config.get('LOG_LEVEL'),
    format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)

app = Flask(__name__)
cors = CORS(app)
Example #17
import logging
import os
import random
import shlex
import signal
import subprocess
import sys
import time

if __name__ == '__main__':
    sys.path.append(os.environ.get('XLRINFRADIR', ''))

from py_common.env_configuration import EnvConfiguration

os.environ["XLR_PYSDK_VERIFY_SSL_CERT"] = "false"

cfg = EnvConfiguration({'LOG_LEVEL': {'default': logging.DEBUG}}) # XXXrs DEBUG!

# Configure logging
logging.basicConfig(level=cfg.get('LOG_LEVEL'),
                    format="%(asctime)s - %(levelname)s - %(threadName)s - %(funcName)s - %(message)s",
                    handlers=[logging.StreamHandler(sys.stdout)])
logger = logging.getLogger(__name__)

# Arrange for orderly shutdown on signal 

WAIT_TIMEOUT = 10
SHUTDOWN = False
def do_shutdown(signum, frame):
    global SHUTDOWN
    logger.info("signal {}".format(signum))
    SHUTDOWN = True
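
The handler registration is presumably below the cut; a minimal sketch, assuming SIGINT and SIGTERM are the signals of interest:

signal.signal(signal.SIGINT, do_shutdown)
signal.signal(signal.SIGTERM, do_shutdown)

while not SHUTDOWN:
    time.sleep(WAIT_TIMEOUT)  # main loop polls the shutdown flag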
Example #18
import datetime
import logging
import os
import pytz
import random
import re
import statistics
import sys
import time

sys.path.append(os.environ.get('XLRINFRADIR', ''))

from py_common.env_configuration import EnvConfiguration
cfg = EnvConfiguration({"LOG_LEVEL": {"default": logging.INFO},
                        "SQL_SCALE_JOB_NAME": {"default": "SqlScaleTest"},
                        "PRECHECKIN_VERIFY_JOB_NAME": {"default": "BuildSqldfTestAggreagate"}})
from sql_perf import SqlPerfResults, SqlPerfResultsData
from sql_perf.comparison_pdf import SqlPerfComparisonPdf

from flask import Flask, request, jsonify, json, abort
from flask_cors import CORS, cross_origin

# It's log, it's log... :)
logging.basicConfig(
                level=cfg.get("LOG_LEVEL"),
                format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
                handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)

source_to_results = {
Example #19
import logging
import os
from prometheus_client import CollectorRegistry, Gauge
from prometheus_client import push_to_gateway, delete_from_gateway
import sys

sys.path.append(os.environ.get('XLRINFRADIR', ''))

from py_common.env_configuration import EnvConfiguration
from py_common.mongo import JenkinsMongoDB

CFG = EnvConfiguration({
    'LOG_LEVEL': {
        'default': logging.INFO
    },
    'PUSHGATEWAY_URL': {
        'default': 'pushgateway.nomad:9999'
    }
})
ONE_DAY = (60 * 60 * 24)


class AlertManager(object):
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.jmdb = JenkinsMongoDB()

    def _set_alert(self,
                   *,
                   alert_group,
                   alert_id,
Example #20
# regarding the use and redistribution of this software.

import datetime
import logging
import os
import pytz
import sys
import requests
import time

if __name__ == '__main__':
    sys.path.append(os.environ.get('XLRINFRADIR', ''))

from py_common.env_configuration import EnvConfiguration
cfg = EnvConfiguration({'LOG_LEVEL': {'default': logging.INFO},
                        'BACKEND_HOST': {'required': True},
                        'BACKEND_PORT': {'required': True},
                        'TIMEZONE': {'default': 'America/Los_Angeles'}})
    
from flask import Flask, request
from flask import render_template, make_response, jsonify
from flask_cors import CORS, cross_origin

# It's log, it's log... :)
logging.basicConfig(
                level=cfg.get('LOG_LEVEL'),
                format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
                handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)

timezone = pytz.timezone(cfg.get('TIMEZONE'))
Example #21
        pending = self.job_meta_coll.pending_builds(first=jenkins_first, last=jenkins_last)
        updated = self._do_updates(builds=pending[:self.builds_max], force_default_job_update=force_default_job_update)

        extra = self.builds_max - updated

        # We can do up to "extra" reparses.
        if extra > 0:
            reparse = self.job_meta_coll.reparse(rtnmax=extra)
            if reparse:
                self._do_updates(builds=reparse, is_reparse=True)

# MAIN -----

cfg = EnvConfiguration({'LOG_LEVEL': {'default': logging.INFO},
                        'JENKINS_HOST': {'default': None},
                        'JENKINS_DB_NAME': {'default': None},
                        'UPDATE_JOB_LIST': {'default': None},
                        'ALL_JOB_UPDATE_FREQ_HR': {'default': 24}})

# It's log, it's log... :)
logging.basicConfig(level=cfg.get('LOG_LEVEL'),
                    format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
                    handlers=[logging.StreamHandler(sys.stdout)])
logger = logging.getLogger(__name__)


import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-b", help="test mode build number", metavar="bnum",
                    dest='test_builds', action='append', default=[])
parser.add_argument("-j", help="test mode job name", metavar="name",
Example #22
import logging
import os
import pytz
import random
import re
import statistics
import sys
import time

sys.path.append(os.environ.get('XLRINFRADIR', ''))

from py_common.env_configuration import EnvConfiguration
cfg = EnvConfiguration({
    'LOG_LEVEL': {
        'default': logging.INFO
    },
    'JDQ_SERVICE_HOST': {
        'required': True
    },
    'JDQ_SERVICE_PORT': {
        'required': True
    }
})

from py_common.jenkins_data_query.client import JDQClient
jdq_client = JDQClient(host=cfg.get('JDQ_SERVICE_HOST'),
                       port=cfg.get('JDQ_SERVICE_PORT'))

from flask import Flask, request, jsonify, json, abort
from flask_cors import CORS, cross_origin

# It's log, it's log... :)
logging.basicConfig(
Example #23
import logging

from py_common.env_configuration import EnvConfiguration
from py_common.jenkins_aggregators import JenkinsAggregatorBase
from py_common.mongo import MongoDB

AGGREGATOR_PLUGINS = [{
    'class_name': 'SystemStatsPlotter',
    'job_names': ['__ALL__']
}]

cfg = EnvConfiguration({
    'LOG_LEVEL': {
        'default': logging.INFO
    },
    'PLOTTER_PATH': {
        'default': None
    },
    'DEFAULT_PLOT_CFG_PATH': {
        'default': None
    }
})


class SystemStatsPlotterException(Exception):
    pass


class SystemStatsPlotter(JenkinsAggregatorBase):
    def __init__(self, *, job_name):
        """
        Class-specific initialization.
Example #24
import logging
import os
import sys

sys.path.append(os.environ.get('XLRINFRADIR', ''))

from py_common.env_configuration import EnvConfiguration
from py_common.jenkins_aggregators import JenkinsAllJobIndex
from py_common.jenkins_aggregators import JenkinsJobDataCollection
from py_common.mongo import MongoDB, JenkinsMongoDB

cfg = EnvConfiguration({
    'LOG_LEVEL': {
        'default': logging.WARNING
    },
    'JENKINS_HOST': {
        'default': None
    },
    'JENKINS_DB_NAME': {
        'default': None
    }
})

# It's log, it's log... :)
logging.basicConfig(
    level=cfg.get('LOG_LEVEL'),
    format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)])
logger = logging.getLogger(__name__)

JMDB = JenkinsMongoDB()
Example #25
class MongoDB(object):

    ENV_CONFIG = {
        'MONGO_DB_HOST': {
            'required': True,
            'default': mongo_db_host
        },
        'MONGO_DB_PORT': {
            'required': True,
            'type': EnvConfiguration.NUMBER,
            'default': mongo_db_port
        },
        'MONGO_DB_USER': {
            'required': True,
            'default': mongo_db_user
        },
        'MONGO_DB_PASS': {
            'required': True,
            'default': mongo_db_pass
        },
        'MONGO_DB_USE_PROXY': {
            'default': False
        }
    }

    def __init__(self, *, db_name):
        self.logger = logging.getLogger(__name__)
        self.cfg = EnvConfiguration(MongoDB.ENV_CONFIG)
        self.url = "mongodb://{}:{}@{}:{}/"\
                   .format(self.cfg.get('MONGO_DB_USER'),
                           self.cfg.get('MONGO_DB_PASS'),
                           self.cfg.get('MONGO_DB_HOST'),
                           self.cfg.get('MONGO_DB_PORT'))
        self.client = MongoClient(self.url, connect=False)
        if self.cfg.get('MONGO_DB_USE_PROXY'):
            self.client = MongoProxy(self.client)
        # Quick connectivity check...
        # The ismaster command is cheap and does not require auth.
        self.client.admin.command('ismaster')
        self.db = self.client[db_name]
        self.logger.info(self.db)

    def collection(self, name):
        return self.db[name]

    def collection_names(self):
        return self.db.collection_names()

    def job_meta_collection_names(self):
        names = []
        for name in self.db.collection_names():
            if name.startswith('job_') and name.endswith('_meta'):
                names.append(name)
        return names

    @staticmethod
    def encode_key(key):
        return key.replace('.', '__dot__')

    @staticmethod
    def decode_key(key):
        return key.replace('__dot__', '.')
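
The encode_key/decode_key helpers exist because MongoDB field names may not contain dots; dotted strings such as host names or artifact URLs are round-tripped through them (Example #29 uses decode_key the same way). A quick illustration:

key = MongoDB.encode_key("jenkins.int.xcalar.com")
# -> 'jenkins__dot__int__dot__xcalar__dot__com'
assert MongoDB.decode_key(key) == "jenkins.int.xcalar.com"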
Example #26
        jbi = JenkinsBuildInfo(job_name=job_name,
                               build_number=build_number,
                               japi=self,
                               test_data=test_data)
        self.build_info_cache[key] = jbi
        self.logger.debug("return info: {}".format(jbi))
        return jbi


if __name__ == '__main__':
    print("Compile check A-OK!")

    cfg = EnvConfiguration({
        'LOG_LEVEL': {
            'default': logging.INFO
        },
        'JENKINS_HOST': {
            'default': 'jenkins.int.xcalar.com'
        }
    })

    # It's log, it's log... :)
    logging.basicConfig(
        level=cfg.get('LOG_LEVEL'),
        format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
        handlers=[logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    japi = JenkinsApi(host=cfg.get('JENKINS_HOST'))
    job_name = "DailyTests-Trunk"
    build_number = "545"
    jbi = japi.get_build_info(job_name=job_name, build_number=build_number)
Example #27
import logging
import os
import pytz
import random
import re
import statistics
import sys
import time

if __name__ == '__main__':
    sys.path.append(os.environ.get('XLRINFRADIR', ''))

from py_common.env_configuration import EnvConfiguration
from coverage.xce_func_test_coverage import XCEFuncTestCoverageData
from coverage.xd_unit_test_coverage import XDUnitTestCoverageData

cfg = EnvConfiguration({'LOG_LEVEL': {'default': logging.INFO}})

from flask import Flask, request, jsonify, json, abort
from flask_cors import CORS, cross_origin

# It's log, it's log... :)
logging.basicConfig(
                level=cfg.get('LOG_LEVEL'),
                format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
                handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)

xce_coverage_data = XCEFuncTestCoverageData()
xd_coverage_data = XDUnitTestCoverageData()

mode_to_coverage_data = {
Example #28
from ubm_perf import UbmPerfResultsData
from ubm_perf import UbmPerfResults

# Classes to support a Grafana datasource server for visualization of
# XCE operators' micro-benchmark performance data (generated regularly,
# per build) to help identify performance regressions in operators.
#
# XXX: These classes are very similar to those in
#   sql_perf/grafana/sql_perf_datasource.py
# and in the future we should refactor the code shared between the two files.

# NOTE: UBM stands for MicroBenchmark (U for Micro), and a "ubm" is a single
# micro-benchmark, whose name would be the name of the test/operator: e.g.
# a ubmname would be "load" or "index", etc.

config = EnvConfiguration({"LOG_LEVEL": {"default": logging.INFO}})

logging.basicConfig(level=config.get("LOG_LEVEL"),
                    format="%(asctime)s - %(threadName)s - %(funcName)s - "
                           "%(levelname)s - %(message)s",
                    handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)

ubm_perf_results_data = UbmPerfResultsData()

app = Flask(__name__)

cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'

methods = ('GET', 'POST')
Example #29
import argparse
import logging
import os
import sys

if __name__ == '__main__':
    logger = logging.getLogger(__name__)
    xlrinfra = os.environ.get('XLRINFRADIR', '')
    logger.debug("XLRINFRADIR: {}".format(xlrinfra))
    sys.path.append(xlrinfra)

from coverage.clang import ClangCoverageDir
from py_common.env_configuration import EnvConfiguration

if __name__ == '__main__':

    cfg = EnvConfiguration({"LOG_LEVEL": {"default": logging.INFO}})
    logging.basicConfig(
        level=cfg.get("LOG_LEVEL"),
        format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
        handlers=[logging.StreamHandler()])
    logger = logging.getLogger(__name__)

    parser = argparse.ArgumentParser()
    parser.add_argument("--dir",
                        help="coverage directory to process",
                        dest='coverage_dirs',
                        action='append',
                        required=True)
    parser.add_argument("--out",
                        help="output directory to store merged results",
Example #30
        coverage = self._get_coverage_data(bnum=bnum)
        if not coverage:
            return None
        for key, data in coverage.items():
            url = MongoDB.decode_key(key)
            if filename.lower() in url.lower():
                return data.get('covered_pct', None)
        return None


if __name__ == '__main__':
    """
    Useful little utility to emit csv coverage for critical files from given build.
    """

    cfg = EnvConfiguration({"LOG_LEVEL": {"default": logging.ERROR}})
    logging.basicConfig(level=cfg.get("LOG_LEVEL"),
                        format="'%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
                        handlers=[logging.StreamHandler()])

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--bnum", help="build number", required=True)
    args = parser.parse_args()

    data = XDUnitTestCoverageData()
    for fname in data.filenames(bnum=args.bnum, group_name="Critical Files"):
        coverage = data.coverage(bnum=args.bnum, filename=fname)
        if coverage is not None:
            print("{0}: {1:.2f}".format(fname, data.coverage(bnum=args.bnum, filename=fname)))
        else: