Example #1
    def __init__(self, *, job_name):
        """
        Initializer

        Environment parameters:
            SQL_PERF_JOB_NAME:  Jenkins job name.
        """
        self.logger = logging.getLogger(__name__)
        self.job_name = job_name
        jmdb = JenkinsMongoDB()
        self.data = JenkinsJobDataCollection(job_name=self.job_name, jmdb=jmdb)
        self.meta = JenkinsJobMetaCollection(job_name=self.job_name, jmdb=jmdb)
        self.results_cache = {}
Example #2
    def __init__(self):
        """
        Initializer

        Environment parameters:
            UBM_PERF_JOB_NAME:  Jenkins job name.
        """
        self.logger = logging.getLogger(__name__)
        cfg = EnvConfiguration(UbmPerfResultsData.ENV_PARAMS)
        self.job_name = cfg.get("UBM_PERF_JOB_NAME")
        jmdb = JenkinsMongoDB()
        self.data = JenkinsJobDataCollection(job_name=self.job_name, jmdb=jmdb)
        self.meta = JenkinsJobMetaCollection(job_name=self.job_name, jmdb=jmdb)
        self.results_cache = {}
        self.jresults_cache = {}
Example #3
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        cfg = EnvConfiguration(XCEFuncTestCoverageData.ENV_PARAMS)
        job_name = cfg.get("XCE_FUNC_TEST_JOB_NAME")

        # XXXrs - This should NOT communicate directly with the DB, but
        #         should go through a REST client.
        jmdb = JenkinsMongoDB()
        self.data = JenkinsJobDataCollection(job_name=job_name, jmdb=jmdb)
        self.meta = JenkinsJobMetaCollection(job_name=job_name, jmdb=jmdb)

        # XXXrs - TEMPORARY (!?!) initialize every time with static configuration.
        #         Eventually, this configuration should be managed elsewhere.

        self.file_groups = FileGroups(meta=self.meta.coll)
        self.file_groups.reset()
        for name, files in XCEFuncTestCoverageData.XCE_FUNC_TEST_FILE_GROUPS.items():
            self.file_groups.append_group(name=name, files=files)
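
The first three examples share one construction pattern: resolve the Jenkins job name (from a keyword argument or an environment variable), open a single JenkinsMongoDB handle, and build the per-job data and meta collections from it. Below is a minimal sketch of that pattern; the MY_JOB_NAME variable, the MyJobData class, and the import paths are illustrative assumptions, while the constructor keywords (job_name=..., jmdb=...) come directly from the snippets above.

import logging

# Import paths are assumed for illustration; the snippets above do not show them.
from py_common.env_configuration import EnvConfiguration
from py_common.jenkins_aggregators import (JenkinsJobDataCollection,
                                           JenkinsJobMetaCollection)
from py_common.mongo import JenkinsMongoDB


class MyJobData(object):
    # Hypothetical environment parameter, following the ENV_PARAMS style seen in
    # Examples #8 and #11.
    ENV_PARAMS = {"MY_JOB_NAME": {"default": None}}

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        cfg = EnvConfiguration(MyJobData.ENV_PARAMS)
        self.job_name = cfg.get("MY_JOB_NAME")
        # One DB handle shared by both per-job collections.
        jmdb = JenkinsMongoDB()
        self.data = JenkinsJobDataCollection(job_name=self.job_name, jmdb=jmdb)
        self.meta = JenkinsJobMetaCollection(job_name=self.job_name, jmdb=jmdb)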
Example #4
    def __init__(self, *, job_name):
        pp_name = "{}_postprocessor".format(UbmTestGroupName)
        super().__init__(name=pp_name, job_name=job_name)
        self.logger = logging.getLogger(__name__)
        self.jmdb = JenkinsMongoDB()
        self.ubm_perf_results_data = UbmPerfResultsData()
        self.tgroups = self.ubm_perf_results_data.test_groups()
        # XXX: Restrict To address for now, replace with the appropriate
        # email alias, when confident of results
        cfg = EnvConfiguration(UbmPerfPostprocessor.ENV_PARAMS)
        self.urlprefix = "https://{}/job/{}/".format(cfg.get('JENKINS_HOST'),
                                                     self.job_name)
        self.regr_file = "{}/{}/{}".format(cfg.get('UBM_PERF_ARTIFACTS_ROOT'),
                                           self.job_name,
                                           RegressionAlertFileName)
        self.alert_template = \
            "Regression(s) detected in XCE benchmarks in build {}:\n{}\n\n" \
            "Please see console output at {} for more details"
        self.alert_email_subject = "Regression in XCE benchmarks!!"
        self.alljob = JenkinsAllJobIndex(jmdb=self.jmdb)
        self.ubm_num_prev_runs = int(cfg.get('UBM_PERF_REGR_NRUNS'))
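
As a quick illustration of how the two format strings above expand (all values here are hypothetical):

# Hypothetical values, for illustration only; the real ones come from
# EnvConfiguration and the Jenkins job.
jenkins_host = "jenkins.example.com"
job_name = "UbmPerfTest"
artifacts_root = "/artifacts/ubm"
regr_fname = "regressions.txt"

urlprefix = "https://{}/job/{}/".format(jenkins_host, job_name)
regr_file = "{}/{}/{}".format(artifacts_root, job_name, regr_fname)
# urlprefix == "https://jenkins.example.com/job/UbmPerfTest/"
# regr_file == "/artifacts/ubm/UbmPerfTest/regressions.txt"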
Example #5
    logger = logging.getLogger(__name__)

    argParser = argparse.ArgumentParser()
    argParser.add_argument("--job",
                           default=[],
                           type=str,
                           action='append',
                           dest='jobs',
                           help="only reset this (these) job(s)",
                           metavar="name")
    argParser.add_argument("--do_reset",
                           action="store_true",
                           help="do the reset")
    args = argParser.parse_args()

    jmdb = JenkinsMongoDB()
    db = jmdb.jenkins_db()
    for meta in db.job_meta_collection_names():
        if args.jobs:
            # Special knowledge...
            fields = meta.split('_')
            job = "_".join(fields[1:-1])
            if job not in args.jobs:
                continue

        if not args.do_reset:
            print("would reset: {}".format(meta))
            continue

        print("resetting {}".format(meta))
        db.collection(meta).find_one_and_delete({'_id': 'retry'})
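
The "Special knowledge" comment above appears to refer to the collection naming convention: meta collections are named job_<jobname>_meta, and (judging by Example #14 below) data collections are named job_<jobname>. A small sketch of that mapping, under that assumption:

def job_name_from_collection(coll_name):
    """Recover the Jenkins job name from a per-job collection name.

    Assumes data collections are named "job_<name>" and meta collections
    "job_<name>_meta", matching the splits used in Examples #5 and #14.
    """
    fields = coll_name.split('_')
    if coll_name.endswith('_meta'):
        return "_".join(fields[1:-1])   # job_<name>_meta -> <name>
    return "_".join(fields[1:])         # job_<name>      -> <name>

# e.g. job_name_from_collection("job_MyJob_meta") == "MyJob"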
Example #6
    argParser.add_argument('--end_date',
                           default=None,
                           type=str,
                           help='end date (YYYY-MM-DD) defaults to today')
    argParser.add_argument('--end_time',
                           default=None,
                           type=str,
                           help='end time (HH:MM:SS) defaults to 23:59:59')
    argParser.add_argument('--tz',
                           default="America/Los_Angeles",
                           type=str,
                           help='timezone for inputs')

    args = argParser.parse_args()

    jmdb = JenkinsMongoDB()

    if len(args.builds):
        if len(args.jobs) != 1:
            raise ValueError("If --bnum only one --job allowed")

        # Re-parse only specific builds from a job

        # validate the job/builds
        job_name = args.jobs[0]
        active_jobs = jmdb.active_jobs()
        if job_name not in active_jobs:
            raise ValueError("{} is not an active job".format(job_name))

        meta_coll = JenkinsJobMetaCollection(job_name=job_name, jmdb=jmdb)
        all_builds = meta_coll.all_builds()
Example #7
                    dest='update_jobs', action='append', default=[])
parser.add_argument("-p", help="path to test mode build info.json", metavar="path",
                    dest='test_data_path', default=None)
args = parser.parse_args()

test_mode = False
if len(args.test_builds):
    if len(args.update_jobs) != 1:
        parser.print_help()
        raise ValueError("To activate test mode, exactly one update_job (-j)"
                         " and at least one test_build (-b) are required")
    test_mode = True
    if not cfg.get('JENKINS_DB_NAME'):
        raise ValueError("test mode requires JENKINS_DB_NAME")

jmdb = JenkinsMongoDB()
logger.info("jmdb {}".format(jmdb))

try:
    # Clear any expired alerts
    AlertManager().clear_expired()
except Exception:
    logger.error("Exception while clearing expired alerts",
                 exc_info=True)

process_lock = None
try:
    aggregator_plugins = AggregatorPlugins()
    postprocessor_plugins = PostprocessorPlugins()

    jenkins_host = cfg.get('JENKINS_HOST')
Example #8
        'default': None
    },
    'JENKINS_DB_NAME': {
        'default': None
    }
})

# It's log, it's log... :)
logging.basicConfig(
    level=cfg.get('LOG_LEVEL'),
    format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)])
logger = logging.getLogger(__name__)

JMDB = JenkinsMongoDB()

job_data_collections = {}


def get_job_data_collection(*, job_name):
    if job_name not in job_data_collections:
        job_data_collections[job_name] = JenkinsJobDataCollection(
            job_name=job_name, jmdb=JMDB)
    return job_data_collections[job_name]


def write_data(*, outdir, year, month, day=None, data):
    for key, item in data.items():
        if day is not None:
            os.makedirs(os.path.join(outdir, year, month, day), exist_ok=True)
Example #9
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.jmdb = JenkinsMongoDB()
Example #10
class AlertManager(object):
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.jmdb = JenkinsMongoDB()

    def _set_alert(self,
                   *,
                   alert_group,
                   alert_id,
                   description,
                   severity,
                   ttl,
                   labels=None):
        self.logger.debug("alert_id {}".format(alert_id))
        self.logger.debug("description {}".format(description))
        self.logger.debug("severity {}".format(severity))
        self.logger.debug("labels {}".format(labels))
        self.logger.debug("ttl {}".format(ttl))

        registry = CollectorRegistry()

        label_names = ['description', 'severity']
        if labels is not None:
            label_names.extend(list(labels.keys()))
        else:
            labels = {}
        labels['description'] = description
        labels['severity'] = severity

        self.logger.debug("label_names: {}".format(label_names))
        self.logger.debug("labels: {}".format(labels))

        g = Gauge(alert_group, description, label_names, registry=registry)
        g.labels(**labels).set(1)
        push_to_gateway(CFG.get('PUSHGATEWAY_URL'),
                        job=alert_id,
                        registry=registry)
        self.jmdb.alert_ttl(alert_group=alert_group,
                            alert_id=alert_id,
                            ttl=ttl)

    def info(self,
             *,
             alert_group,
             alert_id,
             description,
             ttl=ONE_DAY,
             labels=None):
        args = locals()
        args.pop('self')
        args['severity'] = "info"
        self._set_alert(**args)

    def warning(self,
                *,
                alert_group,
                alert_id,
                description,
                ttl=ONE_DAY,
                labels=None):
        args = locals()
        args.pop('self')
        args['severity'] = "warning"
        self._set_alert(**args)

    def error(self,
              *,
              alert_group,
              alert_id,
              description,
              ttl=ONE_DAY,
              labels=None):
        args = locals()
        args.pop('self')
        args['severity'] = "error"
        self._set_alert(**args)

    def critical(self,
                 *,
                 alert_group,
                 alert_id,
                 description,
                 ttl=ONE_DAY,
                 labels=None):
        args = locals()
        args.pop('self')
        args['severity'] = "critical"
        self._set_alert(**args)

    def clear(self, *, alert_group, alert_id):
        self.logger.debug("alert_id {}".format(alert_id))
        delete_from_gateway(CFG.get('PUSHGATEWAY_URL'), job=alert_id)
        self.jmdb.alert_ttl(alert_group=alert_group,
                            alert_id=alert_id,
                            ttl=None)

    def clear_expired(self):
        for alert_group, alert_id in self.jmdb.alerts_expired():
            self.logger.debug("alert_group {} alert_id {}".format(
                alert_group, alert_id))
            self.clear(alert_group=alert_group, alert_id=alert_id)
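
A short usage sketch for the class above. It assumes CFG and PUSHGATEWAY_URL are configured as in the snippet; the group, id, and label values are illustrative only.

alerts = AlertManager()

# Raise (or refresh) a warning-severity alert; ttl defaults to ONE_DAY.
alerts.warning(alert_group="jenkins_aggregator",      # hypothetical group name
               alert_id="my_job_parse_failure",       # hypothetical alert id
               description="failed to parse a build",
               labels={"job": "my_job"})

# Periodically clear alerts whose ttl has lapsed (as done in Example #7).
alerts.clear_expired()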
Example #11
        'default': None
    },
    'JENKINS_DB_NAME': {
        'default': None
    }
})

# It's log, it's log... :)
logging.basicConfig(
    level=cfg.get('LOG_LEVEL'),
    format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)

JMDB = JenkinsMongoDB()
DB = JMDB.jenkins_db().db

if __name__ == "__main__":

    import argparse
    argParser = argparse.ArgumentParser()
    argParser.add_argument(
        '--outdir',
        required=True,
        type=str,
        help='path to directory where per-job data should be written')
    args = argParser.parse_args()

    os.makedirs(args.outdir, exist_ok=True)
Example #12
    def __init__(self, *, jenkins_host, job_name):
        super().__init__(name="default_postprocessor", job_name=job_name)
        self.logger = logging.getLogger(__name__)
        self.japi = JenkinsApi(host=jenkins_host)
        self.jmdb = JenkinsMongoDB()
        self.alljob = JenkinsAllJobIndex(jmdb=self.jmdb)
Example #13
    def __init__(self):
        super().__init__(pi_label="AGGREGATOR_PLUGINS")


class PostprocessorPlugins(Plugins):
    def __init__(self):
        super().__init__(pi_label="POSTPROCESSOR_PLUGINS")


# In-line "unit test"
if __name__ == '__main__':
    print("Compile check A-OK!")

    import pprint

    jmdb = JenkinsMongoDB()
    jaji = JenkinsAllJobIndex(jmdb=jmdb)
    now_ms = int(time.time()*1000)
    day_ms = 24*60*60*1000
    end_ms = now_ms-day_ms
    start_ms = end_ms-day_ms
    builds = jaji.builds_active_between(
                            start_time_ms=start_ms,
                            end_time_ms=end_ms)

    for build in builds['builds']:
        bstart_ms = build['start_time_ms']
        bend_ms = build['end_time_ms']
        if bstart_ms < start_ms and bend_ms < start_ms:
            raise Exception("EARLY: start {} end {} bstart {} bend {}"
                            .format(start_ms, end_ms, bstart_ms, bend_ms))
Example #14
    logging.basicConfig(
        level=cfg.get('LOG_LEVEL'),
        format="%(asctime)s - %(threadName)s - %(funcName)s - %(levelname)s - %(message)s",
        handlers=[logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    '''
    argParser = argparse.ArgumentParser()
    argParser.add_argument("--job", default=[], type=str, action='append', dest='jobs',
                                help="only reset this (these) job(s)", metavar="name")
    argParser.add_argument("--do_reset", action="store_true",
                                help="do the reset")
    args = argParser.parse_args()
    '''

    jmdb = JenkinsMongoDB()
    db = jmdb.jenkins_db()
    job_to_builds = {}
    for name in db.collection_names():
        if not name.startswith('job_'):
            continue
        if name.endswith('_meta'):
            continue

        fields = name.split('_')
        job = "_".join(fields[1:])

        coll = db.collection(name)
        for doc in coll.find({}):
            cores = doc.get('analyzed_cores', None)
            if cores: