def get_builds_data(days_limit=1):
    """Return builds statistics for builds within the last days_limit days.

    The result dict has keys: days_limit, num_builds, builds (mapping of
    job name -> list of build dicts) and current_time. Results are cached
    in memcache under MEMCACHE_BUILDS_KEY, keyed by days_limit.
    """
    mem_builds = memcache.get(MEMCACHE_BUILDS_KEY)
    if mem_builds and (days_limit in mem_builds):
        log.debug("Taking builds stats data from memcache (days_limit: %s) ..." % days_limit)
        data = mem_builds[days_limit]
        data["current_time"] = get_current_timestamp_str()
        return data
    log.debug("Builds stats data not in memcache, querying datastore "
              "(days_limit: %s) ..." % days_limit)
    # oldest timestamp still within the requested window
    cond = datetime.datetime.utcnow() - datetime.timedelta(days=days_limit)
    # order should be the same as BuildsStatistics.name, BuildsStatistics.ts
    # this will already be ordered by job name and then by build id (since keys are such)
    # now do reverse order so that newest appear first on the webpage
    query = BuildsStatisticsModel.query().order(-BuildsStatisticsModel.key)
    builds = query.fetch()  # returns list of builds, of BuildsStatistics objects
    # BadRequestError: The first sort property must be the same as the property to which the
    # inequality filter is applied.
    # In your query the first sort property is name but the inequality filter is on ts
    # -> do the timestamp filtering on my own ... (can't combine ordering and filtering
    # arbitrarily)
    data = dict(days_limit=days_limit, num_builds=0, builds={})
    # builds - dict keys - job names ; values: lists of all builds under that job name
    res_builds = {}
    for b in builds:
        # check if the build is not before days_limit days
        if b.ts < cond:
            continue
        res_build = copy.deepcopy(b.to_dict())
        del res_build["job_name"]
        # group by job name; setdefault replaces the try/except KeyError pattern
        res_builds.setdefault(b.name, []).append(res_build)
        data["num_builds"] += 1
    data["builds"] = res_builds
    if mem_builds:
        # would have been taken from memcache otherwise
        assert days_limit not in mem_builds
        mem_builds.update({days_limit: data})
        memcache.set(MEMCACHE_BUILDS_KEY, mem_builds)
    else:
        memcache.set(MEMCACHE_BUILDS_KEY, {days_limit: data})
    log.debug("Stored builds stats data in memcache (days_limit: %s)." % days_limit)
    data["current_time"] = get_current_timestamp_str()
    return data
def get_overview_data():
    """Return the overview data dict, preferring the memcache copy.

    Falls back to the datastore entity; when that is missing too, an
    empty OverviewModel is created in memory. A current_time entry is
    always added to the returned dict.
    """
    cached = memcache.get(MEMCACHE_OVERVIEW_KEY)
    if cached:
        cached["current_time"] = get_current_timestamp_str()
        return cached
    log.debug("OverviewModel not present in memcache, getting from datastore ...")
    # is already a Python object
    overview = OverviewModel.get_by_id(OVERVIEW_MODEL_ID_KEY)
    if overview is None:
        overview = OverviewModel()
        overview.data = {}
    result = overview.data
    result["current_time"] = get_current_timestamp_str()
    return result
def update_overview_check_running_builds(self):
    """
    Combines 2 actions - update overview data (info about currently
    running builds and checks them if they are not for too long in
    execution.
    Entry point.
    """
    log.info("Start update overview, check builds at '%s'" % get_current_timestamp_str())
    data = {
        "total_queued_jobs": self.get_total_queued_jobs(),
        "running_jobs": self.check_running_builds_get_info(),
    }
    # if there is no date on running jobs, add timestamp, it would be there otherwise
    if not data["running_jobs"]:
        data["retrieved_at"] = get_current_timestamp_str()
    OverviewModel.update_overview_data(data)
    log.debug("OverviewModel data updated:\n%s" % pprint.pformat(data))
    ActivitySummaryModel.increase_counters(which_counters=["overview_update_counter"])
    log.info("Finished update overview, check builds at '%s'" % get_current_timestamp_str())
def send_activity_summary(self):
    """Email the formatted activity summary, then reset its counters."""
    msg = "Sending activity summary email at %s ..." % get_current_timestamp_str()
    logging.info(msg)
    formatted_data = pprint.pformat(ActivitySummaryModel.get_data())
    body = "activity summary: " + "\n\n" + formatted_data
    send_email(subject="activity summary", body=body)
    # counters start over after each summary mail
    ActivitySummaryModel.reset()
    logging.info("Finished sending activity summary.")
    self.response.out.write(msg)
def check_running_build(self, job_name=None, current_build_id=-1):
    """
    Check duration of the build build.
    Dictionary resp is updated about actions performed in this method.

    Compares the build's running time against two thresholds (minutes):
    - hard (current_build_duration_threshold_hard): stop the build and email
    - soft (current_build_duration_threshold_soft): email only, no action
    Returns the resp dict describing what was checked/done.
    """
    resp = {}
    job = self.server.get_job(job_name)
    build = job.get_build(current_build_id)
    # get_timestamp returns this type of data, is in UTC:
    # datetime.datetime(2015, 3, 3, 19, 41, 56, tzinfo=<UTC>) (is not JSON serializable)
    ts = build.get_timestamp()
    resp["start_timestamp"] = get_localized_timestamp_str(ts)
    resp["retrieved_at"] = get_current_timestamp_str()
    console_url = "%s/job/%s/%s/console" % (self.jenkins_url, job_name, current_build_id)
    now = datetime.datetime.utcnow()
    # there is no timezone info, putting UTC (ts is aware, now must be too)
    duration = pytz.utc.localize(now) - ts
    # drop sub-second part for display, e.g. "1:23:45"
    duration_str = str(duration).split('.')[0]
    resp["duration"] = duration_str
    resp["stop_threshold_minutes"] = self.current_build_duration_threshold_hard
    resp["email_notification"] = False
    if duration.total_seconds() > self.current_build_duration_threshold_hard * 60:
        # hard threshold exceeded: actively stop the build
        stop_call_response, status = self.stop_running_build(job=job,
                                                            build_id=current_build_id)
        msg = (("Build '%s' has been running for more than %s minutes.\n"
                "duration: %s\nconsole output: %s\nstopping ... current status: %s") %
               (build, self.current_build_duration_threshold_hard,
                duration_str, console_url, status))
        resp["stop_call_response"] = stop_call_response
        resp["current_status"] = status
        resp["email_notification"] = True
    elif duration.total_seconds() > self.current_build_duration_threshold_soft * 60:
        # soft threshold exceeded: warn only
        msg = (("Build '%s' has been running for more than %s minutes.\n"
                "duration: %s\nconsole output: %s\n[soft threshold, no action taken]") %
               (build, self.current_build_duration_threshold_soft,
                duration_str, console_url))
        resp["email_notification"] = True
    if resp["email_notification"]:
        # msg is guaranteed bound here: both branches that set the flag build it
        log.warn(msg)
        formatted_data = pprint.pformat(resp)
        log.debug("build check response:\n%s" % formatted_data)
        subject = "long #%s %s" % (current_build_id, job_name)
        result = send_email(subject=subject, body=msg + "\n\n" + formatted_data)
        if result:
            ActivitySummaryModel.increase_counters(which_counters=["sent_emails_counter"])
    return resp
def update_builds_stats(self):
    """
    Main task to run over job types and builds from the last one:
    retrieve information about a build and store into datastore
    if it has not been processed in the previous run of this routine.
    """
    log.info("Start update builds stats at '%s'" % get_current_timestamp_str())
    for job_name in self.job_names:
        job = self.server.get_job(job_name)
        # get_build_ids yields available build ids, most recent first
        for bid in job.get_build_ids():
            log.debug("Retrieving data on %s #%s ..." % (job_name, bid))
            build = job.get_build(bid)
            status = build.get_status()
            if not status:
                # build still in progress, skip it and look at older ones
                log.debug("%s #%s has not finished, status: %s, going to "
                          "another build ..." % (job_name, bid, status))
                continue
            # build is finished; stop at the first one that is already stored
            key_id = "%s-%s" % (job_name, bid)
            if BuildsStatisticsModel.get_by_id(key_id) is not None:
                log.debug("%s #%s is already stored, going to the "
                          "next job type ..." % (job_name, bid))
                break
            self.process_build_info_and_store(build=build,
                                              job_name=job_name,
                                              timestamp=build.get_timestamp(),
                                              build_id=bid,
                                              status=status)
    ActivitySummaryModel.increase_counters(which_counters=["builds_stats_update_counter"])
    # invalidate cached builds data so the next page load re-reads datastore
    memcache.set(MEMCACHE_BUILDS_KEY, None)
    log.info("Finished update builds stats at '%s'" % get_current_timestamp_str())
def builds_stats_init(self):
    """
    Build is one running test suite on jenkins for a given job type
    (project type). Iterate over projects and retrieve info on all
    builds going back to history, up to builds_history_init_limit.
    """
    log.info("Start builds stats init at '%s'" % get_current_timestamp_str())
    limit = self.builds_history_init_limit * 60  # get seconds from minutes
    # there is no timezone info, putting UTC
    utc_now = pytz.utc.localize(datetime.datetime.utcnow())
    for job_name in self.job_names:
        job = self.server.get_job(job_name)
        # get_build_ids yields available build ids, most recent first
        for count, bid in enumerate(job.get_build_ids(), start=1):
            if count == 1:
                # not interested in the very last one, may be running
                continue
            log.debug("Retrieving data on %s #%s (counter: %s) ..." %
                      (job_name, bid, count))
            build = job.get_build(bid)
            ts = build.get_timestamp()
            status = build.get_status()
            if (utc_now - ts).total_seconds() > limit:
                # not interested in builds older than history limit
                log.debug("Hit too old build, going to another job type ...")
                break
            self.process_build_info_and_store(build=build,
                                              job_name=job_name,
                                              timestamp=ts,
                                              build_id=bid,
                                              status=status)
    log.info("Finished builds stats init at '%s'" % get_current_timestamp_str())
def get_data():
    """Return the activity summary counters as a plain dict.

    Returns None when the ActivitySummaryModel entity does not exist.
    A current_time entry is always added to the returned dict.
    """
    entity = ActivitySummaryModel.get_by_id(ACTIVITY_SUMMARY_MODEL_ID_KEY)
    if entity is None:
        return None
    result = {
        "overview_update_counter_total": entity.overview_update_counter_total,
        "overview_update_counter": entity.overview_update_counter,
        "sent_emails_counter_total": entity.sent_emails_counter_total,
        "sent_emails_counter": entity.sent_emails_counter,
        "stopped_builds_counter": entity.stopped_builds_counter,
        "stopped_builds_counter_total": entity.stopped_builds_counter_total,
        "builds_stats_update_counter": entity.builds_stats_update_counter,
        "builds_stats_update_counter_total": entity.builds_stats_update_counter_total,
        "builds_statistics_model_last_update_at": get_localized_timestamp_str(
            entity.builds_statistics_model_last_update_at),
    }
    result["current_time"] = get_current_timestamp_str()
    return result
def initialization():
    """
    Initialize datastore types.

    Creates the ActivitySummaryModel singleton if missing and, when no
    build statistics exist yet, defers the builds history init task.
    Returns the status message that was logged.
    """
    msg = "Initialization run at %s ..." % get_current_timestamp_str()
    log.info(msg)
    if ActivitySummaryModel.get_data() is None:
        log.debug("ActivitySummaryModel initialization ...")
        activity = ActivitySummaryModel(id=ACTIVITY_SUMMARY_MODEL_ID_KEY)
        activity.put()
        log.debug("Finished ActivitySummaryModel initialization.")
    else:
        log.debug("ActivitySummaryModel is already initialized.")
    # existence check: fetch at most 1 key instead of materializing every key
    if not BuildsStatisticsModel.query().fetch(1, keys_only=True):
        # run the (potentially long) history init in a background task
        deferred.defer(get_jenkins_instance().builds_stats_init)
        log.debug("Finished BuildsStatisticsModel initialization.")
    else:
        log.debug("BuildStatisticsModel is already initialized.")
    return msg
def update_overview_check_running_builds(self):
    """Handler entry: defer the overview update / build check task."""
    status_msg = "Running task at %s ..." % get_current_timestamp_str()
    logging.info(status_msg)
    # run the actual work in a background task, not in this request
    deferred.defer(get_jenkins_instance().update_overview_check_running_builds)
    self.response.out.write(status_msg)