def run_rounds(self):
    """Run evolution rounds until ``should_break()`` reports stabilization.

    Each round: evaluate every candidate, sort the pool by score
    (descending), record stats (top score, top five, average), log
    sparkline summaries, then cull the pool and generate a new one.

    Side effects: mutates ``self.candidates``, ``self.top_scores``,
    ``self.top_five_scores`` and ``self.avg_scores``; clears the console
    and logs each round.
    """
    logger.info('Running rounds')
    _run = 0
    while True:
        os.system(clear_console)
        logger.info('Evaluating Candidates.')
        ### YOU CAN OVERRIDE EVALUATE_CANDIDATE with your own Fitness Function
        # list() so sum()/len() below work even when map() returns an
        # iterator (Python 3), and so all evaluation side effects happen
        # before the sort. No-op under Python 2.
        raw_scores = list(map(self.evaluate_candidate, self.candidates))
        # Sort candidates by score, best first.
        self.candidates.sort(key=lambda x: x.score, reverse=True)
        # Update Top Scorers tally.
        # BUG FIX: the slice was [0:4], which keeps only FOUR candidates
        # even though the list is named top_five_scores and the summary
        # below logs "Top Five Candidates". [0:5] keeps five.
        self.top_five_scores.append(self.candidates[0:5])  # already sorted
        self.top_scores.append(self.candidates[0].score)
        #FOR DEBUG
        # str_scores = ','.join(map(lambda x: str(x.score), self.candidates))
        # logger.debug('Run #%s, Score_Summary=%s' % (_run, str_scores))
        avg_score = sum(raw_scores, 0.0) / len(raw_scores)
        self.avg_scores.append(avg_score)
        logger.info('Run #%s, Average_Score=%s' % (_run, avg_score))
        logger.info('Average scores over all last runs:: %s' % spark_string(self.avg_scores, True))
        logger.info('Top score over all last runs:: %s' % spark_string(self.top_scores, True))
        logger.info('Scores for this run:: %s' % spark_string(map(lambda x: x.score, self.candidates)))
        if self.should_break(self.candidates, _run):
            logger.info('=======================================')
            logger.info('Scores Stabilized.')
            logger.info('Summary Data: %s' % self.summary_data)
            logger.info('=======================================')
            logger.debug('Summary of each candidate in this pool:')
            for candidate in self.candidates:
                logger.debug('%s' % candidate.__unicode__())
            logger.debug('=================================')
            logger.info('Top Five Candidates + scores %s' % self.top_five_scores[-1])
            logger.info('Best candidate DNA is: %s' % ''.join(self.top_five_scores[-1][0].DNA))
            logger.info('=================================')
            break
        logger.debug('Culling pool')
        self.cull_bad_candidates()
        logger.debug('Generating new generation')
        self.candidates = self.generate_new_pool()
        _run = _run + 1
def print_stats(stats): for stat in stats: stat_name = stat['key'] if stat_name == 'free': continue stat_values = stat['values'] stat_values.reverse() #print '- %s: %s' % (stat_name, stat_values) values = [v[1] for v in stat_values[:35]] values.reverse() #print '- %s:' % stat_name print '%18s: %s' % (stat_name, spark_string(values))
# For each log level, query Elasticsearch for one count per hour over the
# last 12 hours plus a trailing 5-minute window, then print a one-line
# sparkline summary per level.
for loglevel in ['FATAL', 'ERROR', 'WARN', 'INFO']:
    results = []
    # One bucket per hour, from 12 hours ago up to and including now.
    for hour in range(-12, 1):
        now = datetime.utcnow() + timedelta(hours=hour)
        logging.debug(now.isoformat())
        q = build_loglevel_query(loglevel, now)
        post = requests.post('http://inw-48.rfiserve.net:9200/{0}/_search'.format(index), data=json.dumps(q))
        logging.debug(pformat(post.request.url))
        logging.info(pformat(post.json))
        # NOTE(review): `post.json` as a property is the old `requests`
        # API; modern versions require `post.json()` -- confirm the
        # pinned requests version before upgrading.
        results.append(post.json['hits']['total'])
    # Separate query: hit count for just the last 5 minutes.
    now = datetime.utcnow() - timedelta(minutes=5)
    logging.debug(now.isoformat())
    q = build_loglevel_query(loglevel, now, delta=timedelta(minutes=5))
    post = requests.post('http://inw-48.rfiserve.net:9200/{0}/_search'.format(index), data=json.dumps(q))
    logging.debug(pformat(post.request.url))
    logging.info(pformat(post.json))
    last_5 = post.json['hits']['total']
    # Hour-over-hour deltas. NOTE(review): `diff` is built but never
    # used in the print below -- dead code or a dropped column?
    diff = map(lambda x: x[0]-x[1], zip(results[1:], results))
    diff = ','.join(map(str, diff))
    # Fields: level, sparkline of hourly counts, latest hour's count,
    # 13-point average, per-hour-ish average (/12), last-5-minute count.
    print u"{0}:\t{2} {1} ({3:.1f})({4:.1f}) {5}".format(loglevel, results[-1], spark.spark_string(results, fit_min=True), numpy.average(results), numpy.average(results)/12, last_5)