def exec_task(self, auth, ctx): LOGGER.debug('entered in excec_task for Purging task') MetricsService().purging_for_all_nodes(auth) #purge the task results #no need to catch exception since task service will log #and rollback in case of an exception import tg from datetime import datetime, timedelta purge_interval = tg.config.get("task_results_purge_interval") cutoff_date = datetime.utcnow() + timedelta(days=-int(purge_interval)) DBSession.query(TaskResult).\ filter(TaskResult.timestamp <= cutoff_date).\ delete() #also purge the non-repeating tasks that were submitted long time #ago limit = 5000 try: limit=int(tg.config.get(constants.TASK_PURGE_COUNT)) except Exception, e: print "Exception: ", e
def exec_task(self, auth, ctx):
    """Run the time-basis metric rollup across all nodes."""
    LOGGER.debug('entered in excec task for TimeBasisRollupForNodes task')
    service = MetricsService()
    service.timebasis_rollup_for_all_nodes(auth)
except Exception, e: print "Error while scanning the image store ", e try: storage_stats_data_upgrade() except Exception, e: print "Error while recomputing storage stats ", e try: unreserve_disks_on_cms_start() except Exception, e: print "Error while unreserving storage disks ", e #start the services thread #maker should already have been configured by calling init_model sc = ServiceCentral(zopelessmaker) sc.start() atexit.register(sc.quit) base_config.convirt_service_central = sc MetricsService().init_mappers() Node.use_bash_timeout = eval(tg.config.get("use_bash_timeout")) Node.default_bash_timeout = tg.config.get("bash_default_timeout") Node.bash_dir = os.path.join(tg.config.get('convirt_cache_dir'), 'common/scripts') Node.local_bash_dir = tg.config.get("common_script") # Wrap your base TurboGears 2 application with custom middleware here try: pass # add_deployment_stats_task() except Exception, e: print "Error while adding deployment stats task", e return app
class TopCache(GenericCache):
    """Per-user cache of "top N" metric query results.

    Entries are stored per user, keyed by
    (node_id, node_type, metric, top_type), and carry an expiry timestamp
    ("cached_time") plus the raw result rows ("value").
    """

    # Shared metric-query backend for all TopCache instances.
    service = MetricsService()

    def get_top_entities(self, node_id, node_type, metric, top_type, auth,
                         metric_type, ids, date1, date2):
        """Return the top-entity rows for one user, refreshing the cache as needed.

        A cached entry is reused only while it has not expired and every
        cached entity id is still present in *ids*; otherwise the metric
        service is re-queried and the entry replaced.
        """
        now = datetime.utcnow()
        user_id = auth.user.user_id
        top_cache = self.get_top_value(user_id)
        cache_key = (node_id, node_type, metric, top_type)

        # Decide whether the cached entry (if any) can be reused.
        refresh = True
        if cache_key in top_cache:
            # row[1] is assumed to be the entity id in each result row
            # — matches how diff_list was built originally.
            cached_ids = [row[1] for row in top_cache[cache_key].get("value")]
            stale_ids = [item for item in cached_ids if item not in ids]
            cached_time = top_cache[cache_key].get("cached_time")
            # FIX(review): the original set the flag to True on BOTH
            # branches, so a valid cache entry was never actually reused.
            # Reuse it while unexpired and its ids are all still requested.
            refresh = (now > cached_time) or len(stale_ids) > 0

        if refresh:
            # Query the service and store the result with a fresh expiry.
            cache_time = now + timedelta(
                minutes=int(tg.config.get(constants.CACHE_TIME)))
            data_list = self.service.getRawTopMetric(ids, metric, metric_type,
                                                     date1, date2, "DESC", 5)
            if len(data_list) > 0:
                self.check_cache_limit(top_cache)
                top_cache[cache_key] = {"cached_time": cache_time,
                                        "value": data_list}

        if cache_key in top_cache:
            # Guard: an empty query result with no pre-existing entry used
            # to raise KeyError on this access.
            top_cache[cache_key]["last_accessed"] = now
            self.user_cache[user_id].update({cache_key: top_cache[cache_key]})

        # Empty out entries that should have been removed by entity
        # operations but were not, so they get dropped below.
        if len(ids) == 0 and user_id in self.user_cache:
            user = self.user_cache[user_id]
            if cache_key in user:
                self.user_cache[user_id][cache_key]["value"] = []

        entry = self.user_cache.get(user_id, {}).get(cache_key)
        if entry is None:
            # Guard: the original read self.user_cache[user_id][cache_key]
            # unconditionally and could raise KeyError here.
            return []
        usage_list = entry.get("value", [])
        if len(usage_list) == 0:
            # Drop exhausted entries so they are re-queried next time.
            del self.user_cache[user_id][cache_key]
        return usage_list

    def get_top_value(self, user_id):
        """Return (creating if necessary) the cache dict for *user_id*."""
        if user_id not in self.user_cache:
            self.user_cache[user_id] = {}
        return self.user_cache.get(user_id, {})

    def delete_usercache(self, auth):
        """Drop the entire cache for the authenticated user."""
        user_id = auth.user.user_id
        if user_id in self.user_cache:
            del self.user_cache[user_id]
def __init__(self):
    """Initialize service handles; the UTC offset is filled in later."""
    # Not known at construction time; resolved elsewhere on demand.
    self.utcoffset = None
    self.service = MetricsService()
    self.manager = Basic.getGridManager()