def queryConsumer(queryQueue="query_default"):
    """Consume one query-workload message from ``queryQueue`` and activate it.

    Only a single query workload can be active at a time: when a new
    message arrives, the currently active workload (if any) is
    deactivated and the new one is flagged active so the queryRunner
    task picks it up. If the message carries an 'rcq' reply-queue key,
    a confirmation string is sent back to the cli.
    """
    rabbitHelper = queryConsumer.rabbitHelper
    queryQueueSize = rabbitHelper.qsize(queryQueue)

    # for cli retrieve currently active query workload
    # since multi-query is not supported here
    active_query = None
    all_queries = CacheHelper.active_queries()
    if len(all_queries) > 0:
        active_query = all_queries[0]

    if queryQueueSize > 0:

        # setup new query workload from queued message
        queryMsg = rabbitHelper.getJsonMsg(queryQueue)
        # fix: this is a routine trace of the incoming message, not a
        # failure — log at info level instead of error
        logger.info(queryMsg)

        try:
            queryWorkload = QueryWorkload(queryMsg)

            # deactivate old query workload
            if active_query is not None:
                active_query.active = False

            # activate new query workload
            # to be detected in queryRunner task
            queryWorkload.active = True

            # acknowledge to the caller's reply queue, if one was given
            if 'rcq' in queryMsg:
                rabbitHelper.putMsg(queryMsg['rcq'],
                                    "Started Querying: %s/%s" %
                                    (queryWorkload.ddoc, queryWorkload.view))

        except KeyError:
            logger.info("Invalid query workload message: %s" % queryMsg)
def queryConsumer(queryQueue="query_default"):
    """Pull the next queued query-workload message and make it the one
    active workload, replying to the sender's callback queue on request."""
    helper = queryConsumer.rabbitHelper
    pending = helper.qsize(queryQueue)

    # only one query workload may run at a time; look up the one that is
    # currently active (if any) so it can be swapped out
    current = None
    existing = CacheHelper.active_queries()
    if len(existing) > 0:
        current = existing[0]

    if pending > 0:

        # construct a new workload from the next queued message
        msg = helper.getJsonMsg(queryQueue)
        logger.error(msg)

        try:
            workload = QueryWorkload(msg)

            # swap workloads: retire the previous one, then flag the new
            # one active so the queryRunner task starts driving it
            if current is not None:
                current.active = False
            workload.active = True

            # reply to the cli's callback queue when one was supplied
            if 'rcq' in msg:
                helper.putMsg(msg['rcq'],
                              "Started Querying: %s/%s" %
                              (workload.ddoc, workload.view))

        except KeyError:
            logger.info("Invalid query workload message: %s" % msg)
def queryRunner():
    """Dispatch one round of async view queries for every active workload."""
    hosts = None
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")
    if clusterStatus:
        hosts = clusterStatus.get_all_hosts()

    # retrieve all active query workloads and fan out their queries
    for query in CacheHelper.active_queries():

        # async update query workload object
        updateQueryWorkload.apply_async(args=[query])

        count = int(query.qps)
        # effective filters = include set minus exclude set
        filters = list(set(query.include_filters) -
                       set(query.exclude_filters))
        params = generateQueryParams(query.indexed_key, query.bucket,
                                     filters, query.limit,
                                     query.startkey, query.endkey,
                                     query.startkey_docid,
                                     query.endkey_docid)
        multi_query.delay(count, query.ddoc, query.view, params,
                          query.bucket, query.password, hosts=hosts)
def queryRunner():
    """Kick off asynchronous view queries for each active query workload."""
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")
    hosts = clusterStatus.get_all_hosts() if clusterStatus else None

    queries = CacheHelper.active_queries()
    for workload in queries:

        # refresh the cached workload object asynchronously
        updateQueryWorkload.apply_async(args=[workload])

        qps = int(workload.qps)
        # drop any filter that is explicitly excluded
        active_filters = list(
            set(workload.include_filters) - set(workload.exclude_filters))
        view_params = generateQueryParams(
            workload.indexed_key, workload.bucket, active_filters,
            workload.limit, workload.startkey, workload.endkey,
            workload.startkey_docid, workload.endkey_docid)
        multi_query.delay(qps, workload.ddoc, workload.view, view_params,
                          workload.bucket, workload.password, hosts=hosts)
def queryRunner():
    """Fire one second's worth of view queries for every active workload."""
    # retrieve all active query workloads
    for workload in CacheHelper.active_queries():
        per_second = int(workload.qps)
        # stale=update_after: serve results now, refresh the index after
        multi_query.delay(per_second,
                          workload.ddoc,
                          workload.view,
                          {"stale": "update_after"},
                          workload.bucket,
                          workload.password)
def queryRunner():
    """Schedule async view queries for all currently active workloads."""
    queries = CacheHelper.active_queries()
    for q in queries:
        # query with stale=update_after so the view index refreshes
        # after each request is served
        params = {"stale": "update_after"}
        multi_query.delay(int(q.qps), q.ddoc, q.view, params,
                          q.bucket, q.password)
def queryRunner(max_msgs=10):
    """Dispatch queries for active workloads, or throttle instead when
    the pending multi-query request queue backs up past ``max_msgs``."""
    rabbitHelper = queryRunner.rabbitHelper

    # queue holding http query requests not yet executed
    pending_http_requests = "query_multi_" + cfg.CB_CLUSTER_TAG

    if rabbitHelper.qsize(pending_http_requests) > max_msgs:
        # cluster cannot keep up: drop the backlog and reduce qps
        rabbitHelper.purge(pending_http_requests)
        query_ops_manager(max_msgs, True)
        return

    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")
    hosts = clusterStatus.get_all_hosts() if clusterStatus else None

    # retrieve all active query workloads
    for workload in CacheHelper.active_queries():

        # async update of the cached workload object
        updateQueryWorkload.apply_async(args=[workload])

        qps = int(workload.qps)
        filters = list(set(workload.include_filters) -
                       set(workload.exclude_filters))
        params = generateQueryParams(workload.indexed_key, workload.bucket,
                                     filters, workload.limit,
                                     workload.startkey, workload.endkey,
                                     workload.startkey_docid,
                                     workload.endkey_docid)
        multi_query.delay(qps, workload.ddoc, workload.view, params,
                          workload.bucket, workload.password, hosts=hosts)
def query_ops_manager(max_msgs=10, isovercommited=False):
    """Throttle active query workloads whose task queues are overloaded.

    When a workload's task queue holds more than ``max_msgs`` messages
    (or the caller already detected overcommit), the queue is purged and
    the workload's qps is cut by 10%, with a floor of 10 queries/sec.
    """
    helper = query_ops_manager.rabbitHelper

    # walk every active query workload
    for workload in CacheHelper.active_queries():

        overloaded = helper.qsize(workload.task_queue) > max_msgs
        if not (overloaded or isovercommited):
            continue

        # drop any tasks already waiting in the overloaded queue
        helper.purge(workload.task_queue)

        # back off throughput by 10%, but never go below 10 qps
        throttled = workload.qps * 0.90
        if throttled > 10:
            workload.qps = throttled
            logger.error("Cluster Overcommited: reduced queries/sec to (%s)"
                         % workload.qps)
def queryRunner(max_msgs=10):
    """Run active query workloads unless the pending request queue is
    overcommitted; in that case purge it and throttle qps instead."""
    helper = queryRunner.rabbitHelper

    # queue of http query requests that have not been served yet
    pending = "query_multi_" + cfg.CB_CLUSTER_TAG
    if helper.qsize(pending) > max_msgs:
        # too many unserved requests: clear the backlog and back off
        helper.purge(pending)
        query_ops_manager(max_msgs, True)
    else:
        hosts = None
        status = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")
        if status:
            hosts = status.get_all_hosts()

        # retrieve all active query workloads
        for q in CacheHelper.active_queries():

            # push an async refresh of the workload object
            updateQueryWorkload.apply_async(args=[q])

            effective_filters = list(
                set(q.include_filters) - set(q.exclude_filters))
            params = generateQueryParams(
                q.indexed_key, q.bucket, effective_filters, q.limit,
                q.startkey, q.endkey, q.startkey_docid, q.endkey_docid)
            multi_query.delay(int(q.qps), q.ddoc, q.view, params,
                              q.bucket, q.password, hosts=hosts)
def query_ops_manager(max_msgs=10, isovercommited=False):
    """Reduce qps on overloaded query workloads by purging their task
    queues and cutting throughput 10%, never dropping below 10 qps."""
    rabbit = query_ops_manager.rabbitHelper

    # retrieve all active query workloads
    queries = CacheHelper.active_queries()
    for q in queries:

        # a workload counts as overloaded when its queue backs up past
        # max_msgs, or when the caller already flagged overcommit
        if rabbit.qsize(q.task_queue) > max_msgs or isovercommited:

            # purge waiting tasks
            rabbit.purge(q.task_queue)

            # throttle down ops by 10%
            reduced = q.qps * 0.90

            # enforce the 10 queries/sec lower bound
            if reduced > 10:
                q.qps = reduced
                logger.error(
                    "Cluster Overcommited: reduced queries/sec to (%s)" %
                    q.qps)