def import_template(args):
    """Build a document template from CLI args and publish it to the cluster's template queue."""
    kv_data = None

    if args.type == "json":
        parsed = {}
        for kv in args.kvpairs:
            candidate = '{%s}' % kv
            try:
                parsed.update(json.loads(candidate))
            except ValueError:
                # abort without publishing anything if any pair is invalid json
                print("ERROR: Unable to encode as valid json: %s " % kv)
                print("make sure strings surrounded by double quotes")
                return
        kv_data = parsed

    #TODO binary blobs

    template = {
        "name": args.name,
        "ttl": args.ttl,
        "flags": args.flags,
        "cc_queues": args.cc_queues,
        "size": args.size,
        "kv": kv_data,
    }

    cluster = args.cluster
    rabbitHelper = RabbitHelper(args.broker, cluster)
    template['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("workload_template_" + cluster, json.dumps(template))
    receiveResponse(rabbitHelper, template['rcq'])
def run_workload(args):
    """Compose a workload spec from CLI args, publish it to the cluster's workload
    queue, and block until the worker responds on the response queue.
    """
    workload = {}

    # idiom fix: compare to None with `is not` rather than `!=`
    if args.name is not None:
        # TODO: read in workload params from saved store
        # workload.update(cached_workload)
        pass

    if args.wait is not None:
        # normalize the user-supplied wait spec into seconds
        args.wait = conv_to_secs(args.wait)

    workload = {
        "bucket": args.bucket,
        "password": args.password,
        "ops_per_sec": args.ops,
        "create_perc": args.create,
        "update_perc": args.update,
        "get_perc": args.get,
        "del_perc": args.delete,
        "exp_perc": args.expire,
        "miss_perc": args.miss,
        "ttl": args.ttl,
        "cc_queues": args.cc_queues,
        "consume_queue": args.consume_queue,
        "postconditions": args.postcondition,
        "preconditions": args.precondition,
        "wait": args.wait,
        "template": args.template,
    }

    cluster = args.cluster
    rabbitHelper = RabbitHelper(args.broker)
    workload['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("workload_" + cluster, json.dumps(workload))
    receiveResponse(rabbitHelper, workload['rcq'])
def requeueNonDeletedKeys(self):
    """Requeue pending delete keys so another workload can delete them, then
    tear down this workload's task/consume queues (best-effort).
    """
    rabbitHelper = RabbitHelper()
    task_type = 'app.sdk_client_tasks.mdelete'

    # requeue pending delete keys so that they may be deleted in another workload
    while rabbitHelper.qsize(self.task_queue) > 0:
        task_set = rabbitHelper.getJsonMsg(self.task_queue)
        if len(task_set) > 0:
            keys = [task['args'] for task in task_set
                    if task['task'] == task_type]
            if len(keys) > 0:
                # put back on to consume_queue
                msg = json.dumps(keys[0][0])
                rabbitHelper.putMsg(self.consume_queue, msg)

    try:
        # delete task queue
        rabbitHelper.delete(self.task_queue)

        # delete consume queue if it was a miss_queue
        if self.miss_queue is not None and self.consume_queue is not None:
            rabbitHelper.delete(self.consume_queue)
    except Exception:
        # queue teardown is deliberately best-effort; ignore broker errors
        # (was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt)
        pass
def run_workload(args):
    """Publish a workload definition built from CLI args and wait for the worker's reply."""
    if args.name is not None:
        # TODO: read in workload params from saved store
        # workload.update(cached_workload)
        pass

    if args.wait is not None:
        args.wait = conv_to_secs(args.wait)

    workload = dict(bucket=args.bucket,
                    password=args.password,
                    ops_per_sec=args.ops,
                    create_perc=args.create,
                    update_perc=args.update,
                    get_perc=args.get,
                    del_perc=args.delete,
                    exp_perc=args.expire,
                    miss_perc=args.miss,
                    ttl=args.ttl,
                    cc_queues=args.cc_queues,
                    consume_queue=args.consume_queue,
                    postconditions=args.postcondition,
                    preconditions=args.precondition,
                    wait=args.wait,
                    template=args.template)

    cluster = args.cluster
    rabbitHelper = RabbitHelper(args.broker, cluster)
    workload['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("workload_" + cluster, json.dumps(workload))
    receiveResponse(rabbitHelper, workload['rcq'])
def run_workload(args):
    """Build a workload spec (with wait/expires normalized to seconds) and
    publish it to the shared "workload" queue; fire-and-forget, no response.
    """
    workload = {}

    # idiom fix: compare to None with `is not` rather than `!=`
    if args.name is not None:
        # TODO: read in workload params from saved store
        # workload.update(cached_workload)
        pass

    if args.wait is not None:
        args.wait = conv_to_secs(args.wait)

    if args.expires is not None:
        args.expires = conv_to_secs(args.expires)

    workload = {
        "bucket": args.bucket,
        "ops_per_sec": args.ops,
        "create_perc": args.create,
        "update_perc": args.update,
        "get_perc": args.get,
        "del_perc": args.delete,
        "cc_queues": args.cc_queues,
        "consume_queue": args.consume_queue,
        "postconditions": args.postcondition,
        "preconditions": args.precondition,
        "wait": args.wait,
        "expires": args.expires,
        "template": args.template,
    }

    rabbitHelper = RabbitHelper(args.broker)
    rabbitHelper.putMsg("workload", json.dumps(workload))
def run_workload(args):
    """Send a fire-and-forget workload spec to the shared "workload" queue."""
    if args.name is not None:
        # TODO: read in workload params from saved store
        # workload.update(cached_workload)
        pass

    # normalize time specs to seconds before serializing
    if args.wait is not None:
        args.wait = conv_to_secs(args.wait)
    if args.expires is not None:
        args.expires = conv_to_secs(args.expires)

    fields = (("bucket", args.bucket),
              ("ops_per_sec", args.ops),
              ("create_perc", args.create),
              ("update_perc", args.update),
              ("get_perc", args.get),
              ("del_perc", args.delete),
              ("cc_queues", args.cc_queues),
              ("consume_queue", args.consume_queue),
              ("postconditions", args.postcondition),
              ("preconditions", args.precondition),
              ("wait", args.wait),
              ("expires", args.expires),
              ("template", args.template))
    workload = dict(fields)

    rabbitHelper = RabbitHelper(args.broker)
    rabbitHelper.putMsg("workload", json.dumps(workload))
def flushq(self, flush_hotkeys=False):
    """Flush the in-memory message queue to the remote cc queue.

    When flush_hotkeys is True and hotkey batches are pending, the first/last
    batch boundaries are forwarded to the consume queue (or the cc queue) so a
    follow-up workload can operate on that key range.
    """
    mq = RabbitHelper()

    if self.ccq is not None:
        logging.info("[Thread %s] flushing %s items to %s" %
                     (self.name, self.memq.qsize(), self.ccq))

        # declare queue
        mq.declare(self.ccq)

        # empty the in memory queue
        while self.memq.empty() == False:
            try:
                msg = self.memq.get_nowait()
                msg = json.dumps(msg)
                mq.putMsg(self.ccq, msg)
            except queue.Empty:
                pass

    # hot keys
    if flush_hotkeys and (len(self.hotkey_batches) > 0):
        # BUG FIX: the original assigned this to a local named `queue`, which
        # shadowed the stdlib `queue` module for the whole function and made
        # the `queue.Empty` handler above raise UnboundLocalError.
        target_queue = self.consume_queue or self.ccq
        if target_queue is not None:
            key_map = {'start': self.hotkey_batches[0][0],
                       'end': self.hotkey_batches[-1][-1]}
            msg = json.dumps(key_map)
            mq.putMsg(target_queue, msg)
        self.hotkey_batches = []
def import_template(args):
    """Build a document template from CLI args and publish it to the shared
    "workload_template" queue (fire-and-forget).
    """
    val = None

    if args.type == "json":
        json_val = {}
        for kv in args.kvpairs:
            pair = '{%s}' % kv
            try:
                pair = json.loads(pair)
                json_val.update(pair)
            except ValueError:
                # print() call form works on both Python 2 and 3; the original
                # used the Python-2-only print statement
                print("ERROR: Unable to encode as valid json: %s " % kv)
                print("make sure strings surrounded by double quotes")
                return
        val = json_val

    #TODO binary blobs

    template = {
        "name": args.name,
        "ttl": args.ttl,
        "flags": args.flags,
        "cc_queues": args.cc_queues,
        "size": args.size,
        "kv": val,
    }

    rabbitHelper = RabbitHelper(args.broker)
    rabbitHelper.putMsg("workload_template", json.dumps(template))
def perform_query_tasks(args):
    """Send a view-query task description to the cluster's query queue and await a reply."""
    queryMsg = dict(queries_per_sec=args.queries_per_sec,
                    ddoc=args.ddoc,
                    view=args.view,
                    bucket=args.bucket,
                    password=args.password)

    cluster = args.cluster
    rabbitHelper = RabbitHelper(args.broker)
    queryMsg['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg('query_' + cluster, json.dumps(queryMsg))
    receiveResponse(rabbitHelper, queryMsg['rcq'])
def perform_admin_tasks(args):
    """Publish cluster admin actions (rebalance/failover/restart flags) to the
    shared "admin_tasks" queue.
    """
    actions = {'rebalance_in': args.rebalance_in,
               'rebalance_out': args.rebalance_out,
               'failover': args.failover,
               'soft_restart': args.soft_restart,
               'hard_restart': args.hard_restart,
               'only_failover': args.only_failover}

    #TODO: Validate the user inputs, before passing to rabbit
    # fixed: Python-2 `print actions` statement -> call form valid in 2 and 3
    print(actions)

    rabbitHelper = RabbitHelper(args.broker)
    rabbitHelper.putMsg("admin_tasks", json.dumps(actions))
def perform_admin_tasks(args):
    """Send rebalance/failover/restart admin actions to the shared "admin_tasks" queue."""
    actions = {
        'rebalance_in': args.rebalance_in,
        'rebalance_out': args.rebalance_out,
        'failover': args.failover,
        'soft_restart': args.soft_restart,
        'hard_restart': args.hard_restart,
        'only_failover': args.only_failover,
    }

    #TODO: Validate the user inputs, before passing to rabbit
    # fixed: Python-2 `print actions` statement -> call form valid in 2 and 3
    print(actions)

    rabbitHelper = RabbitHelper(args.broker)
    rabbitHelper.putMsg("admin_tasks", json.dumps(actions))
def perform_admin_tasks(args):
    """Queue admin actions for the target cluster and block until the worker responds."""
    actions = {'rebalance_in': args.rebalance_in,
               'rebalance_out': args.rebalance_out,
               'failover': args.failover,
               'soft_restart': args.soft_restart,
               'hard_restart': args.hard_restart,
               'only_failover': args.only_failover}

    cluster = args.cluster

    #TODO: Validate the user inputs, before passing to rabbit
    # consistency: the other cluster-aware task publishers in this file pass
    # the cluster tag to RabbitHelper alongside the broker
    rabbitHelper = RabbitHelper(args.broker, cluster)
    actions['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("admin_" + cluster, json.dumps(actions))
    receiveResponse(rabbitHelper, actions['rcq'])
def perform_xdcr_tasks(args):
    """Publish an XDCR setup request (destination cluster credentials and
    replication type) to the cluster's xdcr queue and wait for the response.
    """
    xdcrMsg = {'dest_cluster_ip': args.dest_cluster_ip,
               'dest_cluster_rest_username': args.dest_cluster_username,
               'dest_cluster_rest_pwd': args.dest_cluster_pwd,
               'dest_cluster_name': args.dest_cluster_name,
               'replication_type': args.replication_type,
               }

    cluster = args.cluster

    #TODO: Validate the user inputs, before passing to rabbit
    # fixed: Python-2 `print xdcrMsg` statement -> call form valid in 2 and 3
    print(xdcrMsg)

    rabbitHelper = RabbitHelper(args.broker, cluster)
    xdcrMsg['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("xdcr_" + cluster, json.dumps(xdcrMsg))
    receiveResponse(rabbitHelper, xdcrMsg['rcq'])
def perform_admin_tasks(args):
    """Queue admin actions for the target cluster and block until the worker responds."""
    action_names = ('rebalance_in', 'rebalance_out', 'failover',
                    'soft_restart', 'hard_restart', 'only_failover')
    actions = {name: getattr(args, name) for name in action_names}

    cluster = args.cluster

    #TODO: Validate the user inputs, before passing to rabbit
    rabbitHelper = RabbitHelper(args.broker, cluster)
    actions['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("admin_" + cluster, json.dumps(actions))
    receiveResponse(rabbitHelper, actions['rcq'])
def perform_xdcr_tasks(args):
    """Send an XDCR setup request to the cluster's xdcr queue and wait for the reply."""
    xdcrMsg = {'dest_cluster_ip': args.dest_cluster_ip,
               'dest_cluster_rest_username': args.dest_cluster_username,
               'dest_cluster_rest_pwd': args.dest_cluster_pwd,
               'dest_cluster_name': args.dest_cluster_name,
               'replication_type': args.replication_type,
               }

    cluster = args.cluster

    #TODO: Validate the user inputs, before passing to rabbit
    # fixed: Python-2 `print xdcrMsg` statement -> call form valid in 2 and 3
    print(xdcrMsg)

    rabbitHelper = RabbitHelper(args.broker)
    xdcrMsg['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("xdcr_" + cluster, json.dumps(xdcrMsg))
    receiveResponse(rabbitHelper, xdcrMsg['rcq'])
def run_systemtest(args):
    """Kick off a system test (from a json file or by name) on the given
    cluster and wait for the manager's response.
    """
    cluster = args.cluster
    rabbitHelper = RabbitHelper(args.broker, cluster)
    test = {'suffix': args.filesuffix}

    # robustness: default to no extra params so `msg` is never unbound when
    # neither --fromfile nor --name was supplied (original raised NameError)
    msg = {}
    if args.fromfile is not None:
        # load json config; context manager closes the handle (original leaked it)
        with open(args.fromfile) as json_data:
            msg = json.load(json_data)
    elif args.name is not None:
        msg = {"localtestname": args.name}

    test.update(msg)
    test['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg('systest_manager_' + cluster, json.dumps(test))
    receiveResponse(rabbitHelper, test['rcq'])
def run_systemtest(args):
    """Publish a system test request (from file or by local test name) and
    wait for completion on the response queue.
    """
    cluster = args.cluster
    rabbitHelper = RabbitHelper(args.broker, cluster)
    test = {'suffix': args.filesuffix}

    msg = {}  # fall back to no extra params if neither source is given (was NameError)
    if args.fromfile is not None:
        # load json config (with-block closes the file; original never closed it)
        with open(args.fromfile) as fh:
            msg = json.load(fh)
    elif args.name is not None:
        msg = {"localtestname": args.name}

    test.update(msg)
    test['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg('systest_manager_' + cluster, json.dumps(test))
    receiveResponse(rabbitHelper, test['rcq'])
def main():
    """Join the cluster's consumer fanout exchange, announce init, and consume forever."""
    args = parser.parse_args()
    CB_CLUSTER_TAG = args.cluster
    exchange = CB_CLUSTER_TAG + "consumers"

    # setup to consume messages from worker
    mq = RabbitHelper()
    mq.exchange_declare(exchange, "fanout")
    queue_name = mq.declare()[0]

    # bind to exchange and announce ourselves
    mq.bind(exchange, queue_name)
    mq.putMsg('', 'init', exchange)

    # consume messages
    channel, conn = mq.channel()
    channel.basic_consume(callback=init, queue=queue_name, no_ack=True)

    while True:
        conn.drain_events()
def main():
    """Bind a fresh queue to the cluster's consumer fanout exchange, send the
    'init' broadcast, then drain events until killed.
    """
    args = parser.parse_args()
    CB_CLUSTER_TAG = args.cluster
    exchange = "%sconsumers" % CB_CLUSTER_TAG

    # setup to consume messages from worker
    mq = RabbitHelper()
    mq.exchange_declare(exchange, "fanout")
    declared = mq.declare()
    queue_name = declared[0]

    # bind to exchange
    mq.bind(exchange, queue_name)
    mq.putMsg('', 'init', exchange)

    # consume messages
    channel, conn = mq.channel()
    channel.basic_consume(callback=init, queue=queue_name, no_ack=True)

    while True:
        conn.drain_events()
def perform_query_tasks(args):
    """Forward full view-query parameters to the cluster's query worker and await a reply."""
    arg_fields = ('queries_per_sec', 'ddoc', 'view', 'bucket', 'password',
                  'include_filters', 'exclude_filters', 'startkey', 'endkey',
                  'startkey_docid', 'endkey_docid', 'limit', 'indexed_key')
    queryMsg = {field: getattr(args, field) for field in arg_fields}

    cluster = args.cluster
    rabbitHelper = RabbitHelper(args.broker, cluster)
    queryMsg['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg('query_' + cluster, json.dumps(queryMsg))
    receiveResponse(rabbitHelper, queryMsg['rcq'])
def flushq(self):
    """Drain the in-memory queue to the remote cc queue, then forward the
    current hotkey range (nothing happens when no cc queue is configured).
    """
    if self.ccq is None:
        return

    # declare queue
    mq = RabbitHelper()
    mq.declare(self.ccq)

    while not self.memq.empty():
        try:
            item = self.memq.get_nowait()
            mq.putMsg(self.ccq, json.dumps(item))
        except queue.Empty:
            pass

    # hot keys
    if self.hotkeys:
        key_map = {'start': self.hotkeys[0], 'end': self.hotkeys[-1]}
        mq.putMsg(self.ccq, json.dumps(key_map))
def flushq(self, flush_hotkeys=False):
    """Flush buffered messages to the remote cc queue (no-op in standalone mode).

    When flush_hotkeys is set and batches are pending, the first/last hotkey
    batch boundaries are forwarded to the consume queue (or the cc queue) for
    follow-up workloads, and the local batch list is cleared.
    """
    if self.standalone:
        return

    mq = RabbitHelper()

    if self.ccq is not None:
        logging.info("[Thread %s] flushing %s items to %s" %
                     (self.name, self.memq.qsize(), self.ccq))

        # declare queue
        mq.declare(self.ccq)

        # empty the in memory queue
        while self.memq.empty() == False:
            try:
                msg = self.memq.get_nowait()
                msg = json.dumps(msg)
                mq.putMsg(self.ccq, msg)
            except queue.Empty:
                pass

    # hot keys
    if flush_hotkeys and (len(self.hotkey_batches) > 0):
        # try to put onto remote queue.
        # BUG FIX: renamed from `queue`, which shadowed the stdlib `queue`
        # module for the whole function and broke the `queue.Empty` handler
        # above with UnboundLocalError.
        target = self.consume_queue or self.ccq
        if target is not None:
            key_map = {'start': self.hotkey_batches[0][0],
                       'end': self.hotkey_batches[-1][-1]}
            mq.putMsg(target, json.dumps(key_map))
        self.hotkey_batches = []
def perform_query_tasks(args):
    """Publish a fully-parameterized view query task and wait for the worker's response."""
    query_spec = {'queries_per_sec': args.queries_per_sec,
                  'ddoc': args.ddoc,
                  'view': args.view,
                  'bucket': args.bucket,
                  'password': args.password,
                  'include_filters': args.include_filters,
                  'exclude_filters': args.exclude_filters,
                  'startkey': args.startkey,
                  'endkey': args.endkey,
                  'startkey_docid': args.startkey_docid,
                  'endkey_docid': args.endkey_docid,
                  'limit': args.limit,
                  'indexed_key': args.indexed_key}

    cluster = args.cluster
    rabbitHelper = RabbitHelper(args.broker, cluster)
    query_spec['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg('query_' + cluster, json.dumps(query_spec))
    receiveResponse(rabbitHelper, query_spec['rcq'])
def run(workload):
    """Divide a workload's per-second op counts across all running sdk
    consumers and broadcast the start message on the consumer exchange.
    """
    workload.active = True
    rabbitHelper = RabbitHelper()
    # removed unused locals from the original: sdk_queue_key, miss_queue

    # read doc template
    template = Template.from_cache(str(workload.template))
    if template is None:
        logger.error("no doc template imported")
        return

    consumer_template = copy.deepcopy(template)
    bucket = str(workload.bucket)
    password = str(workload.password)

    active_hosts = None
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")
    if clusterStatus is not None:
        active_hosts = clusterStatus.get_all_hosts()

    if workload.cc_queues is not None:
        # override template attribute with workload
        consumer_template.cc_queues = workload.cc_queues

    if len(workload.indexed_keys) > 0:
        # NOTE(review): this sets indexed_keys on the cached template, not on
        # consumer_template (which is what gets broadcast) — confirm intentional
        template.indexed_keys = workload.indexed_keys

    ops_sec = workload.ops_per_sec

    # modify ops by number of consumers
    num_consumers = rabbitHelper.numExchangeQueues(cfg.CB_CLUSTER_TAG, EXCHANGE)

    if num_consumers == 0:
        logger.error("No sdkclients running")
        return

    ops_sec = int(ops_sec) / num_consumers
    create_count = int(ops_sec * workload.create_perc / 100)
    update_count = int(ops_sec * workload.update_perc / 100)
    get_count = int(ops_sec * workload.get_perc / 100)
    del_count = int(ops_sec * workload.del_perc / 100)
    exp_count = int(ops_sec * workload.exp_perc / 100)

    consume_queue = workload.consume_queue
    ttl = workload.ttl
    miss_perc = workload.miss_perc

    # broadcast to sdk_consumers
    msg = {'bucket': bucket,
           'id': workload.id,
           'password': password,
           'template': consumer_template.__dict__,
           'ops_sec': ops_sec,
           'create_count': create_count,
           'update_count': update_count,
           'get_count': get_count,
           'del_count': del_count,
           'exp_count': exp_count,
           'consume_queue': consume_queue,
           'ttl': ttl,
           'miss_perc': miss_perc,
           'active': True,
           'active_hosts': active_hosts}

    rabbitHelper.putMsg('', json.dumps(msg), EXCHANGE)

    logger.error("start task sent to %s consumers" % num_consumers)
def run(workload):
    """Broadcast this workload's per-consumer operation counts to every sdk
    consumer attached to the consumer exchange.
    """
    workload.active = True
    rabbitHelper = RabbitHelper()
    sdk_queue_key = "sdk_consumer.*"

    # read doc template
    template = Template.from_cache(str(workload.template))
    if template is None:
        logger.error("no doc template imported")
        return

    consumer_template = copy.deepcopy(template)
    bucket = str(workload.bucket)
    password = str(workload.password)

    active_hosts = None
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")
    if clusterStatus is not None:
        active_hosts = clusterStatus.get_all_hosts()

    if workload.cc_queues is not None:
        # override template attribute with workload
        consumer_template.cc_queues = workload.cc_queues

    if len(workload.indexed_keys) > 0:
        template.indexed_keys = workload.indexed_keys

    # modify ops by number of consumers
    num_consumers = rabbitHelper.numExchangeQueues(cfg.CB_CLUSTER_TAG, EXCHANGE)
    if num_consumers == 0:
        logger.error("No sdkclients running")
        return

    ops_sec = int(workload.ops_per_sec) / num_consumers

    def _share(perc):
        # per-consumer count for a single op type
        return int(ops_sec * perc / 100)

    consume_queue = workload.consume_queue
    ttl = workload.ttl
    miss_queue = workload.miss_queue  # read here but not forwarded in msg
    miss_perc = workload.miss_perc

    # broadcast to sdk_consumers
    msg = {'bucket': bucket,
           'id': workload.id,
           'password': password,
           'template': consumer_template.__dict__,
           'ops_sec': ops_sec,
           'create_count': _share(workload.create_perc),
           'update_count': _share(workload.update_perc),
           'get_count': _share(workload.get_perc),
           'del_count': _share(workload.del_perc),
           'exp_count': _share(workload.exp_perc),
           'consume_queue': consume_queue,
           'ttl': ttl,
           'miss_perc': miss_perc,
           'active': True,
           'active_hosts': active_hosts}

    rabbitHelper.putMsg('', json.dumps(msg), EXCHANGE)
    logger.error("start task sent to %s consumers" % num_consumers)