def workloadConsumer(workloadQueue="workload", templateQueue="workload_template"):
    """Drain one pending message each from the template and workload queues.

    Template messages are parsed into Template objects and cached;
    workload messages are parsed into Workload objects, cached, and
    handed to sysTestRunner for asynchronous execution.

    The two consume paths were duplicated inline; they are now factored
    into private helpers sharing one error-handling shape.
    """
    rabbitHelper = workloadConsumer.rabbitHelper
    _consumeTemplateMsg(rabbitHelper, templateQueue)
    _consumeWorkloadMsg(rabbitHelper, workloadQueue)


def _consumeTemplateMsg(rabbitHelper, templateQueue):
    # Pull at most one template message off the queue and cache it.
    templateMsg = None
    try:
        if rabbitHelper.qsize(templateQueue) > 0:
            templateMsg = rabbitHelper.getJsonMsg(templateQueue)
            TemplateCacher().store(Template(templateMsg))
    except ValueError as ex:
        # malformed json/structure in the message
        logger.error("Error parsing template msg %s: " % templateMsg)
        logger.error(ex)
    except Exception as ex:
        # broker/cache failures: log and keep the consumer alive
        logger.error(ex)


def _consumeWorkloadMsg(rabbitHelper, workloadQueue):
    # Pull at most one workload message off the queue, cache it and launch it.
    workloadMsg = None
    try:
        if rabbitHelper.qsize(workloadQueue) > 0:
            workloadMsg = rabbitHelper.getJsonMsg(workloadQueue)
            workload = Workload(workloadMsg)
            WorkloadCacher().store(workload)

            # launch workload
            sysTestRunner.delay(workload)
    except ValueError as ex:
        logger.error("Error parsing workloadMsg %s: " % workloadMsg)
        logger.error(ex)
    except Exception as ex:
        logger.error(ex)
def task_prerun_handler(sender=None, task_id=None, task=None, args=None, kwargs=None, signal=None):
    """Celery task_prerun signal hook.

    Before a `run` task starts, if the workload declares preconditions,
    blocks the target bucket, polls Couchbase stats until the
    preconditions hold, then deactivates the previous workload and
    unblocks the bucket.  For any other sender this is a no-op.
    """
    if sender == run:
        workload = args[0]
        prevWorkload = args[1]
        if workload.preconditions is not None:
            # block tasks against bucket until pre-conditions met
            bucket = str(workload.bucket)
            bs = BucketStatusCacher().bucketstatus(bucket)
            bs.block(bucket)
            BucketStatusCacher().store(bs)
            stat_checker = StatChecker(cfg.COUCHBASE_IP + ":" + cfg.COUCHBASE_PORT,
                                       bucket=bucket,
                                       username=cfg.COUCHBASE_USER,
                                       password=cfg.COUCHBASE_PWD)
            # busy-wait (1s poll) until all precondition stats are satisfied
            while not stat_checker.check(workload.preconditions):
                time.sleep(1)
            # preconditions met: retire the previous workload...
            # NOTE(review): assumes prevWorkload is not None whenever
            # preconditions are set — confirm against sysTestRunner's callers
            prevWorkload.active = False
            WorkloadCacher().store(prevWorkload)
            # ...and re-read + unblock the bucket status (re-fetched because
            # the cached copy may have changed while we were polling)
            bs = BucketStatusCacher().bucketstatus(bucket)
            bs.unblock(bucket)
            BucketStatusCacher().store(bs)
def task_postrun_handler(sender=None, task_id=None, task=None, args=None, kwargs=None, state=None, signal=None, retval=None):
    """Celery task_postrun signal hook.

    Two cleanup/fan-out duties:
      * after sysTestRunner finishes: purge the workload's task queue and
        drop the workload from the cache
      * after client.mset on a create (non-update) pass: forward the set
        keys onto each consumer-consumer queue declared by the template
    """
    rabbitHelper = task_postrun_handler.rabbitHelper

    if sender == sysTestRunner:
        # cleanup workload after handled by test runner
        workload = retval
        rabbitHelper.purge(workload.task_queue)
        WorkloadCacher().delete(workload)

    if sender == client.mset:
        # mset returns a list of keys on success, anything else on failure
        if isinstance(retval, list):
            isupdate = args[3]
            if not isupdate:
                # allow multi set keys to be consumed
                keys = retval

                # note template was converted to dict for mset;
                # .get() tolerates templates without a cc_queues entry
                # (plain subscript raised KeyError for those)
                template = args[1]
                if template.get("cc_queues") is not None:
                    for queue in template["cc_queues"]:
                        queue = str(queue)
                        rabbitHelper.declare(queue)
                        if keys is not None and len(keys) > 0:
                            rabbitHelper.putMsg(queue, json.dumps(keys))
        else:
            logger.error("Error during multi set")
            logger.error(retval)
def worker_init(): # cleanup queues rabbitHelper = RabbitHelper() cached_queues = WorkloadCacher().queues + TemplateCacher().cc_queues test_queues = [ "workload", "workload_template", "admin_tasks", "xdcr_tasks" ] + cached_queues for queue in test_queues: try: if rabbitHelper.qsize(queue) > 0: print "Purge Queue: " + queue + " " + str( rabbitHelper.qsize(queue)) rabbitHelper.purge(queue) except Exception as ex: print ex cacheClean() # kill old background processes kill_procs = ["sdkserver"] for proc in kill_procs: os.system("ps aux | grep %s | awk '{print $2}' | xargs kill" % proc) # start sdk servers os.system("ruby sdkserver.rb &") os.system("python sdkserver.py &") # make sure logdir exists os.system("mkdir -p " + cfg.LOGDIR)
def run(workload, prevWorkload=None):
    """Main workload driver task.

    Marks the workload active, then once per second (while it stays
    active) generates a batch of pending subtasks sized from the
    workload's ops_per_sec and operation percentages.  Backs off from
    generating while ~20 batches are already inflight, re-syncing the
    inflight count from the broker queue depth.  Returns the final
    (possibly externally deactivated) workload.
    """
    rabbitHelper = run.rabbitHelper
    cache = WorkloadCacher()
    workload.active = True
    cache.store(workload)

    bucket = str(workload.bucket)
    task_queue = workload.task_queue
    inflight = 0

    while workload.active:
        if inflight < 20:
            # read doc template
            template = TemplateCacher().template(str(workload.template))
            if template is not None:  # was `!= None`; identity check is correct here
                if workload.cc_queues is not None:
                    # override template attribute with workload
                    template.cc_queues = workload.cc_queues

                # read workload settings
                bucket = workload.bucket
                ops_sec = workload.ops_per_sec
                create_count = int(ops_sec * workload.create_perc / 100)
                update_count = int(ops_sec * workload.update_perc / 100)
                get_count = int(ops_sec * workload.get_perc / 100)
                del_count = int(ops_sec * workload.del_perc / 100)
                consume_queue = workload.consume_queue

                generate_pending_tasks.delay(task_queue, template, bucket,
                                             create_count, update_count,
                                             get_count, del_count,
                                             consume_queue)
                inflight += 1
        else:
            # saturated: resync inflight from actual broker queue depth
            inflight = rabbitHelper.qsize(task_queue)

        # 1s cadence gives ops_per_sec its meaning; refresh workload in
        # case another task deactivated it
        time.sleep(1)
        workload = cache.workload(workload.id)

    return workload
def sysTestRunner(workload):
    """Schedule a workload against its bucket, honoring the bucket's mode.

    Registers this task as the bucket's latest, optionally waits
    (workload.wait), then either blocks until the previous workload
    finishes ("blocking" mode) or deactivates it ("nonblocking" mode,
    unless the new workload has preconditions), and finally launches
    `run` and returns its result.
    """
    bucket = str(workload.bucket)
    latestWorkloadTask = None
    prevWorkload = None
    cache = BucketStatusCacher()

    bucketStatus = cache.bucketstatus(bucket)
    if bucketStatus is not None:
        latestWorkloadTask, prevWorkload = bucketStatus.latestWorkloadTask(bucket)
    else:
        bucketStatus = BucketStatus(bucket)

    # make this the latest taskid against this bucket
    bucketStatus.addTask(bucket, current_task.request.id, workload)
    cache.store(bucketStatus)

    if workload.wait is not None:
        # wait before processing
        time.sleep(workload.wait)

    if bucketStatus.mode(bucket) == "blocking":
        # fix: guard prevWorkload like the nonblocking branch does —
        # first workload on a blocking bucket has no predecessor and
        # previously raised AttributeError on prevWorkload.id
        while prevWorkload is not None and \
                Cache().retrieve(prevWorkload.id) is not None:
            time.sleep(2)
    elif bucketStatus.mode(bucket) == "nonblocking":
        if prevWorkload is not None:
            # disable previously running
            # workload if bucket in nonblocking mode.
            # if current workload has no preconditions
            # it's not allowed to override previous workload
            if workload.preconditions is None:
                prevWorkload.active = False
                WorkloadCacher().store(prevWorkload)

    runTask = run.apply_async(args=[workload, prevWorkload],
                              expires=workload.expires)
    return runTask.get()
def taskScheduler():
    """For every active cached workload, dequeue one batch of pending
    subtasks from its task queue and apply them as a TaskSet, waiting
    at most one second for results."""
    rabbitHelper = taskScheduler.rabbitHelper

    for wl in WorkloadCacher().workloads:
        if not wl.active:
            continue

        # dequeue subtasks
        queue = wl.task_queue
        if rabbitHelper.qsize(queue) == 0:
            continue

        pending = rabbitHelper.getJsonMsg(queue)
        if pending is None or len(pending) == 0:
            continue

        # apply async; don't stall the scheduler waiting on results
        outcome = TaskSet(tasks=pending).apply_async()
        try:
            outcome.join(timeout=1)
        except TimeoutError:
            pass
def postcondition_handler():
    """Check postconditions for every active workload that declares them.

    Blocks the workload's bucket while the Couchbase stats are checked;
    when the postconditions are satisfied, unblocks the bucket and
    deactivates the workload (which stops its `run` loop).
    """
    cache = WorkloadCacher()

    for workload in cache.workloads:
        if workload.postconditions and workload.active:
            bucket = workload.bucket

            # block the bucket while stats are evaluated
            bs = BucketStatusCacher().bucketstatus(bucket)
            bs.block(bucket)
            BucketStatusCacher().store(bs)

            stat_checker = StatChecker(cfg.COUCHBASE_IP + ":" + cfg.COUCHBASE_PORT,
                                       bucket=bucket,
                                       username=cfg.COUCHBASE_USER,
                                       password=cfg.COUCHBASE_PWD)
            status = stat_checker.check(workload.postconditions)
            if status:  # was `== True`; truthiness test is the idiom
                # unblock bucket and deactivate workload
                bs = BucketStatusCacher().bucketstatus(bucket)
                bs.unblock(bucket)
                BucketStatusCacher().store(bs)
                workload.active = False
                cache.store(workload)
import os import time from rabbit_helper import RabbitHelper from cache import WorkloadCacher, TemplateCacher, BucketStatusCacher, cacheClean # cleanup queues rabbitHelper = RabbitHelper() cached_queues = WorkloadCacher().queues + TemplateCacher().cc_queues test_queues = ["workload","workload_template"] + cached_queues for queue in test_queues: try: if rabbitHelper.qsize(queue) > 0: print "Purge Queue: "+queue +" "+ str(rabbitHelper.qsize(queue)) rabbitHelper.purge(queue) except Exception as ex: pass cacheClean() # kill+start sdk's os.system("ps aux | grep sdkserver | awk '{print $2'} | xargs kill") os.system("ruby sdkserver.rb &") os.system("python sdkserver.py &")