Example #1
def updateQueryWorkload(query):
    workloads = CacheHelper.workloads()

    for workload in workloads:
        if workload.active and workload.bucket == query.bucket:
            key = query.indexed_key
            workload.updateIndexKeys(key)
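
All of these examples iterate CacheHelper.workloads() and rely on a handful of workload attributes. Below is a minimal sketch of the shape updateQueryWorkload assumes; the class is a hypothetical stand-in inferred from the calls above, not the project's actual model:

# Hypothetical stand-in for the cached workload objects used above.
class Workload(object):

    def __init__(self, bucket):
        self.active = True         # only active workloads are touched
        self.bucket = bucket       # matched against query.bucket
        self.indexed_keys = set()

    def updateIndexKeys(self, key):
        # remember which keys the query layer has indexed
        self.indexed_keys.add(key)
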
def postcondition_handler():

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.postcondition_handler and workload.active:
            bucket = workload.bucket
            bs = BucketStatus.from_cache(bucket)
            bs.block(bucket)
            status = True

            try:
                # resolve the named postcondition method from the phandler module
                handler = getattr(phandler,
                                  workload.postcondition_handler)

                status = handler(workload)

            except AttributeError:
                logger.error("Postcondition method %s doesn't exist"
                             % workload.postcondition_handler)
                workload.postcondition = None
                workload.postcondition_handler = None

            if status is True:
                # unblock bucket and deactivate workload
                bs = BucketStatus.from_cache(bucket)
                bs.unblock(bucket)
                workload.active = False
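
The try block resolves the handler by name from the phandler module via getattr. As a reference, here is a minimal sketch of what such a module-level postcondition function could look like; the function name and the attribute it checks are illustrative assumptions, not part of the original project:

# phandler.py (hypothetical) -- a postcondition method receives the
# workload and returns True once its condition has been satisfied
def ops_drained(workload):
    return getattr(workload, 'pending_ops', 0) == 0
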
Example #4
def report_kv_latency(bucket="default"):

    if cfg.SERIESLY_IP == '':
        # seriesly not configured
        return

    rabbitHelper = report_kv_latency.rabbitHelper
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status") or \
        ClusterStatus()

    host = clusterStatus.get_random_host()
    if host is None:
        return

    ip, port = host.split(':')

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.active and workload.bucket == bucket:

            # read workload params
            bucket = str(workload.bucket)
            password = str(workload.password)

            # read template from active workload
            template = Template.from_cache(str(workload.template))
            template = template.__dict__
            client.decodeMajgicStrings(template)

            # setup key/val to use for timing
            key = _random_string(12)
            value = json.dumps(template['kv'])
            get_key = key

            # for the get op, try to pull a key from the consume_queue
            # so that we can measure the impact of DGM (disk > memory)
            consume_queue = workload.consume_queue
            if consume_queue is not None:
                keys = rabbitHelper.getJsonMsg(str(consume_queue), requeue=True)
                if len(keys) > 0:
                    get_key = str(keys[0])

            # collect op latency
            set_latency = client.mc_op_latency('set', key, value, ip, port, bucket, password)
            get_latency = client.mc_op_latency('get', get_key, value, ip, port, bucket, password)
            delete_latency = client.mc_op_latency('delete', key, value, ip, port, bucket, password)

            # report to seriesly
            seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
            db = 'fast'
            seriesly[db].append({'set_latency': set_latency,
                                 'get_latency': get_latency,
                                 'delete_latency': delete_latency})
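
mc_op_latency is the project's own helper and its internals are not shown in these snippets; conceptually it times one memcached operation against the given host/bucket. A rough sketch of that measurement, with do_op as a hypothetical stand-in for the client call:

import time

def op_latency(do_op, *args):
    # time a single client operation; returns elapsed seconds
    start = time.time()
    do_op(*args)
    return time.time() - start
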
def throttle_kv_ops(isovercommited=True):

    rabbitHelper = throttle_kv_ops.rabbitHelper

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.active:
            if isovercommited:
                # clear pending task_queue
                rabbitHelper.purge(workload.task_queue)

                # reduce ops by 10%
                workload.ops_per_sec = workload.ops_per_sec * 0.90
                logger.error("Cluster overcommitted: reduced ops to (%s)"
                             % workload.ops_per_sec)
def throttle_kv_ops(isovercommited=True):

    rabbitHelper = kv_ops_manager.rabbitHelper

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.active:
            if isovercommited:
                # clear pending task_queue
                rabbitHelper.purge(workload.task_queue)

                # reduce ops by 10%, but never throttle below 5000 ops/sec
                new_ops_per_sec = workload.ops_per_sec * 0.90
                if new_ops_per_sec > 5000:
                    workload.ops_per_sec = new_ops_per_sec
                    logger.error("Cluster overcommitted: reduced ops to (%s)" %
                                 workload.ops_per_sec)
Example #8
def taskScheduler():

    workloads = CacheHelper.workloads()

    rabbitHelper = taskScheduler.rabbitHelper
    tasks = []

    for workload in workloads:
        if workload.active:
            task_queue = workload.task_queue
            # dequeue subtasks
            if rabbitHelper.qsize(task_queue) > 0:
                tasks = rabbitHelper.getJsonMsg(task_queue)
                if tasks is not None and len(tasks) > 0:

                    # apply async
                    result = TaskSet(tasks=tasks).apply_async()
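
TaskSet here is the grouped-subtask API from pre-3.1 Celery, which fires a batch of subtasks at once. A minimal usage sketch, assuming the dequeued messages deserialize into Celery subtask signatures:

from celery.task.sets import TaskSet

# `tasks` is assumed to be a list of Celery subtask signatures,
# e.g. built with some_task.subtask((arg,)) before being queued
result = TaskSet(tasks=tasks).apply_async()
result.join()  # optional: block until every subtask completes
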
Example #11
def postcondition_handler():

    workloads = CacheHelper.workloads()

    for workload in workloads:
        if workload.postconditions and workload.active:
            bucket = workload.bucket
            bs = BucketStatus.from_cache(bucket)
            bs.block(bucket)

            stat_checker = StatChecker(cfg.COUCHBASE_IP + ":" +
                                       cfg.COUCHBASE_PORT,
                                       bucket=bucket,
                                       username=cfg.COUCHBASE_USER,
                                       password=cfg.COUCHBASE_PWD)
            status = stat_checker.check(workload.postconditions)
            if status is True:
                # unblock bucket and deactivate workload
                bs = BucketStatus.from_cache(bucket)
                bs.unblock(bucket)
                workload.active = False
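
In this variant the postcondition is a declarative stats expression evaluated by StatChecker against the bucket rather than a named handler. The real grammar lives in StatChecker; the sketch below re-implements the core idea under the assumption of a "stat comparator value" format such as "curr_items > 100000":

import operator

# Hypothetical re-implementation of the check idea (not StatChecker's code):
# parse "curr_items > 100000" and compare against live bucket stats.
OPS = {'>': operator.gt, '<': operator.lt, '=': operator.eq}

def check_postcondition(stats, expression):
    stat, op, value = expression.split()
    return OPS[op](stats[stat], int(value))

# e.g. check_postcondition({'curr_items': 150000}, "curr_items > 100000") -> True
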
Example #12
def taskScheduler():

    workloads = CacheHelper.workloads()

    rabbitHelper = taskScheduler.rabbitHelper
    tasks = []

    for workload in workloads:
        if workload.active:

            task_queue = workload.task_queue
            num_ready_tasks = rabbitHelper.qsize(task_queue)
            # dequeue subtasks
            if num_ready_tasks > 0:
                tasks = rabbitHelper.getJsonMsg(task_queue)
                if tasks is not None and len(tasks) > 0:

                    # apply async
                    result = TaskSet(tasks=tasks).apply_async()

            # check if more subtasks need to be queued
            if num_ready_tasks < 10:
                queue_op_cycles.delay(workload)
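
queue_op_cycles.delay(workload) asynchronously refills the workload's task_queue whenever fewer than 10 subtasks are ready. The real producer isn't shown in these examples; below is a purely hypothetical stub just to show the shape, assuming a Celery app named celery and a putMsg method on the broker helper:

import json

@celery.task
def queue_op_cycles(workload):
    # hypothetical stub: push one more cycle of op subtasks onto the queue
    rabbitHelper = queue_op_cycles.rabbitHelper
    batch = [{'op': 'set', 'count': workload.ops_per_sec}]  # illustrative payload
    rabbitHelper.putMsg(workload.task_queue, json.dumps(batch))
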
Example #14
def report_kv_latency(bucket="default"):

    if cfg.SERIESLY_IP == '':
        # seriesly not configured
        return

    rabbitHelper = report_kv_latency.rabbitHelper
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status") or \
        ClusterStatus()

    host = clusterStatus.get_random_host()
    if host is None:
        return

    ip, port = host.split(':')

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.active and workload.bucket == bucket:

            # read workload params
            bucket = str(workload.bucket)
            password = str(workload.password)

            # read template from active workload
            template = Template.from_cache(str(workload.template))
            template = template.__dict__
            client.decodeMajgicStrings(template)

            # setup key/val to use for timing
            key = _random_string(12)
            value = json.dumps(template['kv'])
            get_key = key

            # for the get op, try to pull a key from the consume_queue
            # so that we can measure the impact of DGM (disk > memory)
            consume_queue = workload.consume_queue
            if consume_queue is not None:
                keys = rabbitHelper.getJsonMsg(str(consume_queue),
                                               requeue=True)
                if len(keys) > 0:
                    get_key = str(keys['start'])

            # collect op latency
            set_latency = client.mc_op_latency('set', key, value, ip, port,
                                               bucket, password)
            get_latency = client.mc_op_latency('get', get_key, value, ip, port,
                                               bucket, password)
            delete_latency = client.mc_op_latency('delete', key, value, ip,
                                                  port, bucket, password)

            # report to seriesly
            seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
            db = None
            if 'fast' in seriesly.list_dbs():
                db = 'fast'
            else:
                bucketStatus = BucketStatus.from_cache(bucket) or BucketStatus(
                    bucket)
                db = bucketStatus.latency_db
                if db not in seriesly.list_dbs():
                    seriesly.create_db(db)

            if db is not None:
                seriesly[db].append({
                    'set_latency': set_latency,
                    'get_latency': get_latency,
                    'delete_latency': delete_latency
                })
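
_random_string is used for the timed keys but never defined in these snippets; a minimal stand-in consistent with its use above:

import random
import string

def _random_string(length):
    # random lowercase/digit key, e.g. _random_string(12) for the timed ops
    return ''.join(random.choice(string.ascii_lowercase + string.digits)
                   for _ in range(length))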