Example 1
def worker_init():
    # cleanup queues
    rabbitHelper = RabbitHelper()

    cached_queues = WorkloadCacher().queues + TemplateCacher().cc_queues
    test_queues = ["workload", "workload_template", "admin_tasks", "xdcr_tasks"] + cached_queues

    for queue in test_queues:
        try:
            if rabbitHelper.qsize(queue) > 0:
                print("Purge Queue: " + queue + " " + str(rabbitHelper.qsize(queue)))
                rabbitHelper.purge(queue)
        except Exception as ex:
            print(ex)

    cacheClean()

    # kill old background processes
    kill_procs=["sdkserver"]
    for proc in kill_procs:
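        # note: the grep output may match the grep process itself;
        # a kill against that already-exited pid is harmless here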
        os.system("ps aux | grep %s | awk '{print $2}' | xargs kill" % proc)

    # start sdk servers
    os.system("ruby sdkserver.rb &")
    os.system("python sdkserver.py  &")

    # make sure logdir exists
    os.system("mkdir -p "+cfg.LOGDIR)
Example 2
def get_remote_phase_status(remoteIP, taskID, retry=5):

    # assemble a request to remoteIP phase_status method
    rabbitHelper = RabbitHelper(mq_server=remoteIP)
    rcq = getResponseQueue(rabbitHelper)
    task_method = "app.systest_manager.get_phase_status"
    task_queue = "phase_status_" + cfg.CB_CLUSTER_TAG
    task_routing_queue = cfg.CB_CLUSTER_TAG + ".phase.status"
    args = (taskID, rcq)

    # call phase_status task
    rawTaskPublisher(task_method,
                     args,
                     task_queue,
                     broker=remoteIP,
                     exchange="default",
                     routing_key=task_routing_queue)

    # retrieve status of phase
    rc = None
    while rc is None and retry > 0:
        rc = rabbitHelper.getMsg(rcq)
        time.sleep(2)
        retry = retry - 1

    rabbitHelper.delete(rcq)
    return rc == 'True'
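
A hedged usage sketch for the helper above (the broker IP, task ID, and timeout are illustrative placeholders, not values from this project):

import time

# hypothetical wrapper: poll a remotely running phase until it completes
def wait_for_phase(remote_ip, task_id, timeout_secs=600):
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        if get_remote_phase_status(remote_ip, task_id):
            return True
        time.sleep(10)
    return False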
Example 3
def import_template(args):

    val = None

    if args.type == "json":
        json_val = {}
        for kv in args.kvpairs:
            pair = '{%s}' % kv
            try:
                pair = json.loads(pair)
                json_val.update(pair)
            except ValueError as ex:
                print("ERROR: Unable to encode as valid json: %s " % kv)
                print("make sure strings surrounded by double quotes")
                return
        val = json_val

    #TODO binary blobs

    template = {
        "name": args.name,
        "ttl": args.ttl,
        "flags": args.flags,
        "cc_queues": args.cc_queues,
        "size": args.size,
        "kv": val
    }
    cluster = args.cluster

    rabbitHelper = RabbitHelper(args.broker, cluster)
    template['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("workload_template_" + cluster, json.dumps(template))
    receiveResponse(rabbitHelper, template['rcq'])
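
For reference, each --kvpairs entry is wrapped in braces and handed to json.loads, so keys and string values must be double-quoted; a minimal sketch of the expected input format:

import json

# each kvpairs entry must be valid JSON once wrapped in braces
kv = '"state":"ca","age":28'
pair = json.loads('{%s}' % kv)
assert pair == {'state': 'ca', 'age': 28}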
Example 4
def run_workload(args):

    workload = {}

    if args.name is not None:
        # TODO: read in workload params from saved store
        # workload.update(cached_workload)
        pass

    if args.wait is not None:
        args.wait = conv_to_secs(args.wait)

    workload = { "bucket"      : args.bucket,
                 "password"    : args.password,
                 "ops_per_sec" : args.ops,
                 "create_perc" : args.create,
                 "update_perc" : args.update,
                 "get_perc"    : args.get,
                 "del_perc"    : args.delete,
                 "exp_perc"    : args.expire,
                 "miss_perc"   : args.miss,
                 "ttl"         : args.ttl,
                 "cc_queues"   : args.cc_queues,
                 "consume_queue" : args.consume_queue,
                 "postconditions" : args.postcondition,
                 "preconditions" : args.precondition,
                 "wait"  : args.wait,
                 "template"  : args.template}
    cluster = args.cluster

    rabbitHelper = RabbitHelper(args.broker, cluster)
    workload['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("workload_"+cluster, json.dumps(workload))
    receiveResponse(rabbitHelper, workload['rcq'])
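
conv_to_secs is defined elsewhere in the project; since the CLI declares --wait as three integers <hour> <min> <sec> (see the parser in Example 41), a plausible sketch of its behavior is:

def conv_to_secs(hms):
    # assumed semantics: [hour, min, sec] -> total seconds
    hours, minutes, seconds = hms
    return hours * 3600 + minutes * 60 + seconds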
Example 6
def run_workload(args):

    workload = {}

    if args.name is not None:
        # TODO: read in workload params from saved store
        # workload.update(cached_workload)
        pass

    if args.wait is not None:
        args.wait = conv_to_secs(args.wait)

    if args.expires is not None:
        args.expires = conv_to_secs(args.expires)

    workload = {
        "bucket": args.bucket,
        "ops_per_sec": args.ops,
        "create_perc": args.create,
        "update_perc": args.update,
        "get_perc": args.get,
        "del_perc": args.delete,
        "cc_queues": args.cc_queues,
        "consume_queue": args.consume_queue,
        "postconditions": args.postcondition,
        "preconditions": args.precondition,
        "wait": args.wait,
        "expires": args.expires,
        "template": args.template
    }

    rabbitHelper = RabbitHelper(args.broker)
    rabbitHelper.putMsg("workload", json.dumps(workload))
Example 8
    def requeueNonDeletedKeys(self):
        rabbitHelper = RabbitHelper()
        task_type = 'app.sdk_client_tasks.mdelete'

        # requeue pending delete keys so that they may be deleted in another workload
        while rabbitHelper.qsize(self.task_queue) > 0:
            task_set = rabbitHelper.getJsonMsg(self.task_queue)

            if len(task_set) > 0:
                keys = [task['args'] for task in task_set
                        if task['task'] == task_type]
                if len(keys) > 0:
                    # put back on to consume_queue
                    msg = json.dumps(keys[0][0])
                    rabbitHelper.putMsg(self.consume_queue, msg)

        try:
            # delete task queue
            rabbitHelper.delete(self.task_queue)

            # delete consume queue if it was a miss_queue
            if self.miss_queue is not None and self.consume_queue is not None:
                rabbitHelper.delete(self.consume_queue)
        except Exception:
            pass
Example 9
def runRemotePhases(remote_phases, retry=5):

    taskIds = []

    for remoteIP in remote_phases:

        # get handler to remote broker
        rabbitHelper = RabbitHelper(mq_server=remoteIP)
        rcq = getResponseQueue(rabbitHelper)
        args = (remote_phases[remoteIP], rcq)

        # call runPhase on remoteIP
        rawTaskPublisher("app.systest_manager.runPhase",
                         args,
                         "run_phase_" + cfg.CB_CLUSTER_TAG,
                         broker=remoteIP,
                         exchange="default",
                         routing_key=cfg.CB_CLUSTER_TAG + ".run.phase")

        # get taskID of phase running on remote broker
        taskId = None
        while taskId is None and retry > 0:
            time.sleep(2)
            taskId = rabbitHelper.getMsg(rcq)
            retry = retry - 1

        # append once after polling; appending inside the loop would also
        # record a (remoteIP, None) entry for every failed attempt
        taskIds.append((remoteIP, taskId))

    return taskIds
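
A hedged sketch combining runRemotePhases with get_remote_phase_status from Example 2 (broker IPs and phase names are placeholders):

import time

# hypothetical: start phases on two remote brokers, then poll each to completion
remote_phases = {"10.1.2.3": "load_phase", "10.1.2.4": "query_phase"}
for remote_ip, task_id in runRemotePhases(remote_phases):
    while task_id is not None and not get_remote_phase_status(remote_ip, task_id):
        time.sleep(10)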
Example 11
def import_template(args):

    val = None

    if args.type == "json":
        json_val = {}
        for kv in args.kvpairs:
            pair = '{%s}' % kv 
            try:
                pair = json.loads(pair)
                json_val.update(pair)
            except ValueError as ex:
                print "ERROR: Unable to encode as valid json: %s " % kv
                print "make sure strings surrounded by double quotes"
                return
        val = json_val

    #TODO binary blobs

    template = { "name" : args.name,
                 "ttl" : args.ttl,
                 "flags" : args.flags,
                 "cc_queues" : args.cc_queues,
                 "size" : args.size,
                 "kv" : val}

    rabbitHelper = RabbitHelper(args.broker)
    rabbitHelper.putMsg("workload_template", json.dumps(template))
Example 12
def run_workload(args):

    workload = {}

    if args.name is not None:
        # TODO: read in workload params from saved store
        # workload.update(cached_workload)
        pass

    if args.wait is not None:
        args.wait = conv_to_secs(args.wait)

    workload = { "bucket"      : args.bucket,
                 "password"    : args.password,
                 "ops_per_sec" : args.ops,
                 "create_perc" : args.create,
                 "update_perc" : args.update,
                 "get_perc"    : args.get,
                 "del_perc"    : args.delete,
                 "exp_perc"    : args.expire,
                 "miss_perc"   : args.miss,
                 "ttl"         : args.ttl,
                 "cc_queues"   : args.cc_queues,
                 "consume_queue" : args.consume_queue,
                 "postconditions" : args.postcondition,
                 "preconditions" : args.precondition,
                 "wait"  : args.wait,
                 "template"  : args.template}
    cluster = args.cluster

    rabbitHelper = RabbitHelper(args.broker)
    workload['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("workload_"+cluster, json.dumps(workload))
    receiveResponse(rabbitHelper, workload['rcq'])
Example 13
    def getKeyMapFromRemoteQueue(self, requeue=True):
        key_map = None
        mq = RabbitHelper()
        if mq.qsize(self.ccq) > 0:
            try:
                key_map = mq.getJsonMsg(self.ccq, requeue=requeue)
            except Exception:
                pass
        return key_map
Example 15
def perform_query_tasks(args):
    queryMsg = {'queries_per_sec' : args.queries_per_sec,
                'ddoc' : args.ddoc,
                'view' : args.view,
                'bucket' : args.bucket,
                'password' : args.password}
    cluster = args.cluster

    rabbitHelper = RabbitHelper(args.broker)
    queryMsg['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg('query_'+cluster, json.dumps(queryMsg))
    receiveResponse(rabbitHelper, queryMsg['rcq'])
Example 16
def perform_admin_tasks(args):

    actions = {'rebalance_in': args.rebalance_in,
               'rebalance_out': args.rebalance_out,
               'failover': args.failover,
               'soft_restart': args.soft_restart,
               'hard_restart': args.hard_restart,
               'only_failover': args.only_failover
              }

    #TODO: Validate the user inputs, before passing to rabbit
    print(actions)
    rabbitHelper = RabbitHelper(args.broker)
    rabbitHelper.putMsg("admin_tasks", json.dumps(actions))
Example 18
def perform_admin_tasks(args):

    actions = {'rebalance_in': args.rebalance_in,
               'rebalance_out': args.rebalance_out,
               'failover': args.failover,
               'soft_restart': args.soft_restart,
               'hard_restart': args.hard_restart,
               'only_failover': args.only_failover
              }
    cluster = args.cluster

    #TODO: Validate the user inputs, before passing to rabbit
    rabbitHelper = RabbitHelper(args.broker, cluster)
    actions['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("admin_"+cluster, json.dumps(actions))
    receiveResponse(rabbitHelper, actions['rcq'])
Example 19
def perform_xdcr_tasks(args):

    xdcrMsg = {'dest_cluster_ip': args.dest_cluster_ip,
               'dest_cluster_rest_username': args.dest_cluster_username,
               'dest_cluster_rest_pwd':  args.dest_cluster_pwd,
               'dest_cluster_name': args.dest_cluster_name,
               'replication_type': args.replication_type,
    }
    cluster = args.cluster

    #TODO: Validate the user inputs, before passing to rabbit
    print(xdcrMsg)
    rabbitHelper = RabbitHelper(args.broker, cluster)
    xdcrMsg['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("xdcr_"+cluster, json.dumps(xdcrMsg))
    receiveResponse(rabbitHelper, xdcrMsg['rcq'])
Example 20
def perform_xdcr_tasks(args):

    xdcrMsg = {'dest_cluster_ip': args.dest_cluster_ip,
               'dest_cluster_rest_username': args.dest_cluster_username,
               'dest_cluster_rest_pwd':  args.dest_cluster_pwd,
               'dest_cluster_name': args.dest_cluster_name,
               'replication_type': args.replication_type,
    }
    cluster = args.cluster

    #TODO: Validate the user inputs, before passing to rabbit
    print(xdcrMsg)
    rabbitHelper = RabbitHelper(args.broker)
    xdcrMsg['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("xdcr_"+cluster, json.dumps(xdcrMsg))
    receiveResponse(rabbitHelper, xdcrMsg['rcq'])
Example 21
def perform_admin_tasks(args):

    actions = {'rebalance_in': args.rebalance_in,
               'rebalance_out': args.rebalance_out,
               'failover': args.failover,
               'soft_restart': args.soft_restart,
               'hard_restart': args.hard_restart,
               'only_failover': args.only_failover
              }
    cluster = args.cluster

    #TODO: Validate the user inputs, before passing to rabbit
    rabbitHelper = RabbitHelper(args.broker)
    actions['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg("admin_"+cluster, json.dumps(actions))
    receiveResponse(rabbitHelper, actions['rcq'])
Example 22
    def flushq(self, flush_hotkeys=False):

        mq = RabbitHelper()

        if self.ccq is not None:

            logging.info("[Thread %s] flushing %s items to %s" %
                         (self.name, self.memq.qsize(), self.ccq))

            # declare queue
            mq.declare(self.ccq)

            # empty the in memory queue
            while not self.memq.empty():
                try:
                    msg = self.memq.get_nowait()
                    msg = json.dumps(msg)
                    mq.putMsg(self.ccq, msg)
                except queue.Empty:
                    pass

        # hot keys
        if flush_hotkeys and len(self.hotkey_batches) > 0:

            # try to put onto remote queue; avoid naming the local "queue",
            # which would shadow the queue module referenced above
            target_queue = self.consume_queue or self.ccq

            if target_queue is not None:
                key_map = {'start': self.hotkey_batches[0][0],
                           'end': self.hotkey_batches[-1][-1]}
                msg = json.dumps(key_map)
                mq.putMsg(target_queue, msg)
                self.hotkey_batches = []
Example 23
    def getKeyMapFromRemoteQueue(self, requeue=True):

        key_map = None
        mq = RabbitHelper()

        # try to fetch from consume queue and
        # fall back to ccqueue
        queue = self.consume_queue

        if queue is None or mq.qsize(queue) == 0:
            queue = self.ccq

        if mq.qsize(queue) > 0:
            try:
                key_map = mq.getJsonMsg(queue, requeue=requeue)
            except Exception:
                pass

        return key_map
Example 25
def run_systemtest(args):

    cluster = args.cluster
    rabbitHelper = RabbitHelper(args.broker, cluster)

    test = {'suffix': args.filesuffix}

    msg = {}

    if args.fromfile is not None:

        # load json config
        with open(args.fromfile) as json_data:
            msg = json.load(json_data)

    elif args.name is not None:
        msg = {"localtestname": args.name}

    test.update(msg)
    test['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg('systest_manager_' + cluster, json.dumps(test))
    receiveResponse(rabbitHelper, test['rcq'])
Example 27
def perform_query_tasks(args):
    queryMsg = {'queries_per_sec' : args.queries_per_sec,
                'ddoc' : args.ddoc,
                'view' : args.view,
                'bucket' : args.bucket,
                'password' : args.password,
                'include_filters' : args.include_filters,
                'exclude_filters' : args.exclude_filters,
                'startkey' : args.startkey,
                'endkey' : args.endkey,
                'startkey_docid' : args.startkey_docid,
                'endkey_docid' : args.endkey_docid,
                'limit' : args.limit,
                'indexed_key' : args.indexed_key}

    cluster = args.cluster

    rabbitHelper = RabbitHelper(args.broker, cluster)
    queryMsg['rcq'] = getResponseQueue(rabbitHelper)
    rabbitHelper.putMsg('query_'+cluster, json.dumps(queryMsg))
    receiveResponse(rabbitHelper, queryMsg['rcq'])
Example 28
def main():
    args = parser.parse_args()
    CB_CLUSTER_TAG = args.cluster
    exchange = CB_CLUSTER_TAG + "consumers"

    # setup to consume messages from worker
    mq = RabbitHelper()
    mq.exchange_declare(exchange, "fanout")
    queue = mq.declare()
    queue_name = queue[0]

    # bind to exchange
    mq.bind(exchange, queue_name)
    mq.putMsg('', 'init', exchange)

    # consume messages
    channel, conn = mq.channel()
    channel.basic_consume(callback=init, queue=queue_name, no_ack=True)

    while True:
        conn.drain_events()
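
The init callback is defined elsewhere; with the py-amqp style consume loop used here, a minimal sketch might look like this (an assumption, not the project's actual handler):

def init(message):
    # py-amqp delivers a Message object whose body holds the broadcast payload
    print("received broadcast: %s" % message.body)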
Example 30
    def __setattr__(self, name, value):
        super(Workload, self).__setattr__(name, value)

        # auto cache workload when certain attributes change
        # if object has been fully setup
        if name in Workload.AUTOCACHEKEYS and self.initialized:
            ObjCacher().store(CacheHelper.WORKLOADCACHEKEY, self)

        # check if workload is being deactivated
        if name == "active" and self.active == False and self.initialized:
            msg = {'active': False, 'id': self.id}
            RabbitHelper().putMsg('', json.dumps(msg), EXCHANGE)
            logger.error("kill task %s" % self.id)
Example 32
class Consumer(Greenlet):

    def __init__(self, queue):
        Greenlet.__init__(self)
        self.queue = queue 
        self.cb = GConnection(bucket='default')
        self.conn = RabbitHelper()

    def _run(self):

        while True:
            data = self.conn.getMsg("gvset") 
            if data:
                do(self.cb, data)
Example 34
    def flushq(self, flush_hotkeys=False):

        if self.standalone:
            return

        mq = RabbitHelper()

        if self.ccq is not None:

            logging.info("[Thread %s] flushing %s items to %s" %
                         (self.name, self.memq.qsize(), self.ccq))

            # declare queue
            mq.declare(self.ccq)

            # empty the in memory queue
            while not self.memq.empty():
                try:
                    msg = self.memq.get_nowait()
                    msg = json.dumps(msg)
                    mq.putMsg(self.ccq, msg)
                except queue.Empty:
                    pass

        # hot keys
        if flush_hotkeys and len(self.hotkey_batches) > 0:

            # try to put onto remote queue; avoid naming the local "queue",
            # which would shadow the queue module referenced above
            target_queue = self.consume_queue or self.ccq

            if target_queue is not None:
                key_map = {
                    'start': self.hotkey_batches[0][0],
                    'end': self.hotkey_batches[-1][-1]
                }
                msg = json.dumps(key_map)
                mq.putMsg(target_queue, msg)
                self.hotkey_batches = []
Example 35
    def flushq(self):

        if self.ccq is not None:

            # declare queue
            mq = RabbitHelper()
            mq.declare(self.ccq)

            while not self.memq.empty():
                try:
                    msg = self.memq.get_nowait()
                    msg = json.dumps(msg)
                    mq.putMsg(self.ccq, msg)
                except queue.Empty:
                    pass

                # hot keys
                if len(self.hotkeys) > 0:
                    key_map = {'start' : self.hotkeys[0],
                               'end' : self.hotkeys[-1]}
                    msg = json.dumps(key_map)
                    mq.putMsg(self.ccq, msg)
Example 37
def run(workload):

    workload.active = True
    rabbitHelper = RabbitHelper()
    sdk_queue_key = "sdk_consumer.*"

    # read doc template
    template = Template.from_cache(str(workload.template))
    if template is None:
        logger.error("no doc template imported")
        return

    consumer_template = copy.deepcopy(template)
    bucket = str(workload.bucket)
    password = str(workload.password)

    active_hosts = None
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")
    if clusterStatus is not None:
        active_hosts = clusterStatus.get_all_hosts()

    if workload.cc_queues is not None:
        # override template attribute with workload
        consumer_template.cc_queues = workload.cc_queues

    if len(workload.indexed_keys) > 0:
        template.indexed_keys = workload.indexed_keys

    ops_sec = workload.ops_per_sec

    # modify ops by number of consumers
    num_consumers = rabbitHelper.numExchangeQueues(cfg.CB_CLUSTER_TAG,
                                                   EXCHANGE)

    if num_consumers == 0:
        logger.error("No sdkclients running")
        return

    ops_sec = int(ops_sec) // num_consumers
    create_count = int(ops_sec * workload.create_perc / 100)
    update_count = int(ops_sec * workload.update_perc / 100)
    get_count = int(ops_sec * workload.get_perc / 100)
    del_count = int(ops_sec * workload.del_perc / 100)
    exp_count = int(ops_sec * workload.exp_perc / 100)
    consume_queue = workload.consume_queue

    ttl = workload.ttl
    miss_queue = workload.miss_queue
    miss_perc = workload.miss_perc

    # broadcast to sdk_consumers
    msg = {
        'bucket': bucket,
        'id': workload.id,
        'password': password,
        'template': consumer_template.__dict__,
        'ops_sec': ops_sec,
        'create_count': create_count,
        'update_count': update_count,
        'get_count': get_count,
        'del_count': del_count,
        'exp_count': exp_count,
        'consume_queue': consume_queue,
        'ttl': ttl,
        'miss_perc': miss_perc,
        'active': True,
        'active_hosts': active_hosts
    }

    rabbitHelper.putMsg('', json.dumps(msg), EXCHANGE)
    logger.error("start task sent to %s consumers" % num_consumers)
Example 38
import os
import time
from rabbit_helper import RabbitHelper
from cache import WorkloadCacher, TemplateCacher, BucketStatusCacher, cacheClean

# cleanup queues
rabbitHelper = RabbitHelper()

cached_queues = WorkloadCacher().queues + TemplateCacher().cc_queues

test_queues = ["workload", "workload_template"] + cached_queues

for queue in test_queues:
    try:
        if rabbitHelper.qsize(queue) > 0:
            print "Purge Queue: " + queue + " " + str(rabbitHelper.qsize(queue))
            rabbitHelper.purge(queue)
    except Exception as ex:
        pass

cacheClean()

# kill+start sdk's
os.system("ps aux | grep sdkserver | awk '{print $2'} | xargs kill")
os.system("ruby sdkserver.rb &")
os.system("python sdkserver.py  &")
Example 39
import os
from rabbit_helper import RabbitHelper
from cache import CacheHelper
import testcfg as cfg  # module names assumed from the surrounding project

queues = set(CacheHelper.queues())

# cleaning up seriesly database (fast and slow created by cbtop)
if cfg.SERIESLY_IP != '':
    from seriesly import Seriesly
    os.system("curl -X DELETE http://{0}:3133/fast".format(cfg.SERIESLY_IP))
    os.system("curl -X DELETE http://{0}:3133/slow".format(cfg.SERIESLY_IP))
    os.system("curl -X DELETE http://{0}:3133/event".format(cfg.SERIESLY_IP))
    os.system("curl -X DELETE http://{0}:3133/atop".format(cfg.SERIESLY_IP))

    seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
    seriesly.create_db('event')

for q_ in queues:
    try:
        RabbitHelper().delete(q_)
        print("Cleanup Queue: %s" % q_)
    except Exception as ex:
        pass

# clean up cache
CacheHelper.cacheClean()

# start sdk server
os.system("python sdkserver.py  &")
Example 41
import argparse
import json
from rabbit_helper import RabbitHelper

parser = argparse.ArgumentParser(description='CB System Test Tool')
subparser = parser.add_subparsers(dest="subparsers")
rabbitHelper = RabbitHelper()


def add_modifier_args(parser):
    parser.add_argument("--cc_queues",    nargs='+', help="queues to copy created keys into")
    parser.add_argument("--consume_queue",help="queue with keys to get/update/delete")
    parser.add_argument("--precondition", help="required stat or cluster state required before running workload")
    parser.add_argument("--postcondition",help="required stat or cluster state required to complete workload")
    parser.add_argument("--wait",  nargs=3,  help="time to wait before starting workload: <hour> <min> <sec>", metavar = ('HOUR','MIN','SEC'), type=int)
    parser.add_argument("--expires",nargs=3,  help="time to wait before terminating workload: <hour> <min> <sec>", metavar = ('HOUR','MIN','SEC'), type=int)

def add_template_parser(parent):
    parser = parent.add_parser("template")

    parser.add_argument("--name",     help="template name", required = True)
    parser.add_argument("--ttl",      default=0, help="document expires time")
    parser.add_argument("--flags",    default=0, help="document create flags")
    parser.add_argument("--cc_queues",nargs='+', help="queues to copy created keys into")
    parser.add_argument("--kvpairs",   nargs='+', help="list of kv items i.e=> state:ca,age:28,company:cb")
    parser.add_argument("--type",    help="json/non-json default is json", default="json")
    parser.add_argument("--size",    help="size of documents. padding is used if necessary")

#TODO    parser.add_argument("--blobs",   nargs='+', help="data strings for non-json docs")
    parser.set_defaults(handler=import_template)
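
Given the set_defaults(handler=import_template) pattern above, dispatch after parsing presumably follows the standard argparse subcommand idiom:

if __name__ == "__main__":
    args = parser.parse_args()
    # each subparser installs its handler via set_defaults
    args.handler(args)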
Example 42
# as seriesly db
if "--purge" in sys.argv:

    queues = set(CacheHelper.queues())

    # cleaning up seriesly database (fast and slow created by cbtop)
    if cfg.SERIESLY_IP != '':
        from seriesly import Seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        dbs = seriesly.list_dbs()
        for db in dbs:
            seriesly.drop_db(db)

        seriesly.create_db('event')

    for q_ in queues:
        try:
            RabbitHelper().delete(q_)
            print("Cleanup Queue: %s" % q_)
        except Exception as ex:
            pass

    # clean up cache
    CacheHelper.cacheClean()

# start local consumer
exchange = cfg.CB_CLUSTER_TAG + "consumers"
RabbitHelper().exchange_declare(exchange, "fanout")
os.system("python consumer.py  &")
os.system("ulimit -n 10240")
Example 43
    def __init__(self, name, task, e):
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.template = task['template']
        self.default_tsizes = [128, 256]
        # integer division keeps the per-client op counts whole
        self.create_count = task['create_count'] // self.op_factor
        self.update_count = task['update_count'] // self.op_factor
        self.get_count = task['get_count'] // self.op_factor
        self.del_count = task['del_count'] // self.op_factor
        self.exp_count = task['exp_count'] // self.op_factor
        self.ttl = task['ttl']
        self.miss_perc = task['miss_perc']
        self.active_hosts = task['active_hosts']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.consume_queue = task['consume_queue']
        self.standalone = task['standalone']
        self.ccq = None
        self.hotkey_batches = []

        if self.consume_queue is not None:
            RabbitHelper().declare(self.consume_queue)

        if task['template']['cc_queues']:
            self.ccq = str(task['template']['cc_queues'][0])  # only supporting 1 now
            RabbitHelper().declare(self.ccq)

        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        if not self.active_hosts:
            self.active_hosts = [cfg.COUCHBASE_IP]

        # choose a random host from the (possibly defaulted) active list;
        # indexing task['active_hosts'] here would fail when it is empty
        addr = random.choice(self.active_hosts).split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]

        self.e = e
        self.cb = None
        self.isterminal = False
        self.done = False

        try:
            endpoint = "%s:%s/%s" % (host, port, self.bucket)
            self.cb = Bucket(endpoint, password=self.password)
        except Exception as ex:
            logging.error("[Thread %s] cannot reach %s" %
                          (self.name, endpoint))
            logging.error(ex)
            self.isterminal = True

        logging.info("[Thread %s] started for workload: %s" %
                     (self.name, task['id']))