예제 #1
0
    def __init__(self, name, task, e):
        """Worker thread that drives a key-value workload against a bucket.

        Args:
            name: thread name, also used as the prefix for generated doc keys.
            task: workload spec dict (op counts, bucket, template, queues, ...).
            e: threading.Event gating the run loop.
        """
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0  # monotonically increasing doc-key counter
        # each client thread handles its share of the task-level op counts
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.template = task['template']
        self.default_tsizes = [128, 256]
        # BUG FIX: integer division -- this file uses the py3 ``queue`` module,
        # and on Python 3 '/' yields floats; these counts are used as loop
        # bounds and batch sizes
        self.create_count = task['create_count'] // self.op_factor
        self.update_count = task['update_count'] // self.op_factor
        self.get_count = task['get_count'] // self.op_factor
        self.del_count = task['del_count'] // self.op_factor
        self.exp_count = task['exp_count'] // self.op_factor
        self.ttl = task['ttl']
        self.miss_perc = task['miss_perc']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.consume_queue = task['consume_queue']
        self.standalone = task['standalone']
        self.ccq = None
        self.hotkey_batches = []

        if self.consume_queue is not None:
            RabbitHelper().declare(self.consume_queue)

        if task['template']['cc_queues']:
            # only supporting 1 now
            self.ccq = str(task['template']['cc_queues'][0])
            RabbitHelper().declare(self.ccq)

        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        self.active_hosts = task['active_hosts']
        if not self.active_hosts:
            self.active_hosts = [cfg.COUCHBASE_IP]

        # BUG FIX: pick from self.active_hosts, which has the fallback applied;
        # indexing task['active_hosts'] raised IndexError whenever the task
        # supplied no hosts
        addr = self.active_hosts[random.randint(0, len(self.active_hosts) - 1)].split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]

        self.e = e
        self.cb = None
        self.isterminal = False  # set on fatal SDK errors; run() exits on it
        self.done = False

        try:
            endpoint = "%s:%s/%s" % (host, port, self.bucket)
            self.cb = Bucket(endpoint, password=self.password)
        except Exception as ex:
            logging.error("[Thread %s] cannot reach %s" %
                          (self.name, endpoint))
            logging.error(ex)
            self.isterminal = True

        logging.info("[Thread %s] started for workload: %s" % (self.name, task['id']))
예제 #2
0
    def __init__(self, name, task, e):
        """Worker thread that drives a key-value workload against a bucket.

        Args:
            name: thread name, also used as the prefix for generated doc keys.
            task: workload spec dict (op counts, bucket, template, queues, ...).
            e: threading.Event gating the run loop.
        """
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0  # monotonically increasing doc-key counter
        # each client thread handles its share of the task-level op counts
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.template = task['template']
        self.default_tsizes = [128, 256]
        # BUG FIX: integer division -- this file uses the py3 ``queue`` module,
        # and on Python 3 '/' yields floats; these counts are used as loop
        # bounds and batch sizes
        self.create_count = task['create_count'] // self.op_factor
        self.update_count = task['update_count'] // self.op_factor
        self.get_count = task['get_count'] // self.op_factor
        self.del_count = task['del_count'] // self.op_factor
        self.exp_count = task['exp_count'] // self.op_factor
        self.ttl = task['ttl']
        self.miss_perc = task['miss_perc']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.consume_queue = task['consume_queue']
        self.standalone = task['standalone']
        self.ccq = None
        self.hotkey_batches = []

        if self.consume_queue is not None:
            RabbitHelper().declare(self.consume_queue)

        if task['template']['cc_queues']:
            # only supporting 1 now
            self.ccq = str(task['template']['cc_queues'][0])
            RabbitHelper().declare(self.ccq)

        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        self.active_hosts = task['active_hosts']
        if not self.active_hosts:
            self.active_hosts = [cfg.COUCHBASE_IP]

        # BUG FIX: pick from self.active_hosts, which has the fallback applied;
        # indexing task['active_hosts'] raised IndexError whenever the task
        # supplied no hosts
        addr = self.active_hosts[random.randint(0, len(self.active_hosts) - 1)].split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]

        self.e = e
        self.cb = None
        self.isterminal = False  # set on fatal SDK errors; run() exits on it
        self.done = False

        try:
            endpoint = "%s:%s/%s" % (host, port, self.bucket)
            self.cb = Bucket(endpoint, password=self.password)
        except Exception as ex:
            logging.error("[Thread %s] cannot reach %s" %
                          (self.name, endpoint))
            logging.error(ex)
            self.isterminal = True

        logging.info("[Thread %s] started for workload: %s" %
                     (self.name, task['id']))
예제 #3
0
class SDKClient(threading.Thread):
    def __init__(self, name, task, e):
        """Worker thread that drives a key-value workload against a bucket.

        Args:
            name: thread name, also used as the prefix for generated doc keys.
            task: workload spec dict (op counts, bucket, template, queues, ...).
            e: threading.Event gating the run loop.
        """
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0  # monotonically increasing doc-key counter
        # each client thread handles its share of the task-level op counts
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.template = task['template']
        self.default_tsizes = [128, 256]
        # BUG FIX: integer division -- this file uses the py3 ``queue`` module,
        # and on Python 3 '/' yields floats; these counts are used as loop
        # bounds and batch sizes
        self.create_count = task['create_count'] // self.op_factor
        self.update_count = task['update_count'] // self.op_factor
        self.get_count = task['get_count'] // self.op_factor
        self.del_count = task['del_count'] // self.op_factor
        self.exp_count = task['exp_count'] // self.op_factor
        self.ttl = task['ttl']
        self.miss_perc = task['miss_perc']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.consume_queue = task['consume_queue']
        self.standalone = task['standalone']
        self.ccq = None
        self.hotkey_batches = []

        if self.consume_queue is not None:
            RabbitHelper().declare(self.consume_queue)

        if task['template']['cc_queues']:
            # only supporting 1 now
            self.ccq = str(task['template']['cc_queues'][0])
            RabbitHelper().declare(self.ccq)

        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        self.active_hosts = task['active_hosts']
        if not self.active_hosts:
            self.active_hosts = [cfg.COUCHBASE_IP]

        # BUG FIX: pick from self.active_hosts, which has the fallback applied;
        # indexing task['active_hosts'] raised IndexError whenever the task
        # supplied no hosts
        addr = self.active_hosts[random.randint(0, len(self.active_hosts) - 1)].split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]

        self.e = e
        self.cb = None
        self.isterminal = False  # set on fatal SDK errors; run() exits on it
        self.done = False

        try:
            endpoint = "%s:%s/%s" % (host, port, self.bucket)
            self.cb = Bucket(endpoint, password=self.password)
        except Exception as ex:
            logging.error("[Thread %s] cannot reach %s" %
                          (self.name, endpoint))
            logging.error(ex)
            self.isterminal = True

        logging.info("[Thread %s] started for workload: %s" %
                     (self.name, task['id']))

    def run(self):
        """Main loop: perform one op cycle per second until the event clears.

        Sets the shared event to signal readiness, loops while it stays set,
        flushes buffered key ranges every 120 cycles and once on exit.
        A terminal SDK error flushes hot keys and kills the process.
        """
        cycle = ops_total = 0
        self.e.set()

        while self.e.is_set() == True:

            start = datetime.datetime.now()

            # do an op cycle
            self.do_cycle()

            if self.isterminal == True:
                # some error occured during workload
                self.flushq(True)
                exit(-1)

            # wait till next cycle
            # NOTE(review): .microseconds ignores whole seconds, so a cycle
            # taking > 1s still yields a positive wait -- confirm intended
            end = datetime.datetime.now()
            wait = 1 - (end - start).microseconds / float(1000000)
            if (wait > 0):
                time.sleep(wait)
            else:
                pass  #probably  we are overcomitted, but it's ok

            ops_total = ops_total + self.ops_sec
            cycle = cycle + 1

            if (cycle % 120) == 0:  # 2 mins
                logging.info("[Thread %s] total ops: %s" %
                             (self.name, ops_total))
                self.flushq()

        self.flushq()
        logging.info("[Thread %s] done!" % (self.name))

    def flushq(self, flush_hotkeys=False):

        if self.standalone:
            return

        mq = RabbitHelper()

        if self.ccq is not None:

            logging.info("[Thread %s] flushing %s items to %s" %
                         (self.name, self.memq.qsize(), self.ccq))

            # declare queue
            mq.declare(self.ccq)

            # empty the in memory queue
            while self.memq.empty() == False:
                try:
                    msg = self.memq.get_nowait()
                    msg = json.dumps(msg)
                    mq.putMsg(self.ccq, msg)
                except queue.Empty:
                    pass

        # hot keys
        if flush_hotkeys and (len(self.hotkey_batches) > 0):

            # try to put onto remote queue
            queue = self.consume_queue or self.ccq

            if queue is not None:
                key_map = {
                    'start': self.hotkey_batches[0][0],
                    'end': self.hotkey_batches[-1][-1]
                }
                msg = json.dumps(key_map)
                mq.putMsg(queue, msg)
                self.hotkey_batches = []

    def do_cycle(self):

        sizes = self.template.get('size') or self.default_tsizes
        t_size = sizes[random.randint(0, len(sizes) - 1)]
        self.template['t_size'] = t_size

        if self.create_count > 0:

            count = self.create_count
            docs_to_expire = self.exp_count
            # check if we need to expire some docs
            if docs_to_expire > 0:

                # create an expire batch
                self.mset(self.template, docs_to_expire, ttl=self.ttl)
                count = count - docs_to_expire

            self.mset(self.template, count)

        if self.update_count > 0:
            self.mset_update(self.template, self.update_count)

        if self.get_count > 0:
            self.mget(self.get_count)

        if self.del_count > 0:
            self.mdelete(self.del_count)

    def mset(self, template, count, ttl=0):
        """Create ``count`` documents in batches of ``self.batch_size``.

        Keys are ``self.name + <counter>``; after each batch the (start, end)
        key range is buffered on the in-memory queue so later get/update/
        delete cycles can rediscover the key space.

        Args:
            template: document template; resolved once per call.
            count: number of documents to create.
            ttl: expiry in seconds (0 = no expiry).
        """
        msg = {}
        keys = []
        cursor = 0

        template = resolveTemplate(template)
        # BUG FIX: ``xrange`` does not exist on Python 3 (this file already
        # uses the py3 ``queue`` module); use ``range``
        for j in range(count):
            self.i = self.i + 1
            key = self.name + str(self.i)
            msg[key] = template
            keys.append(key)

            if ((j + 1) % self.batch_size) == 0:
                batch = keys[cursor:j + 1]
                self._mset(msg, ttl)
                self.memq.put_nowait({'start': batch[0], 'end': batch[-1]})
                msg = {}
                # BUG FIX: was ``cursor = j``, which made consecutive batch
                # ranges overlap by one key; the next batch starts at j + 1
                cursor = j + 1
            elif j == (count - 1):
                batch = keys[cursor:]
                self._mset(msg, ttl)
                self.memq.put_nowait({'start': batch[0], 'end': batch[-1]})

    def _mset(self, msg, ttl=0):
        """Perform one set_multi with workload-grade error handling.

        Temporary failures, timeouts and network errors are logged and
        tolerated; any other unexpected error marks the client terminal so
        run() will exit.
        """
        try:
            self.cb.set_multi(msg, ttl=ttl)
        except TemporaryFailError:
            logging.warn("temp failure during mset - cluster may be unstable")
        except TimeoutError:
            logging.warn("cluster timed trying to handle mset")
        except NetworkError as nx:
            logging.error("network error")
            logging.error(nx)
        except Exception as ex:
            # unknown failure: stop this client rather than loop on errors
            logging.error(ex)
            self.isterminal = True

    def mset_update(self, template, count):
        """Re-set ``count`` existing documents with a freshly resolved template.

        Keys are pulled (and requeued) from the buffered key ranges; each
        batch is written with set_multi. SDK errors are logged; unexpected
        errors mark the client terminal.
        """
        batches = self.getKeys(count)
        template = resolveTemplate(template)

        for batch in batches:
            # BUG FIX: build the payload per batch -- the original reused one
            # dict across batches, re-writing every earlier batch's keys on
            # each subsequent set_multi
            msg = {}
            try:
                for key in batch:
                    msg[key] = template
                self.cb.set_multi(msg)
            except NotFoundError as nf:
                logging.error("update key not found!  %s: " % nf.key)
            except TimeoutError:
                logging.warn(
                    "cluster timed out trying to handle mset - cluster may be unstable"
                )
            except NetworkError as nx:
                logging.error("network error")
                logging.error(nx)
            except TemporaryFailError:
                logging.warn(
                    "temp failure during mset - cluster may be unstable")
            except Exception as ex:
                logging.error(ex)
                self.isterminal = True

    def mget(self, count):

        batches = []
        if self.miss_perc > 0:
            batches = self.getCacheMissKeys(count)
        else:
            batches = self.getKeys(count)

        if len(batches) > 0:

            for batch in batches:
                try:
                    self.cb.get_multi(batch)
                except NotFoundError as nf:
                    logging.warn("get key not found!  %s: " % nf.key)
                    pass
                except TimeoutError:
                    logging.warn(
                        "cluster timed out trying to handle mget - cluster may be unstable"
                    )
                except NetworkError as nx:
                    logging.error("network error")
                    logging.error(nx)
                except Exception as ex:
                    logging.error(ex)
                    self.isterminal = True

    def mdelete(self, count):
        batches = self.getKeys(count, requeue=False)
        keys_deleted = 0

        # delete from buffer
        if len(batches) > 0:
            keys_deleted = self._mdelete(batches)
        else:
            pass

    def _mdelete(self, batches):
        """delete_multi each batch; returns the number of keys attempted.

        Not-found keys and timeouts are tolerated; unexpected errors mark
        the client terminal.
        """
        keys_deleted = 0
        for batch in batches:
            try:
                if len(batch) > 0:
                    # counted before the call, so the total includes keys of
                    # a batch whose delete subsequently fails
                    keys_deleted = len(batch) + keys_deleted
                    self.cb.delete_multi(batch)
            except NotFoundError as nf:
                logging.warn("get key not found!  %s: " % nf.key)
            except TimeoutError:
                logging.warn(
                    "cluster timed out trying to handle mdelete - cluster may be unstable"
                )
            except NetworkError as nx:
                logging.error("network error")
                logging.error(nx)
            except Exception as ex:
                logging.error(ex)
                self.isterminal = True

        return keys_deleted

    def getCacheMissKeys(self, count):

        # returns batches of keys where first batch contains # of keys to miss
        keys_retrieved = 0
        batches = []
        miss_keys = []

        num_to_miss = int(((self.miss_perc / float(100)) * count))
        miss_batches = self.getKeys(num_to_miss, force_stale=True)

        if len(self.hotkey_batches) == 0:
            # hotkeys are taken off queue and cannot be reused
            # until workload is flushed
            need = count - num_to_miss
            self.hotkey_batches = self.getKeys(need, requeue=False)

        batches = miss_batches + self.hotkey_batches
        return batches

    def getKeys(self, count, requeue=True, force_stale=False):

        keys_retrieved = 0
        batches = []

        while keys_retrieved < count:

            # get keys
            keys = self.getKeysFromQueue(requeue, force_stale=force_stale)

            if len(keys) == 0:
                break

            # in case we got too many keys slice the batch
            need = count - keys_retrieved
            if (len(keys) > need):
                keys = keys[:need]

            keys_retrieved = keys_retrieved + len(keys)

            # add to batch
            batches.append(keys)

        return batches

    def getKeysFromQueue(self, requeue=True, force_stale=False):

        # get key mapping and convert to keys
        keys = []
        key_map = None

        # priority to stale queue
        if force_stale:
            key_map = self.getKeyMapFromRemoteQueue(requeue)

        # fall back to local qeueue
        if key_map is None:
            key_map = self.getKeyMapFromLocalQueue(requeue)

        if key_map:
            keys = self.keyMapToKeys(key_map)

        return keys

    def keyMapToKeys(self, key_map):

        keys = []
        # reconstruct key-space
        prefix, start_idx = key_map['start'].split('_')
        prefix, end_idx = key_map['end'].split('_')

        for i in range(int(start_idx), int(end_idx) + 1):
            keys.append(prefix + "_" + str(i))

        return keys

    def fillq(self):

        if (self.consume_queue == None) and (self.ccq == None):
            return

        # put about 20 items into the queue
        for i in xrange(20):
            key_map = self.getKeyMapFromRemoteQueue()
            if key_map:
                self.memq.put_nowait(key_map)

        logging.info(
            "[Thread %s] filled %s items from  %s" %
            (self.name, self.memq.qsize(), self.consume_queue or self.ccq))

    def getKeyMapFromLocalQueue(self, requeue=True):

        key_map = None

        try:
            key_map = self.memq.get_nowait()
            if requeue:
                self.memq.put_nowait(key_map)
        except queue.Empty:
            #no more items
            self.fillq()

        return key_map

    def getKeyMapFromRemoteQueue(self, requeue=True):
        """Fetch a key map from RabbitMQ: consume queue first, then cc queue.

        Returns None when no message is available or the fetch fails.
        """
        mq = RabbitHelper()

        # prefer the consume queue; fall back to the cc queue when it is
        # unset or empty (local deliberately NOT named ``queue`` -- that
        # would shadow the stdlib module)
        source = self.consume_queue
        if source is None or mq.qsize(source) == 0:
            source = self.ccq

        key_map = None
        if mq.qsize(source) > 0:
            try:
                key_map = mq.getJsonMsg(source, requeue=requeue)
            except Exception:
                # best-effort fetch: treat any failure as "nothing available"
                pass

        return key_map
예제 #4
0
class SDKClient(threading.Thread):

    def __init__(self, name, task, e):
        """Worker thread that drives a key-value workload against a bucket.

        Args:
            name: thread name, also used as the prefix for generated doc keys.
            task: workload spec dict (op counts, bucket, template, sizes, ...).
            e: threading.Event gating the run loop.
        """
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0  # monotonically increasing doc-key counter
        # each client thread handles its share of the task-level op counts
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.template = task['template']
        self.default_tsizes = task['sizes']
        # BUG FIX: integer division -- this file uses the py3 ``queue`` module,
        # and on Python 3 '/' yields floats; these counts are used as loop
        # bounds and batch sizes
        self.create_count = task['create_count'] // self.op_factor
        self.update_count = task['update_count'] // self.op_factor
        self.get_count = task['get_count'] // self.op_factor
        self.del_count = task['del_count'] // self.op_factor
        self.exp_count = task['exp_count'] // self.op_factor
        self.ttl = task['ttl']
        self.miss_perc = task['miss_perc']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.consume_queue = task['consume_queue']
        self.standalone = task['standalone']
        self.ccq = None
        self.hotkey_batches = []

        if self.ttl:
            self.ttl = int(self.ttl)

        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        self.active_hosts = task['active_hosts']

        # BUG FIX: pick from self.active_hosts; the original indexed
        # task['active_hosts'] directly, diverging from the list actually
        # stored on the instance
        addr = self.active_hosts[random.randint(0, len(self.active_hosts) - 1)].split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]

        self.e = e
        self.cb = None
        self.isterminal = False  # set on fatal SDK errors; run() exits on it
        self.done = False

        try:
            endpoint = "%s:%s/%s" % (host, port, self.bucket)
            self.cb = Bucket(endpoint, password=self.password)
        except Exception as ex:
            logging.error("[Thread %s] cannot reach %s" %
                          (self.name, endpoint))
            logging.error(ex)
            self.isterminal = True

        logging.info("[Thread %s] started for workload: %s" % (self.name, task['id']))

    def run(self):
        """Main loop: perform one op cycle per second until the event clears.

        Sets the shared event to signal readiness, loops while it stays set,
        flushes buffered key ranges every 120 cycles and once on exit.
        A terminal SDK error flushes hot keys and kills the process.
        """
        cycle = ops_total = 0
        self.e.set()

        while self.e.is_set() == True:

            start = datetime.datetime.now()


            # do an op cycle
            self.do_cycle()

            if self.isterminal == True:
                # some error occured during workload
                self.flushq(True)
                exit(-1)

            # wait till next cycle
            # NOTE(review): .microseconds ignores whole seconds, so a cycle
            # taking > 1s still yields a positive wait -- confirm intended
            end = datetime.datetime.now()
            wait = 1 - (end - start).microseconds/float(1000000)
            if (wait > 0):
                time.sleep(wait)
            else:
                pass #probably  we are overcomitted, but it's ok

            ops_total = ops_total + self.ops_sec
            cycle = cycle + 1

            if (cycle % 120) == 0: # 2 mins
                logging.info("[Thread %s] total ops: %s" % (self.name, ops_total))
                self.flushq()

        self.flushq()
        logging.info("[Thread %s] done!" % (self.name))


    def flushq(self, flush_hotkeys = False):
        return # todo for dirty keys

    def do_cycle(self):

        sizes = self.template.get('size') or self.default_tsizes
        t_size = sizes[random.randint(0,len(sizes)-1)]
        self.template['t_size'] = t_size

        if self.create_count > 0:

            count = self.create_count
            docs_to_expire = self.exp_count
            # check if we need to expire some docs
            if docs_to_expire > 0:

                # create an expire batch
                self.mset(self.template, docs_to_expire, ttl = self.ttl)
                count = count - docs_to_expire

            self.mset(self.template, count)

        if self.update_count > 0:
            self.mset_update(self.template, self.update_count)

        if self.get_count > 0:
            self.mget(self.get_count)

        if self.del_count > 0:
            self.mdelete(self.del_count)


    def mset(self, template, count, ttl=0):
        """Create ``count`` documents in batches of ``self.batch_size``.

        Keys are ``self.name + <counter>``; after each batch the (start, end)
        key range is buffered on the in-memory queue so later get/update/
        delete cycles can rediscover the key space.

        Args:
            template: document template; resolved once per call.
            count: number of documents to create.
            ttl: expiry in seconds (0 = no expiry).
        """
        msg = {}
        keys = []
        cursor = 0

        template = resolveTemplate(template)
        # BUG FIX: ``xrange`` does not exist on Python 3 (this file already
        # uses the py3 ``queue`` module); use ``range``
        for j in range(count):
            self.i = self.i + 1
            key = self.name + str(self.i)
            msg[key] = template
            keys.append(key)

            if ((j + 1) % self.batch_size) == 0:
                batch = keys[cursor:j + 1]
                self._mset(msg, ttl)
                self.memq.put_nowait({'start': batch[0], 'end': batch[-1]})
                msg = {}
                # BUG FIX: was ``cursor = j``, which made consecutive batch
                # ranges overlap by one key; the next batch starts at j + 1
                cursor = j + 1
            elif j == (count - 1):
                batch = keys[cursor:]
                self._mset(msg, ttl)
                self.memq.put_nowait({'start': batch[0], 'end': batch[-1]})


    def _mset(self, msg, ttl = 0):
        """Perform one set_multi with workload-grade error handling.

        Temporary failures, timeouts and network errors are logged and
        tolerated; any other unexpected error marks the client terminal so
        run() will exit.
        """
        try:
            self.cb.set_multi(msg, ttl=ttl)
        except TemporaryFailError:
            logging.warn("temp failure during mset - cluster may be unstable")
        except TimeoutError:
            logging.warn("cluster timed trying to handle mset")
        except NetworkError as nx:
            logging.error("network error")
            logging.error(nx)
        except Exception as ex:
            # unknown failure: stop this client rather than loop on errors
            logging.error(ex)
            self.isterminal = True

    def mset_update(self, template, count):
        """Re-set ``count`` existing documents with a freshly resolved template.

        Keys are pulled (and requeued) from the buffered key ranges; each
        batch is written with set_multi. SDK errors are logged; unexpected
        errors mark the client terminal.
        """
        batches = self.getKeys(count)
        template = resolveTemplate(template)

        for batch in batches:
            # BUG FIX: build the payload per batch -- the original reused one
            # dict across batches, re-writing every earlier batch's keys on
            # each subsequent set_multi
            msg = {}
            try:
                for key in batch:
                    msg[key] = template
                self.cb.set_multi(msg)
            except NotFoundError as nf:
                logging.error("update key not found!  %s: " % nf.key)
            except TimeoutError:
                logging.warn("cluster timed out trying to handle mset - cluster may be unstable")
            except NetworkError as nx:
                logging.error("network error")
                logging.error(nx)
            except TemporaryFailError:
                logging.warn("temp failure during mset - cluster may be unstable")
            except Exception as ex:
                logging.error(ex)
                self.isterminal = True


    def mget(self, count):

        batches = []
        if self.miss_perc > 0:
            batches = self.getCacheMissKeys(count)
        else:
            batches = self.getKeys(count)

        if len(batches) > 0:

            for batch in batches:
                try:
                    self.cb.get_multi(batch)
                except NotFoundError as nf:
                    logging.warn("get key not found!  %s: " % nf.key)
                    pass
                except TimeoutError:
                    logging.warn("cluster timed out trying to handle mget - cluster may be unstable")
                except NetworkError as nx:
                    logging.error("network error")
                    logging.error(nx)
                except Exception as ex:
                    logging.error(ex)
                    self.isterminal = True


    def mdelete(self, count):
        batches = self.getKeys(count, requeue = False)
        keys_deleted = 0

        # delete from buffer
        if len(batches) > 0:
            keys_deleted = self._mdelete(batches)
        else:
            pass

    def _mdelete(self, batches):
        """delete_multi each batch; returns the number of keys attempted.

        Not-found keys and timeouts are tolerated; unexpected errors mark
        the client terminal.
        """
        keys_deleted = 0
        for batch in batches:
            try:
                if len(batch) > 0:
                    # counted before the call, so the total includes keys of
                    # a batch whose delete subsequently fails
                    keys_deleted = len(batch) + keys_deleted
                    self.cb.delete_multi(batch)
            except NotFoundError as nf:
                logging.warn("get key not found!  %s: " % nf.key)
            except TimeoutError:
                logging.warn("cluster timed out trying to handle mdelete - cluster may be unstable")
            except NetworkError as nx:
                logging.error("network error")
                logging.error(nx)
            except Exception as ex:
                logging.error(ex)
                self.isterminal = True

        return keys_deleted


    def getCacheMissKeys(self, count):

        # returns batches of keys where first batch contains # of keys to miss
        keys_retrieved = 0
        batches = []
        miss_keys = []

        num_to_miss = int( ((self.miss_perc/float(100)) * count))
        miss_batches = self.getKeys(num_to_miss, force_stale = True)

        if len(self.hotkey_batches) == 0:
            # hotkeys are taken off queue and cannot be reused
            # until workload is flushed
            need = count - num_to_miss
            self.hotkey_batches = self.getKeys(need, requeue = False)


        batches = miss_batches + self.hotkey_batches
        return batches

    def getKeys(self, count, requeue = True, force_stale = False):

        keys_retrieved = 0
        batches = []

        while keys_retrieved < count:

            # get keys
            keys = self.getKeysFromQueue(requeue, force_stale = force_stale)

            if len(keys) == 0:
                break

            # in case we got too many keys slice the batch
            need = count - keys_retrieved
            if(len(keys) > need):
                keys = keys[:need]

            keys_retrieved = keys_retrieved + len(keys)

            # add to batch
            batches.append(keys)


        return batches

    def getKeysFromQueue(self, requeue = True, force_stale = False):

        # get key mapping and convert to keys
        keys = []
        key_map = None

        # priority to stale queue
        if force_stale:
            key_map = self.getKeyMapFromRemoteQueue(requeue)

        # fall back to local qeueue
        if key_map is None:
            key_map = self.getKeyMapFromLocalQueue(requeue)

        if key_map:
            keys = self.keyMapToKeys(key_map)


        return keys

    def keyMapToKeys(self, key_map):

        keys = []
        # reconstruct key-space
        prefix, start_idx = key_map['start'].split('_')
        prefix, end_idx = key_map['end'].split('_')

        for i in range(int(start_idx), int(end_idx) + 1):
            keys.append(prefix+"_"+str(i))

        return keys


    def fillq(self):

        if (self.consume_queue == None) and (self.ccq == None):
            return

        # put about 20 items into the queue
        for i in xrange(20):
            key_map = self.getKeyMapFromRemoteQueue()
            if key_map:
                self.memq.put_nowait(key_map)

        logging.info("[Thread %s] filled %s items from  %s" %
                     (self.name, self.memq.qsize(), self.consume_queue or self.ccq))

    def getKeyMapFromLocalQueue(self, requeue = True):

        key_map = None

        try:
            key_map = self.memq.get_nowait()
            if requeue:
                self.memq.put_nowait(key_map)
        except queue.Empty:
            #no more items
            self.fillq()

        return key_map

    def getKeyMapFromRemoteQueue(self, requeue = True):
        return None 
예제 #5
0
def make_instance():
    """Return the shared Bucket when --global-instance is set, else a new one."""
    if options.global_instance:
        return GLOBAL_INSTANCE
    return Bucket(**CONN_OPTIONS)
예제 #6
0
                default=None,
                type=str,
                help="Use Pure-Python IOPS plugin")
ap.add_argument('-g',
                '--global-instance',
                help="Use global instance",
                default=False,
                action='store_true')
ap.add_argument('--batch', '-N', type=int, help="Batch size", default=1)

# parse CLI options once at import time; workers read them as module globals
options = ap.parse_args()

GLOBAL_INSTANCE = None
CONN_OPTIONS = {'connstr': options.connstr, 'password': options.password}

# eagerly open the shared connection; make_instance() hands it out when
# --global-instance is set
GLOBAL_INSTANCE = Bucket(**CONN_OPTIONS)


def make_instance():
    """Return the process-wide Bucket if requested, otherwise a fresh connection."""
    return GLOBAL_INSTANCE if options.global_instance else Bucket(**CONN_OPTIONS)


class Worker(object):
    """Base workload worker holding the key/value payload configuration."""
    def __init__(self):
        # all settings come from the module-level parsed CLI options
        self.delay = options.delay
        self.key = 'K' * options.ksize
        # value payload is bytes; key stays str
        self.value = b'V' * options.vsize
        self.kv = {}
예제 #7
0
    def __init__(self, name, task, e):
        """Worker thread that drives a key-value workload against a bucket.

        Falls back to RBAC-style (cluster/PasswordAuthenticator) auth when
        the legacy bucket-password connection is rejected.

        Args:
            name: thread name, also used as the prefix for generated doc keys.
            task: workload spec dict (op counts, bucket, auth, queues, ...).
            e: threading.Event gating the run loop.
        """
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0  # monotonically increasing doc-key counter
        # each client thread handles its share of the task-level op counts
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.user_password = task['user_password']
        self.template = task['template']
        self.default_tsizes = task['sizes']
        # BUG FIX: integer division -- this file uses the py3 ``queue`` module,
        # and on Python 3 '/' yields floats; these counts are used as loop
        # bounds and batch sizes
        self.create_count = task['create_count'] // self.op_factor
        self.update_count = task['update_count'] // self.op_factor
        self.get_count = task['get_count'] // self.op_factor
        self.del_count = task['del_count'] // self.op_factor
        self.exp_count = task['exp_count'] // self.op_factor
        self.ttl = task['ttl']
        self.persist_to = task['persist_to']
        self.replicate_to = task['replicate_to']
        self.miss_perc = task['miss_perc']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.consume_queue = task['consume_queue']
        self.standalone = task['standalone']
        self.ccq = None
        self.hotkey_batches = []

        if self.ttl:
            self.ttl = int(self.ttl)

        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        self.active_hosts = task['active_hosts']

        # BUG FIX: pick from self.active_hosts; the original indexed
        # task['active_hosts'] directly, diverging from the list actually
        # stored on the instance
        addr = self.active_hosts[random.randint(0, len(self.active_hosts) - 1)].split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]

        self.e = e
        self.cb = None
        self.isterminal = False  # set on fatal SDK errors; run() exits on it
        self.done = False

        try:
            endpoint = "%s:%s/%s" % (host, port, self.bucket)
            self.cb = Bucket(endpoint, password=self.password)

        except AuthError:
            # direct port for cluster_run
            port_mod = int(port) % 9000
            if port_mod != 8091:
                port = str(12000 + port_mod)
            # try rbac style auth
            endpoint = 'couchbase://{0}:{1}?select_bucket=true'.format(
                host, port)
            cluster = Cluster(endpoint)
            auther = PasswordAuthenticator(self.bucket, self.user_password)
            cluster.authenticate(auther)
            self.cb = cluster.open_bucket(self.bucket)

        except Exception as ex:

            logging.error("[Thread %s] cannot reach %s" %
                          (self.name, endpoint))
            logging.error(ex)
            self.isterminal = True

        logging.info("[Thread %s] started for workload: %s" %
                     (self.name, task['id']))