Example #1
    def addTask(self, bucket, taskid, workload):
        newPair = (taskid, workload)
        if bucket not in self.history:
            self.history[bucket] = {"tasks": [newPair]}
        else:
            self.history[bucket]["tasks"].append(newPair)
        ObjCacher().store(CacheHelper.BUCKETSTATUSCACHEKEY, self)
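As a quick illustration of the structure being cached, here is a minimal sketch of the history layout after two calls on the same bucket; BucketHistory and the argument values below are hypothetical stand-ins (the real class also persists through ObjCacher):

# Hypothetical stand-in showing only the history bookkeeping.
class BucketHistory(object):
    def __init__(self):
        self.history = {}

    def addTask(self, bucket, taskid, workload):
        newPair = (taskid, workload)
        if bucket not in self.history:
            self.history[bucket] = {"tasks": [newPair]}
        else:
            self.history[bucket]["tasks"].append(newPair)

bh = BucketHistory()
bh.addTask("default", "task-1", "kv-load")
bh.addTask("default", "task-2", "kv-load")
assert bh.history == {
    "default": {"tasks": [("task-1", "kv-load"), ("task-2", "kv-load")]}
}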
Example #2
def setupCacheMissQueues(workload):
    """ assuming misses will come from keys in
        consume_queue or cc_queue.
        so make location where keys were going
        to be read the miss queue and set
        consume queue to location where new keys are
        being generated.

        Only required that at least a cc_queue
        with miss items is provided"""

    # create another cc_queue that holds only the hot items
    new_cc_queue = None
    if workload.cc_queues is None:

        new_cc_queue = workload.id + "__hot__"
        workload.cc_queues = [new_cc_queue]

        # delete new cc_queue if it exists
        try:
            rabbitHelper.delete(new_cc_queue)
        except Exception:
            pass  # queue already deleted
    else:
        new_cc_queue = workload.cc_queues[0]

    # move old consume queue to miss queue
    if workload.consume_queue is not None:
        workload.miss_queue = workload.consume_queue

    # make new cc_queue the consume queue
    workload.consume_queue = new_cc_queue

    # save changes
    ObjCacher().store(CacheHelper.WORKLOADCACHEKEY, workload)
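For orientation, a sketch of the queue layout before and after the rearrangement above, using a hypothetical workload stub; the RabbitHelper/ObjCacher side effects are ignored so only the attribute shuffling is visible:

# Hypothetical stub with only the attributes setupCacheMissQueues touches.
class StubWorkload(object):
    def __init__(self):
        self.id = "wl_1"
        self.cc_queues = None            # no hot-item queue yet
        self.consume_queue = "wl_1_keys"
        self.miss_queue = None

wl = StubWorkload()

# Before: keys are read from "wl_1_keys" and no hot queue exists.
# After setupCacheMissQueues(wl) the intended layout is:
#   wl.cc_queues     == ["wl_1__hot__"]  # new queue that collects hot items
#   wl.miss_queue    == "wl_1_keys"      # old consume queue now feeds misses
#   wl.consume_queue == "wl_1__hot__"    # reads switch to the hot queue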
Example #3
    def __setattr__(self, name, value):
        super(ActiveTaskChecker, self).__setattr__(name, value)

        # auto cache object when certain keys change
        if self.initialized and name in ("initialized", "task_started",
                                         "empty_stat_count"):
            ObjCacher().store(CacheHelper.ACTIVETASKCACHEKEY, self)
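Several of these classes hook __setattr__ to persist the object whenever a watched attribute changes; the initialized flag matters because every assignment inside __init__ also passes through __setattr__, so caching has to wait until construction is finished. A minimal, self-contained sketch of the pattern, with a list-backed stand-in for ObjCacher (all names below are hypothetical):

class ListCache(object):
    """Hypothetical in-memory stand-in for ObjCacher."""
    stores = []
    def store(self, key, obj):
        ListCache.stores.append((key, obj))

class AutoCached(object):
    def __init__(self, name):
        # must be assigned first: every later assignment checks this flag
        self.initialized = False
        self.name = name
        self.initialized = True     # from here on, watched changes persist

    def __setattr__(self, name, value):
        super(AutoCached, self).__setattr__(name, value)
        if self.initialized and name == "name":
            ListCache().store("autocached", self)

obj = AutoCached("wl_1")
assert len(ListCache.stores) == 0   # nothing is cached during __init__
obj.name = "wl_2"
assert len(ListCache.stores) == 1   # watched attribute change triggers a store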
Example #4
    def __init__(self, params):
        self.name = params["name"]
        self.id = self.name
        self.ttl = params["ttl"]
        self.flags = params["flags"]
        self.cc_queues = params["cc_queues"]
        self.kv = params["kv"]
        self.size = params["size"]

        # cache
        ObjCacher().store(CacheHelper.TEMPLATECACHEKEY, self)
Example #5
    def __setattr__(self, name, value):
        super(Workload, self).__setattr__(name, value)

        # auto cache workload when certain attributes change
        # if object has been fully setup
        if name in Workload.AUTOCACHEKEYS and self.initialized:
            ObjCacher().store(CacheHelper.WORKLOADCACHEKEY, self)

        # check if workload is being deactivated
        if name == "active" and self.active == False and self.initialized:
            self.requeueNonDeletedKeys()
Example #6
    def __setattr__(self, name, value):
        super(Workload, self).__setattr__(name, value)

        # auto cache workload when certain attributes change
        # if object has been fully setup
        if name in Workload.AUTOCACHEKEYS and self.initialized:
            ObjCacher().store(CacheHelper.WORKLOADCACHEKEY, self)

        # check if workload is being deactivated
        if name == "active" and self.active == False and self.initialized:
            msg = {'active': False, 'id': self.id}
            RabbitHelper().putMsg('', json.dumps(msg), EXCHANGE)
            logger.error("kill task %s" % self.id)
Example #7
    def update(self, recent_key, recent_id):

        if self.startkey is None or recent_key < self.startkey:
            self.startkey = recent_key
            self.startkey_docid = recent_id
        if self.endkey is None or recent_key > self.endkey:
            self.endkey = recent_key
            self.endkey_docid = recent_id

        self.recentkey = recent_key
        self.recentkey_docid = recent_id

        ObjCacher().store(CacheHelper.QBUILDCACHEKEY, self)
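In other words, update() maintains a running minimum (startkey), maximum (endkey), and the most recently seen key, along with the docids observed at those boundaries. A hypothetical sequence of calls to illustrate the expected state:

# Assuming a freshly constructed tracker whose key fields start as None:
#   update(5, "doc5")  ->  startkey=5, endkey=5, recentkey=5
#   update(2, "doc2")  ->  startkey=2, endkey=5, recentkey=2
#   update(9, "doc9")  ->  startkey=2, endkey=9, recentkey=9
# startkey_docid/endkey_docid hold the docids seen at those boundary keys
# ("doc2" and "doc9" after the third call).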
Example #8
    def __init__(self, params):
        logger.error(params)
        self.name = params["name"]
        self.id = self.name
        self.ttl = params["ttl"]
        self.flags = params["flags"]
        self.cc_queues = params["cc_queues"]
        self.kv = params["kv"]
        self.size = params.get("size") or ['128']
        self.size = list(map(int, self.size))  # list() keeps sizes indexable on Python 3
        self.indexed_keys = []

        # cache
        ObjCacher().store(CacheHelper.TEMPLATECACHEKEY, self)
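A small standalone check of the size fallback above, using hypothetical params fragments; wrapping map() in list() keeps the coerced sizes indexable on Python 3 as well:

# "size" present: every entry is coerced to int.
sizes = list(map(int, {"size": ["128", "256"]}.get("size") or ['128']))
assert sizes == [128, 256]

# "size" missing: fall back to the single default entry.
sizes = list(map(int, {}.get("size") or ['128']))
assert sizes == [128]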
Example #9
def update_node_stats(node_stats, sample):

    for key in sample.keys():
        if key != 'ip':

            if key not in node_stats.samples:
                node_stats.samples[key] = []
            try:
                val = float(re.sub(r'[^\d.]+', '', sample[key]))
                node_stats.samples[key].append(val)
                ObjCacher().store(CacheHelper.NODESTATSCACHEKEY, node_stats)
            except Exception as ex:
                logger.error("Error saving data for (%s,%s): %s" %
                             (key, sample[key], ex))
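The re.sub call above keeps only digits and dots, so stat samples that arrive with units or other text still convert to floats. A standalone check of that conversion (sample values are made up):

import re

def to_float(raw):
    # same expression as above: strip everything that is not a digit or a dot
    return float(re.sub(r'[^\d.]+', '', raw))

assert to_float("512MB") == 512.0
assert to_float("12.5 %") == 12.5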
Example #10
    def updateIndexKeys(self, key):

        template = Template.from_cache(str(self.template))

        # update workload with information about which keys are being indexed
        if key is not None:

            # when the indexed key does not exist in the kv pair, do not update
            if key in template.kv:

                # do not update if we are already tracking this index key
                if key not in self.indexed_keys:

                    # update and cache workload object
                    self.indexed_keys.append(key)
                    ObjCacher().store(CacheHelper.WORKLOADCACHEKEY, self)
            else:
                logger.error(
                    "key: '%s' does not exist in kvpair.  Smart querying disabled"
                    % key)
Example #11
def updateClusterStatus(ignore_result=True):

    done = False

    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status") or\
        ClusterStatus()

    # check cluster nodes
    cached_nodes = clusterStatus.nodes
    new_cached_nodes = []

    for node in cached_nodes:

        # get an active node
        if clusterStatus.http_ping_node(node) is not None:

            # get remaining nodes
            active_nodes = clusterStatus.get_cluster_nodes(node)

            # populate cache with healthy nodes
            for active_node in active_nodes:
                if active_node.status == 'healthy':
                    new_cached_nodes.append(active_node)

            break

    if len(new_cached_nodes) > 0:

        # update the cached node list only if cluster membership changed
        new_node_list = ["%s:%s" % (n.ip, n.port) for n in new_cached_nodes]

        if len(new_node_list) != len(cached_nodes) or\
            len(set(clusterStatus.get_all_hosts()).intersection(new_node_list)) !=\
                len(cached_nodes):
            clusterStatus.nodes = new_cached_nodes
            clusterStatus.update_orchestrator()
    else:
        clusterStatus.orchestrator = None
        ObjCacher().delete(CacheHelper.CLUSTERSTATUSKEY, clusterStatus)
Example #12
    def __setattr__(self, name, value):
        super(QueryWorkload, self).__setattr__(name, value)

        # cache when active key mutated
        if name == 'active':
            ObjCacher().store(CacheHelper.QUERYCACHEKEY, self)
Example #13
    def __setattr__(self, name, value):

        # auto cache changes made to this object
        super(ClusterStatus, self).__setattr__(name, value)
        if self.initialized:
            ObjCacher().store(CacheHelper.CLUSTERSTATUSKEY, self)
Example #14
    def __setattr__(self, name, value):
        super(NodeStats, self).__setattr__(name, value)
        ObjCacher().store(CacheHelper.NODESTATSCACHEKEY, self)
Example #15
    @staticmethod
    def from_cache(id_):
        return ObjCacher().instance(CacheHelper.BUCKETSTATUSCACHEKEY, id_)
Example #16
    def __setattr__(self, name, value):
        super(BucketStatus, self).__setattr__(name, value)
        ObjCacher().store(CacheHelper.BUCKETSTATUSCACHEKEY, self)
Example #17
    @staticmethod
    def from_cache(id_):
        return ObjCacher().instance(CacheHelper.NODESTATSCACHEKEY, id_)
Example #18
    @staticmethod
    def from_cache(id_):
        return ObjCacher().instance(CacheHelper.TEMPLATECACHEKEY, id_)
Example #19
    def __setattr__(self, name, value):
        super(Workload, self).__setattr__(name, value)

        # cache when the active or postconditions key mutates
        if name in ('active', 'postconditions'):
            ObjCacher().store(CacheHelper.WORKLOADCACHEKEY, self)
Example #20
import copy
import os
import sys

sys.path = ["../"] + sys.path

import unittest
import logger
from membase.api.rest_client import RestConnection, Bucket, RestHelper
from couchbase.cluster import Cluster
from TestInput import TestInputSingleton
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from cache import ObjCacher, CacheHelper
import testcfg as cfg
from app.workload_manager import ClusterStatus
ObjCacher().clear(CacheHelper.CLUSTERSTATUSKEY)


class initialize(unittest.TestCase):
    def setUp(self):
        self._log = logger.Logger.get_logger()
        self._input = TestInputSingleton.input
        self._clusters_dic = self._input.clusters
        self._clusters_keys_olst = range(len(self._clusters_dic))
        try:
            self._num_initial_nodes = self._input.param("initial_nodes",
                                                        '1').split(',')
        except Exception:
            self._num_initial_nodes = [self._input.param("initial_nodes", '1')]
        self._buckets = []
        self._default_bucket = self._input.param("default_bucket", False)
Example #21
    @staticmethod
    def from_cache():
        id_ = cfg.CB_CLUSTER_TAG + "active_task_status"
        return ObjCacher().instance(CacheHelper.ACTIVETASKCACHEKEY, id_)
Example #22
    @staticmethod
    def from_cache(id_):
        return ObjCacher().instance(CacheHelper.QBUILDCACHEKEY, id_)
Example #23
    @staticmethod
    def from_cache(id_):
        return ObjCacher().instance(CacheHelper.WORKLOADCACHEKEY, id_)
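Taken together, the snippets pair ObjCacher().store(cachekey, obj) calls in __init__/__setattr__ hooks with ObjCacher().instance(cachekey, id_) lookups in the from_cache helpers. A minimal round-trip sketch of that contract, using a dict-backed stand-in keyed by the object's id (the real ObjCacher presumably persists to an external cache rather than process memory; all names below are hypothetical):

class FakeObjCacher(object):
    """Hypothetical in-memory stand-in for ObjCacher."""
    _data = {}

    def store(self, cachekey, obj):
        FakeObjCacher._data[(cachekey, obj.id)] = obj

    def instance(self, cachekey, id_):
        return FakeObjCacher._data.get((cachekey, id_))

WORKLOADCACHEKEY = "workload"   # stand-in for CacheHelper.WORKLOADCACHEKEY

class CachedWorkload(object):
    def __init__(self, id_):
        self.id = id_
        self.active = True      # triggers the first store below

    def __setattr__(self, name, value):
        super(CachedWorkload, self).__setattr__(name, value)
        if name == "active":
            FakeObjCacher().store(WORKLOADCACHEKEY, self)

    @staticmethod
    def from_cache(id_):
        return FakeObjCacher().instance(WORKLOADCACHEKEY, id_)

wl = CachedWorkload("wl_1")
assert CachedWorkload.from_cache("wl_1") is wl   # stored on attribute change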