def wait_for_mc_stats_no_timeout(master, bucket, stat_key, stat_value, timeout_in_seconds=-1, verbose=True):
    """Block until memcached stat `stat_key` for `bucket` on `master` equals
    `stat_value`.

    Retries forever; `timeout_in_seconds` is accepted only for signature
    compatibility and is ignored.  Returns True once the stat matches.

    Fixes over the original:
    - `c` was referenced in `finally` even when `MemcachedClient()` itself
      raised, producing a NameError instead of a retry.
    - the polling loop leaked the client connection when `stats()` raised.
    """
    log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, \
                                                                            stat_value, master.ip))
    # keep retrying until reaches the server
    stats = {}
    while not stats:
        c = None
        try:
            c = MemcachedClient(master.ip, 11210)
            c.sasl_auth_plain(bucket, '')
            stats = c.stats()
        except Exception as e:
            log.info("Exception: {0}, retry in 2 seconds ...".format(str(e)))
            stats = {}
            time.sleep(2)
        finally:
            # only close if the constructor succeeded
            if c is not None:
                c.close()
    while str(stats[stat_key]) != str(stat_value):
        c = MemcachedClient(master.ip, 11210)
        try:
            c.sasl_auth_plain(bucket, '')
            stats = c.stats()
        finally:
            # release the connection even if auth/stats raise
            c.close()
        if verbose:
            log.info("{0} : {1}".format(stat_key, stats[stat_key]))
        time.sleep(5)
    return True
def verify_revid(self):
    """Compare getMeta() metadata (deleted, flags, exp, rev_id, cas) for
    every loaded key between the active vb0 nodes of the source and
    destination clusters.  Fails the test on any metadata mismatch, and
    fails at the end if any key was missing on either side."""
    missing_keys = False
    src_node = self.get_active_vb0_node(self.src_master)
    dest_node = self.get_active_vb0_node(self.dest_master)
    # one authenticated client per side, both on the default bucket
    clients = []
    for node in (src_node, dest_node):
        conn = MemcachedClient(node.ip, 11210)
        conn.sasl_auth_plain("cbadminbucket", "password")
        conn.bucket_select("default")
        clients.append(conn)
    src_client, dest_client = clients
    for key in self.keys_loaded:
        try:
            src_meta = src_client.getMeta(key)
            dest_meta = dest_client.getMeta(key)
            self.log.info("deleted, flags, exp, rev_id, cas for key from Source({0}) {1} = {2}"
                          .format(src_node.ip, key, src_meta))
            self.log.info("deleted, flags, exp, rev_id, cas for key from Destination({0}) {1} = {2}"
                          .format(dest_node.ip, key, dest_meta))
            if src_meta != dest_meta:
                self.fail("RevID verification failed for key {0}".format(key))
            self.log.info("RevID verification successful for key {0}".format(key))
        except MemcachedError as e:
            self.log.error("Key {0} threw {1} on getMeta()".format(key, e))
            missing_keys = True
    if missing_keys:
        self.fail("Some keys are missing at destination")
def verify_revid(self):
    """Validate that source and destination agree on each loaded key's
    metadata (deleted, flags, exp, rev_id, cas).  A mismatch fails the test
    immediately; keys that raise on getMeta() are collected and reported as
    missing at the end."""
    src_node = self.get_active_vb0_node(self.src_master)
    dest_node = self.get_active_vb0_node(self.dest_master)

    def _make_client(node):
        # build one authenticated default-bucket client for a cluster side
        conn = MemcachedClient(node.ip, 11210)
        conn.sasl_auth_plain("cbadminbucket", "password")
        conn.bucket_select("default")
        return conn

    src_client = _make_client(src_node)
    dest_client = _make_client(dest_node)
    missing_keys = False
    for key in self.keys_loaded:
        try:
            src_meta = src_client.getMeta(key)
            dest_meta = dest_client.getMeta(key)
            self.log.info("deleted, flags, exp, rev_id, cas for key from Source({0}) {1} = {2}"
                          .format(src_node.ip, key, src_meta))
            self.log.info("deleted, flags, exp, rev_id, cas for key from Destination({0}) {1} = {2}"
                          .format(dest_node.ip, key, dest_meta))
            if src_meta == dest_meta:
                self.log.info("RevID verification successful for key {0}".format(key))
            else:
                self.fail("RevID verification failed for key {0}".format(key))
        except MemcachedError as e:
            self.log.error("Key {0} threw {1} on getMeta()".format(key, e))
            missing_keys = True
    if missing_keys:
        self.fail("Some keys are missing at destination")
def wait_for_mc_stats_no_timeout(master, bucket, stat_key, stat_value, timeout_in_seconds=-1, verbose=True):
    """Poll memcached on `master` until `stat_key` for `bucket` equals
    `stat_value`; returns True when it does.

    `timeout_in_seconds` is ignored (kept for signature compatibility) —
    this helper retries forever, as the name says.

    Fixes over the original:
    - `c.close()` in `finally` raised NameError when `MemcachedClient()`
      itself failed, because `c` was never bound.
    - the polling loop leaked its connection if `stats()` raised.
    """
    log.info(
        "waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, stat_value, master.ip)
    )
    # keep retrying until reaches the server
    stats = {}
    while not stats:
        c = None
        try:
            c = MemcachedClient(master.ip, 11210)
            c.sasl_auth_plain(bucket, "")
            stats = c.stats()
        except Exception as e:
            log.info("Exception: {0}, retry in 2 seconds ...".format(str(e)))
            stats = {}
            time.sleep(2)
        finally:
            # guard: c stays None when the constructor raised
            if c is not None:
                c.close()
    while str(stats[stat_key]) != str(stat_value):
        c = MemcachedClient(master.ip, 11210)
        try:
            c.sasl_auth_plain(bucket, "")
            stats = c.stats()
        finally:
            c.close()
        if verbose:
            log.info("{0} : {1}".format(stat_key, stats[stat_key]))
        time.sleep(5)
    return True
def direct_client(server, bucket, timeout=30):
    """Create a MemcachedClient for `bucket` on `server`, resolving the
    memcached port via REST and authenticating with the bucket's SASL
    credentials.  `server` may be an object with ip/port attributes or a
    dict with an "ip" key."""
    log = logger.Logger.get_logger()
    rest = RestConnection(server)
    node = None
    try:
        node = rest.get_nodes_self()
    except ValueError as e:
        log.info("could not connect to server {0}, will try scanning all nodes".format(server))
    if not node:
        # fall back: look this server up in the full node list
        for candidate in rest.get_nodes():
            if candidate.ip == server.ip and candidate.port == server.port:
                node = candidate
    server_is_dict = isinstance(server, dict)
    if server_is_dict:
        log.info("dict:{0}".format(server))
        log.info("creating direct client {0}:{1} {2}".format(server["ip"], node.memcached, bucket))
    else:
        log.info("creating direct client {0}:{1} {2}".format(server.ip, node.memcached, bucket))
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vBuckets = RestConnection(server).get_vbuckets(bucket)
    host = server["ip"] if server_is_dict else server.ip
    client = MemcachedClient(host, node.memcached, timeout=timeout)
    client.vbucket_count = len(vBuckets)
    bucket_info = rest.get_bucket(bucket)
    # todo raise exception for not bucket_info
    client.sasl_auth_plain(bucket_info.name.encode("ascii"), bucket_info.saslPassword.encode("ascii"))
    return client
def wait_for_mc_stats_no_timeout(master, bucket, stat_key, stat_value, timeout_in_seconds=-1, verbose=True):
    """Block until memcached stat `stat_key` on `master` for `bucket` equals
    `stat_value`; returns True when matched.

    `timeout_in_seconds` is kept only for signature compatibility — the
    helper retries indefinitely.

    Fixes over the original:
    - `c.close()` in `finally` raised NameError when `MemcachedClient()`
      itself failed (c never bound), defeating the retry loop.
    - the polling loop leaked its connection if auth or stats() raised.
    """
    log = logger.get("infra")
    log.info(
        "Waiting for bucket {0} stat : {1} to match {2} on {3}".format(
            bucket, stat_key, stat_value, master.ip))
    # keep retrying until reaches the server
    stats = dict()
    while not stats:
        c = None
        try:
            c = MemcachedClient(master.ip, constants.memcached_port)
            c.sasl_auth_plain(bucket, '')
            stats = c.stats()
        except Exception as e:
            stats = dict()
            sleep(2, "Exception: %s. Will retry.." % str(e), log_type="infra")
        finally:
            # only close when the constructor actually succeeded
            if c is not None:
                c.close()
    while str(stats[stat_key]) != str(stat_value):
        c = MemcachedClient(master.ip, constants.memcached_port)
        try:
            c.sasl_auth_plain(bucket, '')
            stats = c.stats()
        finally:
            c.close()
        if verbose:
            log.info("{0} : {1}".format(stat_key, stats[stat_key]))
        sleep(5, log_type="infra")
    return True
def test_memecached_basic_api(self):
    # epengine.basic_collections.basic_collections.test_memecached_basic_api
    """Smoke-test the memcached binary API against a freshly created
    scope/collection: create the scope and collection over REST, open an
    authenticated memcached connection with collections enabled, then set
    and get one key in the new collection."""
    scope_name = "ScopeWith30CharactersinName123"
    Collection_name = "CollectionsWithLargeNamechecki"
    self.rest.create_scope(scope=scope_name)
    self.rest.create_collection(scope=scope_name,
                                collection=Collection_name,
                                bucket=self.default_bucket_name)
    # fully-qualified "scope.collection" path used by the memcached client
    collection = scope_name + "." + Collection_name
    self.log.info("collection name is {}".format(collection))
    # give the cluster time to propagate the new collection manifest
    self.sleep(10)
    # create memcached client
    mc = MemcachedClient(self.master.ip, 11210)
    mc.sasl_auth_plain(self.master.rest_username, self.master.rest_password)
    # enable collection and get collections
    mc.enable_collections()
    mc.bucket_select('default')
    # mc.hello(memcacheConstants.FEATURE_COLLECTIONS)
    mc.hello("set_collection")
    mc.get_collections(True)
    self.log.info("get collections completed")
    try:
        mc.set("key", 0, 0, "value", collection=collection)
        flag, keyx, value = mc.get(key="key", collection=collection)
        print("flag:{} keyx:{} value:{}".format(flag, keyx, value))
    except MemcachedError as exp:
        self.fail("Exception with setting and getting the key in collections {0}".format(exp))
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300, log_msg=''):
    """Poll every memcached node hosting `bucket` until all of its vbuckets
    report a ready state, or `timeout_in_seconds` elapses.

    Returns True iff every vbucket became ready within the timeout.
    """
    log = logger.Logger.get_logger()
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    ready_vbuckets = {}
    rest = RestConnection(node)
    servers = rest.get_nodes()
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vbucket_count = len(rest.get_vbuckets(bucket))
    vbuckets = rest.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(rest, bucket)
    #Create dictionary with key:"ip:port" and value: a list of vbuckets
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            #Retrieve memcached ip and port
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = rest.get_bucket(bucket)
            client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                   bucket_info.saslPassword.encode('ascii'))
            for i in server_dict[every_ip_port]:
                try:
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    ex_msg = str(e)
                    # trim the (very long) vBucketMap dump out of the caller's message
                    if "Not my vbucket" in log_msg:
                        log_msg = log_msg[:log_msg.find("vBucketMap") + 12] + "..."
                    if e.status == memcacheConstants.ERR_NOT_MY_VBUCKET:
                        # May receive this while waiting for vbuckets, continue and retry...S
                        continue
                    log.error("%s: %s" % (log_msg, ex_msg))
                    continue
                except exceptions.EOFError:
                    # The client was disconnected for some reason. This can
                    # happen just after the bucket REST API is returned (before
                    # the buckets are created in each of the memcached processes.)
                    # See here for some details: http://review.couchbase.org/#/c/49781/
                    # Longer term when we don't disconnect clients in this state we
                    # should probably remove this code.
                    log.error("got disconnected from the server, reconnecting")
                    client.reconnect()
                    client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                           bucket_info.saslPassword.encode('ascii'))
                    continue
                # \x01 / \x02 in the state blob mark a ready vbucket
                # (presumably active/replica -- TODO confirm against
                # get_vbucket_state())
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    log.warning("vbucket state changed from active to {0}".format(c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
def connection(self, client_ip, bucket_name, user, password, port=11210):
    """Open an authenticated memcached connection to `bucket_name`.

    Returns (client, True) on success or (False, False) on any failure.
    NOTE(review): the password is written to the log in clear text.
    """
    log.info("Bucket name for connection is ---- {0}, username -- {1}, ----- password -- {2}".format(bucket_name, user, password))
    try:
        mc = MemcachedClient(host=client_ip, port=port)
        mc.sasl_auth_plain(user, password)
        mc.bucket_select(bucket_name)
    except Exception as e:
        log.info("Exception is from connection function {0}".format(e))
        return False, False
    return mc, True
def verify_stat(self, items, value="active"):
    """Assert compression stats on the master's default bucket: the
    compression mode equals `value`, exactly `items` documents were
    compressed, and active item memory differs from its uncompressed
    counterpart (i.e. compression actually saved space)."""
    client = MemcachedClient(self.cluster.master.ip, constants.memcached_port)
    client.sasl_auth_plain(self.cluster.master.rest_username,
                           self.cluster.master.rest_password)
    client.bucket_select('default')
    stats = client.stats()
    self.assertEquals(stats['ep_compression_mode'], value)
    self.assertEquals(int(stats['ep_item_compressor_num_compressed']), items)
    self.assertNotEquals(int(stats['vb_active_itm_memory']),
                         int(stats['vb_active_itm_memory_uncompressed']))
def connection(self, client_ip, bucket_name, user, password, port=11210):
    """Connect and authenticate a memcached client against `bucket_name`.

    Returns a (client, True) pair on success and (False, False) when any
    step of the handshake fails; the failure is logged, never raised.
    """
    log.info("Bucket name for connection is ---- {0}, username -- {1}, ----- password -- {2}".format(bucket_name, user, password))
    result = (False, False)
    try:
        client = MemcachedClient(host=client_ip, port=port)
        client.sasl_auth_plain(user, password)
        client.bucket_select(bucket_name)
        result = (client, True)
    except Exception as e:
        log.info("Exception is from connection function {0}".format(e))
    return result
def load_one_mutation_into_source_vb0(self, vb0_active_src_node):
    """Set the next key from self.vb0_keys onto vb0 of the given source
    node; on success advance the counter, remember the key in
    self.keys_loaded and log its metadata."""
    key = self.vb0_keys[self.key_counter]
    client = MemcachedClient(vb0_active_src_node.ip, 11210)
    client.sasl_auth_plain("cbadminbucket", "password")
    client.bucket_select("default")
    try:
        client.set(key, exp=0, flags=0, val="dummy val")
        self.key_counter += 1
        self.keys_loaded.append(key)
        self.log.info("Loaded key {0} onto vb0 in {1}"
                      .format(key, vb0_active_src_node.ip))
        self.log.info("deleted, flags, exp, rev_id, cas for key {0} = {1}"
                      .format(key, client.getMeta(key)))
    except MemcachedError as e:
        self.log.error(e)
def load_one_mutation_into_source_vb0(self, vb0_active_src_node):
    """Write the next pending key to vb0 on the source node, tracking it in
    self.keys_loaded; memcached errors are logged rather than raised."""
    key = self.vb0_keys[self.key_counter]
    conn = MemcachedClient(vb0_active_src_node.ip, 11210)
    conn.sasl_auth_plain("cbadminbucket", "password")
    conn.bucket_select("default")
    try:
        conn.set(key, exp=0, flags=0, val="dummy val")
        self.key_counter += 1
        self.keys_loaded.append(key)
        self.log.info("Loaded key {0} onto vb0 in {1}"
                      .format(key, vb0_active_src_node.ip))
        meta = conn.getMeta(key)
        self.log.info("deleted, flags, exp, rev_id, cas for key {0} = {1}"
                      .format(key, meta))
    except MemcachedError as e:
        self.log.error(e)
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300, log_msg=''):
    """Poll each memcached node hosting `bucket` until every vbucket reports
    a ready state or `timeout_in_seconds` elapses.

    Returns True when all vbuckets became ready in time.
    """
    log = logger.Logger.get_logger()
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    ready_vbuckets = {}
    rest = RestConnection(node)
    servers = rest.get_nodes()
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vbucket_count = len(rest.get_vbuckets(bucket))
    vbuckets = rest.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(
        rest, bucket)
    #Create dictionary with key:"ip:port" and value: a list of vbuckets
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            #Retrieve memcached ip and port
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = rest.get_bucket(bucket)
            client.sasl_auth_plain(
                bucket_info.name.encode('ascii'),
                bucket_info.saslPassword.encode('ascii'))
            for i in server_dict[every_ip_port]:
                try:
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    # not ready yet (or transient error) -- log and try the
                    # next vbucket; the outer while loop will retry this one
                    log.error("%s: %s" % (log_msg, e))
                    continue
                # \x01 / \x02 in the state blob mark a ready vbucket
                # (presumably active/replica -- TODO confirm against
                # get_vbucket_state())
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    log.warning(
                        "vbucket state changed from active to {0}".format(
                            c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
def do_get_random_key(self):
    """Hammer get_random_key() a million times against the default bucket to
    try to reproduce MB-31548 (random-key fetch occasionally hanging),
    logging progress every thousand calls."""
    # MB-31548, get_Random key gets hung sometimes.
    client = MemcachedClient(self.cluster.master.ip, constants.memcached_port)
    client.sasl_auth_plain(self.cluster.master.rest_username,
                           self.cluster.master.rest_password)
    client.bucket_select('default')
    for iteration in range(1, 1000001):
        try:
            client.get_random_key()
        except MemcachedError as error:
            self.fail("<MemcachedError #%d ``%s''>"
                      % (error.status, error.message))
        if iteration % 1000 == 0:
            self.log.info('The number of iteration is {}'.format(iteration))
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300, log_msg=''):
    """Poll each memcached node hosting `bucket` until every vbucket reports
    a ready state or the timeout expires.

    Returns True when all vbuckets became ready within `timeout_in_seconds`.
    """
    log = logger.Logger.get_logger()
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    ready_vbuckets = {}
    rest = RestConnection(node)
    servers = rest.get_nodes()
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vbucket_count = len(rest.get_vbuckets(bucket))
    vbuckets = rest.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(rest, bucket)
    #Create dictionary with key:"ip:port" and value: a list of vbuckets
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            #Retrieve memcached ip and port
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = rest.get_bucket(bucket)
            client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                   bucket_info.saslPassword.encode('ascii'))
            for i in server_dict[every_ip_port]:
                try:
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    ex_msg = str(e)
                    # trim the very long vBucketMap dump out of the caller's message
                    if "Not my vbucket" in log_msg:
                        log_msg = log_msg[:log_msg.find("vBucketMap") + 12] + "..."
                    if "Not my vbucket" in ex_msg:
                        #reduce output
                        ex_msg = str(e)[:str(e).find('Not my vbucket') + 14] + "..."
                    log.error("%s: %s" % (log_msg, ex_msg))
                    continue
                # \x01 / \x02 in the state blob mark a ready vbucket
                # (presumably active/replica -- TODO confirm against
                # get_vbucket_state())
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    log.warning("vbucket state changed from active to {0}".format(c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
def direct_client(server, bucket, timeout=30):
    """Build a MemcachedClient for `bucket` on `server`, resolving the
    memcached port over REST and authenticating with the bucket's SASL
    credentials.  `server` may be an object with an `ip` attribute or a
    dict with an "ip" key."""
    rest = RestConnection(server)
    node = rest.get_nodes_self()
    log = logger.Logger.get_logger()
    if isinstance(server, dict):
        log.info("dict:{0}".format(server))
        host = server["ip"]
        log.info("creating direct client {0}:{1} {2}".format(host, node.memcached, bucket))
    else:
        host = server.ip
        log.info("creating direct client {0}:{1} {2}".format(host, node.memcached, bucket))
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vBuckets = RestConnection(server).get_vbuckets(bucket)
    client = MemcachedClient(host, node.memcached, timeout=timeout)
    client.vbucket_count = len(vBuckets)
    bucket_info = rest.get_bucket(bucket)
    #todo raise exception for not bucket_info
    client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                           bucket_info.saslPassword.encode('ascii'))
    return client
def do_setWithMeta_twice(self):
    """Issue the same setWithMeta twice against the default bucket and
    verify via stats that the second (conflicting) call does not leave a
    temp item behind (curr_temp_items must not stay at 1)."""
    mc = MemcachedClient(self.cluster.master.ip, 11210)
    mc.sasl_auth_plain(self.cluster.master.rest_username,
                       self.cluster.master.rest_password)
    mc.bucket_select('default')
    # first setWithMeta with a fixed CAS must succeed
    try:
        mc.setWithMeta('1', '{"Hello":"World"}', 3600, 0, 1, 0x1512a3186faa0000)
    except MemcachedError as error:
        self.log.info("<MemcachedError #%d ``%s''>" % (error.status, error.message))
        self.fail("Error on First setWithMeta()")
    stats = mc.stats()
    self.log.info('curr_items: {0} and curr_temp_items:{1}'.format(
        stats['curr_items'], stats['curr_temp_items']))
    self.log.info("Sleeping for 5 and checking stats again")
    time.sleep(5)
    stats = mc.stats()
    self.log.info('curr_items: {0} and curr_temp_items:{1}'.format(
        stats['curr_items'], stats['curr_temp_items']))
    # second identical setWithMeta is expected to fail; when it does,
    # curr_temp_items must have been cleaned up (== 0)
    try:
        mc.setWithMeta('1', '{"Hello":"World"}', 3600, 0, 1, 0x1512a3186faa0000)
    except MemcachedError as error:
        stats = mc.stats()
        self.log.info(
            'After 2nd setWithMeta(), curr_items: {} and curr_temp_items:{}'
            .format(stats['curr_items'], stats['curr_temp_items']))
        if int(stats['curr_temp_items']) == 1:
            self.fail(
                "Error on second setWithMeta(), expected curr_temp_items to be 0"
            )
        else:
            self.log.info("<MemcachedError #%d ``%s''>" % (error.status, error.message))
def getDirectMC(key, ip, port=8091, bucket="default", password=""):
    """Return a MemcachedClient connected directly to the node that owns
    `key`'s vbucket in `bucket`, or None when the owner cannot be resolved.

    Fix over the original: the bootstrap client used only to read
    vbucket_count was never closed, leaking a connection per call.
    """
    real_mc_client = None
    # get initial mc client, only needed to learn the cluster's vbucket count
    client = MemcachedClient(ip, int(port))
    try:
        # CRC32-based vbucket hash (same mapping memcached uses)
        vbId = (((zlib.crc32(key)) >> 16) & 0x7fff) & (client.vbucket_count - 1)
    finally:
        client.close()
    # get vbucket map
    rest = create_rest(ip, port)
    vbuckets = rest.get_vbuckets(bucket)
    # find vbucket responsible to this key and mapping host
    if vbuckets is not None:
        matches = [vbucket for vbucket in vbuckets if vbucket.id == vbId]
        if len(matches) == 1:
            mc_ip, mc_port = matches[0].master.split(":")
            real_mc_client = MemcachedClient(mc_ip, int(mc_port))
            real_mc_client.sasl_auth_plain(bucket, password)
    return real_mc_client
def getDirectMC(key, ip, port=8091, bucket="default", password=""):
    """Resolve which node owns `key`'s vbucket in `bucket` and return an
    authenticated MemcachedClient to it, or None if it cannot be resolved.

    Fix over the original: the bootstrap client (used only for
    vbucket_count) was never closed — a connection leak on every call.
    """
    real_mc_client = None
    # get initial mc client; only its vbucket_count is needed
    client = MemcachedClient(ip, int(port))
    try:
        vbId = (((zlib.crc32(key)) >> 16) & 0x7fff) & (client.vbucket_count - 1)
    finally:
        client.close()
    # get vbucket map
    rest = create_rest(ip, port)
    vbuckets = rest.get_vbuckets(bucket)
    # find vbucket responsible to this key and mapping host
    if vbuckets is not None:
        owners = [vbucket for vbucket in vbuckets if vbucket.id == vbId]
        if len(owners) == 1:
            mc_ip, mc_port = owners[0].master.split(":")
            real_mc_client = MemcachedClient(mc_ip, int(mc_port))
            real_mc_client.sasl_auth_plain(bucket, password)
    return real_mc_client
def do_auth(self, bucket, password):
    """ default self.auth_mech is 'PLAIN' """
    # NOTE: Python 2 syntax (`except X, e`) -- this module is py2-only.
    self.log.info("Authenticate with {0} to {1}:{2}".format(
        self.auth_mech, bucket, password))
    ret = None
    # find this master's node entry to learn its memcached port
    nodes = RestConnection(self.master).get_nodes()
    for n in nodes:
        if n.ip == self.master.ip and n.port == self.master.port:
            node = n
    # NOTE(review): `node` is unbound if no entry matched the master --
    # the NameError below would mask the real problem; confirm callers
    # always pass a cluster member.
    client = MemcachedClient(self.master.ip, node.memcached)
    try:
        if self.auth_mech == "PLAIN":
            ret = client.sasl_auth_plain(bucket, password)[2]
        else:
            self.fail("Invalid auth mechanism {0}".format(self.auth_mech))
    except MemcachedError, e:
        # keep only the error text before the vbucket suffix
        ret = e[0].split(' for vbucket')[0]
def do_auth(self, bucket, password):
    """ default self.auth_mech is 'PLAIN' """
    # Python 2 module: note the old-style `except X, e` below.
    self.log.info("Authenticate with {0} to {1}:{2}".format(self.auth_mech,
                                                            bucket, password))
    ret = None
    # locate the master in the node list to get its memcached port
    nodes = RestConnection(self.master).get_nodes()
    for n in nodes:
        if n.ip == self.master.ip and n.port == self.master.port:
            node = n
    # NOTE(review): if no node matches, `node` is unbound and the next line
    # raises NameError -- presumably the master is always in the list; verify.
    client = MemcachedClient(self.master.ip, node.memcached)
    try:
        if self.auth_mech == "PLAIN":
            ret = client.sasl_auth_plain(bucket, password)[2]
        else:
            self.fail("Invalid auth mechanism {0}".format(self.auth_mech))
    except MemcachedError, e:
        # strip the trailing " for vbucket ..." detail from the error text
        ret = e[0].split(' for vbucket')[0]
def flushctl_start(servers, username=None, password=None):
    """Resume disk persistence on every server's memcached, optionally
    authenticating first when a username is given."""
    for server in servers:
        client = MemcachedClient(server.ip, constants.memcached_port)
        if username:
            client.sasl_auth_plain(username, password)
        client.start_persistence()
def flushctl_stop(servers, username=None, password=None):
    """Halt disk persistence on each server's memcached (port 11210),
    authenticating first when a username is supplied."""
    for server in servers:
        client = MemcachedClient(server.ip, 11210)
        if username:
            client.sasl_auth_plain(username, password)
        client.stop_persistence()
def poll(): url = "https://macbuild.hq.couchbase.com/xcode/api/integrations/filter/latest" latest = getJS(url) now = datetime.datetime.now() ts = now.strftime("%Y%m%d") build_no = "%s-%s" % (MOBILE_VERSION, ts[-5:]) client = McdClient(HOST, PORT) client.sasl_auth_plain("mobile", "") buildHist = {} for build in latest: if 'revisionBlueprint' not in latest[build]: continue key = latest[build]['revisionBlueprint'][ 'DVTSourceControlWorkspaceBlueprintPrimaryRemoteRepositoryKey'] rev = latest[build]['revisionBlueprint'][ 'DVTSourceControlWorkspaceBlueprintLocationsKey'][key][ 'DVTSourceControlLocationRevisionKey'] name = "iOS-" + latest[build]['bot']['name'].replace(" ", "") build_id = latest[build]['number'] results = latest[build]['buildResultSummary'] totalCount = results['testsCount'] failCount = results['errorCount'] result = 'SUCCESS' if name in TOTAL_COUNT_LOG: if totalCount == 0: # use historical value for total count totalCount = TOTAL_COUNT_LOG[name] elif totalCount != TOTAL_COUNT_LOG: # update total count log TOTAL_COUNT_LOG[name] = totalCount else: TOTAL_COUNT_LOG[name] = totalCount if (failCount > 0): result = 'UNSTABLE' component = None for feature in MOBILE_FEATURES: tag, _c = feature.split("-") docname = name.upper() docname = docname.replace("-", "_") if tag in docname: component = _c if component: doc = { 'build_id': build_id, 'priority': 'P0', 'name': name, 'url': url, 'component': component, 'failCount': failCount, 'totalCount': totalCount, 'result': result, 'os': 'iOS', 'build': build_no } key = "%s-%s" % (doc["name"], doc["build_id"]) val = json.dumps(doc) try: #print val key = hashlib.md5(key).hexdigest() print val client.set(key, 0, 0, val, 0) buildHist[doc["build"]] = doc["build_id"] except Exception as ex: print ex print "set failed, couchbase down?: %s:%s" % (HOST, PORT)
def poll(): url = "https://macbuild.hq.couchbase.com/xcode/api/integrations/filter/latest" latest = getJS(url) now = datetime.datetime.now() ts = now.strftime("%Y%m%d") build_no = "%s-%s" % (MOBILE_VERSION, ts[-5:]) client = McdClient(HOST, PORT) client.sasl_auth_plain("mobile", "") buildHist = {} for build in latest: if 'revisionBlueprint' not in latest[build]: continue key = latest[build]['revisionBlueprint']['DVTSourceControlWorkspaceBlueprintPrimaryRemoteRepositoryKey'] rev = latest[build]['revisionBlueprint']['DVTSourceControlWorkspaceBlueprintLocationsKey'][key]['DVTSourceControlLocationRevisionKey'] name = "iOS-"+latest[build]['bot']['name'].replace(" ","") build_id = latest[build]['number'] results = latest[build]['buildResultSummary'] totalCount = results['testsCount'] failCount = results['errorCount'] result = 'SUCCESS' if name in TOTAL_COUNT_LOG: if totalCount == 0: # use historical value for total count totalCount = TOTAL_COUNT_LOG[name] elif totalCount != TOTAL_COUNT_LOG: # update total count log TOTAL_COUNT_LOG[name] = totalCount else: TOTAL_COUNT_LOG[name] = totalCount if (failCount > 0): result = 'UNSTABLE' component = None for feature in MOBILE_FEATURES: tag, _c = feature.split("-") docname = name.upper() docname = docname.replace("-","_") if tag in docname: component = _c if component: doc = {'build_id': build_id, 'priority': 'P0', 'name': name, 'url': url, 'component': component, 'failCount': failCount, 'totalCount': totalCount, 'result': result, 'os': 'iOS', 'build': build_no} key = "%s-%s" % (doc["name"], doc["build_id"]) val = json.dumps(doc) try: #print val key = hashlib.md5(key).hexdigest() print val client.set(key, 0, 0, val, 0) buildHist[doc["build"]] = doc["build_id"] except Exception as ex: print ex print "set failed, couchbase down?: %s:%s" % (HOST,PORT)
def storeJob(jobDoc, bucket, first_pass = True):
    # Python 2 module (print statements below).
    # Walk a Jenkins job's build history and store one couchbase document per
    # build; deletes docs for build ids that disappeared from Jenkins, and
    # recurses once (first_pass=False) so newly-seen builds get recorded in
    # the JOBS cache.
    client = McdClient(HOST, PORT)
    client.sasl_auth_plain(bucket, "")
    doc = jobDoc
    url = doc["url"]
    res = getJS(url, {"depth" : 0}).json()
    if res is None:
        return
    buildHist = {}
    if res["lastBuild"]:
        bids = [b["number"] for b in res["builds"]]
        lastTotalCount = -1
        idx=0
        for bid in bids:
            idx = idx + 1
            i = 1
            if idx < len(bids):
                # purge docs for build ids missing between consecutive builds
                while bids[idx] != bid-i:
                    key = "%s-%s" % (doc["name"], bid-i)
                    key = hashlib.md5(key).hexdigest()
                    try:
                        client.delete(key, vbucket=0)
                    except:
                        pass
                    i = i + 1
            if bid in JOBS[doc["name"]]:
                continue # job already stored
            else:
                if first_pass == False:
                    JOBS[doc["name"]].append(bid)
            doc["build_id"] = bid
            res = getJS(url+str(bid), {"depth" : 0}).json()
            if res is None:
                return
            if "result" not in res:
                continue
            doc["result"] = res["result"]
            doc["duration"] = res["duration"]
            if bucket == "server":
                if res["result"] not in ["SUCCESS", "UNSTABLE", "FAILURE", "ABORTED"]:
                    continue # unknown result state
                actions = res["actions"]
                totalCount = getAction(actions, "totalCount") or 0
                failCount = getAction(actions, "failCount") or 0
                skipCount = getAction(actions, "skipCount") or 0
                if totalCount == 0:
                    if lastTotalCount == -1:
                        continue # no tests ever passed for this build
                    else:
                        # carry the previous build's total and count it all failed
                        totalCount = lastTotalCount
                        failCount = totalCount
                else:
                    lastTotalCount = totalCount
                doc["failCount"] = failCount
                doc["totalCount"] = totalCount - skipCount
                params = getAction(actions, "parameters")
                if params is None:
                    doc["priority"] = P1
                    doc["build"] = DEFAULT_BUILD
                else:
                    doc["build"] = getAction(params, "name", "version_number") or getAction(params, "name", "cluster_version") or DEFAULT_BUILD
                    doc["priority"] = getAction(params, "name", "priority") or P1
                    if doc["priority"].upper() not in [P0, P1, P2]:
                        doc["priority"] = P1
                # normalize "x.y.z-NNNN" build strings
                doc["build"] = doc["build"].replace("-rel","").split(",")[0]
                try:
                    _build= doc["build"].split("-")
                    rel, bno = _build[0], _build[1]
                    # check partial rel #'s
                    rlen = len(rel.split("."))
                    while rlen < 3:
                        rel = rel+".0"
                        rlen+=1
                    # verify rel, build
                    m=re.match("^\d\.\d\.\d{1,5}", rel)
                    if m is None:
                        print "unsupported version_number: "+doc["build"]
                        continue
                    m=re.match("^\d{1,10}", bno)
                    if m is None:
                        print "unsupported version_number: "+doc["build"]
                        continue
                    doc["build"] = "%s-%s" % (rel, bno.zfill(4))
                except:
                    print "unsupported version_number: "+doc["build"]
                    continue
            else:
                # use date as version for sdk and mobile
                if res["result"] not in ["SUCCESS", "UNSTABLE", "FAILURE", "ABORTED"]:
                    continue
                actions = res["actions"]
                totalCount = getAction(actions, "totalCount") or 0
                failCount = getAction(actions, "failCount") or 0
                skipCount = getAction(actions, "skipCount") or 0
                if totalCount == 0:
                    if lastTotalCount == -1:
                        continue # no tests ever passed for this build
                    else:
                        totalCount = lastTotalCount
                        failCount = totalCount
                else:
                    lastTotalCount = totalCount
                doc["failCount"] = failCount
                doc["totalCount"] = totalCount - skipCount
                doc["priority"] = P0
                ts = res["timestamp"]/1000;
                month = int(datetime.datetime.fromtimestamp(ts).strftime("%m"))
                _ts = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m%d")
                yr, md = _ts.split("-")
                doc["build"] = "%s-%s%s" % (MOBILE_VERSION, yr[-1], md)
            if doc["build"] in buildHist:
                #print "REJECTED- doc already in build results: %s" % doc
                #print buildHist
                # attempt to delete if this record has been stored in couchbase
                try:
                    oldKey = "%s-%s" % (doc["name"], doc["build_id"])
                    oldKey = hashlib.md5(oldKey).hexdigest()
                    client.delete(oldKey, vbucket = 0)
                    #print "DELETED- %s:%s" % (doc["build"],doc["build_id"])
                except:
                    pass
                continue # already have this build results
            key = "%s-%s" % (doc["name"], doc["build_id"])
            key = hashlib.md5(key).hexdigest()
            val = json.dumps(doc)
            try:
                #print val
                client.set(key, 0, 0, val, 0)
                buildHist[doc["build"]] = doc["build_id"]
            except:
                print "set failed, couchbase down?: %s:%s" % (HOST,PORT)
    if first_pass:
        storeJob(jobDoc, bucket, first_pass = False)
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300,
                                  log_msg='', admin_user='******',
                                  admin_pass='******'):
    """Poll every memcached node hosting `bucket` until all vbuckets report
    a ready state or the timeout expires.

    Pre-5.0 ("pre-spock") nodes are authenticated with the bucket's SASL
    credentials; newer nodes use admin credentials plus bucket_select.
    Returns True when every vbucket became ready in time.
    """
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    ready_vbuckets = {}
    log = logger.get("infra")
    rest = RestConnection(node)
    # servers = rest.get_nodes()
    bucket_conn = BucketHelper(node)
    bucket_conn.vbucket_map_ready(bucket, 60)
    vbucket_count = len(bucket_conn.get_vbuckets(bucket))
    vbuckets = bucket_conn.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket, info=node)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(
        rest, bucket)
    #Create dictionary with key:"ip:port" and value: a list of vbuckets
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            #Retrieve memcached ip and port
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = bucket_conn.get_bucket(bucket)
            versions = rest.get_nodes_versions(logging=False)
            pre_spock = False
            for version in versions:
                if "5" > version:
                    pre_spock = True
            if pre_spock:
                log.info("Atleast 1 of the server is on pre-spock "
                         "version. Using the old ssl auth to connect to "
                         "bucket.")
                client.sasl_auth_plain(
                    bucket_info.name.encode('ascii'),
                    bucket_info.saslPassword.encode('ascii'))
            else:
                # 5.0+: admin auth, then select the target bucket explicitly
                client.sasl_auth_plain(admin_user, admin_pass)
                bucket = bucket.encode('ascii')
                client.bucket_select(bucket)
            for i in server_dict[every_ip_port]:
                try:
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    ex_msg = str(e)
                    # trim the very long vBucketMap dump from the caller's message
                    if "Not my vbucket" in log_msg:
                        log_msg = log_msg[:log_msg.find("vBucketMap") + 12] + "..."
                    if e.status == memcacheConstants.ERR_NOT_MY_VBUCKET:
                        # May receive this while waiting for vbuckets, continue and retry...S
                        continue
                    log.error("%s: %s" % (log_msg, ex_msg))
                    continue
                except exceptions.EOFError:
                    # The client was disconnected for some reason. This can
                    # happen just after the bucket REST API is returned (before
                    # the buckets are created in each of the memcached processes.)
                    # See here for some details: http://review.couchbase.org/#/c/49781/
                    # Longer term when we don't disconnect clients in this state we
                    # should probably remove this code.
                    log.error(
                        "got disconnected from the server, reconnecting")
                    continue
                # \x01 / \x02 in the state blob mark a ready vbucket
                # (presumably active/replica -- TODO confirm against
                # get_vbucket_state())
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    log.warning(
                        "vbucket state changed from active to {0}".format(
                            c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
def storeJob(jobDoc, bucket, first_pass=True):
    # Python 2 module (print statements below).
    # Store one couchbase document per Jenkins build of this job, pruning
    # documents for builds Jenkins no longer lists; runs itself a second
    # time with first_pass=False so the JOBS cache records what was seen.
    client = McdClient(HOST, PORT)
    client.sasl_auth_plain(bucket, "")
    doc = jobDoc
    url = doc["url"]
    res = getJS(url, {"depth": 0}).json()
    if res is None:
        return
    buildHist = {}
    if res["lastBuild"]:
        bids = [b["number"] for b in res["builds"]]
        lastTotalCount = -1
        idx = 0
        for bid in bids:
            idx = idx + 1
            i = 1
            if idx < len(bids):
                # delete docs for build ids missing between consecutive builds
                while bids[idx] != bid - i:
                    key = "%s-%s" % (doc["name"], bid - i)
                    key = hashlib.md5(key).hexdigest()
                    try:
                        client.delete(key, vbucket=0)
                    except:
                        pass
                    i = i + 1
            if bid in JOBS[doc["name"]]:
                continue  # job already stored
            else:
                if first_pass == False:
                    JOBS[doc["name"]].append(bid)
            doc["build_id"] = bid
            res = getJS(url + str(bid), {"depth": 0}).json()
            if res is None:
                return
            if "result" not in res:
                continue
            doc["result"] = res["result"]
            doc["duration"] = res["duration"]
            if bucket == "server":
                if res["result"] not in [
                        "SUCCESS", "UNSTABLE", "FAILURE", "ABORTED"
                ]:
                    continue  # unknown result state
                actions = res["actions"]
                totalCount = getAction(actions, "totalCount") or 0
                failCount = getAction(actions, "failCount") or 0
                skipCount = getAction(actions, "skipCount") or 0
                if totalCount == 0:
                    if lastTotalCount == -1:
                        continue  # no tests ever passed for this build
                    else:
                        # carry previous total forward and mark all as failed
                        totalCount = lastTotalCount
                        failCount = totalCount
                else:
                    lastTotalCount = totalCount
                doc["failCount"] = failCount
                doc["totalCount"] = totalCount - skipCount
                params = getAction(actions, "parameters")
                if params is None:
                    doc["priority"] = P1
                    doc["build"] = DEFAULT_BUILD
                else:
                    doc["build"] = getAction(
                        params, "name", "version_number") or getAction(
                            params, "name", "cluster_version") or DEFAULT_BUILD
                    doc["priority"] = getAction(params, "name", "priority") or P1
                    if doc["priority"].upper() not in [P0, P1, P2]:
                        doc["priority"] = P1
                # normalize the "x.y.z-NNNN" build string
                doc["build"] = doc["build"].replace("-rel", "").split(",")[0]
                try:
                    _build = doc["build"].split("-")
                    rel, bno = _build[0], _build[1]
                    # check partial rel #'s
                    rlen = len(rel.split("."))
                    while rlen < 3:
                        rel = rel + ".0"
                        rlen += 1
                    # verify rel, build
                    m = re.match("^\d\.\d\.\d{1,5}", rel)
                    if m is None:
                        print "unsupported version_number: " + doc["build"]
                        continue
                    m = re.match("^\d{1,10}", bno)
                    if m is None:
                        print "unsupported version_number: " + doc["build"]
                        continue
                    doc["build"] = "%s-%s" % (rel, bno.zfill(4))
                except:
                    print "unsupported version_number: " + doc["build"]
                    continue
            else:
                # use date as version for sdk and mobile
                if res["result"] not in [
                        "SUCCESS", "UNSTABLE", "FAILURE", "ABORTED"
                ]:
                    continue
                actions = res["actions"]
                totalCount = getAction(actions, "totalCount") or 0
                failCount = getAction(actions, "failCount") or 0
                skipCount = getAction(actions, "skipCount") or 0
                if totalCount == 0:
                    if lastTotalCount == -1:
                        continue  # no tests ever passed for this build
                    else:
                        totalCount = lastTotalCount
                        failCount = totalCount
                else:
                    lastTotalCount = totalCount
                doc["failCount"] = failCount
                doc["totalCount"] = totalCount - skipCount
                doc["priority"] = P0
                ts = res["timestamp"] / 1000
                month = int(datetime.datetime.fromtimestamp(ts).strftime("%m"))
                _ts = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m%d")
                yr, md = _ts.split("-")
                doc["build"] = "%s-%s%s" % (MOBILE_VERSION, yr[-1], md)
            if doc["build"] in buildHist:
                #print "REJECTED- doc already in build results: %s" % doc
                #print buildHist
                # attempt to delete if this record has been stored in couchbase
                try:
                    oldKey = "%s-%s" % (doc["name"], doc["build_id"])
                    oldKey = hashlib.md5(oldKey).hexdigest()
                    client.delete(oldKey, vbucket=0)
                    #print "DELETED- %s:%s" % (doc["build"],doc["build_id"])
                except:
                    pass
                continue  # already have this build results
            key = "%s-%s" % (doc["name"], doc["build_id"])
            key = hashlib.md5(key).hexdigest()
            val = json.dumps(doc)
            try:
                #print val
                client.set(key, 0, 0, val, 0)
                buildHist[doc["build"]] = doc["build_id"]
            except:
                print "set failed, couchbase down?: %s:%s" % (HOST, PORT)
    if first_pass:
        storeJob(jobDoc, bucket, first_pass=False)