def __init__(self, ssl_mode: str = 'none', n1ql_timeout: int = None, **kwargs):
    """Open a cluster connection from keyword settings.

    Expects ``host``, ``username``, ``password``, ``bucket`` and
    ``connstr_params`` in *kwargs*.  ``ssl_mode='data'`` switches to the
    ``couchbases://`` scheme with a local root certificate; *n1ql_timeout*
    (seconds), when given, overrides the default query timeout.
    """
    extra_params = parse.urlencode(kwargs["connstr_params"])
    template = 'couchbase://{host}?password={password}&{params}'
    if ssl_mode == 'data':
        # TLS scheme plus certificate path for encrypted data access.
        template = template.replace('couchbase', 'couchbases') + '&certpath=root.pem'
    conn_str = template.format(host=kwargs['host'],
                               password=kwargs['password'],
                               params=extra_params)
    authenticator = PasswordAuthenticator(kwargs['username'], kwargs['password'])
    timeout_kwargs = {'kv_timeout': timedelta(seconds=self.TIMEOUT)}
    if n1ql_timeout:
        timeout_kwargs['query_timeout'] = timedelta(seconds=n1ql_timeout)
    timeouts = ClusterTimeoutOptions(**timeout_kwargs)
    self.cluster = Cluster(connection_string=conn_str,
                           options=ClusterOptions(authenticator=authenticator,
                                                  timeout_options=timeouts))
    self.bucket_name = kwargs['bucket']
    self.bucket = None
    self.collections = {}
    self.collection = None
def _instantiate_cluster(
        self,
        connstr_nobucket,  # type: str
        cluster_class=None,  # type: Type[Cluster]
        opts=None  # type: Any
):
    # type: (...) -> ClusterTestCase.T
    """Connect a cluster for a test run, injecting mock-aware auth.

    Uses *cluster_class* (default: ``self.cluster_factory``) and retries the
    connect because test clusters/mocks can be slow to come up.
    """
    cluster_class = cluster_class or self.cluster_factory
    # Mock servers need special authenticator/connect kwargs handling.
    mock_hack = self.cluster_info.mock_hack_options(self.is_mock)
    auth = mock_hack.auth(self.cluster_info.admin_username,
                          self.cluster_info.admin_password)
    if not opts:
        opts = ClusterOptions(auth)
    else:
        # Caller-supplied options: overwrite any authenticator they carried.
        opts['authenticator'] = auth
    if SLOWCONNECT_PATTERN.match(platform.platform()):
        # Slow platforms get a 30s total-connect budget by default, but any
        # timeout_options the caller provided still take precedence (update()).
        default_timeout_options = ClusterTimeoutOptions(
            config_total_timeout=timedelta(seconds=30))
        default_timeout_options.update(opts.get('timeout_options', {}))
        opts['timeout_options'] = default_timeout_options
    # Up to 10 attempts, 3s apart, before giving up on the connect.
    return self.try_n_times(10, 3, cluster_class.connect,
                            connection_string=str(connstr_nobucket),
                            options=opts,
                            **mock_hack.kwargs)
def test_can_override_timeout_options(self):
    """Timeout options passed at connect time must win over ClusterOptions."""
    base_timeout = timedelta(seconds=100)
    override_timeout = timedelta(seconds=50)
    opts = self._create_cluster_opts(
        timeout_options=ClusterTimeoutOptions(kv_timeout=base_timeout))
    connect_args = self._mock_hack()
    connect_args['timeout_options'] = ClusterTimeoutOptions(kv_timeout=override_timeout)
    cluster = Cluster.connect(self.cluster.connstr, opts, **connect_args)
    bucket = cluster.bucket(self.bucket_name)
    # The connect-time override, not the ClusterOptions value, should stick.
    self.assertEqual(override_timeout, bucket.kv_timeout)
def test_query_default_timeout(self):
    """A query_timeout set at cluster creation is reflected on the cluster."""
    expected = timedelta(seconds=50)
    opts = self._create_cluster_opts(
        timeout_options=ClusterTimeoutOptions(query_timeout=expected))
    cluster = Cluster.connect(self.cluster.connstr, opts, **self._mock_hack())
    self.assertEqual(expected, cluster.query_timeout)
def get_pool_data(servers):
    """Fetch pool metadata for a space-separated list of server IPs.

    Queries the ``QE-server-pool`` bucket and returns a string of
    ``ip=(state os poolId origin) `` entries (commas replaced by spaces);
    returns whatever was accumulated (possibly '') on query failure.
    Exits the process if no pool password is configured.
    """
    servers_list = servers.split(' ')
    query = "SELECT ipaddr, os, state, origin, poolId FROM `QE-server-pool` WHERE ipaddr in ['" + "','".join(
        servers_list) + "']"
    # Connection settings from the environment, with QE defaults.
    pool_cb_host = os.environ.get('pool_cb_host')
    if not pool_cb_host:
        pool_cb_host = "172.23.104.162"
    pool_cb_user = os.environ.get('pool_cb_user')
    if not pool_cb_user:
        pool_cb_user = "******"
    pool_cb_user_p = os.environ.get('pool_cb_password')
    # BUG FIX: this previously tested the undefined name `cb_user_p`,
    # raising NameError instead of reporting the missing password.
    if not pool_cb_user_p:
        print(
            "Error: pool_cb_password environment variable setting is missing!")
        exit(1)
    data = ''
    try:
        pool_cluster = Cluster(
            "couchbase://" + pool_cb_host,
            ClusterOptions(PasswordAuthenticator(pool_cb_user, pool_cb_user_p),
                           timeout_options=ClusterTimeoutOptions(
                               kv_timeout=timedelta(seconds=10))))
        result = pool_cluster.query(query)
        for row in result:
            data += ("{}=({} {} {} {}) ".format(row['ipaddr'], row['state'],
                                                row['os'], row['poolId'],
                                                row['origin'])).replace(
                ',', ' ')
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed; query failures are still best-effort.
    except Exception:
        print("exception:", sys.exc_info()[0])
    return data
def __init__(self, host=None, bucket=None, username=None, password=None):
    """Connect to the static-server-pool health bucket.

    If any of *host*/*bucket*/*username*/*password* is missing, all four
    settings fall back to the ``health_cb_*`` environment variables (with
    QE defaults for host, bucket and username).  Without a password the
    connection is skipped; connection errors are printed, not raised.
    """
    config = os.environ
    if not host or not bucket or not username or not password:
        self.cb_host = config.get("health_cb_host", "172.23.104.180")
        self.cb_bucket = config.get("health_cb_bucket", "QE-staticserver-pool-health")
        self.cb_username = config.get("health_cb_username", "Administrator")
        self.cb_userpassword = config.get("health_cb_password")
    else:
        self.cb_host = host
        self.cb_bucket = bucket
        self.cb_username = username
        self.cb_userpassword = password
    if not self.cb_userpassword:
        # BUG FIX: the message previously said "heal_cb_password", which is
        # not the variable read above ("health_cb_password").
        print("Setting of env variable: health_cb_password= is needed!")
        return
    try:
        print("Connecting to {},{},{}".format(self.cb_host, self.cb_bucket,
                                              self.cb_username))
        self.cb_cluster = Cluster(
            "couchbase://" + self.cb_host,
            ClusterOptions(PasswordAuthenticator(self.cb_username,
                                                 self.cb_userpassword),
                           timeout_options=ClusterTimeoutOptions(
                               kv_timeout=timedelta(seconds=10))))
        self.cb_b = self.cb_cluster.bucket(self.cb_bucket)
        self.cb = self.cb_b.default_collection()
    except Exception as e:
        print('Connection Failed: %s ' % self.cb_host)
        print(e)
def test_views_default_timeout(self):
    """A views_timeout set at cluster creation propagates to buckets."""
    expected = timedelta(seconds=50)
    opts = self._create_cluster_opts(
        timeout_options=ClusterTimeoutOptions(views_timeout=expected))
    cluster = Cluster.connect(self.cluster.connstr, opts, **self._mock_hack())
    bucket = cluster.bucket(self.bucket_name)
    self.assertEqual(expected, bucket.views_timeout)
def __init__(self, host, bucket, username, password, quiet=True, port=8091):
    """Connect to *bucket* on *host* and bind its default collection."""
    auth = PasswordAuthenticator(username, password)
    timeouts = ClusterTimeoutOptions(kv_timeout=timedelta(seconds=self.TIMEOUT))
    cluster_opts = ClusterOptions(authenticator=auth, timeout_options=timeouts)
    self.cluster = Cluster(
        connection_string='couchbase://{}?password={}'.format(host, password),
        options=cluster_opts)
    self.bucket = self.cluster.bucket(bucket)
    self.client = self.bucket.default_collection()
    # Usage bookkeeping counters.
    self.use_count = 0
    self.use_time = 0
    self.last_use_time = 0
def __init__(self, **kwargs):
    """Open a transactional cluster connection.

    Expects ``host``, ``username``, ``password`` and ``bucket`` in *kwargs*.
    """
    conn_str = 'couchbase://{host}?password={password}'.format(
        host=kwargs['host'], password=kwargs['password'])
    authenticator = PasswordAuthenticator(kwargs['username'], kwargs['password'])
    timeouts = ClusterTimeoutOptions(kv_timeout=timedelta(seconds=self.TIMEOUT))
    self.cluster = TxCluster(connection_string=conn_str,
                             options=ClusterOptions(authenticator=authenticator,
                                                    timeout_options=timeouts))
    self.bucket_name = kwargs['bucket']
    self.collections = {}
    self.collection = None
# Script fragment: validate credentials, connect to the greenboard cluster and
# load the build's server doc.  `cb_user_p`, `cb_host`, `cb_user`, `cb_build`
# and `xen_hosts_file` are defined earlier in the script (outside this chunk).
if not cb_user_p:
    print("Error: cb_password environment variable setting is missing!")
    exit(1)
cb_bucket = os.environ.get('cb_bucket')
if not cb_bucket:
    cb_bucket = "greenboard"
is_include_unstable = os.environ.get('is_include_unstable')
if not is_include_unstable:
    print("No result=UNSTABLE jobs included while getting the IPs list")
    is_include_unstable = False
#print("Connecting to the greenboard couchbase nosql...")
cluster = Cluster(
    "couchbase://" + cb_host,
    ClusterOptions(PasswordAuthenticator(cb_user, cb_user_p),
                   timeout_options=ClusterTimeoutOptions(kv_timeout=timedelta(
                       seconds=10))))
bucket = cluster.bucket(cb_bucket)
# One document per build holds the per-job results for that server build.
doc = bucket.get(cb_build + "_server").value
# Result counters for the jobs processed below (beyond this chunk).
index = 0
success_count = 0
failure_count = 0
aborted_count = 0
unstable_count = 0
unknown_count = 0
xen_hosts_map = {}
if xen_hosts_file:
    hosts_file = open(xen_hosts_file)
    #print("Please wait while loading the xenhosts information...")
    lines = hosts_file.readlines()
    # Parse each xen-hosts line; loop body continues beyond this chunk.
    for line in lines:
        try:
def get_pool_data(pools):
    """Check health of every VM in the given comma-separated pool list.

    Queries the server-pool bucket for VM metadata, probes each VM via
    check_vm(), prints a summary line per VM, writes a CSV row per VM to
    pool_vm_health_info.csv and — when the is_save_cb env flag is set —
    saves each row as a Couchbase document via CBDoc.
    """
    pools_list = []
    for pool in pools.split(','):
        pools_list.append(pool)
    # Server-pool cluster settings, overridable via environment.
    pool_cb_host = os.environ.get('pool_cb_host', "172.23.104.162")
    pool_cb_bucket = os.environ.get('pool_cb_bucket', "QE-server-pool")
    pool_cb_user = os.environ.get('pool_cb_user', "Administrator")
    pool_cb_user_p = os.environ.get('pool_cb_password')
    if not pool_cb_user_p:
        print("Error: pool_cb_password environment variable setting is missing!")
        exit(1)
    data = ''  # NOTE(review): never used after this point in this function
    # Match rows whose poolId equals any pool OR whose poolId array contains it.
    query = "SELECT ipaddr, os, state, origin, poolId, username, mac_address FROM `" + pool_cb_bucket + "` WHERE poolId in [" \
        + ', '.join('"{0}"'.format(p) for p in pools_list) + "] or " \
        + ' or '.join('"{0}" in poolId'.format(p) for p in pools_list)
    is_debug = os.environ.get('is_debug')
    if is_debug:
        print("Query:{};".format(query))
    try:
        pool_cluster = Cluster("couchbase://"+pool_cb_host,
                               ClusterOptions(PasswordAuthenticator(pool_cb_user, pool_cb_user_p),
                                              timeout_options=ClusterTimeoutOptions(kv_timeout=timedelta(seconds=10))))
        result = pool_cluster.query(query)
        count = 0
        ssh_failed = 0
        ssh_ok = 0
        index = 0
        csvout = open("pool_vm_health_info.csv", "w")
        # Console header (same columns as the CSV header below).
        print("ipaddr,ssh_status,ssh_error,ssh_resp_time(secs),pool_os,real_os,os_match_state,pool_state,pool_ids,pool_user,cpus,memory_total(kB),memory_free(kB),memory_available(kB),memory_use(%)," + \
              "disk_size(MB),disk_used(MB),disk_avail(MB),disk_use%,uptime,booted(days),system_time,users,cpu_load_avg_1min,cpu_load_avg_5mins,cpu_load_avg_15mins," + \
              "total_processes,total_fd_alloc,total_fd_free,total_fd_max,proc_fd_ulimit,iptables_rules_count,pool_mac_address,real_mac_address,mac_address_match,swap_total(kB),swap_used(kB),swap_free(kB),swap_use(%),couchbase_process,couchbase_version,couchbase_services,cb_data_kv_status,cb_index_status,cb_query_status,cb_search_status,cb_analytics_status,cb_eventing_status,cb_xdcr_status")
        csv_head = "ipaddr,ssh_status,ssh_error,ssh_resp_time(secs),pool_os,real_os,os_match_state,pool_state,pool_ids,pool_user,cpus,memory_total(kB),memory_free(kB),memory_available(kB),memory_use(%)," + \
            "disk_size(MB),disk_used(MB),disk_avail(MB),disk_use%,uptime,booted(days),system_time,users,cpu_load_avg_1min,cpu_load_avg_5mins,cpu_load_avg_15mins," \
            "total_processes,total_fd_alloc,total_fd_free,total_fd_max,proc_fd_ulimit,iptables_rules_count,pool_mac_address,real_mac_address,mac_address_match,swap_total(kB),swap_used(kB),swap_free(kB),swap_use(%),couchbase_process,couchbase_version,couchbase_services,cb_data_kv_status,cb_index_status,cb_query_status,cb_search_status,cb_analytics_status,cb_eventing_status,cb_xdcr_status"
        csvout.write(csv_head)
        # Pool OS label -> expected real-OS name (lowercase substring match).
        os_mappings={"centos":"centos linux 7 (core)", "centosnonroot":"centos linux 7 (core)", "debian10":"debian gnu/linux 10 (buster)", \
            "oel8":"oracle linux server 8.1", "rhel":"red hat enterprise linux", "rhel8":"red hat enterprise linux 8.3 (ootpa)", \
            "suse12":"suse linux enterprise server 12 sp2", "opensuse15":"opensuse leap 15.1","suse15":"suse linux enterprise server 15", \
            "opensuse15hostname":"opensuse leap 15.1","suse15hostname":"suse linux enterprise server 15","ubuntu18":"ubuntu 18", \
            "ubuntu20":"ubuntu 20", "windows2019":"windows" }
        is_save_cb = os.environ.get("is_save_cb", 'False').lower() in ('true', '1', 't')
        if is_save_cb:
            cb_doc = CBDoc()
            created_time = time.time()
            created_timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            is_daily_save_only = os.environ.get("is_daily_save_only", 'False').lower() in ('true', '1', 't')
            if is_daily_save_only:
                # Skip saving if anything was already saved today.
                created_date = datetime.datetime.now().strftime('%Y-%m-%d')
                query = "SELECT ipaddr FROM `" + cb_doc.cb_bucket + "` WHERE created_timestamp like '" + created_date + "%' limit 1"
                print(query)
                saved_result = cb_doc.cb_cluster.query(query)
                for row in saved_result:
                    print("NOTE: Data is not saving again for Today into cb because is_daily_save_only set!")
                    is_save_cb = False
                    break
        for row in result:
            index += 1
            try:
                # Probe the VM over SSH; check_vm returns a 20-tuple of metrics.
                ssh_status, ssh_error, ssh_resp_time, real_os, cpus, meminfo, diskinfo, uptime, uptime_days, systime, cpu_load, cpu_proc, \
                    fdinfo, iptables_rules_count, mac_address, swapinfo, cb_proc, cb_version, cb_serv, cb_ind_serv = check_vm(row['os'],row['ipaddr'])
                os_state = 0
                mac_address_state = 0
                pool_mac_address = ''
                if ssh_status == 'ssh_failed':
                    ssh_state = 0
                    ssh_failed += 1
                    os_state = 1 #Marking os_match to ok for ssh_failed to avoid more notifications
                    mac_address_state = 1
                else:
                    ssh_state = 1
                    ssh_ok += 1
                    if real_os not in (None, '') or real_os.strip():
                        pool_os = row['os'].lower()
                        if pool_os in os_mappings.keys() and os_mappings[pool_os] in real_os.lower():
                            os_state = 1
                        elif pool_os in os_mappings.keys() and pool_os.startswith('suse15'):
                            # suse15 pools may report the opensuse mapping instead.
                            if os_mappings['open'+pool_os] == real_os.lower():
                                os_state = 1
                    else:
                        os_state = 1 # To avoid in case no data like on sometimes with windows
                    if 'mac_address' in row and mac_address == row['mac_address']:
                        mac_address_state = 1
                        pool_mac_address = row['mac_address']
                    elif 'mac_address' in row:
                        pool_mac_address = row['mac_address']
                print("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}".format(index, row['ipaddr'], ssh_status, ssh_error, ssh_resp_time, row['os'], real_os, \
                    os_state, row['state'], '+'.join("{}".format(p) for p in row['poolId']) if isinstance(row['poolId'], list) else row['poolId'], row['username'], cpus, meminfo, diskinfo, uptime, uptime_days, systime, cpu_load, cpu_proc, \
                    fdinfo, iptables_rules_count, pool_mac_address, mac_address, mac_address_state, swapinfo, cb_proc, cb_version, cb_serv, cb_ind_serv))
                csv_row = "{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}".format(row['ipaddr'], ssh_state, ssh_error, ssh_resp_time, row['os'], real_os, \
                    os_state, row['state'], '+'.join("{}".format(p) for p in row['poolId']) if isinstance(row['poolId'], list) else row['poolId'], row['username'], cpus, meminfo, diskinfo,
                    uptime, uptime_days, systime, cpu_load, \
                    cpu_proc, fdinfo, iptables_rules_count, pool_mac_address, mac_address, mac_address_state, swapinfo, cb_proc, cb_version, cb_serv, cb_ind_serv)
                csvout.write("\n{}".format(csv_row))
                csvout.flush()
                ipaddr = row['ipaddr']
                if is_save_cb:
                    # Build a doc keyed by CSV header name -> CSV value.
                    doc_val = {}
                    keys = csv_head.split(",")
                    values = csv_row.split(",")
                    # NOTE(review): this loop reuses `index`, clobbering the
                    # outer per-row counter printed above — confirm intended.
                    for index in range(0, len(keys)):
                        doc_val[keys[index]] = values[index]
                    doc_val['type'] = 'static_server_pool_vm'
                    doc_val['created_time'] = created_time
                    doc_val['created_timestamp'] = created_timestamp
                    doc_key = "{}_{}".format(ipaddr, str(uuid.uuid4()))
                    cb_doc.save_doc(doc_key, doc_val)
            except Exception as ex:
                # Best-effort per VM: log and continue with the next row.
                print(ex)
                pass
            count +=1
        booked_count = get_pool_state_count(pool_cluster, pools_list, 'booked')
        avail_count = get_pool_state_count(pool_cluster, pools_list, 'available')
        using_count = booked_count + avail_count
        print("ssh_ok={},ssh_failed={},total={},booked={},avail={},using={}".format(ssh_ok, ssh_failed,count, booked_count, avail_count, using_count))
        csvout.close()
    except Exception as fex :
        print(fex)
def get_pool_data_parallel(pools):
    """Parallel variant of get_pool_data().

    Queries the server-pool bucket (with retries), fans each VM row out to a
    multiprocessing pool via get_pool_data_vm_parallel, writes the collected
    CSV rows to pool_vm_health_info.csv and optionally saves each row into
    Couchbase (is_save_cb env flag).
    """
    pools_list = []
    for pool in pools.split(','):
        pools_list.append(pool)
    # Server-pool cluster settings, overridable via environment.
    pool_cb_host = os.environ.get('pool_cb_host', "172.23.104.162")
    pool_cb_bucket = os.environ.get('pool_cb_bucket', "QE-server-pool")
    pool_cb_user = os.environ.get('pool_cb_user', "Administrator")
    pool_cb_user_p = os.environ.get('pool_cb_password')
    if not pool_cb_user_p:
        print("Error: pool_cb_password environment variable setting is missing!")
        exit(1)
    # Match rows whose poolId equals any pool OR whose poolId array contains it.
    query = "SELECT ipaddr, os, state, origin, poolId, username, mac_address FROM `" + pool_cb_bucket + "` WHERE poolId in [" \
        + ', '.join('"{0}"'.format(p) for p in pools_list) + "] or " \
        + ' or '.join('"{0}" in poolId'.format(p) for p in pools_list)
    is_debug = os.environ.get('is_debug')
    if is_debug:
        print("Query:{};".format(query))
    try:
        retry_count = int(os.environ.get('retry_count', 3))
        query_done = False
        # Retry the connect+query a few times; transient failures are common.
        while not query_done and retry_count != 0:
            try:
                pool_cluster = Cluster("couchbase://"+pool_cb_host,
                                       ClusterOptions(PasswordAuthenticator(pool_cb_user, pool_cb_user_p),
                                                      timeout_options=ClusterTimeoutOptions(kv_timeout=timedelta(seconds=10))))
                result = pool_cluster.query(query)
                query_done = True
            except:
                print("Got an error: {} and retrying after 5 secs...at {}, query_done={}, retry_count down {}".format(sys.exc_info()[0], pool_cb_host, query_done, retry_count))
                time.sleep(5)
                retry_count -= 1
        # NOTE(review): if every retry fails, `result` is unbound below and the
        # NameError is silently absorbed by the outer except — confirm intended.
        csvout = open("pool_vm_health_info.csv", "w")
        # Console header (same columns as the CSV header below).
        print("ipaddr,ssh_status,ssh_error,ssh_resp_time(secs),pool_os,real_os,os_match_state,pool_state,pool_ids,pool_user,cpus,memory_total(kB),memory_free(kB),memory_available(kB),memory_use(%)," + \
              "disk_size(MB),disk_used(MB),disk_avail(MB),disk_use%,uptime,booted(days),system_time,users,cpu_load_avg_1min,cpu_load_avg_5mins,cpu_load_avg_15mins," + \
              "total_processes,total_fd_alloc,total_fd_free,total_fd_max,proc_fd_ulimit,iptables_rules_count,pool_mac_address,real_mac_address,mac_address_match,swap_total(kB),swap_used(kB),swap_free(kB),swap_use(%),couchbase_process,couchbase_version,couchbase_services,cb_data_kv_status," + \
              "cb_index_status,cb_query_status,cb_search_status,cb_analytics_status,cb_eventing_status,cb_xdcr_status")
        csv_head = "ipaddr,ssh_status,ssh_error,ssh_resp_time(secs),pool_os,real_os,os_match_state,pool_state,pool_ids,pool_user,cpus,memory_total(kB),memory_free(kB),memory_available(kB),memory_use(%)," + \
            "disk_size(MB),disk_used(MB),disk_avail(MB),disk_use%,uptime,booted(days),system_time,users,cpu_load_avg_1min,cpu_load_avg_5mins,cpu_load_avg_15mins," + \
            "total_processes,total_fd_alloc,total_fd_free,total_fd_max,proc_fd_ulimit,iptables_rules_count,pool_mac_address,real_mac_address,mac_address_match,swap_total(kB),swap_used(kB),swap_free(kB),swap_use(%),couchbase_process,couchbase_version,couchbase_services,cb_data_kv_status," + \
            "cb_index_status,cb_query_status,cb_search_status,cb_analytics_status,cb_eventing_status,cb_xdcr_status"
        csvout.write(csv_head)
        # One worker per CPU; each worker probes one VM row.
        mp_pool = mp.Pool(mp.cpu_count())
        data = mp_pool.map(get_pool_data_vm_parallel, [row for row in result])
        mp_pool.close()
        count = 0
        ssh_failed = 0
        ssh_ok = 0
        is_save_cb = os.environ.get("is_save_cb", 'False').lower() in ('true', '1', 't')
        if is_save_cb:
            cb_doc = CBDoc()
            created_time = time.time()
            created_timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            is_daily_save_only = os.environ.get("is_daily_save_only", 'False').lower() in ('true', '1', 't')
            if is_daily_save_only:
                # Skip saving if anything was already saved today.
                created_date = datetime.datetime.now().strftime('%Y-%m-%d')
                query = "SELECT ipaddr FROM `" + cb_doc.cb_bucket + "` WHERE created_timestamp like '" + created_date + "%' limit 1"
                print(query)
                saved_result = cb_doc.cb_cluster.query(query)
                for row in saved_result:
                    print("NOTE: Data is not saving again for Today into cb because is_daily_save_only set!")
                    is_save_cb = False
                    break
        for r in data:
            count += 1
            # Second CSV column is the numeric ssh_state (1=ok, 0=failed).
            ssh_status=r.split(',')[1]
            if int(ssh_status) == 1:
                ssh_ok += 1
            else:
                ssh_failed += 1
            print("{},{}".format(count,r))
            csvout.write("\n{}".format(r))
            csvout.flush()
            csv_row = r
            if is_save_cb:
                # Build a doc keyed by CSV header name -> CSV value.
                doc_val = {}
                keys = csv_head.split(",")
                values = csv_row.split(",")
                ipaddr = values[0]
                for index in range(0, len(keys)):
                    doc_val[keys[index]] = values[index]
                doc_val['type'] = 'static_server_pool_vm'
                doc_val['created_time'] = created_time
                doc_val['created_timestamp'] = created_timestamp
                doc_key = "{}_{}".format(ipaddr, str(uuid.uuid4()))
                cb_doc.save_doc(doc_key, doc_val)
        booked_count = get_pool_state_count(pool_cluster, pools_list, 'booked')
        avail_count = get_pool_state_count(pool_cluster, pools_list, 'available')
        using_count = booked_count + avail_count
        print("ssh_ok={},ssh_failed={},total={},booked={},avail={},using={}".format(ssh_ok, ssh_failed,count, booked_count, avail_count, using_count))
        csvout.close()
    except Exception as fex :
        print(fex)