def test_kiali_reduced_cluster_permissins(kiali_client):
    """Verify Graph access is denied after Kiali's cluster permissions are
    removed, and restored once the permissions are added back.

    Fix: the bare ``except:`` clauses (which would also swallow
    KeyboardInterrupt/SystemExit) are narrowed to ``except Exception``.
    """
    assert command_exec().oc_delete_kiali_permissions_from_cluster()
    with timeout(seconds=30, error_message='Timed out waiting for denial of Graph access'):
        while True:
            access = None
            try:
                kiali_client.graph_namespace(namespace='bookinfo')
            except Exception:
                # Reached when the graph is NOT accessible — the expected outcome here.
                access = False
            if access == False:
                break
            time.sleep(1)

    # NOTE(review): the "kaili" typo is in the helper's own name; kept as-is.
    assert command_exec().oc_add_kaili_permissions_to_cluster()
    with timeout(seconds=30, error_message='Timed out waiting for Graph access'):
        while True:
            access = True
            try:
                kiali_client.graph_namespace(namespace='bookinfo')
            except Exception:
                # Graph still NOT accessible; keep polling.
                access = False
            if access:
                break
            time.sleep(1)
def test_kiali_circuit_breakers(kiali_client): environment_configmap = conftest.__get_environment_config__(conftest.ENV_FILE) bookinfo_namespace = environment_configmap.get('mesh_bookinfo_namespace') circuit_breaker_configmap = conftest.__get_environment_config__(conftest.CIRCUIT_BREAKER_FILE) cb_count = get_cb_count(kiali_client, bookinfo_namespace) add_command_text = "oc apply -n " + environment_configmap.get('mesh_bookinfo_namespace') + " -f " + os.path.abspath(os.path.realpath(conftest.CIRCUIT_BREAKER_FILE)) add_command_result = os.popen(add_command_text).read() assert add_command_result.__contains__("created") or add_command_result.__contains__("configured") graph = kiali_client.graph_namespace(namespace=environment_configmap.get('mesh_bookinfo_namespace'), params=PARAMS) assert graph is not None with timeout(seconds=60, error_message='Timed out waiting for Circuit Breaker to be Created'): while True: if get_cb_count(kiali_client, bookinfo_namespace) > cb_count: break time.sleep(1) delete_command_text = "oc delete destinationrule " + circuit_breaker_configmap['metadata']['name'] + " -n " + environment_configmap.get('mesh_bookinfo_namespace') delete_command_result = os.popen(delete_command_text).read() assert delete_command_result.__contains__("deleted") with timeout(seconds=30, error_message='Timed out waiting for VirtualService to be Deleted'): while True: # Validate that JSON no longer has Virtual Service if get_cb_count(kiali_client, bookinfo_namespace) <= cb_count: break time.sleep(1)
def env_test():
    """Smoke-test the database connections (analysis DB and metrics DB).

    Bug fix: the original evaluated ``connect_db_test(engine, name)`` first
    and then passed its *return value* to the ``timeout`` decorator, so the
    timeout never guarded the connection attempt.  The decorator must wrap
    the function itself, which is then called with its arguments.
    """
    # Database connection test.  The timeout guard is presumably
    # signal-based and unavailable on Windows — TODO confirm — so it is
    # only applied on non-Windows platforms.
    if platform.system() == "Windows":
        connect_db_test(fx_engine, "分析库")
        connect_db_test(zb_engine, "指标库")
    else:
        timeout(TIMEOUT, error_message="ConnectionError")(connect_db_test)(fx_engine, "分析库")
        timeout(TIMEOUT, error_message="ConnectionError")(connect_db_test)(zb_engine, "指标库")
def test_service_detail_with_virtual_service(kiali_client):
    """Apply a VirtualService and verify Kiali's service-detail endpoint
    reports it (permissions, routes, weights); always delete it afterwards
    and wait for the detail to disappear.

    Fix: ``!= None`` / ``== True`` comparisons replaced with the idiomatic
    ``is not None`` / plain assert (PEP 8).
    """
    bookinfo_namespace = conftest.get_bookinfo_namespace()
    try:
        # Add a virtual service that will be tested
        assert command_exec.oc_apply(VIRTUAL_SERVICE_FILE, bookinfo_namespace)
        with timeout(seconds=30, error_message='Timed out waiting for virtual service creation'):
            while True:
                service_details = kiali_client.service_details(namespace=bookinfo_namespace, service=SERVICE_TO_VALIDATE)
                if service_details is not None and service_details.get('virtualServices') is not None \
                        and len(service_details.get('virtualServices').get('items')) > 0:
                    break
                time.sleep(1)

        assert service_details is not None
        virtual_service_descriptor = service_details.get('virtualServices')
        assert virtual_service_descriptor is not None

        # Kiali reports what the logged-in user may do with the resource.
        permissions = virtual_service_descriptor.get('permissions')
        assert permissions is not None
        assert permissions.get('update') is False
        assert permissions.get('delete') is True

        virtual_service = virtual_service_descriptor.get('items')[0]
        assert virtual_service is not None
        assert virtual_service.get('name') == 'reviews'

        https = virtual_service.get('http')
        assert https is not None
        assert len(https) == 1
        routes = https[0].get('route')
        assert len(routes) == 2

        # 80/20 split between the v1 and v2 subsets of 'reviews'.
        assert routes[0].get('weight') == 80
        destination = routes[0].get('destination')
        assert destination is not None
        assert destination.get('host') == 'reviews'
        assert destination.get('subset') == 'v1'

        assert routes[1].get('weight') == 20
        destination = routes[1].get('destination')
        assert destination is not None
        assert destination.get('host') == 'reviews'
        assert destination.get('subset') == 'v2'
    finally:
        assert command_exec.oc_delete(VIRTUAL_SERVICE_FILE, bookinfo_namespace)
        with timeout(seconds=30, error_message='Timed out waiting for virtual service deletion'):
            while True:
                service_details = kiali_client.service_details(namespace=bookinfo_namespace, service=SERVICE_TO_VALIDATE)
                if service_details is not None and len(service_details.get('virtualServices').get('items')) == 0:
                    break
                time.sleep(1)
def test_service_detail_with_destination_rule(kiali_client):
    """Apply a DestinationRule and verify Kiali's service-detail endpoint
    reports it (permissions, subsets); always delete it afterwards and wait
    for the detail to disappear.

    Fix: ``!= None`` / ``== True`` comparisons replaced with the idiomatic
    ``is not None`` / plain assert (PEP 8).
    """
    bookinfo_namespace = conftest.get_bookinfo_namespace()
    try:
        # Add a destination rule that will be tested
        assert command_exec.oc_apply(DESTINATION_RULE_FILE, bookinfo_namespace)
        with timeout(seconds=30, error_message='Timed out waiting for destination rule creation'):
            while True:
                service_details = kiali_client.service_details(namespace=bookinfo_namespace, service=SERVICE_TO_VALIDATE)
                if service_details is not None and service_details.get('destinationRules') is not None \
                        and len(service_details.get('destinationRules').get('items')) > 0:
                    break
                time.sleep(1)

        assert service_details is not None
        destination_rule_descriptor = service_details.get('destinationRules')
        assert destination_rule_descriptor is not None

        permissions = destination_rule_descriptor.get('permissions')
        assert permissions is not None
        assert permissions.get('update') is False
        assert permissions.get('delete') is True

        destination_rule = destination_rule_descriptor.get('items')[0]
        assert destination_rule is not None
        assert destination_rule.get('name') == 'reviews'
        assert 'trafficPolicy' in destination_rule

        # Expect subsets v1..v3, each labeled with its own version.
        subsets = destination_rule.get('subsets')
        assert subsets is not None
        assert len(subsets) == 3
        for i, subset in enumerate(subsets):
            subset_number = str(i + 1)
            assert subset.get('name') == 'v' + subset_number
            labels = subset.get('labels')
            assert labels is not None and labels.get('version') == 'v' + subset_number
    finally:
        assert command_exec.oc_delete(DESTINATION_RULE_FILE, bookinfo_namespace)
        with timeout(seconds=30, error_message='Timed out waiting for destination rule deletion'):
            while True:
                service_details = kiali_client.service_details(namespace=bookinfo_namespace, service=SERVICE_TO_VALIDATE)
                if service_details is not None and len(service_details.get('destinationRules').get('items')) == 0:
                    break
                time.sleep(1)
def test_auth_openshift():
    """Switch Kiali's auth strategy to openshift, obtain a token cookie via
    curl and use it against the namespaces API; always restore the 'login'
    strategy afterwards.

    Fix: the second wait's error message wrongly said "getting token"
    although it waits for the namespaces API call to succeed.
    """
    kiali_hostname = conftest.get_kiali_hostname()
    cookie_file = "./tmp_cookie_file"
    try:
        assert change_configmap_with_new_value(
            element_name='strategy:',
            list=STRATEGY_LIST,
            new_value=STRATEGY_OPENSHIFT,
            current_configmap_file=conftest.CURRENT_CONFIGMAP_FILE,
            new_configmap_file=conftest.NEW_CONFIG_MAP_FILE)

        # Create token cookie file.  shell=True is required here: the command
        # relies on the shell's $(oc whoami -t) substitution.
        cmd = "curl -v -k POST -c {} -d 'access_token='$(oc whoami -t)'&expires_in=86400&scope=user%3Afull&token_type=Bearer' https://{}/api/authenticate".format(
            cookie_file, kiali_hostname)
        with timeout(seconds=120, error_message='Timed out waiting getting token'):
            while True:
                stdout, stderr = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate()
                if 'username' in stdout.decode():
                    break
                time.sleep(2)

        # Make the API request using token cookie
        cmd = "curl -v -k -b {} https://{}/api/namespaces".format(
            cookie_file, kiali_hostname)
        with timeout(seconds=120, error_message='Timed out waiting for namespaces API call'):
            while True:
                stdout, stderr = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate()
                if "istio-system" in stdout.decode():
                    break
                time.sleep(2)

        # Clean up the temporary cookie file.
        cmd = "rm -f {}".format(cookie_file)
        Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate()
    finally:
        # Return Auth strategy back to 'login' and wait for Kiali to be accessible
        create_configmap_and_wait_for_kiali(conftest.CURRENT_CONFIGMAP_FILE)
        make_request(auth_type=AUTH_LOGIN)
def test_external_host_node(kiali_client):
    """Apply an external-host service entry and wait until the expected
    external service node shows up in the namespace graph.

    Bug fix: the original fetched the graph ONCE before the poll loop and
    then re-scanned the same stale node list on every iteration, so the
    wait could never succeed unless the node was present on the first
    request.  The graph is now re-fetched on each iteration.  Also renames
    the misspelled local ``wiat_for``.
    """
    try:
        assert command_exec.oc_apply(conftest.EXTERNAL_HOST_SERVICE_FILE, conftest.get_bookinfo_namespace()) == True
        PARAMS['namespaces'] = conftest.get_bookinfo_namespace()
        with timeout(seconds=60, error_message='Timed out waiting for \"{}\"'.format(
                EXPECTED_EXTERNAL_SERVICE_NAME)):
            waiting = True
            while waiting:
                response = kiali_client.request(method_name='graphNamespaces', params=PARAMS)
                assert response.status_code == 200
                nodes = response.json().get('elements').get('nodes')
                for node in nodes:
                    name = node.get('data').get('service')
                    if name is not None and name == EXPECTED_EXTERNAL_SERVICE_NAME:
                        waiting = False
                        break
                if waiting:
                    time.sleep(1)
    finally:
        command_exec.oc_delete(conftest.EXTERNAL_HOST_SERVICE_FILE, conftest.get_bookinfo_namespace())
def __test_change_web_root(kiali_client):
    """Switch Kiali's web_root to a custom value and poll until the API
    responds on the new root; always restore the original configmap."""
    new_web_root_value = '/e2e'
    try:
        assert change_configmap_with_new_value(
            element_name='web_root:',
            list=WEB_ROOT_LIST,
            new_value=new_web_root_value,
            current_configmap_file=conftest.CURRENT_CONFIGMAP_FILE,
            new_configmap_file=conftest.NEW_CONFIG_MAP_FILE)
        with timeout(seconds=180, error_message='Timed out waiting for API call'):
            status = None
            # Keep hitting the relocated API until it answers 200.
            while status != 200:
                if status is not None:
                    time.sleep(2)
                response = kiali_client.request(
                    plain_url=new_web_root_value + "/api/namespaces",
                    path=None, params=None)
                status = response.status_code
    finally:
        # Return web_root back to '/'
        create_configmap_and_wait_for_kiali(conftest.CURRENT_CONFIGMAP_FILE)
def download_config(self, target_file):
    """Fetch the remote configuration file into *target_file* via sftp.

    Spawns an ``sftp`` child through ``ssh.SSHSession``, waits for it to
    finish under a timeout, and raises ``RuntimeError`` if the transfer
    process exits non-zero.
    """
    # Build the user@host prefix only when a user is configured.
    if 'user' in self.settings:
        hostpart = '%s@%s' % (self.settings['user'], self.host)
    else:
        hostpart = self.host
    # e.g. ``sftp user@host:/remote/path /local/tmpfile``
    sftp_args = [
        'sftp',
        '%s:%s' % (hostpart, self.settings['path']),
        target_file.name
    ]
    session = ssh.SSHSession(
        sftp_args,
        password=self.password,
        prompt=pexpect.EOF,
    )
    if session.isalive():
        # NOTE(review): unit of timeout.timeout(5000) (seconds vs ms)
        # depends on the project's timeout helper — confirm before tuning.
        with timeout.timeout(5000):
            try:
                session.wait()
            except pexpect.ExceptionPexpect:
                # Only re-raise when the child is genuinely still running;
                # a dead child that raised during wait() is handled below
                # via its exit status.
                if session.isalive():
                    raise
    if session.exitstatus != 0:
        raise RuntimeError(
            "SFTP process did not terminate successfuly. Exitcode: %d"
            % session.exitstatus)
def _collect_inodes_metrics(self, mountpoint):
    """Return a dict of inode usage metrics for *mountpoint*.

    Returns an empty dict when statvfs times out or fails.
    """
    metrics = {}
    # statvfs can hang on dead network mounts, so bound the call.
    try:
        inodes = timeout(5)(os.statvfs)(mountpoint)
    except TimeoutException:
        self.log.warn(
            u"Timeout while retrieving the disk usage of `%s` mountpoint. Skipping...",
            mountpoint
        )
        return metrics
    except Exception as e:
        self.log.warn("Unable to get disk metrics for %s: %s", mountpoint, e)
        return metrics

    total, free = inodes.f_files, inodes.f_ffree
    if total != 0:
        used = total - free
        metrics[self.METRIC_INODE.format('total')] = total
        metrics[self.METRIC_INODE.format('free')] = free
        metrics[self.METRIC_INODE.format('used')] = used
        # FIXME: 6.x, use percent, a lot more logical than in_use
        metrics[self.METRIC_INODE.format('in_use')] = used / float(total)
    return metrics
def make_request(auth_type="auth"):
    """Poll the Kiali namespaceList endpoint with the requested auth
    strategy until it returns HTTP 200; fail fast on an unknown strategy.
    Returns True on success (the surrounding timeout raises otherwise)."""
    with timeout(seconds=180, error_message='Timed out waiting for API call to complete'):
        while True:
            # Build a client for the requested auth strategy.
            if auth_type == AUTH_LOGIN:
                client = conftest.get_new_kiali_client()
            elif auth_type == AUTH_NOAUTH:
                client = KialiClient(
                    hostname=conftest.get_kiali_hostname(),
                    auth_type=AUTH_NOAUTH,
                    verify=False,
                    swagger_address=conftest.get_kiali_swagger_address())
            else:
                assert False, "Error: Unsupported Auth Strategy Type: {}".format(
                    auth_type)
            response = client.request(method_name='namespaceList', path=None, params=None)
            if response.status_code == 200:
                break
            time.sleep(2)
    return True
def __init__(self, logger, class_name, property_names, filters="",
             host="localhost", namespace="root\\cimv2", username="",
             password="", and_props=None, timeout_duration=10):
    """Initialize the WMI sampler: connection info, queried class and
    properties, and a timeout-guarded query method.

    Bug fixes: ``and_props`` no longer uses a shared mutable default
    (``[]``), and the caller's ``property_names`` list is copied rather
    than mutated in place for raw performance classes.
    """
    self.logger = logger
    # Connection information
    self.host = host
    self.namespace = namespace
    self.username = username
    self.password = password

    self.is_raw_perf_class = "_PERFRAWDATA_" in class_name.upper()
    if self.is_raw_perf_class:
        # Required properties for computing values from raw counters.
        property_names = list(property_names) + [
            "Timestamp_Sys100NS",
            "Frequency_Sys100NS",
        ]
    self.class_name = class_name
    self.property_names = property_names
    self.filters = filters
    self._and_props = [] if and_props is None else and_props
    self._formatted_filters = None
    self.property_counter_types = None
    self._timeout_duration = timeout_duration
    # Guard the query so a hung WMI call cannot block the collector.
    self._query = timeout(timeout_duration)(self._query)

    self.current_sample = None
    self.previous_sample = None
    self._sampling = False
def do_test(kiali_client, graph_params, yaml_file, badge):
    """Apply *yaml_file* and wait for *badge* occurrences in the namespace
    graph to rise above the baseline, then delete it and wait for the count
    to drop back.  Returns True.

    Fix: ``result.__contains__(x)`` replaced with the idiomatic ``x in
    result``.
    """
    environment_configmap = conftest.__get_environment_config__(
        conftest.ENV_FILE)
    bookinfo_namespace = environment_configmap.get('mesh_bookinfo_namespace')
    appType = kiali_client.graph_namespace(namespace=bookinfo_namespace,
                                           params=graph_params)['graphType']
    assert appType == graph_params.get('graphType')
    count = get_badge_count(kiali_client, bookinfo_namespace, graph_params, badge)

    add_command_text = "oc apply -n " + bookinfo_namespace + " -f " + os.path.abspath(
        os.path.realpath(yaml_file))
    add_command_result = os.popen(add_command_text).read()
    assert "created" in add_command_result or "configured" in add_command_result

    graph = kiali_client.graph_namespace(namespace=bookinfo_namespace,
                                         params=graph_params)
    assert graph is not None
    with timeout(seconds=60, error_message='Timed out waiting for Create'):
        while True:
            new_count = get_badge_count(kiali_client, bookinfo_namespace,
                                        graph_params, badge)
            if new_count != 0 and new_count >= count:
                break
            time.sleep(1)

    delete_command_text = "oc delete -n " + bookinfo_namespace + " -f " + os.path.abspath(
        os.path.realpath(yaml_file))
    delete_command_result = os.popen(delete_command_text).read()
    assert "deleted" in delete_command_result

    with timeout(seconds=30, error_message='Timed out waiting for Delete'):
        while True:
            # Validate that the badge count has dropped back to the baseline
            if get_badge_count(kiali_client, bookinfo_namespace,
                               graph_params, badge) <= count:
                break
            time.sleep(1)
    return True
def __init__(
    self,
    logger,
    class_name,
    property_names,
    filters="",
    host="localhost",
    namespace="root\\cimv2",
    username="",
    password="",
    and_props=None,
    timeout_duration=10,
):
    """Initialize the WMI sampler: connection info, queried class and
    properties, and a timeout-guarded query method.

    Bug fixes: ``and_props`` no longer uses a shared mutable default
    (``[]``), and the caller's ``property_names`` list is copied rather
    than mutated in place for raw performance classes.
    """
    self.logger = logger

    # Connection information
    self.host = host
    self.namespace = namespace
    self.username = username
    self.password = password

    self.is_raw_perf_class = "_PERFRAWDATA_" in class_name.upper()

    # Sampler settings
    #   WMI class, properties, filters and counter types
    #   Include required properties for making calculations with raw
    #   performance counters:
    #   https://msdn.microsoft.com/en-us/library/aa394299(v=vs.85).aspx
    if self.is_raw_perf_class:
        property_names = list(property_names) + [
            "Timestamp_Sys100NS",
            "Frequency_Sys100NS",
            # IMPORTANT: To improve performance and since they're currently
            # not needed, do not include the other Timestamp/Frequency
            # properties:
            #   - Timestamp_PerfTime
            #   - Timestamp_Object
            #   - Frequency_PerfTime
            #   - Frequency_Object"
        ]

    self.class_name = class_name
    self.property_names = property_names
    self.filters = filters
    self._and_props = [] if and_props is None else and_props
    self._formatted_filters = None
    self.property_counter_types = None
    self._timeout_duration = timeout_duration
    # Guard the query so a hung WMI call cannot block the collector.
    self._query = timeout(timeout_duration)(self._query)

    # Samples
    self.current_sample = None
    self.previous_sample = None

    # Sampling state
    self._sampling = False
def test_kiali_virtual_service(kiali_client):
    """Apply a VirtualService, wait until Kiali reports it, then delete it
    and wait for the count to drop back.

    Fix: ``result.__contains__(x)`` replaced with the idiomatic ``x in
    result``.
    """
    environment_configmap = conftest.__get_environment_config__(
        conftest.ENV_FILE)
    bookinfo_namespace = environment_configmap.get('mesh_bookinfo_namespace')
    vs_count = get_vs_count(kiali_client, bookinfo_namespace)

    add_command_text = "oc apply -n " + bookinfo_namespace + " -f " + os.path.abspath(
        os.path.realpath(conftest.VIRTUAL_SERVICE_FILE))
    add_command_result = os.popen(add_command_text).read()
    assert "created" in add_command_result or "configured" in add_command_result

    graph = kiali_client.graph_namespace(namespace=bookinfo_namespace, params=PARAMS)
    assert graph is not None
    nodes = kiali_client.graph_namespace(namespace=bookinfo_namespace,
                                         params=PARAMS)["elements"]['nodes']
    assert nodes is not None

    with timeout(
            seconds=30,
            error_message='Timed out waiting for VirtualService to be Created'):
        while True:
            if get_vs_count(kiali_client, bookinfo_namespace) > vs_count:
                break
            time.sleep(1)

    delete_command_text = "oc delete -n {} -f {}".format(
        bookinfo_namespace, conftest.VIRTUAL_SERVICE_FILE)
    delete_command_result = os.popen(delete_command_text).read()
    assert "deleted" in delete_command_result

    with timeout(
            seconds=30,
            error_message='Timed out waiting for VirtualService to be Deleted'):
        while True:
            # Validate that Kiali no longer reports the VirtualService
            if get_vs_count(kiali_client, bookinfo_namespace) <= vs_count:
                break
            time.sleep(1)
def __test_diversity_in_workload_list_endpoint(kiali_client):
    """Apply extra workloads of several controller types and verify the
    workloadList endpoint reports each with its correct type; always delete
    them afterwards and wait until they are gone.

    Fixes: ``!= None`` → ``is not None``; ``set(list(map(lambda ...)))`` →
    set comprehension; ``x.intersection(y) == set()`` → ``not
    x.intersection(y)``.
    """
    bookinfo_namespace = conftest.get_bookinfo_namespace()
    try:
        # Add extra workloads that will be tested
        assert command_exec.oc_apply(conftest.WORKLOADS_FILE, bookinfo_namespace)
        with timeout(seconds=90, error_message='Timed out waiting for extra workloads creation'):
            while True:
                workload_list = kiali_client.request(method_name='workloadList',
                                                     path={'namespace': bookinfo_namespace}).json()
                if workload_list is not None and workload_list.get('workloads') is not None:
                    workload_names = {workload.get('name') for workload in workload_list.get('workloads')}
                    if EXTRA_WORKLOADS.issubset(workload_names):
                        break
                time.sleep(1)

        # Expected controller type for each extra workload.
        dicWorkloadType = {
            'details-v2': 'Pod',
            'reviews-v4': 'ReplicaSet',
            'reviews-v5': 'ReplicationController',
            'reviews-v6': 'StatefulSet'
        }
        for workload in workload_list.get('workloads'):
            if workload.get('name') in EXTRA_WORKLOADS:
                assert workload.get('type') == dicWorkloadType[workload.get('name')]
    finally:
        assert command_exec.oc_delete(conftest.WORKLOADS_FILE, bookinfo_namespace)
        with timeout(seconds=90, error_message='Timed out waiting for extra workloads deletion'):
            print('Extra workloads added for this test:', EXTRA_WORKLOADS)
            while True:
                workload_list = kiali_client.request(method_name='workloadList',
                                                     path={'namespace': bookinfo_namespace}).json()
                if workload_list is not None and workload_list.get('workloads') is not None:
                    workload_names = {workload.get('name') for workload in workload_list.get('workloads')}
                    print('Still existing workloads:', workload_names)
                    if not EXTRA_WORKLOADS.intersection(workload_names):
                        break
                time.sleep(1)
def do_test(kiali_client, graph_params, yaml_file, badge):
    """Apply *yaml_file*, wait for *badge* to appear in the namespaces
    graph, then always delete it and wait for the count to drop back.
    Returns True.

    Fix: the bare ``except:`` (which would also swallow
    KeyboardInterrupt/SystemExit) is narrowed to ``except Exception``.
    """
    bookinfo_namespace = conftest.get_bookinfo_namespace()
    graph_params["namespaces"] = bookinfo_namespace
    json = kiali_client.graph_namespaces(params=graph_params)
    print("Debug: Start do_test: JSON: {}".format(json))
    # The badge must not be present before the yaml is applied.
    assert badge not in json
    assert json['graphType'] == graph_params.get('graphType')
    count = get_badge_count(kiali_client, bookinfo_namespace, graph_params, badge)
    try:
        assert command_exec.oc_apply(yaml_file, bookinfo_namespace) == True
        graph = kiali_client.graph_namespaces(params=graph_params)
        assert graph is not None
        try:
            with timeout(seconds=30, error_message='Timed out waiting for Create'):
                while True:
                    new_count = get_badge_count(kiali_client, bookinfo_namespace,
                                                graph_params, badge)
                    if new_count != 0 and new_count >= count:
                        break
                    time.sleep(1)
        except Exception:
            # Dump the current nodes to aid debugging before failing.
            print("Timeout Exception - Nodes: {}".format(
                kiali_client.graph_namespaces(params=graph_params)["elements"]['nodes']))
            raise Exception("Timeout - Waiting for badge: {}".format(badge))
    finally:
        assert command_exec.oc_delete(yaml_file, bookinfo_namespace) == True
        with timeout(seconds=30, error_message='Timed out waiting for Delete'):
            while True:
                # Validate that JSON no longer has Virtual Service
                if get_badge_count(kiali_client, bookinfo_namespace,
                                   graph_params, badge) <= count:
                    break
                time.sleep(1)
    return True
def __init__(self, logger, class_name, property_names, filters="",
             host="localhost", namespace="root\\cimv2", username="",
             password="", and_props=None, timeout_duration=10):
    """Initialize the WMI sampler: connection info, queried class and
    properties, and a timeout-guarded query method.

    Bug fixes: ``and_props`` no longer uses a shared mutable default
    (``[]``), and the caller's ``property_names`` list is copied rather
    than mutated in place for raw performance classes.
    """
    self.logger = logger

    # Connection information
    self.host = host
    self.namespace = namespace
    self.username = username
    self.password = password

    self.is_raw_perf_class = "_PERFRAWDATA_" in class_name.upper()

    # Sampler settings
    #   WMI class, properties, filters and counter types
    #   Include required properties for making calculations with raw
    #   performance counters:
    #   https://msdn.microsoft.com/en-us/library/aa394299(v=vs.85).aspx
    if self.is_raw_perf_class:
        property_names = list(property_names) + [
            "Timestamp_Sys100NS",
            "Frequency_Sys100NS",
            # IMPORTANT: To improve performance and since they're currently
            # not needed, do not include the other Timestamp/Frequency
            # properties:
            #   - Timestamp_PerfTime
            #   - Timestamp_Object
            #   - Frequency_PerfTime
            #   - Frequency_Object"
        ]

    self.class_name = class_name
    self.property_names = property_names
    self.filters = filters
    self._and_props = [] if and_props is None else and_props
    self._formatted_filters = None
    self.property_counter_types = None
    self._timeout_duration = timeout_duration
    # Guard the query so a hung WMI call cannot block the collector.
    self._query = timeout(timeout_duration)(self._query)

    # Samples
    self.current_sample = None
    self.previous_sample = None

    # Sampling state
    self._sampling = False
def do_test(kiali_client, graph_params, yaml_file, badge):
    """Apply *yaml_file*, wait for *badge* to appear on the namespace graph,
    then always delete it and wait for the badge count to drop back.
    Returns True.

    Fixes: removed the doubled ``bookinfo_namespace = bookinfo_namespace =``
    assignment and the unused ``environment_configmap`` lookup.
    """
    bookinfo_namespace = conftest.get_bookinfo_namespace()
    appType = kiali_client.graph_namespace(namespace=bookinfo_namespace,
                                           params=graph_params)['graphType']
    assert appType == graph_params.get('graphType')
    count = get_badge_count(kiali_client, bookinfo_namespace, graph_params, badge)
    try:
        assert command_exec.oc_apply(yaml_file, bookinfo_namespace)
        graph = kiali_client.graph_namespace(namespace=bookinfo_namespace,
                                             params=graph_params)
        assert graph is not None
        with timeout(seconds=30, error_message='Timed out waiting for Create'):
            while True:
                new_count = get_badge_count(kiali_client, bookinfo_namespace,
                                            graph_params, badge)
                if new_count != 0 and new_count >= count:
                    break
                time.sleep(1)
    finally:
        assert command_exec.oc_delete(yaml_file, bookinfo_namespace)
        with timeout(seconds=30, error_message='Timed out waiting for Delete'):
            while True:
                # Validate that the badge count has dropped back
                if get_badge_count(kiali_client, bookinfo_namespace,
                                   graph_params, badge) <= count:
                    break
                time.sleep(1)
    return True
def collect_metrics_psutil(self):
    """Collect per-partition disk usage metrics via psutil and emit them as
    gauges; also records valid disks for later latency collection."""
    self._valid_disks = {}
    for part in psutil.disk_partitions(all=True):
        # we check all exclude conditions
        if self._exclude_disk_psutil(part):
            continue
        # Get disk metrics here to be able to exclude on total usage
        try:
            # Bound the call: disk_usage can hang on stale network mounts.
            disk_usage = timeout(5)(psutil.disk_usage)(part.mountpoint)
        except TimeoutException:
            self.log.warn(
                u"Timeout while retrieving the disk usage of `%s` mountpoint. Skipping...",
                mountpoint
            )
            return metrics
        except Exception as e:
            self.log.warn("Unable to get disk metrics for %s: %s", mountpoint, e)
            return metrics
        # Exclude disks with total disk size 0
        if disk_usage.total == 0:
            continue
        # For later, latency metrics
        self._valid_disks[part.device] = (part.fstype, part.mountpoint)
        self.log.debug('Passed: {0}'.format(part.device))

        tags = [part.fstype] if self._tag_by_filesystem else []
        device_name = part.mountpoint if self._use_mount else part.device
        # Note: psutil (0.3.0 to at least 3.1.1) calculates in_use as (used / total)
        # The problem here is that total includes reserved space the user
        # doesn't have access to. This causes psutil to calculate a misleadng
        # percentage for in_use; a lower percentage than df shows.
        # Calculate in_use w/o reserved space; consistent w/ df's Use% metric.
        pmets = self._collect_part_metrics(part, disk_usage)
        used = 'system.disk.used'
        free = 'system.disk.free'
        pmets['system.disk.in_use'] = pmets[used] / (pmets[used] + pmets[free])
        # legacy check names c: vs psutil name C:\\
        if Platform.is_win32():
            device_name = device_name.strip('\\').lower()
        # NOTE(review): .iteritems() implies Python 2 — confirm before porting.
        for metric_name, metric_value in pmets.iteritems():
            self.gauge(metric_name, metric_value,
                       tags=tags, device_name=device_name)
    # And finally, latency metrics, a legacy gift from the old Windows Check
    if Platform.is_win32():
        self.collect_latency_metrics()
def loadLEDs():
    '''Loads IPs from ip.order.txt into memory, and attempts to connect to them.

    Bug fix: the original read the first line and then immediately
    overwrote it at the top of the loop, silently dropping the first IP
    in the file.  The file is now iterated line by line.  The bare
    ``except:`` around the file read is narrowed to ``except Exception``.
    '''
    # Open ip.order.txt and read the IPs of the bulbs. It expects a list
    # of IPs like 192.168.1.1, with one IP per line. The file should end
    # with one empty line.
    filepath = '/home/pi/Music-LED/ip.order.txt'

    # Define variables
    ips = []
    bulbs = {}
    lastOrder = {}
    cnt = 1

    # Try to open local list of IPs, if not, scan the network for list
    try:
        with open(filepath) as fp:
            for line in fp:
                ip = line.strip()
                if ip == "":
                    continue
                ips.append(ip)
    except Exception:
        ips = getBulbs()

    # Once the file has been read, the script will attempt to create
    # objects for each bulb.
    printLog(f"💡 Connected to {len(ips)} LED strips")
    for ip in ips:
        with timeout(1):
            try:
                bulb = flux_led.WifiLedBulb(ip, timeout=0.1)
                # print(vars(bulb))
                # print(bulb._WifiLedBulb__state_str)
                bulbs[cnt] = bulb
                lastOrder[cnt] = ""
                cnt = cnt + 1
            except Exception as e:
                print("Unable to connect to bulb at [{}]: {}".format(ip, e))
                continue

    # Return the bulbs that were successfully initiated
    return bulbs
def collect_metrics_psutil(self):
    """Collect per-partition disk usage metrics via psutil and emit them as
    gauges; also records valid disks for later latency collection."""
    self._valid_disks = {}
    for part in psutil.disk_partitions(all=True):
        # we check all exclude conditions
        if self._exclude_disk_psutil(part):
            continue
        # Get disk metrics here to be able to exclude on total usage
        try:
            # Bound the call: disk_usage can hang on stale network mounts.
            disk_usage = timeout(5)(psutil.disk_usage)(part.mountpoint)
        except TimeoutException:
            self.log.warn(
                u"Timeout while retrieving the disk usage of `%s` mountpoint. Skipping...",
                part.mountpoint
            )
            continue
        except Exception as e:
            self.log.warn("Unable to get disk metrics for %s: %s",
                          part.mountpoint, e)
            continue
        # Exclude disks with total disk size 0
        if disk_usage.total == 0:
            continue
        # For later, latency metrics
        self._valid_disks[part.device] = (part.fstype, part.mountpoint)
        self.log.debug('Passed: {0}'.format(part.device))

        tags = [part.fstype] if self._tag_by_filesystem else []
        device_name = part.mountpoint if self._use_mount else part.device
        # Note: psutil (0.3.0 to at least 3.1.1) calculates in_use as (used / total)
        # The problem here is that total includes reserved space the user
        # doesn't have access to. This causes psutil to calculate a misleadng
        # percentage for in_use; a lower percentage than df shows.
        # Calculate in_use w/o reserved space; consistent w/ df's Use% metric.
        pmets = self._collect_part_metrics(part, disk_usage)
        used = 'system.disk.used'
        free = 'system.disk.free'
        pmets['system.disk.in_use'] = pmets[used] / (pmets[used] + pmets[free])
        # legacy check names c: vs psutil name C:\\
        if Platform.is_win32():
            device_name = device_name.strip('\\').lower()
        # NOTE(review): .iteritems() implies Python 2 — confirm before porting.
        for metric_name, metric_value in pmets.iteritems():
            self.gauge(metric_name, metric_value,
                       tags=tags, device_name=device_name)
    # And finally, latency metrics, a legacy gift from the old Windows Check
    if Platform.is_win32():
        self.collect_latency_metrics()
def _test_kiali_reduced_cluster_permissions(kiali_client):
    """Remove the kiali cluster role, verify the bookinfo graph becomes
    inaccessible, then restore the role and verify access returns."""
    namespace = conftest.get_bookinfo_namespace()
    try:
        assert command_exec().oc_remove_cluster_role_rom_user_kiali()
        with timeout(
                seconds=60,
                error_message='Timed out waiting for denial of Graph access'):
            denied = False
            while not denied:
                denied = "is not accessible" in get_graph_json(kiali_client, namespace)
                if not denied:
                    time.sleep(1)
    finally:
        # Always restore the role, then wait for access to come back.
        assert command_exec().oc_add_cluster_role_to_user_kiali()
        with timeout(seconds=60, error_message='Timed out waiting for Graph access'):
            while True:
                body = get_graph_json(kiali_client, namespace)
                if "is not accessible" not in body and namespace in body:
                    break
                time.sleep(1)
def collect_metrics_psutil(self):
    """Collect per-partition disk usage metrics via psutil, apply
    filesystem/device tags, and emit them as gauges; finishes with latency
    metrics."""
    self._valid_disks = {}
    for part in psutil.disk_partitions(all=True):
        # we check all exclude conditions
        if self._exclude_disk_psutil(part):
            continue
        # Get disk metrics here to be able to exclude on total usage
        try:
            # Bound the call: disk_usage can hang on stale network mounts.
            disk_usage = timeout(5)(psutil.disk_usage)(part.mountpoint)
        except TimeoutException:
            self.log.warn(
                u"Timeout while retrieving the disk usage of `%s` mountpoint. Skipping...",
                part.mountpoint)
            continue
        except Exception as e:
            self.log.warn("Unable to get disk metrics for %s: %s",
                          part.mountpoint, e)
            continue
        # Exclude disks with total disk size 0
        if disk_usage.total == 0:
            continue
        # For later, latency metrics
        self._valid_disks[part.device] = (part.fstype, part.mountpoint)
        self.log.debug('Passed: {0}'.format(part.device))

        # Emit both the bare fstype and a `filesystem:`-prefixed tag.
        tags = [part.fstype,
                'filesystem:{}'.format(part.fstype)
                ] if self._tag_by_filesystem else []
        device_name = part.mountpoint if self._use_mount else part.device
        # apply device/mountpoint specific tags
        for regex, device_tags in self._device_tag_re:
            if regex.match(device_name):
                tags += device_tags
        # legacy check names c: vs psutil name C:\\
        if Platform.is_win32():
            device_name = device_name.strip('\\').lower()
        # NOTE(review): .iteritems() implies Python 2 — confirm before porting.
        for metric_name, metric_value in self._collect_part_metrics(
                part, disk_usage).iteritems():
            self.gauge(metric_name, metric_value,
                       tags=tags, device_name=device_name)
    # Latency metrics are collected unconditionally in this variant.
    self.collect_latency_metrics()
def oc_wait_for_kiali_state(self, state):
    """Poll the istio-system kiali pod until its status column matches
    *state*; returns True (the surrounding timeout raises otherwise)."""
    cmd = "oc get pods -n istio-system | grep kiali | awk '{print $3;}'"
    message = 'Timed out waiting for Kiali state: {}'.format(state)
    with timeout(seconds=120, error_message=message):
        found = False
        while not found:
            stdout, stderr = Popen(cmd, shell=True, stdout=PIPE,
                                   stderr=PIPE).communicate()
            found = state in stdout.decode()
            if found:
                # Allow container time to init
                time.sleep(3)
            else:
                time.sleep(2)
    return True
def main(input_file, street_names_file, pois_file, output_file, sampling_rate, ids, maps_client, strict):
    """Geocode Toronto locations from an ndjson input file.

    Builds the street/POI regex parsers, runs each sampled row through the
    parser chain (each row capped at 30s), and writes the collected
    results to *output_file*.
    """
    street_names = open(street_names_file).read().split('\n')
    toronto_street_re_str = build_is_a_toronto_street_regex_str(street_names)
    LOG.debug(f'Toronto street regex: {toronto_street_re_str}')
    exact_address_re = exact_address_regex(toronto_street_re_str)
    standalone_street_re = standalone_street_regex(toronto_street_re_str)
    place_name_re, place_map = build_place_name_regex(pois_file)
    # Candidate parsers, each taking a row; presumably tried in order by
    # row_to_result — verify against its implementation.
    parsers = [
        lambda x: parse_exact_address(exact_address_re, x),
        lambda x: parse_corner(x),
        lambda x: parse_direction_from(x),
        lambda x: parse_two_streets(standalone_street_re, x),
        lambda x: parse_streets_joined_by_and(x),
        lambda x: parse_place_name(place_name_re, place_map, x)
    ]
    results = {}
    # note: we convert to a list to get a nicer progress bar.
    for row in tqdm.tqdm(list(generators.read_ndjson_file(input_file))):
        id_ = row['uniqueID']
        # `ids`, when provided, acts as an explicit allow-list.
        if ids is not None and id_ not in ids:
            continue
        if not should_sample(id_, sampling_rate):
            LOG.debug(f'Skipping {id_} due to sampling rate')
            continue
        try:
            with timeout(seconds=30):
                geocode_result = row_to_result(parsers, maps_client, row, strict=strict)
                if geocode_result is not None:
                    uid, result = geocode_result
                    results[uid] = result
                LOG.debug(f'{id_}: {geocode_result}')
        except TimeoutError:
            # Rows exceeding the 30s budget are logged and skipped.
            LOG.warn(f'Timed out geocoding {id_}: {get_title(row)}')
            pass
    write_result_to_file(output_file, results)
def collect_metrics_psutil(self): self._valid_disks = {} for part in psutil.disk_partitions(all=True): # we check all exclude conditions if self._exclude_disk_psutil(part): continue # Get disk metrics here to be able to exclude on total usage try: disk_usage = timeout(5)(psutil.disk_usage)(part.mountpoint) except TimeoutException: self.log.warn( u"Timeout while retrieving the disk usage of `%s` mountpoint. Skipping...", part.mountpoint ) continue except Exception as e: self.log.warn("Unable to get disk metrics for %s: %s", part.mountpoint, e) continue # Exclude disks with total disk size 0 if disk_usage.total == 0: continue # For later, latency metrics self._valid_disks[part.device] = (part.fstype, part.mountpoint) self.log.debug('Passed: {0}'.format(part.device)) tags = [part.fstype] if self._tag_by_filesystem else [] device_name = part.mountpoint if self._use_mount else part.device # legacy check names c: vs psutil name C:\\ if Platform.is_win32(): device_name = device_name.strip('\\').lower() for metric_name, metric_value in self._collect_part_metrics(part, disk_usage).iteritems(): self.gauge(metric_name, metric_value, tags=tags, device_name=device_name) # And finally, latency metrics, a legacy gift from the old Windows Check if Platform.is_win32(): self.collect_latency_metrics()
def __init__(self):
    """Wrap `make_sum` with a 0.2-second timeout decorator."""
    limited = timeout(0.2)(self.make_sum)
    self.make_sum = limited
if __name__ == "__main__":
    # Wait for there to be an internet connection
    checkInternet()

    # Set up redis connection
    r = redis.from_url(os.getenv("REDIS_URL", "redis://localhost:6379"))

    # Set up audio input
    stream, chunk = createStream()

    # Main service loop: (re)connect to the lights and react to audio.
    while True:
        try:
            # set up the LEDs
            # NOTE(review): original comment said "10 s timeout" but
            # timeout(1) is 1 second — confirm which was intended.
            with timeout(1):
                bulbs = loadLEDs()
            if len(bulbs) == 0:
                printLog("No lights connected!")
                # Bail out of this iteration; presumably handled by an
                # `except` clause that lies outside this chunk.
                raise RuntimeError

            # start responding to music
            loop = asyncio.get_event_loop()
            last_ten = []  # avg values across 10 samples
            changed = True
            last_mode = 'start'
            count = 0
            timestamp_bug = 0
            timestamp = 0
            # NOTE(review): fragment is truncated here — the `try` above has
            # no matching `except` within this chunk.
def test_service_detail_with_destination_rule(kiali_client):
    """Verify serviceDetails reflects creation and deletion of a DestinationRule.

    Applies DESTINATION_RULE_FILE to the bookinfo namespace, waits (up to 60s)
    for the rule to show up in the Kiali serviceDetails response, validates its
    metadata and subsets, and always deletes it again in the finally block,
    waiting for the deletion to be reflected.
    """
    bookinfo_namespace = conftest.get_bookinfo_namespace()

    def _service_details():
        # Single point of truth for the serviceDetails request used throughout.
        return kiali_client.request(
            method_name='serviceDetails',
            path={
                'namespace': bookinfo_namespace,
                'service': SERVICE_TO_VALIDATE
            }).json()

    try:
        # check if we have any existing rules
        pre_dr_count = len(_service_details().get('destinationRules'))

        # Add a destination rule that will be tested
        assert command_exec.oc_apply(DESTINATION_RULE_FILE, bookinfo_namespace)

        with timeout(
                seconds=60,
                error_message='Timed out waiting for destination rule creation'):
            while True:
                service_details = _service_details()
                if (service_details is not None
                        and service_details.get('destinationRules') is not None
                        and len(service_details.get('destinationRules')) > pre_dr_count):
                    break
                time.sleep(1)

        assert service_details is not None
        destination_rule_descriptor = service_details.get('destinationRules')
        assert destination_rule_descriptor is not None

        # find our destination rule
        destination_rule = None
        for dr in destination_rule_descriptor:
            if dr['metadata']['name'] == 'reviews':
                destination_rule = dr
                break
        assert destination_rule is not None

        destination_rule_meta = destination_rule.get('metadata')
        assert destination_rule_meta.get('name') == 'reviews'

        destination_rule_spec = destination_rule.get('spec')
        assert 'trafficPolicy' in destination_rule_spec

        subsets = destination_rule_spec.get('subsets')
        assert subsets is not None
        assert len(subsets) == 3
        # Subsets are expected to be v1, v2, v3 in order, each labeled
        # with the matching version.
        for i, subset in enumerate(subsets):
            subset_number = str(i + 1)
            assert subset.get('name') == 'v' + subset_number
            labels = subset.get('labels')
            assert labels is not None and labels.get('version') == 'v' + subset_number
    finally:
        # Always clean up; a failed delete is itself a test failure.
        assert command_exec.oc_delete(DESTINATION_RULE_FILE, bookinfo_namespace)
        with timeout(
                seconds=60,
                error_message='Timed out waiting for destination rule deletion'):
            while True:
                service_details = _service_details()
                if service_details is not None:
                    # Treat a missing/None key as zero rules; the original
                    # raised TypeError on len(None) while deletion propagated.
                    rules = service_details.get('destinationRules') or []
                    if len(rules) == pre_dr_count:
                        break
                time.sleep(1)
def test_service_detail_with_virtual_service(kiali_client):
    """Verify serviceDetails reflects creation and deletion of a VirtualService.

    Applies VIRTUAL_SERVICE_FILE to the bookinfo namespace, waits (up to 60s)
    for the virtual service to show up in the Kiali serviceDetails response,
    validates its 80/20 route split between reviews v1 and v2, and always
    deletes it again in the finally block, waiting for the deletion to be
    reflected.
    """
    bookinfo_namespace = conftest.get_bookinfo_namespace()

    def _service_details():
        # Single point of truth for the serviceDetails request used throughout.
        return kiali_client.request(
            method_name='serviceDetails',
            path={
                'namespace': bookinfo_namespace,
                'service': SERVICE_TO_VALIDATE
            }).json()

    try:
        # check if we have any existing virtual services
        pre_vs_count = len(_service_details().get('virtualServices'))

        # Add a virtual service that will be tested
        assert command_exec.oc_apply(VIRTUAL_SERVICE_FILE, bookinfo_namespace)

        with timeout(
                seconds=60,
                error_message='Timed out waiting for virtual service creation'):
            while True:
                service_details = _service_details()
                if (service_details is not None
                        and service_details.get('virtualServices') is not None
                        and len(service_details.get('virtualServices')) > pre_vs_count):
                    break
                time.sleep(1)

        assert service_details is not None
        virtual_service_descriptor = service_details.get('virtualServices')
        assert virtual_service_descriptor is not None

        # find our virtual service
        virtual_service = None
        for vs in virtual_service_descriptor:
            if vs['metadata']['name'] == 'reviews':
                virtual_service = vs
                break
        assert virtual_service is not None

        virtual_service_meta = virtual_service.get('metadata')
        assert virtual_service_meta.get('name') == 'reviews'

        virtual_service_spec = virtual_service.get('spec')
        https = virtual_service_spec.get('http')
        assert https is not None
        assert len(https) == 1

        # Expect an 80/20 split between reviews subsets v1 and v2.
        routes = https[0].get('route')
        assert len(routes) == 2

        assert routes[0].get('weight') == 80
        destination = routes[0].get('destination')
        assert destination is not None
        assert destination.get('host') == 'reviews'
        assert destination.get('subset') == 'v1'

        assert routes[1].get('weight') == 20
        destination = routes[1].get('destination')
        assert destination is not None
        assert destination.get('host') == 'reviews'
        assert destination.get('subset') == 'v2'
    finally:
        # Always clean up; a failed delete is itself a test failure.
        assert command_exec.oc_delete(VIRTUAL_SERVICE_FILE, bookinfo_namespace)
        with timeout(
                seconds=60,
                error_message='Timed out waiting for virtual service deletion'):
            while True:
                service_details = _service_details()
                if service_details is not None:
                    # Treat a missing/None key as zero services; the original
                    # raised TypeError on len(None) while deletion propagated.
                    services = service_details.get('virtualServices') or []
                    if len(services) == pre_vs_count:
                        break
                time.sleep(1)