def test_flow_multi_projects(self): """Tests related to flow setup rate and flow table stability accross various triggers for verification accross VN's and accross multiple projects. """ result = True self.comp_node_fixt = {} for cmp_node in self.inputs.compute_ips: self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture( self.connections, cmp_node)) # 1. Start Traffic num_flows = 15000 flow_gen_rate = 1000 proto = 'udp' profile = 'TrafficProfile1' details = self.topo[self.topo.keys()[0]].traffic_profile[profile] self.traffic_setup(profile, details, num_flows, flow_gen_rate, proto) self.traffic_obj = self.useFixture( traffic_tests.trafficTestFixture(self.connections)) # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None, # stream_proto= 'udp', start_sport= 8000, # total_single_instance_streams= 20): startStatus = self.traffic_obj.startTraffic( total_single_instance_streams=num_flows, pps=self.flow_gen_rate, start_sport=1000, cfg_profile='ContinuousSportRange', tx_vm_fixture=self.src_vm_fixture, rx_vm_fixture=self.dst_vm_fixture, stream_proto=self.proto) msg1 = "Status of start traffic : %s, %s, %s" % ( self.proto, self.src_vm_fixture.vm_ip, startStatus['status']) self.logger.info(msg1) assert startStatus['status'], msg1 # 2. Poll live traffic & verify VM flow count flow_test_utils.verify_node_flow_setup(self) # 3. Stop Traffic self.logger.info("Proceed to stop traffic..") self.traffic_obj.stopTraffic(wait_for_stop=False) start_time = time.time() # 4. Verify flow ageing self.logger.info( "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing") sleep(self.flow_cache_timeout) while True: begin_flow_count = self.comp_node_fixt[ self.cmp_node].get_vrouter_matching_flow_count( self.flow_data) self.logger.debug('begin_flow_count: %s' % (begin_flow_count)) if begin_flow_count['all'] == 0: break flow_teardown_time = math.ceil( flow_test_utils.get_max_flow_removal_time( begin_flow_count['all'], self.flow_cache_timeout)) # flow_teardown_time is not the actual time to remove flows # Based on flow_count at this time, teardown_time is calculated to the value # which will vary with agent's poll, which is done at regular # intervals.. self.logger.info('Sleeping for %s secs' % (flow_teardown_time)) sleep(flow_teardown_time) # at the end of wait, actual_flows should be atleast < 50% of total # flows before start of teardown current_flow_count = self.comp_node_fixt[ self.cmp_node].get_vrouter_matching_flow_count( self.flow_data) self.logger.debug('current_flow_count: %s' % (current_flow_count)) if current_flow_count['all'] > (0.5 * begin_flow_count['all']): msg = [ 'Flow removal not happening as expected in node %s' % self.cmp_node] msg.append( 'Flow count before wait: %s, after wait of %s secs, its: %s' % (begin_flow_count['all'], flow_teardown_time, current_flow_count['all'])) assert False, msg if current_flow_count['all'] < (0.1 * begin_flow_count['all']): break # end of while loop elapsed_time = time.time() - start_time self.logger.info( "Flows aged out as expected in configured flow_cache_timeout") return True
def test_agent_flow_settings(self): """Basic systest with single project with many features & traffic.. """ # # Check if there are enough nodes i.e. atleast 2 compute nodes to run this test. # else report that minimum 2 compute nodes are needed for this test and # exit. if len(self.inputs.compute_ips) < 2: self.logger.warn( "Minimum 2 compute nodes are needed for this test to run") self.logger.warn( "Exiting since this test can't be run on single compute node") return True # # Get config for test from topology # import mini_flow_test_topo # topology_class_name = mini_flow_test_topo.systest_topo_single_project topology_class_name = flow_test_topo.systest_topo_single_project self.logger.info( "Scenario for the test used is: %s" % (topology_class_name)) topo = topology_class_name( compute_node_list=self.inputs.compute_ips) # # 1. Test setup: Configure policy, VN, & VM # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} # Returned topo is of following format: # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': # vm_fixture} setup_obj = self.useFixture( sdnTopoSetupFixture(self.connections, topo)) out = setup_obj.sdn_topo_setup() assertEqual(out['result'], True, out['msg']) if out['result']: config_topo = out['data'][1] self.proj = list(config_topo.keys())[0] self.topo, self.config_topo = topo, config_topo # 2. set agent flow_cache_timeout to 60s # set max_vm_flows to 1% of 500k, comes to 5000 self.comp_node_fixt = {} self.flow_cache_timeout = 60 for cmp_node in self.inputs.compute_ips: self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture( self.connections, cmp_node)) self.comp_node_fixt[cmp_node].set_flow_aging_time( self.flow_cache_timeout) self.comp_node_fixt[cmp_node].sup_vrouter_process_restart() # 3. Start Traffic for profile, details in self.topo.traffic_profile.items(): self.logger.info("Profile under test: %s, details: %s" %(profile, details)) self.src_vm = details['src_vm'] self.dst_vm = details['dst_vm'] self.src_proj = self.proj self.dst_proj = self.proj # Set num_flows to fixed, smaller value but > 1% of # system max flows num_flows = 5555 self.generated_flows = 2*num_flows self.flow_gen_rate = 1000 src_vm_fixture = self.config_topo[self.proj]['vm'][self.src_vm] src_vm_vn = src_vm_fixture.vn_names[0] src_vm_vn_fix = self.config_topo[self.proj]['vn'][src_vm_vn] dst_vm_fixture = self.config_topo[self.proj]['vm'][self.dst_vm] self.proto = 'udp' self.cmp_node = src_vm_fixture.vm_node_ip # 3a. Set max_vm_flows to 1% in TX VM node self.max_vm_flows = 1 self.comp_node_fixt[ self.cmp_node].set_per_vm_flow_limit( self.max_vm_flows) self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart() self.logger.info( "Wait for 2s for flow setup to start after service restart") sleep(2) flow_test_utils.update_vm_mdata_ip(self.cmp_node, self) self.traffic_obj = self.useFixture( traffic_tests.trafficTestFixture(self.connections)) # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None, # stream_proto= 'udp', start_sport= 8000, # total_single_instance_streams= 20): startStatus = self.traffic_obj.startTraffic( total_single_instance_streams=num_flows, pps=self.flow_gen_rate, start_sport=1000, cfg_profile='ContinuousSportRange', tx_vm_fixture=src_vm_fixture, rx_vm_fixture=dst_vm_fixture, stream_proto=self.proto) msg1 = "Status of start traffic : %s, %s, %s" % ( self.proto, src_vm_fixture.vm_ip, startStatus['status']) self.logger.info(msg1) assert startStatus['status'], msg1 # 4. 
Poll live traffic & verify VM flow count self.verify_node_flow_setup() # 5. Increase max_vm_flows to 50% in TX VM node self.max_vm_flows = 50 self.comp_node_fixt[ self.cmp_node].set_per_vm_flow_limit( self.max_vm_flows) self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart() self.logger.info( "Wait for 2s for flow setup to start after service restart") sleep(2) # 6. Poll live traffic self.verify_node_flow_setup() # 7. Stop Traffic self.logger.info("Proceed to stop traffic..") self.traffic_obj.stopTraffic(wait_for_stop=False) start_time = time.time() # 8. Verify flow ageing self.logger.info( "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing") sleep(self.flow_cache_timeout) retries = 0 retry_wait_time = 10 flow_teardown_time = math.ceil(flow_test_utils.get_max_flow_removal_time(self.generated_flows, self.flow_cache_timeout)) self.logger.debug("flow tear down time based on calcualtion: %s" %flow_teardown_time) max_retries = math.ceil(self.flow_cache_timeout / retry_wait_time) while retries < max_retries: actual_flows = self.comp_node_fixt[ self.cmp_node].get_vrouter_matching_flow_count( self.flow_data) actual_flows = int(actual_flows['all']) if actual_flows > 10: self.logger.info("Waiting for flows to age out") sleep(retry_wait_time) retries += 1 else: break elapsed_time = time.time() - start_time if actual_flows > 50: msg = "Expected flows to age-out as configured, Seeing flows still active after elapsed time %s in node: %s, actual_flows: %s" % ( elapsed_time, self.cmp_node, actual_flows) assert False, msg else: self.logger.info( "Flows aged out as expected in configured flow_cache_timeout") self.logger.info( "elapsed_time after stopping traffic is %s, flow_count is %s" % (elapsed_time, actual_flows))
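# The max_vm_flows knob used above is a percentage of the vrouter flow table
# (the step-2 comment assumes a 500k-entry table, so 1% is about 5000 flows).
# The helper below is a hypothetical illustration of that arithmetic only; the
# name and default table size are assumptions, not part of the test framework
# or the agent API.
def _expected_per_vm_flow_cap(max_vm_flows_percent, flow_table_size=500000):
    """Flow count at which a single VM's flows should be capped."""
    return int(flow_table_size * max_vm_flows_percent / 100.0)
# With max_vm_flows=1 the cap is ~5000, so offering num_flows=5555 deliberately
# exceeds it and exercises the limit; raising max_vm_flows to 50 lifts the cap
# far above the offered load, so all flows should then be set up.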
def test_agent_flow_settings(self): """Basic systest with single project with many features & traffic.. """ # # Check if there are enough nodes i.e. atleast 2 compute nodes to run this test. # else report that minimum 2 compute nodes are needed for this test and # exit. if len(self.inputs.compute_ips) < 2: self.logger.warn( "Minimum 2 compute nodes are needed for this test to run") self.logger.warn( "Exiting since this test can't be run on single compute node") return True # # Get config for test from topology # import mini_flow_test_topo # topology_class_name = mini_flow_test_topo.systest_topo_single_project topology_class_name = flow_test_topo.systest_topo_single_project self.logger.info("Scenario for the test used is: %s" % (topology_class_name)) topo = topology_class_name(compute_node_list=self.inputs.compute_ips) # # 1. Test setup: Configure policy, VN, & VM # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} # Returned topo is of following format: # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': # vm_fixture} setup_obj = self.useFixture(sdnTopoSetupFixture( self.connections, topo)) out = setup_obj.sdn_topo_setup() assertEqual(out['result'], True, out['msg']) if out['result']: config_topo = out['data'][1] self.proj = list(config_topo.keys())[0] self.topo, self.config_topo = topo, config_topo # 2. set agent flow_cache_timeout to 60s # set max_vm_flows to 1% of 500k, comes to 5000 self.comp_node_fixt = {} self.flow_cache_timeout = 60 for cmp_node in self.inputs.compute_ips: self.comp_node_fixt[cmp_node] = self.useFixture( ComputeNodeFixture(self.connections, cmp_node)) self.comp_node_fixt[cmp_node].set_flow_aging_time( self.flow_cache_timeout) self.comp_node_fixt[cmp_node].sup_vrouter_process_restart() # 3. Start Traffic for profile, details in self.topo.traffic_profile.items(): self.logger.info("Profile under test: %s, details: %s" % (profile, details)) self.src_vm = details['src_vm'] self.dst_vm = details['dst_vm'] self.src_proj = self.proj self.dst_proj = self.proj # Set num_flows to fixed, smaller value but > 1% of # system max flows num_flows = 5555 self.generated_flows = 2 * num_flows self.flow_gen_rate = 1000 src_vm_fixture = self.config_topo[self.proj]['vm'][self.src_vm] src_vm_vn = src_vm_fixture.vn_names[0] src_vm_vn_fix = self.config_topo[self.proj]['vn'][src_vm_vn] dst_vm_fixture = self.config_topo[self.proj]['vm'][self.dst_vm] self.proto = 'udp' self.cmp_node = src_vm_fixture.vm_node_ip # 3a. Set max_vm_flows to 1% in TX VM node self.max_vm_flows = 1 self.comp_node_fixt[self.cmp_node].set_per_vm_flow_limit( self.max_vm_flows) self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart() self.logger.info( "Wait for 2s for flow setup to start after service restart") sleep(2) flow_test_utils.update_vm_mdata_ip(self.cmp_node, self) self.traffic_obj = self.useFixture( traffic_tests.trafficTestFixture(self.connections)) # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None, # stream_proto= 'udp', start_sport= 8000, # total_single_instance_streams= 20): startStatus = self.traffic_obj.startTraffic( total_single_instance_streams=num_flows, pps=self.flow_gen_rate, start_sport=1000, cfg_profile='ContinuousSportRange', tx_vm_fixture=src_vm_fixture, rx_vm_fixture=dst_vm_fixture, stream_proto=self.proto) msg1 = "Status of start traffic : %s, %s, %s" % ( self.proto, src_vm_fixture.vm_ip, startStatus['status']) self.logger.info(msg1) assert startStatus['status'], msg1 # 4. 
Poll live traffic & verify VM flow count self.verify_node_flow_setup() # 5. Increase max_vm_flows to 50% in TX VM node self.max_vm_flows = 50 self.comp_node_fixt[self.cmp_node].set_per_vm_flow_limit( self.max_vm_flows) self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart() self.logger.info( "Wait for 2s for flow setup to start after service restart") sleep(2) # 6. Poll live traffic self.verify_node_flow_setup() # 7. Stop Traffic self.logger.info("Proceed to stop traffic..") self.traffic_obj.stopTraffic(wait_for_stop=False) start_time = time.time() # 8. Verify flow ageing self.logger.info( "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing" ) sleep(self.flow_cache_timeout) retries = 0 retry_wait_time = 10 flow_teardown_time = math.ceil( flow_test_utils.get_max_flow_removal_time( self.generated_flows, self.flow_cache_timeout)) self.logger.debug("flow tear down time based on calcualtion: %s" % flow_teardown_time) max_retries = math.ceil(self.flow_cache_timeout / retry_wait_time) while retries < max_retries: actual_flows = self.comp_node_fixt[ self.cmp_node].get_vrouter_matching_flow_count( self.flow_data) actual_flows = int(actual_flows['all']) if actual_flows > 10: self.logger.info("Waiting for flows to age out") sleep(retry_wait_time) retries += 1 else: break elapsed_time = time.time() - start_time if actual_flows > 50: msg = "Expected flows to age-out as configured, Seeing flows still active after elapsed time %s in node: %s, actual_flows: %s" % ( elapsed_time, self.cmp_node, actual_flows) assert False, msg else: self.logger.info( "Flows aged out as expected in configured flow_cache_timeout" ) self.logger.info( "elapsed_time after stopping traffic is %s, flow_count is %s" % (elapsed_time, actual_flows))
def test_flow_multi_projects(self): """Tests related to flow setup rate and flow table stability accross various triggers for verification accross VN's and accross multiple projects""" result = True self.comp_node_fixt = {} for cmp_node in self.inputs.compute_ips: self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture( self.connections, cmp_node)) # # Check if there are enough nodes i.e. atleast 2 compute nodes to run this test. # else report that minimum 2 compute nodes are needed for this test and # exit. if len(self.inputs.compute_ips) < 2: self.logger.warn( "Minimum 2 compute nodes are needed for this test to run") self.logger.warn( "Exiting since this test can't be run on single compute node") return True # # Get config for test from topology msg = [] topology_class_name = sdn_flow_test_topo_multiple_projects.multi_project_topo self.logger.info("Scenario for the test used is: %s" % (topology_class_name)) # # Create a list of compute node IP's and pass it to topo if you want to pin # a vm to a particular node topo = topology_class_name( compute_node_list=self.inputs.compute_ips) # # 1. Test setup: Configure policy, VN, & VM # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} # Returned topo is of following format: # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': # vm_fixture} setup_obj = self.useFixture( sdnTopoSetupFixture(self.connections, topo)) out = setup_obj.sdn_topo_setup() assertEqual(out['result'], True, out['msg']) self.topo, self.config_topo = out['data'][0], out['data'][1] self.proj = list(self.topo.keys())[0] # 2. Start Traffic for profile, details in self.topo[self.proj].traffic_profile.items(): self.logger.info("Profile under test: %s, details: %s" %(profile, details)) self.src_vm = details['src_vm'] self.dst_vm = details['dst_vm'] self.src_proj = details['src_proj'] self.dst_proj = details['dst_proj'] # Not flow scaling test, limit num_flows to low number.. num_flows = 15000 self.generated_flows = 2*num_flows self.flow_gen_rate = 1000 src_vm_fixture = self.config_topo[self.src_proj]['vm'][self.src_vm] src_vm_vn = src_vm_fixture.vn_names[0] src_vm_vn_fix = self.config_topo[self.src_proj]['vn'][src_vm_vn] dst_vm_fixture = self.config_topo[self.dst_proj]['vm'][self.dst_vm] self.proto = 'udp' self.cmp_node = src_vm_fixture.vm_node_ip self.comp_node_fixt[self.cmp_node].get_config_per_vm_flow_limit() self.comp_node_fixt[self.cmp_node].get_config_flow_aging_time() self.max_vm_flows = self.comp_node_fixt[self.cmp_node].max_vm_flows self.flow_cache_timeout = self.comp_node_fixt[self.cmp_node].flow_cache_timeout self.traffic_obj = self.useFixture( traffic_tests.trafficTestFixture(self.connections)) # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None, # stream_proto= 'udp', start_sport= 8000, # total_single_instance_streams= 20): startStatus = self.traffic_obj.startTraffic( total_single_instance_streams=num_flows, pps=self.flow_gen_rate, start_sport=1000, cfg_profile='ContinuousSportRange', tx_vm_fixture=src_vm_fixture, rx_vm_fixture=dst_vm_fixture, stream_proto=self.proto) msg1 = "Status of start traffic : %s, %s, %s" % ( self.proto, src_vm_fixture.vm_ip, startStatus['status']) self.logger.info(msg1) assert startStatus['status'], msg1 # 3. Poll live traffic & verify VM flow count self.verify_node_flow_setup() # 4. Stop Traffic self.logger.info("Proceed to stop traffic..") self.traffic_obj.stopTraffic(wait_for_stop=False) start_time = time.time() # 5. 
Verify flow ageing self.logger.info( "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing") sleep(self.flow_cache_timeout) while True: begin_flow_count = self.comp_node_fixt[ self.cmp_node].get_vrouter_matching_flow_count( self.flow_data) self.logger.debug('begin_flow_count: %s' %(begin_flow_count)) if begin_flow_count['all'] == 0: break flow_teardown_time = math.ceil(flow_test_utils.get_max_flow_removal_time(begin_flow_count['all'], self.flow_cache_timeout)) # flow_teardown_time is not the actual time to remove flows # Based on flow_count at this time, teardown_time is calculated to the value # which will vary with agent's poll, which is done at regular intervals.. self.logger.info('Sleeping for %s secs' %(flow_teardown_time)) sleep(flow_teardown_time) # at the end of wait, actual_flows should be atleast < 50% of total flows before start of teardown current_flow_count = self.comp_node_fixt[ self.cmp_node].get_vrouter_matching_flow_count( self.flow_data) self.logger.debug('current_flow_count: %s' %(current_flow_count)) if current_flow_count['all'] > (0.5*begin_flow_count['all']): msg = ['Flow removal not happening as expected in node %s' %self.cmp_node] msg.append('Flow count before wait: %s, after wait of %s secs, its: %s' % (begin_flow_count['all'], flow_teardown_time, current_flow_count['all'])) assert False, msg if current_flow_count['all'] < (0.1*begin_flow_count['all']): break # end of while loop elapsed_time = time.time() - start_time self.logger.info( "Flows aged out as expected in configured flow_cache_timeout") # end of profile for loop return True
def test_flow_multi_projects(self): """Tests related to flow setup rate and flow table stability accross various triggers for verification accross VN's and accross multiple projects""" result = True self.comp_node_fixt = {} for cmp_node in self.inputs.compute_ips: self.comp_node_fixt[cmp_node] = self.useFixture( ComputeNodeFixture(self.connections, cmp_node)) # # Check if there are enough nodes i.e. atleast 2 compute nodes to run this test. # else report that minimum 2 compute nodes are needed for this test and # exit. if len(self.inputs.compute_ips) < 2: self.logger.warn( "Minimum 2 compute nodes are needed for this test to run") self.logger.warn( "Exiting since this test can't be run on single compute node") return True # # Get config for test from topology msg = [] topology_class_name = sdn_flow_test_topo_multiple_projects.multi_project_topo self.logger.info("Scenario for the test used is: %s" % (topology_class_name)) # # Create a list of compute node IP's and pass it to topo if you want to pin # a vm to a particular node topo = topology_class_name(compute_node_list=self.inputs.compute_ips) # # 1. Test setup: Configure policy, VN, & VM # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} # Returned topo is of following format: # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': # vm_fixture} setup_obj = self.useFixture(sdnTopoSetupFixture( self.connections, topo)) out = setup_obj.sdn_topo_setup() assertEqual(out['result'], True, out['msg']) self.topo, self.config_topo = out['data'][0], out['data'][1] self.proj = list(self.topo.keys())[0] # 2. Start Traffic for profile, details in self.topo[self.proj].traffic_profile.items(): self.logger.info("Profile under test: %s, details: %s" % (profile, details)) self.src_vm = details['src_vm'] self.dst_vm = details['dst_vm'] self.src_proj = details['src_proj'] self.dst_proj = details['dst_proj'] # Not flow scaling test, limit num_flows to low number.. num_flows = 15000 self.generated_flows = 2 * num_flows self.flow_gen_rate = 1000 src_vm_fixture = self.config_topo[self.src_proj]['vm'][self.src_vm] src_vm_vn = src_vm_fixture.vn_names[0] src_vm_vn_fix = self.config_topo[self.src_proj]['vn'][src_vm_vn] dst_vm_fixture = self.config_topo[self.dst_proj]['vm'][self.dst_vm] self.proto = 'udp' self.cmp_node = src_vm_fixture.vm_node_ip self.comp_node_fixt[self.cmp_node].get_config_per_vm_flow_limit() self.comp_node_fixt[self.cmp_node].get_config_flow_aging_time() self.max_vm_flows = self.comp_node_fixt[self.cmp_node].max_vm_flows self.flow_cache_timeout = self.comp_node_fixt[ self.cmp_node].flow_cache_timeout self.traffic_obj = self.useFixture( traffic_tests.trafficTestFixture(self.connections)) # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None, # stream_proto= 'udp', start_sport= 8000, # total_single_instance_streams= 20): startStatus = self.traffic_obj.startTraffic( total_single_instance_streams=num_flows, pps=self.flow_gen_rate, start_sport=1000, cfg_profile='ContinuousSportRange', tx_vm_fixture=src_vm_fixture, rx_vm_fixture=dst_vm_fixture, stream_proto=self.proto) msg1 = "Status of start traffic : %s, %s, %s" % ( self.proto, src_vm_fixture.vm_ip, startStatus['status']) self.logger.info(msg1) assert startStatus['status'], msg1 # 3. Poll live traffic & verify VM flow count self.verify_node_flow_setup() # 4. Stop Traffic self.logger.info("Proceed to stop traffic..") self.traffic_obj.stopTraffic(wait_for_stop=False) start_time = time.time() # 5. 
Verify flow ageing self.logger.info( "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing" ) sleep(self.flow_cache_timeout) while True: begin_flow_count = self.comp_node_fixt[ self.cmp_node].get_vrouter_matching_flow_count( self.flow_data) self.logger.debug('begin_flow_count: %s' % (begin_flow_count)) if begin_flow_count['all'] == 0: break flow_teardown_time = math.ceil( flow_test_utils.get_max_flow_removal_time( begin_flow_count['all'], self.flow_cache_timeout)) # flow_teardown_time is not the actual time to remove flows # Based on flow_count at this time, teardown_time is calculated to the value # which will vary with agent's poll, which is done at regular intervals.. self.logger.info('Sleeping for %s secs' % (flow_teardown_time)) sleep(flow_teardown_time) # at the end of wait, actual_flows should be atleast < 50% of total flows before start of teardown current_flow_count = self.comp_node_fixt[ self.cmp_node].get_vrouter_matching_flow_count( self.flow_data) self.logger.debug('current_flow_count: %s' % (current_flow_count)) if current_flow_count['all'] > (0.5 * begin_flow_count['all']): msg = [ 'Flow removal not happening as expected in node %s' % self.cmp_node ] msg.append( 'Flow count before wait: %s, after wait of %s secs, its: %s' % (begin_flow_count['all'], flow_teardown_time, current_flow_count['all'])) assert False, msg if current_flow_count['all'] < (0.1 * begin_flow_count['all']): break # end of while loop elapsed_time = time.time() - start_time self.logger.info( "Flows aged out as expected in configured flow_cache_timeout") # end of profile for loop return True