def test_multiLB(self):
    """Verify that multiple load balancers can share one haproxy device."""
    # lb1 must reach ACTIVE before we validate or reuse its vip
    lbaas_utils.wait_for_active_status(self, self.lb_id1)
    lbaas_utils.validate_loadBalancer(self, multi=True,
                                      multi_id=self.lb_id1,
                                      multi_name=self.lb_name1,
                                      multi_nodes=self.nodes1)
    detail = self.driver.list_lb_detail(self.lb_id1)
    if 'virtualIps' in detail:
        self.vip = detail['virtualIps'][0]['id']
    else:
        # client has different key
        self.vip = ast.literal_eval(detail['ips'])[0]['id']
    # create lb2 on the same vip so both lbs land on one device
    self.logging.info('Creating load balancer2...')
    (self.create_result, self.actual_status,
     self.lb_id2, self.lb_addr) = self.driver.create_lb(
        self.lb_name2, self.nodes2, self.algorithm,
        self.bad_statuses, self.vip)
    if self.args.verbose:
        self.logging.info("STATUS: %s" % self.actual_status)
        self.logging.info("RESULT: %s" % self.create_result.text)
    lbaas_utils.wait_for_active_status(self, self.lb_id2)
    self.logging.info('load balancer2 id: %s' % (self.lb_id2))
    lbaas_utils.validate_loadBalancer(self, multi=True,
                                      multi_id=self.lb_id2,
                                      multi_name=self.lb_name2,
                                      multi_nodes=self.nodes2)
def test_Monitoring(self):
    """Exercise the loadbalancer health-monitor API through its variants."""
    # reuse a caller-supplied lb when given, otherwise create one
    if not self.args.lbid:
        (self.create_result, self.actual_status,
         self.lb_id, self.lb_addr) = self.driver.create_lb(
            self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
        self.logging.info('load balancer id: %s' % self.lb_id)
        self.logging.info('load balancer ip addr: %s' % self.lb_addr)
        lbaas_utils.validate_loadBalancer(self)
    else:
        self.logging.info("Using user-supplied loadbalancer: %s" % self.args.lbid)
        self.lb_id = self.args.lbid
        self.lb_addr = self.get_floating_ip(self.lb_id)
    # validate the loadbalancer
    lbaas_utils.validate_loadBalancer(self)
    self.logging.info("Getting monitor data for default monitor...")
    output, status = self.driver.get_monitor(self.lb_id)
    self.assertEqual(
        status, '200',
        msg="ERROR: problem w/ loadbalancer: %s monitor. Received status: %s: %s"
            % (self.lb_id, status, output))
    self.assertEqual(
        output, self.default_monitor,
        msg="ERROR: problem with default monitor. Expected: %s, Actual: %s"
            % (self.default_monitor, output))
    # push each monitor variant and confirm it took effect
    for variant in self.monitor_data:
        description = variant['description']
        monitor = variant['monitor']
        self.logging.info("Testing monitor variant: %s..." % description)
        output, status = self.driver.update_monitor(self.lb_id, monitor)
        self.logging.info("\tStatus: %s || output: %s" % (status, output))
        current_monitor, current_status = self.driver.get_monitor(self.lb_id)
        if status == '202':
            # we have a positive result!
            self.assertEqual(
                current_monitor, monitor,
                msg="Loadbalancer: %s monitor not matching expected value: Expected: %s || Actual %s"
                    % (self.lb_id, monitor, current_monitor))
            self.logging.info("\tTest monitor: %s" % monitor)
            self.logging.info("\tCurrent monitor: %s" % current_monitor)
        lbaas_utils.wait_for_active_status(self)
def test_multiLB(self):
    """Check that one haproxy device can host more than one load balancer."""
    # wait until lb1 is ACTIVE, then validate it
    lbaas_utils.wait_for_active_status(self, self.lb_id1)
    lbaas_utils.validate_loadBalancer(self,
                                      multi=True,
                                      multi_id=self.lb_id1,
                                      multi_name=self.lb_name1,
                                      multi_nodes=self.nodes1)
    lb1_detail = self.driver.list_lb_detail(self.lb_id1)
    if 'virtualIps' in lb1_detail:
        self.vip = lb1_detail['virtualIps'][0]['id']
    else:
        # client has different key
        self.vip = ast.literal_eval(lb1_detail['ips'])[0]['id']
    # create the second lb against lb1's vip
    self.logging.info('Creating load balancer2...')
    (self.create_result, self.actual_status,
     self.lb_id2, self.lb_addr) = self.driver.create_lb(
        self.lb_name2, self.nodes2, self.algorithm,
        self.bad_statuses, self.vip)
    if self.args.verbose:
        self.logging.info("STATUS: %s" % self.actual_status)
        self.logging.info("RESULT: %s" % self.create_result.text)
    lbaas_utils.wait_for_active_status(self, self.lb_id2)
    self.logging.info('load balancer2 id: %s' % (self.lb_id2))
    lbaas_utils.validate_loadBalancer(self,
                                      multi=True,
                                      multi_id=self.lb_id2,
                                      multi_name=self.lb_name2,
                                      multi_nodes=self.nodes2)
def test_monitorLoadBalancer(self):
    """Kill and restart libra_worker; verify the API tracks ERROR/ACTIVE."""
    # build the lb under test
    (self.create_result, self.actual_status,
     self.lb_id, self.lb_addr) = self.driver.create_lb(
        self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' % self.lb_id)
    self.logging.info('load balancer ip addr: %s' % self.lb_addr)
    lbaas_utils.validate_loadBalancer(self)
    # use fabric to stop libra_worker on the device
    self.logging.info("Stopping libra_worker on lb: %s address: %s"
                      % (self.lb_id, self.lb_addr))
    cmd = "fab --no-pty -H %s stop_libra_worker" % (self.lb_addr)
    status, output = commands.getstatusoutput(cmd)
    self.logging.info("Command: %s" % cmd)
    self.logging.info("Status: %s" % status)
    self.logging.info("Output: %s" % output)
    # the api should notice the dead worker and flag ERROR
    self.logging.info("Wait / poll api server for loadbalancer to be set to ERROR status...")
    lbaas_utils.wait_for_active_status(self, self.lb_id,
                                       active_wait_time=240,
                                       desired_status='ERROR')
    # bring the worker back
    self.logging.info("Starting libra_worker on lb: %s address: %s"
                      % (self.lb_id, self.lb_addr))
    cmd = "fab --no-pty -H %s start_libra_worker" % (self.lb_addr)
    status, output = commands.getstatusoutput(cmd)
    self.logging.info("Command: %s" % cmd)
    self.logging.info("Status: %s" % status)
    self.logging.info("Output: %s" % output)
    # state should recover to ACTIVE
    self.logging.info("Wait / poll api server for loadbalancer to be set to ACTIVE status...")
    lbaas_utils.wait_for_active_status(self, self.lb_id,
                                       active_wait_time=240,
                                       desired_status='ACTIVE')
    # one final lb validation
    lbaas_utils.validate_loadBalancer(self)
def test_createLoadBalancer(self):
    """Create one loadbalancer and validate it end to end."""
    (self.create_result, self.actual_status,
     self.lb_id, self.lb_addr) = self.driver.create_lb(
        self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' % self.lb_id)
    self.logging.info('load balancer ip addr: %s' % self.lb_addr)
    lbaas_utils.validate_loadBalancer(self)
    # log the backend node list for the record
    self.logging.info(self.driver.list_lb_nodes(self.lb_id))
def test_modifyNodes(self):
    """Test modifying loadbalancer nodes: disable/enable, rejected field
    updates (ip, port), a non-existent node (404), and a bad condition value.
    """
    # wait until our lb is ACTIVE before trying to update it
    lbaas_utils.wait_for_active_status(self)
    # modify/disable a node on our loadbalancer
    self.logging.info("Testing node disable...")
    nodes = self.driver.list_lb_nodes(self.lb_id)
    mod_node = nodes['nodes'][0]
    mod_node_id = mod_node['id']
    mod_node_addr = mod_node['address']
    mod_node_data = {'condition': 'DISABLED'}
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
    lbaas_utils.validate_loadBalancer(self, [mod_node_addr])
    # re-enable the node
    self.logging.info("Testing re-enable of node...")
    mod_node_data = {'condition': 'ENABLED'}
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
    lbaas_utils.validate_loadBalancer(self)
    # modify ip / address -- expected to be rejected with 400
    self.logging.info("Testing update of node ip...")
    mod_node_data = {'address': '127.0.0.1'}
    expected_status = '400'
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
    self.assertEqual(
        str(self.actual_status), expected_status,
        msg="ERROR: Attempt to update node ip address succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_status))
    # modify port -- expected to be rejected with 400
    self.logging.info("Testing update of node port...")
    mod_node_data = {'port': '443'}
    expected_status = '400'
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
    self.assertEqual(
        str(self.actual_status), expected_status,
        msg="ERROR: Attempt to update node port succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_status))
    # update of non-existent node -- expected 404
    self.logging.info("Testing update of non-existent node...")
    mod_node_data = {'condition': "DISABLED"}
    expected_status = '404'
    self.actual_status = self.driver.modify_node(self.lb_id, '0', mod_node_data)
    # FIX: message previously said "update node port" (copy-paste error);
    # this case updates a non-existent node.
    self.assertEqual(
        str(self.actual_status), expected_status,
        msg="ERROR: Attempt to update non-existent node succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_status))
    # bad / unknown condition value -- api may answer 400 or 512
    self.logging.info("Testing bad condition...")
    mod_node_data = {'condition': 'TASERED_BADLY'}
    expected_statuses = ['400', '512']
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
    # FIX: the failure message formatted the stale scalar ``expected_status``
    # ('404' from the previous check) instead of ``expected_statuses``, the
    # list actually asserted against.
    self.assertTrue(
        str(self.actual_status) in expected_statuses,
        msg="ERROR: Attempt to update to bad condition succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_statuses))
def test_BetaRayBill(self):
    """Load-test a loadbalancer from many salt-cloud minions at once."""
    def _shell(cmd):
        # run a shell command and log command/status/output uniformly
        status, output = commands.getstatusoutput(cmd)
        self.logging.info("Command: %s" % cmd)
        self.logging.info("Status: %s" % status)
        self.logging.info("Output: %s" % output)
        return status, output

    # Create our loadbalancer, unless the user supplied one
    if not self.args.lbid:
        (self.create_result, self.actual_status,
         self.lb_id, self.lb_addr) = self.driver.create_lb(
            self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
        self.logging.info('load balancer id: %s' % self.lb_id)
        self.logging.info('load balancer ip addr: %s' % self.lb_addr)
        lbaas_utils.validate_loadBalancer(self)
    else:
        self.logging.info("Using user-supplied loadbalancer: %s" % self.args.lbid)
        self.lb_id = self.args.lbid
        self.lb_addr = self.get_floating_ip(self.lb_id)
    # create our hammer-time minions
    self.logging.info("Creating salt minions...")
    _shell('salt-cloud -C%s -m%s -y' % (self.args.cloudconfig, self.args.cloudmap))
    # configure them
    self.logging.info("Configuring our hellish minions...mwa ha ha")
    _shell('salt *lbaas-hellraiser* state.highstate')
    # start the hammer-storm!
    self.logging.info("Stop!")
    self.logging.info("Hammer time!")
    self.logging.info("Beginning test runs on minions...")
    test_url = 'https://%s/earth2kb.jpg' % (self.lb_addr)
    _shell('salt *lbaas-hellraiser* cmd.run cwd=/home/ubuntu "python hellraiser.py %s"' % (test_url))
    # get lbaas logs
    log_status = self.driver.get_logs(self.lb_id,
                                      auth_token=self.swift_auth_token,
                                      obj_endpoint=self.swift_endpoint,
                                      obj_basepath=self.args.swiftbasepath)
    # validate the loadbalancer
    lbaas_utils.validate_loadBalancer(self)
def test_updateLoadBalancer(self):
    """Apply self.update_data to the loadbalancer and re-validate it."""
    # updates are only accepted once the lb is ACTIVE
    lbaas_utils.wait_for_active_status(self)
    self.actual_status = self.driver.update_lb(self.lb_id, self.update_data)
    # keep the expected name in sync with what was just pushed
    if 'name' in self.update_data:
        self.lb_name = self.update_data['name']
    lbaas_utils.validate_loadBalancer(self)
def test_Monitoring(self):
    """Drive the health-monitor endpoint through its configured variants."""
    # build or adopt the loadbalancer under test
    if not self.args.lbid:
        (self.create_result, self.actual_status,
         self.lb_id, self.lb_addr) = self.driver.create_lb(
            self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
        self.logging.info('load balancer id: %s' % self.lb_id)
        self.logging.info('load balancer ip addr: %s' % self.lb_addr)
        lbaas_utils.validate_loadBalancer(self)
    else:
        self.logging.info("Using user-supplied loadbalancer: %s" % self.args.lbid)
        self.lb_id = self.args.lbid
        self.lb_addr = self.get_floating_ip(self.lb_id)
    # validate the loadbalancer
    lbaas_utils.validate_loadBalancer(self)
    self.logging.info("Getting monitor data for default monitor...")
    output, status = self.driver.get_monitor(self.lb_id)
    self.assertEqual(
        status, '200',
        msg="ERROR: problem w/ loadbalancer: %s monitor. Received status: %s: %s"
            % (self.lb_id, status, output))
    self.assertEqual(
        output, self.default_monitor,
        msg="ERROR: problem with default monitor. Expected: %s, Actual: %s"
            % (self.default_monitor, output))
    # walk the monitor variants, verifying each accepted update sticks
    for monitor_set in self.monitor_data:
        self.logging.info("Testing monitor variant: %s..." % monitor_set['description'])
        monitor = monitor_set['monitor']
        output, status = self.driver.update_monitor(self.lb_id, monitor)
        self.logging.info("\tStatus: %s || output: %s" % (status, output))
        current_monitor, current_status = self.driver.get_monitor(self.lb_id)
        if status == '202':
            # we have a positive result!
            self.assertEqual(
                current_monitor, monitor,
                msg="Loadbalancer: %s monitor not matching expected value: Expected: %s || Actual %s"
                    % (self.lb_id, monitor, current_monitor))
            self.logging.info("\tTest monitor: %s" % monitor)
            self.logging.info("\tCurrent monitor: %s" % current_monitor)
        lbaas_utils.wait_for_active_status(self)
def test_loadBalancerFuncs(self):
    """Scale the backend node set and hit each test page with siege."""
    # start with a single-node loadbalancer
    (self.create_result, self.actual_status,
     self.lb_id, self.lb_addr) = self.driver.create_lb(
        self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' % self.lb_id)
    self.logging.info('load balancer ip addr: %s' % self.lb_addr)
    lbaas_utils.validate_loadBalancer(self)
    # iterate through backend node sets and run siege
    for node_count in self.node_counts:
        self.logging.info("Testing with %s nodes" % node_count)
        self.logging.info("*" * 80)
        if node_count != 1:
            # nodes[0] already exists; add the remaining backends
            extra = self.node_pool[len(self.nodes):node_count]
            self.add_node_result, self.actual_status = self.driver.add_nodes(self.lb_id, extra)
            if self.actual_status == '202':
                # good update, keep our expected node list in sync
                self.nodes += extra
                self.logging.info("Current node list:")
                nodes = self.driver.list_lb_nodes(self.lb_id)
                self.logging.info(nodes)
                lbaas_utils.validate_loadBalancer(self)
            else:
                self.assertEqual(
                    self.actual_status, '202',
                    msg="Adding nodes to loadbalancer %s failed with status: %s"
                        % (self.lb_id, self.actual_status))
        # now we run siege!
        self.logging.info("Beginning siege tests...")
        for page_set in self.pages:
            page_path = os.path.join(self.lb_addr, page_set['path'])
            self.logging.info("Testing page: %s, %s" % (page_path, page_set['description']))
            self.logging.info("Testing with %s nodes" % node_count)
            cmd = 'siege http://%s -d1 -r%s -c%s -q ' % (page_path, self.requests, self.concurrency)
            self.logging.info("test command: %s" % cmd)
            status, output = commands.getstatusoutput(cmd)
            self.logging.info("status: %s" % status)
            self.logging.info("output: %s" % output)
def test_LoadBalancerLogs(self):
    """Create a loadbalancer and archive its logs to swift twice."""
    (self.create_result, self.actual_status,
     self.lb_id, self.lb_addr) = self.driver.create_lb(
        self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' % self.lb_id)
    self.logging.info('load balancer ip addr: %s' % self.lb_addr)
    lbaas_utils.wait_for_active_status(self, active_wait_time=180)
    self.logging.info('gathering logs, 1st attempt...')
    self.actual_status = self.driver.get_logs(self.lb_id,
                                              auth_token=self.swift_auth_token,
                                              obj_endpoint=self.swift_endpoint,
                                              obj_basepath=self.args.swiftbasepath)
    self.assertEqual(
        str(self.actual_status), str(self.expected_status),
        msg="ERROR: Attempt to gather lb logs produced status: %s. Expected status: %s"
            % (self.actual_status, self.expected_status))
    lbaas_utils.validate_loadBalancer(self)
    # a second pass proves log gathering is repeatable
    lbaas_utils.wait_for_active_status(self, active_wait_time=180)
    self.logging.info('gathering logs, 2nd attempt...')
    self.actual_status = self.driver.get_logs(self.lb_id,
                                              auth_token=self.swift_auth_token,
                                              obj_endpoint=self.swift_endpoint,
                                              obj_basepath=self.args.swiftbasepath)
    self.assertEqual(
        str(self.actual_status), str(self.expected_status),
        msg="ERROR: Attempt to gather lb logs produced status: %s. Expected status: %s"
            % (self.actual_status, self.expected_status))
def test_BetaRayBill(self):
    """Hammer a loadbalancer from a fleet of salt minions."""
    def _run_logged(cmd):
        # execute a shell command, logging command/status/output
        status, output = commands.getstatusoutput(cmd)
        self.logging.info("Command: %s" % cmd)
        self.logging.info("Status: %s" % status)
        self.logging.info("Output: %s" % output)
        return status, output

    # Create our loadbalancer (or adopt the user-specified one)
    if not self.args.lbid:
        (self.create_result, self.actual_status,
         self.lb_id, self.lb_addr) = self.driver.create_lb(
            self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
        self.logging.info('load balancer id: %s' % self.lb_id)
        self.logging.info('load balancer ip addr: %s' % self.lb_addr)
        lbaas_utils.validate_loadBalancer(self)
    else:
        self.logging.info("Using user-supplied loadbalancer: %s" % self.args.lbid)
        self.lb_id = self.args.lbid
        self.lb_addr = self.get_floating_ip(self.lb_id)
    # create our hammer-time minions
    self.logging.info("Creating salt minions...")
    _run_logged('salt-cloud -C%s -m%s -y' % (self.args.cloudconfig, self.args.cloudmap))
    # configure them
    self.logging.info("Configuring our hellish minions...mwa ha ha")
    _run_logged('salt *lbaas-hellraiser* state.highstate')
    # start the hammer-storm!
    self.logging.info("Stop!")
    self.logging.info("Hammer time!")
    self.logging.info("Beginning test runs on minions...")
    test_url = 'https://%s/earth2kb.jpg' % (self.lb_addr)
    _run_logged('salt *lbaas-hellraiser* cmd.run cwd=/home/ubuntu "python hellraiser.py %s"' % (test_url))
    # get lbaas logs
    log_status = self.driver.get_logs(self.lb_id,
                                      auth_token=self.swift_auth_token,
                                      obj_endpoint=self.swift_endpoint,
                                      obj_basepath=self.args.swiftbasepath)
    # validate the loadbalancer
    lbaas_utils.validate_loadBalancer(self)
def test_addNodes(self):
    """Add self.add_node_data to the loadbalancer and re-validate."""
    # lb must be ACTIVE before node updates are accepted
    lbaas_utils.wait_for_active_status(self)
    self.add_node_result, self.actual_status = self.driver.add_nodes(self.lb_id, self.add_node_data)
    disabled_list = []
    if self.actual_status in self.good_statuses:
        # good update: expected node set becomes initial + added nodes,
        # and any DISABLED node's address must be excluded from traffic checks
        self.nodes = self.init_nodes + self.add_node_data
        disabled_list = [node['address'] for node in self.nodes
                         if node.get('condition') == 'DISABLED'
                         and 'address' in node]
    lbaas_utils.validate_loadBalancer(self, disabled_list)
def test_monitorLoadBalancer(self):
    """Stop/start libra_worker via fabric and watch the lb status follow."""
    def _fab(action):
        # run the given libra_worker fabric task against the lb host
        cmd = "fab --no-pty -H %s %s" % (self.lb_addr, action)
        status, output = commands.getstatusoutput(cmd)
        self.logging.info("Command: %s" % cmd)
        self.logging.info("Status: %s" % status)
        self.logging.info("Output: %s" % output)

    # Create our loadbalancer
    (self.create_result, self.actual_status,
     self.lb_id, self.lb_addr) = self.driver.create_lb(
        self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' % self.lb_id)
    self.logging.info('load balancer ip addr: %s' % self.lb_addr)
    lbaas_utils.validate_loadBalancer(self)
    # stop the worker; the api should flag the device as ERROR
    self.logging.info("Stopping libra_worker on lb: %s address: %s"
                      % (self.lb_id, self.lb_addr))
    _fab("stop_libra_worker")
    self.logging.info(
        "Wait / poll api server for loadbalancer to be set to ERROR status...")
    lbaas_utils.wait_for_active_status(self, self.lb_id,
                                       active_wait_time=240,
                                       desired_status='ERROR')
    # restart the worker; status should recover
    self.logging.info("Starting libra_worker on lb: %s address: %s"
                      % (self.lb_id, self.lb_addr))
    _fab("start_libra_worker")
    self.logging.info(
        "Wait / poll api server for loadbalancer to be set to ACTIVE status...")
    lbaas_utils.wait_for_active_status(self, self.lb_id,
                                       active_wait_time=240,
                                       desired_status='ACTIVE')
    # one final lb validation
    lbaas_utils.validate_loadBalancer(self)
def test_addNodes(self):
    """Append self.add_node_data to the lb's backends and validate."""
    # updates require an ACTIVE loadbalancer
    lbaas_utils.wait_for_active_status(self)
    self.add_node_result, self.actual_status = self.driver.add_nodes(
        self.lb_id, self.add_node_data)
    disabled_list = []
    if self.actual_status in self.good_statuses:
        # accepted: track the enlarged node set and collect addresses of
        # nodes that were added DISABLED
        self.nodes = self.init_nodes + self.add_node_data
        for entry in self.nodes:
            if entry.get('condition') == 'DISABLED' and 'address' in entry:
                disabled_list.append(entry['address'])
    lbaas_utils.validate_loadBalancer(self, disabled_list)
def test_loadBalancerFuncs(self):
    """Grow the backend pool stepwise and siege every configured page."""
    # Create our loadbalancer w/ one node to start
    (self.create_result, self.actual_status,
     self.lb_id, self.lb_addr) = self.driver.create_lb(
        self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' % self.lb_id)
    self.logging.info('load balancer ip addr: %s' % self.lb_addr)
    lbaas_utils.validate_loadBalancer(self)
    # iterate through backend node sets and run siege
    for node_count in self.node_counts:
        self.logging.info("Testing with %s nodes" % node_count)
        self.logging.info("*" * 80)
        if node_count != 1:
            # we already have nodes[0]; append the rest from the pool
            new_backends = self.node_pool[len(self.nodes):node_count]
            self.add_node_result, self.actual_status = self.driver.add_nodes(
                self.lb_id, new_backends)
            if self.actual_status == '202':
                # good update, we need to update our expected nodes
                self.nodes += new_backends
                self.logging.info("Current node list:")
                nodes = self.driver.list_lb_nodes(self.lb_id)
                self.logging.info(nodes)
                lbaas_utils.validate_loadBalancer(self)
            else:
                self.assertEqual(
                    self.actual_status, '202',
                    msg="Adding nodes to loadbalancer %s failed with status: %s"
                        % (self.lb_id, self.actual_status))
        # now we run siege!
        self.logging.info("Beginning siege tests...")
        for page_set in self.pages:
            target = os.path.join(self.lb_addr, page_set['path'])
            self.logging.info("Testing page: %s, %s" % (target, page_set['description']))
            self.logging.info("Testing with %s nodes" % node_count)
            cmd = 'siege http://%s -d1 -r%s -c%s -q ' % (target, self.requests, self.concurrency)
            self.logging.info("test command: %s" % cmd)
            status, output = commands.getstatusoutput(cmd)
            self.logging.info("status: %s" % status)
            self.logging.info("output: %s" % output)
def test_modifyNodes(self):
    """Test node modification paths: disable/enable succeed; ip, port,
    non-existent-node, and invalid-condition updates are rejected with the
    expected HTTP statuses.
    """
    # wait until our lb is ACTIVE before trying to update it
    lbaas_utils.wait_for_active_status(self)
    # modify/disable a node on our loadbalancer
    self.logging.info("Testing node disable...")
    nodes = self.driver.list_lb_nodes(self.lb_id)
    mod_node = nodes['nodes'][0]
    mod_node_id = mod_node['id']
    mod_node_addr = mod_node['address']
    mod_node_data = {'condition': 'DISABLED'}
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id,
                                                 mod_node_data)
    lbaas_utils.validate_loadBalancer(self, [mod_node_addr])
    # re-enable the node
    self.logging.info("Testing re-enable of node...")
    mod_node_data = {'condition': 'ENABLED'}
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id,
                                                 mod_node_data)
    lbaas_utils.validate_loadBalancer(self)
    # modify ip / address -- must be rejected with 400
    self.logging.info("Testing update of node ip...")
    mod_node_data = {'address': '127.0.0.1'}
    expected_status = '400'
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id,
                                                 mod_node_data)
    self.assertEqual(
        str(self.actual_status), expected_status,
        msg="ERROR: Attempt to update node ip address succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_status))
    # modify port -- must be rejected with 400
    self.logging.info("Testing update of node port...")
    mod_node_data = {'port': '443'}
    expected_status = '400'
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id,
                                                 mod_node_data)
    self.assertEqual(
        str(self.actual_status), expected_status,
        msg="ERROR: Attempt to update node port succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_status))
    # update of non-existent node -- must 404
    self.logging.info("Testing update of non-existent node...")
    mod_node_data = {'condition': "DISABLED"}
    expected_status = '404'
    self.actual_status = self.driver.modify_node(self.lb_id, '0',
                                                 mod_node_data)
    # FIX: this assertion's message was copy-pasted from the port test and
    # wrongly claimed a "node port" update.
    self.assertEqual(
        str(self.actual_status), expected_status,
        msg="ERROR: Attempt to update non-existent node succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_status))
    # invalid condition value -- api may respond 400 or 512
    self.logging.info("Testing bad condition...")
    mod_node_data = {'condition': 'TASERED_BADLY'}
    expected_statuses = ['400', '512']
    self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id,
                                                 mod_node_data)
    # FIX: the message formatted the stale scalar ``expected_status`` ('404')
    # rather than ``expected_statuses``, the list this check asserts against.
    self.assertTrue(
        str(self.actual_status) in expected_statuses,
        msg="ERROR: Attempt to update to bad condition succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_statuses))
def test_loadBalancerFuncs(self):
    """ test creation of loadbalancers and their functions for libra """
    # Create our loadbalancer
    self.create_result, self.actual_status, self.lb_id, self.lb_addr = self.driver.create_lb(
        self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' % self.lb_id)
    self.logging.info('load balancer ip addr: %s' % self.lb_addr)
    lbaas_utils.validate_loadBalancer(self)
    ##############
    # test updates
    ##############
    # Each update variant carries update_data and optionally an
    # expected_status / algorithm override.
    self.logging.info("Testing update variants...")
    if self.functional_inputs and 'update_variants' in self.functional_inputs and self.actual_status not in self.bad_statuses:
        for update_variant in self.functional_inputs['update_variants']:
            self.logging.info("Update variant description: %s"
                              % update_variant['description'])
            self.update_data = update_variant['update_data']
            if 'expected_status' in update_variant:
                self.expected_status = update_variant['expected_status']
            else:
                self.expected_status = 202
            # only adopt the variant's algorithm when it is a known-good one
            if 'algorithm' in update_variant and update_variant[
                    'algorithm'] in self.good_algorithms:
                self.algorithm = update_variant['algorithm']
            # wait until our lb is ACTIVE before trying to update it
            lbaas_utils.wait_for_active_status(self)
            # update our loadbalancer
            self.actual_status = self.driver.update_lb(
                self.lb_id, self.update_data)
            # keep the expected name in sync with an accepted rename
            if 'name' in self.update_data and self.actual_status not in self.bad_statuses:
                self.lb_name = self.update_data['name']
            lbaas_utils.validate_loadBalancer(self)
    ################
    # test add_nodes
    ################
    self.expected_status = '202'
    if self.functional_inputs and 'add_node_variants' in self.functional_inputs:
        # get list of original lb nodes (each node = dict)
        original_nodes = self.driver.list_lb_nodes(self.lb_id)['nodes']
        self.original_nodes = copy.deepcopy(self.nodes)
        for variant in self.functional_inputs['add_node_variants']:
            self.logging.info("Testing add / remove node functionality...")
            self.logging.info("Add node variant: %s"
                              % (variant['description']))
            add_nodes = []
            if 'nodes' in variant:
                # explicit node list supplied by the variant
                add_nodes = variant['nodes']
            else:
                node_count = variant['node_count']
                # 'MAX_BACKEND_COUNT' (optionally '+1' to overflow) expands
                # to the configured maximum minus what we already have
                if str(variant['node_count']).startswith('MAX_BACKEND_COUNT'):
                    node_count = int(self.args.maxbackendnodes) - len(
                        self.nodes)
                    if str(variant['node_count']).endswith('+1'):
                        node_count += 1
                node_pool = self.test_nodes
                # we have a node_count value and pull from default_values['nodes']
                if node_count < len(node_pool):
                    add_nodes = node_pool[:node_count]
                else:
                    # pool smaller than requested: cycle through it
                    add_nodes = []
                    idx = 0
                    while len(add_nodes) < node_count:
                        add_nodes.append(node_pool[idx])
                        idx += 1
                        if idx == len(node_pool):
                            idx = 0
            # add nodes to our loadbalancer
            if self.args.verbose:
                self.logging.info("Pre-add node list:")
                nodes = self.driver.list_lb_nodes(self.lb_id)
                self.logging.info(nodes)
            self.add_node_result, self.actual_status = self.driver.add_nodes(
                self.lb_id, add_nodes)
            disabled_list = []
            if self.actual_status == '202':
                # good update, we need to update our expected nodes
                self.nodes += add_nodes
                for node in self.nodes:
                    if 'condition' in node and node[
                            'condition'] == 'DISABLED':
                        if 'address' in node:
                            disabled_list.append(node['address'])
                self.logging.info("Post-add node list:")
                nodes = self.driver.list_lb_nodes(self.lb_id)
                self.logging.info(nodes)
                # maybe delete this wait!
                lbaas_utils.wait_for_active_status(self)
                lbaas_utils.validate_loadBalancer(self, disabled_list)
            # remove nodes / reset to original set up
            current_nodes = self.driver.list_lb_detail if False else self.driver.list_lb_nodes(
                self.lb_id)['nodes']
            for orig_node in original_nodes:
                if orig_node in current_nodes:
                    current_nodes.remove(orig_node)
            # retry each delete up to 60 times at 1s intervals; deletes can
            # be rejected while the lb is mid-update
            for current_node in current_nodes:
                attempts_remain = 60
                time_wait = 1
                node_exists = True
                node_id = current_node['id']
                if self.args.verbose:
                    self.logging.info("Removing node id: %s" % node_id)
                while attempts_remain and node_exists:
                    result = self.driver.delete_lb_node(
                        self.lb_id, node_id)
                    if result != '202':
                        attempts_remain -= 1
                        time.sleep(time_wait)
                    else:
                        node_exists = False
                self.assertEqual(
                    result, '202',
                    msg=
                    "ERROR: Node id: %s deletion on loadbalancer id: %s failed"
                    % (node_id, self.lb_id))
            self.nodes = copy.deepcopy(self.original_nodes)
    ###################
    # test modify_nodes
    ###################
    # NOTE(review): self.original_nodes is only assigned inside the
    # add_node_variants branch above -- presumably also set elsewhere;
    # verify it exists when that branch is skipped.
    if self.original_nodes:
        self.nodes = self.original_nodes
    if self.functional_inputs and 'modify_variants' in self.functional_inputs:
        self.logging.info("Testing modify nodes")
        # wait until our lb is ACTIVE before trying to update it
        lbaas_utils.wait_for_active_status(self)
        # modify/disable a node on our loadbalancer
        self.logging.info("Testing node disable...")
        self.expected_status = 202
        nodes = self.driver.list_lb_nodes(self.lb_id)
        mod_node = nodes['nodes'][0]
        mod_node_id = mod_node['id']
        mod_node_addr = mod_node['address']
        mod_node_data = {'condition': 'DISABLED'}
        self.actual_status = self.driver.modify_node(
            self.lb_id, mod_node_id, mod_node_data)
        lbaas_utils.validate_loadBalancer(self, [mod_node_addr])
        # re-enable the node
        self.logging.info("Testing re-enable of node...")
        mod_node_data = {'condition': 'ENABLED'}
        self.actual_status = self.driver.modify_node(
            self.lb_id, mod_node_id, mod_node_data)
        lbaas_utils.validate_loadBalancer(self)
        # DISABLED -
        # libra currently does not actually update ip addrs or ports of nodes
        # but it does return a 202 / incorrect status code
        """
        # modify ip / address
        self.logging.info("Testing update of node ip...")
        mod_node_data = {'address': '127.0.0.1'}
        expected_status = '400'
        if self.args.badstatus:
            expected_status = self.args.badstatus
        self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
        nodes = self.driver.list_lb_nodes(self.lb_id)
        self.assertEqual(str(self.actual_status), expected_status, msg = "ERROR: Attempt to update node ip address succeeded with status: %s. Expected status: %s" %(self.actual_status, expected_status))
        # modify port
        self.logging.info("Testing update of node port...")
        mod_node_data = {'port': '443'}
        expected_status = '400'
        if self.args.badstatus:
            expected_status = self.args.badstatus
        self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
        self.assertEqual(str(self.actual_status), expected_status, msg = "ERROR: Attempt to update node port succeeded with status: %s. Expected status: %s" %(self.actual_status, expected_status))
        """
        # update of non-existent node
        self.logging.info("Testing update of non-existent node...")
        mod_node_data = {'condition': "DISABLED"}
        expected_status = '404'
        if self.args.badstatus:
            expected_status = self.args.badstatus
        self.actual_status = self.driver.modify_node(
            self.lb_id, '0', mod_node_data)
        self.assertEqual(
            str(self.actual_status),
            expected_status,
            msg=
            "ERROR: Attempt to update non-existent node succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_status))
        # lower-case condition
        expected_status = '400'
        if self.args.badstatus:
            expected_status = self.args.badstatus
        self.logging.info("Testing lowercase condition...")
        mod_node_data = {'condition': 'disabled'}
        self.actual_status = self.driver.modify_node(
            self.lb_id, mod_node_id, mod_node_data)
        self.assertEqual(
            str(self.actual_status),
            expected_status,
            msg=
            "ERROR: Usage of lowercase node condition succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_status))
        self.expected_status = '202'
        # negative / bad node condition
        self.logging.info("Testing bad condition...")
        mod_node_data = {'condition': 'TASERED_BADLY'}
        expected_statuses = ['400', '512', '404']
        self.actual_status = self.driver.modify_node(
            self.lb_id, mod_node_id, mod_node_data)
        self.assertTrue(
            str(self.actual_status) in expected_statuses,
            msg=
            "ERROR: Attempt to update to bad condition succeeded with status: %s. Expected status: %s"
            % (self.actual_status, expected_statuses))
    ################
    # test multi_lb
    ################
    """ test of multiple lb's per haproxy device """
    if self.functional_inputs and 'multiLB_variants' in self.functional_inputs:
        self.logging.info(
            "Testing multiple loadbalancers on one device...")
        self.expected_status = '202'
        self.main_lb_id = copy.deepcopy(self.lb_id)
        for multi_variant in self.functional_inputs['multiLB_variants']:
            self.lb_name2 = multi_variant['name2']
            self.nodes2 = multi_variant['nodes2']
            # wait until our lb is ACTIVE before trying to update it
            # test / validate lb1
            self.actual_status = '202'
            lbaas_utils.wait_for_active_status(self, self.lb_id)
            lbaas_utils.validate_loadBalancer(self,
                                              multi=True,
                                              multi_id=self.lb_id,
                                              multi_name=self.lb_name,
                                              multi_nodes=self.nodes)
            result_data = self.driver.list_lb_detail(self.lb_id)
            if 'virtualIps' in result_data:
                self.vip = result_data['virtualIps'][0]['id']
            else:
                # client has different key
                self.vip = ast.literal_eval(result_data['ips'])[0]['id']
            # create lb2
            self.logging.info('Creating load balancer2...')
            self.create_result, self.actual_status, self.lb_id2, self.lb_addr = self.driver.create_lb(
                self.lb_name2, self.nodes2, self.algorithm,
                self.bad_statuses, self.vip)
            if self.args.verbose:
                self.logging.info("STATUS: %s" % self.actual_status)
                self.logging.info("RESULT: %s" % self.create_result.text)
            lbaas_utils.wait_for_active_status(self, self.lb_id2)
            self.logging.info('load balancer2 id: %s' % (self.lb_id2))
            lbaas_utils.validate_loadBalancer(self,
                                              multi=True,
                                              multi_id=self.lb_id2,
                                              multi_name=self.lb_name2,
                                              multi_nodes=self.nodes2)
            # tear lb2 down so the next variant starts clean
            self.logging.info("Deleting loadbalancer: %s" % self.lb_id2)
            result = self.driver.delete_lb(self.lb_id2)
def test_loadBalancerFuncs(self):
    """ test libra loadbalancers via apachebench

    Creates a loadbalancer with the initial node set, then for each
    backend size in self.node_counts grows the node pool and drives
    traffic through the lb with apache-bench ('ab') against every page
    in self.pages.  Afterwards the lb is deleted (unless --cleanupoff)
    and the totals parsed from the last ab run are checked against the
    metering server.
    """
    # Create our loadbalancer w/ one node to start
    self.create_result, self.actual_status, self.lb_id, self.lb_addr = self.driver.create_lb(self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' %self.lb_id)
    self.logging.info('load balancer ip addr: %s' %self.lb_addr)
    lbaas_utils.validate_loadBalancer(self)
    # iterate through backend node sets and run apachebench
    for node_count in self.node_counts:
        self.logging.info("Testing with %s nodes" %node_count)
        self.logging.info("*"*80)
        if node_count != 1:
            # we have nodes[0] already, we add subsequent nodes to it
            add_nodes = self.node_pool[len(self.nodes):node_count]
            # add nodes to our loadbalancer
            self.add_node_result, self.actual_status = self.driver.add_nodes(self.lb_id, add_nodes)
            if self.actual_status =='202':
                # good update, we need to update our expected nodes
                self.nodes += add_nodes
                self.logging.info("Current node list:")
                nodes = self.driver.list_lb_nodes(self.lb_id)
                self.logging.info(nodes)
                lbaas_utils.validate_loadBalancer(self)
            else:
                # actual_status is already known != '202' here, so this
                # assertEqual is an unconditional failure with a message
                self.assertEqual(self.actual_status, '202', msg = "Adding nodes to loadbalancer %s failed with status: %s" %(self.lb_id, self.actual_status))
        # now we run apache-bench!
        self.logging.info("Beginning apache-bench tests...")
        for page_set in self.pages:
            page_file = page_set['path']
            page_desc = page_set['description']
            # NOTE(review): os.path.join is used to build the host/path part
            # of a URL; fine on POSIX runners but not a real URL join.
            page_path = os.path.join(self.lb_addr, page_file)
            self.logging.info("Testing page: %s, %s" %(page_path, page_desc))
            self.logging.info("Testing with %s nodes" %node_count)
            cmd = 'ab -q -r -c%s -n%s http://%s' %(self.concurrency, self.requests, page_path)
            self.logging.info("test command: %s" %cmd)
            status, output = commands.getstatusoutput(cmd)
            self.logging.info("status: %s" %status)
            self.logging.info("output: %s" %output)
    # determine if we want to wait for m&b testing or not
    self.logging.info(self.args.testmab)
    if self.args.testmab == True:
        report_wait = 180
        self.logging.info("Waiting %s seconds for metering testing..." %report_wait)
        time.sleep(report_wait)
    # delete the lb
    if self.args.cleanupoff:
        self.logging.info("NOT deleting loadbalancer: %s per user-specified flag..." %self.lb_id)
    else:
        self.logging.info("Deleting loadbalancer: %s" %self.lb_id)
        result = self.driver.delete_lb(self.lb_id)
    # get total html bytes and total bytes
    # NOTE(review): 'output' is the ab output of the LAST page/node
    # combination run above -- presumably intentional; confirm.
    total_bytes = 0
    total_requests = 0
    for line in output.split('\n'):
        line = line.replace('bytes','')
        if line.strip().startswith('Complete requests:'):
            total_requests = int(line.split(':')[1].strip())
            self.logging.info("Complete requests: %s" %(total_requests))
        if line.strip().startswith('Total transferred:'):
            total_bytes = int(line.split(':')[1].strip())
            self.logging.info("Total bytes: %s" %(total_bytes))
    # get total bytes / messages from metering server
    metering_result = lbaas_utils.validate_metering(self, total_requests, total_bytes)
    self.assertTrue(metering_result)
def test_healLoadBalancer(self):
    """ test creation of loadbalancers for libra

    Heal / failover test: records the nova vm currently backing the
    loadbalancer, deletes that vm out from under libra, then polls nova
    and the floating-ip data until a replacement vm (new name/id) has
    been assigned the loadbalancer's floating ip.
    """
    # Create our loadbalancer (or reuse one supplied via --lbid)
    if not self.args.lbid:
        self.create_result, self.actual_status, self.lb_id, self.lb_addr = self.driver.create_lb(self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
        self.logging.info('load balancer id: %s' %self.lb_id)
        self.logging.info('load balancer ip addr: %s' %self.lb_addr)
        lbaas_utils.validate_loadBalancer(self)
    else:
        self.logging.info("Using user-supplied loadbalancer: %s" %self.args.lbid)
        self.lb_id = self.args.lbid
        self.lb_addr = self.get_floating_ip(self.lb_id)
    # wait a bit if we want to show off
    if self.args.demowaittime:
        self.logging.info("Sleeping %s seconds for demo / manual testing" %self.args.demowaittime)
        time.sleep(self.args.demowaittime)
    # get the nova name for our loadbalancer
    orig_nova_name = self.get_nova_name()
    # get nova id
    orig_nova_id = self.get_nova_id(orig_nova_name)
    # check floating_ip
    orig_floating_ip_output = self.check_floating_ip()
    # use our nova info and delete the haproxy vm
    nova_sleep = 15
    self.logging.info("-"*80)
    self.logging.info("Nova info for loadbalancer: %s, ip_addr: %s" %(self.lb_id, self.lb_addr))
    self.logging.info("Nova name: %s" %orig_nova_name)
    self.logging.info("Nova id: %s" %orig_nova_id)
    self.logging.info("Floating ip data: %s" %orig_floating_ip_output)
    self.logging.info("-"*80)
    self.logging.info("")
    self.logging.info("Deleting nova node for lb: %s..." %(self.lb_id))
    cmd ='nova --insecure --os-username=%s --os-tenant-id=%s --os-region-name=%s --os-password=%s --os-auth-url=%s delete %s' %(self.args.nodesusername, self.args.nodestenantid, self.args.nodesregionname, self.args.nodespassword, self.args.nodesauthurl, orig_nova_id)
    status, output = commands.getstatusoutput(cmd)
    self.logging.info("Command: %s" %cmd)
    self.logging.info("Status: %s" %status)
    self.logging.info("Output: %s" %output)
    self.logging.info("Sleeping %s seconds for nova delete to take effect..." %nova_sleep)
    # TODO: add in nova list verification of no node (?)
    # TODO: test the database for the device?
    time.sleep(nova_sleep)
    # poll until the replacement vm owns the floating ip, bounded by both
    # an attempt counter and a wall-clock deadline
    time_wait = 1
    attempts_remain = 100
    max_time = 600
    lb_ready = False
    suspected_bad = False
    first_run = True
    start_time = time.time()
    new_nova_name = orig_nova_name
    new_nova_id = orig_nova_id
    self.logging.info("Scanning nova and libra database for vm / loadbalancer status...")
    while not lb_ready and attempts_remain and ((time.time()-start_time) <= max_time):
        # get new nova name
        new_nova_name = self.get_nova_name(quiet=True)
        # get new nova id / check floating ip
        new_nova_id = self.get_nova_id(new_nova_name, quiet=True)
        # check floating ip
        floating_ip_output = self.check_floating_ip(quiet=True)
        # periodic status report (every 10th attempt after the first pass)
        if attempts_remain%10 ==0 and not first_run:
            self.logging.info("-"*80)
            self.logging.info("Status check:")
            self.logging.info("Attempts remaining: %d" %attempts_remain)
            self.logging.info("Time waited: %f" %(time.time() - start_time))
            self.logging.info("Nova name: %s" %new_nova_name)
            self.logging.info("Nova id: %s" %new_nova_id)
            self.logging.info("Floating ip data: %s" %floating_ip_output)
            self.logging.info("-"*80)
            self.logging.info("")
        # healed when a differently-named vm exists and its id appears in
        # the floating-ip data for this loadbalancer
        if new_nova_name != orig_nova_name and new_nova_id and new_nova_id in floating_ip_output:
            self.logging.info("-"*80)
            self.logging.info("New nova node has been assigned loadbalancer: %s's floating ip" %(self.lb_id))
            self.logging.info("-"*80)
            self.logging.info("")
            lb_ready = True
        else:
            suspected_bad = True
            if first_run:
                self.logging.info("Will try up to: %d times for the loadbalancer to be functional (~%d minutes), please be patient..." %(attempts_remain*time_wait, (max_time/60)))
                first_run = False
        time.sleep(time_wait)
        attempts_remain -= 1
    stop_time = time.time()
    expended_time = stop_time - start_time
    self.logging.info("Time for loadbalancer: %s to be ready: %f" %(self.lb_id, expended_time))
    self.logging.info("Gathering new nova attributes...")
    # list new nova name
    new_nova_name = self.get_nova_name()
    # get new nova id / check floating ip
    new_nova_id = self.get_nova_id(new_nova_name)
    # check floating ip
    new_floating_ip_output = self.check_floating_ip()
    self.logging.info("-"*80)
    self.logging.info("New nova attributes for loadbalancer: %s ip_addr: %s:" %(self.lb_id, self.lb_addr))
    self.logging.info("New nova name: %s" %new_nova_name)
    self.logging.info("Original nova name: %s" %orig_nova_name)
    self.logging.info("New nova id: %s" %new_nova_id)
    self.logging.info("Original nova id: %s" %orig_nova_id)
    self.logging.info("New floating ip info: %s" %new_floating_ip_output)
    self.logging.info("Original floating ip info: %s" %orig_floating_ip_output)
    self.logging.info("-"*80)
    self.logging.info("")
    self.logging.info("")
    self.assertTrue(lb_ready, msg = "WARNING: loadbalancer %s not ready in %f seconds" %(self.lb_id, expended_time))
    # only re-validate lbs we created ourselves
    if not self.args.lbid:
        lbaas_utils.validate_loadBalancer(self)
    # wait a bit if we want to show off
    if self.args.demowaittime:
        self.logging.info("Sleeping %s seconds for demo / manual testing" %self.args.demowaittime)
        time.sleep(self.args.demowaittime)
def test_createLoadBalancer(self):
    """ gathering time until we have a good loadbalancer and counting bad devices

    Creates and deletes a loadbalancer test_iterations times, timing how
    long each takes to serve traffic over HTTP.  Devices that raise
    connection errors before becoming ready are counted as 'suspected
    bad'; devices that never become ready within max_time are counted as
    failures.  A summary of the collected timings is logged at the end.
    """
    iterations = []         # time-to-ready for devices that never errored
    bad_iterations = []     # errored first, but recovered within max_time
    failed_iterations = []  # never became ready within max_time
    bad_count = 0
    fail_count = 0
    test_iterations = 25
    for i in range(test_iterations):
        self.logging.info("Iteration: %d" %i)
        # Create our loadbalancer
        lb_ready = False
        suspected_bad = False
        time_wait = 1           # seconds between polls
        attempts_remain = 100
        max_time = 300          # per-iteration wall-clock deadline (seconds)
        start_time = time.time()
        self.create_result, self.actual_status, self.lb_id, self.lb_addr = self.driver.create_lb(self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
        self.logging.info('load balancer id: %s' %self.lb_id)
        self.logging.info('load balancer ip addr: %s' %self.lb_addr)
        # must_pass=False: a build that never goes ACTIVE is recorded, not fatal
        lbaas_utils.wait_for_active_status(self, must_pass=False)
        # make sure we can get traffic from our loadbalancer
        self.logging.info(time.time()-start_time)
        self.logging.info((time.time()-start_time) < max_time)
        while not lb_ready and attempts_remain and ((time.time()-start_time) <= max_time):
            try:
                if attempts_remain%10 ==0:
                    self.logging.info("Attempts remaining: %d" %attempts_remain)
                lb_url = 'http://%s' %(self.lb_addr)
                result = requests.get(lb_url, verify=False)
                result.connection.close()
                if result:
                    lb_ready = True
            except Exception as e:
                # only log / count the first failure per device
                if not suspected_bad:
                    # FIX: log the caught exception's type, not the builtin
                    # Exception class itself
                    self.logging.info(type(e))
                    self.logging.info(e)
                    self.logging.info("loadbalancer id: %s not yet ready. Suspected bad haproxy device" %(self.lb_id))
                    # FIX: report the real deadline instead of a hard-coded
                    # '~10 minutes' (max_time is 300s here)
                    self.logging.info("Will try up to: %d times for the loadbalancer to be functional (~%d minutes), please be patient..." %(attempts_remain*time_wait, max_time/60))
                    suspected_bad = True
                    bad_count += 1
            time.sleep(time_wait)
            attempts_remain -= 1
        stop_time = time.time()
        expended_time = stop_time - start_time
        self.logging.info("Time for loadbalancer: %s to be ready: %f" %(self.lb_id, expended_time))
        if suspected_bad:
            if expended_time <= max_time:
                bad_iterations.append(expended_time)
            else:
                failed_iterations.append(expended_time)
        else:
            iterations.append(expended_time)
        if attempts_remain and expended_time <= max_time:
            lbaas_utils.validate_loadBalancer(self)
        else:
            self.logging.info("WARN: loadbalancer: %s suspected still not ready after %d seconds" %(self.lb_id, expended_time))
            fail_count += 1
        self.logging.info("Deleting loadbalancer: %s" %self.lb_id)
        self.driver.delete_lb(self.lb_id)
        time.sleep(10)
    # FIX: these statistics were gathered but never reported
    self.logging.info("="*80)
    self.logging.info("Summary of %d iterations:" %test_iterations)
    self.logging.info("Good devices: %d, times: %s" %(len(iterations), iterations))
    self.logging.info("Suspected bad devices: %d, recovered times: %s" %(bad_count, bad_iterations))
    self.logging.info("Failed / never-ready: %d, timed-out times: %s" %(fail_count, failed_iterations))
def test_loadBalancerFuncs(self):
    """ test creation of loadbalancers and their functions for libra

    Exercises, in order, against a single created loadbalancer:
      1. update variants (rename / algorithm changes) from
         functional_inputs['update_variants']
      2. add_node variants, then removal of the added nodes
      3. modify_node variants (disable / re-enable / negative cases)
      4. multi-lb variants: a second lb sharing the first lb's vip
    Each section is skipped when its key is absent from
    self.functional_inputs.
    """
    # Create our loadbalancer
    self.create_result, self.actual_status, self.lb_id, self.lb_addr = self.driver.create_lb(self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' %self.lb_id)
    self.logging.info('load balancer ip addr: %s' %self.lb_addr)
    lbaas_utils.validate_loadBalancer(self)
    ##############
    # test updates
    ##############
    self.logging.info("Testing update variants...")
    if self.functional_inputs and 'update_variants' in self.functional_inputs and self.actual_status not in self.bad_statuses:
        for update_variant in self.functional_inputs['update_variants']:
            self.logging.info("Update variant description: %s" %update_variant['description'])
            self.update_data = update_variant['update_data']
            if 'expected_status' in update_variant:
                self.expected_status = update_variant['expected_status']
            else:
                self.expected_status = 202
            if 'algorithm' in update_variant and update_variant['algorithm'] in self.good_algorithms:
                self.algorithm = update_variant['algorithm']
            # wait until our lb is ACTIVE before trying to update it
            lbaas_utils.wait_for_active_status(self)
            # update our loadbalancer
            self.actual_status = self.driver.update_lb(self.lb_id, self.update_data)
            # track a successful rename so later validation compares the new name
            if 'name' in self.update_data and self.actual_status not in self.bad_statuses:
                self.lb_name = self.update_data['name']
            lbaas_utils.validate_loadBalancer(self)
    ################
    # test add_nodes
    ################
    self.expected_status='202'
    if self.functional_inputs and 'add_node_variants' in self.functional_inputs:
        # get list of original lb nodes (each node = dict)
        original_nodes = self.driver.list_lb_nodes(self.lb_id)['nodes']
        self.original_nodes = copy.deepcopy(self.nodes)
        for variant in self.functional_inputs['add_node_variants']:
            self.logging.info("Testing add / remove node functionality...")
            self.logging.info("Add node variant: %s" %(variant['description']))
            add_nodes = []
            if 'nodes' in variant:
                # variant supplies an explicit node list
                add_nodes = variant['nodes']
            else:
                # variant supplies a count; 'MAX_BACKEND_COUNT' (optionally
                # '+1' for the over-limit case) is resolved from the args
                node_count = variant['node_count']
                if str(variant['node_count']).startswith('MAX_BACKEND_COUNT'):
                    node_count = int(self.args.maxbackendnodes) - len(self.nodes)
                    if str(variant['node_count']).endswith('+1'):
                        node_count += 1
                node_pool = self.test_nodes
                # we have a node_count value and pull from default_values['nodes']
                if node_count < len(node_pool):
                    add_nodes = node_pool[:node_count]
                else:
                    # cycle through the pool until we have node_count entries
                    add_nodes = []
                    idx = 0
                    while len(add_nodes) < node_count:
                        add_nodes.append(node_pool[idx])
                        idx += 1
                        if idx == len(node_pool):
                            idx = 0
            # add nodes to our loadbalancer
            if self.args.verbose:
                self.logging.info("Pre-add node list:")
                nodes = self.driver.list_lb_nodes(self.lb_id)
                self.logging.info(nodes)
            self.add_node_result, self.actual_status = self.driver.add_nodes(self.lb_id, add_nodes)
            disabled_list = []
            if self.actual_status =='202':
                # good update, we need to update our expected nodes
                self.nodes += add_nodes
                # collect addresses of DISABLED nodes so validation can
                # expect them to take no traffic
                for node in self.nodes:
                    if 'condition' in node and node['condition'] == 'DISABLED':
                        if 'address' in node:
                            disabled_list.append(node['address'])
                self.logging.info("Post-add node list:")
                nodes = self.driver.list_lb_nodes(self.lb_id)
                self.logging.info(nodes)
            # maybe delete this wait!
            lbaas_utils.wait_for_active_status(self)
            lbaas_utils.validate_loadBalancer(self, disabled_list)
            # remove nodes / reset to original set up
            current_nodes = self.driver.list_lb_nodes(self.lb_id)['nodes']
            for orig_node in original_nodes:
                if orig_node in current_nodes:
                    current_nodes.remove(orig_node)
            for current_node in current_nodes:
                # retry deletion until accepted or attempts exhausted
                attempts_remain = 60
                time_wait = 1
                node_exists = True
                node_id = current_node['id']
                if self.args.verbose:
                    self.logging.info("Removing node id: %s" %node_id)
                while attempts_remain and node_exists:
                    result = self.driver.delete_lb_node(self.lb_id, node_id)
                    if result != '202':
                        attempts_remain -= 1
                        time.sleep(time_wait)
                    else:
                        node_exists = False
                self.assertEqual(result, '202', msg="ERROR: Node id: %s deletion on loadbalancer id: %s failed" %(node_id, self.lb_id))
            self.nodes = copy.deepcopy(self.original_nodes)
    ###################
    # test modify_nodes
    ###################
    # NOTE(review): self.original_nodes is only assigned in the add_nodes
    # section above; presumably it is also pre-set on the instance,
    # otherwise this would raise AttributeError -- confirm.
    if self.original_nodes:
        self.nodes = self.original_nodes
    if self.functional_inputs and 'modify_variants' in self.functional_inputs:
        self.logging.info("Testing modify nodes")
        # wait until our lb is ACTIVE before trying to update it
        lbaas_utils.wait_for_active_status(self)
        # modify/disable a node on our loadbalancer
        self.logging.info("Testing node disable...")
        self.expected_status=202
        nodes = self.driver.list_lb_nodes(self.lb_id)
        mod_node = nodes['nodes'][0]
        mod_node_id = mod_node['id']
        mod_node_addr = mod_node['address']
        mod_node_data = {'condition':'DISABLED'}
        self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
        lbaas_utils.validate_loadBalancer(self, [mod_node_addr])
        # re-enable the node
        self.logging.info("Testing re-enable of node...")
        mod_node_data = {'condition':'ENABLED'}
        self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
        lbaas_utils.validate_loadBalancer(self)
        # DISABLED -
        # libra currently does not actually update ip addrs or ports of nodes
        # but it does return a 202 / incorrect status code
        """
        # modify ip / address
        self.logging.info("Testing update of node ip...")
        mod_node_data = {'address': '127.0.0.1'}
        expected_status = '400'
        if self.args.badstatus:
            expected_status = self.args.badstatus
        self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
        nodes = self.driver.list_lb_nodes(self.lb_id)
        self.assertEqual(str(self.actual_status), expected_status, msg = "ERROR: Attempt to update node ip address succeeded with status: %s. Expected status: %s" %(self.actual_status, expected_status))
        # modify port
        self.logging.info("Testing update of node port...")
        mod_node_data = {'port': '443'}
        expected_status = '400'
        if self.args.badstatus:
            expected_status = self.args.badstatus
        self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
        self.assertEqual(str(self.actual_status), expected_status, msg = "ERROR: Attempt to update node port succeeded with status: %s. Expected status: %s" %(self.actual_status, expected_status))
        """
        # update of non-existent node
        self.logging.info("Testing update of non-existent node...")
        mod_node_data = {'condition':"DISABLED"}
        expected_status = '404'
        if self.args.badstatus:
            expected_status = self.args.badstatus
        self.actual_status = self.driver.modify_node(self.lb_id, '0', mod_node_data)
        self.assertEqual(str(self.actual_status), expected_status, msg = "ERROR: Attempt to update non-existent node succeeded with status: %s. Expected status: %s" %(self.actual_status, expected_status))
        # lower-case condition
        expected_status='400'
        if self.args.badstatus:
            expected_status = self.args.badstatus
        self.logging.info("Testing lowercase condition...")
        mod_node_data = {'condition':'disabled'}
        self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
        self.assertEqual(str(self.actual_status), expected_status, msg = "ERROR: Usage of lowercase node condition succeeded with status: %s. Expected status: %s" %(self.actual_status, expected_status))
        self.expected_status='202'
        # negative / bad node condition
        self.logging.info("Testing bad condition...")
        mod_node_data = {'condition':'TASERED_BADLY'}
        expected_statuses = ['400','512', '404']
        self.actual_status = self.driver.modify_node(self.lb_id, mod_node_id, mod_node_data)
        self.assertTrue(str(self.actual_status) in expected_statuses, msg = "ERROR: Attempt to update to bad condition succeeded with status: %s. Expected status: %s" %(self.actual_status, expected_statuses))
    ################
    # test multi_lb
    ################
    """ test of multiple lb's per haproxy device """
    if self.functional_inputs and 'multiLB_variants' in self.functional_inputs:
        self.logging.info("Testing multiple loadbalancers on one device...")
        self.expected_status = '202'
        self.main_lb_id = copy.deepcopy(self.lb_id)
        for multi_variant in self.functional_inputs['multiLB_variants']:
            self.lb_name2 = multi_variant['name2']
            self.nodes2 = multi_variant['nodes2']
            # wait until our lb is ACTIVE before trying to update it
            # test / validate lb1
            self.actual_status='202'
            lbaas_utils.wait_for_active_status(self, self.lb_id)
            lbaas_utils.validate_loadBalancer(self, multi=True, multi_id=self.lb_id, multi_name=self.lb_name, multi_nodes=self.nodes)
            result_data = self.driver.list_lb_detail(self.lb_id)
            if 'virtualIps' in result_data:
                self.vip = result_data['virtualIps'][0]['id']
            else:
                # client has different key
                self.vip = ast.literal_eval(result_data['ips'])[0]['id']
            # create lb2 on the same vip so both share one haproxy device
            self.logging.info('Creating load balancer2...')
            self.create_result, self.actual_status, self.lb_id2, self.lb_addr = self.driver.create_lb(self.lb_name2, self.nodes2, self.algorithm, self.bad_statuses, self.vip)
            if self.args.verbose:
                self.logging.info("STATUS: %s" %self.actual_status)
                self.logging.info("RESULT: %s" %self.create_result.text)
            lbaas_utils.wait_for_active_status(self, self.lb_id2)
            self.logging.info('load balancer2 id: %s' %(self.lb_id2))
            lbaas_utils.validate_loadBalancer(self, multi=True, multi_id=self.lb_id2, multi_name=self.lb_name2, multi_nodes=self.nodes2)
            self.logging.info("Deleting loadbalancer: %s" %self.lb_id2)
            result = self.driver.delete_lb(self.lb_id2)
def test_healLoadBalancer(self):
    """ test creation of loadbalancers for libra

    Heal / failover test: records the nova vm currently backing the
    loadbalancer, deletes that vm out from under libra, then polls nova
    and the floating-ip data until a replacement vm (new name/id) has
    been assigned the loadbalancer's floating ip.
    """
    # Create our loadbalancer (or reuse one supplied via --lbid)
    if not self.args.lbid:
        self.create_result, self.actual_status, self.lb_id, self.lb_addr = self.driver.create_lb(
            self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
        self.logging.info('load balancer id: %s' % self.lb_id)
        self.logging.info('load balancer ip addr: %s' % self.lb_addr)
        lbaas_utils.validate_loadBalancer(self)
    else:
        self.logging.info("Using user-supplied loadbalancer: %s" % self.args.lbid)
        self.lb_id = self.args.lbid
        self.lb_addr = self.get_floating_ip(self.lb_id)
    # wait a bit if we want to show off
    if self.args.demowaittime:
        self.logging.info("Sleeping %s seconds for demo / manual testing" % self.args.demowaittime)
        time.sleep(self.args.demowaittime)
    # get the nova name for our loadbalancer
    orig_nova_name = self.get_nova_name()
    # get nova id
    orig_nova_id = self.get_nova_id(orig_nova_name)
    # check floating_ip
    orig_floating_ip_output = self.check_floating_ip()
    # use our nova info and delete the haproxy vm
    nova_sleep = 15
    self.logging.info("-" * 80)
    self.logging.info("Nova info for loadbalancer: %s, ip_addr: %s" % (self.lb_id, self.lb_addr))
    self.logging.info("Nova name: %s" % orig_nova_name)
    self.logging.info("Nova id: %s" % orig_nova_id)
    self.logging.info("Floating ip data: %s" % orig_floating_ip_output)
    self.logging.info("-" * 80)
    self.logging.info("")
    self.logging.info("Deleting nova node for lb: %s..." % (self.lb_id))
    cmd = 'nova --insecure --os-username=%s --os-tenant-id=%s --os-region-name=%s --os-password=%s --os-auth-url=%s delete %s' % (
        self.args.nodesusername, self.args.nodestenantid,
        self.args.nodesregionname, self.args.nodespassword,
        self.args.nodesauthurl, orig_nova_id)
    status, output = commands.getstatusoutput(cmd)
    self.logging.info("Command: %s" % cmd)
    self.logging.info("Status: %s" % status)
    self.logging.info("Output: %s" % output)
    self.logging.info(
        "Sleeping %s seconds for nova delete to take effect..." % nova_sleep)
    # TODO: add in nova list verification of no node (?)
    # TODO: test the database for the device?
    time.sleep(nova_sleep)
    # poll until the replacement vm owns the floating ip, bounded by both
    # an attempt counter and a wall-clock deadline
    time_wait = 1
    attempts_remain = 100
    max_time = 600
    lb_ready = False
    suspected_bad = False
    first_run = True
    start_time = time.time()
    new_nova_name = orig_nova_name
    new_nova_id = orig_nova_id
    self.logging.info(
        "Scanning nova and libra database for vm / loadbalancer status...")
    while not lb_ready and attempts_remain and (
            (time.time() - start_time) <= max_time):
        # get new nova name
        new_nova_name = self.get_nova_name(quiet=True)
        # get new nova id / check floating ip
        new_nova_id = self.get_nova_id(new_nova_name, quiet=True)
        # check floating ip
        floating_ip_output = self.check_floating_ip(quiet=True)
        # periodic status report (every 10th attempt after the first pass)
        if attempts_remain % 10 == 0 and not first_run:
            self.logging.info("-" * 80)
            self.logging.info("Status check:")
            self.logging.info("Attempts remaining: %d" % attempts_remain)
            self.logging.info("Time waited: %f" % (time.time() - start_time))
            self.logging.info("Nova name: %s" % new_nova_name)
            self.logging.info("Nova id: %s" % new_nova_id)
            self.logging.info("Floating ip data: %s" % floating_ip_output)
            self.logging.info("-" * 80)
            self.logging.info("")
        # healed when a differently-named vm exists and its id appears in
        # the floating-ip data for this loadbalancer
        if new_nova_name != orig_nova_name and new_nova_id and new_nova_id in floating_ip_output:
            self.logging.info("-" * 80)
            self.logging.info(
                "New nova node has been assigned loadbalancer: %s's floating ip"
                % (self.lb_id))
            self.logging.info("-" * 80)
            self.logging.info("")
            lb_ready = True
        else:
            suspected_bad = True
            if first_run:
                self.logging.info(
                    "Will try up to: %d times for the loadbalancer to be functional (~%d minutes), please be patient..."
                    % (attempts_remain * time_wait, (max_time / 60)))
                first_run = False
        time.sleep(time_wait)
        attempts_remain -= 1
    stop_time = time.time()
    expended_time = stop_time - start_time
    self.logging.info("Time for loadbalancer: %s to be ready: %f"
                      % (self.lb_id, expended_time))
    self.logging.info("Gathering new nova attributes...")
    # list new nova name
    new_nova_name = self.get_nova_name()
    # get new nova id / check floating ip
    new_nova_id = self.get_nova_id(new_nova_name)
    # check floating ip
    new_floating_ip_output = self.check_floating_ip()
    self.logging.info("-" * 80)
    self.logging.info(
        "New nova attributes for loadbalancer: %s ip_addr: %s:"
        % (self.lb_id, self.lb_addr))
    self.logging.info("New nova name: %s" % new_nova_name)
    self.logging.info("Original nova name: %s" % orig_nova_name)
    self.logging.info("New nova id: %s" % new_nova_id)
    self.logging.info("Original nova id: %s" % orig_nova_id)
    self.logging.info("New floating ip info: %s" % new_floating_ip_output)
    self.logging.info("Original floating ip info: %s"
                      % orig_floating_ip_output)
    self.logging.info("-" * 80)
    self.logging.info("")
    self.logging.info("")
    self.assertTrue(
        lb_ready, msg="WARNING: loadbalancer %s not ready in %f seconds"
        % (self.lb_id, expended_time))
    # only re-validate lbs we created ourselves
    if not self.args.lbid:
        lbaas_utils.validate_loadBalancer(self)
    # wait a bit if we want to show off
    if self.args.demowaittime:
        self.logging.info("Sleeping %s seconds for demo / manual testing"
                          % self.args.demowaittime)
        time.sleep(self.args.demowaittime)
def test_loadBalancerFuncs(self):
    """ test libra loadbalancers via apachebench

    Creates a loadbalancer, then for each backend size in
    self.node_counts grows the node pool and runs apache-bench ('ab')
    against every page in self.pages.  The lb is then deleted (unless
    --cleanupoff) and the request/byte totals parsed from the last ab
    run are verified against the metering server.
    """
    # Create our loadbalancer w/ one node to start
    self.create_result, self.actual_status, self.lb_id, self.lb_addr = self.driver.create_lb(
        self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
    self.logging.info('load balancer id: %s' % self.lb_id)
    self.logging.info('load balancer ip addr: %s' % self.lb_addr)
    lbaas_utils.validate_loadBalancer(self)
    # FIX: pre-initialize so the results parsing below cannot NameError
    # when node_counts / pages is empty
    output = ''
    # iterate through backend node sets and run apachebench
    for node_count in self.node_counts:
        self.logging.info("Testing with %s nodes" % node_count)
        self.logging.info("*" * 80)
        if node_count != 1:
            # we have nodes[0] already, we add subsequent nodes to it
            add_nodes = self.node_pool[len(self.nodes):node_count]
            # add nodes to our loadbalancer
            self.add_node_result, self.actual_status = self.driver.add_nodes(
                self.lb_id, add_nodes)
            if self.actual_status == '202':
                # good update, we need to update our expected nodes
                self.nodes += add_nodes
                self.logging.info("Current node list:")
                nodes = self.driver.list_lb_nodes(self.lb_id)
                self.logging.info(nodes)
                lbaas_utils.validate_loadBalancer(self)
            else:
                self.assertEqual(
                    self.actual_status, '202', msg=
                    "Adding nodes to loadbalancer %s failed with status: %s"
                    % (self.lb_id, self.actual_status))
        # now we run apache-bench!
        self.logging.info("Beginning apache-bench tests...")
        for page_set in self.pages:
            page_file = page_set['path']
            page_desc = page_set['description']
            page_path = os.path.join(self.lb_addr, page_file)
            self.logging.info("Testing page: %s, %s" % (page_path, page_desc))
            self.logging.info("Testing with %s nodes" % node_count)
            cmd = 'ab -q -r -c%s -n%s http://%s' % (
                self.concurrency, self.requests, page_path)
            self.logging.info("test command: %s" % cmd)
            status, output = commands.getstatusoutput(cmd)
            self.logging.info("status: %s" % status)
            self.logging.info("output: %s" % output)
    # determine if we want to wait for m&b testing or not
    self.logging.info(self.args.testmab)
    # FIX: truthiness check instead of '== True'
    if self.args.testmab:
        report_wait = 180
        self.logging.info(
            "Waiting %s seconds for metering testing..." % report_wait)
        time.sleep(report_wait)
    # delete the lb
    if self.args.cleanupoff:
        self.logging.info(
            "NOT deleting loadbalancer: %s per user-specified flag..."
            % self.lb_id)
    else:
        self.logging.info("Deleting loadbalancer: %s" % self.lb_id)
        self.driver.delete_lb(self.lb_id)
    # get total html bytes and total bytes from the LAST ab run above
    total_bytes = 0
    total_requests = 0
    for line in output.split('\n'):
        line = line.replace('bytes', '')
        if line.strip().startswith('Complete requests:'):
            total_requests = int(line.split(':')[1].strip())
            self.logging.info("Complete requests: %s" % (total_requests))
        if line.strip().startswith('Total transferred:'):
            total_bytes = int(line.split(':')[1].strip())
            self.logging.info("Total bytes: %s" % (total_bytes))
    # get total bytes / messages from metering server
    metering_result = lbaas_utils.validate_metering(
        self, total_requests, total_bytes)
    self.assertTrue(metering_result)
def test_createLoadBalancer(self):
    """ gathering time until we have a good loadbalancer and counting bad devices

    Creates and deletes a loadbalancer test_iterations times, timing how
    long each takes to serve traffic over HTTP.  Devices that raise
    connection errors before becoming ready are counted as 'suspected
    bad'; devices that never become ready within max_time are counted as
    failures.  A summary of the collected timings is logged at the end.
    """
    iterations = []         # time-to-ready for devices that never errored
    bad_iterations = []     # errored first, but recovered within max_time
    failed_iterations = []  # never became ready within max_time
    bad_count = 0
    fail_count = 0
    test_iterations = 25
    for i in range(test_iterations):
        self.logging.info("Iteration: %d" % i)
        # Create our loadbalancer
        lb_ready = False
        suspected_bad = False
        time_wait = 1           # seconds between polls
        attempts_remain = 100
        max_time = 300          # per-iteration wall-clock deadline (seconds)
        start_time = time.time()
        self.create_result, self.actual_status, self.lb_id, self.lb_addr = self.driver.create_lb(
            self.lb_name, self.nodes, self.algorithm, self.bad_statuses)
        self.logging.info('load balancer id: %s' % self.lb_id)
        self.logging.info('load balancer ip addr: %s' % self.lb_addr)
        # must_pass=False: a build that never goes ACTIVE is recorded, not fatal
        lbaas_utils.wait_for_active_status(self, must_pass=False)
        # make sure we can get traffic from our loadbalancer
        self.logging.info(time.time() - start_time)
        self.logging.info((time.time() - start_time) < max_time)
        while not lb_ready and attempts_remain and (
                (time.time() - start_time) <= max_time):
            try:
                if attempts_remain % 10 == 0:
                    self.logging.info("Attempts remaining: %d"
                                      % attempts_remain)
                lb_url = 'http://%s' % (self.lb_addr)
                result = requests.get(lb_url, verify=False)
                result.connection.close()
                if result:
                    lb_ready = True
            except Exception as e:
                # only log / count the first failure per device
                if not suspected_bad:
                    # FIX: log the caught exception's type, not the
                    # builtin Exception class itself
                    self.logging.info(type(e))
                    self.logging.info(e)
                    self.logging.info(
                        "loadbalancer id: %s not yet ready. Suspected bad haproxy device"
                        % (self.lb_id))
                    # FIX: report the real deadline instead of a
                    # hard-coded '~10 minutes' (max_time is 300s here)
                    self.logging.info(
                        "Will try up to: %d times for the loadbalancer to be functional (~%d minutes), please be patient..."
                        % (attempts_remain * time_wait, max_time / 60))
                    suspected_bad = True
                    bad_count += 1
            time.sleep(time_wait)
            attempts_remain -= 1
    	 
        stop_time = time.time()
        expended_time = stop_time - start_time
        self.logging.info("Time for loadbalancer: %s to be ready: %f"
                          % (self.lb_id, expended_time))
        if suspected_bad:
            if expended_time <= max_time:
                bad_iterations.append(expended_time)
            else:
                failed_iterations.append(expended_time)
        else:
            iterations.append(expended_time)
        if attempts_remain and expended_time <= max_time:
            lbaas_utils.validate_loadBalancer(self)
        else:
            self.logging.info(
                "WARN: loadbalancer: %s suspected still not ready after %d seconds"
                % (self.lb_id, expended_time))
            fail_count += 1
        self.logging.info("Deleting loadbalancer: %s" % self.lb_id)
        self.driver.delete_lb(self.lb_id)
        time.sleep(10)
    # FIX: these statistics were gathered but never reported
    self.logging.info("=" * 80)
    self.logging.info("Summary of %d iterations:" % test_iterations)
    self.logging.info("Good devices: %d, times: %s"
                      % (len(iterations), iterations))
    self.logging.info("Suspected bad devices: %d, recovered times: %s"
                      % (bad_count, bad_iterations))
    self.logging.info("Failed / never-ready: %d, timed-out times: %s"
                      % (fail_count, failed_iterations))