def copy_to_vm(self, pkg, host):
    """Copy package file *pkg* from compute node *host* to the destination VM.

    Opens a fabric session to ``self.pkgsrc.user@host`` and from there pushes
    *pkg* into the home directory of the VM described by ``self.pkgdst``
    (user/host/password). Errors are logged, not raised.

    :param pkg: path of the package file to copy.
    :param host: IP/hostname of the compute node holding the package.
    :return: None (the fab output is only logged).
    """
    output = None
    self.log.debug("Copying Package %s to VM" % (str(pkg)))
    try:
        with hide('everything'):
            # warn_only/abort_on_prompts=False: tolerate command failures and
            # interactive prompts instead of aborting the whole test run.
            with settings(host_string='%s@%s' % (self.pkgsrc.user, host),
                          password=self.pkgsrc.password,
                          warn_only=True, abort_on_prompts=False):
                output = fab_put_file_to_vm(
                    host_string='%s@%s' % (self.pkgdst.user, self.pkgdst.host),
                    password=self.pkgdst.password,
                    src=pkg, dest='~/')
                self.log.debug(str(output))
                self.log.debug(
                    "Copied the distro from compute '%s' to VM '%s'",
                    host, self.pkgdst.host)
    # Fixed: was the Python-2-only "except Exception, errmsg" form; the
    # "as" form is accepted by both Python 2.6+ and Python 3.
    except Exception as errmsg:
        # NOTE(review): rest of this method logs via self.log, but this line
        # uses self.logger — confirm both attributes exist on this class.
        self.logger.exception(
            "Exception: %s occured when copying %s" % (errmsg, pkg))
def test_rsyslog_messages_in_db_through_contrail_logs(self):
    """Tests related to rsyslog.

    End-to-end check that remote syslog messages sent from a collector-less
    compute node reach the analytics database and are retrievable via
    ``contrail-logs``:

    1. UDP transport: send 10 messages with delay; some loss is tolerated.
    2. TCP transport: send 10 messages without delay; no loss is allowed.
       Also verifies the ``--category`` query.
    3. All syslog facilities x severities: each combination must arrive.
    4. (disabled pending bug 1353624) ``--level`` query per severity.
    5. 100 messages larger than 1024 bytes over TCP; all must arrive.

    :return: True on success, False on any verification failure.
    """
    if len(self.inputs.compute_ips) < 1:
        self.logger.warn(
            "Minimum 1 compute nodes are needed for this test to run")
        self.logger.warn(
            "Exiting since this test can't be run.")
        return True
    # get a collector less compute node for the test.
    # so that we can test remote syslog messages.
    try:
        list_of_collector_less_compute = \
            list(set(self.inputs.compute_ips) -
                 set(self.inputs.collector_ips))
        comp_node_ip = list_of_collector_less_compute[0]
    except Exception as e:
        self.logger.error(
            "Colud not get a collector less compute node for the test.")
        self.logger.exception(
            "Got exception as %s" % (e))
        # Fixed: previously fell through with comp_node_ip unbound, which
        # raised NameError below. Bail out of the test instead.
        return False
    # bring up rsyslog client-server connection with udp protocol.
    restart_collector_to_listen_on_35999(
        self, self.inputs.collector_ips[0])
    restart_rsyslog_client_to_send_on_35999(
        self, comp_node_ip, self.inputs.collector_ips[0])
    # send 10 syslog messages and verify through contrail logs. There might
    # be loss, but few messages should reach. Or else the test fails.
    # copy test files to the compute node.
    with settings(host_string='%s@%s' % (self.inputs.username,
                                         self.inputs.cfgm_ips[0]),
                  password=self.inputs.password,
                  warn_only=True, abort_on_prompts=False):
        path = self.inputs.test_repo_dir + '/scripts/rsyslog/mylogging.py'
        output = fab_put_file_to_vm(
            host_string='%s@%s' % (self.inputs.username, comp_node_ip),
            password=self.inputs.password,
            src=path, dest='~/')
        path = self.inputs.test_repo_dir + '/scripts/rsyslog/message.txt'
        output = fab_put_file_to_vm(
            host_string='%s@%s' % (self.inputs.username, comp_node_ip),
            password=self.inputs.password,
            src=path, dest='~/')
    # send 10 messages with delay.
    with settings(host_string='%s@%s' % (self.inputs.username, comp_node_ip),
                  password=self.inputs.password,
                  warn_only=True, abort_on_prompts=False):
        cmd = "chmod 777 ~/mylogging.py"
        run('%s' % (cmd), pty=True)
        cmd = "~/mylogging.py send_10_log_messages_with_delay"
        run('%s' % (cmd), pty=True)
    # verify through contrail logs.
    with settings(host_string='%s@%s' % (self.inputs.username,
                                         self.inputs.collector_ips[0]),
                  password=self.inputs.password,
                  warn_only=True, abort_on_prompts=False):
        cmd = "contrail-logs --last 2m --message-type Syslog | grep 'Test Syslog Messages being sent.' | wc -l"
        output = run('%s' % (cmd), pty=True)
        if int(output) == 0:
            self.logger.error(
                "No syslog messages were through contrail-logs. Seems to be an issue")
            return False
        elif int(output) < 7:
            # UDP is lossy; fewer than 7 of 10 is flagged but not fatal.
            self.logger.info(
                "Remote syslog message test connection setup passed.")
            self.logger.warn(
                "There is 30% message loss. There might be an issue.")
        else:
            self.logger.info(
                "Remote syslog message test connection setup passed.")
            self.logger.info(
                "Remote syslog message test over UDP connection passed.")
    # change rsyslog client server connection to tcp.
    update_rsyslog_client_connection_details(
        self, node_ip=comp_node_ip, server_ip=self.inputs.cfgm_ips[0],
        protocol='tcp', restart=True)
    # send 10 log messages without any delay.
    # no message should be lost in a tcp connection.
    with settings(host_string='%s@%s' % (self.inputs.username, comp_node_ip),
                  password=self.inputs.password,
                  warn_only=True, abort_on_prompts=False):
        cmd = "~/mylogging.py send_10_log_messages"
        run('%s' % (cmd), pty=True)
    # verify through contrail logs.
    time.sleep(2)  # for database sync.
    with settings(host_string='%s@%s' % (self.inputs.username,
                                         self.inputs.collector_ips[0]),
                  password=self.inputs.password,
                  warn_only=True, abort_on_prompts=False):
        cmd = "contrail-logs --last 2m --message-type Syslog | grep 'Test Syslog Messages being sent without delay.' | wc -l"
        output = run('%s' % (cmd), pty=True)
        if int(output) != 10:
            self.logger.error(
                "There was a message loss in a tcp connection which is unexpected.")
            return False
        else:
            self.logger.info(
                "Remote syslog message test over TCP connection passed.")
        # verify 'category' query of contrail logs.
        cmd = "contrail-logs --last 3m --category cron | grep 'Test Syslog Messages being sent without delay.' | wc -l"
        output = run('%s' % (cmd), pty=True)
        if int(output) != 10:
            self.logger.error(
                "Unable to retrieve messages from the database using the 'category' query.")
            return False
        else:
            self.logger.info(
                "Succesfully retrived messages from the database using the 'category' query.")
    # send syslog messages of all facilities and severities and verify.
    with settings(host_string='%s@%s' % (self.inputs.username, comp_node_ip),
                  password=self.inputs.password,
                  warn_only=True, abort_on_prompts=False):
        cmd = "~/mylogging.py send_messages_of_all_facility_and_severity"
        run('%s' % (cmd), pty=True)
    # verify all facilities and severities through contrail logs.
    time.sleep(2)  # for database sync.
    result_flag = 0
    list_of_facility = ['LOG_KERN', 'LOG_USER', 'LOG_MAIL', 'LOG_DAEMON',
                        'LOG_AUTH', 'LOG_NEWS', 'LOG_UUCP', 'LOG_LOCAL0',
                        'LOG_CRON', 'LOG_SYSLOG', 'LOG_LOCAL1']
    list_of_severity = [
        'LOG_EMERG', 'LOG_ALERT', 'LOG_CRIT', 'LOG_ERR',
        'LOG_WARNING', 'LOG_NOTICE', 'LOG_INFO', 'LOG_DEBUG']
    with settings(host_string='%s@%s' % (self.inputs.username,
                                         self.inputs.collector_ips[0]),
                  password=self.inputs.password,
                  warn_only=True, abort_on_prompts=False):
        # Dump all matching messages once, then grep the local file per
        # facility/severity pair instead of re-querying the database.
        cmd = "contrail-logs --last 2m --message-type Syslog | grep 'Test Message from' > ~/result.txt "
        run('%s' % (cmd), pty=True)
        for each_facility in list_of_facility:
            for each_severity in list_of_severity:
                cmd = "cat ~/result.txt | grep 'Test Message from " + \
                    str(each_facility) + " with severity " + \
                    str(each_severity) + ".' | wc -l"
                output = run('%s' % (cmd), pty=True)
                if int(output) != 1:
                    self.logger.error(
                        "Syslog message with facility %s and severity %s was not received"
                        % (each_facility, each_severity))
                    result_flag = 1
                else:
                    self.logger.info(
                        "Syslog message with facility %s and severity %s was received"
                        % (each_facility, each_severity))
    if result_flag != 0:
        self.logger.error(
            "Error in transmitting or receiving some syslog facilities and severities")
        return False
    # verify 'level' query of contrail logs.
    bug_1353624_fix = False  # flip once bug 1353624 is fixed.
    if bug_1353624_fix:
        with settings(host_string='%s@%s' % (self.inputs.username,
                                             self.inputs.collector_ips[0]),
                      password=self.inputs.password,
                      warn_only=True, abort_on_prompts=False):
            for each_severity in list_of_severity:
                cmd = "contrail-logs --last 4m --level " + \
                    str(each_severity) + " | wc -l"
                output = run('%s' % (cmd), pty=True)
                if int(output) < 1:
                    self.logger.error(
                        "Syslog message with severity %s was not found."
                        % (each_severity))
                    result_flag = 1
                else:
                    self.logger.info(
                        "Syslog message with severity %s was found."
                        % (each_severity))
        if result_flag != 0:
            self.logger.error(
                "Error in transmitting or receiving some syslog severities.")
            return False
    # send 100 messages grater than 1024 bytes with a delay of 1 sec between
    # each message.
    # This delay factor is expected to be brought down through bug fix.
    with settings(host_string='%s@%s' % (self.inputs.username, comp_node_ip),
                  password=self.inputs.password,
                  warn_only=True, abort_on_prompts=False):
        cmd = "~/mylogging.py send_messages_grater_than_1024_bytes"
        run('%s' % (cmd), pty=True, timeout=120)
    # verify all the 10 messages of 1074 bytes are received.
    time.sleep(2)  # for database sync.
    with settings(host_string='%s@%s' % (self.inputs.username,
                                         self.inputs.collector_ips[0]),
                  password=self.inputs.password,
                  warn_only=True, abort_on_prompts=False):
        cmd = "contrail-logs --last 3m --message-type Syslog | grep 'This is a 1074 byte message' | wc -l"
        output = run('%s' % (cmd), pty=True)
        if int(output) != 100:
            self.logger.error(
                "Failed to receive all the messages greater than 1024 bytes over a tcp connection.")
            return False
        else:
            self.logger.info(
                "Successfully received all the messages greater than 1024 bytes over a tcp connection.")
    return True
def test_check_flow_setup_within_vn(self, dst_port_min, dst_port_max,
                                    src_port_min, src_port_max, no_of_vn=1,
                                    pkt_size=64, no_of_flows=1000):
    ''' Validate flow setup rate between two VMs within a VN.

    Brings up two VMs (in one VN, or across two policy-connected VNs when
    no_of_vn == 2), renders a pktgen script from a template, pushes it to
    the sender VM, runs it to create UDP flows, and measures the flow setup
    rate on the sender's compute node via "flow -r".

    :param dst_port_min/dst_port_max: destination UDP port range for pktgen.
    :param src_port_min/src_port_max: source UDP port range for pktgen.
    :param no_of_vn: 1 = both VMs in vn1; 2 = VMs in vn1 and vn2 with a
        pass-all UDP policy attached to both VNs.
    :param pkt_size: pktgen packet size in bytes.
    :param no_of_flows: number of flows pktgen should generate.
    :return: True (currently never fails; see note at the bottom).
    '''
    # --- Pick or create the network/VM fixtures -------------------------
    if getattr(self, 'res', None):
        # Reusable resources already provisioned by the test framework.
        self.vn1_fixture = self.res.vn1_fixture
        self.vn2_fixture = self.res.vn2_fixture
        if no_of_vn == 2:
            self.vm1_fixture = self.res.vn1_vm5_fixture
            self.vm2_fixture = self.res.vn2_vm3_fixture
        else:
            self.vm1_fixture = self.res.vn1_vm5_fixture
            self.vm2_fixture = self.res.vn1_vm6_fixture
    else:
        self.vn1_fq_name = "default-domain:admin:vn1"
        self.vn1_name = "vn1"
        self.vn1_subnets = ['31.1.1.0/24']
        self.vm1_name = 'vm1'
        self.vm2_name = 'vm2'
        if no_of_vn == 2:
            self.vn2_fq_name = "default-domain:admin:vn2"
            self.vn2_name = "vn2"
            self.vn2_subnets = ['32.1.1.0/24']
    if getattr(self, 'res', None):
        self.vn1_fixture = self.res.vn1_fixture
        assert self.vn1_fixture.verify_on_setup()
    else:
        self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
    if no_of_vn == 2:
        if getattr(self, 'res', None):
            self.vn2_fixture = self.res.vn2_fixture
            assert self.vn2_fixture.verify_on_setup()
        else:
            self.vn2_fixture = self.config_vn(self.vn2_name,
                                              self.vn2_subnets)
        # Allow all UDP traffic between the two VNs.
        self.policy_name = 'flow_policy'
        self.rules = [{'direction': '<>',
                       'protocol': 'udp',
                       'source_network': self.vn1_name,
                       'src_ports': [0, -1],
                       'dest_network': self.vn2_name,
                       'dst_ports': [0, -1],
                       'simple_action': 'pass',
                       }]
        self.policy_fix = self.config_policy(self.policy_name, self.rules)
        self.policy_attach_fix = self.attach_policy_to_vn(
            self.policy_fix, self.vn1_fixture)
        self.policy_attach_fix = self.attach_policy_to_vn(
            self.policy_fix, self.vn2_fixture)
        if getattr(self, 'res', None):
            self.vm1_fixture = self.res.vn1_vm5_fixture
            self.vm2_fixture = self.res.vn2_vm3_fixture
        else:
            # Making sure VM falls on diffrent compute host
            host_list = self.connections.nova_h.get_hosts()
            compute_1 = host_list[0]
            compute_2 = host_list[0]
            if len(host_list) > 1:
                compute_1 = host_list[0]
                compute_2 = host_list[1]
            self.vm1_fixture = self.config_vm(
                self.vn1_fixture, self.vm1_name, node_name=compute_1)
            self.vm2_fixture = self.config_vm(
                self.vn2_fixture, self.vm2_name, node_name=compute_2)
        assert self.vm1_fixture.verify_on_setup()
        assert self.vm2_fixture.verify_on_setup()
        self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj)
        self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj)
    else:
        if getattr(self, 'res', None):
            self.vm1_fixture = self.res.vn1_vm5_fixture
            self.vm2_fixture = self.res.vn1_vm6_fixture
        else:
            # Making sure VM falls on diffrent compute host
            host_list = self.connections.nova_h.get_hosts()
            compute_1 = host_list[0]
            compute_2 = host_list[0]
            if len(host_list) > 1:
                compute_1 = host_list[0]
                compute_2 = host_list[1]
            self.vm1_fixture = self.config_vm(
                self.vn1_fixture, self.vm1_name, node_name=compute_1,
                image_name='ubuntu-traffic')
            self.vm2_fixture = self.config_vm(
                self.vn1_fixture, self.vm2_name, node_name=compute_2,
                image_name='ubuntu-traffic')
        assert self.vm1_fixture.verify_on_setup()
        assert self.vm2_fixture.verify_on_setup()
        self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj)
        self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj)
    vm1_ip = self.vm1_fixture.vm_ip
    vm2_ip = self.vm2_fixture.vm_ip
    # --- Render the pktgen script from its template ---------------------
    # Fixed: file handles were previously opened with open() and never
    # closed; use context managers so they are released deterministically.
    with open('tcutils/templates/pktgen_template.sh', 'r') as fd:
        content = fd.read()
    template = string.Template(content)
    with open('/tmp/pktgen', 'w+') as fr:
        fr.write((template.safe_substitute(
            {'__pkt_size__': pkt_size,
             '__count__': no_of_flows,
             '__dst_ip__': vm2_ip,
             '__src_ip__': vm1_ip,
             '__dst_port_mim__': dst_port_min,
             '__dst_port_max__': dst_port_max,
             '__src_port_min__': src_port_min,
             '__src_port_max__': src_port_max})))
        fr.flush()
    if self.inputs.cfgm_ips[0] != self.vm1_fixture.vm_node_ip:
        self.logger.info(
            "Cfgm and compute are different; copy the pktgen from cfgm '%s'"
            " to compute '%s'",
            self.inputs.cfgm_ips[0], self.vm1_fixture.vm_node_ip)
        with hide('everything'):
            with settings(host_string='%s@%s' % (self.inputs.username,
                                                 self.vm1_fixture.vm_node_ip),
                          password=self.inputs.password,
                          warn_only=True, abort_on_prompts=False):
                put('/tmp/pktgen', '/tmp')
        self.logger.info("Copied the pktgen to compute '%s'",
                         self.vm1_fixture.vm_node_ip)
    # Copy the pkgen to VM
    with hide('everything'):
        with settings(host_string='%s@%s' % (self.inputs.username,
                                             self.vm1_fixture.vm_node_ip),
                      password=self.inputs.password,
                      warn_only=True, abort_on_prompts=False):
            output = fab_put_file_to_vm(
                host_string='%s@%s' % (self.vm1_fixture.vm_username,
                                       self.vm1_fixture.local_ip),
                password=self.vm1_fixture.vm_password,
                src='/tmp/pktgen', dest='/tmp')
            # Start the flow -r on compute to check the flow setup rate
            self.logger.info("Start flow -r to monitor the flow setup rate")
            run('flow -r >> /tmp/flow_rate &', pty=False)
    # start the tcpdump on the tap interface of sender and receiver computes.
    session_vm1 = self.start_tcp_dump(self.vm1_fixture)
    session_vm2 = self.start_tcp_dump(self.vm2_fixture)
    # Run pktgen on VM
    output = ''
    with hide('everything'):
        with settings(host_string='%s@%s' % (self.inputs.username,
                                             self.vm1_fixture.vm_node_ip),
                      password=self.inputs.password,
                      warn_only=True, abort_on_prompts=False):
            cmd = 'chmod 755 /tmp/pktgen'
            output = run_fab_cmd_on_node(
                host_string='%s@%s' % (self.vm1_fixture.vm_username,
                                       self.vm1_fixture.local_ip),
                password=self.vm1_fixture.vm_password,
                cmd=cmd, as_sudo=False)
            cmd = 'sudo /tmp/pktgen'
            output = run_fab_cmd_on_node(
                host_string='%s@%s' % (self.vm1_fixture.vm_username,
                                       self.vm1_fixture.local_ip),
                password=self.vm1_fixture.vm_password,
                cmd=cmd, as_sudo=False)
    # Check flow -l to check the number of flows created.
    with hide('everything'):
        with settings(host_string='%s@%s' % (self.inputs.username,
                                             self.vm1_fixture.vm_node_ip),
                      password=self.inputs.password,
                      warn_only=True, abort_on_prompts=False):
            flows_created = run('flow -l | grep Action | wc -l')
            self.logger.info("number of flows created are: '%s'",
                             flows_created)
    # Stop the tcpdump
    sender_count = self.stop_tcp_dump(session_vm1)
    rcvr_count = self.stop_tcp_dump(session_vm2)
    self.logger.info("sender_count: %s, rcvr_count: %s" %
                     (sender_count, rcvr_count))
    # Stop monitoring flow -r
    with hide('everything'):
        with settings(host_string='%s@%s' % (self.inputs.username,
                                             self.vm1_fixture.vm_node_ip),
                      password=self.inputs.password,
                      warn_only=True, abort_on_prompts=False):
            run('pidof flow')
            run('kill $(pidof flow)')
            # Pull the recorded flow-rate samples locally for parsing.
            get('/tmp/flow_rate', '/tmp/')
            run('rm -rf /tmp/flow_rate')
    FlowRateParserObj = FlowRateParser('/tmp/flow_rate')
    flow_setup_rate = FlowRateParserObj.flowrate()
    self.logger.info("flow setup rate: '%s'", flow_setup_rate)
    local('rm -rf /tmp/flow_rate')
    # NOTE(review): `results` is never populated, so the loop below is
    # currently dead scaffolding — presumably a placeholder for future
    # pass/fail criteria once an expected flow-setup-rate is defined.
    results = []
    errmsg = ''
    for (rc, msg) in results:
        if not rc:
            self.logger.error(msg)
            errmsg += msg + '\n'
    if errmsg:
        # assert False, errmsg
        self.logger.info(
            "This test wont fail; until we identify a number for ping latency.")
        self.logger.error(errmsg)
    return True