def connection():
    """Open a connection to the SONA sqlite DB file named in CONF.base().

    The connection is switched to autocommit (isolation_level = None) so
    each statement is committed immediately.  If the DB cannot be opened
    the error is logged and the whole process exits — monitoring cannot
    run without its database.
    """
    try:
        conn = sqlite3.connect(CONF.base()['db_file'])
        # isolation_level None => sqlite3 autocommit mode
        conn.isolation_level = None
        return conn
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the same recovery behavior
        # for real errors without masking interpreter-level signals
        LOG.exception()
        sys.exit(1)
def sql_insert_nodes(cls, db_log, node_list, username, type, sub_type='none'):
    """Register every node of *node_list* in the monitoring tables.

    Each entry of node_list is a '<name>:<ip>' string.  Per node this
    inserts one row into NODE_INFO_TBL and STATUS_TBL, one EVENT_TBL row
    per alarm item from DB.get_event_list(type), and an ONOS_TBL row for
    ONOS-type nodes.  Any insert failure logs and exits the process.

    NOTE(review): SQL is built by string concatenation; the values come
    from the local config file, but parameterized queries would be safer.
    """
    try:
        for node in node_list:
            # node is formatted as "<name>:<ip>"
            name, ip = str(node).split(':')
            db_log.write_log('Insert node [%s %s %s %s]', name, ip,
                             username, type)
            sql = 'INSERT INTO ' + cls.NODE_INFO_TBL + \
                  ' VALUES (\'' + name + '\', \'' + ip + '\', \'' + username + '\', \'' + type.upper() + '\', \'' + sub_type.upper() + '\')'
            sql_rt = cls.sql_execute(sql)
            if sql_rt != 'SUCCESS':
                db_log.write_log(
                    " [NODE TABLE] Node data insert fail \n%s", sql_rt)
                sys.exit(1)

            # set status tbl: every monitored item starts out as 'none'
            sql = 'INSERT INTO ' + cls.STATUS_TBL + \
                  ' VALUES (\'' + name + '\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\')'
            sql_rt = cls.sql_execute(sql)
            if sql_rt != 'SUCCESS':
                db_log.write_log(
                    " [STATUS TABLE] Status data insert fail \n%s", sql_rt)
                sys.exit(1)

            # add Alarm Items: one EVENT_TBL row per event type of this node
            evt_list = DB.get_event_list(type)
            for item in evt_list:
                db_log.write_log('Insert item [%s %s]', name, item)
                sql = 'INSERT INTO ' + cls.EVENT_TBL + \
                      ' VALUES (\'' + name + '\',\'' + item + '\', \'none\', \'none\', \'none\', \'none\')'
                sql_rt = cls.sql_execute(sql)
                if sql_rt != 'SUCCESS':
                    db_log.write_log(
                        " [EVENT TABLE] Event data insert fail \n%s", sql_rt)
                    sys.exit(1)

            if type.upper() == 'ONOS':
                # set app tbl (ONOS-only per-node state row)
                sql = 'INSERT INTO ' + cls.ONOS_TBL + ' VALUES (\'' + name + '\', \'none\', \'none\', \'none\', \'none\')'
                sql_rt = cls.sql_execute(sql)
                if sql_rt != 'SUCCESS':
                    db_log.write_log(
                        " [ONOS TABLE] Onos data insert fail \n%s", sql_rt)
                    sys.exit(1)
    except:
        LOG.exception()
def flush_pending_alarm():
    """Deliver any queued alarm via e-mail and/or Slack, then clear the queue.

    Reads and resets the module-level alarm_count / alarm_subject /
    alarm_body globals.  When several alarms were coalesced, the subject
    carries a '(+ N events)' suffix but only the first body is sent.
    Delivery errors are logged, never raised.
    """
    global alarm_count, alarm_subject, alarm_body

    if alarm_count <= 0:
        return;  # no alarm pending

    conf = CONF.alarm()

    # copy to local variables and clear global variables, so alarms that
    # arrive while we deliver are queued for the next flush
    count = alarm_count
    subject = '[%s] %s' % (conf['site_name'], alarm_subject)
    if (count > 1):
        subject += ' (+ %d events)' % (count - 1)
    body = alarm_body
    alarm_count = 0
    alarm_subject = ''
    alarm_body = ''

    if conf['mail_alarm']:
        mail_from = conf['mail_user'] + '@' + conf['mail_server'].split(':')[0]
        # send to each mail_list entry separately: gmail smtp seems not to
        # handle multiple To: addresses
        for mail_to in conf['mail_list']:
            msg = MIMEText(body)
            msg['Subject'] = subject
            msg['From'] = mail_from
            msg['To'] = mail_to
            LOG.info('Send Email Alarm: subject=%s to=%s body=%s',
                     subject, mail_to, body)
            try:
                ms = smtplib.SMTP(conf['mail_server'])
                if conf['mail_tls']:
                    ms.starttls()
                ms.login(conf['mail_user'], conf['mail_password'])
                ms.sendmail(mail_from, mail_to, msg.as_string())
                ms.quit()
            except:
                LOG.exception()

    if conf['slack_alarm']:
        ch = conf['slack_channel'].strip()
        if ch[0] != '#':  # slack channels are addressed as '#name'
            ch = '#' + ch
        LOG.info('Send Slack Alarm: channel=%s text=%s', ch, body)
        sc = SlackClient(conf['slack_token'])
        try:
            sc.api_call("chat.postMessage", channel=ch, text=body)
        except:
            LOG.exception()
def ssh_exec(cls, username, node, command):
    """Run *command* on *node* over ssh and return its stdout.

    Returns None when the ssh process exits non-zero (logged) or when an
    unexpected error occurs (logged, never raised).
    """
    ssh_cmd = 'ssh %s %s@%s %s' % (cls.ssh_options(), username, node, command)
    try:
        proc = Popen(ssh_cmd, stdout=PIPE, stderr=PIPE, shell=True)
        out, err = proc.communicate()
        if proc.returncode == 0:
            return out
        LOG.error("\'%s\' SSH_Cmd Fail, cause => %s", node, err)
        return
    except:
        LOG.exception()
def ssh_pexpect(cls, username, node, onos_ip, command):
    """Run an ONOS CLI *command* by ssh-ing to *node*, then from there
    ssh-ing into the karaf console (port 8101) on *onos_ip*.

    Returns the command output with prompt/echo lines filtered out, or
    the string "fail" on connection, timeout or authentication problems.
    """
    cmd = 'ssh %s %s@%s' % (cls.ssh_options(), username, node)
    try:
        LOG.info('ssh_pexpect cmd = ' + cmd)
        ssh_conn = pexpect.spawn(cmd)
        # wait for a shell prompt ('#' or '$') on the intermediate host;
        # expect() returns the index of the matched pattern
        rt1 = ssh_conn.expect(['#', '\$', pexpect.EOF],
                              timeout=CONF.ssh_conn()['ssh_req_timeout'])
        if rt1 == 0:
            cmd = 'ssh -p 8101 karaf@' + onos_ip + ' ' + command
            LOG.info('ssh_pexpect cmd = ' + cmd)
            ssh_conn.sendline(cmd)
            # NOTE(review): the expect() arguments below appear to have been
            # redacted ('******') in this copy of the source and are not
            # valid Python as-is; presumably it matched 'Password:' with the
            # usual ssh_req_timeout.  Kept byte-for-byte — confirm upstream.
            rt2 = ssh_conn.expect(
                ['Password:'******'ssh_req_timeout'])
            if rt2 == 0:
                # default karaf console password
                ssh_conn.sendline('karaf')
                ssh_conn.expect(['#', '\$', pexpect.EOF],
                                timeout=CONF.ssh_conn()['ssh_req_timeout'])
                str_output = str(ssh_conn.before)
                ret = ''
                # drop blank lines and anything that looks like a prompt or
                # command echo, keeping only real command output
                for line in str_output.splitlines():
                    if (line.strip() == '') or ('#' in line) or (
                            '$' in line) or ('~' in line) or ('@' in line):
                        continue
                    ret = ret + line + '\n'
                return ret
            else:
                return "fail"
        elif rt1 == 1:
            LOG.error('%s', ssh_conn.before)
        elif rt1 == 2:
            LOG.error("[ssh_pexpect] connection timeout")
        return "fail"
    except:
        LOG.exception()
        return "fail"
def onos_ssh_exec(cls, node_ip, command):
    """Execute an ONOS CLI command over ssh (karaf port 8101).

    Returns the command stdout, or None when the ssh process exits
    non-zero or an unexpected error occurs (both are logged).
    """
    opts = '%s -p 8101' % cls.ssh_options()
    full_cmd = 'ssh %s %s %s' % (opts, node_ip, command)
    try:
        proc = Popen(full_cmd, stdout=PIPE, stderr=PIPE, shell=True)
        out, err = proc.communicate()
        if proc.returncode == 0:
            return out
        LOG.error("ONOS(%s) SSH_Cmd Fail, cause => %s", node_ip, err)
        return
    except:
        LOG.exception()
def delete_test_instance(server_vm, client_vm, client_floatingip):
    """Tear down the tperf test fixtures: floating IP, server VM, client VM.

    Any nova error is logged and swallowed — cleanup is best effort.
    """
    try:
        nova = client.Client(CONF.openstack()['version'],
                             CONF.openstack()['username'],
                             CONF.openstack()['api_key'],
                             CONF.openstack()['project_id'],
                             CONF.openstack()['auth_url'])
        nova.floating_ips.delete(client_floatingip)
        LOG.info('[Tperf Test] Client floatingip Deleted --- ')
        # delete whichever of the two instances actually got created
        for instance in [server_vm, client_vm]:
            if instance:
                nova.servers.delete(instance)
        LOG.info('[Tperf Test] Server and Client instance Deleted] --- ')
    except:
        LOG.exception()
def ssh_tperf_exec(cls, keypair, username, node_ip, command, timeout):
    """Run *command* on *node_ip* over ssh using a keypair file.

    Returns the command stdout on success, or the string 'fail' on any
    failure: missing keypair file, non-zero ssh exit status, or an
    unexpected exception (callers compare the result against 'fail').
    """
    ssh_options = '-o StrictHostKeyChecking=no ' \
                  '-o ConnectTimeout=' + str(timeout)
    if not os.path.exists(keypair):
        # message typo fixed ("keypaire file not exist")
        LOG.error('[SSH Fail] keypair file does not exist. ---')
        return 'fail'
    cmd = 'ssh %s -i %s %s@%s %s' % (ssh_options, keypair, username,
                                     node_ip, command)
    LOG.info("[SB SSH CMD] cmd = %s", cmd)
    try:
        result = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        (output, error) = result.communicate()
        if result.returncode != 0:
            LOG.error("\'%s\' SSH_Cmd Fail, cause(%d) => %s",
                      node_ip, result.returncode, str(error))
            return 'fail'
        LOG.info("ssh command execute successful \n >> [%s]", output)
        return output
    except Exception:
        LOG.exception()
        # BUG FIX: previously fell through and implicitly returned None,
        # while every caller checks for the string 'fail'
        return 'fail'
def db_initiation(cls, db_log):
    """(Re)create the SONA sqlite DB from scratch and seed the node rows.

    Removes any existing DB file, creates the five monitoring tables,
    then inserts one row per configured node for every system type listed
    in the watchdog 'check_system' config.  Exits the process when a
    CREATE statement fails.
    """
    try:
        db_path = CONF.base()['db_file']
        if os.path.isfile(db_path):
            os.remove(db_path)  # always start from an empty DB

        db_log.write_log("--- Initiating SONA DB ---")
        init_sql = [
            'CREATE TABLE ' + cls.NODE_INFO_TBL +
            '(nodename text primary key, ip_addr, username, type, sub_type)',
            'CREATE TABLE ' + cls.STATUS_TBL +
            '(nodename text primary key, ' + cls.item_list + ', time)',
            'CREATE TABLE ' + cls.REGI_SYS_TBL + '(url text primary key, auth)',
            'CREATE TABLE ' + cls.EVENT_TBL +
            '(nodename, item, grade, pre_grade, reason, time, PRIMARY KEY (nodename, item))',
            'CREATE TABLE ' + cls.ONOS_TBL +
            '(nodename text primary key, cluster, device, link, app)'
        ]
        for sql in init_sql:
            sql_rt = cls.sql_execute(sql)
            if sql_rt != 'SUCCESS':
                db_log.write_log("DB initiation fail\n%s", sql_rt)
                sys.exit(1)

        db_log.write_log('Insert nodes information ...')
        for node_type in CONF.watchdog()['check_system']:
            # account is '<user>:<password>'; only the user part is stored
            cls.sql_insert_nodes(
                db_log, (CONF_MAP[node_type.upper()]())['list'],
                str((CONF_MAP[node_type.upper()]()
                     )['account']).split(':')[0], node_type)
    except:
        LOG.exception()
def run_test(sona_topology, test_json, timeout_arr, index, total_timeout):
    """Run a traffic-test command inside a VM reached via 'virsh console'.

    Ssh-es to the openstack node hosting the instance, attaches to the VM
    console, logs in with the credentials from *test_json*, runs the
    traffic command, and stores the result dict into timeout_arr[index]
    (this function is run per-test, apparently from worker threads —
    TODO confirm against the caller).
    """
    try:
        node = test_json['node']
        ins_id = test_json['instance_id']
        user = test_json['vm_user_id']
        pw = test_json['vm_user_password']
        command = test_json['traffic_test_command']
        ip = sona_topology.get_openstack_info(node, 'ip')
        if ip == '':
            str_output = node + ' node does not exist'
        else:
            # account is '<user>:<password>'; only the user part is used
            node_id = CONF.openstack()['account'].split(':')[0]
            ssh_options = '-o StrictHostKeyChecking=no ' \
                          '-o ConnectTimeout=' + str(CONF.ssh_conn()['ssh_req_timeout'])
            cmd = 'ssh %s %s@%s' % (ssh_options, node_id, ip)
            try:
                LOG.info('ssh_pexpect cmd = ' + cmd)
                ssh_conn = pexpect.spawn(cmd)
                rt1 = ssh_conn.expect(
                    PROMPT, timeout=CONF.ssh_conn()['ssh_req_timeout'])
                if rt1 == 0:
                    # attach to the instance's serial console
                    cmd = 'virsh console ' + ins_id
                    LOG.info('ssh_pexpect cmd = ' + cmd)
                    ssh_conn.sendline(cmd)
                    rt2 = ssh_conn.expect(
                        [
                            pexpect.TIMEOUT, 'Escape character is',
                            'error:', pexpect.EOF
                        ],
                        timeout=CONF.ssh_conn()['ssh_req_timeout'])
                    if rt2 == 0:
                        str_output = cmd + ' timeout'
                    elif rt2 == 1:
                        # console attached: nudge it to show a login prompt
                        ssh_conn.sendline('\n')
                        try:
                            # NOTE(review): the expect() arguments below were
                            # redacted ('******') in this copy of the source
                            # and are not valid Python as-is; presumably the
                            # pattern list ended with the usual
                            # ssh_req_timeout.  Kept byte-for-byte.
                            rt3 = ssh_conn.expect(
                                ['login: '******'ssh_req_timeout'])
                            LOG.info('rt3 = ' + str(rt3))
                            if rt3 == 2:
                                str_output = 'Permission denied'
                            else:
                                ssh_conn.sendline(user)
                                rt_pw = ssh_conn.expect(
                                    [
                                        pexpect.TIMEOUT, '[P|p]assword:',
                                        pexpect.EOF
                                    ],
                                    timeout=CONF.ssh_conn()['ssh_req_timeout'])
                                if rt_pw == 1:
                                    ssh_conn.sendline(pw)
                                    rt4 = ssh_conn.expect(
                                        [
                                            pexpect.TIMEOUT,
                                            'Login incorrect', '~# ',
                                            'onos> ', '\$ ', '\# ', ':~$ '
                                        ],
                                        timeout=CONF.ssh_conn()
                                        ['ssh_req_timeout'])
                                    LOG.info('rt4 = ' + str(rt4))
                                    if rt4 == 0 or rt4 == 1:
                                        str_output = 'auth fail'
                                    else:
                                        # logged in: run the traffic test and
                                        # wait up to total_timeout for a prompt
                                        ssh_conn.sendline(command)
                                        rt5 = ssh_conn.expect(
                                            [
                                                pexpect.TIMEOUT, '~# ',
                                                'onos> ', '\$ ', '\# ',
                                                ':~$ '
                                            ],
                                            timeout=total_timeout)
                                        if rt5 == 0:
                                            str_output = 'timeout'
                                            ssh_conn.sendline('exit')
                                            ssh_conn.close()
                                        else:
                                            # everything before the prompt is
                                            # the command output
                                            str_output = ssh_conn.before
                                            ssh_conn.sendline('exit')
                                            ssh_conn.close()
                                else:
                                    str_output = 'auth fail'
                        except:
                            str_output = 'exception'
                            ssh_conn.sendline('exit')
                            ssh_conn.close()
                    elif rt2 == 2:
                        # 'error:' from virsh — report and bail out early
                        result = {'command_result': 'virsh console error'}
                        timeout_arr[index] = result
                        return
                else:
                    str_output = 'connection fail'
            except:
                LOG.exception()
                str_output = 'exception 1'
    except:
        LOG.exception()
        str_output = 'exception 2'
    result = {
        'command_result': str_output.replace('\r\n', '\n'),
        'node': node,
        'instance_id': ins_id
    }
    timeout_arr[index] = result
def process_trace(output, sona_topology, trace_conditions):
    """Parse `ovs-appctl ofproto/trace`-style *output* into flow rule dicts.

    Builds one dict per 'Rule:' line (table, cookie, priority, selector)
    and attaches the following 'OpenFlow actions=' line as its 'actions'.
    As a side effect, updates trace_conditions with the next hop target
    (tun_dst / group actions) so the caller can continue the trace there.

    Returns (result_flow, retry_flag, is_success); retry_flag means the
    trace must be re-run on another node, is_success means the packet
    reached an output/CONTROLLER action.  On a parse error returns a
    single string 'parsing error\\n' + output instead of the tuple.
    """
    try:
        retry_flag = False
        is_success = False
        result_flow = []
        lines = output.splitlines()
        for line in lines:
            line = line.strip()
            if line.startswith('Rule:'):
                # e.g. "Rule: table=0 cookie=0x.. priority=...,in_port=..,..."
                rule_dict = dict()
                tmp = line.split(' ')
                rule_dict['table'] = int(tmp[1].split('=')[1])
                rule_dict['cookie'] = tmp[2].split('=')[1]
                selector_dict = {}
                for col in tmp[3].split(','):
                    tmp = col.split('=')
                    if len(tmp) == 2:
                        if tmp[0] in ['priority']:
                            rule_dict[tmp[0]] = int(tmp[1])
                        elif tmp[0] in ['in_port']:
                            selector_dict[tmp[0]] = int(tmp[1])
                        else:
                            selector_dict[tmp[0]] = tmp[1]
                if len(selector_dict.keys()) > 0:
                    rule_dict['selector'] = selector_dict
            elif line.startswith('OpenFlow actions='):
                action_dict = dict()
                action_list = line.split('=')[1].split(',')
                for action in action_list:
                    if action.startswith('set_field'):
                        # "set_field:<value>-><type>"
                        type = action.split('->')[1]
                        value = action[action.find(':') + 1:action.find('-')]
                        action_dict[type] = value
                        if type == 'tun_dst':
                            # find next target
                            trace_conditions.cur_target_ip = sona_topology.get_openstack_info(
                                ' ' + value + ' ', 'ip')
                            trace_conditions.cur_target_hostname = sona_topology.get_openstack_info(
                                ' ' + value + ' ', 'hostname')
                        else:
                            trace_conditions.cond_dict[type] = value
                            if type == 'ip_dst':
                                # ip_dst supersedes any earlier nw_dst match
                                trace_conditions.cond_dict['nw_dst'] = ''
                    else:
                        if action.startswith('group:'):
                            # group action: next hop is the gateway node
                            trace_conditions.cur_target_ip = sona_topology.get_gateway_ip(
                            )
                            trace_conditions.cur_target_hostname = sona_topology.get_openstack_info(
                                ' ' + trace_conditions.cur_target_ip + ' ',
                                'hostname')
                        if len(line.split('=')) == 3:
                            # re-attach the value that the earlier split('=')
                            # chopped off (e.g. "goto_table:..=N")
                            action = action + '=' + line.split('=')[2]
                        LOG.info('action = ' + action)
                        tmp = action.split(':')
                        if action.startswith('group:') or action.startswith(
                                'goto_table:'):
                            action_dict[tmp[0]] = int(tmp[1])
                        elif len(tmp) < 2:
                            # bare action like "drop": key and value identical
                            action_dict[tmp[0]] = tmp[0]
                        else:
                            action_dict[tmp[0]] = tmp[1]
                rule_dict['actions'] = action_dict
                result_flow.append(rule_dict)
                if 'tun_dst' in line or 'group' in line:
                    retry_flag = True
                if 'output' in line or 'CONTROLLER' in line:
                    is_success = True
                    break
        return result_flow, retry_flag, is_success
    except:
        LOG.exception()
        return 'parsing error\n' + output
def db_initiation(cls, db_log):
    """(Re)create the full SONA sqlite DB and seed all monitored nodes.

    Removes any existing DB file, creates the ten monitoring tables, then
    inserts rows for every configured node of every system type listed in
    watchdog 'check_system'.  OPENSTACK nodes are registered twice-over,
    split into COMPUTE and GATEWAY sub-types.  Finishes by seeding the
    single HA-proxy row.  Exits the process on any failure.
    """
    try:
        db_path = CONF.base()['db_file']
        if os.path.isfile(db_path):
            os.remove(db_path)  # always start from an empty DB

        db_log.write_log("--- Initiating SONA DB ---")
        init_sql = [
            'CREATE TABLE ' + cls.NODE_INFO_TBL +
            '(nodename text primary key, ip_addr, username, type, sub_type)',
            'CREATE TABLE ' + cls.STATUS_TBL +
            '(nodename text primary key, ' + cls.item_list + ', time)',
            'CREATE TABLE ' + cls.RESOURCE_TBL +
            '(nodename text primary key, cpu real, memory real, disk real)',
            'CREATE TABLE ' + cls.REGI_SYS_TBL + '(url text primary key, auth)',
            'CREATE TABLE ' + cls.ONOS_TBL +
            '(nodename text primary key, applist, weblist, nodelist, port, openflow, cluster, traffic_stat)',
            'CREATE TABLE ' + cls.SWARM_TBL +
            '(nodename text primary key, node, service, ps)',
            'CREATE TABLE ' + cls.XOS_TBL +
            '(nodename text primary key, xos_status, synchronizer)',
            'CREATE TABLE ' + cls.OPENSTACK_TBL +
            '(nodename text primary key, sub_type, data_ip, of_id, hostname, docker, onosApp, routingTable, gw_ratio, vxlan_traffic, internal_traffic)',
            'CREATE TABLE ' + cls.HA_TBL + '(ha_key text primary key, stats)',
            'CREATE TABLE ' + cls.EVENT_TBL +
            '(nodename, item, grade, pre_grade, reason, time, PRIMARY KEY (nodename, item))'
        ]
        for sql in init_sql:
            sql_rt = cls.sql_execute(sql)
            if sql_rt != 'SUCCESS':
                db_log.write_log("DB initiation fail\n%s", sql_rt)
                sys.exit(1)

        db_log.write_log('Insert nodes information ...')
        for node_type in CONF.watchdog()['check_system']:
            if node_type == 'OPENSTACK':
                # openstack nodes come from two config lists with their own
                # sub_type so compute and gateway nodes can be told apart
                cls.sql_insert_nodes(
                    db_log, CONF_MAP[node_type.upper()]()['compute_list'],
                    str((CONF_MAP[node_type.upper()]()
                         )['account']).split(':')[0], node_type, 'COMPUTE')
                cls.sql_insert_nodes(
                    db_log, CONF_MAP[node_type.upper()]()['gateway_list'],
                    str((CONF_MAP[node_type.upper()]()
                         )['account']).split(':')[0], node_type, 'GATEWAY')
            else:
                # account is '<user>:<password>'; only the user part is stored
                cls.sql_insert_nodes(
                    db_log, CONF_MAP[node_type.upper()]()['list'],
                    str((CONF_MAP[node_type.upper()]()
                         )['account']).split(':')[0], node_type)

        # set ha proxy tbl: single row keyed 'HA'
        sql = 'INSERT INTO ' + \
            cls.HA_TBL + ' VALUES (\'' + 'HA' + '\', \'none\')'
        sql_rt = cls.sql_execute(sql)
        if sql_rt != 'SUCCESS':
            db_log.write_log(
                " [HA PROXY TABLE] Node data insert fail \n%s", sql_rt)
            sys.exit(1)
    except:
        LOG.exception()
def sql_insert_nodes(cls, db_log, node_list, username, type, sub_type='none'):
    """Register every node of *node_list* in all relevant monitoring tables.

    Each entry of node_list is a '<name>:<ip>' string.  Per node this
    seeds NODE_INFO_TBL, STATUS_TBL, one EVENT_TBL row per alarm item,
    and RESOURCE_TBL (cpu/mem/disk start at -1 = unknown); depending on
    *type* it additionally seeds ONOS_TBL, XOS_TBL + SWARM_TBL, or
    OPENSTACK_TBL.  Any insert failure logs and exits the process.

    NOTE(review): SQL is built by string concatenation; the values come
    from the local config file, but parameterized queries would be safer.
    """
    try:
        for node in node_list:
            # node is formatted as "<name>:<ip>"
            name, ip = str(node).split(':')
            db_log.write_log('Insert node [%s %s %s %s]', name, ip,
                             username, type)
            sql = 'INSERT INTO ' + cls.NODE_INFO_TBL + \
                  ' VALUES (\'' + name + '\', \'' + ip + '\', \'' + username + '\', \'' + type.upper() + '\', \'' + sub_type.upper() + '\')'
            sql_rt = cls.sql_execute(sql)
            if sql_rt != 'SUCCESS':
                db_log.write_log(
                    " [NODE TABLE] Node data insert fail \n%s", sql_rt)
                sys.exit(1)

            # set status tbl: every monitored item column starts as 'none'
            sql = 'INSERT INTO ' + cls.STATUS_TBL + \
                  ' VALUES (\'' + name + '\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', ' \
                  '\'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\')'
            sql_rt = cls.sql_execute(sql)
            if sql_rt != 'SUCCESS':
                db_log.write_log(
                    " [STATUS TABLE] Node data insert fail \n%s", sql_rt)
                sys.exit(1)

            # add Alarm Items: one EVENT_TBL row per event type of this node
            evt_list = DB.get_event_list(type)
            for item in evt_list:
                db_log.write_log('Insert item [%s %s]', name, item)
                sql = 'INSERT INTO ' + cls.EVENT_TBL + \
                      ' VALUES (\'' + name + '\',\'' + item + '\', \'none\', \'none\', \'none\', \'none\')'
                sql_rt = cls.sql_execute(sql)
                if sql_rt != 'SUCCESS':
                    db_log.write_log(
                        " [ITEM TABLE] Item data insert fail \n%s", sql_rt)
                    sys.exit(1)

            # set resource tbl: -1 marks cpu/memory/disk as not-yet-sampled
            sql = 'INSERT INTO ' + cls.RESOURCE_TBL + ' VALUES (\'' + name + '\', -1, -1, -1)'
            sql_rt = cls.sql_execute(sql)
            if sql_rt != 'SUCCESS':
                db_log.write_log(
                    " [RESOURCE TABLE] Node data insert fail \n%s", sql_rt)
                sys.exit(1)

            if type.upper() == 'ONOS':
                # set app tbl (ONOS-only per-node state row)
                sql = 'INSERT INTO ' + cls.ONOS_TBL + ' VALUES (\'' + name + '\', \'none\', \'none\', \'none\', ' \
                      '\'none\', \'none\', \'none\', \'none\')'
                sql_rt = cls.sql_execute(sql)
                if sql_rt != 'SUCCESS':
                    db_log.write_log(
                        " [APP TABLE] Node data insert fail \n%s", sql_rt)
                    sys.exit(1)
            elif type.upper() == 'XOS':
                # set xos tbl
                sql = 'INSERT INTO ' + cls.XOS_TBL + ' VALUES (\'' \
                      + name + '\', \'none\', \'none\')'
                sql_rt = cls.sql_execute(sql)
                if sql_rt != 'SUCCESS':
                    db_log.write_log(
                        " [XOS TABLE] Node data insert fail \n%s", sql_rt)
                    sys.exit(1)

                # set swarm tbl (XOS nodes also carry docker swarm state)
                sql = 'INSERT INTO ' + cls.SWARM_TBL + ' VALUES (\'' + name + '\', \'none\', \'none\', \'none\')'
                sql_rt = cls.sql_execute(sql)
                if sql_rt != 'SUCCESS':
                    db_log.write_log(
                        " [SWARM TABLE] Node data insert fail \n%s", sql_rt)
                    sys.exit(1)
            elif type.upper() == 'OPENSTACK':
                # set vrouter tbl; sub_type distinguishes COMPUTE/GATEWAY
                sql = 'INSERT INTO ' + cls.OPENSTACK_TBL + \
                      ' VALUES (\'' + name + '\', \'' + sub_type + '\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\')'
                sql_rt = cls.sql_execute(sql)
                if sql_rt != 'SUCCESS':
                    db_log.write_log(
                        " [VROUTER TABLE] Node data insert fail \n%s", sql_rt)
                    sys.exit(1)
    except:
        LOG.exception()
        # NOTE(review): this chunk begins mid-function — the enclosing 'def'
        # (sql_execute, judging by the callers in this file) and the loop
        # that 'i' belongs to are outside this view.
        i = i + 1
        try:
            if conn == None:
                # no connection supplied: open a throw-away one per call
                with cls.connection() as conn:
                    conn.cursor().execute(sql)
                    conn.commit()
                    conn.close()
            else:
                conn.cursor().execute(sql)
                conn.commit()
            return 'SUCCESS'
        # Python 2 except-clause syntax: this file targets Python 2.x
        except sqlite3.OperationalError, err:
            LOG.error(err.message)
        except:
            LOG.exception()
        return 'FAIL'


# module-level singleton DB connection, opened once at import time
DB_CONN = DB().connection()

# maps a system-type name to the CONF callable returning its config section
CONF_MAP = {
    'ONOS': CONF.onos,
    'XOS': CONF.xos,
    'SWARM': CONF.swarm,
    'OPENSTACK': CONF.openstack,
    'HA': CONF.ha
}
def tperf3_output_parse(tperf_out_list):
    """Merge a list of per-stream iperf3 JSON outputs into one summary dict.

    Each element of *tperf_out_list* is either the raw JSON text produced
    by one iperf3 client run or the string 'fail'.  Counters are summed
    (bytes, bits_per_second, packets, lost_packets, num_streams) while
    timestamps/rates are averaged (start, end, seconds, jitter_ms,
    lost_percent, CPU utilization).

    Returns a dict carrying 'result': 'SUCCESS' plus 'test_options',
    'test_result' and 'cpu_utilization_percent' sections, or a
    {'result': 'FAIL', 'fail_reason': ...} dict on bad or missing output.
    """
    # Robustness fix: an empty input list used to crash with IndexError.
    if not tperf_out_list:
        return {'result': 'FAIL', 'fail_reason': 'No tperf output to parse'}

    protocols, num_streams, blksizes, durations, reverses = [], [], [], [], []
    starts, ends, seconds, nbytes, bps = [], [], [], [], []
    jitters, lost_pkts, pkts, lost_pct = [], [], [], []
    client_cpu, server_cpu = [], []

    # Note: unlike the previous version this does NOT overwrite the
    # caller's list elements with parsed dicts (no argument mutation).
    for raw in tperf_out_list:
        if raw == 'fail':
            return {'result': 'FAIL',
                    'fail_reason': 'Tperf command run fail on Client VM'}
        try:
            # collapse all whitespace before parsing: the output may be
            # captured with embedded prompts/newlines
            out = json.loads(' '.join(str(raw).split()))
            test_start = out['start']['test_start']
            end_sum = out['end']['sum']
            cpu = out['end']['cpu_utilization_percent']
            protocols.append(test_start['protocol'])
            num_streams.append(test_start['num_streams'])
            blksizes.append(test_start['blksize'])
            durations.append(test_start['duration'])
            reverses.append(test_start['reverse'])
            starts.append(float(end_sum['start']))
            ends.append(float(end_sum['end']))
            seconds.append(float(end_sum['seconds']))
            nbytes.append(float(end_sum['bytes']))
            bps.append(float(end_sum['bits_per_second']))
            jitters.append(float(end_sum['jitter_ms']))
            lost_pkts.append(float(end_sum['lost_packets']))
            pkts.append(float(end_sum['packets']))
            lost_pct.append(float(end_sum['lost_percent']))
            client_cpu.append(float(cpu['host_total']))
            server_cpu.append(float(cpu['remote_total']))
        except (ValueError, KeyError, TypeError):
            # bad JSON or missing field: same FAIL contract as before,
            # but without a bare except swallowing unrelated errors
            LOG.exception()
            return {'result': 'FAIL', 'fail_reason': ''}

    n = len(tperf_out_list)
    return {
        'test_options': {
            'protocol': protocols[0],
            'parallel': sum(num_streams),
            'blksize': blksizes[0],
            'duration': durations[0],
            'reverse': reverses[0],
        },
        'test_result': {
            'start': sum(starts) / n,
            'end': sum(ends) / n,
            'seconds': sum(seconds) / n,
            'bytes': sum(nbytes),
            'bits_per_second': sum(bps),
            'jitter_ms': sum(jitters) / n,
            'lost_packets': sum(lost_pkts),
            'packets': sum(pkts),
            'lost_percent': sum(lost_pct) / n,
        },
        'cpu_utilization_percent': {
            'client_total': sum(client_cpu) / n,
            'server_total': sum(server_cpu) / n,
        },
        'result': 'SUCCESS',
    }