def install_switch_bsp():
    """Build and install the APS BSP via CMake into SDE_INSTALL.

    Validates that the BSP package named in settings.yaml matches the
    configured switch model before building.

    Returns:
        True on success; terminates the process if the BSP is incompatible.
    """
    set_sde_env()
    aps_bsp_installation_file = get_aps_bsp_pkg_abs_path()
    model = get_switch_model()
    bsp_lower = aps_bsp_installation_file.lower()
    # A BF6064 switch must not get a BF2556 BSP and vice versa.  The
    # case-insensitive check subsumes the original 'BF2556'/'bf2556' pair.
    if (model == constants.bf6064x_t and 'bf2556' in bsp_lower) \
            or (model == constants.bf2556x_1t and 'bf6064' in bsp_lower):
        print("ERROR: Incompatible BSP provided in settings.yaml,"
              " Switch model is {model} but BSP is {bsp}".format(
                  model=model, bsp=aps_bsp_installation_file))
        # BUG FIX: exit with a non-zero status on error (was exit(0),
        # which signals success to the caller/shell).
        exit(1)
    print("Installing {}".format(aps_bsp_installation_file))
    os.environ['BSP_INSTALL'] = get_env_var('SDE_INSTALL')
    print("BSP_INSTALL directory set to {}".format(os.environ['BSP_INSTALL']))
    install_bsp_deps()
    cmake_cmd = 'cmake -DCMAKE_INSTALL_PREFIX={}'.format(
        get_env_var('SDE_INSTALL'))
    cmake_cmd += ' -B ' + aps_bsp_installation_file
    cmake_cmd += ' -S ' + aps_bsp_installation_file
    execute_cmd(cmake_cmd)
    os.system("make -C {0}".format(aps_bsp_installation_file))
    os.system("make -C {0} install".format(aps_bsp_installation_file))
    return True
def install_switch_bsp():
    """Install the APS BSP by merging it into the reference BSP tree.

    Steps: unzip the APS BSP, untar the reference BSP, copy the APS
    platform files over the reference tree's platforms/apsn directory,
    apply a patch, then autoconf/configure/make/install into SDE_INSTALL.

    Returns:
        True on completion (build failures are not checked here).
    """
    set_sde_env_n_load_drivers()
    aps_bsp_installation_file = get_aps_bsp_pkg_abs_path()
    print("Installing {}".format(aps_bsp_installation_file))
    # Unpack the APS BSP zip next to the archive itself.
    aps_zip = zipfile.ZipFile(aps_bsp_installation_file)
    aps_zip.extractall(Path(aps_bsp_installation_file).parent)
    # First namelist entry is assumed to be the archive's root directory —
    # TODO confirm this holds for all BSP zips.
    aps_bsp_dir = aps_zip.namelist()[0] + '/apsn/'
    aps_bsp_dir_absolute = str(
        Path(aps_bsp_installation_file).parent) + '/' + aps_bsp_dir
    aps_zip.close()
    # Unpack the Barefoot reference BSP tarball alongside it.
    ref_bsp_tar = tarfile.open(get_ref_bsp_abs_path())
    ref_bsp_tar.extractall(Path(get_ref_bsp_abs_path()).parent)
    ref_bsp_dir = ref_bsp_tar.getnames()[0]
    os.chdir(
        str(Path(get_ref_bsp_abs_path()).parent) + '/' + ref_bsp_dir +
        '/packages')
    # Find the platform sub-tarball inside the reference BSP's packages dir.
    pltfm_tar_name = ''
    for f in os.listdir('./'):
        if f.endswith('.tgz'):
            pltfm_tar_name = f
    pltfm_tar = tarfile.open(pltfm_tar_name)
    pltfm_tar.extractall()
    bf_pltfm_dir = str(Path(
        get_ref_bsp_abs_path()).parent) + '/' + ref_bsp_dir + '/packages/' + \
        pltfm_tar.getnames()[0]
    aps_pltfm_dir = bf_pltfm_dir + '/platforms/apsn/'
    # Replace any stale apsn platform directory with the freshly unzipped one.
    if os.path.exists(aps_pltfm_dir):
        shutil.rmtree(aps_pltfm_dir)
    shutil.copytree(aps_bsp_dir_absolute, aps_pltfm_dir)
    pltfm_tar.close()
    ref_bsp_tar.close()
    os.chdir(bf_pltfm_dir)
    # Apply the APS diff on top of the reference platform tree.
    os.system('patch -p1 < {0}/{1}'.format(
        str(Path(aps_bsp_installation_file).parent), get_diff_file_name()))
    # os.environ['BSP'] = os.getcwd()
    # print("BSP home directory set to {}".format(os.environ['BSP']))
    os.environ['BSP_INSTALL'] = get_env_var('SDE_INSTALL')
    print("BSP_INSTALL directory set to {}".format(os.environ['BSP_INSTALL']))
    install_bsp_deps()
    os.system("autoreconf && autoconf")
    os.system("chmod +x ./autogen.sh")
    # Thrift support is enabled for every profile except the stratum one.
    thrift_flag = ''
    if get_p4_studio_build_profile_name() != stratum_profile:
        thrift_flag = '--enable-thrift'
    if get_switch_model() == constants.bf2556x_1t:
        execute_cmd("CFLAGS=-Wno-error ./configure --prefix={0} {1} "
                    "--with-tof-brgup-plat".format(os.environ['BSP_INSTALL'],
                                                   thrift_flag))
    else:
        execute_cmd("CFLAGS=-Wno-error ./configure --prefix={0} {1}".format(
            os.environ['BSP_INSTALL'], thrift_flag))
    os.system("make")
    os.system("sudo make install")
    # NOTE(review): `dname` is not defined in this function — presumably a
    # module-level "original directory" global; confirm it exists.
    os.chdir(dname)
    return True
def load_and_verify_kernel_modules():
    """Load the common kernel modules and verify they are present.

    Loads i2c-i801/i2c-dev and bf_kdrv, re-checks lsmod, then chains to
    the switch-model-specific loader.

    Returns:
        True when all required modules/drivers are in place.
    """
    lsmod_before = execute_cmd('lsmod')
    kdrv_ok = True
    i801_ok = True
    os.system("sudo modprobe -q i2c-i801")
    os.system("sudo modprobe -q i2c-dev")
    if 'bf_kdrv' not in lsmod_before:
        load_bf_kdrv()
    lsmod_after = execute_cmd('lsmod')
    # i2c_i801 appears in lsmod output only on Ubuntu, hence the extra check.
    if is_ubuntu() and 'i2c_i801' not in lsmod_after:
        i801_ok = False
        print('ERROR:i2c_i801 is not loaded.')
    if 'bf_kdrv' not in lsmod_after:
        kdrv_ok = False
        print("ERROR:bf_kdrv is not loaded.")
    # Load switch specific kernel modules (short-circuits if common ones
    # failed, matching the original behavior).
    if get_switch_model_from_settings() == constants.bf2556x_1t:
        return kdrv_ok and i801_ok and load_and_verify_kernel_modules_bf2556()
    return kdrv_ok and i801_ok and load_and_verify_kernel_modules_bf6064()
def execute(db_util, directory):
    """Import the baseline schema and dictionary data into the database.

    Args:
        db_util: connection descriptor exposing host/port/user/password/
            database string attributes.
        directory: root directory containing the 'baseline' folder.
    """
    logger.info('start importing db schema ...')
    # NOTE(review): the password is passed on the mysql command line, which
    # exposes it to other local users via the process list — consider
    # --defaults-extra-file or MYSQL_PWD instead.
    cmd_template = ''.join([
        'mysql -h', db_util.host, ' -P', db_util.port, ' -u', db_util.user,
        ' -p', db_util.password, ' --default-character-set=utf8', ' ',
        db_util.database, ' < {}'
    ])
    baseline_home = os.sep.join([directory, 'baseline'])
    cmd = cmd_template.format(os.sep.join([baseline_home, 'schema.sql']))
    if not common.execute_cmd(cmd):
        # BUG FIX: Logger.warn is a deprecated alias; use warning().
        logger.warning('found error, abort ... ')
        return
    logger.info('start importing dictionary data ...')
    # dictionary.txt lists one table name per line; skipped lines (comments
    # etc.) are filtered by common.is_skipped_line.
    with open(os.sep.join([baseline_home, 'dictionary', 'dictionary.txt']),
              'r') as dictionary_list:
        for dictionary in dictionary_list.read().splitlines():
            if common.is_skipped_line(dictionary):
                continue
            cmd = cmd_template.format(
                os.sep.join([baseline_home, 'dictionary',
                             dictionary + '.sql']))
            logger.info('importing dictionary data [%s]', dictionary)
            if not common.execute_cmd(cmd):
                logger.info('found error, abort ... ')
                return
    logger.info(common.adjust_message('import dictionary data completely'))
def execute(db_util, script_home):
    """Dump the database schema and dictionary table data to baseline files.

    Writes schema.sql plus one <table>.sql per entry in
    baseline/dictionary/dictionary.txt.
    """
    out_dir = os.sep.join([script_home, 'baseline'])
    check_output_path(out_dir)
    logger.info('start dumping database schema ...')
    schema_target = os.sep.join([out_dir, 'schema.sql'])
    dump_schema_cmd = ''.join([
        'mysqldump --skip-add-drop-table -d -h', db_util.host,
        ' -P', db_util.port, ' -u', db_util.user, ' -p', db_util.password,
        ' ', db_util.database, ' > ', schema_target
    ])
    if not common.execute_cmd(dump_schema_cmd):
        logger.info('found error during dumping, abort...')
        return
    logger.info('...........start dumping dictionary data...................')
    data_cmd_template = ''.join([
        'mysqldump -t --extended-insert --complete-insert -h', db_util.host,
        ' -P', db_util.port, ' -u', db_util.user, ' -p', db_util.password,
        ' ', db_util.database, ' {} > {}'
    ])
    listing_path = os.sep.join([out_dir, 'dictionary', 'dictionary.txt'])
    with open(listing_path, 'r') as dictionary_list:
        for table in dictionary_list.read().splitlines():
            if common.is_skipped_line(table):
                continue
            target = os.sep.join([out_dir, 'dictionary', table + '.sql'])
            cmd = data_cmd_template.format(table, target)
            logger.info(cmd)
            if not common.execute_cmd(cmd):
                logger.info('found error during dumping, abort...')
                return
    logger.info('...........dump dictionary data finished...........')
def apply_file_tag(item):
    """Attach a tag to a file via the external `_tag_file` helper.

    Args:
        item: a (filename, tag) pair.
    """
    fn, tag = item
    # Sanity check: the global tag map must have been populated first.
    assert len(file2tags) > 0
    fn = normalize_file(fn)
    try:
        print("adding '%s' into '%s'" % (tag, fn))
        cmd = '''_tag_file -s "%s" "%s"''' % (tag, fn)
        nlp.execute_cmd(cmd)
    # BUG FIX: a bare `except:` also swallows SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    except Exception:
        print("failed '%s'" % fn)
    print()
def clean_bsp():
    """Remove CMake build artifacts from the BSP package directory.

    Returns:
        True always (failures are reported by the underlying commands).
    """
    print('Cleaning BSP...')
    to_delete = [
        get_aps_bsp_pkg_abs_path() + f
        for f in ['/CMakeCache.txt', '/Makefile', '/CMakeFiles',
                  '/cmake-build-debug']
    ]
    execute_cmd('make -C {} clean'.format(get_aps_bsp_pkg_abs_path()))
    for file in to_delete:
        # BUG FIX: corrected misspelled log message ('Deteling').
        print('Deleting {}'.format(file))
        delete_files(file)
    return True
def main():
    """Run every registered command, parse its output, and dump the results.

    NOTE(review): Python 2 only — the tuple-unpacking lambda, dict.has_key
    and the print statement are all invalid under Python 3.
    """
    check_args()
    iteration = 0
    # Parser of any specific command might add more commands to be executed.
    # Hence continue in a loop.
    while True:
        if (all_commands_executed(commands) or iteration >= 10):
            break
        iteration += 1
        status_update('Iteration: ' + str(iteration))
        # Execute commands in their declared 'order'.
        sorted_keys = sorted(commands.items(), key=lambda (k, v): v['order'])
        for (cmd, dontcare) in sorted_keys:
            # Only collect stuff for which we have written a parser
            if commands[cmd]['parser']:
                if commands[cmd].get('done', False):
                    continue
                if commands[cmd].has_key('help'):
                    status_update(commands[cmd]['help'])
                shell = commands[cmd].get('shell', False)
                env = None
                if commands[cmd].get('env', False):
                    env = myenv
                sudo = commands[cmd].get('sudo', False)
                if deployment_type == 'multinode':
                    # handling for network node
                    if cmd.startswith('netns_'):
                        commands[cmd]['output'] = exec_on_remote(
                            commands[cmd]['cmd'])
                    # NOTE(review): this is `if`, not `elif` — a netns_
                    # command's output can be overwritten by the else branch.
                    if cmd == 'cat_instance':
                        commands[cmd]['output'] = get_vm_info_from_compute(
                            commands[cmd]['cmd'])
                        print commands[cmd]['output']
                    else:
                        commands[cmd]['output'] = execute_cmd(
                            commands[cmd]['cmd'], sudo=sudo, shell=shell,
                            env=env).split('\n')
                else:
                    commands[cmd]['output'] = execute_cmd(commands[cmd]['cmd'],
                                                          sudo=sudo,
                                                          shell=shell,
                                                          env=env).split('\n')
                commands[cmd]['parser'](commands[cmd]['output'])
                commands[cmd]['done'] = True
    debug('============= COMMANDS =============')
    #debug(pprint.pformat(commands))
    status_update('Writing collected info into ' + settings['info_file'])
    dump_json(info, settings['info_file'])
def run_sal(debug):
    """Launch the SAL reference application, optionally under gdb.

    Args:
        debug: when truthy, run the executable under gdb.

    Returns:
        True after the command has been issued.
    """
    print('Starting SAL reference application...')
    load_drivers()
    sal_home = get_env_var(constants.sal_home_env_var_name)
    sal_executable = sal_home + '/build/salRefApp'
    # Assemble LD_LIBRARY_PATH from SAL build/lib dirs plus the TP, SAL and
    # SDE install lib dirs.
    library_path = ':'.join([
        sal_home + '/build',
        sal_home + '/lib',
        get_env_var(constants.tp_install_env_var_name) + '/lib',
        get_env_var(constants.sal_home_env_var_name) + '/install/lib',
        get_env_var(constants.sde_install_env_var_name) + '/lib',
    ])
    debugger = 'gdb' if debug else ''
    sal_run_cmd = 'sudo -E LD_LIBRARY_PATH={0} {1} {2}'.format(
        library_path, debugger, sal_executable)
    print('Running SAL with command: {}'.format(sal_run_cmd))
    execute_cmd(sal_run_cmd)
    return True
def clean_sal():
    """Remove SAL build artifacts and generated directories.

    Returns:
        True always (failures are reported by the underlying commands).
    """
    print('Cleaning SAL...')
    sal_home = get_env_var(constants.sal_home_env_var_name)
    to_delete = [
        sal_home + f for f in [
            '/lib', '/bin', '/build', '/logs/', '/CMakeCache.txt',
            '/Makefile', '/CMakeFiles', '/cmake-build-debug'
        ]
    ]
    execute_cmd('make -C {} clean'.format(sal_home))
    for file in to_delete:
        # BUG FIX: corrected misspelled log message ('Deteling').
        print('Deleting {}'.format(file))
        delete_files(file)
    return True
def build_sal():
    """Configure SAL with CMake (-B/-S pointing at SAL_HOME) and build it.

    Returns:
        True after the build commands have been issued.
    """
    print('Building SAL...')
    sal_home = get_env_var(constants.sal_home_env_var_name)
    cmake_cmd = 'cmake ' + ' -B ' + sal_home + ' -S ' + sal_home
    print('Executing cmake command {}.'.format(cmake_cmd))
    execute_cmd(cmake_cmd)
    # Make needs the third-party install's lib dir on LD_LIBRARY_PATH.
    make_cmd = 'LD_LIBRARY_PATH={0}/lib:$LD_LIBRARY_PATH make -C {1}'.format(
        get_env_var(constants.tp_install_env_var_name), sal_home)
    execute_cmd(make_cmd)
    return True
def alloc_dma():
    """Set up huge pages for DMA unless sysctl.conf already configures them."""
    sysctl_conf = execute_cmd('cat /etc/sysctl.conf')
    # The setup script is only run when the exact hugepages entry is absent.
    if 'vm.nr_hugepages = 128' in sysctl_conf:
        return
    print('Setting up huge pages...')
    setup_script = 'sudo /{}/pkgsrc/ptf-modules/ptf-utils/dma_setup.sh'.format(
        get_env_var('SDE'))
    os.system(setup_script)
def main():
    """Run every registered command, parse its output, and dump the results.

    NOTE(review): Python 2 only — the tuple-unpacking lambda, dict.has_key
    and the print statement are all invalid under Python 3.
    """
    check_args()
    iteration = 0
    # Parser of any specific command might add more commands to be executed.
    # Hence continue in a loop.
    while True:
        if (all_commands_executed(commands) or iteration >= 10):
            break
        iteration += 1
        status_update('Iteration: ' + str(iteration))
        # Execute commands in their declared 'order'.
        sorted_keys = sorted(commands.items(), key=lambda (k, v): v['order'])
        for (cmd, dontcare) in sorted_keys:
            # Only collect stuff for which we have written a parser
            if commands[cmd]['parser']:
                if commands[cmd].get('done', False):
                    continue
                if commands[cmd].has_key('help'):
                    status_update(commands[cmd]['help'])
                shell = commands[cmd].get('shell', False)
                env = None
                if commands[cmd].get('env', False):
                    env = myenv
                sudo = commands[cmd].get('sudo', False)
                if deployment_type == 'multinode':
                    # handling for network node
                    if cmd.startswith('netns_'):
                        commands[cmd]['output'] = exec_on_remote(
                            commands[cmd]['cmd'])
                    # NOTE(review): this is `if`, not `elif` — a netns_
                    # command's output can be overwritten by the else branch.
                    if cmd == 'cat_instance':
                        commands[cmd]['output'] = get_vm_info_from_compute(commands[
                            cmd]['cmd'])
                        print commands[cmd]['output']
                    else:
                        commands[cmd]['output'] = execute_cmd(
                            commands[cmd]['cmd'], sudo=sudo, shell=shell,
                            env=env).split('\n')
                else:
                    commands[cmd]['output'] = execute_cmd(
                        commands[cmd]['cmd'], sudo=sudo, shell=shell,
                        env=env).split('\n')
                commands[cmd]['parser'](commands[cmd]['output'])
                commands[cmd]['done'] = True
    debug('============= COMMANDS =============')
    # debug(pprint.pformat(commands))
    status_update('Writing collected info into ' + settings['info_file'])
    dump_json(info, settings['info_file'])
def run_sal():
    """Start the SAL reference application with the runtime env configured.

    For the hardware profile, verifies the kernel modules are loaded first
    and aborts the process if they are not.

    Returns:
        True after the application has been launched.
    """
    print('Starting SAL reference application...')
    set_sal_runtime_env()
    if (get_selected_profile_name() == constants.sal_hw_profile_name
            and not load_and_verify_kernel_modules()):
        print("ERROR:Some kernel modules are not loaded.")
        # BUG FIX: exit with a non-zero status on error (was exit(0),
        # which signals success to the caller/shell).
        exit(1)
    sal_executable = sal_rel_dir + '/build/salRefApp'
    # LD_LIBRARY_PATH: SAL build/lib dirs plus installed SAL and SDE libs.
    sal_run_cmd = 'sudo -E LD_LIBRARY_PATH={0}:{1}:{2}:{3} {4}'.format(
        sal_rel_dir + '/build', sal_rel_dir + '/lib',
        get_env_var(constants.sal_home_env_var_name) + '/install/lib',
        get_sde_home_absolute() + '/install/lib', sal_executable)
    print('Running SAL with command: {}'.format(sal_run_cmd))
    execute_cmd(sal_run_cmd)
    return True
def build_sal():
    """Build SAL from SAL_HOME using an in-source CMake configure and make.

    Returns:
        True after the build commands have been issued.
    """
    print('Building SAL...')
    # TODO Need to fix this in SAL, to use dedicated boost libs.
    # BUG FIX: os.system() never raises FileNotFoundError (it returns an
    # exit status), so the previous try/except around it was dead code.
    os.system('sudo rm -rf {}'.format('/usr/local/include/boost'))
    os.chdir(get_env_var(constants.sal_home_env_var_name))
    cmake_cmd = 'cmake ' + get_env_var(constants.sal_home_env_var_name)
    print('Executing cmake command {}.'.format(cmake_cmd))
    execute_cmd(cmake_cmd)
    execute_cmd(
        'LD_LIBRARY_PATH={0}/lib:$LD_LIBRARY_PATH make -j -C {1}'.format(
            get_env_var(constants.tp_install_env_var_name),
            get_env_var(constants.sal_home_env_var_name)))
    return True
def get_vm_info_from_compute(cmd):
    """Run *cmd* (via sudo) on every hypervisor and gather the output lines.

    Args:
        cmd: shell command to execute on each compute node.

    Returns:
        List of output lines collected from all hypervisors.
    """
    hyper_listing = execute_cmd(['nova', 'hypervisor-list'], sudo=False,
                                shell=False, env=myenv).split('\n')
    compute_nodes = get_hypervisor(hyper_listing)
    vm_info = []
    compute_creds = get_vm_credentials()
    for node in compute_nodes:
        # Fall back to the 'default' credentials when the node has no entry.
        creds = compute_creds.get('hypervisor').get(
            node, compute_creds.get('hypervisor')['default'])
        ssh = connect_to_box(node, creds['username'], creds['password'])
        _stdin, out, _err = ssh.exec_command('sudo ' + cmd)
        vm_info.extend(out.read().splitlines())
        ssh.close()
    return vm_info
def load_and_verify_kernel_modules_bf6064():
    """Program the BF6064 I2C expanders via a sequence of i2cset commands.

    BUG FIX: the original passed all 18 commands to a single execute_cmd()
    call as one backslash-continued string literal; the backslash-newline
    escapes collapsed it into ONE command line, so every command after the
    first became extra arguments of the first i2cset invocation.  Each
    command is now executed on its own.

    Returns:
        True always (i2cset failures are not individually checked).
    """
    i2cset_commands = [
        'sudo i2cset -y 0 0x70 0x20',
        'sudo i2cset -y 0 0x32 0xE 0x0',
        'sudo i2cset -y 0 0x32 0xF 0x0',
        'sudo i2cset -y 0 0x34 0x2 0x0',
        'sudo i2cset -y 0 0x34 0x3 0x0',
        'sudo i2cset -y 0 0x34 0x4 0x0',
        'sudo i2cset -y 0 0x35 0x2 0x0',
        'sudo i2cset -y 0 0x35 0x3 0x0',
        'sudo i2cset -y 0 0x35 0x4 0x0',
        'sudo i2cset -y 0 0x70 0x20',
        'sudo i2cset -y 0 0x32 0x14 0xff',
        'sudo i2cset -y 0 0x32 0x15 0xff',
        'sudo i2cset -y 0 0x34 0xB 0xff',
        'sudo i2cset -y 0 0x34 0xC 0xff',
        'sudo i2cset -y 0 0x34 0xD 0xff',
        'sudo i2cset -y 0 0x35 0xB 0xff',
        'sudo i2cset -y 0 0x35 0xC 0xff',
        'sudo i2cset -y 0 0x35 0xD 0xff',
    ]
    for command in i2cset_commands:
        execute_cmd(command)
    return True
def install_switch_bsp():
    """Build and install the APS BSP, adapting CMake flags to its version.

    CMake >= 3.13 accepts the documented ` -B <dir> -S <dir>` form; older
    releases need the legacy `-B<dir>`/`-H<dir>` spelling.

    Returns:
        True on success; terminates the process if the BSP is incompatible.
    """
    set_sde_env()
    aps_bsp_installation_file = get_aps_bsp_pkg_abs_path()
    # Refuse a BSP package whose name does not match the switch model.
    if (get_switch_model() == constants.bf6064x_t and (
            'BF2556' in aps_bsp_installation_file
            or 'bf2556' in aps_bsp_installation_file)) \
            or (get_switch_model() == constants.bf2556x_1t and (
                'BF6064' in aps_bsp_installation_file
                or 'bf6064' in aps_bsp_installation_file)):
        print("ERROR: Incompatible BSP provided in settings.yaml,"
              " Switch model is {model} but BSP is {bsp}".format(
                  model=get_switch_model(), bsp=aps_bsp_installation_file))
        # BUG FIX: exit with a non-zero status on error (was exit(0)).
        exit(1)
    print("Installing {}".format(aps_bsp_installation_file))
    os.environ['BSP_INSTALL'] = get_env_var('SDE_INSTALL')
    print("BSP_INSTALL directory set to {}".format(os.environ['BSP_INSTALL']))
    install_bsp_deps()
    import re
    import subprocess
    out = subprocess.check_output("cmake --version", shell=True)
    version = re.search(r'version\s*([\d.]+)', str(out)).group(1)
    # BUG FIX: the previous lexicographic string comparison was wrong
    # ('3.9' >= '3.13' is True); compare version components numerically.
    version_tuple = tuple(int(p) for p in version.split('.') if p)
    cmake_cmd = 'cmake -DCMAKE_INSTALL_PREFIX={}'.format(
        get_env_var('SDE_INSTALL'))
    if version_tuple >= (3, 13):
        cmake_cmd += ' -B ' + aps_bsp_installation_file
        cmake_cmd += ' -S ' + aps_bsp_installation_file
    else:
        # Pre-3.13 CMake: -B<dir> plus the undocumented -H<dir> source flag.
        cmake_cmd += ' -B' + aps_bsp_installation_file
        cmake_cmd += ' -H' + aps_bsp_installation_file
    execute_cmd(cmake_cmd)
    os.system("make -C {0}".format(aps_bsp_installation_file))
    os.system("make -C {0} install".format(aps_bsp_installation_file))
    return True
def load_and_verify_kernel_modules_bf2556():
    """Install and verify the BF2556-specific pieces (irq_debug, mv_pipe).

    Returns:
        True when the irq_debug module is loaded and mv_pipe_config exists.
    """
    modules = execute_cmd('lsmod')
    if 'irq_debug' not in modules:
        install_irq_debug()
    if not os.path.exists("/delta/mv_pipe_config"):
        install_mv_pipe()
    # Verify that modules are loaded.
    modules = execute_cmd('lsmod')
    irq_debug_ok = 'irq_debug' in modules
    if not irq_debug_ok:
        print("ERROR:irq_debug is not loaded.")
    mv_pipe_ok = os.path.exists("/delta/mv_pipe_config")
    if not mv_pipe_ok:
        print("ERROR:mv_pipe_config not installed.")
    return irq_debug_ok and mv_pipe_ok
def get_vm_info_from_compute(cmd):
    """Execute *cmd* (via sudo) on each hypervisor and collect its output.

    Args:
        cmd: shell command to run on every compute node.

    Returns:
        List of output lines aggregated across all hypervisors.
    """
    listing = execute_cmd(['nova', 'hypervisor-list'], sudo=False,
                          shell=False, env=myenv).split('\n')
    nodes = get_hypervisor(listing)
    collected = []
    credentials = get_vm_credentials()
    for hypervisor in nodes:
        # Per-node credentials, falling back to the 'default' entry.
        node_creds = credentials.get('hypervisor').get(
            hypervisor, credentials.get('hypervisor')['default'])
        ssh = connect_to_box(hypervisor, node_creds['username'],
                             node_creds['password'])
        _stdin, out, _err = ssh.exec_command('sudo ' + cmd)
        collected.extend(out.read().splitlines())
        ssh.close()
    return collected
def exec_version(script_home, db_util, version_no, db_data=None):
    """Apply all not-yet-executed SQL scripts for one schema version.

    Reads <script_home>/<version_no>/script.txt, skips scripts already
    recorded in script_history (or listed in db_data), runs the rest via
    the mysql client, and records each successful run.

    Args:
        script_home: root directory containing per-version folders.
        db_util: database helper exposing connection attributes plus
            query() and execute_sql().
        version_no: version folder name / history key being applied.
        db_data: optional mapping of version -> list of already-executed
            script names, used instead of querying script_history.

    Returns:
        True when all scripts ran (or nothing to do); False on failure.
    """
    logger.info('start updating [%s] ...', version_no)
    version_directory = os.sep.join([script_home, version_no])
    script_path = os.sep.join([version_directory, 'script.txt'])
    logger.info(script_path)
    if os.path.exists(script_path):  # no script.txt -> nothing to apply
        db_list = []
        if db_data and version_no in db_data:
            db_list = db_data[version_no]
        else:
            temp_list = db_util.query(
                'select script from script_history where version = %s',
                [version_no])
            for i in temp_list:
                db_list.append(i[0])
        logger.info('read script.txt in [%s]', script_path)
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(script_path, 'r') as script_file:
            sql_list = script_file.read().splitlines()
        logger.info('script.txt has %s sql files', len(sql_list))
        for sql_file_name in sql_list:
            # BUG FIX: a blank line used to raise IndexError on [0];
            # skip empty lines as well as '#' comment lines.
            if not sql_file_name or sql_file_name[0] == '#':
                continue
            sql_file = os.sep.join([version_directory, sql_file_name])
            if sql_file_name in db_list:
                logger.info(
                    'The script file [%s] was executed before, just ignore it!',
                    sql_file)
            else:
                logger.info('start running script file [%s] ...', sql_file)
                cmd = 'mysql ' + \
                      '-h' + db_util.host + \
                      ' -P' + db_util.port + \
                      ' -u' + db_util.user + \
                      ' -p' + db_util.password + \
                      ' ' + db_util.database + \
                      ' < ' + sql_file
                if common.execute_cmd(cmd):
                    # Record the successful run so it is skipped next time.
                    sql = ('insert into script_history(version, script, '
                           'create_time) values(%s, %s, now())')
                    db_util.execute_sql(sql, [version_no, sql_file_name])
                else:
                    # todo ask user whether execute other scripts
                    return False
    else:
        logger.info('Did not find script.txt in directory [%s].', script_path)
    return True
def ovs_test(src_port_id, dst_port_id, tag, ovs_bridge):
    """Verify OVS MAC learning and forwarding between two ports.

    Flushes the fdb, injects a trace packet to learn a source MAC on
    src_port_id, checks it was learnt on the expected vlan, then traces a
    reply from dst_port_id and checks it is forwarded back to src_port_id.
    All commands and results are appended to the module-level output_dict.

    Returns:
        True when learning and forwarding behave as expected, else False.
    """
    smac = 'AA:BB:CC:DD:EE:11'
    dmac = 'AA:BB:CC:DD:EE:22'
    cmd_dict = {}
    # Step 0. Flush the fdb
    cmd = ''
    cmd += 'sudo ovs-appctl fdb/flush br-int'
    output = execute_cmd(cmd, shell=True).split('\n')
    cmd_dict['cmd'] = cmd
    cmd_dict['output'] = output
    output_dict['command_list'].append(cmd_dict)
    cmd_dict = {}
    # Step 1. run command that will learn smac
    cmd = ''
    cmd += 'sudo ovs-appctl ofproto/trace %s in_port=%s' % (
        ovs_bridge, src_port_id)
    cmd += ',dl_src=' + smac + ',dl_dst=' + dmac + ' -generate'
    output = execute_cmd(cmd, shell=True).split('\n')
    cmd_dict['cmd'] = cmd
    cmd_dict['output'] = output
    output_dict['command_list'].append(cmd_dict)
    cmd_dict = {}
    # Step 2. verify that the mac has been learnt
    cmd = ''
    cmd += 'sudo ovs-appctl fdb/show br-int'
    output = execute_cmd(cmd, shell=True).split('\n')
    cmd_dict['cmd'] = cmd
    cmd_dict['output'] = output
    output_dict['command_list'].append(cmd_dict)
    cmd_dict = {}
    port = None
    for line in output:
        # fdb/show line format: <port> <vlan> <mac> <age>
        m = re.search('(\d)\s+(\d+)\s+(\S+)\s+\d+', line)
        if m:
            mac = m.group(3)
            if mac.lower() == smac.lower():
                port = m.group(1)
                vlan = m.group(2)
                debug(line)
                break
    if not port:
        output_dict['errors'].append(
            '%s not learnt on port %s' % (smac, src_port_id))
        output_dict['pass'] = False
        return False
    if vlan != tag:
        output_dict['errors'].append(
            '%s learnt on vlan %s but should have been learnt on vlan %s on port %s'
            % (smac, vlan, tag, port))
        output_dict['pass'] = False
        return False
    output_dict['debugs'].append(
        '%s learnt on expected vlan %s on port %s' % (smac, vlan, port))
    # Step 3. now do a lookup using the dst port id and dmac as the smac of
    # step 1.
    cmd = ''
    cmd += 'sudo ovs-appctl ofproto/trace %s in_port=%s' % (
        ovs_bridge, dst_port_id)
    cmd += ',dl_src=' + dmac + ',dl_dst=' + smac + ' -generate'
    output = execute_cmd(cmd, shell=True).split('\n')
    cmd_dict['cmd'] = cmd
    cmd_dict['output'] = output
    output_dict['command_list'].append(cmd_dict)
    cmd_dict = {}
    forwarded = False
    egress_port = None
    for line in output:
        if re.search('forwarding to learned port', line):
            forwarded = True
            continue
        m = re.search('Datapath actions: (.*)', line)
        if m:
            egress_port = m.group(1)
            continue
    result = True
    if not forwarded:
        output_dict['errors'].append('Packet for learnt mac not forwarded!')
        result = False
    else:
        output_dict['debugs'].append(
            'Packet for learnt mac forwarded properly')
    if egress_port:
        if egress_port == src_port_id:
            output_dict['debugs'].append(
                'Packet forwarded to correct port %s' % egress_port)
        else:
            output_dict['errors'].append(
                'Packet forwarded to incorrect port %s, expected %s'
                % (egress_port, src_port_id))
            result = False
    else:
        output_dict['errors'].append(
            'No egress port assigned to packet! Expected %s' % src_port_id)
        result = False
    output_dict['pass'] = result
    return result
def test(self):
    """Verify OVS MAC learning/forwarding between self.src/dst ports.

    Same flow as the standalone ovs_test helper, but bound to instance
    attributes (src_port_id, dst_port_id, src_port_tag) and logging via
    error()/debug() instead of an output dict.

    Returns:
        True when learning and forwarding behave as expected, else False.
    """
    command_list = []
    smac = 'AA:BB:CC:DD:EE:11'
    dmac = 'AA:BB:CC:DD:EE:22'
    # Step 1. run command that will learn smac
    cmd = ''
    cmd += 'sudo ovs-appctl ofproto/trace br-int in_port=' + self.src_port_id
    cmd += ',dl_src=' + smac + ',dl_dst=' + dmac + ' -generate'
    output = execute_cmd(cmd, shell=True).split('\n')
    command_list.append((cmd, output))
    # Step 2. verify that the mac has been learnt
    cmd = ''
    cmd += 'sudo ovs-appctl fdb/show br-int'
    output = execute_cmd(cmd, shell=True).split('\n')
    command_list.append((cmd, output))
    port = None
    for line in output:
        # fdb/show line format: <port> <vlan> <mac> <age>
        m = re.search('(\d)\s+(\d+)\s+(\S+)\s+\d+', line)
        if m:
            mac = m.group(3)
            if mac.lower() == smac.lower():
                port = m.group(1)
                vlan = m.group(2)
                debug(line)
                break
    if not port:
        error('%s not learnt on port %s' % (smac, self.src_port_id))
        return False
    if vlan != self.src_port_tag:
        error(
            '%s learnt on vlan %s but should have been learnt on vlan %s on port %s'
            % (smac, vlan, self.src_port_tag, port))
        return False
    debug('%s learnt on expected vlan %s on port %s' % (smac, vlan, port))
    # Step 3. now do a lookup using the dst port id and dmac as the smac of step 1.
    cmd = ''
    cmd += 'sudo ovs-appctl ofproto/trace br-int in_port=' + self.dst_port_id
    cmd += ',dl_src=' + dmac + ',dl_dst=' + smac + ' -generate'
    output = execute_cmd(cmd, shell=True).split('\n')
    command_list.append((cmd, output))
    forwarded = False
    egress_port = None
    for line in output:
        if re.search('forwarding to learned port', line):
            forwarded = True
            continue
        m = re.search('Datapath actions: (.*)', line)
        if m:
            egress_port = m.group(1)
            continue
    result = True
    if not forwarded:
        error('Packet for learnt mac not forwarded!')
        result = False
    else:
        debug('Packet for learnt mac forwarded properly')
    if egress_port:
        if egress_port == self.src_port_id:
            debug('Packet forwarded to correct port %s' % egress_port)
        else:
            error('Packet forwarded to incorrect port %s, expected %s' %
                  (egress_port, self.src_port_id))
            result = False
    else:
        error('No egress port assigned to packet! Expected %s' %
              self.src_port_id)
        result = False
    debug(pprint.pformat(command_list))
    return result
def path_same_network(params, nms_hops=None):
    """Trace a ping between two IPs on the same network and dump results.

    Launches ping from inside the qrouter namespace, captures packets at
    the source/destination (and network hops when src/dst vlans differ),
    post-processes the captures, and writes a JSON summary to
    params['path_file'].

    NOTE(review): relies on module-level src_info/dst_info/net_info being
    populated by the capture helpers — confirm their lifecycle.

    Args:
        params: dict with src_ip, dst_ip, json_file, username, passwd,
            count, timeout, qrouter, router, path_file, plot keys.
        nms_hops: optional network hop info forwarded to
            capture_network_packets.
    """
    src_ip = params['src_ip']
    dst_ip = params['dst_ip']
    json_file = params['json_file']
    username = params['username']
    passwd = params['passwd']
    count = params['count']
    timeout = params['timeout']
    qrouter = params['qrouter']
    router = params['router']
    if qrouter_usable(qrouter, src_ip, dst_ip, username, passwd):
        outfile = 'path.ping.txt'
        # Ping runs in the background while tcpdump captures are active.
        ping_process = launch_ping(src_ip, dst_ip, username, passwd, count,
                                   timeout, qrouter, outfile)
        debug('Ping started with pid: %d' % ping_process.pid)
        capture_packets(params, 'src')
        capture_packets(params, 'dst', src_tag=src_info['tag'])
        # Different vlan tags means traffic crosses the network fabric.
        if src_info['tag'] != dst_info['tag']:
            capture_network_packets(params, nms_hops)
        status_update(
            'Waiting %s sec for tcpdump and ping processes to complete' %
            (params['count'] + 2))
        time.sleep(params['count'] + 4)
        status_update('if processes have not stopped, lets kill them')
        cleanup_processes([ping_process.pid] + src_info['pids'] +
                          dst_info['pids'])
        if net_info:
            cleanup_processes(net_info['pids'])
        process_captures('src')
        process_captures('dst')
        if src_info['tag'] != dst_info['tag']:
            process_network_captures()
        ping_pass = process_ping(outfile)
        debug(pprint.pformat(src_info))
        debug(pprint.pformat(dst_info))
        debug(pprint.pformat(net_info))
        info = {
            'src': src_ip,
            'dst': dst_ip,
            'src_info': src_info,
            'dst_info': dst_info,
            'net_info': net_info,
            'ping_pass': ping_pass,
            'error': '',
        }
        status_update('Dumping results into %s in JSON format' %
                      params['path_file'])
        dump_json(info, params['path_file'])
        if params['plot']:
            cmd = 'python plot.py --info_file %s --highlight_file %s --combined_file static/ping' % (json_file, params['path_file'])
            status_update('Running ' + cmd)
            output = execute_cmd(cmd, shell=True).split('\n')
            debug(pprint.pformat(output))
        status_update('Done')
    else:
        # The qrouter namespace cannot reach the endpoints; record failure.
        err_msg = 'Cannot reach %s via router %s' % (src_ip, router)
        info = {
            'src': src_ip,
            'dst': dst_ip,
            'src_info': src_info,
            'dst_info': dst_info,
            'ping_pass': False,
            'error': err_msg
        }
        error(err_msg)
        status_update('Dumping results into %s in JSON format' %
                      params['path_file'])
        dump_json(info, params['path_file'])
        status_update('Done')
#!/usr/bin/env python3
# coding: utf8
"""Sync a directory between local '.' and a remote server using rsync."""
import optparse

import common as nlp
import _scp

if __name__ == "__main__":
    parser = optparse.OptionParser(usage="cmd srcDir targetDir")
    #parser.add_option("-q", "--quiet", action = "store_true", dest = "verbose",
    #default = False, help = "")
    parser.add_option("--exclude", dest="excludePattern", default=None)
    parser.add_option("-d", action="store_true", dest="delete", default=False)
    (options, args) = parser.parse_args()
    # BUG FIX: input validation used `assert`, which is stripped when Python
    # runs with -O; report a proper usage error instead.
    if len(args) != 2 or ("." != args[0] and "." != args[1]):
        parser.error("expected exactly two arguments, one of which is '.'")
    deleteOpt = "--delete" if options.delete else ""
    if options.excludePattern is not None:
        excludeOpt = f"--exclude={options.excludePattern}"
    else:
        excludeOpt = ""
    # '.' is kept as-is; the remote side gets its server prefix substituted.
    srcDir = _scp.replaceServer(args[0]) + "/"
    tgtDir = _scp.replaceServer(args[1]) + "/"
    cmd = f"rsync -ravutzhl --progress -e ssh " \
          f"{srcDir} {tgtDir} {excludeOpt} {deleteOpt}"
    print(cmd)
    nlp.execute_cmd(cmd)
def ping(request):
    """Django view: run a ping path trace between two instances and plot it.

    POST: validate the PingForm, run path.path() with the selected
    src/dst/router, and render the combined SVG result page.
    GET: populate the form's select widgets from `nova list` and
    `neutron router-list` output.
    """
    # if this is a POST request we need to process the form data
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = PingForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            # process the data in form.cleaned_data as required
            # ...
            # redirect to a new URL:
            src_ip = form.cleaned_data['src_ip']
            dst_ip = form.cleaned_data['dst_ip']
            router = form.cleaned_data['router']
            # html = '<html><body>SIP: %s DIP: %s router: %s</body></html>' % (src_ip, dst_ip, router)
            # return HttpResponse(html)
            static_path = settings.STATIC_ROOT
            pwd = settings.ROOT_PATH
            JSON_FILE = pwd + '/don/ovs/don.json'
            # NOTE(review): guest credentials are hard-coded here (username
            # appears redacted); they should come from configuration.
            params = {
                'json_file': pwd + '/don/ovs/don.json',
                'src_ip': src_ip,
                'dst_ip': dst_ip,
                'router': router,
                'path_file': static_path + '/don/ping.html',
                'username': '******',
                'passwd': 'cubswin:)',
                'count': 2,
                'timeout': 2,
                'debug': True,
                'plot': False,
            }
            response = path.path(params)
            # A truthy response from path.path() is an error message.
            if response:
                error_text = response
                messages.error(request, error_text)
                return render(request, 'don/ovs/ping.html', {'form': form})
            JSON_FILE = pwd + '/don/ovs/don.json'
            COMPUTE_DOT_FILE = None
            COMPUTE_SVG_FILE = None
            NETWORK_DOT_FILE = None
            NETWORK_SVG_FILE = None
            COMBINED_DOT_FILE = static_path + '/don/ping.dot'
            COMBINED_SVG_FILE = static_path + '/don/ping.svg'
            # HIGHLIGHT_FILE = pwd + '/don/ovs/static/ping.html'
            HIGHLIGHT_FILE = static_path + '/don/ping.html'
            plotter = DotGenerator(
                JSON_FILE,
                COMPUTE_DOT_FILE,
                COMPUTE_SVG_FILE,
                NETWORK_DOT_FILE,
                NETWORK_SVG_FILE,
                COMBINED_DOT_FILE,
                COMBINED_SVG_FILE,
                HIGHLIGHT_FILE,
            )
            plotter.plot_combined()
            plotter.generate_combined_svg()
            # return HttpResponseRedirect('/static/path.html')
            return render(request, 'don/ovs/path.html')
    # if a GET (or any other method) we'll create a blank form
    else:
        form = PingForm()
    BASE_DIR = settings.ROOT_PATH + '/don/ovs/'
    myenv = os.environ.copy()
    myenv.update(get_env(BASE_DIR + 'admin-openrc.sh'))
    output = execute_cmd(['nova', 'list'], sudo=False, shell=False,
                         env=myenv).split('\n')
    ip_list = get_instance_ips(output)
    ip_list.sort()
    router_op = execute_cmd(['neutron', 'router-list'], sudo=False,
                            shell=False, env=myenv).split('\n')
    router_list = get_router_names(router_op)
    router_list.sort()
    # insert first value of select menu
    # NOTE(review): under Python 3 zip() returns a one-shot iterator, so
    # ip_opt is exhausted after the first widget assignment — confirm
    # intended Python version or materialize with list().
    ip_opt = zip(ip_list, ip_list)
    router_opt = zip(router_list, router_list)
    # ip_opt.insert(0,('','Select IP address'))
    # router_opt.insert(0,('','Select Router'))
    form.fields['src_ip'].widget.choices = ip_opt
    form.fields['dst_ip'].widget.choices = ip_opt
    form.fields['router'].widget.choices = router_opt
    return render(request, 'don/ovs/ping.html', {'form': form})
def ping(request):
    """Django view: run a ping path trace between two instances and plot it.

    POST: validate the PingForm, run path.path() with the selected
    src/dst/router, and render the combined SVG result page.
    GET: populate the form's select widgets from `nova list` and
    `neutron router-list` output.
    """
    # if this is a POST request we need to process the form data
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = PingForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            # process the data in form.cleaned_data as required
            # ...
            # redirect to a new URL:
            src_ip = form.cleaned_data['src_ip']
            dst_ip = form.cleaned_data['dst_ip']
            router = form.cleaned_data['router']
            #html = '<html><body>SIP: %s DIP: %s router: %s</body></html>' % (src_ip, dst_ip, router)
            #return HttpResponse(html)
            static_path = settings.STATIC_ROOT
            pwd = settings.ROOT_PATH
            JSON_FILE = pwd + '/don/ovs/don.json'
            # NOTE(review): guest credentials are hard-coded (cirros
            # defaults); they should come from configuration.
            params = {
                'json_file': pwd + '/don/ovs/don.json',
                'src_ip': src_ip,
                'dst_ip': dst_ip,
                'router': router,
                'path_file': static_path + '/don/ping.html',
                'username': 'cirros',
                'passwd': 'cubswin:)',
                'count': 2,
                'timeout': 2,
                'debug': True,
                'plot': False,
            }
            response = path.path(params)
            # A truthy response from path.path() is an error message.
            if response:
                error_text = response
                messages.error(request, error_text)
                return render(request, 'don/ovs/ping.html', {'form': form})
            JSON_FILE = pwd + '/don/ovs/don.json'
            COMPUTE_DOT_FILE = None
            COMPUTE_SVG_FILE = None
            NETWORK_DOT_FILE = None
            NETWORK_SVG_FILE = None
            COMBINED_DOT_FILE = static_path + '/don/ping.dot'
            COMBINED_SVG_FILE = static_path + '/don/ping.svg'
            # HIGHLIGHT_FILE = pwd + '/don/ovs/static/ping.html'
            HIGHLIGHT_FILE = static_path + '/don/ping.html'
            plotter = DotGenerator(JSON_FILE,
                                   COMPUTE_DOT_FILE,
                                   COMPUTE_SVG_FILE,
                                   NETWORK_DOT_FILE,
                                   NETWORK_SVG_FILE,
                                   COMBINED_DOT_FILE,
                                   COMBINED_SVG_FILE,
                                   HIGHLIGHT_FILE,
                                   )
            plotter.plot_combined()
            plotter.generate_combined_svg()
            # return HttpResponseRedirect('/static/path.html')
            return render(request, 'don/ovs/path.html')
    # if a GET (or any other method) we'll create a blank form
    else:
        form = PingForm()
    BASE_DIR = settings.ROOT_PATH + '/don/ovs/'
    myenv = os.environ.copy()
    myenv.update(get_env(BASE_DIR + 'admin-openrc.sh'))
    output = execute_cmd(['nova', 'list'], sudo=False,
                         shell=False, env=myenv).split('\n');
    ip_list = get_instance_ips(output)
    ip_list.sort()
    router_op = execute_cmd(['neutron', 'router-list'], sudo=False,
                            shell=False, env=myenv).split('\n');
    router_list = get_router_names(router_op)
    router_list.sort()
    # insert first value of select menu
    # NOTE(review): under Python 3 zip() returns a one-shot iterator, so
    # ip_opt is exhausted after the first widget assignment — confirm
    # intended Python version or materialize with list().
    ip_opt = zip(ip_list, ip_list)
    router_opt = zip(router_list, router_list)
    # ip_opt.insert(0,('','Select IP address'))
    # router_opt.insert(0,('','Select Router'))
    form.fields['src_ip'].widget.choices = ip_opt
    form.fields['dst_ip'].widget.choices = ip_opt
    form.fields['router'].widget.choices = router_opt
    return render(request, 'don/ovs/ping.html', {'form': form})
def gen_personlist(**kwargs):
    """Run the personlist_gen binary and return [command, output].

    kwargs are accepted for interface compatibility but unused here.
    """
    executable = os.path.join(common.bin_dir, "./personlist_gen")
    result = common.execute_cmd(executable)
    return [executable, result]
def ovs_test(src_port_id, dst_port_id, tag, ovs_bridge):
    """Check OVS MAC learning and forwarding between two ports.

    Steps:
      0. Flush the fdb so earlier runs cannot pollute this test.
      1. Inject a generated trace packet on ``src_port_id`` so the bridge
         learns ``smac``.
      2. Read ``fdb/show`` and verify ``smac`` was learnt on vlan ``tag``.
      3. Inject a reply packet on ``dst_port_id`` and verify it is
         forwarded back to the learnt port (``src_port_id``).

    Every command and its output is appended to the module-level
    ``output_dict['command_list']``; failures go to
    ``output_dict['errors']`` and ``output_dict['pass']``.  Returns True
    on success, False otherwise.
    """
    smac = 'AA:BB:CC:DD:EE:11'
    dmac = 'AA:BB:CC:DD:EE:22'
    cmd_dict = {}

    # Step 0. Flush the fdb.
    # NOTE(review): the bridge is hard-coded to br-int here (and in Step 2)
    # while the trace commands use ovs_bridge -- confirm this is intended.
    cmd = ''
    cmd += 'sudo ovs-appctl fdb/flush br-int'
    output = execute_cmd(cmd, shell=True).split('\n')
    cmd_dict['cmd'] = cmd
    cmd_dict['output'] = output
    output_dict['command_list'].append(cmd_dict)
    cmd_dict = {}

    # Step 1. run command that will learn smac
    cmd = ''
    cmd += 'sudo ovs-appctl ofproto/trace %s in_port=%s' % (ovs_bridge, src_port_id)
    cmd += ',dl_src=' + smac + ',dl_dst=' + dmac + ' -generate'
    output = execute_cmd(cmd, shell=True).split('\n')
    cmd_dict['cmd'] = cmd
    cmd_dict['output'] = output
    output_dict['command_list'].append(cmd_dict)
    cmd_dict = {}

    # Step 2. verify that the mac has been learnt
    cmd = ''
    cmd += 'sudo ovs-appctl fdb/show br-int'
    output = execute_cmd(cmd, shell=True).split('\n')
    cmd_dict['cmd'] = cmd
    cmd_dict['output'] = output
    output_dict['command_list'].append(cmd_dict)
    cmd_dict = {}

    port = None
    vlan = None
    # fdb/show lines look like: "port  VLAN  MAC  Age".
    # BUG FIX: the port column was matched with (\d), which silently fails
    # for ports >= 10; use (\d+).  Raw strings avoid invalid-escape
    # warnings on modern Python.
    for line in output:
        m = re.search(r'(\d+)\s+(\d+)\s+(\S+)\s+\d+', line)
        if m:
            mac = m.group(3)
            if mac.lower() == smac.lower():
                port = m.group(1)
                vlan = m.group(2)
                debug(line)
                break
    if not port:
        output_dict['errors'].append('%s not learnt on port %s' %
                                     (smac, src_port_id))
        output_dict['pass'] = False
        return False
    # NOTE(review): vlan is a string here, so tag must also be a string
    # for this comparison to behave -- confirm at the call site.
    if vlan != tag:
        output_dict['errors'].append(
            '%s learnt on vlan %s but should have been learnt on vlan %s on port %s' %
            (smac, vlan, tag, port))
        output_dict['pass'] = False
        return False
    output_dict['debugs'].append('%s learnt on expected vlan %s on port %s' %
                                 (smac, vlan, port))

    # Step 3. now do a lookup using the dst port id and dmac as the smac of step 1.
    cmd = ''
    cmd += 'sudo ovs-appctl ofproto/trace %s in_port=%s' % (ovs_bridge, dst_port_id)
    cmd += ',dl_src=' + dmac + ',dl_dst=' + smac + ' -generate'
    output = execute_cmd(cmd, shell=True).split('\n')
    cmd_dict['cmd'] = cmd
    cmd_dict['output'] = output
    output_dict['command_list'].append(cmd_dict)
    cmd_dict = {}

    forwarded = False
    egress_port = None
    for line in output:
        if re.search('forwarding to learned port', line):
            forwarded = True
            continue
        m = re.search(r'Datapath actions: (.*)', line)
        if m:
            egress_port = m.group(1)
            continue

    result = True
    if not forwarded:
        output_dict['errors'].append('Packet for learnt mac not forwarded!')
        result = False
    else:
        output_dict['debugs'].append(
            'Packet for learnt mac forwarded properly')
    if egress_port:
        if egress_port == src_port_id:
            output_dict['debugs'].append(
                'Packet forwarded to correct port %s' % egress_port)
        else:
            output_dict['errors'].append(
                'Packet forwarded to incorrect port %s, expected %s' %
                (egress_port, src_port_id))
            result = False
    else:
        output_dict['errors'].append(
            'No egress port assigned to packet! Expected %s' % src_port_id)
        result = False

    output_dict['pass'] = result
    return result
def test(self):
    """Verify OVS MAC learning and forwarding for this object's port pair.

    Injects a generated trace packet on ``self.src_port_id`` so ``smac``
    is learnt, verifies the fdb entry (port and vlan against
    ``self.src_port_tag``), then traces a reply from ``self.dst_port_id``
    and checks it egresses on ``self.src_port_id``.  All commands and
    their output are collected and dumped via ``debug`` at the end.
    Returns True on success, False otherwise.
    """
    command_list = []
    smac = 'AA:BB:CC:DD:EE:11'
    dmac = 'AA:BB:CC:DD:EE:22'

    # Step 1. run command that will learn smac
    cmd = ''
    cmd += 'sudo ovs-appctl ofproto/trace br-int in_port=' + self.src_port_id
    cmd += ',dl_src=' + smac + ',dl_dst=' + dmac + ' -generate'
    output = execute_cmd(cmd, shell=True).split('\n')
    command_list.append((cmd, output))

    # Step 2. verify that the mac has been learnt
    cmd = ''
    cmd += 'sudo ovs-appctl fdb/show br-int'
    output = execute_cmd(cmd, shell=True).split('\n')
    command_list.append((cmd, output))

    port = None
    vlan = None
    # fdb/show lines look like: "port  VLAN  MAC  Age".
    # BUG FIX: (\d) only matched single-digit ports; use (\d+).  Raw
    # strings avoid invalid-escape warnings on modern Python.
    for line in output:
        m = re.search(r'(\d+)\s+(\d+)\s+(\S+)\s+\d+', line)
        if m:
            mac = m.group(3)
            if mac.lower() == smac.lower():
                port = m.group(1)
                vlan = m.group(2)
                debug(line)
                break
    if not port:
        error('%s not learnt on port %s' % (smac, self.src_port_id))
        return False
    # NOTE(review): vlan is a string; self.src_port_tag must be a string
    # too for this comparison to behave -- confirm where it is set.
    if vlan != self.src_port_tag:
        error('%s learnt on vlan %s but should have been learnt on vlan %s on port %s' %
              (smac, vlan, self.src_port_tag, port))
        return False
    debug('%s learnt on expected vlan %s on port %s' % (smac, vlan, port))

    # Step 3. now do a lookup using the dst port id and dmac as the smac of step 1.
    cmd = ''
    cmd += 'sudo ovs-appctl ofproto/trace br-int in_port=' + self.dst_port_id
    cmd += ',dl_src=' + dmac + ',dl_dst=' + smac + ' -generate'
    output = execute_cmd(cmd, shell=True).split('\n')
    command_list.append((cmd, output))

    forwarded = False
    egress_port = None
    for line in output:
        if re.search('forwarding to learned port', line):
            forwarded = True
            continue
        m = re.search(r'Datapath actions: (.*)', line)
        if m:
            egress_port = m.group(1)
            continue

    result = True
    if not forwarded:
        error('Packet for learnt mac not forwarded!')
        result = False
    else:
        debug('Packet for learnt mac forwarded properly')
    if egress_port:
        if egress_port == self.src_port_id:
            debug('Packet forwarded to correct port %s' % egress_port)
        else:
            error('Packet forwarded to incorrect port %s, expected %s' %
                  (egress_port, self.src_port_id))
            result = False
    else:
        error('No egress port assigned to packet! Expected %s' %
              self.src_port_id)
        result = False

    debug(pprint.pformat(command_list))
    return result