def run(self):
    '''
    Run layer action evaluation.

    Checks:
    1) Action was successful
    2) Response was successful

    @return: True if successful, False otherwise.
    '''
    # We must begin on layer 0, i.e. an empty layer stack
    initial = i.control.cmd('getLayerState')()
    check(len(initial.stack) == 0)

    # Exercise the action against every ordering of its opposing action
    for permutation in itertools.permutations(self.action):
        # Apply the action in its declared order
        self.run_action(self.action)
        # Apply the permuted (opposing) ordering
        self.run_action(permutation)

        # Reset any lingering layer state between permutations
        self.clean()

    # We must end back on layer 0
    final = i.control.cmd('getLayerState')()
    check(len(final.stack) == 0)
    return True
def run_netconf_tests(jarfile='netconf-testtool-0.3.0-20150320.211342-654-executable.jar'):
    """Spawn a Netconf test-tool server, auto-provision a Netconf SFF and verify it.

    Starts the netconf-testtool jar with two simulated devices, posts a
    Netconf connector for the ``sfc-netconf`` device, verifies the resulting
    Service Function Forwarder via REST, then kills the server process.

    @param jarfile: path to the netconf-testtool executable jar. Defaults to
                    the previously hard-coded jar name so existing callers
                    are unaffected (matches the sibling variant that already
                    takes a jarfile argument).
    """
    process = None
    device_name = "sfc-netconf"
    print("Starting Netconf Server")
    try:
        process = subprocess.Popen([
            'java', '-Xmx1G', '-XX:MaxPermSize=256M', '-jar',
            jarfile,
            '--device-count', '2'])
        time.sleep(5)
    except (OSError, subprocess.CalledProcessError) as e:
        # Popen raises OSError (e.g. java not installed); the original
        # CalledProcessError clause was unreachable but is kept for
        # backward compatibility. OSError has no .output attribute.
        print(getattr(e, 'output', e))
        return
    # input("Press Enter to continue to Auto-Provisioning...")
    try:
        common.post_netconf_connector(
            common.NETCONF_CONNECTOR_URL,
            sfc_nrm.NETCONF_CONNECTOR_XML.format(device_name, "localhost"))
        time.sleep(5)
        common.check(
            common.SFF_ONE_URL.format(device_name),
            sfc_nrm.SERVICE_FUNCTION_FORWARDER_NETCONF_JSON,
            "Checking if Netconf SFF was created successfully")
        input("Press Enter to finish tests")
    except requests.exceptions.RequestException:
        print("Error sending POST request to spawn netconf connector \n")
    finally:
        # Always tear the test-tool server down, even on failure
        print("Finishing Tests...")
        process.kill()
    return
def run_netconf_tests(jarfile):
    """Start the netconf-testtool jar, provision a Netconf connector for a
    simulated device, verify the resulting SFF, then tear everything down.

    @param jarfile: path to the netconf-testtool executable jar
    """
    device_name = "sfc-netconf"
    process = None

    print("Starting Netconf Server")
    try:
        server_cmd = ['java', '-Xmx1G', '-XX:MaxPermSize=256M',
                      '-jar', jarfile, '--device-count', '2']
        process = subprocess.Popen(server_cmd)
        time.sleep(5)
    except subprocess.CalledProcessError as e:
        # NOTE(review): Popen normally raises OSError rather than
        # CalledProcessError — confirm this handler is reachable.
        print(e.output)
        return

    # input("Press Enter to continue to Auto-Provisioning...")
    try:
        connector_xml = sfc_nrm.NETCONF_CONNECTOR_XML.format(device_name, "localhost")
        common.post_netconf_connector(common.NETCONF_CONNECTOR_URL, connector_xml)
        time.sleep(5)
        common.check(
            common.SFF_ONE_URL.format(device_name),
            sfc_nrm.SERVICE_FUNCTION_FORWARDER_NETCONF_JSON,
            "Checking if Netconf SFF was created successfully")
        input("Press Enter to finish tests")
    except requests.exceptions.RequestException:
        print("Error sending POST request to spawn netconf connector \n")
    finally:
        # Always kill the test-tool server, pass or fail
        print("Finishing Tests...")
        process.kill()
    return
def run_rest_regression(host_ip):
    """Run the SFC REST regression suite, substituting *host_ip* into the
    service-function and forwarder payloads before configuring and
    verifying each SFC object over REST."""
    # Fill in the {ip} placeholder once, up front
    sf_json = sfc_drm.SERVICE_FUNCTIONS_JSON.replace("{ip}", host_ip)
    sff_json = sfc_drm.SERVICE_FUNCTION_FORWARDERS_JSON.replace("{ip}", host_ip)

    common.delete_configuration()

    # Configure and verify service functions
    common.put_and_check(common.SF_URL, sf_json, sf_json)
    common.check(common.SFT_URL, sfc_drm.SERVICE_FUNCTION_TYPE_JSON,
                 "Checking Service Function Type...")

    # Forwarders, chains, paths
    common.put_and_check(common.SFF_URL, sff_json, sff_json)
    common.put_and_check(common.SFC_URL, sfc_drm.SERVICE_CHAINS_JSON,
                         sfc_drm.SERVICE_CHAINS_JSON)
    common.put_and_check(common.SFP_URL, sfc_drm.SERVICE_PATH_JSON,
                         sfc_drm.SERVICE_PATH_JSON)

    # Render the service path and confirm operational state
    common.post_rpc(common.RSP_RPC_URL, sfc_drm.RENDERED_SERVICE_PATH_RPC_REQ,
                    sfc_drm.RENDERED_SERVICE_PATH_RPC_RESP)
    common.check(common.RSP_URL, sfc_drm.RENDERED_SERVICE_PATH_RESP_JSON,
                 "Checking RSP...")
    common.check(common.SFF_OPER_URL,
                 sfc_drm.SERVICE_FUNCTION_FORWARDERS_OPER_JSON,
                 "Checking SFF Operational State...")
    common.check(common.SF_OPER_URL, sfc_drm.SERVICE_FUNCTION_OPER_JSON,
                 "Checking SF Operational State...")

    # ACL and classifier
    common.put_and_check(common.IETF_ACL_URL, sfc_drm.IETF_ACL_JSON_IPV4,
                         sfc_drm.IETF_ACL_JSON_IPV4)
    common.put_and_check(common.SCF_URL, sfc_drm.SERVICE_CLASSIFIER_JSON,
                         sfc_drm.SERVICE_CLASSIFIER_JSON)
def main():
    """Launch the scheduled task processes appropriate to this environment."""
    # Check the local environment to decide which task set to launch
    if common.check():
        targets = (hour.hour_task, day.day_task)
    else:
        targets = (week.week_task,)

    # Start one worker process per selected task, in order
    for task in targets:
        multiprocessing.Process(target=task).start()
def run_rest_regression():
    """Run the bootstrap REST regression: configure each SFC object over
    REST, verify it via GET, then pause for interactive inspection."""
    print("Starting Test Execution")
    common.delete_configuration()

    # Service functions and their derived types
    common.put_and_check(common.SF_URL,
                         sfc_brrm.SERVICE_FUNCTIONS_JSON,
                         sfc_brrm.SERVICE_FUNCTIONS_JSON)
    common.check(common.SFT_URL,
                 sfc_brrm.SERVICE_FUNCTION_TYPE_JSON,
                 "Checking Service Function Type...")

    # Forwarders, chains and paths
    common.put_and_check(common.SFF_URL,
                         sfc_brrm.SERVICE_FUNCTION_FORWARDERS_JSON,
                         sfc_brrm.SERVICE_FUNCTION_FORWARDERS_JSON)
    common.put_and_check(common.SFC_URL,
                         sfc_brrm.SERVICE_CHAINS_JSON,
                         sfc_brrm.SERVICE_CHAINS_JSON)
    common.put_and_check(common.SFP_URL,
                         sfc_brrm.SERVICE_PATH_JSON,
                         sfc_brrm.SERVICE_PATH_JSON)

    # Render a service path and confirm operational state
    common.post_rpc(common.RSP_RPC_URL,
                    sfc_brrm.RENDERED_SERVICE_PATH_RPC_PATH_1_REQ,
                    sfc_brrm.RENDERED_SERVICE_PATH_RPC_PATH_1_RESP)
    common.check(common.RSP_URL,
                 sfc_brrm.RENDERED_SERVICE_PATH_RESP_JSON,
                 "Checking RSP...")
    common.check(common.SFF_OPER_URL,
                 sfc_brrm.SERVICE_FUNCTION_FORWARDERS_OPER_JSON,
                 "Checking SFF Operational State...")
    common.check(common.SF_OPER_URL,
                 sfc_brrm.SERVICE_FUNCTION_OPER_JSON,
                 "Checking SF Operational State...")

    # ACL plus one extra service path
    common.put_and_check(common.IETF_ACL_URL,
                         sfc_brrm.IETF_ACL_JSON,
                         sfc_brrm.IETF_ACL_JSON)
    common.put_and_check(common.SFP_ONE_URL.format("Path-3-SFC2"),
                         sfc_brrm.SERVICE_PATH_ADD_ONE_JSON,
                         sfc_brrm.SERVICE_PATH_ADD_ONE_RESP_JSON)

    input("Press Enter to finish tests")
def run_rest_regression(host_ip):
    """Run the Docker-host SFC REST regression against *host_ip*,
    configuring and verifying each SFC object in dependency order."""
    def with_ip(template):
        # Substitute the target host address for the {ip} placeholder
        return template.replace("{ip}", host_ip)

    common.delete_configuration()

    # Service functions (+ type check)
    common.put_and_check(
        common.SF_URL,
        with_ip(sfc_drm.SERVICE_FUNCTIONS_JSON),
        with_ip(sfc_drm.SERVICE_FUNCTIONS_JSON))
    common.check(
        common.SFT_URL,
        sfc_drm.SERVICE_FUNCTION_TYPE_JSON,
        "Checking Service Function Type...")

    # Forwarders, chains, paths
    common.put_and_check(
        common.SFF_URL,
        with_ip(sfc_drm.SERVICE_FUNCTION_FORWARDERS_JSON),
        with_ip(sfc_drm.SERVICE_FUNCTION_FORWARDERS_JSON))
    common.put_and_check(
        common.SFC_URL,
        sfc_drm.SERVICE_CHAINS_JSON,
        sfc_drm.SERVICE_CHAINS_JSON)
    common.put_and_check(
        common.SFP_URL,
        sfc_drm.SERVICE_PATH_JSON,
        sfc_drm.SERVICE_PATH_JSON)

    # Render the path, then verify RSP and operational state
    common.post_rpc(
        common.RSP_RPC_URL,
        sfc_drm.RENDERED_SERVICE_PATH_RPC_REQ,
        sfc_drm.RENDERED_SERVICE_PATH_RPC_RESP)
    common.check(
        common.RSP_URL,
        sfc_drm.RENDERED_SERVICE_PATH_RESP_JSON,
        "Checking RSP...")
    common.check(
        common.SFF_OPER_URL,
        sfc_drm.SERVICE_FUNCTION_FORWARDERS_OPER_JSON,
        "Checking SFF Operational State...")
    common.check(
        common.SF_OPER_URL,
        sfc_drm.SERVICE_FUNCTION_OPER_JSON,
        "Checking SF Operational State...")

    # ACL and classifier
    common.put_and_check(
        common.IETF_ACL_URL,
        sfc_drm.IETF_ACL_JSON_IPV4,
        sfc_drm.IETF_ACL_JSON_IPV4)
    common.put_and_check(
        common.SCF_URL,
        sfc_drm.SERVICE_CLASSIFIER_JSON,
        sfc_drm.SERVICE_CLASSIFIER_JSON)
def run_action(self, action):
    '''
    Process and monitor action

    Compares the layer state bit of each action to make sure the action
    resulted in the opposite reaction.

    @param action: Commands to run
    '''
    for act in action:
        # Snapshot the layer state before issuing the capability
        before = i.control.cmd('getLayerState')()

        if act == 'shift':
            # Press (0x1) or release (0x3) depending on whether the
            # shift bit is currently set; Switch1
            trigger = 0x3 if before.state[self.layer] & 0x1 else 0x1
            i.control.cmd('capability')('layerShift', None, trigger, 0x0, [self.layer])
            bit = 0x1
        elif act == 'latch':
            # Latch acts on release (0x3), Switch1
            i.control.cmd('capability')('layerLatch', None, 0x3, 0x0, [self.layer])
            bit = 0x2
        elif act == 'lock':
            # Lock acts on press (0x1), Switch1
            i.control.cmd('capability')('layerLock', None, 0x1, 0x0, [self.layer])
            bit = 0x4
        else:
            logger.warning("'{}' is an invalid layer action", act)
            break

        # The action's state bit must have flipped
        after = i.control.cmd('getLayerState')()
        check(before.state[self.layer] & bit != after.state[self.layer] & bit)

    # Make sure to loop or the shared library interface may get cranky
    # macOS Python interface with brew Python 3
    i.control.loop(1)
import common import sfc_ovs_regression_messages as sfc_orm __author__ = "Reinaldo Penno" __copyright__ = "Copyright(c) 2015, Cisco Systems, Inc." __license__ = "New-style BSD" __version__ = "0.1" __email__ = "*****@*****.**" __status__ = "Tested with SFC-Karaf distribution as of 04/14/2015" if __name__ == "__main__": common.delete_configuration() common.put_and_check(common.SF_URL, sfc_orm.SERVICE_FUNCTIONS_JSON, sfc_orm.SERVICE_FUNCTIONS_JSON) common.check(common.SFT_URL, sfc_orm.SERVICE_FUNCTION_TYPE_JSON, "Checking Service Function Type...") common.put_and_check(common.SFF_URL, sfc_orm.SERVICE_FUNCTION_FORWARDERS_JSON, sfc_orm.SERVICE_FUNCTION_FORWARDERS_JSON) common.put_and_check(common.SFC_URL, sfc_orm.SERVICE_CHAINS_JSON, sfc_orm.SERVICE_CHAINS_JSON) common.put_and_check(common.SFP_URL, sfc_orm.SERVICE_PATH_JSON, sfc_orm.SERVICE_PATH_JSON) common.post_rpc(common.RSP_RPC_URL, sfc_orm.RENDERED_SERVICE_PATH_RPC_REQ, sfc_orm.RENDERED_SERVICE_PATH_RPC_RESP) common.check(common.RSP_URL, sfc_orm.RENDERED_SERVICE_PATH_RESP_JSON, "Checking RSP...") ''' common.check( common.SFF_OPER_URL, sfc_orm.SERVICE_FUNCTION_FORWARDERS_OPER_JSON,
## Loopback Tests ##
logger.info(header("-- RawIO Loopback tests --"))
i.control.cmd('setRawIOPacketSize')(64)
i.control.cmd('setRawIOLoopback')(True)

# Send basic test packet, 1 byte length, payload 0xAC
logger.info(header("- Single byte packet payload -"))
i.control.cmd('HIDIO_test_2_request')(1, 0xAC)

# One processing loop (receives, then sends packets)
i.control.loop(1)

# In loopback mode nothing should land in the incoming buffer
logger.info("Incoming Buf: {}", data.rawio_incoming_buffer)
check(len(data.rawio_incoming_buffer) == 0)

# In loopback mode exactly one packet should sit in the outgoing buffer
logger.info("Outgoing Buf: {}", data.rawio_outgoing_buffer)
check(len(data.rawio_outgoing_buffer) == 1)
pkt = data.rawio_outgoing_buffer[0]
check(pkt[0].len == 3)
check(pkt[0].type == 0)
check(pkt[0].cont == 0)
check(pkt[1] == 2)  # Id check
check(len(pkt[2]) == 1)
check(pkt[2][0] == 0xAC)  # Payload check

# A single processing loop (receives, then sends packets)
i.control.loop(1)

# Check contents of incoming buffer (should be empty in loopback mode)
# Tally surgeries per department (spreadsheet column 4), attributing each
# row to its doctor (column 7) via the shared counting helpers in `common`.
# (The old inline tally code that predated common.default_one/common.check
# has been removed as dead, commented-out code.)
excel = pd.ExcelFile("泌尿科手术.xlsx")
zg_sheet = pd.read_excel(excel, sheet_name="Sheet1", keep_default_na=False)
zg_sheet_columns = zg_sheet.columns.to_list()

for one in zg_sheet.values:
    # Skip rows without a department value
    if len(one[4]) > 0:
        doctor = one[7].strip()
        # New departments start from a fresh copy of the default counters
        two = results.get(one[4], copy.deepcopy(common.default_one))
        common.check(doctor, two)
        results[one[4]] = two

common.compositeResults(results, header, "1.xlsx")
# Show output
i.control.cmd('rectDisp')()

# Display a couple of frames, confirming the animation stack stays at
# exactly one entry each time around
for index in range(2):
    # Read animation stack info
    print("Loop {0} - Expecting Stack Size: 1 Got: {1}".format(
        index, i.control.cmd('animationStackInfo')().size))
    check(i.control.cmd('animationStackInfo')().size == 1)

    # Advance one frame
    i.control.cmd('setFrameState')(2)
    i.control.loop(1)

    # Show output
    i.control.cmd('rectDisp')()

i.control.loop(1)

if False:
    print("-Rainbow Animation Test-")
__copyright__ = "Copyright(c) 2015, Cisco Systems, Inc." __license__ = "New-style BSD" __version__ = "0.1" __email__ = "*****@*****.**" __status__ = "Tested with SFC-Karaf distribution as of 04/14/2015" if __name__ == "__main__": common.delete_configuration() common.put_and_check( common.SF_URL, sfc_orm.SERVICE_FUNCTIONS_JSON, sfc_orm.SERVICE_FUNCTIONS_JSON) common.check( common.SFT_URL, sfc_orm.SERVICE_FUNCTION_TYPE_JSON, "Checking Service Function Type...") common.put_and_check( common.SFF_URL, sfc_orm.SERVICE_FUNCTION_FORWARDERS_JSON, sfc_orm.SERVICE_FUNCTION_FORWARDERS_JSON) common.put_and_check( common.SFC_URL, sfc_orm.SERVICE_CHAINS_JSON, sfc_orm.SERVICE_CHAINS_JSON) common.put_and_check( common.SFP_URL, sfc_orm.SERVICE_PATH_JSON, sfc_orm.SERVICE_PATH_JSON) common.post_rpc( common.RSP_RPC_URL,
# Drop to cli, type exit in the displayed terminal to continue
#i.control.cli()

logger.info(header("-Pixel Test-"))

# Read status of animation display buffers
logger.debug(i.control.cmd('animationDisplayBuffers')())
i.control.cmd('rectDisp')()

# Push 'testanimation' (index 3) onto the animation stack
i.control.cmd('addAnimation')(name='testanimation')

# The stack should now hold exactly one animation
logger.info("Expecting Stack Size: 1 Got: {}",
            i.control.cmd('animationStackInfo')().size)
check(i.control.cmd('animationStackInfo')().size == 1)

# Process one loop
i.control.loop(1)

# Pixel 1 should match the expected channel values
expecting = ((16, 0, 32), (30, 70, 120))
logger.info("Expecting: {} Got: {}", expecting, i.control.cmd('readPixel')(1))
check(i.control.cmd('readPixel')(1) == expecting)
i.control.cmd('rectDisp')()

# Update FrameState and Loop again
i.control.cmd('setFrameState')(2)
i.control.loop(1)

# Check Pixel 1
def run(self):
    '''
    Evaluate/run layer test

    @return: True if successful, False if not
    '''
    def current_stack():
        # Current layer stack from the firmware
        return i.control.cmd('getLayerState')().stack

    # Must begin on layer 0 (empty stack)
    check(len(current_stack()) == 0)

    # Rotate right from 0 to max, visiting every layer
    for expected in range(1, len(self.layers)):
        # Press, Switch1, Next
        i.control.cmd('capability')('layerRotate', None, 0x1, 0x0, [0])
        stack = current_stack()
        check(len(stack) == 1)
        check(stack[0] == expected)

    # Final rotation wraps back around to layer 0
    # Press, Switch1, Next
    i.control.cmd('capability')('layerRotate', None, 0x1, 0x0, [0])
    check(len(current_stack()) == 0)

    # Cleanup
    self.clean()

    # Must again be on layer 0
    check(len(current_stack()) == 0)

    # Rotate left from max to 0, starting at 0, visiting every layer
    for expected in range(len(self.layers) - 1, 0, -1):
        # Press, Switch1, Previous
        i.control.cmd('capability')('layerRotate', None, 0x1, 0x0, [1])
        stack = current_stack()
        check(len(stack) == 1)
        check(stack[0] == expected)

    # Final rotation wraps back around to layer 0
    # Press, Switch1, Previous
    i.control.cmd('capability')('layerRotate', None, 0x1, 0x0, [1])
    check(len(current_stack()) == 0)

    # Make sure to loop or the shared library interface may get cranky
    # macOS Python interface with brew Python 3
    i.control.loop(1)

    # Cleanup
    self.clean()
    return True
# Show output
i.control.cmd('rectDisp')()

# Step through twelve frames; the animation stack must report exactly
# one entry before each frame advance
for index in range(12):
    # Read animation stack info
    size_now = i.control.cmd('animationStackInfo')().size
    print("Loop {0} - Expecting Stack Size: 1 Got: {1}".format(index, size_now))
    check(i.control.cmd('animationStackInfo')().size == 1)

    # Advance a single frame
    i.control.cmd('setFrameState')(2)
    i.control.loop(1)

    # Show output
    i.control.cmd('rectDisp')()

i.control.loop(1)

if False:
    print("-Rainbow Animation Test-")
def run_qsub_mr(options):
    """Run a map-reduce job on a PBS cluster via qsub.

    Writes per-task bash scripts into <options.output>/.mr, submits the
    mapper as a qsub array job (retrying failed tasks up to
    options.max_tries), optionally submits a reducer job, then moves the
    results into options.output and removes the work directory.
    """
    # Record current working directory. Both mapper and reducer commands will run under this directory.
    cwd = os.getcwd()
    # Find input files.
    input_files = glob.glob(options.input)
    input_files.sort()
    common.check(input_files, 'input pattern does not list any file')
    # Basenames must be unique: map outputs are named after them.
    common.check(len(set([basename(i) for i in input_files])) == len(input_files),
                 'input pattern matches input files with duplicate basename')
    if options.verbose:
        common.info('input pattern %s matched %d files', repr(options.input), len(input_files))
    # Check then create output directory.
    common.check(options.retry or not os.path.exists(options.output),
                 'output directory already exists')
    workdir = os.path.join(options.output, '.mr')
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    if options.verbose:
        common.info('workdir: %s', workdir)
    # Write out script to rerun this command.
    with common.open(os.path.join(workdir, 'rerun.bash'), 'w') as f:
        f.write('''# Run this script with --rerun is equivalent to running the mapreduction (assuming the input hasn't changed)
cd %(cwd)s
%(args)s "$@"
''' % {'cwd': pipes.quote(cwd), 'args': ' '.join(map(pipes.quote, sys.argv))})
    # Write out mapper script to run on nodes.
    with common.open(os.path.join(workdir, 'map.node.bash'), 'w') as f:
        f.write(bash_start + options.mapper)
    # Write out mapper script to submit to qsub.
    # Each array task redirects stderr to map.<id>.err, rotating any old one.
    with common.open(os.path.join(workdir, 'map.qsub.bash'), 'w') as f:
        f.write(bash_start + '''
cd %(cwd)s
if [ -e %(workdir)s/map.${PBS_ARRAYID}.err ]; then
    mv %(workdir)s/map.${PBS_ARRAYID}.err %(workdir)s/map.${PBS_ARRAYID}.err.old
fi
bash %(workdir)s/map.${PBS_ARRAYID}.bash 2> %(workdir)s/map.${PBS_ARRAYID}.err
''' % {'cwd': pipes.quote(cwd), 'workdir': pipes.quote(workdir)})
    # Write out individual mapper command scripts.
    # A map.<i>.success marker file signals that task i completed.
    for i, path in enumerate(input_files):
        with common.open(os.path.join(workdir, 'map.%d.bash' % i), 'w') as f:
            f.write(bash_start + '''
cd %(cwd)s
%(cat)s %(path)s | bash %(workdir)s/map.node.bash | gzip - > %(workdir)s/map.%(task_id)d.out.gz
touch %(workdir)s/map.%(task_id)d.success
''' % {'cwd': pipes.quote(cwd), 'cat': choose_cat(path), 'path': pipes.quote(path),
            'workdir': pipes.quote(workdir), 'task_id': i})
    # Write out reducer script.
    # The reducer concatenates all map outputs, sorts them, and pipes the
    # stream through the user's reducer command.
    if options.reducer != 'NONE':
        with common.open(os.path.join(workdir, 'reduce.node.bash'), 'w') as f:
            f.write(bash_start + options.reducer)
        with common.open(os.path.join(workdir, 'reduce.qsub.bash'), 'w') as f:
            f.write(bash_start + '''
cd %(cwd)s
if [ -e %(workdir)s/reduce.err ]; then
    mv %(workdir)s/reduce.err %(workdir)s/reduce.err.old
fi
find %(workdir)s -name 'map.*.out.gz' -print0 2>> %(workdir)s/reduce.err | \\
xargs -0 zcat 2>> %(workdir)s/reduce.err | \\
LC_ALL=C sort %(sort_options)s 2>> %(workdir)s/reduce.err | \\
bash %(workdir)s/reduce.node.bash 2>> %(workdir)s/reduce.err | \\
gzip - > %(workdir)s/reduce.out.gz 2>> %(workdir)s/reduce.err
touch %(workdir)s/reduce.success 2>> %(workdir)s/reduce.err
''' % {'cwd': pipes.quote(cwd), 'workdir': pipes.quote(workdir),
            'sort_options': '-n' if options.numerical_sort else ''})
    # Run mapper jobs.
    for i in range(options.max_tries):
        # Find tasks to run.
        # Only tasks without a success marker are (re)submitted.
        task_ids = []
        for task_id in range(len(input_files)):
            if not os.path.exists(os.path.join(workdir, 'map.%d.success' % task_id)):
                task_ids.append(task_id)
        if not task_ids:
            break
        qsub_args = ['-N', '%s-map' % options.name,
                     '-q', options.mapper_queue,
                     '-l', 'pmem=%s,walltime=%s' % (options.mapper_pmem, options.mapper_walltime),
                     '-t', format_task_ids(task_ids),
                     '-o', os.devnull, '-e', os.devnull] + \
            options.qsub_args + [os.path.join(workdir, 'map.qsub.bash')]
        if options.verbose:
            common.info('map try %d of %d: need to run %d tasks', i + 1, options.max_tries, len(task_ids))
            common.info('map try %d of %d: qsub_args is %s', i + 1, options.max_tries, repr(qsub_args))
        wait_qsub(qsub_args)
        if options.verbose:
            common.info('map try %d of %d: finished', i + 1, options.max_tries)
    # Count successful map tasks; all must have succeeded to continue.
    map_success = 0
    for task_id in range(len(input_files)):
        if os.path.exists(os.path.join(workdir, 'map.%d.success' % task_id)):
            map_success += 1
    # NOTE: `i` here is the last value from the retry loop above.
    if i > 0 and options.verbose:
        common.info('map success: %d / %d', map_success, len(input_files))
    common.check(map_success == len(input_files), 'map failed after %d tries', options.max_tries)
    # Run reducer jobs.
    if options.reducer != 'NONE':
        for i in range(options.max_tries):
            # A success marker from a previous try means we are done.
            if os.path.exists(os.path.join(workdir, 'reduce.success')):
                break
            qsub_args = ['-N', '%s-reduce' % options.name,
                         '-q', options.reducer_queue,
                         '-l', 'pmem=%s,walltime=%s' % (options.reducer_pmem, options.reducer_walltime),
                         '-o', os.devnull, '-e', os.devnull] + options.qsub_args + \
                [os.path.join(workdir, 'reduce.qsub.bash')]
            if options.verbose:
                common.info('reduce try %d of %d: started', i + 1, options.max_tries)
                common.info('reduce try %d of %d: qsub_args is %s', i + 1, options.max_tries, repr(qsub_args))
            wait_qsub(qsub_args)
            if options.verbose:
                common.info('reduce try %d of %d: finished', i + 1, options.max_tries)
        common.check(os.path.exists(os.path.join(workdir, 'reduce.success')),
                     'reduce failed after %d tries', options.max_tries)
    # Move output.
    # With a reducer: a single reduce.out.gz; without: one map.<basename>.out.gz
    # per input. keep_workdir symlinks instead of moving so the workdir stays intact.
    if options.reducer != 'NONE':
        src = os.path.join(workdir, 'reduce.out.gz')
        dst = os.path.join(options.output, 'reduce.out.gz')
        subprocess.check_call(['rm', '-f', dst])
        if options.keep_workdir:
            os.symlink(os.path.abspath(src), dst)
        else:
            os.rename(src, dst)
    else:
        for i, path in enumerate(input_files):
            src = os.path.join(workdir, 'map.%d.out.gz' % i)
            dst = os.path.join(options.output, 'map.%s.out.gz' % basename(path))
            subprocess.check_call(['rm', '-f', dst])
            if options.keep_workdir:
                os.symlink(os.path.abspath(src), dst)
            else:
                os.rename(src, dst)
    # Remove workdir
    if not options.keep_workdir:
        subprocess.call(['rm', '-rf', workdir])
# Drop to cli, type exit in the displayed terminal to continue
#i.control.cli()

# Read current keyboard state
print(data.usb_keyboard())

# Press key 0x00
i.control.cmd('addScanCode')(0x01)

# Two processing loops are required for the key to reach the Hold state
i.control.loop(2)
print(data.usb_keyboard())
print(data.usb_keyboard_data)
# Check if [41] is a subset of the usb keyboard data
check(set(data.usb_keyboard()[1]) >= {41})

# Release key 0x00
i.control.cmd('removeScanCode')(0x01)

# A single loop moves the key from Hold to Release
i.control.loop(1)
print(data.usb_keyboard())

### Test 3 keys at same time ###
print("-- 3 key test --")

# press keys
logger.info(header("-- RawIO Loopback tests --"))
i.control.cmd('setRawIOPacketSize')(64)
i.control.cmd('setRawIOLoopback')(True)

# Send basic test packet, 1 byte length, payload 0xAC
logger.info(header("- Single byte packet payload -"))
i.control.cmd('HIDIO_test_2_request')(1, 0xAC)

# One processing loop: receives, then sends packets
i.control.loop(1)

# Loopback: the incoming buffer must remain empty
logger.info("Incoming Buf: {}", data.rawio_incoming_buffer)
check(len(data.rawio_incoming_buffer) == 0)

# Loopback: exactly one packet must be queued outgoing
logger.info("Outgoing Buf: {}", data.rawio_outgoing_buffer)
check(len(data.rawio_outgoing_buffer) == 1)
hdr = data.rawio_outgoing_buffer[0][0]
pkt_id = data.rawio_outgoing_buffer[0][1]
payload = data.rawio_outgoing_buffer[0][2]
check(hdr.len == 3)
check(hdr.type == 0)
check(hdr.cont == 0)
check(pkt_id == 2)        # Id check
check(len(payload) == 1)
check(payload[0] == 0xAC) # Payload check

# A single processing loop (receives, then sends packets)
i.control.loop(1)
def check(parts):
    # Delegate to common.check, expanding *parts* as keyword arguments
    # on top of the module-level `patterns`.
    return common.check(patterns, **parts)
tty_flush.read() time.sleep(0.1) thread_flush = threading.Thread(target=background_flush, args=(i.control.cli_name(), )) thread_flush.daemon = True thread_flush.start() # Send version command, every command must end with a \r tty_interface.write("help\r") tty_interface.write("version\r") # Clear periodic interval i.control.cmd('setPeriodic')(0) check(i.control.cmd('getPeriodic')() == 0) # Set periodic interval period_value = 1234 tty_interface.write("periodic {}\r".format(period_value)) # Wait for commands to register/process time.sleep(0.5) # Check periodic interval check(i.control.cmd('getPeriodic')() == period_value) ### Basic echo validation test ### logger.info(header("-- Basic echo validation test --"))
tty_flush = open(interface, "r") while True: tty_flush.read() time.sleep(0.1) thread_flush = threading.Thread(target=background_flush, args=(i.control.cli_name(),)) thread_flush.daemon = True thread_flush.start() # Send version command, every command must end with a \r tty_interface.write("help\r") tty_interface.write("version\r") # Clear periodic interval i.control.cmd('setPeriodic')(0) check(i.control.cmd('getPeriodic')() == 0) # Set periodic interval period_value = 1234 tty_interface.write("periodic {}\r".format(period_value)) # Wait for commands to register/process time.sleep(0.5) # Check periodic interval check(i.control.cmd('getPeriodic')() == period_value) ### Basic echo validation test ###
# Drop to cli, type exit in the displayed terminal to continue
#i.control.cli()

# Read current keyboard state
print(data.usb_keyboard())

# Press key 0x00
i.control.cmd('addScanCode')(0x01)

# Needs two processing loops to advance the key into the Hold state
i.control.loop(2)
print(data.usb_keyboard())
print(data.usb_keyboard_data)
# Check if [41] is a subset of the usb keyboard data
check(set(data.usb_keyboard()[1]).issuperset({41}))

# Release key 0x00
i.control.cmd('removeScanCode')(0x01)

# One processing loop transitions the key from Hold to Release
i.control.loop(1)
print(data.usb_keyboard())

### Test 3 keys at same time ###
print("-- 3 key test --")

# press keys
i.control.cmd('addScanCode')(0x01)
help='Set ACL matches type [' + ' '.join(acl_type_dict.keys()) + ']', required=True) parser.add_argument( '--dp-type', choices=sf_type_dict.keys(), help='Set data plane type [' + ' '.join(sf_type_dict.keys()) + ']', required=True) args = parser.parse_args() common.delete_configuration() common.put_and_check( common.SF_URL, sf_type_dict[args.dp_type], sf_type_dict[args.dp_type]) common.check( common.SFT_URL, sfc_crm.SERVICE_FUNCTION_TYPE_JSON, "Checking Service Function Type...") common.put_and_check( common.METADATA_URL, sfc_crm.METADATA_JSON, sfc_crm.METADATA_JSON) common.put_and_check( common.SFF_URL, sff_type_dict[args.dp_type], sff_type_dict[args.dp_type]) common.put_and_check( common.SFC_URL, sfc_crm.SERVICE_CHAINS_JSON, sfc_crm.SERVICE_CHAINS_JSON) common.put_and_check( common.SFP_URL,