def main():
    mem_image = Memory('./proj_trace.txt')  # path to the trace file
    p1 = Processor(mem_image, forwarding=False)
    p1.run()
    p1.print_stats()
    p2 = Processor(mem_image, forwarding=True)
    p2.run()
    p2.print_stats()
def index():
    endpoints_reader = LoadData()
    data = Processor()
    df_csv, df_json = endpoints_reader.read_data()
    summary_csv_info, summary_json_info = data.process_endpoints(df_csv, df_json)
    return ('Service A result: ' + summary_csv_info + '\n' +
            'Service B result: ' + summary_json_info)
def test_lista_instancias():
    p = Processor()
    expected = {'instancias': []}
    with requests_mock.Mocker() as mock_request:
        mock_request.get(p.BASE_URL, json=expected)
        response = p.lista_instancias()
        assert response == expected
def __init__(self, processor=None):
    if processor is None:
        self.processor = Processor()
    else:
        self.processor = processor
    self.actions.SetProcessor(self.processor)
def main():
    parser = argparse.ArgumentParser(description='Part-of-Speech Tagging.')
    parser.add_argument('--prefix', '-p', type=str, default='',
                        help='prefix of the files used to store the model')
    parser.add_argument('--times', '-t', type=int, default=1,
                        help='number of training iterations')
    parser.add_argument('--all', '-a', action='store_true',
                        help='without this switch, the model is trained on randomly sampled data')
    parser.add_argument('--file', '-f', type=str, default='',
                        help='test data file')
    parser.add_argument('--save', '-s', action='store_true',
                        help='save the model file')
    args = parser.parse_args()
    tagger = Tagger('data/wsj00-18.pos', args.times, not args.all, args.save,
                    args.prefix)
    test_data = Processor(args.file)
    tagger.benchmark(test_data)
def main():
    parser = argparse.ArgumentParser(description='Prerequisite prediction')
    parser.add_argument('-model', type=str, required=True,
                        choices=['LSTM', 'LSTM_S', 'TextCNN', 'GCN'])
    parser.add_argument('-dataset', type=str, required=True,
                        choices=['moocen', 'mooczh'])
    parser.add_argument('-feature_dim', type=int, default=24)
    parser.add_argument('-seed', type=int, default=0)
    parser.add_argument('-result_path', type=str, default=None)
    parser.add_argument('-output_model', action='store_true')
    args = parser.parse_args()
    set_seed(args.seed)
    lang = 'en' if args.dataset == 'moocen' else 'zh'
    if not os.path.exists('model_states/'):
        os.mkdir('model_states/')
    if not os.path.exists('result/'):
        os.mkdir('result/')
    data_loader = DataLoader(args.dataset, args.model, lang)
    config = Config(data_loader, args.feature_dim)
    processor = Processor(args.model, data_loader, config)
    processor.run(args.result_path, args.output_model)
def test_create_user_login(self):
    processor = Processor()
    # Create a user
    processor.execute("/", "post", "/users/system", {
        "new_name": "test_user",
        "new_password": "******"
    })
    # Log in as the user
    response = processor.check_login("/", "post", {
        "name": "test_user",
        "password": "******"
    })
    # Authenticate using the token
    user_handle = processor.get_user_for_token(response["token"])
    # Get the user handle directly
    user_handle2 = processor.item_finder.find("/users/test_user")
    # They are handles for the same item
    self.assertEqual(user_handle.item_id, user_handle2.item_id)
    # An incorrect token can't be used
    with self.assertRaises(ServiceException) as cm:
        processor.get_user_for_token(response["token"] + "foo")
    self.assertEqual(cm.exception.response_code, 403)
    self.assertEqual(cm.exception.message, "Invalid authentication token")
    # An empty token can't be used
    with self.assertRaises(ServiceException) as cm:
        processor.get_user_for_token("")
    self.assertEqual(cm.exception.response_code, 403)
    self.assertEqual(cm.exception.message, "Invalid authentication token")
    # Check we can do something with the user handle we just retrieved:
    # change the user's password
    processor.execute("/users/test_user", "post", user_handle,
                      {"password": "******"})
def run_processor(g=g):
    from processor import Processor
    processor = Processor(projectdb=g.projectdb,
                          inqueue=g.fetcher2processor,
                          status_queue=g.status_queue,
                          newtask_queue=g.newtask_queue,
                          result_queue=g.processor2result)
    processor.run()
def executeProgram(state, debug, interactive, numMemPrint, numRegPrint, cycleLimit):
    logString = ""
    proc = Processor(state)
    cycleCount = 0
    instructionCount = 0
    while not state.finished and cycleCount < cycleLimit:
        thisLogString = ""
        if cycleCount > 0:
            if debug:
                thisLogString += state.instrBufferToString()
                thisLogString += state.pipelineToString() + "\n"
                thisLogString += state.specRegToString() + "\n"
                thisLogString += state.memToString(numMemPrint) + "\n"
                thisLogString += state.regToString(numRegPrint) + "\n**********\n"
            if interactive:
                print(thisLogString)
                input("Press Enter to continue")
        instructionCount += proc.run()
        logString += thisLogString
        cycleCount += 1
    print(str(cycleCount) + " cycles run, with a limit of " + str(cycleLimit))
    print(str(instructionCount) + " instructions executed (not including NOPs)")
    print(str(float(instructionCount) / float(cycleCount)) +
          " instructions per cycle average")
    logString += "\n\nFinal Memory: " + state.memToString(numMemPrint)
    with open("log.out", 'w') as f:
        f.write(logString)
def main():
    """Start the bot."""
    s3_client = S3Client(
        os.environ.get('s3_url'),
        os.environ.get('s3_access_key'),
        os.environ.get('s3_secret_key'),
        os.environ.get('s3_bucket'),
    )
    pg_client = PgClient(host=os.environ.get('pg_host'),
                         port=os.environ.get('pg_port'),
                         user=os.environ.get('pg_user'),
                         passwd=os.environ.get('pg_pass'),
                         db=os.environ.get('pg_db'))
    admin_users = os.environ.get('admin_users').split(',')
    processor = Processor(s3_client, pg_client, admin_users)

    # Create the Updater and pass it your bot's token.
    updater = Updater(os.environ.get('bot_key'), use_context=True)

    # Get the dispatcher to register handlers
    dp = updater.dispatcher

    # On a non-command message, pass any sticker to the processor
    dp.add_handler(MessageHandler(Filters.sticker, processor.filter))

    # Start the Bot
    updater.start_polling()

    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
def run_bot(program):
    # Patching address 0 to 2 wakes the vacuum robot so it accepts input.
    running_bot = Processor(program, ((0, 2),)).run_on_input_generator()
    cam_out = next(running_bot)
    point_cloud = cam_out_to_point_cloud(cam_out)
    part1 = sum(p.y * p.x for p in point_cloud
                if sum((p + m) in point_cloud for m in ADJACENT) == 4)
    main_routine = "A,C,A,B,C,B,C,B,A,C"
    func_a = "L,6,L,4,R,8"
    func_b = "L,4,R,4,L,4,R,8"
    func_c = "R,8,L,6,L,4,L,10,R,8"
    # The trailing "n" declines the continuous video feed.
    input_str = "\n".join((main_routine, func_a, func_b, func_c, "n")) + "\n"
    try:
        output = []  # Stop PyCharm complaining about reference before assignment
        for c in input_str:
            output = running_bot.send(ord(c))
        return part1, output[-1]
    except StopIteration:
        raise NotImplementedError("Don't expect the bot to ever halt the program")
def run_bot(program):
    location = Point(0, 0)
    room = {location: 1}
    running_bot = Processor(program).run_on_input_generator()
    next(running_bot)  # Advance to the first yield so we can .send()
    path = []
    while True:
        next_move = next(
            (move for move in move_vec if (location + move) not in room), None)
        if not next_move:
            move_backwards = path.pop() * -1
            if not path:
                return room
            running_bot.send(move_vec[move_backwards])
            location += move_backwards
            continue
        next_loc = location + next_move
        (status,) = running_bot.send(move_vec[next_move])
        room[next_loc] = status
        if room[next_loc] != 0:
            location = next_loc
            path.append(next_move)
        if room[next_loc] == 2:
            assert 254 == len(path)  # Part 1
def find_state(list_o_codes, desired_state):
    for noun in range(100):
        for verb in range(100):
            patched = Processor(list_o_codes, ((1, noun), (2, verb)))
            if patched.run_no_io() == desired_state:
                return noun, verb
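# find_state brute-forces the two inputs of an Intcode program (Advent of
# Code 2019 day 2 style): it patches positions 1 and 2 with every noun/verb
# pair until the final machine state matches. A hedged usage sketch; the
# 100 * noun + verb encoding is that puzzle's convention, and target_state
# is assumed to be the full memory image that run_no_io() returns:
#
#     noun, verb = find_state(program, target_state)
#     print(100 * noun + verb)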
def __init__(self, data):
    self.data = data
    self.model_path = os.path.join(MODEL_PATH, TENSORFLOW_MODEL_DIR)
    self.vocab_size = Processor().getWordsCount()
    self.dpNet = create_model(self.vocab_size)
    if os.path.isfile(self.model_path):
        self.dpNet.load_weights(self.model_path)
def SRT(proc_list):
    def find_min_time(proc_list):
        # Return the index of the unfinished process with the shortest
        # remaining time, or None if every process has finished. The
        # sentinel of 11 assumes no burst exceeds 10 time units.
        min_time = 11
        index = -1
        for i in range(len(proc_list)):
            if proc_list[i] < min_time and proc_list[i] != 0:
                min_time = proc_list[i]
                index = i
        if min_time == 0 or index == -1:
            return None
        return index

    cpu = Processor(proc_list)
    cpu.add_next_process()
    cur_proc_index = 0
    while not cpu.done_processing:
        if cur_proc_index is None:
            cpu.inc_idle_time(1)
            ind = cpu.add_next_process()
            if ind is not None:
                cur_proc_index = ind
        else:
            cpu.process(cur_proc_index, 1)
            new_proc_index = cpu.add_next_process()
            if (new_proc_index is not None
                    and cpu.proc_list[new_proc_index] < cpu.proc_list[cur_proc_index]):
                cur_proc_index = new_proc_index
            elif cpu.termination_check(cur_proc_index):
                cur_proc_index = find_min_time(cpu.proc_list)
        if cpu.finished_procs == len(proc_list):
            cpu.done_processing = True
    return cpu.get_avg_res_time()
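# A minimal sketch of the Processor interface the SRT scheduler above
# assumes (hypothetical summary inferred from the call sites, not the real
# class):
#
#     cpu.proc_list             # remaining burst time per process; 0 = done
#     cpu.add_next_process()    # admit the next arrival, returning its index
#     cpu.process(i, 1)         # run process i for one time unit
#     cpu.inc_idle_time(1)      # account one idle time unit
#     cpu.termination_check(i)  # True once process i has finished
#     cpu.get_avg_res_time()    # average response time over all processes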
def importDag(self, inputFilePath):
    with open(inputFilePath, "r") as input_file:
        lines = input_file.readlines()
    self.noOfFogs = int(''.join(char for char in lines[0] if char.isdigit()))
    self.noOfClouds = int(''.join(char for char in lines[1] if char.isdigit()))
    currentLineIndex = 3
    self.processors = []
    for i in range(self.noOfFogs + self.noOfClouds):
        fields = lines[currentLineIndex].split()
        detailsOfRow = [int(number) for number in fields[3:8]]
        isFog = fields[1] == "True"
        newProcessor = Processor(i, isFog)
        newProcessor.processingRate = float(fields[2])
        newProcessor.costPerTimeUnit = float(fields[8])
        newProcessor.noOfCores = detailsOfRow[0]
        newProcessor.ram = detailsOfRow[1]
        newProcessor.storage = detailsOfRow[2]
        newProcessor.wanUploadBandwidth = detailsOfRow[3]
        newProcessor.wanDownloadBandwidth = detailsOfRow[4]
        self.processors.append(newProcessor)
        currentLineIndex += 1
def setUp(self):
    FileCache._cache["/debug.js"] = """
// Copyright 2002 Older Chromium Author dudes.
function debug(msg) { if (window.DEBUG) alert(msg); }
""".strip()
    FileCache._cache["/global.js"] = """
// Copyright 2014 Old Chromium Author dudes.
<include src="/debug.js">
var global = 'type checking!';
""".strip()
    FileCache._cache["/checked.js"] = """
// Copyright 2028 Future Chromium Author dudes.
/**
 * @fileoverview Coolest app ever.
 * @author Douglas Crockford ([email protected])
 */
<include src="/global.js">
debug(global);
// Here continues checked.js, a swell file.
""".strip()
    FileCache._cache["/double-debug.js"] = """
<include src="/debug.js">
<include src="/debug.js">
""".strip()
    self._processor = Processor("/checked.js")
def predict(self, processor=None, load_weights=False, **data):
    if load_weights:
        self.ner_model.load_weights(self.model_path)
    # Fetch the data to predict; by default, predict_data calls the
    # input_x method in processor.py.
    x_data = self.dataset.predict_data(**data)
    word_num = x_data[0].shape[0] - 1  # drop the leading '[cls]' token
    x_batch_ids, x_batch_mask, x_batch_seg = conver2Input(
        x_data, max_seq_len=config.max_sequence)
    predict = self.ner_model.predict([x_batch_ids, x_batch_mask])
    # Convert the predictions to labels; to_categorys calls the output_y
    # method in processor.py. word_num - 1 drops the trailing '[sep]' token.
    prediction = self.dataset.to_categorys(
        np.argmax(predict[0][:word_num - 1], axis=-1))
    if processor is None:
        processor = Processor()
    prediction = processor.processedOutput(data['source'], x_data[0], prediction)
    return prediction
def test_processing_files(self):
    new_repo = create_test_repo()
    processor = Processor("tests",
                          new_repo.working_dir,
                          tagged_extensions=["txt"],
                          language="swift")
    # For this test, we only care about the sample file.
    processor.source_documents = list(
        filter(lambda x: x.path.endswith("sample.txt"),
               processor.source_documents))
    self.assertTrue(processor.tagged_documents)
    self.assertEqual(len(processor.source_documents), 1)
    processor.process(suffix=".processed")
    with open("tests/sample-expanded.txt", "r") as f:
        reference_text = f.read()
    with open("tests/sample.txt.processed") as f:
        processed_text = f.read()
    self.assertEqual(reference_text, processed_text)
def run_bot(program: List[int], debug: int = 0) -> Dict[Point, TileID]:
    location = Point(0, 0)
    room = {location: TileID.EMPTY}
    running_bot = Processor(program, debug=debug).run_on_input_generator()
    next(running_bot)  # Advance to the first yield so we can .send()
    path = []
    try:
        while True:
            if debug:
                print_room(room)
                print(location)
            next_move = next(
                (move for move in MOVE_VEC if (location + move) not in room),
                None)
            if not next_move:
                move_backwards = path.pop() * -1
                if not path:
                    return room
                running_bot.send(MOVE_VEC[move_backwards].value)
                location = location + move_backwards
                continue
            next_loc = location + next_move
            (status,) = running_bot.send(MOVE_VEC[next_move].value)
            room[next_loc] = TileID(status)
            if room[next_loc] != TileID.WALL:
                location = next_loc
                path.append(next_move)
            if room[next_loc] == TileID.OXYGEN:
                print(len(path))  # Part 1
    except StopIteration:
        raise NotImplementedError("Don't expect the bot to ever halt the program")
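# Several snippets here drive Processor.run_on_input_generator as a
# coroutine: prime it with next(), then .send() one input value at a time
# and collect the outputs it yields. A minimal sketch of that protocol
# (hypothetical stand-in; the real Processor is an Intcode interpreter whose
# internals are not shown here):
def run_on_input_generator_sketch():
    outputs = []
    while True:  # loop until the underlying program would halt
        value = yield outputs  # yield outputs gathered so far, wait for input
        outputs = [value * 2]  # placeholder computation standing in for Intcode


# Usage mirrors the callers above:
#     bot = run_on_input_generator_sketch()
#     next(bot)          # advance to the first yield
#     out = bot.send(3)  # feed one input, receive the outputs -> [6]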
def hello_world():
    global prcsr
    if prcsr is None:
        prcsr = Processor()
    if not os.path.exists(UPLOAD_FOLDER):
        os.makedirs(UPLOAD_FOLDER)
    return render_template('base.html')
def main():
    recognizer = sr.Recognizer()
    rgb = Squid(RED_GPIO_LEAD, GREEN_GPIO_LEAD, BLUE_GPIO_LEAD)
    snowboy_detector = snowboydecoder.HotwordDetector(
        SNOWBOY_HOTWORD_LOCATION, sensitivity=0.5, audio_gain=1)
    while True:
        snowboy_detector.listen()
        rgb.set_color(GREEN)
        with sr.Microphone(device_index=2) as source:
            audio = recognizer.listen(source=source, timeout=5,
                                      phrase_time_limit=2)
        rgb.set_color(BLUE)
        processor = Processor(audio=audio)
        try:
            processor.run()
        except Exception:
            rgb.set_color(RED)
            sleep(1)
        rgb.set_color(OFF)
def setUp(self):
    # Set up the fixture for the test.
    self.processor = Processor("test_vlans.csv", "", "")
    self.processor.process_vlan_file()
    # Grab the device with id 0 for the test.
    self.device = self.processor.net.devices[0]
def __init__(self, env, network_fabric):
    super().__init__()
    self.__logger = LOGGER.bind(component=self.ENDPOINT_NAME)
    self.__env = env
    self.__cryptogen = SystemRandom()
    self.__network_fabric = network_fabric
    self.__processor = Processor(
        env,
        self.ENDPOINT_NAME,
        24,
        2.6 * Processor.FREQUENCY_GHZ,
        step_cycles=min(self._CPU_CYCLES_PER_KILOBYTE_GENERATE,
                        self._CPU_CYCLES_PER_KILOBYTE_COMPUTE,
                        self._CPU_CYCLES_PER_KILOBYTE_SEND,
                        self._CPU_CYCLES_PER_KILOBYTE_RECEIVE))
    self.submission_queues = []
    self.completion_queues = []
    self.__all_command_submitted = None
    self.__shutdown_hook = None
    self.summary = {
        "num_commands_submitted": 0,
        "num_commands_completed": 0,
        "total_data_bytes": 0
    }
def run_bot(program: List[int]) -> Dict[Point, int]:
    bot = Processor(program)
    points_to_scan = [Point(y, x) for x in range(50) for y in range(50)]
    return {point: check_location(bot, program, point)
            for point in points_to_scan}
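# The 50x50 scan above matches Advent of Code 2019 day 19 part 1, where the
# answer is the number of affected points. Assuming check_location returns
# 1 inside the tractor beam and 0 outside, the count is simply:
#
#     part1 = sum(run_bot(program).values())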
def part2():
    progstring = load("7.txt")[0]
    program = [int(inst) for inst in progstring.split(",")]
    # All possible permutations of the phase settings.
    settings = list(permutations([9, 8, 7, 6, 5], 5))
    best = 0
    for setting in settings:
        processors = [Processor(program) for n in range(5)]
        # Send the phase setting to each processor.
        for i in range(5):
            processors[i].run([setting[i]])
        # Now start them all running.
        lastE = None
        output = 0
        # runState is zero once a processor has halted.
        while sum(p.runState for p in processors) > 0:
            for i in range(5):
                # Pass the output of this processor as the input of the next.
                # The processors pause when they run out of input,
                # remembering their current line position.
                output = processors[i].run([output])
                if output is not None:
                    lastE = output
        if lastE > best:
            best = lastE
    print(best)
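# The feedback loop above depends on a Processor.run() that consumes its
# input list, pauses (returning the latest output, or None) when the input
# runs out, and sets runState to 0 once the program halts. Under that
# assumed contract, the non-looped part 1 variant would look like this
# (a sketch, not code from the original solution):
#
#     best = 0
#     for setting in permutations([0, 1, 2, 3, 4], 5):
#         signal = 0
#         for phase in setting:
#             signal = Processor(program).run([phase, signal])
#         best = max(best, signal)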
def test_rename(self):
    expected_message = {
        "author_icon": "https://avatars.slack-edge.com/2018-05-07/360275784695_b413a925836f89c22c8b_32.jpg",
        "author_name": "Phillip Piper <@phillip.piper>",
        "fallback": "Phillip Piper just created a new channel (via renaming) :tada:\n<#CD1USGKT7|dev-test-2>\nIts purpose is: TESTING THIS ",
        "pretext": "A new channel has been created (via renaming) :tada:",
        "text": "TESTING THIS",
        "title": "<#CD1USGKT7>"
    }
    event = RENAME_EVENT
    slack_client = MagicMock()
    slack_client.user_info.return_value = USER_INFO_SUCCESS
    slack_client.channel_info.return_value = CHANNEL_INFO_SUCCESS
    logger = MagicMock()
    target_channel = "target"
    processor = Processor(target_channel, [], slack_client, logger=logger)
    processor.process_channel_event("rename", event)
    self.assertFalse(logger.error.called)
    slack_client.channel_info.assert_called_with("CD1USGKT7")
    slack_client.user_info.assert_called_with("UAKA6GKFF")
    self.assertTrue(slack_client.post_chat_message.called)
    ((posted_channel, posted_text, posted_attachments),
     _) = slack_client.post_chat_message.call_args
    self.assertEqual(target_channel, posted_channel)
    self.assertEqual(1, len(posted_attachments))
    attachment = posted_attachments[0]
    # The color of the message is random and can't be tested.
    del attachment['color']
    self.assertDictEqual(attachment, expected_message)
def run_bot(program: List[int], bot_instructions: List[str], debug: int = 0):
    running_bot = Processor(program).run_on_input_generator()
    output = next(running_bot)  # Advance to the first yield so we can .send()
    if debug:
        print("".join(chr(i) for i in output), end="")
    assert len(bot_instructions) <= 15
    input_str = "\n".join(bot_instructions) + "\n"
    if debug:
        print(input_str)
    try:
        i = 0
        output = []  # Stop PyCharm complaining about reference before assignment
        while i < len(input_str):
            output = running_bot.send(ord(input_str[i]))
            i += 1
        if debug:
            print("Done sending string")
            print("".join(chr(i) if i < 0x10FFFF else str(i) for i in output))
        return output[-1]
    except StopIteration:
        raise NotImplementedError("Don't expect the bot to ever halt the program")
def test_download_single(self):
    with Processor() as p:
        # The endpoint returns "Hello, world!".
        path = p._download_single(self.url + '/download/test.txt')
        self.assertTrue(path.endswith('.txt'))
        with open(path, 'rb') as f:
            self.assertEqual(f.read(), b'Hello, world!')
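# Processor is used above as a context manager, which suggests __enter__
# provisions a workspace for downloads and __exit__ cleans it up. A minimal
# sketch of that pattern (hypothetical; not the real implementation):
import shutil
import tempfile


class ProcessorSketch:
    def __enter__(self):
        self._tmpdir = tempfile.mkdtemp()  # workspace for downloaded files
        return self

    def __exit__(self, exc_type, exc, tb):
        shutil.rmtree(self._tmpdir)  # always clean up, even on error
        return False  # do not swallow exceptions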
def __init__(self, env_config_file):
    with open(env_config_file) as f:
        config = json.load(f)
    self.observation_space = ObservationSpace(config["observation_space"]["dim"])
    self.action_space = ActionSpace(config["action_space"]["high"])
    self.params_template = config["params_template"]
    self.target_params = config["target_params"]
    data_dir = util.SETTING_DIR + config["setting_dir"]
    roadnet_file = data_dir + config["roadnet_file"]
    flow_file = data_dir + config["flow_file"]
    signal_file = data_dir + config["signal_file"]
    self.observed_file = data_dir + config["observed_file"]
    self.f_observed = open(self.observed_file)
    self.gen = Generator(flow_file, signal_file)
    self.proc = Processor()
    self.eng = engine.Engine(1.0, 2, True, True, False)
    self.eng.load_roadnet(roadnet_file)
    self.t = 0
    self.total_reward = 0
    self.d = False
    self.steps = self.gen.steps
    self.reset()