import heapq
from collections import defaultdict


def find_shortest_path(grid: list[list[int]]) -> int:
    """Dijkstra's algorithm from the top-left to the bottom-right cell of a weighted grid."""
    rows = len(grid)
    cols = len(grid[0])
    start = (0, 0)
    end = (rows - 1, cols - 1)
    visited = set()
    queue = []
    costs = defaultdict(lambda: float("inf"))
    costs[start] = 0
    heapq.heappush(queue, (0, start))
    while queue:
        _, node = heapq.heappop(queue)
        if node in visited:
            continue  # stale queue entry; a cheaper path was already settled
        visited.add(node)
        if node == end:
            return costs[end]
        i, j = node
        for di, dj in [(0, 1), (1, 0), (-1, 0), (0, -1)]:
            ai = i + di
            aj = j + dj
            if 0 <= ai < rows and 0 <= aj < cols:
                if (ai, aj) in visited:
                    continue
                new_cost = costs[(i, j)] + grid[ai][aj]
                if new_cost < costs[(ai, aj)]:
                    costs[(ai, aj)] = new_cost
                    heapq.heappush(queue, (new_cost, (ai, aj)))
    raise ValueError("no path found")
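
# A minimal usage sketch of find_shortest_path (grid values are illustrative).
# Note that the start cell's own weight is never added to the cost.
example_grid = [
    [0, 1, 3],
    [2, 1, 9],
    [4, 2, 1],
]
assert find_shortest_path(example_grid) == 5  # (0,0) -> (0,1) -> (1,1) -> (2,1) -> (2,2)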
import boto3


def upload_file(file_name, object_name):
    if not S3_BUCKET_NAME:
        raise ValueError("cannot upload to S3, S3_BUCKET_NAME not specified")
    print(f"Uploading to S3 bucket {S3_BUCKET_NAME}")
    s3 = boto3.client("s3")
    with open(file_name, "rb") as f:
        s3.upload_fileobj(f, S3_BUCKET_NAME, object_name)
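
# Usage sketch (file and key names are illustrative): S3_BUCKET_NAME is
# expected to be a module-level setting, e.g. read from the environment.
#   S3_BUCKET_NAME = os.environ.get("S3_BUCKET_NAME", "")
#   upload_file("report.csv", "reports/2024/report.csv")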
def correct_action(self, action: float, model: Module):
    try:
        op_name = next(self.ops_iter)
        index = self.pruning_op_names.index(op_name)

        _, _, current_statistics = count_flops_params(model, self.dummy_input, verbose=False)
        current_statistics = {result['name']: result for result in current_statistics}

        total_current_target = sum(current_statistics[name][self.target] for name in self.pruning_op_names)
        previous_pruning_target = self.under_pruning_target - total_current_target
        # The most that could still be pruned from the layers after this one.
        max_rest_pruning_target = sum(current_statistics[name][self.target] * self.max_sparsity_per_layer[name]
                                      for name in self.pruning_op_names[index + 1:])
        # The least this layer must give up so the overall budget can still be met.
        min_current_pruning_target = self.excepted_pruning_target - previous_pruning_target - max_rest_pruning_target
        max_current_pruning_target_1 = (self.origin_statistics[op_name][self.target] * self.max_sparsity_per_layer[op_name]
                                        - (self.origin_statistics[op_name][self.target] - current_statistics[op_name][self.target]))
        max_current_pruning_target_2 = self.excepted_pruning_target - previous_pruning_target
        max_current_pruning_target = min(max_current_pruning_target_1, max_current_pruning_target_2)

        min_action = min_current_pruning_target / current_statistics[op_name][self.target]
        max_action = max_current_pruning_target / current_statistics[op_name][self.target]
        if min_action > self.max_sparsity_per_layer[op_name]:
            _logger.warning('[%s] min action > max sparsity per layer: %f > %f',
                            op_name, min_action, self.max_sparsity_per_layer[op_name])
        # Clamp the proposed action into the feasible [min_action, max_action] range, floored at 0.
        action = max(0., min(max_action, max(min_action, action)))

        self.current_op_name = op_name
        self.current_op_target = current_statistics[op_name][self.target]
    except StopIteration:
        raise RuntimeError('Something went wrong; this should not happen.')
    return action
def wrapper(*args, **kwargs):
    try:
        # Try to run the test
        return f(*args, **kwargs)
    except BaseException as error:
        if errclass in str(type(error)):
            # If an exception of the given type happens,
            # swallow it and skip the test with the given reason
            pytest.skip(reason)
        else:
            # Anything else is a real failure: re-raise unchanged
            raise
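
# For context, a hedged sketch of the decorator factory such a wrapper
# typically comes from (the name `skip_on_error` and its signature are
# assumptions, not taken from the source):
import functools
import pytest

def skip_on_error(errclass: str, reason: str):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except BaseException as error:
                if errclass in str(type(error)):
                    pytest.skip(reason)
                raise
        return wrapper
    return decorator

# Usage: @skip_on_error("ConnectionError", reason="backend unavailable")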
import re
from datetime import datetime


def __interpret_commit(lines):
    """Extract data from a commit's log lines using regexes."""
    match = re.search(r'^commit (.+)', lines[0])
    if match is None:
        raise ValueError('no match for id in line {}'.format(lines[0]))
    commit_id = match.group(1)

    match = re.search(r'^Author: (.+) <(.+)?>', lines[1])
    author, email = None, None
    if match is not None:
        author = match.group(1)
        email = match.group(2)

    match = re.search(r'^Date: (.+)', lines[2])
    if match is None:
        raise ValueError('no match for date in line {}'.format(lines[2]))
    date = datetime.fromtimestamp(int(match.group(1)))

    comment = ''
    for line in lines[4:]:
        if line.startswith(' ') or line == '':
            comment += line + '\n'
        else:
            break
    return {
        'commit_id': commit_id,
        'author': author,
        'email': email,
        'date': date,
        'comment': comment,
        'feature_id': get_feature_id(comment)
    }
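
# A hedged usage sketch: the parser above expects `git log` output whose Date
# field is a raw Unix timestamp (e.g. produced with `git log --date=unix`).
# The sample lines below are illustrative, not taken from the source.
sample_lines = [
    "commit 3f2a1b4c5d6e7f8091a2b3c4d5e6f708192a3b4c",
    "Author: Jane Doe <jane@example.com>",
    "Date: 1700000000",
    "",
    "    Add retry logic to the uploader",
]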
async def send_activities(
    self, context: TurnContext, activities: List[Activity]
) -> List[ResourceResponse]:
    if not context:
        raise TypeError("Expected TurnContext but got None instead")
    if activities is None:
        raise TypeError("Expected Activities list but got None instead")
    if len(activities) == 0:
        raise TypeError("Expecting one or more activities, but the list was empty.")

    responses = []
    for activity in activities:
        activity.id = None
        response = ResourceResponse()

        if activity.type == "delay":
            delay_time = int((activity.value or 1000) / 1000)
            await sleep(delay_time)
        elif activity.type == ActivityTypes.invoke_response:
            context.turn_state[self._INVOKE_RESPONSE_KEY] = activity
        elif (
            activity.type == ActivityTypes.trace
            and activity.channel_id != Channels.emulator
        ):
            # no-op: traces are only delivered to the emulator
            pass
        else:
            connector_client: ConnectorClient = context.turn_state.get(
                self.BOT_CONNECTOR_CLIENT_KEY
            )
            if not connector_client:
                raise RuntimeError("Unable to extract ConnectorClient from turn context.")
            if activity.reply_to_id:
                response = await connector_client.conversations.reply_to_activity(
                    activity.conversation.id, activity.reply_to_id, activity
                )
            else:
                response = await connector_client.conversations.send_to_conversation(
                    activity.conversation.id, activity
                )

        response = response or ResourceResponse(id=activity.id or "")
        responses.append(response)
    return responses
async def delete_activity(
    self, context: TurnContext, reference: ConversationReference
):
    if not context:
        raise TypeError("Expected TurnContext but got None instead")
    if not reference:
        raise TypeError("Expected ConversationReference but got None instead")
    connector_client: ConnectorClient = context.turn_state.get(
        self.BOT_CONNECTOR_CLIENT_KEY
    )
    if not connector_client:
        raise RuntimeError("Unable to extract ConnectorClient from turn context.")
    await connector_client.conversations.delete_activity(
        reference.conversation.id, reference.activity_id
    )
def extra_validation(self, argsn):
    # Validation that Cerberus doesn't do well

    # entity_rules
    for climate, rule in self.entity_rules.items():
        offrule = rule.get("off_state", {})
        if offrule.get("state", "") == "perm_hold":
            if "temp" not in offrule:
                self.error(f'Invalid offrule. Perm_hold needs a "temp": {offrule}')
            if "perm_hold_string" not in offrule:
                self.error(f'Invalid offrule. Perm_hold needs a "perm_hold_string": {offrule}')
        state = self.get_state(climate, attribute="all")
        if state is None:
            self.error(f"Probable misconfiguration (bad entity): could not get state for entity: {climate}")

    # inactive_period: mm/dd - mm/dd
    if argsn.get("inactive_period"):
        try:
            match = re.match(
                r"(\d?\d)/(\d?\d)\s*-\s*(\d?\d)/(\d?\d)",
                argsn["inactive_period"],
            )
            start = (int(match.group(1)), int(match.group(2)))  # type: ignore
            end = (int(match.group(3)), int(match.group(4)))  # type: ignore
            if not (1 <= start[0] <= 12 and 1 <= end[0] <= 12
                    and 1 <= start[1] <= 31 and 1 <= end[1] <= 31):
                raise ValueError(
                    f'Invalid day or month value in inactive_period ({argsn["inactive_period"]})'
                )
        except Exception as err:
            self.error(f'Invalid inactive_period format. Should be: "mm/dd - mm/dd". Error: {err}')
        else:
            self.inactive_period = (start, end)  # ((m,d), (m,d))
def get_valid_moves(self, loc: BoardLoc):
    """Return a list of B_paths grouped by tile value.

    Paths in the same group share an id, which reduces the amount of
    evaluation at a given time; the groups are concatenated later, e.g.

        [
            0: [int, int, int]   # tiles that pair with themselves
            1: B_path
            2: B_path
            ...
        ]

    Remember not to append zero-length path values.
    """
    rt = [[]] + [B_path(loc) for _ in repeat(None, MAX_INT)]  # 15 elements
    nonzero = np.where(self.board != 0)[0]
    if len(nonzero) == 0:
        raise ValueError("asked to find paths on an empty board")
    if len(nonzero) == 1:
        # There are multiple cases here.
        if self.board[nonzero[0]] == 1:
            # Only one number on the board: pair it with itself.
            rt[nonzero[0] + 1].paths.append(Path(np.byte(nonzero[0]), np.byte(nonzero[0])))
            return rt
        rt[0].append(nonzero[0])
        return rt
    for key, select in enumerate(nonzero):
        if self.board[select] > 1:
            # Tile appears more than once: add it to the self-paired group.
            rt[0].append(np.byte(select))  # in C, this would be a single byte
        for target in nonzero[key + 1:]:
            rt[min(target, select) + 1].paths.append(Path(np.byte(select), np.byte(target)))
    return rt
async def update_activity(self, context: TurnContext, activity: Activity):
    if not context:
        raise TypeError("Expected TurnContext but got None instead")
    if activity is None:
        raise TypeError("Expected Activity but got None instead")
    connector_client: ConnectorClient = context.turn_state.get(
        self.BOT_CONNECTOR_CLIENT_KEY
    )
    if not connector_client:
        raise RuntimeError("Unable to extract ConnectorClient from turn context.")
    response = await connector_client.conversations.update_activity(
        activity.conversation.id, activity.reply_to_id, activity
    )
    response_id = response.id if response and response.id else None
    return ResourceResponse(id=response_id) if response_id else None
def start_remote_debugging(debug_relay_connection_string_secret: str,
                           debug_relay_connection_name: str,
                           debug_port: int,
                           debugpy_connect_timeout: float = 15):
    # get the connection string from the workspace Key Vault
    run = Run.get_context()
    connection_string = run.get_secret(debug_relay_connection_string_secret)
    if connection_string is None or connection_string == "":
        err_msg = "Connection string for Azure Relay Hybrid Connection is missing in Key Vault."
        logging.fatal(err_msg)
        raise ValueError(err_msg)

    print("Remote debugging has been activated. Starting Azure Relay Bridge...")
    # your Hybrid Connection name
    relay_connection_name = debug_relay_connection_name
    debug_mode = DebugMode.Connect
    hybrid_connection_url = None  # can stay None because we are using a connection string
    host = "127.0.0.1"  # local hostname or IP address the debugger starts on
    port = debug_port

    debug_relay = DebugRelay(connection_string, relay_connection_name, debug_mode,
                             hybrid_connection_url, host, port)
    debug_relay.open()
    if debug_relay.is_running():
        print(f"Starting debugpy session on {host}:{port} with timeout {debugpy_connect_timeout} seconds.")
        if debugpy_connect_with_timeout(host, port,
                                        connect_timeout_seconds=debugpy_connect_timeout):
            print("Debugpy is connected!")
            return True
        print("Could not connect to the debugger!")
        return False
    err_msg = "Cannot connect to a remote debugger"
    print(err_msg)
    logging.fatal(err_msg)
    raise RuntimeError(err_msg)
""" Convert the given float to a string, without resorting to scientific notation """ d1 = ctx.create_decimal(repr(f)) return format(d1, 'f') if __name__ == '__main__': ctx = decimal.Context() ctx.prec = 15 if len(sys.argv) != 7: raise Error( 'Numero di parametri errato. Parametri: path input, path output, colonna tempi, colonna valori, colonna errori, range righe (n:m)' ) path = sys.argv[1] workbook = op.load_workbook(filename=path, data_only=True) sheet = workbook.active start = sys.argv[6].split(':')[0] end = sys.argv[6].split(':')[1] out_path = sys.argv[2] times_interval = sys.argv[3] + start + ':' + sys.argv[3] + end values_interval = sys.argv[4] + start + ':' + sys.argv[4] + end errors_interval = sys.argv[5] + start + ':' + sys.argv[5] + end
def main():
    args = parse()
    if args.command not in ["db", "snippet"]:
        # currently "db" is a special command that is supposed to be invoked only internally by gcvb.
        get_to_gcvb_root()
    if args.command in ["list", "generate"]:
        a = yaml_input.load_yaml(args.yaml_file, args.modifier)
        a = filter_tests(args, a)
    # Commands
    if args.command == "list":
        if not args.count:
            if args.human_readable:
                r = list_human_readable(a["Packs"])
                print(yaml.dump(r, sort_keys=False))
            else:
                print(yaml.dump({"Packs": a["Packs"]}))
        else:
            print(len(a["Tests"].keys()))
    if args.command == "generate":
        data_root = os.path.join(os.getcwd(), "data")
        if args.data_root:
            data_root = os.path.abspath(args.data_root)
        if not os.path.isfile(db.database):
            db.create_db()
        gcvb_id = db.new_gcvb_instance(' '.join(sys.argv[1:]), args.yaml_file, args.modifier)
        target_dir = "./results/{}".format(str(gcvb_id))
        a["data_root"] = data_root
        job.generate(target_dir, a)
    if args.command == "compute":
        gcvb_id = args.gcvb_base
        if os.path.exists("config.yaml"):
            config = util.open_yaml("config.yaml")
        else:
            config = {"machine_id": platform.node(), "executables": {},
                      "submit_command": "bash", "va_submit_command": "bash"}
        config_id = config.get("machine_id")
        if not gcvb_id:
            gcvb_id = db.get_last_gcvb()
        if args.validate_only and not db.has_run(gcvb_id):
            raise RuntimeError("There is no previous run for the base id {}!".format(str(gcvb_id)))
        run_id = db.add_run(gcvb_id, config_id)
        computation_dir = "./results/{}".format(str(gcvb_id))
        a = yaml_input.load_yaml(os.path.join(computation_dir, "tests.yaml"))
        a = filter_tests(args, a)
        all_tests = [t for p in a["Packs"] for t in p["Tests"]]
        db.add_tests(run_id, all_tests, args.chain)
        job_file = os.path.join(computation_dir, "job.sh")
        data_root = a["data_root"]
        job.write_script(all_tests, config, data_root, gcvb_id, run_id,
                         job_file=job_file, header=args.header,
                         validate_only=args.validate_only)
        if not args.dry_run and not args.with_jobrunner:
            job.launch(job_file, config, args.validate_only, args.wait_after_submitting)
        if args.with_jobrunner:
            j = jobrunner.JobRunner(args.with_jobrunner, run_id, config,
                                    args.started_first, args.max_concurrent, args.verbose)
            j.run()
    if args.command == "jobrunner":
        run_id, gcvb_id = db.get_last_run()  # run chosen should be modifiable
        config = util.open_yaml("config.yaml")
        num_cores = args.num_cores
        j = jobrunner.JobRunner(num_cores, run_id, config,
                                args.started_first, args.max_concurrent, args.verbose)
        j.run()
    if args.command == "db":
        if args.db_command == "start_run":
            db.start_run(args.first)
        if args.db_command == "end_run":
            db.end_run(args.first)
        if args.db_command == "start_test":
            db.set_db("../../../gcvb.db")
            db.start_test(args.first, args.second)
        if args.db_command == "end_test":
            db.set_db("../../../gcvb.db")
            db.end_test(args.first, args.second)
            a = yaml_input.load_yaml("../tests.yaml")
            t = a["Tests"][args.third]
            if "keep" in t:
                db.save_files(args.first, args.second, t["keep"])
        if args.db_command == "start_task":
            db.set_db("../../../gcvb.db")
            db.start_task(args.first, args.second)
        if args.db_command == "end_task":
            db.set_db("../../../gcvb.db")
            db.end_task(args.first, args.second, args.third)
    if args.command == "report":
        run_id, gcvb_id = db.get_last_run()
        computation_dir = "./results/{}".format(str(gcvb_id))
        a = yaml_input.load_yaml_from_run(run_id)
        # Is the run finished?
        started_at = time.time()
        previous_completed_tests = -1
        completed_tests, tests, finished = report_check_terminaison(run_id)
        if args.polling:
            while not finished and time.time() - started_at < args.timeout:
                completed_tests, tests, finished = report_check_terminaison(run_id)
                if previous_completed_tests != len(completed_tests):
                    now = time.strftime("%H:%M:%S %d/%m/%y")
                    print("Tests completed: {!s}/{!s} ({!s})".format(len(completed_tests), len(tests), now))
                time.sleep(args.frequency)
                previous_completed_tests = len(completed_tests)
            started_but_not_finished = list(filter(lambda x: not x["end_date"] and x["start_date"], tests))
            started_but_not_finished = [t["name"] for t in started_but_not_finished]
            if started_but_not_finished:
                print(f"{len(started_but_not_finished)} tests did start but did not finish:")
                print(started_but_not_finished)
        print("Tests completed: {!s}/{!s}".format(len(completed_tests), len(tests)))
        tmp = db.load_report(run_id)
        report = validation.Report(a, tmp)
        if report.is_success():
            if finished:
                print("Success!")
            else:
                print("No failure yet, computation in progress...")
        else:
            if report.missing_validations:
                # should we show only missing_validations for completed tests?
                print("Some validation metrics are missing:")
                pprint.pprint(report.missing_validations)
            failed = report.get_failed_tests()
            print("{!s} failure(s): {!s}".format(len(failed), list(failed)))
            print("Details of failures:")
            pprint.pprint(report.failure)
    if args.command == "snippet":
        snippet.display(args)
    if args.command == "generate_refs":
        gcvb_id = args.gcvb_base
        files = args.files.split(",")
        if not gcvb_id:
            gcvb_id = db.get_last_gcvb()
        generate_refs.generate_from_base(gcvb_id, files, args.reference_id, args.description)
    if args.command == "dashboard":
        from . import dashboard
        dashboard.run_server()
async def process_activity(
    self,
    auth_header_or_authenticate_request_result: Union[
        str, AuthenticateRequestResult
    ],
    activity: Activity,
    logic: Callable[[TurnContext], Awaitable],
):
    """
    Creates a turn context and runs the middleware pipeline for an incoming activity.

    :param auth_header_or_authenticate_request_result: The HTTP authentication header
        of the request, or an already-computed authentication result
    :type auth_header_or_authenticate_request_result:
        :class:`typing.Union[str, AuthenticateRequestResult]`
    :param activity: The incoming activity
    :type activity: :class:`Activity`
    :param logic: The logic to execute at the end of the adapter's middleware pipeline.
    :type logic: :class:`typing.Callable`

    :return: A task that represents the work queued to execute.

    .. remarks::
        This class processes an activity received by the bot's web server. This includes
        any messages sent from a user and is the method that drives what's often referred
        to as the bot's *reactive messaging* flow.
        Call this method to reactively send a message to a conversation.
        If the task completes successfully, then an :class:`InvokeResponse` is returned;
        otherwise, `null` is returned.
    """
    # Authenticate the inbound request, extracting parameters and creating a
    # ConnectorFactory for making outbound requests.
    authenticate_request_result = (
        await self.bot_framework_authentication.authenticate_request(
            activity, auth_header_or_authenticate_request_result
        )
        if isinstance(auth_header_or_authenticate_request_result, str)
        else auth_header_or_authenticate_request_result
    )

    # Set the caller_id on the activity.
    activity.caller_id = authenticate_request_result.caller_id

    # Create the connector client to use for outbound requests.
    connector_client = (
        await authenticate_request_result.connector_factory.create(
            activity.service_url, authenticate_request_result.audience
        )
        if authenticate_request_result.connector_factory
        else None
    )
    if not connector_client:
        raise RuntimeError("Unable to extract ConnectorClient from turn context.")

    # Create a UserTokenClient instance for the application to use.
    # (For example, it would be used in a sign-in prompt.)
    user_token_client = await self.bot_framework_authentication.create_user_token_client(
        authenticate_request_result.claims_identity
    )

    # Create a turn context and run the pipeline.
    context = self._create_turn_context(
        activity,
        authenticate_request_result.claims_identity,
        authenticate_request_result.audience,
        connector_client,
        user_token_client,
        logic,
        authenticate_request_result.connector_factory,
    )

    # Run the pipeline.
    await self.run_pipeline(context, logic)

    # If there are any results, they will have been left on the TurnContext.
    return self._process_turn_results(context)
def get_datadir(cls) -> str:
    if cls._datadir == "":
        raise ValueError("Data directory not set")
    return cls._datadir
def get_bagdir(cls) -> str:
    if cls._bagdir == "":
        raise ValueError("Bag directory not set")
    return cls._bagdir
if delay < 20:
    delay = 20

if platform != 'win32' and platform != 'cygwin':
    moz_driver_path = os.path.join(os.path.abspath('.'), 'geckodriver')
    chrome_driver_path = os.path.join(os.path.abspath('.'), 'chromedriver')
else:
    moz_driver_path = os.path.join(os.path.abspath('.'), 'geckodriver.exe')
    chrome_driver_path = os.path.join(os.path.abspath('.'), 'chromedriver.exe')

driver_path_check = os.path.exists(chrome_driver_path) or os.path.exists(moz_driver_path)
if not driver_path_check:
    raise FileNotFoundError(
        'Selenium driver is needed. Please ensure you have a chromedriver or a '
        'Mozilla Firefox geckodriver in the root of the folder, then try again.'
    )

# Start the browser in mute mode for the whole download section.
# ---- Chrome
if os.path.exists(chrome_driver_path):
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument("--mute-audio")
    driver = webdriver.Chrome(chrome_driver_path, options=chrome_options)
# ---- Mozilla
else:
    profile = webdriver.FirefoxProfile()
    profile.set_preference("media.volume_scale", "0.0")
    driver = webdriver.Firefox(executable_path=moz_driver_path, firefox_profile=profile)
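
# Note: passing the driver path positionally was removed in Selenium 4. A hedged
# sketch of the Service-based equivalent for the Chrome branch above:
#   from selenium.webdriver.chrome.service import Service
#   driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)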