class Codustry(BaseModel):
    """This is a graph object."""

    craftmen: Set[AllCraftsmanTypes]
    teams: Set  # element types unknown here; `Set()` is not a valid annotation
    projects: Set
    money_accounts: dict
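# A minimal sketch (not part of the model above) of why the bare `Set()`
# annotations had to go: the typing alias is subscripted, never called,
# while the built-in `set` is what you instantiate.
from typing import Set

class _AnnotationDemo:
    ok: Set[int]          # valid: a subscripted generic annotation
    also_ok: set = set()  # valid: built-in type with a default value
    # bad: Set()          # TypeError: Type Set cannot be instantiated; use set() instead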
def to_dict(self, found: typing.Optional[typing.Set] = None) -> typing.Dict[str, typing.Any]:
    if found is None:
        found = set()
    mapper = class_mapper(self.__class__)
    columns = [column.key for column in mapper.columns]
    result = dict()
    for column in columns:
        value = getattr(self, column)
        # `isinstance(value, type(datetime))` never matched; compare
        # against the datetime class itself.
        if isinstance(value, datetime):
            result[column] = value.isoformat()
        elif isinstance(value, enum.Enum):
            result[column] = value.name
        else:
            result[column] = value
    for name, relation in mapper.relationships.items():
        if relation not in found:
            found.add(relation)
            related_obj = getattr(self, name)
            if related_obj is not None:
                if relation.uselist:
                    result[name] = [child.to_dict(found) for child in related_obj]
                else:
                    result[name] = related_obj.to_dict(found)
    return result
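# Stand-alone sketch of the cycle-guard pattern `to_dict` relies on: a
# shared `found` set lets a recursive serializer survive back-references.
# Plain objects here; no SQLAlchemy required.
def _serialize(obj, found=None):
    if found is None:
        found = set()
    if id(obj) in found:
        return "<cycle>"
    found.add(id(obj))
    return {k: _serialize(v, found) if hasattr(v, "__dict__") else v
            for k, v in vars(obj).items()}

# A parent <-> child back-reference terminates instead of recursing forever:
# _serialize(parent) == {'child': {'parent': '<cycle>'}}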
def _tensor_dependencies(tensor: KerasTensor, visited: tp.Set):
    ref = tensor.ref()
    if ref not in visited:
        visited.add(ref)
        if isinstance(tensor, KerasTensor):  # reject `EagerTensor`s
            for input_tensor in tf.nest.flatten(tensor.node.input_tensors):
                yield from _tensor_dependencies(input_tensor, visited)
        yield tensor
def dynamodb_attribute_to_set(cls, value: t.Set) -> t.Set:
    if len(value) == 1:
        elem = value.pop()
        if elem == cls.SET_PLACEHOLDER_VALUE:
            return set()
        else:
            value.add(elem)
    return value
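# Hedged sketch of the placeholder round-trip: DynamoDB cannot store empty
# sets, so a single sentinel element stands in for "empty". The class name
# and sentinel value below are invented for the demo; only the logic of the
# helper above is restated.
class _SetCodecDemo:
    SET_PLACEHOLDER_VALUE = "__EMPTY_SET__"

    @classmethod
    def from_dynamodb(cls, value):
        if value == {cls.SET_PLACEHOLDER_VALUE}:
            return set()
        return value

assert _SetCodecDemo.from_dynamodb({"__EMPTY_SET__"}) == set()
assert _SetCodecDemo.from_dynamodb({"a", "b"}) == {"a", "b"}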
def find_shared_pts(
        pts1: List[Point],
        pts2: List[Point]) -> Tuple[Set[Point], Set[Point], Set[Point]]:
    a = {Point(p.x, p.y) for p in pts1}
    b = {Point(p.x, p.y) for p in pts2}
    both_ab = a.intersection(b)
    just_a = a.difference(both_ab)
    just_b = b.difference(both_ab)
    return (just_a, both_ab, just_b)
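# Hedged usage sketch; `Point` is a stand-in here (any hashable type with
# `.x`/`.y` works), since the real Point class lives elsewhere.
from collections import namedtuple
Point = namedtuple("Point", ["x", "y"])

just_a, both, just_b = find_shared_pts(
    [Point(0, 0), Point(1, 1)], [Point(1, 1), Point(2, 2)])
assert both == {Point(1, 1)}
assert just_a == {Point(0, 0)} and just_b == {Point(2, 2)}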
def _find_target(self, node: TreeNode, nodes: typing.Set, k: int) -> bool:
    if not node:
        return False
    if k - node.val in nodes:
        return True
    nodes.add(node.val)
    return (self._find_target(node.left, nodes, k)
            or self._find_target(node.right, nodes, k))
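# Hedged usage sketch, assuming `_find_target` is reachable as a plain
# function and TreeNode is the usual val/left/right node:
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

class _Demo:
    _find_target = _find_target  # bind the helper above as a method

root = TreeNode(5, TreeNode(3), TreeNode(6))
assert _Demo()._find_target(root, set(), 9) is True    # 3 + 6 == 9
assert _Demo()._find_target(root, set(), 100) is False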
def __init__(self, file_name: str):
    self.__file_name = file_name
    self.__dataset_of_movies: List[Movie] = list()
    self.__dataset_of_actors: Set[Actor] = set()
    self.__dataset_of_directors: Set[Director] = set()
    self.__dataset_of_genres: Set[Genre] = set()
    # `Dict(Movie)` is not valid typing syntax (and attribute annotations
    # are evaluated at runtime, so it would raise). Dict takes a key and a
    # value type; the key types are not recoverable here, so the dict
    # annotations are left generic.
    self.__rank_of_movies: Dict = dict()
    self.__movies_with_given_year: Dict = dict()
    self.__movies_with_given_director: Dict = dict()
    self.__movies_with_given_actor: Dict = dict()
    self.__movies_with_given_genre: Dict = dict()
def resolve_parameter(self,
                      parameter,
                      kwargs: typing.Dict,
                      consts: typing.Dict,
                      seen_state: typing.Set,
                      parent_parameter=None) -> typing.List[typing.Tuple]:
    """
    Resolve a parameter by inferring the component that suits it or by
    adding a value to kwargs or consts.

    The list of steps returned consists of a resolver function, a boolean
    that indicates if the function is async, function kwargs and consts,
    and the output name.

    :param parameter: parameter to be resolved.
    :param kwargs: kwargs that defines current context.
    :param consts: consts that defines current context.
    :param seen_state: cached state.
    :param parent_parameter: parent parameter.
    :return: list of steps to resolve the component.
    """
    if parameter.annotation is ReturnValue:
        kwargs[parameter.name] = "return_value"
        return []

    # Check if the parameter class exists in 'initial'.
    if parameter.annotation in self.reverse_initial:
        initial_kwarg = self.reverse_initial[parameter.annotation]
        kwargs[parameter.name] = initial_kwarg
        return []

    # The 'Parameter' annotation can be used to get the parameter
    # itself. Used for example in 'Header' components that need the
    # parameter name in order to lookup a particular value.
    if parameter.annotation is inspect.Parameter:
        consts[parameter.name] = parent_parameter
        return []

    for component in self.components:
        if component.can_handle_parameter(parameter):
            identity = component.identity(parameter)
            kwargs[parameter.name] = identity
            if identity not in seen_state:
                seen_state.add(identity)
                return self.resolve_component(
                    resolver=component.resolve,
                    output_name=identity,
                    seen_state=seen_state,
                    parent_parameter=parameter,
                )
            return []
    else:
        # for/else: reached only when no component handled the parameter.
        raise ComponentNotFound(parameter.name)
def execute_scheduler_simulation_simple(
        tasks: TaskSet,
        aperiodic_tasks_jobs: List[Job],
        sporadic_tasks_jobs: List[Job],
        processor_definition: Processor,
        environment_specification: Environment,
        scheduler: CentralizedScheduler,
        simulation_options: SimulationConfiguration = SimulationConfiguration()
) -> Tuple[RawSimulationResult, List[Job], float]:
    """
    Run a simulation without supplying the periodic jobs. They are generated
    from the periodic task definitions, and the simulation runs from time 0
    to the end of the first major cycle.

    :param tasks: Group of tasks in the system
    :param aperiodic_tasks_jobs: Aperiodic jobs in the system
    :param sporadic_tasks_jobs: Sporadic jobs in the system
    :param processor_definition: Definition of the CPU to use
    :param environment_specification: Specification of the environment
    :param scheduler: Centralized scheduler to use
    :param simulation_options: Options of the simulation
    :return: Simulation result
             Periodic jobs automatically generated
             Major cycle
    """
    major_cycle = calculate_major_cycle(tasks)

    number_of_periodic_ids = sum(
        round(major_cycle / i.period) for i in tasks.periodic_tasks)
    number_of_ids = number_of_periodic_ids + len(aperiodic_tasks_jobs) + len(
        sporadic_tasks_jobs)

    # `Set.difference`/`Set.union` on the typing alias is not the real API;
    # plain set operators do the id bookkeeping.
    used_ids = ({i.identifier for i in aperiodic_tasks_jobs}
                | {i.identifier for i in sporadic_tasks_jobs})
    job_ids_stack: deque = deque(set(range(number_of_ids)) - used_ids)

    periodic_tasks_jobs: List[Job] = list(
        itertools.chain(*[[
            Job(job_ids_stack.popleft(), i, j * i.period)
            for j in range(round(major_cycle / i.period))
        ] for i in tasks.periodic_tasks]))

    return execute_scheduler_simulation(
        periodic_tasks_jobs + aperiodic_tasks_jobs + sporadic_tasks_jobs,
        tasks, processor_definition, environment_specification, scheduler,
        simulation_options), periodic_tasks_jobs, major_cycle
def submit_content_request_from_s3_object(
    dynamodb_table: Table,
    submissions_queue_url: str,
    bucket: str,
    key: str,
    content_id: str = "",
    content_type: ContentType = PhotoContent,
    additional_fields: t.Optional[t.Set] = None,
    force_resubmit: bool = False,
):
    """
    Converts an s3 event into a ContentObject and url_submission_message
    using helpers from submit.py

    For partner bucket uploads, the content IDs are unique and (somewhat)
    readable, but not reversible
      * uniqueness is provided by uuid4, which has a collision rate of 2^-36
      * readability is provided by including part of the key in the content id
      * modifications to the key mean that the original content bucket and
        key are not derivable from the content ID alone

    The original content (bucket and key) is stored in the reference url,
    which is passed to the webhook via additional_fields

    Q: Why not include the full key and bucket in content_id?
    A: Bucket keys often have "/", which doesn't work well with the
       ContentDetails UI page
    """
    # A mutable default set would be shared (and mutated) across calls;
    # use None as the sentinel instead.
    if additional_fields is None:
        additional_fields = set()

    readable_key = key.split("/")[-1].replace("?", ".").replace("&", ".")
    if not content_id:
        content_id = f"{uuid4()}-{readable_key}"

    presigned_url = create_presigned_url(bucket, key, None, 3600, "get_object")
    reference_url = f"https://{bucket}.s3.amazonaws.com/{key}"

    additional_fields.update({
        f"s3_reference_url:{reference_url}",
        f"bucket_name:{bucket}",
        f"object_key:{key}",
    })

    record_content_submission(
        dynamodb_table,
        content_id,
        content_type,
        content_ref=presigned_url,
        content_ref_type=ContentRefType.URL,
        additional_fields=additional_fields,
    )
    send_submission_to_url_queue(dynamodb_table, submissions_queue_url,
                                 content_id, content_type, presigned_url)
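# Isolated sketch of the content-id scheme documented above (pure string
# handling, no AWS calls; the key below is invented):
from uuid import uuid4

_key = "partner-uploads/2021/photo?size=large&v=2.jpg"
_readable_key = _key.split("/")[-1].replace("?", ".").replace("&", ".")
_content_id = f"{uuid4()}-{_readable_key}"
# e.g. '6f1c...-photo.size=large.v=2.jpg' -- unique, readable, not reversible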
def __init__(self):
    self.__dataset_of_movies: List[Movie] = list()
    self.__dataset_of_release_years = list()
    # `Dict(Movie)` is not valid typing syntax; Dict takes a key and a
    # value type, and the key types are not recoverable here, so the dict
    # annotations are left generic.
    self.__rank_of_movies: Dict = dict()
    self.__movie_details = dict()
    self.__dataset_of_actors: Set[Actor] = set()
    self.__dataset_of_directors: Set[Director] = set()
    self.__dataset_of_genres: Set[Genre] = set()
    self.__movies_with_given_year: Dict = dict()
    self.__movies_with_given_director: Dict = dict()
    self.__movies_with_given_actor: Dict = dict()
    self.__movies_with_given_genre: Dict = dict()
    self.__users = list()
    self.__reviews = list()
    self.__user_watch_list: Dict = dict()
def offline_stage(self, processor_definition: Processor,
                  environment_specification: Environment,
                  task_set: TaskSet) -> int:
    """
    Method to implement with the offline stage scheduler tasks

    :param environment_specification: Specification of the environment
    :param processor_definition: Specification of the cpu
    :param task_set: Tasks in the system
    :return: CPU frequency
    """
    m = len(processor_definition.cores_definition)

    # Intersect with the built-in `set`; `Set.intersection` on the typing
    # alias is not the real API.
    clock_available_frequencies = set.intersection(*[
        i.core_type.available_frequencies
        for i in processor_definition.cores_definition.values()
    ])

    self.__m = m
    self.__tasks_relative_deadline = {
        i.identifier: i.relative_deadline
        for i in task_set.periodic_tasks + task_set.aperiodic_tasks +
        task_set.sporadic_tasks
    }

    return max(clock_available_frequencies)
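# Stand-alone sketch of the frequency-intersection step above, with the
# built-in `set` doing the work (the frequency values are invented):
core_frequencies = [{1000, 2000, 3000}, {2000, 3000}, {2000, 4000}]
assert set.intersection(*core_frequencies) == {2000}
assert max(set.intersection(*core_frequencies)) == 2000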
def v1(cls,
       name: str,
       sourceId: IdRange = IdRange(0, 1),
       nodePath: List[BaseNode] = [],
       requestFifo: bool = False,
       visibility: List[AddressSet] = [AddressSet(0, ~0)],
       supportsProbe: TransferSizes = TransferSizes(),
       supportsArithmetic: TransferSizes = TransferSizes(),
       supportsLogical: TransferSizes = TransferSizes(),
       supportsGet: TransferSizes = TransferSizes(),
       supportsPutFull: TransferSizes = TransferSizes(),
       supportsPutPartial: TransferSizes = TransferSizes(),
       supportsHint: TransferSizes = TransferSizes()):
    return TLMasterParameters(
        nodePath=nodePath,
        resources=[],
        name=name,
        visibility=visibility,
        unusedRegionTypes=set(),  # `Set()` (the typing alias) is not callable
        executesOnly=False,
        requestFifo=requestFifo,
        supports=TLSlaveToMasterTransferSizes(
            probe=supportsProbe,
            arithmetic=supportsArithmetic,
            logical=supportsLogical,
            get=supportsGet,
            putFull=supportsPutFull,
            putPartial=supportsPutPartial,
            hint=supportsHint),
        emits=TLMasterToSlaveTransferSizes.unknownEmits,
        neverReleasesData=False,
        sourceId=sourceId)
def execute_code(code: List[str]) -> int:
    """Execute the instructions until one is about to run a second time,
    then return the accumulator's value."""
    accumulator: int = 0

    # Line numbers of code we have already executed.
    already_executed: Set[int] = set()

    i: int = 0
    while i not in already_executed:
        already_executed.add(i)
        instruction, value = code[i].split()
        value = int(value)
        if instruction == 'acc':
            # Increment the accumulator by the specified value.
            accumulator += value
            i += 1
        elif instruction == 'jmp':
            # Jump relative to the current instruction; e.g. 'jmp +1'
            # continues at the instruction immediately below.
            i += value
        elif instruction == 'nop':
            # nop stands for No OPeration. Next instruction is executed.
            i += 1

    return accumulator
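# Usage sketch with a small hand-checked program: execution halts when
# 'jmp -3' would send control back to already-executed instruction 1,
# at which point the accumulator holds 5.
program = [
    "nop +0", "acc +1", "jmp +4", "acc +3", "jmp -3",
    "acc -99", "acc +1", "jmp -4", "acc +6",
]
assert execute_code(program) == 5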
def pick_on_meta(self, axis: int, name: str,
                 values: Union[List[str], Set[str], Tuple[str, ...]]) -> Adat:
    """Returns an adat with rfu rows or columns excluded given the multiindex name and values to keep.

    Parameters
    ----------
    axis : int
        The metadata/multiindex to operate on:
        0 - row metadata,
        1 - column metadata
    name : str
        The name of the metadata/multiindex row/column to filter based on.
    values : List[str] | Set[str] | Tuple[str, ...]
        The values to filter on. Can be a tuple, list, or set.

    Returns
    -------
    adat : Adat

    Examples
    --------
    >>> new_adat = adat.pick_on_meta(axis=0, name='Barcode', values=['00001'])
    >>> new_adat = adat.pick_on_meta(axis=1, name='SeqId', values=['10000-01', '12345-10'])
    >>> new_adat = adat.pick_on_meta(axis=1, name='Type', values=['Spuriomer'])
    """
    return self._filter_on_meta(axis, name, values, include=True)
def exclude_meta(self, axis: int,
                 names: Union[List[str], Set[str], Tuple[str, ...]]) -> Adat:
    """Returns an adat with excluded metadata/multiindices given the names to exclude.

    Parameters
    ----------
    axis : int
        The metadata/multiindex to operate on:
        0 - row metadata,
        1 - column metadata
    names : List[str] | Set[str] | Tuple[str, ...]
        The names to filter on. Can be a tuple, list, or set.

    Returns
    -------
    adat : Adat

    Examples
    --------
    >>> new_adat = adat.exclude_meta(axis=0, names=['Barcode'])
    >>> new_adat = adat.exclude_meta(axis=1, names=['SeqId'])
    """
    return self._filter_meta(axis, names, include=False)
def get_keywords(doc_dict: Dict[str, str], words: int = 10) -> Set[str]:
    """
    Extracts keywords from document dictionary

    Parameters
    ----------
    doc_dict: Dict[str, str]
        Dictionary of text documents
    words: int
        Number of keywords to extract per document (default 10)

    Returns
    -------
    result: Set[str]
        Set of extracted keywords
    """
    if len(doc_dict) < 1:
        raise ValueError("No documents to scan")

    keywords_set: Set[str] = set()
    for v in doc_dict.values():
        keywords_set.update(
            set(keywords(v, words=words, lemmatize=True, split=True)))

    # Keep only the last word of any multi-word keyword.
    keywords_set = {x.split(" ")[-1] for x in keywords_set}
    return keywords_set
def _filter_meta(self, axis: int,
                 names: Union[List[str], Set[str], Tuple[str, ...]],
                 include: bool) -> Adat:
    # Check to see if names is the right variable type
    if not isinstance(names, (list, tuple, set)):
        raise TypeError('"names" must be a list, tuple, or set.')
    else:
        names = set(names)

    # Make a copy of the df (what we will eventually return) & grab the multiindex
    adat = self.copy()
    metadata = get_pd_axis(adat, axis)

    # Double check to make sure names exist in multiindex
    for name in names:
        if name not in metadata.names:
            raise AdatKeyError(f'Name, "{name}", not found in multiindex')

    # Filter down the metadata
    for name in metadata.names:
        if name not in names and include:
            metadata = metadata.droplevel(name)
        if name in names and not include:
            metadata = metadata.droplevel(name)

    # Assign the metadata to the appropriate place
    if axis == 0:
        adat.index = metadata
    else:
        adat.columns = metadata

    return adat
def closest(self, unit: Unit, coords: List[Coord]):
    if unit.position in coords:
        return unit.position

    targets = set(coords)
    positionsHistory: Set[Coord] = set()
    positionsHistory.add(unit.position)
    newPositions = positionsHistory
    while True:
        # Expand the frontier by one step of Manhattan neighbours, dropping
        # anything already visited, blocked by the board, or occupied.
        newPositions = set().union(
            *[pos.manhattanNeighbours() for pos in newPositions])
        newPositions.difference_update(positionsHistory)
        newPositions.difference_update(set(self.board.keys()))
        newPositions.difference_update(
            set(map(lambda u: u.position, self.other_units(unit))))
        if not newPositions:
            break
        if not targets.isdisjoint(newPositions):
            return min(targets.intersection(newPositions))
        positionsHistory = positionsHistory.union(newPositions)
    return None
def part_two():
    if len(sys.argv) < 2:
        print("ERROR: no input file")
        exit(1)
    # END IF

    valid_seats: Dict[str, List[str]] = defaultdict(list)
    with open(sys.argv[1], "r", encoding="utf-8") as file:
        seats: List[str] = [line for line in file.read().split('\n')]

    for seat in seats:
        # Skip blank lines and the very first and last rows (our seat is
        # not there). Note the substring test `seat[:7] in 'FFFFFFFBBBBBBB'`
        # also matched rows like 'FFFFBBB', so the two row codes are
        # compared explicitly.
        if not seat or seat[:7] in ('FFFFFFF', 'BBBBBBB'):
            continue
        row = seat[:7]
        valid_seats[row].append(seat[7:])
    # END FOR

    possible_seats = {
        'LLR', 'LRL', 'LRR', 'RRL', 'RLR', 'RLL', 'LLL', 'RRR'
    }
    for key, value in valid_seats.items():
        if len(value) < 8:
            value.append('')
            missing: Set[str] = possible_seats.difference(set(value))
            row_no: int = binary_partition(list(key), [0, 127])
            seat_no: int = binary_partition(list(missing.pop()), [0, 7])
            print(row_no * 8 + seat_no)
    return
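# Hand-checked alternative decode (hedged; binary_partition itself is
# defined elsewhere): read F/L as 0 and B/R as 1, so 'FBFBBFFRLR' is
# row 44, column 5, seat id 44 * 8 + 5 == 357.
_code = "FBFBBFFRLR"
_row = int(_code[:7].translate(str.maketrans("FB", "01")), 2)
_col = int(_code[7:].translate(str.maketrans("LR", "01")), 2)
assert (_row, _col, _row * 8 + _col) == (44, 5, 357)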
def _generate_transitive_trace_frames(self, run: Run, start_frame: TraceFrame,
                                      leaf_ids: Set[int]):
    """Generates all traces reachable from start_frame, provided they
    contain a leaf_id from the initial set of leaf_ids."""
    kind = start_frame.kind
    queue = [(start_frame, leaf_ids)]
    while len(queue) > 0:
        frame, leaves = queue.pop()
        if len(leaves) == 0:
            continue
        frame_id = frame.id.local_id
        if frame_id in self.visited_frames:
            leaves = leaves - self.visited_frames[frame_id]
            if len(leaves) == 0:
                continue
            else:
                self.visited_frames[frame_id].update(leaves)
        else:
            self.visited_frames[frame_id] = leaves

        next_frames = self._get_or_populate_trace_frames(
            kind, run, frame.callee_id, caller_port=frame.callee_port)
        queue.extend([
            # Intersect via the set operator; `Set.intersection` on the
            # typing alias was what the pyre-fixme flagged.
            (next_frame, leaves & frame_leaves)
            for (next_frame, frame_leaves) in next_frames
        ])
def check_schedulability(self, processor_definition: Processor,
                         environment_specification: Environment,
                         task_set: TaskSet) -> Tuple[bool, Optional[str]]:
    """
    Return True if the scheduler is able to schedule the system. In the
    negative case, it may also return a reason; for example, a scheduler
    that can only handle periodic tasks with phase=0 may return
    (False, "Only can schedule tasks with phase=0")

    :param environment_specification: Specification of the environment
    :param processor_definition: Specification of the cpu
    :param task_set: Tasks in the system
    :return: Schedulability and, optionally, a reason
    """
    only_0_phase = all(i.phase is None or i.phase == 0
                       for i in task_set.periodic_tasks)
    only_periodic_tasks = len(task_set.sporadic_tasks) + len(
        task_set.aperiodic_tasks) == 0
    only_implicit_deadline = all(i.relative_deadline == i.period
                                 for i in task_set.periodic_tasks)
    only_fully_preemptive = all(
        i.preemptive_execution == PreemptiveExecution.FULLY_PREEMPTIVE
        for i in task_set.periodic_tasks)

    if not (only_0_phase and only_periodic_tasks and only_implicit_deadline
            and only_fully_preemptive):
        return False, ("Error: Only implicit deadline, fully preemptive, "
                       "0 phase periodic tasks are allowed")

    m = len(processor_definition.cores_definition)

    clock_available_frequencies = list(
        set.intersection(*[
            i.core_type.available_frequencies
            for i in processor_definition.cores_definition.values()
        ]))

    # Calculate F start
    major_cycle = calculate_major_cycle(task_set)

    available_frequencies = [
        actual_frequency for actual_frequency in clock_available_frequencies
        if sum([
            i.worst_case_execution_time * round(major_cycle / i.period)
            for i in task_set.periodic_tasks
        ]) <= m * round(major_cycle * actual_frequency) and all([
            i.worst_case_execution_time * round(major_cycle / i.period) <=
            round(major_cycle * actual_frequency)
            for i in task_set.periodic_tasks
        ])
    ]

    if len(available_frequencies) == 0:
        return False, "Error: Schedule is not feasible"

    # All tests passed
    return True, None
def get_arrows() -> Set[Arrow]:
    """Return all the constituent arrows of the composition"""
    arrows = set()  # `Set()` (the typing alias) is not instantiable
    for (out_port, in_port) in edges.items():
        arrows.add(out_port.arrow)
        arrows.add(in_port.arrow)
    self.arrows = arrows
    return arrows
def __init__(self):
    self.award_activity: List[int] = []
    self.award_student = 0
    self.minmax_penalty = 0
    self.requests_set: Set[Tuple[str, str, str]] = set()
    self.request_groups: Dict[Tuple[str, str], Set[str]] = {}
    self.requested_activities_per_student: Dict[str, int] = {}
    self.overlaps_matrix: LookupTable = {}
    self.allowed_overlaps_by_student: Dict[str, Set[Tuple[str, str]]] = {}
def extract_id_from_new_vacancies(vacancies: List[Dict]) -> Set[int]:
    logging.info("Extracting vacancy id from list of new vacancies")
    if len(vacancies) == 0:
        logging.debug("Empty list of vacancies")
        return set()  # `Set()` (the typing alias) is not instantiable
    ids = set()
    for vac in vacancies:
        ids.add(int(vac["id"]))
    return ids
def offline_stage(self, cpu_specification: Processor,
                  environment_specification: Environment,
                  task_set: TaskSet) -> int:
    # Intersect with the built-in `set`, not the typing alias.
    clock_available_frequencies = set.intersection(*[
        i.core_type.available_frequencies
        for i in cpu_specification.cores_definition.values()
    ])
    return max(clock_available_frequencies)
def __init__(self, problem: Problem, state: str, serial_planning=True):
    """
    :param problem: PlanningProblem (or subclass such as AirCargoProblem or HaveCakeProblem)
    :param state: str (will be in form TFTTFF... representing fluent states)
    :param serial_planning: bool (whether or not to assume that only one action can occur at a time)

    Instance variables calculated:
        fs: FluentState
            the state represented as positive and negative fluent literal lists
        all_actions: list
            the PlanningProblem valid ground actions combined with calculated no-op actions
        s_levels: list of sets of PgNode_s, where each set in the list represents an S-level in the planning graph
        a_levels: list of sets of PgNode_a, where each set in the list represents an A-level in the planning graph
    """
    self.problem = problem
    self.fs = decode_state(state, problem.state_map)
    self.serial = serial_planning
    self.all_actions = self.problem.actions_list + self.noop_actions(self.problem.state_map)
    self.s_levels: List[Set[PgNode_s]] = []
    self.a_levels: List[Set[PgNode_a]] = []
    self.create_graph()
def find_possible_values(grid: List[List[str]], pos: Tuple[int, int]) -> Set[str]:
    """ Return the set of possible values for the given position

    >>> grid = read_sudoku('puzzle1.txt')
    >>> values = find_possible_values(grid, (0, 2))
    >>> values == {'1', '2', '4'}
    True
    >>> values = find_possible_values(grid, (4, 7))
    >>> values == {'2', '5', '9'}
    True
    """
    # Use a variable name that does not shadow `typing.Set`.
    possible: Set[str] = set()
    row = get_row(grid, pos)
    col = get_col(grid, pos)
    block = get_block(grid, pos)
    for q in range(1, len(grid) + 1):
        if str(q) not in row and str(q) not in col and str(q) not in block:
            possible.add(str(q))
    return possible
def __init__(self):
    self.mGameLogger = GameLogger()
    self.mGameLogger.clearConsole()
    self.mGameLogger.addString("Game starts. Have fun!")
    # Attribute annotations are evaluated at runtime, so the original
    # `Set([Player()])` annotation would raise; subscript the alias instead.
    self.mPlayerList: Set[Player] = set()
    self.mNumberOfPlayers: int = self.howManyPlayers()
    self.mNexusHealth: int = self.setNexusHealth()
    self.mRing: set = set()
    self.mGraveyard: set = set()
    self.invitePlayers()
    self.mGameLogger.clearConsole()
    self.mFight = Fight(self)
def on_end(self, data: Data) -> None:
    index_summaries = DefaultKeyDict(default=lambda x: Summary(name=x))
    for mode in self.mode:
        final_scores = sorted(
            [(idx, elem[-1][1]) for idx, elem in self.index_history[mode].items()],
            key=lambda x: x[1])
        max_idx_list = {
            elem[0] for elem in final_scores[-1:-self.n_max_to_keep - 1:-1]
        }
        min_idx_list = {elem[0] for elem in final_scores[:self.n_min_to_keep]}
        # Union via the set instance; `Set.union` on the typing alias is
        # not the real method.
        target_idx_list = min_idx_list.union(max_idx_list, self.idx_to_keep)
        for idx in target_idx_list:
            for step, score in self.index_history[mode][idx]:
                index_summaries[idx].history[mode][self.metric_key][step] = score
    self.system.add_graph(self.outputs[0], list(index_summaries.values()))  # So traceability can draw it
    data.write_without_log(self.outputs[0], list(index_summaries.values()))