def _parse_tuple(data: Dict[str, Any], field_in: Union[str, List[str]] = ("y_window", "y-window"), length: int = 2, check_type: Optional[Union[type, List[type]]] = None, cast: Optional[type] = None, default: Optional[Tuple] = None) -> Optional[Tuple]: if not isinstance(field_in, list): field_in = [field_in] if check_type is not None and not isinstance(check_type, list): check_type = [check_type] for field in field_in: if field in data: y_win = data[field] if check_type is not None: for item in y_win: checks = map(lambda x: isinstance(item, x), check_type) if not funcy.any(checks): print( f"WARNING: Item {item} is not matching the possible types {check_type}" ) if cast is not None: y_win = map(cast, y_win) if isinstance(y_win, list) and len(y_win) == length: return tuple(y_win) else: print( f"WARNING: Cannot parse y_window, expecting a list of exactly two integers, " f"got: {data[field]}") return default
def name_to_gender(first_name, family_name=None):
    """Guess gender from a Russian name.

    Returns a truthy value (1/True) for male, falsy (0/False) for female;
    raises NameError when no heuristic matches.
    """
    # First try to detect gender from the family name
    if family_name is not None:
        # Historical approach (disabled): treating a surname parsed as
        # genitive case as female. Latanov Vladislav became the first
        # counterexample:
        # if morph.parse(family_name)[0].tag.case == 'gent':
        #     return 0
        # Detect male surnames by their typical endings
        male_endings = ['ов', 'ев', 'ёв', 'ин', 'ын', 'ский', 'цкий']
        if funcy.any(lambda ending: family_name.endswith(ending), male_endings):
            return 1
        # Detect female surnames by their typical endings
        female_endings = ['ова', 'ева', 'ёва', 'ина', 'ына', 'ская', 'цкая']
        if funcy.any(lambda ending: family_name.endswith(ending), female_endings):
            return 0
    # Fall back to automatic morphological analysis of the first name
    parsed = morph.parse(first_name)[0]
    is_male = parsed.tag.gender == 'masc'
    if parsed.score > 0.9:
        return is_male
    # Low confidence: print the name for manual review and fall back to
    # hand-written first-name rules (same approach as with surnames)
    print(f'{first_name} {family_name} - ', end='')
    # Male first names / endings
    male_names = ['он', 'Максим', 'Евгений', 'Владислав']
    if funcy.any(lambda ending: first_name.endswith(ending), male_names):
        print('кунец?')
        return 1
    # Female first names / endings
    female_names = ['на', 'Варвара']
    if funcy.any(lambda ending: first_name.endswith(ending), female_names):
        print('тянка?')
        return 0
    print('ЧТО ЗА ПОКЕМОН?')
    raise NameError
def has_access(object_groups, user, need_view_only):
    """Check whether `user` may access an object shared with `object_groups`.

    Admins always pass. Otherwise the user must share at least one group
    with the object. Level 1 (view-only) is granted when any value attached
    to a shared group is truthy, level 2 (full) otherwise; the request is
    allowed when the required level does not exceed the granted one.
    """
    if 'admin' in user.permissions:
        return True
    shared = set(object_groups.keys()).intersection(user.groups)
    if not shared:
        return False
    view_only_flags = flatten([object_groups[group] for group in shared])
    granted_level = 1 if any(view_only_flags) else 2
    needed_level = 1 if need_view_only else 2
    return needed_level <= granted_level
def has_access(object_groups, user, need_view_only):
    """Return True when `user` is allowed to access the object.

    Admin permission short-circuits everything. Otherwise the user needs
    membership in at least one of the object's groups; full (level 2)
    access additionally requires that no matching group carries a truthy
    (view-only) flag.
    """
    if 'admin' in user.permissions:
        return True
    matching = set(object_groups.keys()).intersection(user.groups)
    if not matching:
        return False
    flags = [object_groups[g] for g in matching]
    group_level = 1 if any(flatten(flags)) else 2
    return (1 if need_view_only else 2) <= group_level
def test_multiple_candidates_with_same_ratio(self):
    """Two candidates tied at the top ratio (92) must produce an UNMATCHED
    result with reason MULTIPLE_CANDIDATES, keeping all four candidates as
    potential matches."""
    match_candidates = [
        MovieMatchCandidate(match=92),
        MovieMatchCandidate(match=92),
        MovieMatchCandidate(match=81),
        MovieMatchCandidate(match=80)
    ]
    decider = MatchDecider()
    match = decider.try_match(match_candidates)
    self.assertIsNotNone(match)
    self.assertEqual(MatchType.UNMATCHED, match.match_type())
    self.assertEqual(NonMatchReason.MULTIPLE_CANDIDATES, match.reason())
    self.assertEqual(4, len(match.potential_matches()))
    # `_.match == 92` presumably builds a predicate via a placeholder object
    # (e.g. the `whatever` library) applied by funcy — TODO confirm.
    # At least one candidate at 92 ...
    self.assertTrue(funcy.any(_.match == 92, match.potential_matches()))
    # ... and exactly one each at 81 and 80.
    self.assertTrue(funcy.one(_.match == 81, match.potential_matches()))
    self.assertTrue(funcy.one(_.match == 80, match.potential_matches()))
def resolve(self):
    """Resolve this scope's unscoped names.

    Globals are moved to module scope, names written locally (or any name in
    a module scope) become local, and nested scopes are then resolved —
    except inside class scopes.
    """
    # Extract global names to module scope
    for global_name in self.global_names:
        self.module.names[global_name].extend(
            self.unscoped_names.pop(global_name, []))
    # TODO: add nonlocal support here
    # Detect local names: module scope owns every remaining name; any other
    # scope owns a name once some node writes to it.
    for unscoped, nodes in list(self.unscoped_names.items()):
        # two-argument any(pred, seq) — funcy-style, not the builtin
        if self.is_module or any(is_write, nodes):
            self.names[unscoped].extend(nodes)
            self.unscoped_names.pop(unscoped)
    # Resolve nested
    if not self.is_class:
        for nested in self.walk_scopes():
            self._resolve_unscoped(nested)
def with_components(function, optional=(), required=()):
    """Call `function` with kwargs populated from the owner's components.

    `function` is a partial-like object exposing `_args` (whose first element
    is the component owner) and `_kwargs`. Each component type listed in
    `optional` / `required` is looked up via `owner.get_component(...)` and
    injected under its lower-cased type name.

    Returns None when any required component is unavailable, otherwise the
    result of calling `function`.

    NOTE: the former mutable default arguments ([]) were replaced with
    tuples; they were never mutated, so behavior is unchanged.
    """
    owner = function._args[0]
    # Map lower-cased type name -> resolved component instance
    optional = {component_type.__name__.lower(): owner.get_component(component_type)
                for component_type in optional}
    required = {component_type.__name__.lower(): owner.get_component(component_type)
                for component_type in required}
    # Bail out if any required component is not available
    if any(component is None for component in required.itervalues()):
        return None
    # Populate function's kwargs with components
    for component_name, component in optional.iteritems():
        function._kwargs[component_name] = component
    for component_name, component in required.iteritems():
        function._kwargs[component_name] = component
    return function()
def collection_health(mongo):
    """Report staleness of the key mongo collections.

    Returns the age in seconds of the newest item per collection plus a
    'status' of 'impaired' when any collection is older than 10 minutes,
    'ok' otherwise.

    BUGFIX: age is now computed with `timedelta.total_seconds()`; the old
    `delta.seconds` ignored the days component, so an item more than a day
    old could still look fresh.
    """
    last_items = {
        'Posts': find_latest_item(mongo, 'Posts', 'created'),
        'Comments': find_latest_item(mongo, 'Comments', 'created'),
        'Operations': find_latest_item(mongo, 'Operations', 'timestamp'),
        'AccountOperations': find_latest_item(mongo, 'AccountOperations', 'timestamp'),
    }

    def time_delta(item_time):
        # Compare as naive UTC datetimes.
        delta = dt.datetime.utcnow().replace(tzinfo=None) - item_time.replace(
            tzinfo=None)
        return int(delta.total_seconds())

    timings = walk_values(time_delta, last_items)
    # NOTE: two-argument `any(pred, seq)` relies on funcy's `any`, not the builtin.
    return {
        **timings,
        'status': 'impaired' if any(lambda x: x > (60 * 10), timings.values()) else 'ok'
    }
def __contains__(self, task):
    """Membership test: True when some element of this container equals `task`."""
    return any(task == item for item in self)
def family_has_profile(cls):
    """Return True when any model in cls's family yields a truthy profile."""
    return any(model_profile(member) for member in model_family(cls))
def get_logs(logs_from_hours_ago: int = 36, cloudwatch_staleness_slo: int = 12, exclude_prefixes: Set[str] = {"sns/"}) -> SnsResponse:
    """
    Export every CloudWatch log group (paging through describe_log_groups)
    for a time window and publish a summary report to SNS.

    :param exclude_prefixes: log-group name prefixes to skip.
        NOTE(review): mutable default set — shared across calls; harmless
        only because it is never mutated here.
    :param cloudwatch_staleness_slo: Cloudwatch logs availability has a bounded staleness for 12 hours (2018)
    :param logs_from_hours_ago: how many hours into the past to go when starting logging
    :return: SnsResponse
    """
    # NOTE(review): '%Y-%d-%m' is year-DAY-month order — unusual; confirm intended.
    date_format = '%Y-%d-%m %H:%M:%S'
    current_time = datetime.utcnow()
    # Human-readable and AWS timestamp formats (API does not work with timestamps and requires an int)
    log_start_time = (current_time - timedelta(hours=logs_from_hours_ago))
    log_start_time_hr = HumanTime(log_start_time.strftime(date_format))
    log_start_time_ts = AwsTime(int(log_start_time.timestamp() * 1000))
    # End the window `cloudwatch_staleness_slo` hours ago to stay within the
    # availability SLO.
    log_end_time = (current_time - timedelta(hours=cloudwatch_staleness_slo))
    log_end_time_hr = HumanTime(log_end_time.strftime(date_format))
    log_end_time_ts = AwsTime(int(log_end_time.timestamp() * 1000))
    print(f"{datetime.utcnow()} creating client connection")
    log_client = create_aws_client(resource="logs",
                                   profile_name=os.environ.get(
                                       "CUSTOM_AWS_PROFILE", None))
    sns_client = create_aws_client(resource="sns",
                                   profile_name=os.environ.get(
                                       "CUSTOM_AWS_PROFILE", None))
    print(f"{datetime.utcnow()} getting log groups")
    print(f"received time start: {log_start_time_hr}, end: {log_end_time_hr}")
    initial = log_client.describe_log_groups()
    print(f"initial token: {initial.get('nextToken', None)}")
    # Module-level pagination state shared with other helpers.
    global last_token
    global total_log_count
    initial_log_group_group_names, last_token = get_group_names_and_token(
        initial)
    total_log_count = len(initial_log_group_group_names)
    print(
        f"got {initial_log_group_group_names[:5]}...\n last token: {last_token}"
    )
    # Todo: stop lying that this is a List[str] and handle exceptions in maybe
    results_report = "\n".join([
        maybe_export_log_group(lc=log_client,
                               start_time=log_start_time_ts,
                               end_time=log_end_time_ts,
                               prefix_time=log_start_time_hr,
                               lgn=lgn)
        for lgn in initial_log_group_group_names
        if not any(lgn.startswith(ex) for ex in exclude_prefixes)
    ])
    # in case we have more than 50 results, aws will send a nextToken and there is
    # see https://boto3.readthedocs.io/en/latest/reference/services/logs.html#CloudWatchLogs.Client.describe_log_groups
    while last_token is not None:
        print(f"{datetime.utcnow()} last token was: {last_token}")
        log_group_group_names, last_token = get_group_names_and_token(
            log_client.describe_log_groups(nextToken=last_token))
        print(f"{datetime.utcnow()} last token changed to: {last_token}")
        # NOTE(review): progress_message is built before the count is
        # incremented, so the printed total lags one batch behind; also
        # time.strftime here uses local time while the rest uses UTC.
        progress_message = f"{time.strftime(date_format)}: #{total_log_count} logs processed so far."
        total_log_count += len(log_group_group_names)
        print(f"{datetime.utcnow()} {progress_message}")
        # NOTE(review): "+=" with "\n".join() does not insert a separator
        # between batches — the first entry of each new batch is appended to
        # the last line of the previous one; confirm intended.
        results_report += "\n".join([
            maybe_export_log_group(lc=log_client,
                                   start_time=log_start_time_ts,
                                   end_time=log_end_time_ts,
                                   prefix_time=log_start_time_hr,
                                   lgn=lgn)
            for lgn in log_group_group_names
            if not any(lgn.startswith(ex) for ex in exclude_prefixes)
        ])
    return publish_to_sns(
        message=
        f"Processed a total of {total_log_count} logs:\n\n{results_report}",
        c=sns_client)
def any_win(state):
    """Faster implementation for checking if any winning combinations exist"""
    for candidate in _win_slices(state):
        if _is_slice_a_win(candidate):
            return True
    return False
def is_win(state, identifier):
    """Determines whether this identifier has won.

    :param state: board state, passed through to `_win_slices`
    :param identifier: the player marker being tested
    """
    # Two-argument `any(pred, seq)` — relies on an extended `any` (e.g.
    # funcy's); the builtin `any` would raise TypeError here.
    # `Then(_is_slice_win, identifier)` presumably builds a one-argument
    # predicate testing a slice for `identifier` — TODO confirm Then's
    # argument-binding order.
    return any(Then(_is_slice_win, identifier), _win_slices(state))
def sees_stars(self):
    """True when this node or any of its (truthy) ancestors has stars.

    Walks the parent chain starting at self and stops at the first falsy
    node, exactly like the original takewhile(bool, iterate(...)) pipeline.
    """
    node = self
    while node:
        if node.has_stars:
            return True
        node = node.parent
    return False
def find_one_strict_factor(n):
    """Return True iff `n` has a strict (non-trivial) factor, i.e. is composite.

    :param n: integer to test (expected n >= 2)

    BUGFIX: the old even-number shortcut returned True for n == 2, but 2 is
    prime and has no strict factor. Every even n > 2 is divisible by 2.
    """
    if n % 2 == 0:
        return n > 2  # 2 itself is prime
    # Odd n: trial division by odd candidates up to sqrt(n).
    return any(n % i == 0 for i in range(3, int(math.sqrt(n)) + 1, 2))
def global_usage(files):
    """Report module-level names that are never used anywhere in the project.

    Walks every scope of every file, marks names as used via direct usage,
    `from`-imports (including star imports resolved against __all__ /
    implicit exports), plain imports accessed as `module.attr`, and
    entry-point exports; then prints a line for each name still unused.

    NOTE: Python 2 code (print statements). `any(is_use, nodes)` is the
    funcy-style two-argument `any(pred, seq)`.

    :param files: mapping of package/module name -> parsed pyfile object
    """
    used = defaultdict(set)
    # TODO: detect undefined names in a scope with star imports
    # # This is used to detect undefined names
    # starimports = defaultdict(set)
    # starimports[package].update(exports)
    # print files['cacheops'].scope
    for package, pyfile in tqdm(sorted(files.items()), leave=False):
        for scope in pyfile.scope.walk_scopes():
            for node in scope.imports:
                if isinstance(node, ast.ImportFrom):
                    module = get_import_module(node, pyfile, files)
                    # Mark all imported things as used
                    if module in files:
                        imported_pyfile = files[module]
                        names = {alias.name for alias in node.names}
                        for name in names:
                            find_usage_in_pyfile(name, module,
                                                 imported_pyfile, used, files)
                        # Handle star imports
                        if '*' in names:
                            exports = files[module].scope.exports
                            if exports is None:
                                print '%s:%d: star import with no __all__ in %s' % \
                                    (pyfile.filename, node.lineno, module)
                                exports = files[module].scope.implicit_exports
                            if pyfile.is_entry:
                                # Entry points: everything exported (or its
                                # intersection with the entry's own __all__)
                                # counts as used.
                                if pyfile.scope.exports:
                                    used[module].update(
                                        set(exports) & set(pyfile.scope.exports))
                                else:
                                    used[module].update(exports)
                            else:
                                # Otherwise only names actually referenced
                                # through the star import count.
                                used[module].update(
                                    name for name in exports
                                    if any(is_use, scope.maybe_from_star.get(name, ())))
                        # When importing module look for `module.name`
                        # TODO: support `from mod1 import mod2; mod2.mod3.func()`
                        for alias in node.names:
                            full_name = '%s.%s' % (module, alias.name)
                            if full_name in files:
                                nodes = scope.names[alias.asname or alias.name]
                                used[full_name].update(
                                    n.up.attr for n in nodes
                                    if isinstance(n.up, ast.Attribute))
                elif isinstance(node, ast.Import):
                    # TODO: support `import mod1; mod1.mod2.func()`
                    # TODO: handle non-future relative imports
                    for alias in node.names:
                        if alias.name in files:
                            nodes = scope.names[(alias.asname
                                                 or alias.name).split('.')[0]]
                            # Collect the attributes accessed on the imported
                            # module (skipping the import node itself).
                            attrs = ikeep(
                                find_attr(alias.asname or alias.name, node)
                                for node in nodes[1:])
                            used[alias.name].update(attrs)
        # Direct usage
        for name, nodes in pyfile.scope.names.items():
            if any(is_use, nodes):
                used[package].add(name)
        # Entry point usage
        if pyfile.is_entry:
            # TODO: warn about no __all__ in entry point?
            exports = pyfile.scope.exports or pyfile.scope.implicit_exports
            used[package].update(exports)
    run_global_usage(files, used)
    # Finally report every module-level name never marked as used.
    for package, pyfile in sorted(files.items()):
        for name, nodes in pyfile.scope.names.items():
            if name not in used[package] and name not in IGNORED_VARS:
                print '%s:%d: %s %s is never used (globally)' % \
                    (pyfile.filename, nodes[0].lineno, name_class(nodes[0]), name)
def table_tracked(table):
    """Return whether any model mapped to `table` has a profile.

    Unknown tables are conservatively treated as tracked (and the memoized
    result is skipped via `memoize.skip`).
    """
    candidates = [
        model for model in apps.get_models(include_auto_created=True)
        if model._meta.db_table == table
    ]
    # Unknown table, track it to be safe
    if not candidates:
        raise memoize.skip(True)
    # funcy-style two-argument any(pred, seq)
    return any(model_profile, candidates)