def user(request, site_identifier, authenticated_site_user_id):
    """Render the per-user visit summary page for a site.

    Groups the user's visits by URL, annotated with visit counts and the
    latest visit, newest first; paginated 10 per page via ``?p=<n>``.

    Raises:
        Exception: when no AuthenticatedSiteUser matches the given id.
    """
    site = first(Site.objects.select_related('user_sites').filter(identifier=site_identifier)[:1])
    authenticated_site_user = first(AuthenticatedSiteUser.objects.filter(id=authenticated_site_user_id)[:1])
    if not authenticated_site_user:
        raise Exception('Invalid user')
    # One row per distinct URL, annotated with the visit count, newest
    # timestamp and highest visit id; most recently visited URLs first.
    visits = Visit.objects.filter(site_user__authenticated_site_user=authenticated_site_user).values('url').annotate(count_visits=Count('id')).annotate(max_created=Max('created')).annotate(max_id=Max('id')).order_by('-max_created')
    page_size = 10
    paginator = Paginator(visits, page_size)
    page = request.GET.get('p')
    try:
        visits = paginator.page(page)
    except PageNotAnInteger:
        # Missing or non-numeric page parameter: show the first page.
        visits = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        visits = paginator.page(paginator.num_pages)
    return {
        'TEMPLATE': 'www/site_user.html',
        'authenticated_site_user': authenticated_site_user,
        'visits': visits,
        'site': site,
    }
def visits(request, site_identifier, authenticated_site_user_id, visit_id):
    """Render the visit-history page for one URL visited by a site user.

    Uses the visit identified by *visit_id* as an anchor and lists every
    visit by the same user to the same URL, newest first; paginated 10
    per page via ``?p=<n>``.

    Raises:
        Exception: when the user id or visit id does not resolve.
    """
    site = first(Site.objects.select_related('user_sites').filter(identifier=site_identifier)[:1])
    authenticated_site_user = first(AuthenticatedSiteUser.objects.filter(id=authenticated_site_user_id)[:1])
    if not authenticated_site_user:
        raise Exception('Invalid user')
    visit = first(Visit.objects.filter(pk=visit_id)[:1])
    if not visit:
        raise Exception('Invalid visit')
    # All visits by this user to the anchor visit's URL.
    visits = Visit.objects.filter(site_user__authenticated_site_user=authenticated_site_user, url=visit.url).order_by('-created')
    page_size = 10
    paginator = Paginator(visits, page_size)
    page = request.GET.get('p')
    try:
        visits = paginator.page(page)
    except PageNotAnInteger:
        # Missing or non-numeric page parameter: show the first page.
        visits = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        visits = paginator.page(paginator.num_pages)
    return {
        'TEMPLATE': 'www/visits.html',
        'authenticated_site_user': authenticated_site_user,
        'visits': visits,
        'site': site,
        'visit': visit,
    }
def test_precise_matcher_without_matches(test_job, test_matcher):
    """precise_matcher must find nothing when no identical line exists.

    The two failure lines share a message but differ in test name, so
    the second error line has no exact counterpart to match against.
    """
    # create an error log group to match against
    data1 = {
        'action': 'test_result',
        'test': 'test1',
        'subtest': 'test1',
        'status': 'FAIL',
        'expected': 'PASS',
        'message': 'lost connection to external service',
    }
    data2 = {
        'action': 'test_result',
        'test': 'test2',
        'subtest': 'test1',
        'status': 'FAIL',
        'expected': 'PASS',
        'message': 'lost connection to external service',
    }
    failure_line1 = first(create_failure_lines(test_job, [(data1, {})]))
    failure_line2 = first(create_failure_lines(test_job, [(data2, {})]))
    tle1, tle2 = create_text_log_errors(test_job, [(data1, {}), (data2, {})])
    TextLogErrorMetadata.objects.create(text_log_error=tle1, failure_line=failure_line1)
    TextLogErrorMetadata.objects.create(text_log_error=tle2, failure_line=failure_line2)
    output = precise_matcher(tle2)
    assert output is None  # we should have no matches
def test_default_value(self):
    """first() must fall back to the default when nothing is truthy."""
    # Empty containers have no first element at all.
    assert first(set(), default=42) == 42
    assert first([], default=3.14) == 3.14
    # A list containing only falsy values behaves like an empty one.
    assert first([0, False, []], default=3.14) == 3.14
def _compute_recipient(user, *, email=None):
    """Build an RFC 2822 recipient string for *user*.

    The display name falls back from the user's name to their username
    to nothing; the address part prefers the explicit *email* override
    before the user's stored email.
    """
    display_name = first([user.name, user.username], default="")
    addr_spec = first([email, user.email])
    return str(Address(display_name, addr_spec=addr_spec))
def handle(self, *args, **options):
    """Load demo fixtures: a demo Django user, a demo Site, and nine
    site users each with nine visits spread over the last hour or so.

    Existing 'demo' user/site rows are deleted first, so the command is
    safe to re-run.
    """
    # Recreate the demo Django user from scratch.
    user = first(User.objects.filter(username='******')[:1])
    if user:
        user.delete()
    user = User(username='******', email='demo')
    user.set_password('demo')
    user.save()
    # Recreate the demo site and attach the demo user to it.
    site = first(Site.objects.filter(identifier='demo')[:1])
    if site:
        site.delete()
    site = Site(name='Sample demo for Wham', identifier='demo', secret_key='asdf1234', url='http://example.com')
    site.save()
    site.users.add(user)
    site.save()
    for x in range(1, 10):
        email = 'user' + str(x) + '@example.com'
        authenticated_site_user = AuthenticatedSiteUser(user_id=str(x), email=email, username='******' + str(x), number_of_visits=9, number_of_sessions=9)
        authenticated_site_user.save()
        site_user = SiteUser(site=site, authenticated_site_user=authenticated_site_user, sf_user_id=Site.generate_identifier(), number_of_visits=9, number_of_sessions=9)
        site_user.save()
        for y in range(1, 10):
            # Back-date each visit with a little random jitter.
            created_datetime = (timezone.now() - timedelta(minutes=y + randint(1, 9)))
            visit = Visit(created=created_datetime, site_user=site_user, ip_address='127.0.0.1', user_id=str(x), email=email, username='******' + str(x), url='http://example.com/' + str(randint(44, 88)) + '.html', referrer='http://google.com/')
            visit.save()
            if y == 1:
                # First iteration seeds the "first visit/session" markers.
                site_user.first_visit = visit
                authenticated_site_user.first_visit = visit
                session = Session(visit=visit)
                session.save()
                site_user.first_session = session
                authenticated_site_user.first_session = session
            elif y == 9:
                # Last iteration seeds the "last visit/session" markers.
                site_user.last_visit = visit
                authenticated_site_user.last_visit = visit
                session = Session(visit=visit)
                session.save()
                site_user.last_session = session
                authenticated_site_user.last_session = session
        site_user.save()
        authenticated_site_user.save()
    self.stdout.write('Demo data is loaded.')
def link_alert_summary_in_perf_data(test_perf_data, test_perf_alert, perf_datum_id):
    """Point the alert's summary at the datum with *perf_datum_id* and at
    the datum immediately before it."""
    assert perf_datum_id > 0
    datum = first(test_perf_data, key=lambda tpd: tpd.id == perf_datum_id)
    previous_datum = first(test_perf_data, key=lambda tpd: tpd.id == perf_datum_id - 1)
    # adjust relations
    summary = test_perf_alert.summary
    summary.repository = datum.repository
    summary.push = datum.push
    summary.prev_push = previous_datum.push
    summary.save()
def _domain_details(self, response):
    """Normalise the DomainDetails section of an API response into a dict."""
    details = {}
    for key, value in response.APIResponse.DomainDetails:
        if key == "NameServers":
            # Flatten each nameserver record to its first host/IP value.
            details[key] = [
                dict(Host=first(ns.Host), IP=first(ns.IP)) for ns in value
            ]
        elif key == "Eligibility":
            fields = value.__keylist__
            details[key] = dict(zip(fields, itemgetter(*fields)(value)))
        elif key == "Expiry":
            try:
                details[key] = parse(value).date()
            except (AttributeError, TypeError):
                # Leave unparseable expiry values untouched.
                details[key] = value
        else:
            details[key] = value
    return details
def test_project_docs(db_session):
    """_project_docs must emit one search document per project.

    Documents are ordered by case-insensitive project name; versions are
    sorted newest-first and latest_version is the newest non-prerelease.
    """
    projects = [ProjectFactory.create() for _ in range(2)]
    # Map each project to its releases sorted newest-first.
    releases = {
        p: sorted(
            [ReleaseFactory.create(project=p) for _ in range(3)],
            key=lambda r: packaging.version.parse(r.version),
            reverse=True,
        )
        for p in projects
    }
    assert list(_project_docs(db_session)) == [
        {
            "_id": p.normalized_name,
            "_type": "doc",
            "_source": {
                "created": p.created,
                "name": p.name,
                "normalized_name": p.normalized_name,
                "version": [r.version for r in prs],
                # First non-prerelease, scanning newest-first.
                "latest_version": first(
                    prs,
                    key=lambda r: not r.is_prerelease,
                ).version,
            },
        }
        for p, prs in sorted(releases.items(), key=lambda x: x[0].name.lower())
    ]
def _domain_price_list(self, response):
    """Map product name -> pricing fields from a DomainPriceList response."""
    prices = {}
    for item in response.APIResponse.DomainPriceList:
        # Each field holds a single-element value; unwrap with first().
        entry = {key: first(value) for key, value in item}
        prices[entry.pop('Product')] = entry
    return prices
def _from_psql_call(args, password="", **kwargs):
    """Translate psql command-line arguments into a Django DATABASES dict."""
    parser = ArgumentParser(add_help=False)
    parser.add_argument("-d", "--dbname", dest="name", default="")
    parser.add_argument("-h", "--host", dest="host", default="")
    parser.add_argument("-p", "--port", dest="port", default="")
    parser.add_argument("-U", "--username", dest="user", default="")
    parser.add_argument("db_name", nargs="?", default="")
    parsed, _ignored = parser.parse_known_args(args)
    # An explicit -d/--dbname wins over the positional database name.
    database_name = first([parsed.name, parsed.db_name], default="")
    config = {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": database_name,
        "USER": parsed.user,
        "PASSWORD": password,
        "HOST": parsed.host,
        "PORT": parsed.port,
    }
    if kwargs:
        config["OPTIONS"] = kwargs
    return config
def form_valid(self, form):
    """Geocode the submitted address, create the House and its Contract
    in one transaction, then continue with the normal success redirect."""
    url = furl("https://maps.googleapis.com/maps/api/geocode/json")
    # NOTE(review): quote_plus here plus furl's own parameter encoding
    # may double-encode the address — confirm against the Geocoding API.
    url.args["address"] = urllib.parse.quote_plus(form.cleaned_data["address"])
    url.args["key"] = settings.GOOGLE_MAPS_API_KEY
    resp = requests.post(url)
    resp.raise_for_status()
    # Use the first geocoding result's coordinates.
    location = first(resp.json()["results"])["geometry"]["location"]
    with transaction.atomic():
        house = House.objects.create(
            address=form.cleaned_data["address"],
            property_type=form.cleaned_data["property_type"],
            room_count=form.cleaned_data["room_count"],
            parking_space_count=form.cleaned_data["parking_space_count"],
            latitude=location["lat"],
            longitude=location["lng"],
        )
        Contract.objects.create(
            house=house,
            user=self.request.user,
            price=form.cleaned_data["price"],
            end_date=form.cleaned_data["available_date"],
        )
    messages.info(self.request, "House added!")
    return super().form_valid(form)
def get_requirement(self):
    """Build a parsed requirement from this link, marking local/editable
    state and validating that VCS URIs carry an ``#egg=`` name fragment.

    Raises:
        ValueError: if a version-controlled dependency has no egg name.
    """
    prefix = "-e " if self.editable else ""
    line = "{0}{1}".format(prefix, self.link.url)
    req = first(requirements.parse(line))
    if self.path and self.link and self.link.scheme.startswith("file"):
        # file:// links point at a local checkout on disk.
        req.local_file = True
        req.path = self.path
    if self.editable:
        req.editable = True
        req.link = self.link
    if (
        self.uri != unquote(self.link.url_without_fragment)
        and "git+ssh://" in self.link.url
        and "git+git@" in self.uri
    ):
        # Preserve the original git+git@ form instead of the normalised
        # git+ssh:// URL.
        req.line = self.uri
        req.uri = self.uri
    if not req.name:
        raise ValueError(
            "pipenv requires an #egg fragment for version controlled "
            "dependencies. Please install remote dependency "
            "in the form {0}#egg=<package-name>.".format(req.uri)
        )
    if self.vcs and not req.vcs:
        req.vcs = self.vcs
    if self.ref and not req.revision:
        req.revision = self.ref
    if self.extras and not req.extras:
        req.extras = self.extras
    return req
def from_db(cls, release):
    """Build a search document from *release* and its parent project.

    Versions are listed newest-first; ``latest_version`` prefers the
    newest non-prerelease, falling back to the first release on record.
    """
    obj = cls(meta={"id": release.project.normalized_name})
    obj["name"] = release.project.name
    obj["normalized_name"] = release.project.normalized_name
    # Sort once, newest first, and reuse for both fields below; the
    # previous code sorted the same sequence twice.
    releases_newest_first = sorted(
        release.project.releases,
        key=lambda r: parse_version(r.version),
        reverse=True,
    )
    obj["version"] = [r.version for r in releases_newest_first]
    obj["latest_version"] = first(
        releases_newest_first,
        key=lambda r: not r.is_prerelease,
        default=release.project.releases[0],
    ).version
    obj["summary"] = release.summary
    obj["description"] = release.description
    obj["author"] = release.author
    obj["author_email"] = release.author_email
    obj["maintainer"] = release.maintainer
    obj["maintainer_email"] = release.maintainer_email
    obj["home_page"] = release.home_page
    obj["download_url"] = release.download_url
    obj["keywords"] = release.keywords
    obj["platform"] = release.platform
    obj["created"] = release.created
    obj["classifiers"] = list(release.classifiers)
    return obj
def split_vcs_method_from_uri(uri):
    """Split a vcs+uri formatted uri into (vcs, uri)"""
    vcs = first(v for v in VCS_LIST if uri.startswith("{0}+".format(v)))
    if vcs:
        # Drop the "<vcs>+" prefix from the uri.
        vcs, uri = uri.split("+", 1)
    return vcs, uri
def from_pipfile(cls, name, pipfile):
    """Build a Requirement from a Pipfile entry, dispatching to the
    VCS / file / named sub-requirement class as appropriate."""
    _pipfile = {}
    if hasattr(pipfile, "keys"):
        _pipfile = dict(pipfile).copy()
    _pipfile["version"] = get_version(pipfile)
    # A key matching a known VCS name (git, hg, ...) marks a VCS entry.
    vcs = first([vcs for vcs in VCS_LIST if vcs in _pipfile])
    if vcs:
        _pipfile["vcs"] = vcs
        r = VCSRequirement.from_pipfile(name, pipfile)
    elif any(key in _pipfile for key in ["path", "file", "uri"]):
        r = FileRequirement.from_pipfile(name, pipfile)
    else:
        r = NamedRequirement.from_pipfile(name, pipfile)
    markers = PipenvMarkers.from_pipfile(name, _pipfile)
    if markers:
        markers = str(markers)
    args = {
        "name": r.name,
        "vcs": vcs,
        "req": r,
        "markers": markers,
        "extras": _pipfile.get("extras"),
        "editable": _pipfile.get("editable", False),
        "index": _pipfile.get("index"),
    }
    if any(key in _pipfile for key in ["hash", "hashes"]):
        args["hashes"] = _pipfile.get("hashes", [pipfile.get("hash")])
    return cls(**args)
def is_pinned_requirement(ireq):
    """
    Return True when *ireq* is a "pinned" InstallRequirement: it is not
    editable, carries exactly one specifier, that specifier is == (or
    ===), and the pinned version contains no wildcard.

    Examples:
        django==1.8   # pinned
        django>1.8    # NOT pinned
        django~=1.8   # NOT pinned
        django==1.*   # NOT pinned
    """
    if ireq.editable:
        return False

    specs = ireq.specifier._specs
    if len(specs) != 1:
        return False

    op, version = first(specs)._spec
    return op in ('==', '===') and not version.endswith('.*')
def handle(self, *args, **options):
    """Import PyCon UK talk abstracts from GitHub for every title listed
    in talk_titles.txt, creating or updating Proposal rows."""
    url = 'https://api.github.com/repos/pyconuk/pyconuk.org/contents/content/talks/'
    r = requests.get(url)
    r.raise_for_status()
    talks = r.json()
    s = requests.Session()
    # loop the talks from the csv/txt file
    with open('talk_titles.txt', 'r') as titles:
        for title in titles:
            # generate slugged titles (raw strings: the previous
            # non-raw '[^\w\s-]' patterns are invalid escape sequences
            # and warn on modern Python)
            tmp = re.sub(r'[^\w\s-]', '', title)
            slug = re.sub(r'[-\s]+', '-', tmp).strip('-').strip().lower()
            # get the download path for the abstract in the pyconuk repo
            talk_data = first(filter(lambda x: x['name'] == slug + '.md', talks))
            if not talk_data:
                print('Talk not found: {}'.format(slug))
                continue
            r = s.get(talk_data['download_url'])
            r.raise_for_status()
            # strip the wok metadata and title
            talk = r.text.split('\n')
            author = talk[7]
            abstract = '\n'.join(talk[9:])
            obj, created = Proposal.objects.update_or_create(
                title=title,
                defaults={'abstract': abstract, 'author': author},
            )
            print('{} Talk: {}'.format('Created' if created else 'Updated', title.strip()))
def resets_in(self, *identifiers):
    """Return a timedelta until the soonest-resetting exhausted limit,
    or None when no limit is currently exhausted."""
    resets = []
    for limit in self._limits:
        resets_at, remaining = self._window.get_window_stats(
            limit, *self._get_identifiers(identifiers)
        )

        # If this limit has any remaining limits left, then we will skip it
        # since it doesn't need reset.
        if remaining > 0:
            continue

        current = datetime.now(tz=timezone.utc)
        reset = datetime.fromtimestamp(resets_at, tz=timezone.utc)

        # If our current datetime is either greater than or equal to when
        # the limit resets, then we will skip it since it has either
        # already reset, or it is resetting now.
        if current >= reset:
            continue

        # Add a timedelta that represents how long until this limit resets.
        resets.append(reset - current)

    # If we have any resets, then we'll go through and find whichever one
    # is going to reset soonest and use that as our hint for when this
    # limit might be available again.
    return first(sorted(resets))
def handle_error_nicely(self, tb_1, request):
    """Turn the in-flight exception into a Response, rendering a matching
    error simplate (e.g. 404.html, error.html) when one exists."""
    response = sys.exc_info()[1]
    if not isinstance(response, Response):
        # We have a true Exception; convert it to a Response object.
        response = Response(500, tb_1)
    response.request = request
    if 200 <= response.code < 300:
        # The app raised a Response(2xx). Act as if nothing
        # happened. This is unusual but allowed.
        pass
    else:
        # Delegate to any error simplate.
        # ===============================
        rc = str(response.code)
        # Code-specific pages are tried before the generic error page.
        possibles = [rc + ".html", rc + ".html.spt", "error.html", "error.html.spt"]
        fs = first(self.ours_or_theirs(errpage) for errpage in possibles)
        if fs is not None:
            request.fs = fs
            request.original_resource = request.resource
            request.resource = resources.get(request)
            response = request.resource.respond(request, response)
    return response
def _group_constraints(self, constraints):
    """
    Groups constraints (remember, InstallRequirements!) by their key name,
    and combining their SpecifierSets into a single InstallRequirement per
    package.  For example, given the following constraints:

        Django<1.9,>=1.4.2
        django~=1.5
        Flask~=0.7

    This will be combined into a single entry per package:

        django~=1.5,<1.9,>=1.4.2
        flask~=0.7
    """
    for _, ireqs in full_groupby(constraints, key=_dep_key):
        ireqs = list(ireqs)
        editable_ireq = first(ireqs, key=lambda ireq: ireq.editable)
        if editable_ireq:
            # ignore all the other specs: the editable one is the one that counts
            yield editable_ireq
            continue
        ireqs = iter(ireqs)
        # Use the first requirement as the base and fold the rest in.
        combined_ireq = next(ireqs)
        combined_ireq.comes_from = None
        for ireq in ireqs:
            # NOTE we may be losing some info on dropped reqs here
            combined_ireq.req.specifier &= ireq.req.specifier
            # Return a sorted, de-duped tuple of extras
            combined_ireq.extras = tuple(sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras))))
        yield combined_ireq
def parse_extras(extras_str):
    """Turn a string of extras into a parsed extras list"""
    import requirements

    # Parse a fake requirement line so the requirements parser does the
    # extras splitting for us.
    fake_line = "fakepkg{0}".format(extras_to_string(extras_str))
    parsed = first(requirements.parse(fake_line))
    return parsed.extras
def failure_line_summary(formatter, failure_line):
    """
    Create a mozlog formatted error summary string from the given failure_line.

    Create a string which can be compared to a TextLogError.line string to see
    if they match.

    Returns None for truncated lines, unknown mozlog actions, or when the
    formatted log has no first line.
    """
    if failure_line.action == "test_result":
        # Subtests map to test_status; whole-test results to test_end.
        action = "test_status" if failure_line.subtest is not None else "test_end"
    elif failure_line.action == "truncated":
        return
    else:
        action = failure_line.action

    try:
        mozlog_func = getattr(formatter, action)
    except AttributeError:
        logger.warning('Unknown mozlog function "%s"', action)
        return

    formatted_log = mozlog_func(failure_line.to_mozlog_format())
    # Only the first line of the formatted output is comparable.
    split_log = first(formatted_log.split("\n", 1))

    if not split_log:
        # Fixed: the message previously had no %s placeholder for the
        # extra argument, which made the logging call itself error out.
        logger.debug('Failed to split log: %s', formatted_log)
        return

    return split_log.strip()
def find_source(sources, name=None, url=None):
    """Return the first source matching *name* exactly or whose url is a
    prefix of *url*; None when neither filter matches."""
    if name:
        matches = [s for s in sources if s.get("name") == name]
    elif url:
        matches = [s for s in sources if url.startswith(s.get("url"))]
    else:
        matches = None
    if matches:
        return first(matches)
def test_scored_matches_with_manipulated_score(classified_failures):
    """An 8/10 multiplier should scale a full match score down to 0.8."""
    matches = TextLogErrorMatch.objects.all()
    results = list(score_matches(matches, score_multiplier=(8, 10)))
    assert len(results) == len(matches)
    top_score, _ = first(results)
    assert top_score == Decimal('0.8')
def user_suggest(prefix):
    """Suggest up to 20 user names starting with *prefix* (min. 3 chars)."""
    prefix = prefix.strip().replace("%", "")
    if len(prefix) < 3:
        return bottle.abort(412, "Need at least 3 chars")
    with stats.timer(metric_name("request.user-suggest")):
        query = "SELECT name FROM users WHERE lower(name) LIKE lower(%s) ORDER BY SCORE DESC LIMIT 20".replace("SCORE", "score")
        rows = execute(query, [prefix.replace("%", "") + "%"])
        return {"names": [first(row) for row in rows]}
def from_pipfile(cls, name, pipfile):
    """Build a named requirement from a Pipfile entry."""
    creation_args = {}
    if hasattr(pipfile, "keys"):
        # Keep only keys that map to fields on this class.
        creation_args = {k: v for k, v in pipfile.items() if k in cls.attr_fields()}
    creation_args["name"] = name
    version = get_version(pipfile)
    creation_args["version"] = version
    parsed = requirements.parse("{0}{1}".format(name, version))
    creation_args["req"] = first(parsed)
    return cls(**creation_args)
def get_requirement(self):
    """Parse this requirement's name+version line into a req object.

    Raises RequirementError when the line cannot be parsed.
    """
    from pkg_resources import RequirementParseError

    line = "{0}{1}".format(self.name, self.version)
    try:
        return first(requirements.parse(line))
    except RequirementParseError:
        raise RequirementError(
            "Error parsing requirement: %s%s" % (self.name, self.version)
        )
def test_precise_matcher_with_matches(classified_failures):
    """precise_matcher should reproduce the stored match for an error line."""
    error_line = TextLogErrorMatch.objects.first().text_log_error
    score, classified_failure_id = first(precise_matcher(error_line))
    stored_match = error_line.matches.first()
    assert classified_failure_id == stored_match.classified_failure_id
    assert score == stored_match.score
def __init__(self, endpoints, prefix="avro-schemas"):
    """Store the endpoint tuple (a lone string becomes a 1-tuple)."""
    self.endpoints = (endpoints,) if isinstance(endpoints, str) else tuple(endpoints)
    self.prefix = prefix
    # first() yields None when every endpoint is falsy, so this also
    # rejects e.g. ("",), not just an empty sequence.
    if not first(self.endpoints):
        raise ValueError("Endpoints must not be empty")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Demonstrate first() with a key predicate."""
import operator

from first import first


def greater_than_zero(number):
    """Return True for strictly positive numbers."""
    return number > 0


# Returns 1: the first element for which the predicate holds.
first([-1, 0, 1, 2], key=greater_than_zero)
def from_line(cls, line):
    """Parse a requirement *line* into (name, version-spec, req) fields."""
    req = first(requirements.parse(line))
    # Only materialise a spec string when the requirement has specifiers.
    version = specs_to_string(req.specs) if req.specifier else None
    return cls(name=req.name, version=version, req=req)
async def get_engine_check_run(
        self, name: str) -> typing.Optional[github_types.GitHubCheckRun]:
    """Return the engine check-run called *name*, or None if absent."""
    check_runs = await self.pull_engine_check_runs
    return first.first(check_runs, key=lambda run: run["name"] == name)
def getFollow(input):
    """Compute FOLLOW sets for the grammar encoded in *input*."""
    start, productions = decodeProductionList(input)
    # FOLLOW is defined in terms of the grammar's FIRST sets.
    first_map = first(productions)
    return follow(productions, first_map, start)
def curl_to_httpie(cmd: str, long_option: bool = False) -> ConversionResult:
    """Translate a curl command line into an equivalent httpie command.

    Returns a ConversionResult whose ``httpie`` field is the translated
    command (empty on parse failure) and whose ``errors`` collects any
    argument-level problems.
    """
    # The cmd can be multiline string, with escape symbols, shlex doesn't support it, so
    # we should convert it to one-line first.
    oneline = clean_curl(cmd)
    try:
        cargs = shlex.split(oneline)
    except ValueError as e:
        logger.error('Failed to parse as shell command. Error: %s', e)
        return ConversionResult(httpie='', errors=[str(e)])
    if not cargs:
        return ConversionResult(httpie='')
    if cargs[0] == 'curl':
        cargs = cargs[1:]
    if not cargs:
        return ConversionResult(httpie='http')
    args = CURLArgumentParser().parse_args(cargs)
    cmds = deque(['http'])
    if args.verbose:
        cmds.append('--verbose' if long_option else '-v')
    if args.location:
        if long_option:
            cmds.append('--follow')
        else:
            join_previous_arg(cmds, 'F')
    if args.remote_name:
        if long_option:
            cmds.append('--download')
        else:
            join_previous_arg(cmds, 'd')
    if args._data and not args._request_json:
        if long_option:
            cmds.append('--form')
        else:
            join_previous_arg(cmds, 'f')
    if args.proxy:
        cmds.extend(('--proxy', args.proxy))
    if args.user or args._auth:
        user = args.user
        if not user and args._auth:
            user = '******'.join(args._auth.get_username_password())
        if long_option:
            cmds.extend(('--auth', quote(user)))
        else:
            join_previous_arg(cmds, 'a')
            cmds.append(quote(user))
    if args.include:
        cmds.append('--all')
    if args.insecure:
        # Fixed: list.append() takes a single argument; the previous
        # cmds.append('--verify', 'no') raised TypeError at runtime.
        cmds.extend(('--verify', 'no'))
    elif args.cacert:
        cmds.extend(('--verify', args.cacert))
    if args.cert:
        cmds.extend(('--cert', quote(args.cert)))
    if args.max_redirs:
        # Fixed: deque.extend() takes one iterable; passing two positional
        # arguments raised TypeError.
        cmds.extend(('--max-redirects', args.max_redirs))
    if args.max_time:
        # Fixed: same two-argument extend() bug as above.
        cmds.extend(('--timeout', args.max_time))
    if args.head:
        cmds.append('HEAD')
    elif args.request and not (args._data and args.request == 'POST'):
        # POST with data is httpie's default, so only emit other methods.
        cmds.append(args.request)
    # URL
    cmds.append(args._url)
    # Headers
    for k, v in args._headers.to_dict().items():
        cmds.append(f'{quote(k)}:{quote(v)}')
    if args._request_json and not args._data:
        mime = quote(HH.CONTENT_TYPE_VALUES.json)
        key = quote(HH.CONTENT_TYPE)
        cmds.append(f'{key}:{mime}')
    if args.user_agent:
        cmds.append(f'{HH.USER_AGENT}:{quote(args.user_agent)}')
    # Params
    for k, v in args._params:
        if k.startswith('-'):
            # '--' stops httpie's own option parsing for dash-led keys.
            cmds.append('--')
        k = k.replace('=', r'\=')
        cmds.append(f'{quote(k)}=={quote(v)}')
    # Data
    for p, v in args._data:
        p = str(p)
        if p.startswith('-'):
            cmds.append('--')
        p = p.replace('=', r'\=')
        qp = quote(p)
        # Syntax for uploading file
        if isinstance(v, str) and v.startswith('@') and not args._request_json:
            # Strip beginning @
            filepath = v[1:]
            cmds.append(f'{qp}@{quote(filepath)}')
            continue
        # Not uploading file
        # Python shlex's quote will turn bool value to empty string, that is not we want
        if isinstance(v, bool):
            js_bool = str(v).lower()
            cmds.append(f'{qp}:={js_bool}' if not args.get else f'{qp}=={str(v)}')
            continue
        try:
            qv = quote(v)
            cmds.append(f'{qp}={qv}' if not args.get else f'{qp}=={qv}')
        except TypeError:
            # v is not string, normally after parsed from JSON
            if isinstance(v, (list, dict)):
                v = quote(json_dump(v))
            cmds.append(f'{qp}:={v}' if not args.get else f'{qp}=={quote(str(v))}')
    if args.data_binary:
        fn = first(v for v in args.data_binary if v and v.startswith('@'))
        if fn:
            # Strip @
            fn = fn[1:]
            cmds.append(f'@{quote(fn)}')
    if args.output:
        param = '-o' if not long_option else '--output'
        cmds.extend((param, quote(args.output)))
    return ConversionResult(httpie=' '.join(cmds), errors=frozenset(args._errors))
def get_standings(self, users=None, statistics=None):
    """Scrape the Project Euler "fastest solvers" table for this problem.

    Signs in when necessary (reading the captcha via tesseract OCR,
    retrying up to 20 times) and returns a standings dict with one row
    per solver.
    """
    if not self.standings_url:
        self.standings_url = f'https://projecteuler.net/fastest={self.key}'
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'  # noqa
    page = REQ.get(self.standings_url, headers={'User-Agent': user_agent})
    # A sign_out form on the page means we're already authenticated.
    sign_out = re.search('<form[^>]*action="sign_out"[^>]*>', page)
    if not sign_out:
        for attempt in range(20):
            while True:
                # Fetch captcha images until OCR yields a plausible
                # 5-digit code.
                value = f'{random.random():.16f}'
                image_bytes = REQ.get(f'https://projecteuler.net/captcha/show_captcha.php?{value}')
                image_stream = io.BytesIO(image_bytes)
                image_rgb = Image.open(image_stream)
                text = pytesseract.image_to_string(image_rgb, config='--oem 0 --psm 13 digits')
                text = text.strip()
                if re.match('^[0-9]{5}$', text):
                    break
            REQ.get('https://projecteuler.net/sign_in')
            page = REQ.submit_form(
                name='sign_in_form',
                action=None,
                data={
                    'username': conf.PROJECTEULER_USERNAME,
                    'password': conf.PROJECTEULER_PASSWORD,
                    'captcha': text,
                    'remember_me': '1',
                },
            )
            # A warning paragraph means the captcha (or login) failed.
            match = re.search('<p[^>]*class="warning"[^>]*>(?P<message>[^<]*)</p>', page)
            if match:
                REQ.print(match.group('message'))
            else:
                break
        else:
            raise ExceptionParseStandings('Did not recognize captcha for sign in')
        page = REQ.get(self.standings_url)
    result = {}
    problem_name = self.name.split('.', 1)[1].strip()
    problems_info = [{'name': problem_name, 'url': self.url}]
    regex = '<table[^>]*>.*?</table>'
    html_table = re.search(regex, page, re.DOTALL)
    if html_table:
        table = parsed_table.ParsedTable(html_table.group(0))
        for r in table:
            row = OrderedDict()
            row['solving'] = 1
            for k, v in r.items():
                if isinstance(v, list):
                    # A list cell holds the place plus the country flag.
                    place, country = v
                    row['place'] = re.match('[0-9]+', place.value).group(0)
                    country = first(country.column.node.xpath('.//@title'))
                    if country:
                        row['country'] = country
                elif k == 'Time To Solve':
                    # Parse "N minutes, M seconds"-style text into
                    # relativedelta keyword arguments.
                    params = {}
                    for x in v.value.split(', '):
                        value, field = x.split()
                        if field[-1] != 's':
                            field += 's'
                        params[field] = int(value)
                    rel_delta = relativedelta(**params)
                    now = timezone.now()
                    # Convert the relative delta into an absolute timedelta.
                    delta = now - (now - rel_delta)
                    row['penalty'] = f'{delta.total_seconds() / 60:.2f}'
                elif k == 'User':
                    # Prefer the full name from the title attribute.
                    member = first(v.column.node.xpath('.//@title')) or v.value
                    row['member'] = member
                else:
                    row[k.lower()] = v.value
            problems = row.setdefault('problems', {})
            problem = problems.setdefault(problem_name, {})
            problem['result'] = '+'
            problem['binary'] = True
            row['_skip_for_problem_stat'] = True
            if 'member' not in row:
                continue
            result[row['member']] = row
    standings = {
        'result': result,
        'url': self.standings_url,
        'problems': problems_info,
    }
    # Re-poll more often while the fastest-solvers table is still filling.
    if len(result) < 100:
        delta = timezone.now() - self.start_time
        if delta < timedelta(days=1):
            standings['timing_statistic_delta'] = timedelta(minutes=60)
        elif delta < timedelta(days=30):
            standings['timing_statistic_delta'] = timedelta(days=1)
    return standings
def slot_length(self) -> int:
    """This talk's slot length, falling back to the event type's."""
    return first((self._slot_length, self.event_type._slot_length))
from django.conf import settings
from first import first

__all__ = (
    'NODE_CACHE',
    'PAGE_CONTENT_BLOCKS',
    'PAGE_CONTENT_CLASSES',
    'PAGE_TEMPLATE_BASE',
    'PAGE_TEMPLATE_FOLDER',
    'PAGE_TEMPLATE_REGEX',
)


def S(n, d=None):
    """Read a TOUCHTECHNOLOGY_-prefixed Django setting with a default.

    Replaces the previous lambda assignment (PEP 8 E731).
    """
    return getattr(settings, 'TOUCHTECHNOLOGY_' + n, d)


# DIRS of the first configured template engine, then its first entry,
# falling back to 'templates'.
project_template_dirs = first(getattr(settings, 'TEMPLATES', ()), {}).get('DIRS', [])
project_template_base = first(project_template_dirs, 'templates')

NODE_CACHE = getattr(settings, 'TOUCHTECHNOLOGY_NODE_CACHE', 'default')

# These settings allow us to determine where page templates live, which allows
# us not to need to specify each actual template in code.
PAGE_CONTENT_BLOCKS = S('PAGE_CONTENT_BLOCKS', 1)
PAGE_CONTENT_CLASSES = S('PAGE_CONTENT_CLASSES', ('copy',))
PAGE_TEMPLATE_BASE = S('PAGE_TEMPLATE_BASE', project_template_base)
PAGE_TEMPLATE_FOLDER = S('PAGE_TEMPLATE_FOLDER', 'touchtechnology/content/pages/')
PAGE_TEMPLATE_REGEX = S('PAGE_TEMPLATE_REGEX', r'\.html$')

TENANT_MEDIA_PUBLIC = S('TENANT_MEDIA_PUBLIC', True)
def test_convert_from_pipfile(requirement, expected):
    """Round-trip a Pipfile entry through Requirement and check as_line().

    Non-URL expectations are lowercased before comparison; URLs keep
    their case.
    """
    pkg_name = first(requirement.keys())
    pkg_pipfile = requirement[pkg_name]
    req = Requirement.from_pipfile(pkg_name, pkg_pipfile)
    # Fixed: previously the conditional expression bound to the whole
    # comparison, so for URL-style values the assert only checked that
    # `expected` was truthy and never compared it to as_line().
    expected_line = expected.lower() if '://' not in expected else expected
    assert req.as_line() == expected_line
def test_one_way_editable_extras():
    """An editable local path keeps its extras through as_pipfile()."""
    pipfile = Requirement.from_line('-e .[socks]').as_pipfile()
    name = first(pipfile.keys())
    assert pipfile[name]['extras'] == ['socks']
def extract_uri_from_vcs_dep(dep):
    """Pull the first URI-like value out of a VCS dependency mapping;
    None for non-mappings or when nothing matches."""
    valid_keys = VCS_LIST + ('uri', 'file')
    if not hasattr(dep, 'keys'):
        return None
    return first(dep[key] for key in valid_keys if key in dep) or None
def find_device(self):
    """Pick a Spotify playback device, preferring speakers.

    Returns a deferred speaker-choice flow when multiple candidates
    match a spoken device slot; otherwise saves the best candidate
    (active, else unrestricted) and returns None.
    """
    device_slot = self.slot("device")
    try:
        devices = self.api_get("devices", playback=False).json()
        logger.info(devices)
        devices = [addict.Dict(d) for d in devices]
        # Partition by device type: speakers are preferred targets.
        speakers = {d.name: d for d in devices if d.type == "Speaker"}
        not_speakers = {d.name: d for d in devices if d.type != "Speaker"}
    except Exception as e:
        logger.exception(e)
        # Forget any previously-saved device when the listing fails.
        if self.device_id in self.req_attr:
            del self.req_attr[self.device_id]
        return None
    speaker_list = list(speakers.values())
    not_speaker_list = list(not_speakers.values())
    if len(speaker_list) == 1:
        logger.info("Found 1 speaker, using it as the playing device")
        self.save_speaker(speaker_list[0])
    elif len(speaker_list) > 1:
        logger.info("Found %s speakers", len(speaker_list))
        if self.device_id not in self.req_attr:
            if device_slot:
                logger.info("Searching for %s device in speakers", device_slot)
                return self.choose_speaker(speakers, device_slot)
        elif self.req_attr[self.device_id] not in (speakers.keys() | not_speakers.keys()):
            logger.info("Saved speaker does not exist: %s", self.req_attr[self.device_id])
        # Prefer the active speaker, then any unrestricted one.
        device = first(
            speaker_list, key=lambda s: s.is_active) or first(
                speaker_list, key=lambda s: not s.is_restricted)
        self.save_speaker(device)
        if device:
            logger.info(
                "Using speaker %s because it is %s",
                device,
                ("active" if device.is_active else "not restricted"),
            )
        else:
            logger.info(
                "Couldn't find a usable speaker, letting Noiseblend find one for us"
            )
    elif len(not_speaker_list) == 1:
        logger.info(
            "Found 1 device (not speaker), using it as the playing device")
        self.save_speaker(not_speaker_list[0])
    elif len(not_speaker_list) > 1 and device_slot:
        logger.info("Found %s devices", len(not_speaker_list))
        if self.device_id not in self.req_attr:
            if device_slot:
                logger.info("Searching for %s device in not speakers", device_slot)
                return self.choose_speaker(not_speakers, device_slot)
        elif self.req_attr[self.device_id] not in (speakers.keys() | not_speakers.keys()):
            logger.info("Saved device does not exist: %s",
                        self.req_attr[self.device_id])
        # Same preference order as for speakers: active, then unrestricted.
        device = first(
            not_speaker_list, key=lambda s: s.is_active) or first(
                not_speaker_list, key=lambda s: not s.is_restricted)
        self.save_speaker(device)
        if device:
            logger.info(
                "Using device %s because it is %s",
                device,
                ("active" if device.is_active else "not restricted"),
            )
        else:
            logger.info(
                "Couldn't find a usable device, letting Noiseblend find one for us"
            )
    return None
def get_requirement(self):
    """Parse this requirement's name+version into a requirement object."""
    line = "{0}{1}".format(self.name, self.version)
    return first(requirements.parse(line))
def seeded_tournament(seeded_team_list, days_available, max_per_day=1, min_per_day=1):
    """
    Using the available input variables, divide the list of seeded teams
    into the appropriate number of pools. Produce suitable draw formats
    definitions to execute the tournament.

    :param seeded_team_list: list of teams ordered by seeding rank
    :type seeded_team_list: int
    :param days_available: number of days available for play
    :type days_available: int
    :param max_per_day: max number of games a team may play in one day
    :type max_per_day: int
    :param min_per_day: min number of prelim games a team must play in one day
    :type min_per_day: int
    :returns: pools (list of lists) and draw_formats (list of dicts)
    :rtype: dict
    """
    number_of_teams = len(seeded_team_list)
    number_of_pools = optimum_tournament_pool_count(
        number_of_teams,
        days_available,
        max_per_day,
        min_per_day,
    )
    if isinstance(first(seeded_team_list), str):
        # Plain strings are wrapped so each team carries its seeding
        # order and sorts by it.
        @functools.total_ordering
        class Team(object):
            def __init__(self, st, order):
                self.st = st
                self.order = order

            def __eq__(self, other):
                return self.order == other.order

            def __ne__(self, other):
                return not (self == other)

            def __lt__(self, other):
                return self.order < other.order

            def __str__(self):
                return self.st

            def __repr__(self):
                return "<Team: {} ({})>".format(self.st, self.order)

        seeded_team_list = [
            Team(t, order=order) for order, t in enumerate(seeded_team_list, 1)
        ]
    if number_of_pools is None:
        raise ValueError("Incompatible set of constraints")
    # split teams into number of pools, employing the "serpent" pattern
    pools = sorted(
        zip(*[
            g if i % 2 else reversed(g)
            for i, g in enumerate(grouper(seeded_team_list, number_of_pools))
        ]),
        key=None,
    )
    # remove any None items from each pool
    pools = [[p for p in pool if p] for pool in pools]
    # produce round robin formats
    draw_formats = [
        {
            "label": "Round Robin (%d/%d teams)" % (size - 1, size),
            "format": round_robin_format(range(1, size + 1)),
        }
        # unique set of pool sizes requiring individual draw formats
        for size in sorted(
            set([
                int(ceiling(size, 2))
                for size in [len(pool) for pool in pools]
            ]))
    ]
    # produce finals formats
    draw_formats += [{
        "label": _("Final Series (%s pools)") % number_of_pools,
        "format": single_elimination_final_format(
            number_of_pools,
            bronze_playoff=_("Bronze Medal"),
        ),
    }]
    return dict(pools=pools, draw_formats=draw_formats)
def location(self) -> str:
    """This talk's location, falling back to the event type's."""
    return first((self._location, self.event_type._location))
def _if_shorten_replace_func(mo):
    """Expand a shorthand regex match *mo* into its replacement text.

    Finds the first named group that actually captured something and formats
    the corresponding template from ``_if_shorten_replace_patterns`` with the
    captured value.
    """
    # first(filter(...)) yields the first (group_name, captured) pair whose
    # captured value is truthy.
    group_items = mo.groupdict().items()
    name, value = first(filter(lambda item: item[1], group_items))
    template = _if_shorten_replace_patterns[name]
    return template.format(value)
# _*_ coding: utf-8 _*_ __author__ = 'wuhao' __date__ = '2017/7/25 13:32' from first import first first([0, False, None, [], (), 42]) first([-1, 0, 1, 2]) first([-1, 0, 1, 2], key=lambda x: x > 0)
def get_vcs_deps(
    project,
    pip_freeze=None,
    which=None,
    verbose=False,
    clear=False,
    pre=False,
    allow_global=False,
    dev=False,
):
    """Lock the project's VCS dependencies using ``pip freeze`` output.

    Each line of *pip_freeze* is matched (by URI substring) against the VCS
    entries of the chosen Pipfile section; matching checkouts are synced under
    the virtualenv ``src`` directory, pinned to the locked revision, and their
    own dependencies resolved.

    :param project: pipenv Project (supplies the Pipfile sections and the
        virtualenv location).
    :param pip_freeze: full ``pip freeze`` output to scan.  NOTE(review):
        despite the ``None`` default, a ``None`` value would raise on
        ``.strip()`` — callers are expected to supply it.
    :param which: tool-lookup callable forwarded to ``venv_resolve_deps``.
    :param dev: when True, read ``vcs_dev_packages`` instead of
        ``vcs_packages``.
    :returns: tuple ``(lines, lockfiles)`` — the matched freeze lines and the
        lockfile entries produced for them.
    """
    from .patched.notpip._internal.vcs import VcsSupport

    section = "vcs_dev_packages" if dev else "vcs_packages"
    lines = []
    lockfiles = []
    try:
        packages = getattr(project, section)
    except AttributeError:
        # The Pipfile has no such section: nothing to lock.
        return [], []
    # Checkout root: $PIP_SRC if set, else <virtualenv>/src.
    src_dir = Path(
        os.environ.get("PIP_SRC", os.path.join(project.virtualenv_location, "src")))
    src_dir.mkdir(mode=0o775, exist_ok=True)
    vcs_registry = VcsSupport
    # Map each dependency's extracted URI to its Pipfile name and pinned ref.
    vcs_uri_map = {
        extract_uri_from_vcs_dep(v): {
            "name": k,
            "ref": v.get("ref")
        }
        for k, v in packages.items()
    }
    for line in pip_freeze.strip().split("\n"):
        # if the line doesn't match a vcs dependency in the Pipfile,
        # ignore it
        _vcs_match = first(_uri for _uri in vcs_uri_map.keys() if _uri in line)
        if not _vcs_match:
            continue
        pipfile_name = vcs_uri_map[_vcs_match]["name"]
        pipfile_rev = vcs_uri_map[_vcs_match]["ref"]
        pipfile_req = Requirement.from_pipfile(pipfile_name, [], packages[pipfile_name])
        # Track both the Pipfile name and pip's normalized name; the two can
        # differ and both directories are considered below.
        names = {pipfile_name}
        backend = vcs_registry()._registry.get(pipfile_req.vcs)
        # TODO: Why doesn't pip freeze list 'git+git://' formatted urls?
        # Normalize editable lines so they carry the '<vcs>+' URL prefix.
        if line.startswith("-e ") and not "{0}+".format(
                pipfile_req.vcs) in line:
            line = line.replace("-e ", "-e {0}+".format(pipfile_req.vcs))
        installed = Requirement.from_line(line)
        __vcs = backend(url=installed.req.uri)
        names.add(installed.normalized_name)
        locked_rev = None
        # install_or_update_vcs presumably syncs the checkout at the pinned
        # ref; only the revision from the *last* name is kept — TODO confirm.
        for _name in names:
            locked_rev = install_or_update_vcs(__vcs, src_dir.as_posix(),
                                               _name, rev=pipfile_rev)
        if installed.is_vcs:
            # Pin the resolved revision into the lockfile entry.
            installed.req.ref = locked_rev
            lockfiles.append({pipfile_name: installed.pipfile_entry[1]})
        pipfile_srcdir = (src_dir / pipfile_name).as_posix()
        lockfile_srcdir = (src_dir / installed.normalized_name).as_posix()
        lines.append(line)
        # Resolve the checkout's own dependencies, preferring the directory
        # named after the Pipfile entry when it exists on disk.
        if os.path.exists(pipfile_srcdir):
            lockfiles.extend(
                venv_resolve_deps(
                    ["-e {0}".format(pipfile_srcdir)],
                    which=which,
                    verbose=verbose,
                    project=project,
                    clear=clear,
                    pre=pre,
                    allow_global=allow_global,
                ))
        else:
            lockfiles.extend(
                venv_resolve_deps(
                    ["-e {0}".format(lockfile_srcdir)],
                    which=which,
                    verbose=verbose,
                    project=project,
                    clear=clear,
                    pre=pre,
                    allow_global=allow_global,
                ))
    return lines, lockfiles
def get_kind(scopes):
    """Return the kind of *scopes*: "provider" before "client", else None.

    ``get_if_in`` presumably returns a truthy marker when the kind applies
    (verify against its definition); ``first`` picks the first such result.
    """
    candidate_kinds = ("provider", "client")
    return first(get_if_in(scopes, kind) for kind in candidate_kinds)
async def is_first_pull(self, ctxt: context.Context) -> bool:
    """Return True when *ctxt*'s pull request sits at the head of the queue."""
    head = first.first(self._iter_pseudo_cars())
    if head is None:
        # Empty queue: nothing can be "first".
        return False
    return head.user_pull_request_number == ctxt.pull["number"]
def find_version(input_string):
    """Extract a version from *input_string* via the module-level ``pattern``.

    :raises NoVersionFound: when the pattern yields no (truthy) match.
    """
    hits = pattern.findall(input_string)
    found = first(hits)
    if found is None:
        raise NoVersionFound
    # Index 1 of the findall result carries the version text — this depends
    # on the capture groups of ``pattern``, defined elsewhere in the module.
    return found[1]
def get_car(self, ctxt: context.Context) -> typing.Optional[TrainCar]:
    """Return the queued car belonging to *ctxt*'s pull request, or None."""
    wanted_number = ctxt.pull["number"]
    return first.first(
        self._cars,
        key=lambda car: car.user_pull_request_number == wanted_number,
    )
def extract_uri_from_vcs_dep(dep):
    """Return the VCS/URI value from a Pipfile dependency entry.

    :param dep: a Pipfile dependency value; only mapping-like entries
        (anything exposing ``keys``) are inspected.
    :returns: the first truthy value stored under a VCS key, ``uri`` or
        ``file`` — or ``None`` when *dep* is not a mapping or holds none.
    """
    valid_keys = VCS_LIST + ("uri", "file")
    if hasattr(dep, "keys"):
        # first() already returns None when no candidate is truthy, so the
        # former trailing `or None` was redundant and has been dropped.
        return first(dep[k] for k in valid_keys if k in dep)
    return None
# _*_ coding: utf-8 _*_ __author__ = 'wuhao' __date__ = '2017/7/25 13:41' from functools import partial from first import first import operator def greater_than(number, min1=0): return number > min1 first([-1, 0, 1, 2], key=lambda x: x>0) # 将以函数用一行代码表示 first([-1, 0, 1, 2], key=partial(greater_than, min1=42)) # 应用partial改进 lambda的一行函数式编程, 实现对greater_than的封装 first([-1, 0, 1, 2], key=partial(operator.le, 0)) # 操作符的使用
def release_detail(release, request):
    """Build the template context for a project's release-detail page.

    Issues a permanent redirect when the URL's project name differs from the
    canonical name; otherwise collects releases, maintainers and license info.

    :param release: the Release being displayed.
    :param request: the pyramid request.
    :returns: dict of template context, or an ``HTTPMovedPermanently``.
    """
    project = release.project

    # Canonicalize the project name in the URL.
    if project.name != request.matchdict.get("name", project.name):
        return HTTPMovedPermanently(
            request.current_route_path(name=project.name),
        )

    # Get all of the registered versions for this Project, in order of newest
    # to oldest.
    all_releases = (
        request.db.query(Release)
        .filter(Release.project == project)
        .with_entities(
            Release.version,
            Release.is_prerelease,
            Release.created)
        .order_by(Release._pypi_ordering.desc())
        .all()
    )

    # Get the latest non-prerelease of this Project, or the latest release if
    # all releases are prereleases. (all_releases is non-empty because
    # *release* itself belongs to this project.)
    latest_release = first(
        all_releases,
        key=lambda r: not r.is_prerelease,
        default=all_releases[0],
    )

    # Get all of the maintainers for this project.
    maintainers = [
        r.user
        for r in (
            request.db.query(Role)
            .join(User)
            .filter(Role.project == project)
            .distinct(User.username)
            .order_by(User.username)
            .all()
        )
    ]

    # Get the license from the classifiers or metadata, preferring
    # classifiers. (Renamed from `license` so the builtin isn't shadowed.)
    license_display = None
    if release.license:
        # Make a best effort when the entire license text is given
        # by using the first line only.
        license_display = release.license.split('\n')[0]
    license_classifiers = [c.split(" :: ")[-1]
                           for c in release.classifiers
                           if c.startswith("License")]
    if license_classifiers:
        license_display = ', '.join(license_classifiers)

    return {
        "project": project,
        "release": release,
        "files": release.files.all(),
        "latest_release": latest_release,
        "all_releases": all_releases,
        "maintainers": maintainers,
        "license": license_display,
    }
def add(self,
        device_id: str = None,
        host: str = None,
        username: str = None,
        password: str = None,
        schema: DeviceSchema = None,
        **kwargs):
    """
    Add device to HealthBot

    :param str device_id: The name of the device as provided by the User
    :param str host: The hostname/ip-address of the target device
    :param str username: The login user-name for the target device
    :param str password: The login password for the user
    :param object schema: `DeviceSchema <jnpr.healthbot.swagger.models.html#deviceschema>`_

    Example:
    ::

        from jnpr.healthbot import HealthBotClient
        from jnpr.healthbot import DeviceSchema

        hb = HealthBotClient('xx.xxx.x.xx', 'xxxx', 'xxxx')

        ds = DeviceSchema(device_id='xyz', host='xx.xxx.xxx.xxx',
                          authentication={"password": {"password": "******",
                                                       "username": "******"}})

        # we can also later assign values like this
        ds.description = "HbEZ testing"

        # This will add device in candidate DB
        hb.device.add(schema=ds)

        # commit changes to master DB
        hb.commit()

    :returns: True when the device is accepted into the candidate DB;
        any non-200 API response is logged and re-raised.
    """
    def _add_device(device_id=None):
        # Build the POST payload from whichever input style the caller used:
        # loose kwargs, a full DeviceSchema, or bare host/credentials.
        if kwargs:
            if schema is not None:
                raise SystemError(
                    "schema and kwargs are mutually exclusive")
            device_schema = DeviceSchema(device_id=device_id, host=host,
                                         **kwargs)
            if username is not None and password is not None:
                device_schema.authentication = {
                    "password": {
                        "password": password,
                        "username": username
                    }
                }
            payload = self.hbot._create_payload(device_schema)
        elif schema is not None:
            payload = self.hbot._create_payload(schema)
            device_id = schema.device_id
        else:
            payload = {
                "authentication": {
                    "password": {
                        "password": password,
                        "username": username
                    }
                },
                "device-id": device_id,
                "host": host,
            }
        url = self.hbot.urlfor.device(device_id)
        response = self.api.post(url, json=payload)
        if response.status_code != 200:
            logger.error(response.text)
            response.raise_for_status()
        return True

    if schema is not None:
        # A schema object wins over bare parameters; validate its type and
        # lift its identifying fields for the duplicate check below.
        if not isinstance(schema, DeviceSchema):
            raise SchemaError(DeviceSchema)
        device_id = schema.device_id
        host = schema.host

    devices_list_url = self.hbot.urlfor.devices(
    ) + self.hbot.apiopt_candidate
    resp = self.api.get(devices_list_url)

    # The API will return a 404 if there are no devices in the system. We need to check for
    # this condition
    if resp.status_code == 404:
        return _add_device(device_id)

    # examine the existing devices and see if there is one that already
    # exists by this name and host values
    existing_devices = resp.json()['device']
    found = first(
        filter(lambda i: i['device-id'] == device_id and i['host'] == host,
               existing_devices))
    if found:
        # NOTE(review): only a debug message — the add proceeds either way
        # and the API call effectively updates the existing device.
        logger.debug(
            "Device with given device-id '{}' already exists. Updating same."
            .format(device_id))

    # if we are here, then we need to add this new device
    return _add_device(device_id)
def _compute_recipient(user, email):
    """Format *email* as an address string with a friendly display name.

    The display name is the user's real name when set, else their username,
    else empty (so only the bare address is rendered).
    """
    display_name = first([user.name, user.username], default="")
    address = Address(display_name, addr_spec=email)
    return str(address)
def capacity(self) -> int:
    """Effective capacity: the event's own value, else the event type's, else 1."""
    configured = [self._capacity, self.event_type._capacity]
    return first(configured, default=1)
def host(self) -> Optional[User]:
    """Effective host: the event's own host if set, else the event type's (or None)."""
    candidates = [self._host, self.event_type._host]
    return first(candidates)
def release_detail(release, request):
    """Build the template context for a project's release-detail page.

    Redirects permanently when the requested version or project name is not
    canonical; otherwise collects releases, maintainers and license info.

    :param release: the Release being displayed.
    :param request: the pyramid request.
    :returns: dict of template context, or an ``HTTPMovedPermanently``.
    """
    project = release.project

    # Check if the requested version is equivalent but not exactly the same as
    # the release's version. Use `.get` because this view is used by
    # `project_detail` and there may not be a version.
    #
    # This also handles the case where both the version and the project name
    # need adjusted, and handles it in a single redirect.
    if release.version != request.matchdict.get("version", release.version):
        return HTTPMovedPermanently(
            request.current_route_path(
                name=project.name,
                version=release.version,
            ),
        )

    # It's possible that the requested version was correct (or not provided),
    # but we still need to adjust the project name.
    if project.name != request.matchdict.get("name", project.name):
        return HTTPMovedPermanently(
            request.current_route_path(name=project.name),
        )

    # Get all of the registered versions for this Project, in order of newest
    # to oldest.
    all_releases = (request.db.query(Release).filter(
        Release.project == project).with_entities(
            Release.version, Release.is_prerelease,
            Release.created).order_by(Release._pypi_ordering.desc()).all())

    # Get the latest non-prerelease of this Project, or the latest release if
    # all releases are prereleases. (all_releases is non-empty because
    # *release* itself belongs to this project.)
    latest_release = first(
        all_releases,
        key=lambda r: not r.is_prerelease,
        default=all_releases[0],
    )

    # Get all of the maintainers for this project.
    maintainers = [
        r.user
        for r in (request.db.query(Role).join(User).filter(
            Role.project == project).distinct(User.username).order_by(
                User.username).all())
    ]

    # Get the license from both the `Classifier` and `License` metadata fields
    license_classifiers = ', '.join(
        c.split(" :: ")[-1] for c in release.classifiers
        if c.startswith("License"))

    # Make a best effort when the entire license text is given by using the
    # first line only.
    short_license = release.license.split('\n')[0] if release.license else None

    # Renamed from `license` so the builtin isn't shadowed.
    if license_classifiers and short_license:
        license_display = f'{license_classifiers} ({short_license})'
    else:
        license_display = license_classifiers or short_license or None

    return {
        "project": project,
        "release": release,
        "files": release.files.all(),
        "latest_release": latest_release,
        "all_releases": all_releases,
        "maintainers": maintainers,
        "license": license_display,
    }