def get_clone_url(clone_uri_tmpl, prefix_url, repo_name, repo_id, username=None):
    parsed_url = urlobject.URLObject(prefix_url)
    prefix = urllib.parse.unquote(parsed_url.path.rstrip('/'))
    try:
        system_user = pwd.getpwuid(os.getuid()).pw_name
    except NameError:  # TODO: support all systems - especially Windows
        system_user = '******'  # hardcoded default value ...

    args = {
        'scheme': parsed_url.scheme,
        'user': urllib.parse.quote(username or ''),
        'netloc': parsed_url.netloc + prefix,  # like "hostname:port/prefix" (with optional ":port" and "/prefix")
        'prefix': prefix,  # undocumented, empty or starting with /
        'repo': repo_name,
        'repoid': str(repo_id),
        'system_user': system_user,
        'hostname': parsed_url.hostname,
    }
    url = re.sub('{([^{}]+)}', lambda m: args.get(m.group(1), m.group(0)), clone_uri_tmpl)

    # remove leading @ sign if it's present. Case of empty user
    url_obj = urlobject.URLObject(url)
    if not url_obj.username:
        url_obj = url_obj.with_username(None)

    return str(url_obj)
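# A minimal usage sketch, not from the original project: assuming the function
# above and its imports (os, pwd, re, urllib.parse, urlobject) are available,
# known {placeholders} in the template expand, and the leading '@' left behind
# by an empty user is dropped. The template and host below are illustrative.
print(get_clone_url('{scheme}://{user}@{netloc}/{repo}',
                    'https://example.com/prefix', 'myrepo', 42,
                    username='alice'))
# -> https://alice@example.com/prefix/myrepo
print(get_clone_url('{scheme}://{user}@{netloc}/{repo}',
                    'https://example.com/prefix', 'myrepo', 42))
# -> https://example.com/prefix/myrepo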
def process_settings(self, url, post_data, headers, settings=None):
    # Avoid a mutable default argument; treat a missing settings dict as empty.
    settings = settings or {}
    extra = {}
    extra['query'] = settings.get('query', {})
    extra['data'] = settings.get('data', {})
    extra['headers'] = settings.get('headers', {})
    for setting in extra:
        if not isinstance(extra[setting], dict):
            raise InvalidSettings(
                'Invalid setting: {setting}, it needs to be a dictionary '
                'of additional settings. Instead, it got an instance '
                'of: {instance}'.format(setting=setting,
                                        instance=type(extra[setting]).__name__))
    url = urlobject.URLObject(url)
    # add_query_params keeps the whole URL; url.query.add_params would return
    # only the query string, discarding scheme, host and path.
    url = str(url.add_query_params(extra['query']))
    if isinstance(post_data, dict):
        post_data.update(extra['data'])
    if isinstance(post_data, list):
        for key, value in extra['data'].items():
            post_data.append((key, value))
    headers.update(extra['headers'])
    return (url, post_data, headers)
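# A self-contained sketch of the urlobject call used above to merge extra
# query parameters; the URL and parameters are illustrative, not from the
# source project.
import urlobject

base = urlobject.URLObject('https://api.example.com/items?page=1')
print(base.add_query_params({'per_page': '50'}))
# -> https://api.example.com/items?page=1&per_page=50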
def coerce(self, value):
    # Keep only the URL path, stripped of leading whitespace, slashes and dots.
    value = re.sub(r'^[\s/.]+', '', text_type(urlobject.URLObject(value).path))
    try:
        f = self.storage.open(value)
    except Exception:  # EnvironmentError is already a subclass of Exception
        raise FileMakerValidationError(
            'Could not open file "{0}".'.format(value))
    else:
        return {'file': f, 'filename': f.name}
def get_clone_url(uri_tmpl, qualified_home_url, repo_name, repo_id, **override):
    parsed_url = urlobject.URLObject(qualified_home_url)
    decoded_path = safe_unicode(urllib.unquote(parsed_url.path.rstrip('/')))
    args = {
        'scheme': parsed_url.scheme,
        'user': '',
        'netloc': parsed_url.netloc + decoded_path,  # path if we use proxy-prefix
        'prefix': decoded_path,
        'repo': repo_name,
        'repoid': str(repo_id),
    }
    args.update(override)
    args['user'] = urllib.quote(safe_str(args['user']))

    for k, v in args.items():
        uri_tmpl = uri_tmpl.replace('{%s}' % k, v)

    # remove leading @ sign if it's present. Case of empty user
    url_obj = urlobject.URLObject(uri_tmpl)
    url = url_obj.with_netloc(url_obj.netloc.lstrip('@'))
    return safe_unicode(url)
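# A hedged sketch of the '@' cleanup used above, exercised with urlobject
# directly; the URL is illustrative.
import urlobject

u = urlobject.URLObject('http://@example.com/repo')
print(u.with_netloc(u.netloc.lstrip('@')))
# -> http://example.com/repo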
def coerce(self, value):
    url = urlobject.URLObject(smart_text(value or ''))
    try:
        if not url.scheme:
            url = url.with_scheme(self.base_url.scheme or '')
        if not url.hostname:
            url = url.with_hostname(self.base_url.hostname or '')
        if url.auth == (None, None) and self.base_url.auth != (None, None):
            url = url.with_auth(*self.base_url.auth)
    except (TypeError, ValueError):  # pragma: no cover
        raise FileMakerValidationError('Could not determine file url.')
    return self._get_file(url)
def _get_scheme_host_port(url):
    url = urlobject.URLObject(url)
    if '://' not in url:
        return None, url, None

    scheme = url.scheme or 'http'
    port = url.port
    if not port:
        if scheme == 'http':
            port = 80
        elif scheme == 'https':
            port = 443
    host = url.netloc.without_port()
    return scheme, host, port
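# Hedged examples for the helper above; the URLs are illustrative. The host
# comes back as a urlobject string subclass, so it prints like a plain string.
scheme, host, port = _get_scheme_host_port('https://example.com/path')
print(scheme, host, port)  # https example.com 443
scheme, host, port = _get_scheme_host_port('example.com')
print(scheme, host, port)  # None example.com None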
def replay_urls(symlink_directory, output_directory):
    """Print list of replay URLs known in the datastore but not in output_directory"""
    client = get_client()
    for game_id, json_blob in client.hgetall('load').items():
        obj = json.loads(json_blob)
        url = urlobject.URLObject(obj['replayUrl'])
        basename = url.path.segments[-1]
        output_path = path.join(output_directory, basename)
        if not path.exists(output_path):
            print(url)
        if not symlink_directory:
            continue
        symlink_path = path.join(symlink_directory, basename)
        if not path.exists(symlink_path):
            os.symlink(output_path, symlink_path)
def __init__(self, fm_attr=None, *args, **kwargs):
    self.base_url = urlobject.URLObject(kwargs.pop('base_url', ''))
    super(FileField, self).__init__(fm_attr=fm_attr, *args, **kwargs)
from os import path
import os
import time
import json
import datetime

import urlobject
import requests
import click

from .storage import get_client

API_BASE = urlobject.URLObject('https://api.faforever.com')


def isoformat(date):
    return datetime.datetime.combine(
        date, datetime.time()).isoformat(timespec='seconds') + 'Z'


def build_url(api_base, page_size, max_pages, page_number, start_date,
              duration_weeks):
    url = api_base.add_path('/data/game')
    url = url.add_query_param('page[size]', page_size)
    url = url.add_query_param('page[number]', page_number)
    url = url.add_query_param('page[totals]', '')
    start_formatted = isoformat(start_date)
    end_formatted = isoformat(start_date + duration_weeks)
    filter_param = (
        '((playerStats.ratingChanges.leaderboard.id=in=("2");endTime=ge="%s";'
        'endTime=le="%s";validity=in=("VALID")));endTime=isnull=false' %
        (start_formatted, end_formatted))
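# Hedged usage sketch of the helpers above; the date and page size are
# illustrative. Note that urlobject may percent-encode the brackets in the
# query-parameter names.
print(isoformat(datetime.date(2024, 1, 1)))
# -> 2024-01-01T00:00:00Z
print(API_BASE.add_path('/data/game').add_query_param('page[size]', '10'))
# -> https://api.faforever.com/data/game?page[size]=10 (brackets possibly %5B/%5D)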
def test_api_get_pull_request(self, pr_util):
    pull_request = pr_util.create_pull_request(mergeable=True)
    id_, params = build_data(
        self.apikey, 'get_pull_request',
        repoid=pull_request.target_repo.repo_name,
        pullrequestid=pull_request.pull_request_id)
    response = api_call(self.app, params)
    assert response.status == '200 OK'
    url_obj = urlobject.URLObject(
        url('pullrequest_show',
            repo_name=pull_request.target_repo.repo_name,
            pull_request_id=pull_request.pull_request_id,
            qualified=True))
    pr_url = unicode(url_obj.with_netloc('test.example.com:80'))
    source_url = unicode(pull_request.source_repo.clone_url().with_netloc(
        'test.example.com:80'))
    target_url = unicode(pull_request.target_repo.clone_url().with_netloc(
        'test.example.com:80'))
    expected = {
        'pull_request_id': pull_request.pull_request_id,
        'url': pr_url,
        'title': pull_request.title,
        'description': pull_request.description,
        'status': pull_request.status,
        'created_on': pull_request.created_on,
        'updated_on': pull_request.updated_on,
        'commit_ids': pull_request.revisions,
        'review_status': pull_request.calculated_review_status(),
        'mergeable': {
            'status': True,
            'message': 'This pull request can be automatically merged.',
        },
        'source': {
            'clone_url': source_url,
            'repository': pull_request.source_repo.repo_name,
            'reference': {
                'name': pull_request.source_ref_parts.name,
                'type': pull_request.source_ref_parts.type,
                'commit_id': pull_request.source_ref_parts.commit_id,
            },
        },
        'target': {
            'clone_url': target_url,
            'repository': pull_request.target_repo.repo_name,
            'reference': {
                'name': pull_request.target_ref_parts.name,
                'type': pull_request.target_ref_parts.type,
                'commit_id': pull_request.target_ref_parts.commit_id,
            },
        },
        'author': pull_request.author.get_api_data(include_secrets=False,
                                                   details='basic'),
        'reviewers': [{
            'user': reviewer.get_api_data(include_secrets=False,
                                          details='basic'),
            'review_status': st[0][1].status if st else 'not_reviewed',
        } for reviewer, st in pull_request.reviewers_statuses()],
    }
    assert_ok(id_, expected, response.body)