async def makeHttpRequest(self, method, route, payload=None, session=None, **kwargs):
    """ Make an HTTP Request for the API endpoint.  This method wraps
    the logic about doing failure retry and passes off the actual work
    of doing an HTTP request to another method.

    method: HTTP verb, e.g. 'GET' or 'POST'
    route: API route, interpolated into the full URL with **kwargs
    payload: optional dict request body, serialized to JSON here
    session: optional aiohttp-style session; a context session is created
             around the retry loop either way
    Raises TaskclusterConnectionError / TaskclusterRestFailure once retries
    are exhausted.
    """
    url = self.makeFullUrl(route, **kwargs)
    log.debug('Full URL used is: %s', url)

    hawkExt = self.makeHawkExt()

    # Serialize payload if given.  Only dicts are serialized; other payload
    # types are passed through untouched.
    if payload is not None and isinstance(payload, dict):
        payload = utils.dumpJson(payload)

    # Do a loop of retries
    retry = -1  # we plus first in the loop, and attempt 1 is retry 0
    retries = self.options['maxRetries']
    with self.contextSession(session=session) as session:
        while True:
            retry += 1
            # calculateSleepTime(0) governs the first attempt's delay
            await asyncio.sleep(utils.calculateSleepTime(retry))
            headers = self.makeHeaders(method, url, payload, hawkExt)
            log.debug('Making attempt %d', retry)
            try:
                return await self._makeHttpRequest(method, url, payload, headers, session=session)
            except (exceptions.TaskclusterConnectionError, exceptions.TaskclusterRestFailure) as exc:
                if retry < retries:
                    # log.warn is deprecated; use warning() with lazy %-args
                    log.warning("Retrying because of %s", exc)
                    continue
                raise
def test_has_no_spaces(self):
    """dumpJson must emit compact separators (no spaces); either key
    order is acceptable since plain dicts historically had no guaranteed
    ordering."""
    expected = [
        '{"test":"works","doesit":"yes"}',
        '{"doesit":"yes","test":"works"}'
    ]
    actual = subject.dumpJson({'test': 'works', 'doesit': 'yes'})
    # assertIn gives a useful failure message (shows actual and the
    # candidate list), unlike assertTrue(actual in expected)
    self.assertIn(actual, expected)
def test_success_first_try_payload(client, apiPath):
    """A 200 on the first attempt returns the parsed JSON body and the
    payload is serialized before being passed to the transport."""
    with mock.patch.object(utils, 'makeSingleHttpRequest') as fake_request:
        body = {'test': 'works'}
        fake_request.return_value = ObjWithDotJson(200, body)

        result = client._makeHttpRequest('GET', 'test', {'payload': 2})

        fake_request.assert_called_once_with(
            'GET', apiPath, utils.dumpJson({'payload': 2}), mock.ANY)
        assert result == body
def test_success_first_try_payload(self):
    """A 200 on the first attempt returns the parsed JSON body and the
    payload is serialized before being handed to the transport."""
    with mock.patch.object(utils, 'makeSingleHttpRequest') as fake_request:
        body = {'test': 'works'}
        fake_request.return_value = ObjWithDotJson(200, body)

        result = self.client._makeHttpRequest(
            'GET', 'http://www.example.com', {'payload': 2})

        fake_request.assert_called_once_with(
            'GET', 'http://www.example.com',
            utils.dumpJson({'payload': 2}), mock.ANY)
        self.assertEqual(body, result)
def test_serializes_naive_date():
    """A naive datetime serializes in ISO form with a trailing 'Z'."""
    naive = datetime.datetime(2000, 1, 1, 1, 1, 1)
    assert subject.dumpJson({'date': naive}) == '{"date":"2000-01-01T01:01:01Z"}'
def test_serializes_aware_date(self):
    """A timezone-aware (UTC) datetime serializes with a trailing 'Z'."""
    aware = datetime.datetime(2000, 1, 1, 1, 1, 1, tzinfo=utc)
    self.assertEqual(
        '{"date":"2000-01-01T01:01:01Z"}',
        subject.dumpJson({'date': aware}))
def test_serializes_naive_date(self):
    """A naive datetime serializes in ISO form with a trailing 'Z'."""
    naive = datetime.datetime(2000, 1, 1, 1, 1, 1)
    self.assertEqual(
        '{"date":"2000-01-01T01:01:01Z"}',
        subject.dumpJson({'date': naive}))
def makeHawkExt(self):
    """ Make an 'ext' for Hawk authentication.

    Returns a base64-url-safe string when credentials are configured,
    otherwise an empty dict (no ext to send)."""
    opts = self.options
    creds = opts.get('credentials', {})
    if not (creds.get('clientId') and creds.get('accessToken')):
        return {}

    ext = {}
    cert = creds.get('certificate')
    if cert:
        # Certificates may arrive as bytes, a JSON string, or a dict.
        if six.PY3 and isinstance(cert, six.binary_type):
            cert = cert.decode()
        if isinstance(cert, six.string_types):
            cert = json.loads(cert)
        ext['certificate'] = cert

    if 'authorizedScopes' in opts:
        ext['authorizedScopes'] = opts['authorizedScopes']

    # .encode('base64') inserts a newline, which hawk doesn't
    # like but doesn't strip itself
    encoded = utils.encodeStringForB64Header(utils.dumpJson(ext)).strip()
    return utils.makeB64UrlSafe(encoded)
def makeHawkExt(self):
    """ Make an 'ext' for Hawk authentication.

    Returns a base64-url-safe string when credentials are configured,
    otherwise an empty dict (no ext to send)."""
    options = self.options
    credentials = options.get('credentials', {})
    if not credentials.get('clientId') or not credentials.get('accessToken'):
        return {}

    ext = {}
    certificate = credentials.get('certificate')
    if certificate:
        # Accept the certificate as bytes, JSON text, or an already-parsed dict.
        if six.PY3 and isinstance(certificate, six.binary_type):
            certificate = certificate.decode()
        if isinstance(certificate, six.string_types):
            certificate = json.loads(certificate)
        ext['certificate'] = certificate

    if 'authorizedScopes' in options:
        ext['authorizedScopes'] = options['authorizedScopes']

    # .encode('base64') inserts a newline, which hawk doesn't
    # like but doesn't strip itself
    return utils.makeB64UrlSafe(
        utils.encodeStringForB64Header(utils.dumpJson(ext)).strip())
def test_has_no_spaces(self):
    """dumpJson must emit compact separators (no spaces after ':' or ',')."""
    result = subject.dumpJson({'test': 'works', 'doesit': 'yes'})
    self.assertEqual('{"test":"works","doesit":"yes"}', result)
def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
    """ Create a set of temporary credentials

    Callers should not apply any clock skew; clock drift is accounted for by
    auth service.

    clientId: the issuing clientId
    accessToken: the issuer's accessToken
    start: start time of credentials (datetime.datetime)
    expiry: expiration time of credentials, (datetime.datetime)
    scopes: list of scopes granted
    name: credential name (optional)

    Returns a dictionary in the form:
        { 'clientId': str, 'accessToken: str, 'certificate': str}
    """
    for scope in scopes:
        if not isinstance(scope, six.string_types):
            raise exceptions.TaskclusterFailure('Scope must be string')

    # Credentials can only be valid for 31 days.  I hope that
    # this is validated on the server somehow...
    if expiry - start > datetime.timedelta(days=31):
        raise exceptions.TaskclusterFailure('Only 31 days allowed')

    # We multiply times by 1000 because the auth service is JS and as a result
    # uses milliseconds instead of seconds
    cert = dict(
        version=1,
        scopes=scopes,
        start=calendar.timegm(start.utctimetuple()) * 1000,
        expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
        seed=utils.slugId() + utils.slugId(),
    )

    # if this is a named temporary credential, include the issuer in the certificate
    if name:
        cert['issuer'] = utils.toStr(clientId)

    # NOTE: the order of these signature fields is part of the wire
    # protocol; do not reorder.
    sig = ['version:' + utils.toStr(cert['version'])]
    if name:
        sig.extend([
            'clientId:' + utils.toStr(name),
            'issuer:' + utils.toStr(clientId),
        ])
    sig.extend([
        'seed:' + utils.toStr(cert['seed']),
        'start:' + utils.toStr(cert['start']),
        'expiry:' + utils.toStr(cert['expiry']),
        'scopes:'
    ] + scopes)
    sigStr = '\n'.join(sig).encode()

    if isinstance(accessToken, six.text_type):
        accessToken = accessToken.encode()
    sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()

    cert['signature'] = utils.encodeStringForB64Header(sig)

    # BUGFIX: hmac.new requires a bytes message on Python 3, but the seed
    # may be text (slugId concatenation above has no encode, unlike the
    # signature path) — encode defensively before hashing.
    seed = cert['seed']
    if isinstance(seed, six.text_type):
        seed = seed.encode('ascii')
    newToken = hmac.new(accessToken, seed, hashlib.sha256).digest()
    # Strip base64 padding ('='); the temporary accessToken is url-safe.
    newToken = utils.makeB64UrlSafe(
        utils.encodeStringForB64Header(newToken)).replace(b'=', b'')

    return {
        'clientId': name or clientId,
        'accessToken': newToken,
        'certificate': utils.dumpJson(cert),
    }
def _makeHttpRequest(self, method, route, payload):
    """ Make an HTTP Request for the API endpoint.  This method wraps
    the logic about doing failure retry and passes off the actual work
    of doing an HTTP request to another method.

    method: HTTP verb, e.g. 'GET' or 'POST'
    route: API route, resolved via self._constructUrl
    payload: optional dict body, serialized to JSON here
    Raises TaskclusterConnectionError, TaskclusterAuthFailure or
    TaskclusterRestFailure once retries are exhausted.
    """
    url = self._constructUrl(route)
    log.debug('Full URL used is: %s', url)

    hawkExt = self.makeHawkExt()

    # Serialize payload if given
    if payload is not None:
        payload = utils.dumpJson(payload)

    # Do a loop of retries
    retry = -1  # we plus first in the loop, and attempt 1 is retry 0
    retries = self.options['maxRetries']
    while retry < retries:
        retry += 1
        # if this isn't the first retry then we sleep
        if retry > 0:
            time.sleep(utils.calculateSleepTime(retry))
        # Construct header
        if self._hasCredentials():
            sender = mohawk.Sender(
                credentials={
                    'id': self.options['credentials']['clientId'],
                    'key': self.options['credentials']['accessToken'],
                    'algorithm': 'sha256',
                },
                ext=hawkExt if hawkExt else {},
                url=url,
                content=payload if payload else '',
                content_type='application/json' if payload else '',
                method=method,
            )

            headers = {'Authorization': sender.request_header}
        else:
            log.debug('Not using hawk!')
            headers = {}
        if payload:
            # Set header for JSON if payload is given, note that we serialize
            # outside this loop.
            headers['Content-Type'] = 'application/json'

        log.debug('Making attempt %d', retry)
        try:
            response = utils.makeSingleHttpRequest(method, url, payload, headers)
        except requests.exceptions.RequestException as rerr:
            if retry < retries:
                # log.warn is deprecated; use warning() with lazy %-args
                log.warning('Retrying because of: %s', rerr)
                continue
            # raise a connection exception
            raise exceptions.TaskclusterConnectionError(
                "Failed to establish connection",
                superExc=rerr)

        # Handle non 2xx status code and retry if possible
        status = response.status_code
        if status == 204:
            return None

        # Catch retryable errors and go to the beginning of the loop
        # to do the retry
        if 500 <= status < 600 and retry < retries:
            log.warning('Retrying because of a %s status code', status)
            continue

        # Throw errors for non-retryable errors
        if status < 200 or status >= 300:
            data = {}
            try:
                data = response.json()
            except Exception:
                # Narrowed from a bare except: ignore JSON parse errors in
                # error bodies without swallowing SystemExit/KeyboardInterrupt.
                pass
            # Find error message; fall back to the generic message when the
            # body lacks one rather than raising with message=None
            message = "Unknown Server Error"
            if isinstance(data, dict):
                message = data.get('message') or message
            else:
                if status == 401:
                    message = "Authentication Error"
                elif status == 500:
                    message = "Internal Server Error"

            # Raise TaskclusterAuthFailure if this is an auth issue
            if status == 401:
                raise exceptions.TaskclusterAuthFailure(
                    message,
                    status_code=status,
                    body=data,
                    superExc=None)

            # Raise TaskclusterRestFailure for all other issues
            raise exceptions.TaskclusterRestFailure(
                message,
                status_code=status,
                body=data,
                superExc=None)

        # Try to load JSON
        try:
            return response.json()
        except ValueError:
            return {"response": response}

    # This code-path should be unreachable
    assert False, "Error from last retry should have been raised!"
def main():
    """Submit one Taskcluster task per entry in `tasks`, each depending
    on the current decision task."""
    _slugger = stableSlugId()

    def idMaker(name):
        # presumably stableSlugId yields bytes — task IDs must be text
        return _slugger(name).decode()

    decisionTaskId = os.environ['TASK_ID']
    owner = os.environ['GITHUB_HEAD_USER_EMAIL']
    source = os.environ['GITHUB_HEAD_REPO_URL']

    with requests.Session() as http:
        for task in tasks:
            # Every task depends on its declared siblings plus the decision task.
            dependencies = [idMaker(dep) for dep in task.get('dependencies', [])]
            dependencies.append(decisionTaskId)

            env = task.get('env', {})
            # Propagate GitHub metadata without clobbering explicit settings.
            for key, val in os.environ.items():
                if key.startswith('GITHUB_'):
                    env.setdefault(key, val)

            # Expose upstream artifact URLs through environment variables.
            for artifact in task.get('artifacts_from', []):
                task_id = idMaker(artifact['task_name'])
                path = artifact['path']
                env[artifact['env_var']] = f'{BASE_URL}/task/{task_id}/artifacts/{path}'

            task_id = idMaker(task['name'])
            response = http.put(
                f'{BASE_URL}/task/{task_id}',
                data=dumpJson({
                    'metadata': {
                        'name': task['name'],
                        'description': task['description'],
                        'owner': owner,
                        'source': source,
                    },
                    'provisionerId': 'aws-provisioner-v1',
                    'workerType': 'gecko-1-b-linux',
                    'schedulerId': 'taskcluster-github',
                    'taskGroupId': decisionTaskId,
                    'created': fromNow('0 seconds'),
                    'deadline': fromNow('1 day'),
                    'expires': fromNow('365 days'),
                    'payload': {
                        'image': 'mozilla/normandy-taskcluster:2017-07-26',
                        'command': [
                            '/bin/bash',
                            '-c',
                            ' && '.join([
                                'apt-get update',
                                'apt-get install -y git',
                                'mkdir /artifacts',
                                'cd ~',
                                'git clone $GITHUB_HEAD_REPO_URL normandy',
                                'pushd normandy',
                                'git checkout $GITHUB_HEAD_SHA',
                                'popd',
                                task['command'],
                            ])
                        ],
                        'maxRunTime': 28800,  # 8 hours
                        'env': env,
                        'artifacts': {
                            'public': {
                                'type': 'directory',
                                'path': '/artifacts',
                                'expires': fromNow('364 days'),  # must expire before task
                            },
                        },
                        'features': {
                            'taskclusterProxy': True,
                        },
                    },
                    'dependencies': dependencies,
                }))
            print(response.text)
            response.raise_for_status()
def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
    """ Create a set of temporary credentials

    Callers should not apply any clock skew; clock drift is accounted for by
    auth service.

    clientId: the issuing clientId
    accessToken: the issuer's accessToken
    start: start time of credentials (datetime.datetime)
    expiry: expiration time of credentials, (datetime.datetime)
    scopes: list of scopes granted
    name: credential name (optional)

    Returns a dictionary in the form:
        { 'clientId': str, 'accessToken: str, 'certificate': str}
    """
    # Every scope must be a plain string; fail fast before signing anything.
    for scope in scopes:
        if not isinstance(scope, six.string_types):
            raise exceptions.TaskclusterFailure('Scope must be string')

    # Credentials can only be valid for 31 days. I hope that
    # this is validated on the server somehow...
    if expiry - start > datetime.timedelta(days=31):
        raise exceptions.TaskclusterFailure('Only 31 days allowed')

    # We multiply times by 1000 because the auth service is JS and as a result
    # uses milliseconds instead of seconds
    cert = dict(
        version=1,
        scopes=scopes,
        start=calendar.timegm(start.utctimetuple()) * 1000,
        expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
        # encoded to bytes here so the seed can feed hmac.new directly below
        seed=utils.slugId().encode('ascii') + utils.slugId().encode('ascii'),
    )

    # if this is a named temporary credential, include the issuer in the certificate
    if name:
        cert['issuer'] = utils.toStr(clientId)

    # Build the signature input line by line.  The field order here is part
    # of the signing protocol — do not reorder these entries.
    sig = ['version:' + utils.toStr(cert['version'])]
    if name:
        sig.extend([
            'clientId:' + utils.toStr(name),
            'issuer:' + utils.toStr(clientId),
        ])
    sig.extend([
        'seed:' + utils.toStr(cert['seed']),
        'start:' + utils.toStr(cert['start']),
        'expiry:' + utils.toStr(cert['expiry']),
        'scopes:'
    ] + scopes)
    sigStr = '\n'.join(sig).encode()

    # hmac requires a bytes key; accept text accessTokens too.
    if isinstance(accessToken, six.text_type):
        accessToken = accessToken.encode()
    sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()

    cert['signature'] = utils.encodeStringForB64Header(sig)

    # Derive the temporary accessToken from the issuer's token and the seed;
    # strip base64 padding ('=') so the token is url-safe.
    newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
    newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')

    return {
        'clientId': name or clientId,
        'accessToken': newToken,
        'certificate': utils.dumpJson(cert),
    }
def _makeHttpRequest(self, method, route, payload):
    """ Make an HTTP Request for the API endpoint.  This method wraps
    the logic about doing failure retry and passes off the actual work
    of doing an HTTP request to another method.

    method: HTTP verb, e.g. 'GET' or 'POST'
    route: API route, resolved via self._constructUrl
    payload: optional dict body, serialized to JSON here
    Raises TaskclusterConnectionError, TaskclusterAuthFailure or
    TaskclusterRestFailure once retries are exhausted.
    """
    url = self._constructUrl(route)
    log.debug('Full URL used is: %s', url)

    hawkExt = self.makeHawkExt()

    # Serialize payload if given
    if payload is not None:
        payload = utils.dumpJson(payload)

    # Do a loop of retries
    retry = -1  # we plus first in the loop, and attempt 1 is retry 0
    retries = self.options['maxRetries']
    while retry < retries:
        retry += 1
        # if this isn't the first retry then we sleep
        if retry > 0:
            time.sleep(utils.calculateSleepTime(retry))
        # Construct header
        if self._hasCredentials():
            sender = mohawk.Sender(
                credentials={
                    'id': self.options['credentials']['clientId'],
                    'key': self.options['credentials']['accessToken'],
                    'algorithm': 'sha256',
                },
                ext=hawkExt if hawkExt else {},
                url=url,
                content=payload if payload else '',
                content_type='application/json' if payload else '',
                method=method,
            )

            headers = {'Authorization': sender.request_header}
        else:
            log.debug('Not using hawk!')
            headers = {}
        if payload:
            # Set header for JSON if payload is given, note that we serialize
            # outside this loop.
            headers['Content-Type'] = 'application/json'

        log.debug('Making attempt %d', retry)
        try:
            response = utils.makeSingleHttpRequest(method, url, payload, headers)
        except requests.exceptions.RequestException as rerr:
            if retry < retries:
                # log.warn is deprecated; use warning() with lazy %-args
                log.warning('Retrying because of: %s', rerr)
                continue
            # raise a connection exception
            raise exceptions.TaskclusterConnectionError(
                "Failed to establish connection",
                superExc=rerr
            )

        # Handle non 2xx status code and retry if possible
        status = response.status_code
        if status == 204:
            return None

        # Catch retryable errors and go to the beginning of the loop
        # to do the retry
        if 500 <= status < 600 and retry < retries:
            log.warning('Retrying because of a %s status code', status)
            continue

        # Throw errors for non-retryable errors
        if status < 200 or status >= 300:
            data = {}
            try:
                data = response.json()
            except Exception:
                pass  # Ignore JSON errors in error messages
            # Find error message; fall back to the generic message when the
            # body lacks one rather than raising with message=None
            message = "Unknown Server Error"
            if isinstance(data, dict):
                message = data.get('message') or message
            else:
                if status == 401:
                    message = "Authentication Error"
                elif status == 500:
                    message = "Internal Server Error"

            # Raise TaskclusterAuthFailure if this is an auth issue
            if status == 401:
                raise exceptions.TaskclusterAuthFailure(
                    message,
                    status_code=status,
                    body=data,
                    superExc=None
                )

            # Raise TaskclusterRestFailure for all other issues
            raise exceptions.TaskclusterRestFailure(
                message,
                status_code=status,
                body=data,
                superExc=None
            )

        # Try to load JSON
        try:
            return response.json()
        except ValueError:
            return {"response": response}

    # This code-path should be unreachable
    assert False, "Error from last retry should have been raised!"
def _makeHttpRequest(self, method, route, payload):
    """ Make an HTTP Request for the API endpoint.  This method wraps
    the logic about doing failure retry and passes off the actual work
    of doing an HTTP request to another method.

    method: HTTP verb, e.g. 'GET' or 'POST'
    route: API route, joined onto options['baseUrl']
    payload: optional dict body, serialized to JSON here
    Raises TaskclusterConnectionError, TaskclusterAuthFailure or
    TaskclusterRestFailure once retries are exhausted.
    """
    baseUrl = self.options['baseUrl']
    # urljoin ignores the last param of the baseUrl if the base url doesn't end
    # in /.  I wonder if it's better to just do something basic like baseUrl +
    # route instead
    if not baseUrl.endswith('/'):
        baseUrl += '/'
    url = urllib.parse.urljoin(baseUrl, route.lstrip('/'))
    log.debug('Full URL used is: %s', url)

    hawkExt = self.makeHawkExt()

    # Serialize payload if given
    if payload is not None:
        payload = utils.dumpJson(payload)

    # Do a loop of retries
    retry = -1  # we plus first in the loop, and attempt 1 is retry 0
    retries = self.options['maxRetries']
    while retry < retries:
        retry += 1
        # if this isn't the first retry then we sleep
        if retry > 0:
            snooze = float(retry * retry) / 10.0
            log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
            time.sleep(snooze)

        # Construct header
        if self._hasCredentials():
            sender = mohawk.Sender(
                credentials={
                    'id': self.options['credentials']['clientId'],
                    'key': self.options['credentials']['accessToken'],
                    'algorithm': 'sha256',
                },
                ext=hawkExt if hawkExt else {},
                url=url,
                content=payload if payload else '',
                content_type='application/json' if payload else '',
                method=method,
            )

            headers = {'Authorization': sender.request_header}
        else:
            log.debug('Not using hawk!')
            headers = {}
        if payload:
            # Set header for JSON if payload is given, note that we serialize
            # outside this loop.
            headers['Content-Type'] = 'application/json'

        log.debug('Making attempt %d', retry)
        try:
            response = utils.makeSingleHttpRequest(method, url, payload, headers)
        except requests.exceptions.RequestException as rerr:
            if retry < retries:
                # log.warn is deprecated; use warning() with lazy %-args
                log.warning('Retrying because of: %s', rerr)
                continue
            # raise a connection exception
            raise exceptions.TaskclusterConnectionError(
                "Failed to establish connection",
                superExc=rerr
            )

        # Handle non 2xx status code and retry if possible
        try:
            response.raise_for_status()
            if response.status_code == 204:
                return None
        except requests.exceptions.RequestException as rerr:
            status = response.status_code
            if 500 <= status < 600 and retry < retries:
                log.warning('Retrying because of: %s', rerr)
                continue
            # Parse messages from errors
            data = {}
            try:
                data = response.json()
            except Exception:
                # Narrowed from a bare except: ignore JSON parse errors in
                # error bodies without swallowing SystemExit/KeyboardInterrupt.
                pass
            # Find error message; fall back to the generic message when the
            # body lacks one rather than raising with message=None
            message = "Unknown Server Error"
            if isinstance(data, dict):
                message = data.get('message') or message
            else:
                if status == 401:
                    message = "Authentication Error"
                elif status == 500:
                    message = "Internal Server Error"

            # Raise TaskclusterAuthFailure if this is an auth issue
            if status == 401:
                raise exceptions.TaskclusterAuthFailure(
                    message,
                    status_code=status,
                    body=data,
                    superExc=rerr
                )

            # Raise TaskclusterRestFailure for all other issues
            raise exceptions.TaskclusterRestFailure(
                message,
                status_code=status,
                body=data,
                superExc=rerr
            )

        # Try to load JSON
        try:
            return response.json()
        except ValueError:
            return {"response": response}

    # This code-path should be unreachable
    assert False, "Error from last retry should have been raised!"