def do_check(conn):
    try:
        # HEAD requests return only the status code and reason, no body.
        conn.request("HEAD", "/")
        res = conn.getresponse()
        return res
    except socket.timeout as e:
        # Re-raised so check_django catches it in its HTTPException block;
        # this is expected when the instance isn't up at all yet.
        raise httplib.HTTPException(e)
    except socket.error as e:
        if '[Errno 111]' in str(e):
            # Re-raised so check_django catches it in its HTTPException block;
            # connection refused is expected while the instance is coming up.
            raise httplib.HTTPException(e)
        else:
            # Something is really wrong, e.g. malformed IP or network stack malfunction.
            body = "Something is wrong with the connection checking. This is bad. Check the logs."
            body += "\nError message: %s" % e
            write_to_slack(body)
            print(body)
            sys.exit("ohnooooo!")
    except Exception as e:
        # Final exception for uncaught errors.
        print(e)
        print('Uncaught exception here - failed during connectivity test')
        sys.exit('Uncaught exception. Failed during connectivity test')
def testRetryConnection(self, mock_conn, mock_sleep):
    feeds = self.customerconfig.get_mapfeedjobs(
        'downtimes-gocdb-connector.py',
        deffeed='https://goc.egi.eu/gocdbpi/')
    gocdb = DowntimesGOCDBReader(feeds.keys()[0])
    datestamp = datetime.datetime.strptime('2017-01-19', '%Y-%m-%d')
    start = datestamp.replace(hour=0, minute=0, second=0)
    end = datestamp.replace(hour=23, minute=59, second=59)
    self.orig_get_downtimes = gocdb.getDowntimes
    gocdb.getDowntimes = self.wrap_get_downtimes
    mock_sleep.return_value = True
    mock_conn.__name__ = 'mock_conn'
    mock_conn.side_effect = [
        httplib.HTTPException('Bogus'),
        httplib.HTTPException('Bogus'),
        httplib.HTTPException('Bogus')
    ]
    self.assertEqual(gocdb.getDowntimes(start, end, mock_conn), [])
    self.assertEqual(mock_conn.call_count,
                     int(self.globopts['ConnectionRetry'.lower()]) + 1)
    self.assertTrue(mock_sleep.called)
    self.assertEqual(mock_sleep.call_count,
                     int(self.globopts['ConnectionRetry'.lower()]))
    sleepretry = int(self.globopts['ConnectionSleepRetry'.lower()])
    self.assertEqual(mock_sleep.call_args_list, [
        mock.call(sleepretry),
        mock.call(sleepretry),
        mock.call(sleepretry)
    ])
def ensure_depot_tools():
    """Fetches depot_tools to temp dir to use it to fetch the gclient solution."""
    # We don't really want to trust that the existing version of depot_tools
    # is pristine and uncorrupted. So delete it and re-download.
    print 'Setting up depot_tools in %s' % TEMP_DEPOT_TOOLS
    rmtree(TEMP_DEPOT_TOOLS)
    parent = os.path.dirname(TEMP_DEPOT_TOOLS)
    if not os.path.exists(parent):
        os.makedirs(parent)

    tmpdir = tempfile.mkdtemp()
    tmpzip = os.path.join(tmpdir, 'depot_tools.zip')
    with open(tmpzip, 'wb') as f:
        http = httplib2.Http()
        response, content = http.request(DEPOT_TOOLS_URL, 'GET')
        if response['status'] != '200':
            # Request did not succeed. Try again.
            print 'response: %s' % response
            print 'content: %s' % content
            print 'Error fetching depot_tools.'
            raise httplib.HTTPException('HTTP status %s != 200' % response['status'])
        f.write(content)

    with zipfile.ZipFile(tmpzip, 'r') as f:
        f.extractall(TEMP_DEPOT_TOOLS)
    rmtree(tmpdir)

    if sys.platform.startswith('linux'):
        os.chmod(os.path.join(TEMP_DEPOT_TOOLS, GCLIENT_BIN), 0755)
        os.chmod(os.path.join(TEMP_DEPOT_TOOLS, 'update_depot_tools'), 0755)
        os.chmod(os.path.join(TEMP_DEPOT_TOOLS, 'cipd'), 0755)
    return TEMP_DEPOT_TOOLS
def request(self, url, params, headers=None, verb='GET', verbose=0,
            ckey=None, cert=None, capath=None, doseq=True, decode=False,
            cainfo=None):
    """Fetch data for given set of parameters"""
    curl = pycurl.Curl()
    bbuf, hbuf = self.set_opts(curl, url, params, headers, ckey, cert,
                               capath, verbose, verb, doseq, cainfo)
    curl.perform()
    if verbose:
        print(verb, url, params, headers)
    header = self.parse_header(hbuf.getvalue())
    if header.status < 300:
        if verb == 'HEAD':
            data = ''
        else:
            data = self.parse_body(bbuf.getvalue(), decode)
    else:
        data = bbuf.getvalue()
        msg = 'url=%s, code=%s, reason=%s, headers=%s' \
              % (url, header.status, header.reason, header.header)
        exc = httplib.HTTPException(msg)
        setattr(exc, 'req_data', params)
        setattr(exc, 'req_headers', headers)
        setattr(exc, 'url', url)
        setattr(exc, 'result', data)
        setattr(exc, 'status', header.status)
        setattr(exc, 'reason', header.reason)
        setattr(exc, 'headers', header.header)
        bbuf.flush()
        hbuf.flush()
        raise exc
    bbuf.flush()
    hbuf.flush()
    return header, data
def _request(method, url, json_data=None, headers=None, timeout=None):
    if headers is None:
        headers = {}
    if timeout is None:
        timeout = socket._GLOBAL_DEFAULT_TIMEOUT

    data = None
    request = urllib2.Request(url, headers=headers)
    request.get_method = lambda: method
    if json_data is not None:
        request.add_header('Content-Type', 'application/json')
        data = json.dumps(json_data)

    try:
        url_response = urllib2.urlopen(request, data=data, timeout=timeout)
        return Response(url,
                        headers=dict(url_response.info().items()),
                        status_code=url_response.getcode(),
                        reason='OK',
                        content=url_response.read())
    except urllib2.HTTPError as error:
        return Response(url,
                        headers=dict(error.info().items()),
                        status_code=error.getcode(),
                        reason=error.reason,
                        content=error.read())
    except Exception as error:
        raise httplib.HTTPException(error)
def testNewBug_Failure_HTTPException(self):
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._ExecuteRequest = mock.Mock(
        side_effect=httplib.HTTPException('reason'))
    response = service.NewBug('Bug title', 'body', owner='*****@*****.**')
    self.assertEqual(1, service._ExecuteRequest.call_count)
    self.assertIn('error', response)
def put_import_task_status(self, task_id=None, status=None, volume_id=None,
                           bytes_converted=None, error_code=None,
                           message=None):
    if task_id is None or status is None:
        raise RuntimeError("Invalid parameters")

    params = {'InstanceId': config.get_worker_id(),
              'ImportTaskId': task_id,
              'Status': status}
    if bytes_converted is not None:
        params['BytesConverted'] = bytes_converted
    if volume_id is not None:
        params['VolumeId'] = volume_id
    if error_code is not None:
        params['ErrorCode'] = error_code
    if message is not None:
        params['Message'] = message

    resp = self.conn.make_request('PutInstanceImportTaskStatus', params,
                                  path='/', verb='POST')
    if resp.status != 200:
        raise httplib.HTTPException(resp.status, resp.reason, resp.read())

    response = resp.read()
    root = fromstring(response)
    # The response should carry a single child element whose text says whether
    # the task was cancelled; default to 'true' (cancelled) if it is missing.
    cancelled = (root.getchildren()[0].text
                 if len(root.getchildren()) == 1 else 'true')
    return 'true' != cancelled
def test_download_failed_HTTPException(self, mock_urlopen):
    mock_urlopen.side_effect = httplib.HTTPException()
    fake_request = urllib2.Request('http://fakeurl.com')
    self.assertRaises(self.glance.RetryableError,
                      self.glance._download_tarball_and_verify,
                      fake_request, 'fake_staging_path')
def fetch_request_token(self, callback):
    if not callback:
        callback = 'oob'

    h = typepad.client
    h.clear_credentials()

    req = oauth.OAuthRequest.from_consumer_and_token(
        self.consumer,
        http_method='GET',
        http_url=self.request_token_url,
        callback=callback,
    )
    sign_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
    req.set_parameter('oauth_signature_method', sign_method.get_name())
    log.debug('Signing base string %r in fetch_request_token()'
              % (sign_method.build_signature_base_string(req, self.consumer, self.token),))
    req.sign_request(sign_method, self.consumer, self.token)

    log.debug('Asking for request token from %r', req.to_url())
    resp, content = h.request(req.to_url(),
                              method=req.get_normalized_http_method())
    if resp.status != 200:
        log.debug(content)
        raise httplib.HTTPException('WHAT %d %s?!' % (resp.status, resp.reason))

    self.token = oauth.OAuthToken.from_string(content)
    return self.token
def _get_remote_max_timestamps(project_id, dataset_id, table_id):
    """
    Get the max created and modified dates from the BigQuery table so we can
    determine which records we want to sync from MySQL.
    :param project_id: project name
    :param dataset_id: dataset name
    :param table_id: table name
    :return: tuple (max_created, max_modified)
    """
    query = 'select max(created) as max_created, max(modified) as max_modified from {0}.{1}'.\
        format(dataset_id, table_id)

    try:
        job = BigQueryJob(query, project_id=project_id, default_dataset_id=dataset_id)
    except HttpError:
        raise httplib.HTTPException()

    for page in job:
        for row in page:
            mc = row.max_created if row.max_created else datetime.min
            mm = row.max_modified if row.max_modified else datetime.min
            return mc, mm

    raise LookupError(
        'Failed to get max created and modified values from bigquery table {0}.{1}.{2}'
        .format(project_id, dataset_id, table_id))
def getHttpResponse(self):
    response = self.connection.getresponse()
    text = response.read().strip()

    if response.status == 500:
        common.debug("500 internal server error", "psocket")
        self.quit()
        return 'OUT'
    elif response.status != 200:
        raise httplib.HTTPException("Server not available")

    try:
        # this header contains important information
        # such as the IP of the next server we should connect to
        # and the assigned session id
        data = response.getheader('x-msn-messenger', '')
        if data.count("Session=close"):
            common.debug("Session closed", "socket")
            self.quit()
            return 'OUT'

        # parse the field
        self.sessionID = data.split("; GW-IP=")[0].replace("SessionID=", "")
        self.gatewayIP = data.split("; ")[1].replace("GW-IP=", "")
        self.host = self.gatewayIP
        self.path = "/gateway/gateway.dll?SessionID=" + self.sessionID
    except Exception as e:
        common.debug('In getHttpResponse: ' + str(e), 'socket')
        common.debug('Data: "%s"' % data, 'socket')
def put_import_task_status(self, worker_id=None, task_id=None, status=None,
                           volume_id=None, bytes_converted=None,
                           error_code=None):
    if task_id is None or status is None:
        raise RuntimeError("Invalid parameters")

    params = {'InstanceId': worker_id,
              'ImportTaskId': task_id,
              'Status': status}
    if bytes_converted is not None:
        params['BytesConverted'] = bytes_converted
    if volume_id is not None:
        params['VolumeId'] = volume_id
    if error_code is not None:
        params['ErrorCode'] = error_code

    resp = self.conn.make_request('PutInstanceImportTaskStatus', params,
                                  path='/', verb='POST')
    if resp.status != 200:
        raise httplib.HTTPException(resp.status, resp.reason, resp.read())

    root = objectify.XML(resp.read())
    return 'true' != root.cancelled.text if hasattr(root, 'cancelled') else True
def _api_call(name, method, path, body='', headers={}):
    browser = None
    try:
        # Change to HTTPConnection() if necessary
        browser = httplib.HTTPSConnection(url)
        browser.request(method, path, body, append_auth_token_to_headers(headers))
        res = browser.getresponse()
        # read() must be called before close(), otherwise it will return an empty string
        data = res.read()
        if res.status >= 400:
            raise httplib.HTTPException(
                str(res.status) + ' ' + res.reason +
                (': ' + data.strip() if data.strip() else ''))
        # close() must be called after each request, or subsequent requests will be denied
        browser.close()
        if data != '':
            return json.loads(data)
        else:
            raise Exception(str(res.status) + res.reason)
    except httplib.HTTPException as h:
        raise Exception('UpGuard API request failed [' + name + ']: ' + h.message)
    finally:
        if browser is not None:
            browser.close()
def send_pushover(self, message, title='PokeAlert', url=None, url_title=None,
                  sound=None):
    # Establish connection
    connection = httplib.HTTPSConnection("api.pushover.net:443", timeout=10)
    payload = {
        "token": self.app_token,
        "user": self.user_key,
        "title": title,
        "message": message
    }
    if url is not None:
        payload['url'] = url
        payload['url_title'] = url_title
    if sound is not None:
        payload['sound'] = sound

    connection.request("POST", "/1/messages.json", urllib.urlencode(payload),
                       {"Content-Type": "application/x-www-form-urlencoded"})
    r = connection.getresponse()
    if r.status != 200:
        raise httplib.HTTPException("Response not 200")
def do_search(self, metadata):
    keys = []
    if metadata.title:
        keys.append(metadata.title)
    if metadata.artist:
        keys.append(metadata.artist)
    urlkey = ' '.join(keys)
    params = {'keyword': urlkey, 'field': 'all'}
    try:
        status, content = http_download(
            url=HOST + '/',
            params=params,
            proxy=get_proxy_settings(config=self.config_proxy))
    except pycurl.error as e:
        logging.error('Download failed. %s', e.args[1])
        return []
    if status < 200 or status >= 400:
        raise httplib.HTTPException(status)

    match = RESULT_PATTERN.findall(content)
    result = []
    if match:
        for artist, album, title, url in match:
            title = title.replace('<span class="highlighter">', '').replace('</span>', '')
            artist = artist.replace('<span class="highlighter">', '').replace('</span>', '')
            album = album.replace('<span class="highlighter">', '').replace('</span>', '')
            url = DOWNLOAD_URL_PREFIX + url
            result.append(SearchResult(title=title,
                                       artist=artist,
                                       album=album,
                                       sourceid=self.id,
                                       downloadinfo=url))
    return result
def test_with_disabled_network(self):
    # Disable network access, at least via socket.socket.
    with mock.patch('socket.socket'):
        socket.socket.side_effect = httplib.HTTPException(
            'Network disabled by test.')
        token = cloud_utils.oauth2_token()
        self.assertEqual('', token)
def call(self, name, method, path, body='', headers={}):
    browser = None
    try:
        if self.use_ssl:
            browser = httplib.HTTPSConnection(self.url)
        else:
            browser = httplib.HTTPConnection(self.url)
        browser.request(method, path, body,
                        self.append_auth_token_to_headers(headers))
        res = browser.getresponse()
        # read() must be called before close(), otherwise it will return an empty string
        data = res.read()
        if res.status >= 400:
            raise httplib.HTTPException(
                str(res.status) + ' ' + res.reason +
                (': ' + data.strip() if data.strip() else ''))
        # close() must be called after each request, or subsequent requests will be denied
        browser.close()
        if data != '':
            return json.loads(data)
        else:
            # No Content is a legitimate response
            if str(res.status) == '204':
                return {}
            raise Exception(str(res.status) + res.reason)
    except httplib.HTTPException as h:
        raise Exception('ScriptRock API request failed [' + name + ']: ' + str(h))
    finally:
        if browser is not None:
            browser.close()
def __init__(self, netloc, *args, **kwargs):
    if netloc not in ('myhost.com', 'www.myhost.com'):
        raise httplib.HTTPException(netloc)
    self.netloc = netloc
    self.path = None
    self.method = None
    self.response = {}
def article(source=None,
            wiki_site='{scheme}://{host}'.format(scheme=wiki_scheme, host=wiki_hostname),
            follow_redirects=True):
    """
    Gets an article; a path that leads to a 404 raises NotFoundException,
    any other non-200 response raises an http exception.
    :param source: either a complete url or just the path to the article
    :return: Article object
    >>> print article('/wiki/Human').first_link
    /wiki/Hominini
    >>> print article('http://en.wikipedia.org/wiki/Wikipedia:Getting_to_Philosophy').first_link
    /wiki/Hyperlink
    >>> print article('en.wikipedia.org/wiki/Wikipedia:Getting_to_Philosophy').first_link
    /wiki/Hyperlink
    >>> print article('en.wikipedia.org/wiki/Wikipedia:Getting_to_Philosophy1') #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    NotFoundException:
    >>> print article('http://en.wikipedia.org/wiki/computer_science').url_tail
    Computer_science
    """
    if isinstance(source, Article):
        return source

    if not source.startswith('/'):
        # If not a bare path, make sure it's either a complete or semi-complete url.
        if source.startswith('http'):
            # make sure it's a complete url
            assert source.startswith(wiki_site + wiki_article_path)
            source = source[len(wiki_site):]  # <<< get path
        else:
            # at least make sure it starts with the wiki article path
            assert source.startswith(wiki_hostname + wiki_article_path)
            source = source[len(wiki_hostname):]

    conn = get_or_make_conn()
    # Do a HEAD request first to test the status code (404s return ~25KB).
    # On the other hand it doesn't guarantee anything, since the article can be
    # deleted in between calls and we are waiting on IO.
    # TODO: set up timings for comparisons.
    conn.request('HEAD', source)
    resp = conn.getresponse()
    _ = resp.read()

    if follow_redirects:
        while 300 < resp.status < 310:  # TODO: bound redirects, test redirects.
            source = resp.getheader('location')
            conn.request('HEAD', source)
            resp = conn.getresponse()
            _ = resp.read()

    if resp.status != 200:
        recycle_conn(conn)
        raise NotFoundException('Article Not Found path: {}'.format(source)) if resp.status == 404 else \
            httplib.HTTPException('Non 200 response from : {} status: {} reasons: {}'.format(
                source, resp.status, resp.reason or httplib.responses.get(resp.status, None)))

    conn.request('GET', source)
    resp = conn.getresponse()
    root = html.parse(resp, base_url=wiki_site + source).getroot()
    recycle_conn(conn)

    return Article(next(iter_text_links_path(root), None),
                   article_url_tail(source),
                   last_modified_on(root))
def get_file_id(node, file_path):
    browser = httplib.HTTPConnection(url)
    try:
        browser.request(
            'GET', '/api/v2/nodes/' + str(node) + '/ci_data?ci_type=files', '',
            {'Authorization': 'Token token="' + api_key + secret_key + '"',
             'Accept': 'application/json'})
        res = browser.getresponse()
        data = res.read()
        if res.status >= 400:
            print str(res.status) + ' ' + res.reason
            raise httplib.HTTPException(
                str(res.status) + ' ' + res.reason +
                (': ' + data.strip() if data.strip() else ''))
        browser.close()
        if data != '':
            for t, tval in json.loads(data).iteritems():
                for f, fval in tval.iteritems():
                    if f == file_path and 'text_file_id' in fval:
                        return fval['text_file_id']
            return None
        else:
            return str(res.status) + res.reason
    except httplib.HTTPException as h:
        return h.message
    finally:
        browser.close()
def get_file(text_file_id):
    browser = httplib.HTTPConnection(url)
    try:
        browser.request(
            'GET', '/api/v2/text_files/' + str(text_file_id), '',
            {'Authorization': 'Token token="' + api_key + secret_key + '"',
             'Accept': 'application/json'})
        res = browser.getresponse()
        data = res.read()
        if res.status >= 400:
            print str(res.status) + ' ' + res.reason
            raise httplib.HTTPException(
                str(res.status) + ' ' + res.reason +
                (': ' + data.strip() if data.strip() else ''))
        browser.close()
        if data != '':
            return json.loads(data)['data']
        else:
            return str(res.status) + res.reason
    except httplib.HTTPException as h:
        return h.message
    finally:
        browser.close()
def do_search(self, metadata):
    keys = []
    if metadata.title:
        keys.append(metadata.title)
    if metadata.artist:
        keys.append(metadata.artist)
    urlkey = ensure_utf8('+'.join(keys)).replace(' ', '+')
    url = XIAMI_HOST + XIAMI_SEARCH_URL
    status, content = http_download(url=url,
                                    params={'key': urlkey},
                                    proxy=get_proxy_settings(self.config_proxy))
    if status < 200 or status >= 400:
        raise httplib.HTTPException(status, '')

    match = XIAMI_SEARCH_PATTERN.findall(content)
    result = []
    if match:
        for title_elem, id, artist_elem, album_elem in match:
            title = TITLE_ATTR_PATTERN.search(title_elem).group(1)
            artist = TITLE_ATTR_PATTERN.search(artist_elem).group(1)
            album = TITLE_ATTR_PATTERN.search(album_elem).group(1)
            url = self.get_url(id)
            if url is not None:
                result.append(SearchResult(title=title,
                                           artist=artist,
                                           album=album,
                                           sourceid=self.id,
                                           downloadinfo=url))
    return result
def do_search(self, metadata):
    keys = []
    if metadata.title:
        keys.append(metadata.title)
    if metadata.artist:
        keys.append(metadata.artist)
    urlkey = ensure_utf8('+'.join(keys)).replace(' ', '+')
    url = NETEASE_HOST + NETEASE_SEARCH_URL
    params = 's=%s&type=1' % urlkey
    status, content = http_download(url=url,
                                    method='POST',
                                    params=params,
                                    proxy=get_proxy_settings(self.config_proxy))
    if status < 200 or status >= 400:
        raise httplib.HTTPException(status, '')

    def map_func(song):
        if len(song['artists']) > 0:
            artist_name = song['artists'][0]['name']
        else:
            artist_name = ''
        url = NETEASE_HOST + NETEASE_LYRIC_URL + '?id=' + str(song['id']) + '&lv=-1&kv=-1&tv=-1'
        return SearchResult(title=song['name'],
                            artist=artist_name,
                            album=song['album']['name'],
                            sourceid=self.id,
                            downloadinfo=url)

    parsed = json.loads(content)
    result = list(map(map_func, parsed['result']['songs']))
    return result
def __connect(host, url, method, body, headers, https=False):
    connection = None
    response = None
    try:
        if https:
            connection = httplib.HTTPSConnection(
                host, context=ssl._create_unverified_context())
        else:
            connection = httplib.HTTPConnection(host)
        sys.stderr.write('%s, %s, %s, %s, %s\n' %
                         (method, host, url, body, headers))
        connection.request(method, url, body, headers)
        response = connection.getresponse()
        if not 100 <= response.status < 300:
            raise httplib.HTTPException('Response code is %d' % response.status)
        return response.read()
    except httplib.HTTPException as h_exp:
        sys.stderr.write('HTTP exception: %s\n' % str(h_exp))
        raise h_exp
    except socket.error as s_exp:
        sys.stderr.write('Socket exception: %s\n' % str(s_exp))
        raise s_exp
    finally:
        if response is not None:
            response.close()
        if connection is not None:
            connection.close()
def _retreivePLS(self, url):
    conn = httplib.HTTPConnection(TUNEIN)
    conn.request("GET", TUNEINFORMAT % url.split('=')[-1])
    resp = conn.getresponse()
    if resp.status == 200:
        return resp.read().split('\n')
    else:
        raise httplib.HTTPException('Got bad status code.')
def request(self, method, url, body=None, headers={}):
    if isinstance(body, unicode):
        body = body.encode('utf8')
    self.prepare(method, url, body=body, headers=headers)
    try:
        self.handle.perform()
    except pycurl.error as e:
        raise httplib.HTTPException(self.handle.errstr())
def _parse_response(fp):
    '''A stripped down version of httplib.HTTPResponse.begin'''
    headers = {}

    # read until we get a non-100 response
    while True:
        version, status, reason = _read_status(fp)
        if status != httplib.CONTINUE:
            break
        # skip the headers from the 100 response
        while True:
            skip = fp.readline(_MAXLINE + 1)
            if len(skip) > _MAXLINE:
                raise httplib.LineTooLong("header line")
            skip = skip.strip()
            if not skip:
                break

    # read the rest of the headers
    n_headers = 0
    while True:
        line = fp.readline(_MAXLINE + 1)
        if len(line) > _MAXLINE:
            raise httplib.LineTooLong("header line")
        line = line.strip()
        if line in (b'\r\n', b'\n', b''):
            break
        n_headers += 1
        if n_headers > httplib._MAXHEADERS:
            raise httplib.HTTPException("got more than %d headers"
                                        % httplib._MAXHEADERS)
        key, value = line.split(b': ')
        headers[key.strip().lower()] = value.strip()

    # On Python 2 one could instead build an httplib.HTTPMessage from fp and call
    # getheader('content-length'); on Python 3 (where httplib is really http.client)
    # parse_headers(fp) would do the same job. Here we just read the value out of
    # the dict built above.
    length = int(headers.get(b'content-length', 0))
    if length > 0:
        data = fp.read(length)
    else:
        data = b''
    return status, reason, data
def taistorequest(con, cpu):
    conn = httplib.HTTPConnection(config.url["taisto"])
    url = "/api?con=%s&cpu=%s" % (con, cpu)
    conn.request("GET", url)
    response = conn.getresponse()
    conn.close()
    if response.status == 500:
        raise httplib.HTTPException(
            "request unsuccessful, check your parameters")
def _RequestAndProcessHttpErrors(*args, **kwargs):
    """Requests a URL, converting HTTP errors to Python exceptions."""
    http = utils.ServiceAccountHttp(timeout=10)

    response, content = http.request(*args, **kwargs)

    if not response['status'].startswith('2'):
        raise httplib.HTTPException(
            'HTTP status code %s: %s' % (response['status'], content))
    return content
def real_search(self, title='', artist='', page=0):
    query = VIEWLYRICS_QUERY_FORM
    query = query.replace('%title', title)
    query = query.replace('%artist', artist)
    query = ensure_utf8(query.replace(
        '%etc', ' client=\"MiniLyrics\" RequestPage=\'%d\'' % page))  # Needs real RequestPage

    queryhash = hashlib.md5()
    queryhash.update(query)
    queryhash.update(VIEWLYRICS_KEY)
    masterquery = '\2\0\4\0\0\0' + queryhash.digest() + query

    url = VIEWLYRICS_HOST + VIEWLYRICS_SEARCH_URL
    status, content = http_download(url=url,
                                    method='POST',
                                    params=masterquery,
                                    proxy=get_proxy_settings(self.config_proxy))
    if status < 200 or status >= 400:
        raise httplib.HTTPException(status, '')

    contentbytes = map(ord, content)
    codekey = contentbytes[1]
    deccontent = ''
    for char in contentbytes[22:]:
        deccontent += chr(char ^ codekey)

    result = []
    pagesleft = 0
    tagreturn = parseString(deccontent).getElementsByTagName('return')[0]
    if tagreturn:
        pagesleftstr = self.alternative_gettagattribute(
            tagreturn.attributes.items(), 'PageCount')  # tagreturn.attributes['PageCount'].value
        if pagesleftstr == '':
            pagesleft = 0
        else:
            pagesleft = int(pagesleftstr)
        tagsfileinfo = tagreturn.getElementsByTagName('fileinfo')
        if tagsfileinfo:
            for onefileinfo in tagsfileinfo:
                if onefileinfo.hasAttribute('link'):
                    title = onefileinfo.getAttribute('title')
                    artist = onefileinfo.getAttribute('artist')
                    album = onefileinfo.getAttribute('album')
                    url = VIEWLYRICS_BASE_LRC_URL + onefileinfo.getAttribute('link')
                    result.append(SearchResult(title=title,
                                               artist=artist,
                                               album=album,
                                               sourceid=self.id,
                                               downloadinfo=url))
    return result, (pagesleft - page)