def save_buffer(self, buffer, key):
    """Upload an in-memory buffer to the server under the given key.

    :param buffer: a BytesIO (posted as a multipart upload via requests)
        or a StringIO (posted as a pycurl form buffer).
    :param key: the remote filename; relative to the current prefix.
    """
    # proxy = os.environ.get('HTTP_PROXY')
    # c.setopt(c.PROXY, proxy)
    # logger.print('proxy:', proxy)
    if isinstance(buffer, BytesIO):
        from requests_toolbelt import MultipartEncoder

        # fix: `buf` and `true` were undefined names (NameError).
        # MultipartEncoder field values must be strings, so the canary
        # flag is sent as the string 'true'.
        encoder = MultipartEncoder({'file': (key, buffer), 'canary': 'true'})
        self.session.post(self.url, data=encoder,
                          headers={'Content-Type': encoder.content_type})
    elif isinstance(buffer, StringIO):
        from pycurl import Curl
        c = Curl()
        c.setopt(c.URL, self.url)
        c.setopt(c.TIMEOUT, 3600)
        # fix: `source_path` was undefined in this branch; the buffer
        # upload is named by `key`, mirroring save_file's FORM_FILENAME.
        c.setopt(c.HTTPPOST, [
            ('file', (
                c.FORM_BUFFER, key,
                c.FORM_BUFFERPTR, buffer.read(),
                c.FORM_CONTENTTYPE, 'plain/text',
            )),
        ])
        c.perform()
        c.close()
def save_file(self, source_path, key):
    """Upload a local file to the server under the given key.

    :param source_path: is relative to the local file system.
    :param key: the key is relative to the current prefix.
    :return: the local server's copy result when a local server is
        configured, otherwise None (after uploading via pycurl).
    """
    if self.local_server:
        # fix: only prepend "/" when the path is not already absolute;
        # the original prepended unconditionally, turning absolute paths
        # into "//..." and leaving relative paths unprefixed.
        if not source_path.startswith('/'):
            source_path = "/" + source_path
        return self.local_server.copy(source_path, key)
    # proxy = os.environ.get('HTTP_PROXY')
    # c.setopt(c.PROXY, proxy)
    # logger.print('proxy:', proxy)
    from pycurl import Curl
    c = Curl()
    c.setopt(c.URL, self.url)
    c.setopt(c.TIMEOUT, 3600)
    c.setopt(c.HTTPPOST, [
        ('file', (
            c.FORM_FILE, source_path,
            c.FORM_FILENAME, key,
            c.FORM_CONTENTTYPE, 'plain/text',
        )),
    ])
    c.perform()
    c.close()
def __init__(self):
    """Build the shared curl handle: TLS verification disabled,
    module-level timeout and proxy, redirects followed automatically."""
    self.curl = handle = Curl()
    for option, value in (
        (handle.SSL_VERIFYPEER, 0),
        (handle.SSL_VERIFYHOST, 0),
        (handle.TIMEOUT, DEFAULT_TIMEOUT),
        (handle.PROXY, HTTP_PROXY),
        (handle.FOLLOWLOCATION, True),
    ):
        handle.setopt(option, value)
def __init__(self):
    """Initialize a fresh curl handle and empty per-request response state."""
    self.curl = Curl()   # reusable pycurl handle
    self.url = None      # URL of the last request
    self.headers = {}    # response headers, filled in per request
    self.status = ''     # response status line
    self.code = 0        # HTTP status code
    # fix: use a raw string so `\S` is not parsed as a (deprecated,
    # future SyntaxError) string escape; the pattern is unchanged.
    self.charset_re = re.compile(r'charset=(\S+)')
def moodle_admin_login_curl(self):
    """Log in to the deployment's Moodle site as admin via pycurl.

    POSTs the admin credentials to /login/index.php, storing session
    cookies in a temporary file, and returns the response body as text.
    Exits the process with status 1 on a non-200 response.
    """
    # Temp file used as pycurl's cookie jar for the login session.
    # NOTE(review): the mkstemp fd is never closed — small fd leak; verify.
    fd, path = tempfile.mkstemp()
    try:
        response = BytesIO()
        url = 'https://' + self.deployment['siteURL'] + '/login/index.php'
        curl = Curl()
        curl.setopt(pycurl.URL, url)
        curl.setopt(pycurl.SSL_VERIFYPEER, False)
        curl.setopt(pycurl.WRITEFUNCTION, response.write)
        curl.setopt(pycurl.POST, True)
        # Same file for jar and cookie source so the login cookie survives
        # the redirect chain.
        curl.setopt(pycurl.COOKIEJAR, path)
        curl.setopt(pycurl.COOKIEFILE, path)
        post = urllib.parse.urlencode({
            'username': '******',
            'password': self.deployment['moodleAdminPassword']
        })
        curl.setopt(pycurl.POSTFIELDS, post)
        curl.setopt(pycurl.FOLLOWLOCATION, True)
        curl.perform()
        status = curl.getinfo(pycurl.HTTP_CODE)
        if status != 200:
            print("*** FAILED: {} ***".format(status))
            sys.exit(1)
        # Rebind `response` from the BytesIO to the decoded body text.
        response = response.getvalue().decode('utf-8')
    finally:
        # Always remove the cookie file, even on failure.
        os.remove(path)
    return response
def curl_ix(content=None):
    """Upload log content to ix.io and return the resulting paste link.

    :param content: list of lines to upload; when omitted (or empty),
        the lines of LOGFILE are uploaded instead.
    :return: the ix.io URL as a stripped string.
    """
    # import necessary classes and functions
    global ERROR
    from pycurl import Curl
    from io import BytesIO
    from urllib.parse import urlencode
    curl = Curl()
    buf = BytesIO()
    curl.setopt(curl.URL, "ix.io")
    curl.setopt(curl.WRITEDATA, buf)
    # fix: the original used a mutable default argument (`content=[]`);
    # `None` sentinel preserves the fall-back-to-LOGFILE behavior.
    if not content:
        try:
            with open(LOGFILE, 'r') as f:
                content = f.readlines()
        except FileNotFoundError:
            ERROR(f"{LOGFILE} not found.")
        except Exception as e:
            ERROR(f"Error occured:\n{str(e)}")
    curl.setopt(curl.POSTFIELDS, urlencode({"f:1": '\n'.join(content)}))
    try:
        curl.perform()
    except Exception as e:
        ERROR(f"Error occured:\n{str(e)}")
    curl.close()
    return buf.getvalue().decode().strip()
def curl(url, file_ids, log):
    """Download the requested GDC files as gdc_curl_download.tar.gz.

    POSTs a JSON body of file ids to `url`, streaming the response into
    the archive file. Non-200 responses have their error text logged.

    :param url: GDC data endpoint to POST to.
    :param file_ids: list of GDC file ids to request.
    :param log: logger used for progress and error reporting.
    :raises: re-raises any transfer error after logging it.
    """
    log.info('\tstarting curl fetch of gdc files')
    params = {'ids': file_ids}
    c = None
    with open('gdc_curl_download.tar.gz', 'wb') as f:
        try:
            c = Curl()
            c.setopt(c.URL, url)
            c.setopt(c.WRITEDATA, f)
            c.setopt(c.HTTPHEADER, ["Content-Type: application/json"])
            c.setopt(pycurl.CUSTOMREQUEST, "POST")
            c.setopt(pycurl.POSTFIELDS, json.dumps(params))
            # TODO: set up using a local certificate
            c.setopt(pycurl.SSL_VERIFYPEER, 0)
            c.setopt(pycurl.SSL_VERIFYHOST, 0)
            c.perform()
        except Exception:
            # fix: narrowed from a bare `except:` so BaseException
            # (KeyboardInterrupt/SystemExit) is not intercepted; errors
            # are still logged and re-raised.
            log.exception('problem with curl')
            raise
        finally:
            if c is not None:
                # On a non-200 response the downloaded file contains the
                # server's error text rather than an archive; log it.
                if int(c.getinfo(pycurl.RESPONSE_CODE)) != 200:
                    f.close()
                    with open('gdc_curl_download.tar.gz') as e:
                        err = e.read()
                    log.error('\tbad status on curl call(%s):\n%s' % (c.getinfo(pycurl.RESPONSE_CODE), err))
                c.close()
def getc(url):
    """Fetch `url` with pycurl and return the response body as a BytesIO."""
    sink = BytesIO()
    handle = Curl()
    handle.setopt(handle.URL, url)
    handle.setopt(handle.WRITEDATA, sink)
    handle.perform()
    handle.close()
    return sink
def init_curl(self):
    """Set up the body-fetching curl handle (TLS checks off, fixed
    timeout) and the companion HEAD-request handle."""
    handle = Curl()
    handle.setopt(handle.SSL_VERIFYPEER, 0)
    handle.setopt(handle.SSL_VERIFYHOST, 0)
    handle.setopt(pycurl.TIMEOUT, HttpDirectory.TIMEOUT)
    self.curl = handle
    self.curl_head = self._curl_handle()
def prepare_connection(cls, conf, timeout=DEFAULT_HTTP_TIMEOUT):
    """Return a pycurl handle configured to POST to the node's RPC endpoint."""
    endpoint = 'http://%s:%s' % (conf['rpchost'], conf['rpcport'])
    handle = Curl()
    handle.setopt(handle.CONNECTTIMEOUT, timeout)
    handle.setopt(handle.TIMEOUT, timeout)
    handle.setopt(handle.URL, endpoint)
    handle.setopt(handle.POST, 1)
    return handle
def build_thread(gitpath, ref, buildid, cburl=None, submodules=False):
    """Clone and build a git ref, tar the results, optionally POST them back.

    Progress and errors are appended to the build log via buildlog().

    :param gitpath: path/URL of the git repository to clone.
    :param ref: git ref to check out before building.
    :param buildid: id naming the build directory and build log.
    :param cburl: optional callback URL that receives the result tarball.
    :param submodules: when True, init and update submodules first.
    """
    tmpdir = os.path.join(conf('buildbot.buildpath'), buildid)
    repo = GitRepository(tmpdir)
    output, retcode = repo.clone(gitpath)
    if retcode:
        buildlog(buildid, 'Unable to clone %s. %s\n' % (gitpath, '\n'.join(output)))
        return
    output, retcode = repo.checkout(ref)
    if retcode:
        buildlog(buildid, 'Unable to checkout %s. %s\n' % (ref, '\n'.join(output)))
        return
    if submodules:
        output, retcode = repo.submodule_init()
        buildlog(buildid, output[0])
        buildlog(buildid, output[1])
        output, retcode = repo.submodule_update()
        buildlog(buildid, output[0])
        buildlog(buildid, output[1])
    resultsdir = os.path.join(tmpdir, '.build_results')
    os.makedirs(resultsdir)
    output, retcode = repo.build(conf('buildbot.signkey'), conf('buildbot.pbuilderrc'), resultsdir)
    buildlog(buildid, output[0])
    buildlog(buildid, output[1])
    #logging.debug(output[0])
    #logging.debug(output[1])
    os.chdir(resultsdir)
    if not os.listdir(resultsdir) or retcode != 0:
        buildlog(buildid, 'Nothing in results directory. Giving up.')
        return
    tarpath = os.path.join(tmpdir, 'package.tar.gz')
    tar = tarfile.open(tarpath, 'w:gz')
    for name in os.listdir(resultsdir):
        tar.add(name)
    tar.close()
    buildlog(buildid, 'Build complete. Results in %s\n' % tarpath)
    # fix: the Python-2-only builtin `file()` does not exist in Python 3;
    # open() is equivalent in both, and the context manager closes the
    # handle instead of leaking it.
    with open(tarpath, 'rb') as tarball:
        data = tarball.read()
    buildlog(buildid, 'Built %i byte tarball' % len(data))
    if cburl:
        buildlog(buildid, 'Performing callback: %s' % cburl)
        req = Curl()
        req.setopt(req.POST, 1)
        req.setopt(req.URL, str(cburl))
        req.setopt(req.HTTPPOST, [('package', (req.FORM_FILE, str(tarpath)))])
        req.setopt(req.WRITEDATA, open('%s/build.log' % tmpdir, 'a+'))
        req.perform()
        req.close()
def _curl_handle():
    """Build a curl handle for HEAD-style probes: no body, TLS checks off."""
    head = Curl()
    for option, value in (
        (pycurl.SSL_VERIFYPEER, 0),
        (pycurl.SSL_VERIFYHOST, 0),
        (pycurl.NOBODY, 1),
        (pycurl.TIMEOUT, HttpDirectory.TIMEOUT),
    ):
        head.setopt(option, value)
    return head
def init_curl(self):
    """Set up the main crawl handle (TLS checks off, fixed timeout,
    configured User-Agent) and the companion HEAD handle."""
    handle = Curl()
    handle.setopt(handle.SSL_VERIFYPEER, 0)
    handle.setopt(handle.SSL_VERIFYHOST, 0)
    handle.setopt(pycurl.TIMEOUT, HttpDirectory.TIMEOUT)
    handle.setopt(pycurl.USERAGENT, config.HEADERS["User-Agent"])
    self.curl = handle
    self.curl_head = self._curl_handle()
def runst(msg, STATE, rounds=27):
    """Absorb `msg` into a copy of STATE and run the Curl sponge transform.

    :param msg: trit sequence of exactly 243 elements.
    :param STATE: initial sponge state; copied, the caller's list is not mutated.
    :param rounds: number of transform rounds to apply (default 27).
    :return: the sponge's internal state after the transforms.
    """
    assert len(msg) == 243
    h = Curl()
    h._state = STATE[:]
    h._state[0:243] = msg[:]
    # fix: `xrange` is Python 2 only; `range` iterates identically here.
    for r in range(rounds):
        h._transform(1)
    return h._state
def curl_connection():
    """Return a curl handle authenticated against the local chain's RPC port."""
    params = get_chain_config_params()
    handle = Curl()
    handle.setopt(handle.URL, 'http://127.0.0.1:%s' % params['rpcport'])
    credentials = '%s:%s' % (params['rpcuser'], params['rpcpassword'])
    b64cred = base64.b64encode(credentials.encode('utf8'))
    handle.setopt(handle.HTTPHEADER, [
        "Content-Type: text/plain",
        "Authorization: Basic {}".format(b64cred.decode('utf8')),
    ])
    # handle.setopt(handle.VERBOSE, True)  # to print entire request flow
    # handle.setopt(handle.DEBUGFUNCTION, curl_debug_log)
    return handle
def _get(url: str, headers: Mapping[str, str] = None, rate_limiters: List[RateLimiter] = None, connection: Curl = None) -> (int, bytes, dict):
    """Perform a GET of `url` via pycurl, honoring optional rate limiters.

    :param url: URL to fetch.
    :param headers: request headers; gzip Accept-Encoding is added if absent.
    :param rate_limiters: limiters entered (as context managers) around the call.
    :param connection: optional reusable Curl handle; a fresh one is made if None.
    :return: (status_code, body bytes, response header dict).
    """
    if not headers:
        request_headers = ["Accept-Encoding: gzip"]
    else:
        request_headers = ["{header}: {value}".format(header=key, value=value) for key, value in headers.items()]
        if "Accept-Encoding" not in headers:
            request_headers.append("Accept-Encoding: gzip")
    response_headers = {}
    # Header callback: collect "Name: value" lines into response_headers.
    def get_response_headers(header_line: bytes) -> None:
        header_line = header_line.decode("ISO-8859-1")
        if ":" not in header_line:
            # Skip the status line and blank separators.
            return
        name, value = header_line.split(":", 1)
        response_headers[name.strip()] = value.strip()
    buffer = BytesIO()
    curl = connection if connection is not None else Curl()
    curl.setopt(curl.URL, url)
    curl.setopt(curl.WRITEDATA, buffer)
    curl.setopt(curl.HEADERFUNCTION, get_response_headers)
    curl.setopt(curl.HTTPHEADER, request_headers)
    if certifi:
        curl.setopt(curl.CAINFO, certifi.where())
    if _print_calls:
        _url = url
        if isinstance(_url, bytes):
            # Strip the b'...' repr wrapper for display only.
            _url = str(_url)[2:-1]
        if _print_api_key and ".api.riotgames.com/lol" in _url:
            _url += "?api_key={}".format(headers["X-Riot-Token"])
        print("Making call: {}".format(_url))
    if rate_limiters:
        with ExitStack() as stack:
            # Enter each context manager / rate limiter
            limiters = [stack.enter_context(rate_limiter) for rate_limiter in rate_limiters]
            # The second argument tells _execute whether it owns (and may
            # clean up) the handle — True only for a freshly made one.
            status_code = HTTPClient._execute(curl, connection is None)
    else:
        status_code = HTTPClient._execute(curl, connection is None)
    body = buffer.getvalue()
    # Decompress if we got gzipped data
    try:
        content_encoding = response_headers["Content-Encoding"].upper()
        if "GZIP" == content_encoding:
            body = zlib.decompress(body, zlib.MAX_WBITS | 16)
    except KeyError:
        pass
    return status_code, body, response_headers
def cache_and_parse(self, url) :
    """A low level shortcut that Caches and Parses a PDSC file.

    :param url: The URL of the PDSC file.
    :type url: str
    :return: A parsed representation of the PDSC file.
    :rtype: BeautifulSoup
    """
    handle = Curl()
    self.cache_file(handle, url)
    return self.pdsc_from_cache(url)
def curl(url):
    """Download `url` with pycurl and return the raw response bytes."""
    sink = BytesIO()
    handle = Curl()
    handle.setopt(handle.URL, url)
    handle.setopt(handle.WRITEDATA, sink)
    handle.perform()
    handle.close()
    body = sink.getvalue()
    sink.close()
    return body
def __fetch_page(self, url):
    """Fetch an esse3 page behind HTTP auth, using a two-step cookie handshake.

    First request (anonymous) acquires session cookies into `cookiefile`;
    second request replays them with credentials and browser-like headers.

    :param url: page URL to fetch.
    :return: the page body with whitespace runs collapsed to single spaces,
        or None when the authenticated request does not return HTTP 200.
    """
    useragent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36'
    encoding = 'gzip, deflate, sdch'
    httpheader = [
        'Accept: text/html, application/xhtml+xml, application/xml; q=0.9, image/webp, */*; q=0.8',
        'Accept-Language: it-IT, it; q=0.8, en-US; q=0.6, en; q=0.4',
        'Host: uniparthenope.esse3.cineca.it'
    ]
    cookiefile = 'cookiefile'
    # Step 1: anonymous request just to populate the cookie jar.
    page = StringIO()
    c = Curl()
    c.setopt(c.FOLLOWLOCATION, True)
    c.setopt(c.WRITEFUNCTION, page.write)
    c.setopt(c.COOKIEJAR, cookiefile)
    c.setopt(c.URL, url)
    c.perform()
    c.close()
    page.close()
    # Step 2: authenticated request replaying the stored cookies.
    page = StringIO()
    c = Curl()
    c.setopt(c.USERPWD, self.__username + ':' + self.__password)
    c.setopt(c.FOLLOWLOCATION, 1)
    c.setopt(c.WRITEFUNCTION, page.write)
    c.setopt(c.COOKIEFILE, cookiefile)
    c.setopt(c.ENCODING, encoding)
    c.setopt(c.HTTPHEADER, httpheader)
    c.setopt(c.REFERER, url)
    c.setopt(c.USERAGENT, useragent)
    c.setopt(c.URL, url)
    c.perform()
    if (c.getinfo(pycurl.HTTP_CODE) != 200):
        # NOTE(review): the curl handle (and `page`) are not closed on this
        # early return — resource leak; verify intended.
        return None
    c.close()
    page_str = page.getvalue()
    page.close()
    # Collapse all whitespace runs to single spaces for easier parsing.
    p = re.compile('\\s+')
    page_str = p.sub(" ", page_str)
    return page_str
def _curl_handle():
    """Build a curl handle for HEAD-style probes: no body, TLS checks off,
    configured User-Agent."""
    head = Curl()
    for option, value in (
        (pycurl.SSL_VERIFYPEER, 0),
        (pycurl.SSL_VERIFYHOST, 0),
        (pycurl.NOBODY, 1),
        (pycurl.TIMEOUT, HttpDirectory.TIMEOUT),
        (pycurl.USERAGENT, config.HEADERS["User-Agent"]),
    ):
        head.setopt(option, value)
    return head
def blocking_io(num):
    """Fetch the metadata JSON for xkcd comic `num` and return it parsed."""
    # TODO: Use pycurl
    sink = BytesIO()
    handle = Curl()
    handle.setopt(handle.URL, f'https://xkcd.com/{num}/info.0.json')
    handle.setopt(handle.WRITEDATA, sink)
    handle.setopt(handle.CAINFO, certifi.where())
    handle.perform()
    handle.close()
    sink.seek(0)
    return load(sink)
def load_url(self, url):
    """Download `url` and wrap the response body in a CSVFile."""
    payload = BytesIO()
    handle = Curl()
    handle.setopt(handle.URL, url)
    handle.setopt(handle.WRITEDATA, payload)
    handle.perform()
    handle.close()
    return CSVFile(payload)
def _get(url: str, headers: Mapping[str, str] = None, rate_limiter: RateLimiter = None, connection: Curl = None) -> (int, bytes, dict):
    """Perform a GET of `url` via pycurl, honoring an optional rate limiter.

    :param url: URL to fetch.
    :param headers: request headers; gzip Accept-Encoding is added if absent.
    :param rate_limiter: limiter entered (as a context manager) around the call.
    :param connection: optional reusable Curl handle; a fresh one is made if None.
    :return: (status_code, body bytes, response header dict).
    """
    if not headers:
        request_headers = ["Accept-Encoding: gzip"]
    else:
        request_headers = [
            "{header}: {value}".format(header=key, value=value) for key, value in headers.items()
        ]
        if "Accept-Encoding" not in headers:
            request_headers.append("Accept-Encoding: gzip")
    response_headers = {}
    # Header callback: collect "Name: value" lines into response_headers.
    def get_response_headers(header_line: bytes) -> None:
        header_line = header_line.decode("ISO-8859-1")
        if ":" not in header_line:
            # Skip the status line and blank separators.
            return
        name, value = header_line.split(":", 1)
        response_headers[name.strip()] = value.strip()
    buffer = BytesIO()
    curl = connection if connection is not None else Curl()
    curl.setopt(curl.URL, url)
    curl.setopt(curl.WRITEDATA, buffer)
    curl.setopt(curl.HEADERFUNCTION, get_response_headers)
    curl.setopt(curl.HTTPHEADER, request_headers)
    if certifi:
        curl.setopt(curl.CAINFO, certifi.where())
    if _print_calls:
        # TODO
        print("Making call: {}".format(url))
    if rate_limiter:
        with rate_limiter:
            # Second argument tells _execute whether it owns the handle —
            # True only when this call created it.
            status_code = HTTPClient._execute(curl, connection is None)
    else:
        status_code = HTTPClient._execute(curl, connection is None)
    body = buffer.getvalue()
    # Decompress if we got gzipped data
    try:
        content_encoding = response_headers["Content-Encoding"].upper()
        if "GZIP" == content_encoding:
            body = zlib.decompress(body, zlib.MAX_WBITS | 16)
    except KeyError:
        pass
    return status_code, body, response_headers
def prepare_connection(cls, conf, timeout=DEFAULT_HTTP_TIMEOUT):
    """Return a curl handle configured for Basic-auth POSTs to the RPC endpoint."""
    endpoint = 'http://%s:%s' % (conf['rpchost'], conf['rpcport'])
    credentials = ('%s:%s' % (conf['rpcuser'], conf['rpcpassword'])).encode('utf8')
    auth_header = b"Basic " + base64.b64encode(credentials)
    handle = Curl()
    handle.setopt(handle.HTTPHEADER, ["Authorization: %s" % auth_header.decode('utf8')])
    handle.setopt(handle.CONNECTTIMEOUT, timeout)
    handle.setopt(handle.TIMEOUT, timeout)
    handle.setopt(handle.URL, endpoint)
    handle.setopt(handle.POST, 1)
    return handle
def get(name):
    """Download station `name`'s zipped IGRA data file and return its text."""
    base = 'https://www1.ncdc.noaa.gov/pub/data/igra/data/data-por/{}-data.txt.zip'
    payload = BytesIO()
    handle = Curl()
    handle.setopt(handle.URL, base.format(name))
    handle.setopt(handle.WRITEDATA, payload)
    handle.perform()
    handle.close()
    archive = ZipFile(payload)
    raw = archive.open(archive.infolist()[0]).read()
    archive.close()
    return raw.decode()
def fetchAnnotationJson(self, rawRequestURL=None):
    """Send the raw sequence to the ACT annotation service, return JSON text.

    :param rawRequestURL: base URL of the annotation service; required.
    :return: the JSON response body, or None on detectable server errors.
    :raises: re-raises any error from the curl transfer after logging it.
    """
    try:
        postData = {'sequence': self.rawSequence}
        # Using configuration here causes circular dependency. So I'll just pass it in.
        if(rawRequestURL is None):
            logging.error('You must pass a rawRequestURL to fetchAnnotationJson.')
            return
        else:
            requestURL = rawRequestURL + '?' + urlencode(postData)
        resultsIoObject = BytesIO()
        curlObject = Curl()
        curlObject.setopt(curlObject.URL, requestURL)
        curlObject.setopt(curlObject.WRITEDATA, resultsIoObject)
        curlObject.perform()
        curlObject.close()
        getBody = resultsIoObject.getvalue().decode('utf8')
        logging.debug('JSON Request Body:\n' + getBody)
        # TODO:
        # Detect error <head><title>414 Request-URI Too Large</title></head>
        # For larger DRB alleles the webserver fails.
        # Detect error if the result is not json.
        # Maybe this error detection happens in parseExons. But i maybe need to detect server errors here.
        # Simple case is an empty string.
        if(getBody is None or len(getBody)<1):
            logging.error('The JSON results were an empty string. Is there a problem with the ACT server?:' + str(requestURL))
            showInfoBox('Problem Accessing Annotation Service','The JSON results were an empty string. Is there a problem with the ACT server?')
            return None
        # If it's an html error we can respond nicely.
        # fix: the original compared the 5-char slice getBody[0:5] against
        # the 6-char string '<html>', so this branch could never trigger.
        if getBody.startswith('<html>'):
            # TODO: this might not work if i get some other kind of html.
            errorCode = getBody[getBody.find('<title>'):getBody.find('</title>')]
            logging.error('The annotation JSON results are html, this probably indicates an issue with the annotation webserver:\n' + str(requestURL))
            showInfoBox('Problem Accessing Annotation Service', 'The annotation results are HTML, not JSON, probably an issue with the ACT webserver:\n' + str(errorCode))
            return None
        return getBody
    except Exception:
        logging.error('Exception when performing CURL:\n')
        logging.error(str(exc_info()))
        logging.error('URL:' + str(requestURL))
        raise
def performSubmission(submissionFileName, POST_DATA, enaUserName, enaPassword):
    """POST a prepared submission to the ENA REST endpoint and save the reply.

    Chooses the live or test ENA endpoint from configuration, performs the
    multipart POST via pycurl, and writes the XML response next to the
    submission file as *_results.xml.

    :param submissionFileName: path of the submission XML (used for logging
        and to derive the results file name).
    :param POST_DATA: pycurl HTTPPOST form data for the submission.
    :param enaUserName: ENA account user name (sent in the auth query string).
    :param enaPassword: ENA account password (sent in the auth query string).
    :return: the raw response body (bytes) from ENA.
    :raises: re-raises any error from the curl transfer after logging it.
    """
    logging.info('Performing submission of ' + submissionFileName + '\n')
    logging.info('POST Data:\n' + str(POST_DATA) + '\n')
    # test_submission == '0' means a real (production) submission.
    if (str(getConfigurationValue('test_submission')) == '0'):
        logging.info ('THIS IS A LIVE SUBMISSION AT ENA.')
        requestURL = str(getConfigurationValue('ena_rest_address_prod')) + '?auth=ENA%20' + str(enaUserName) + '%20' + str(enaPassword)
    else:
        logging.info ('THIS IS A TEST SUBMISSION AT ENA.')
        requestURL = str(getConfigurationValue('ena_rest_address_test')) + '?auth=ENA%20' + str(enaUserName) + '%20' + str(enaPassword)
    # Problem: StringIO Doesn't work with pycurl in python 3.6. Must replace this with a BytesIO.
    curlResponseBuffer = BytesIO()
    try:
        curlObject = Curl()
        curlObject.setopt(curlObject.URL, requestURL)
        curlObject.setopt(curlObject.POST, 1)
        curlObject.setopt(curlObject.HTTPPOST, POST_DATA)
        curlObject.setopt(curlObject.USERAGENT, 'Curl')
        curlObject.setopt(curlObject.WRITEFUNCTION, curlResponseBuffer.write)
        # NOTE(review): HTTPHEADER / SSL_VERIFYHOST / SSL_VERIFYPEER are used
        # unqualified here — presumably star-imported from pycurl; verify.
        curlObject.setopt(HTTPHEADER, ['Accept:application/xml'])
        # Insecure. Any security experts want to make this better?
        curlObject.setopt(SSL_VERIFYHOST, 0)
        curlObject.setopt(SSL_VERIFYPEER, 0)
        curlObject.perform()
        curlObject.close()
    except Exception:
        logging.error ('Exception when performing CURL:\n')
        #logging.error (str(exc_info()))
        logging.error('Exception when performing CURL.\n')
        logging.error('URL:' + str(requestURL))
        raise
    responseText = curlResponseBuffer.getvalue()
    #logging.info ('the type of the responseText is:' + str(type(responseText)))
    #logging.info ('after it becomes a string:' + str(type(str(responseText))))
    # write XML to file.
    # NOTE(review): responseText is bytes, so str() writes its b'...' repr
    # into the results file — confirm this is the intended format.
    projectSubResultsFileName = submissionFileName.replace('.xml','_results.xml')
    resultsFile = createOutputFile(projectSubResultsFileName)
    resultsFile.write(str(responseText))
    resultsFile.close()
    return responseText
def moodle_smoke_test(self):
    """Hit the deployed site's root over HTTPS; abort the deploy unless 200."""
    print("\nMoodle Smoke Test...")
    target = 'https://' + self.deployment['siteURL']
    handle = Curl()
    handle.setopt(pycurl.URL, target)
    handle.setopt(pycurl.SSL_VERIFYPEER, False)
    handle.setopt(pycurl.WRITEFUNCTION, lambda x: None)
    handle.perform()
    code = handle.getinfo(pycurl.HTTP_CODE)
    if code != 200:
        print("*** DEPLOY FAILED ***")
        print('HTTP Status Code: {}'.format(code))
        sys.exit(1)
    print('(ok: {})'.format(code))
def _fetch(self, url, query, on_progress=None):
    """POST `query` to `url` and return the response body decoded as UTF-8.

    When `on_progress` is given, response headers are routed through
    self._on_header so progress can be reported.
    """
    logging.debug('query={query}'.format(query=query))
    from pycurl import Curl, POST, POSTFIELDS
    from io import BytesIO
    handle = Curl()
    handle.setopt(handle.URL, url)
    handle.setopt(POST, 1)
    handle.setopt(POSTFIELDS, query)
    if on_progress:
        handle.setopt(handle.HEADERFUNCTION, self._on_header(on_progress))
    sink = BytesIO()
    handle.setopt(handle.WRITEDATA, sink)
    handle.perform()
    handle.close()
    return sink.getvalue().decode('UTF-8')
def sendTelegramAlert(self, telegram_chat_id, telegram_bot_token, message):
    """POST `message` to a Telegram chat and dispatch on the HTTP status.

    Messages beyond Telegram's 4096-character limit are replaced with a
    notice reporting the oversized length.
    """
    if len(message) > 4096:
        message = "The size of the message in Telegram (4096) has been exceeded. Overall size: " + str(
            len(message))
    handle = Curl()
    endpoint = 'https://api.telegram.org/bot' + str(
        telegram_bot_token) + '/sendMessage'
    handle.setopt(handle.URL, endpoint)
    payload = urlencode({'chat_id': telegram_chat_id, 'text': message})
    handle.setopt(handle.POSTFIELDS, payload)
    handle.perform_rs()
    status_code = handle.getinfo(HTTP_CODE)
    handle.close()
    self.getStatusByTelegramCode(status_code)