def _check_url(self, url, timeout=1):
    try:
        urlopen(url, timeout=timeout)
        return True
    except (URLError, socket.timeout):
        pass
    return False
def track(self, message, event_name='event'):
    with open(self.log_file, 'a') as file:
        s = '{time} - {event} - {message}\n'
        file.write(s.format(time=time.time(), event=event_name,
                            message=message))
    try:
        uid = message.chat_id
    except AttributeError:
        self.logger.warn('No chat_id in message')
        return False
    data = message.to_json()
    try:
        url = self.url_template.format(token=str(self.token), uid=str(uid),
                                       name=quote(event_name))
        request = Request(url, data=data.encode(),
                          headers={'Content-Type': 'application/json'})
        urlopen(request)
        return True
    except HTTPError as error:
        self.logger.warn('Botan track error ' + str(error.code) + ':' +
                         error.read().decode('utf-8'))
        return False
    except URLError as error:
        self.logger.warn('Botan track error ' + str(error.reason))
        return False
def test(self):
    '''
    Tests that it is possible to connect to the URL.
    '''
    try:
        urlopen('http://purl.obolibrary.org/obo/go.owl', timeout=1)
        return True
    except urllib.error.URLError:
        pass
def exists(self):
    req = Request("https://pypi.python.org/pypi/{}".format(self.name))
    req.get_method = lambda: "HEAD"
    try:
        urlopen(req)
        return True
    except HTTPError as e:
        if e.code == 404:
            return False
        raise
def get_tx(self, tx_hash):
    URL = "%s/api/rawtx/%s" % (self.base_url, b2h_rev(tx_hash))
    r = json.loads(urlopen(URL).read().decode("utf8"))
    tx = Tx.from_hex(r["rawtx"])
    if tx.hash() == tx_hash:
        return tx
    return None
def dataset_list(self, **kwargs):
    """Return relevant information concerning the datasets of your project.

    List all datasets in the project and create the Dataset objects.

    Raises
    ------
    HTTPError
        Dataset list not accessible.
    """
    sitools_url = (self.url.split("/")[0] + "//" +
                   self.url.split("//")[1].split("/")[0])
    kwargs.update({'media': 'json'})
    url = self.url + '/datasets' + '?' + urlencode(kwargs)
    data = []
    try:
        result = load(urlopen(url))
        if len(result['data']) != 0:
            for i, dataset in enumerate(result['data']):
                ds_url = sitools_url + dataset['url']
                data.append(Dataset(ds_url))
    except HTTPError:
        out_mess = ("Error in Project.dataset_list() :\n"
                    "Cannot access dataset list %s\n"
                    "Contact [email protected] and report that issue\n" % url)
        stderr.write(out_mess)
        raise
    return data
def __init__(self, url):
    """Initialize class Dataset"""
    try:
        load(urlopen(url))
    except HTTPError:
        err_mess = ("Error in Dataset.__init__() :\nDataset %s not "
                    "available\nPlease send an email to [email protected] "
                    "to report an issue if the problem persists\n" % url)
        stderr.write(err_mess)
        raise
    self.name = ""
    self.description = ""
    self.uri = "/" + url.split("/")[-1]
    self.url = url
    self.fields_list = []
    self.fields_dict = {}
    self.filter_list = []
    self.allowed_filter_list = []
    self.sort_list = []
    self.allowed_sort_list = []
    self.resources_target = []
    self.noClientAccess_list = []
    self.primary_key = ""
    self.compute_attributes()
    self.resources_list()
def expand_themes(rules_doc, parser=None, absolute_prefix=None,
                  read_network=False):
    """Expand <theme href='...'/> nodes with the theme html."""
    if absolute_prefix is None:
        absolute_prefix = ''
    base = rules_doc.docinfo.URL
    if parser is None:
        parser = etree.HTMLParser()
    for element in rules_doc.xpath('//diazo:theme[@href]',
                                   namespaces=namespaces):
        url = urljoin(base, element.get('href'))
        if not read_network and \
                url.startswith(('ftp://', 'ftps://', 'http://', 'https://')):
            raise ValueError("Supplied theme '%s', "
                             "but network access denied." % url)
        elif read_network and \
                url.startswith(('ftp://', 'ftps://', 'http://', 'https://')):
            theme = urlopen(url)
        else:
            theme = url
        theme_doc = etree.parse(theme, parser=parser, base_url=url)
        expand_theme(element, theme_doc, absolute_prefix)
    return rules_doc
def get_tx(self, tx_hash):
    url = "%s/rawtx/%s" % (self.base_url, b2h_rev(tx_hash))
    result = json.loads(urlopen(url).read().decode("utf8"))
    tx = Tx.from_hex(result['rawtx'])
    if tx.hash() == tx_hash:
        return tx
    return None
def upload_log():
    """Upload our full Kodi log as a GitHub gist"""
    try:
        log_content = get_kodi_log()
    except Exception as e:
        utils.log("Failed to read log: %s" % e)
        return
    utils.log('Sending log file...')
    try:
        data = {
            "files": {
                "kodi.log": {
                    "content": log_content
                }
            }
        }
        response = urlopen(make_request(GIST_API_URL),
                           json.dumps(data).encode('utf-8'))
    except HTTPError as e:
        utils.log("Failed to save log: HTTPError %s" % e.code)
        return False
    except URLError as e:
        utils.log("Failed to save log: URLError %s" % e.reason)
        return False
    try:
        return json.load(response)['html_url']
    except Exception:
        utils.log("Failed to parse API response: %s" % response.read())
def GetArtifact(self, request, context=None):
    if request.artifact.type_urn == common_urns.artifact_types.FILE.urn:
        payload = proto_utils.parse_Bytes(
            request.artifact.type_payload,
            beam_runner_api_pb2.ArtifactFilePayload)
        read_handle = self._file_reader(payload.path)
    elif request.artifact.type_urn == common_urns.artifact_types.URL.urn:
        payload = proto_utils.parse_Bytes(
            request.artifact.type_payload,
            beam_runner_api_pb2.ArtifactUrlPayload)
        # TODO(Py3): Remove the unneeded contextlib wrapper.
        read_handle = contextlib.closing(urlopen(payload.url))
    elif request.artifact.type_urn == common_urns.artifact_types.EMBEDDED.urn:
        payload = proto_utils.parse_Bytes(
            request.artifact.type_payload,
            beam_runner_api_pb2.EmbeddedFilePayload)
        read_handle = BytesIO(payload.data)
    else:
        raise NotImplementedError(request.artifact.type_urn)
    with read_handle as fin:
        while True:
            chunk = fin.read(self._chunk_size)
            if not chunk:
                break
            yield beam_artifact_api_pb2.GetArtifactResponse(data=chunk)
def license_url(name, project_url, dep_config):
    '''
    Gets the license URL for a dependency, either from the parsed yaml
    or, if it is github, by looking for a license file in the repo.
    '''
    configs = dep_config['pip_dependencies']
    if name.lower() in configs:
        return configs[name.lower()]['license']
    p = urlparse(project_url)
    if p.netloc != "github.com":
        return project_url
    raw = "https://raw.githubusercontent.com"
    path = p.path
    if not path.endswith("/"):
        path = path + "/"
    for license in ("LICENSE", "LICENSE.txt", "LICENSE.md",
                    "LICENSE.rst", "COPYING"):
        try:
            url = raw + urljoin(path, "master/" + license)
            with urlopen(url) as a:
                if a.getcode() == 200:
                    return url
        except:
            pass
    return project_url
def create_ticket(self, api_key, fields, files=None):
    """
    Create ticket on osTicket server.

    :arg api_key: (String) API-Key
    :arg fields: (String --> String) dictionary containing keys
        name, email, subject, message
    :arg files: (None or list of Strings) pathnames of zip files that
        should be attached
    :returns: None if the ticket was created successfully
    :raises ValueError: ticket upload failed
    :raises urllib.error.HTTPError: key not accepted
    :raises urllib.error.URLError: connection problem
    """
    if not files:
        files = []
    fields["attachments"] = []
    for fn in files:
        with open(fn, "rb") as f:
            # b64encode returns bytes; decode so the value embeds cleanly
            # in the JSON payload instead of serializing as "b'...'".
            encoded_data = base64.b64encode(f.read()).decode("ascii")
        att_desc = {os.path.basename(fn):
                    "data:application/zip;base64,%s" % encoded_data}
        fields["attachments"].append(att_desc)
    description = json.dumps(fields)
    # urlopen expects a bytes body on Python 3.
    req = Request(OS_TICKET_URL, description.encode("utf-8"),
                  headers={"X-API-Key": api_key})
    f = urlopen(req)
    response = f.getcode()
    f.close()
    if response == RESPONSE_SUCCESS:
        return
    else:
        raise ValueError('Ticket creation failed with error code %s.'
                         % response)
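# A minimal usage sketch for create_ticket above. The client class name
# OsTicketClient, the field values, and the file path are illustrative
# assumptions only, not from the source.
client = OsTicketClient()  # hypothetical class holding create_ticket
client.create_ticket(
    api_key="REPLACE_WITH_API_KEY",
    fields={"name": "Jane Doe",            # required keys per the docstring
            "email": "jane@example.com",
            "subject": "Crash report",
            "message": "Log files attached."},
    files=["/tmp/report.zip"])             # optional zip attachments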
def get_event(self, name):
    logger.debug('In Geofon.get_event("%s")' % name)
    if name not in self.events:
        url = 'http://geofon.gfz-potsdam.de/eqinfo/event.php?id=%s' % name
        logger.debug('Opening URL: %s' % url)
        page = urlopen(url).read()
        logger.debug('Received page (%i bytes)' % len(page))
        try:
            d = self._parse_event_page(page)
            ev = model.Event(
                lat=d['epicenter'][0],
                lon=d['epicenter'][1],
                time=d['time'],
                name=name,
                depth=d['depth'],
                magnitude=d['magnitude'],
                region=d['region'],
                catalog='GEOFON')
            if d['have_moment_tensor']:
                ev.moment_tensor = True
            self.events[name] = ev
        except NotFound:
            raise NotFound(url)  # reraise with url
    ev = self.events[name]
    if ev.moment_tensor is True:
        ev.moment_tensor = self.get_mt(ev)
    return ev
def fetch_dataset(url, sourcefile, destfile, totalsz):
    """
    Download the file specified by the given URL.

    Args:
        url (str): Base URL of the file to be downloaded.
        sourcefile (str): Name of the source file.
        destfile (str): Path to the destination.
        totalsz (int): Size of the file to be downloaded.
    """
    req = Request('/'.join([url, sourcefile]), headers={'User-Agent': 'neon'})
    # backport https limitation and workaround per
    # http://python-future.org/imports.html
    cloudfile = urlopen(req)
    neon_logger.display("Downloading file: {}".format(destfile))
    blockchar = u'\u2588'  # character to display in progress bar
    with open(destfile, 'wb') as f:
        data_read = 0
        chunksz = 1024**2
        while 1:
            data = cloudfile.read(chunksz)
            if not data:
                break
            data_read = min(totalsz, data_read + chunksz)
            progress_string = u'Download Progress |{:<50}| '.format(
                blockchar * int(float(data_read) / totalsz * 50))
            sys.stdout.write('\r')
            if PY3:
                sys.stdout.write(progress_string)
            else:
                sys.stdout.write(progress_string.encode("utf-8"))
            sys.stdout.flush()
            f.write(data)
    neon_logger.display("Download Complete")
def handle_POST(self):
    self.verifyAllowRemote()
    try:
        post_args = urlencode(self.request["form"])
        if sys.version_info >= (3, 0):
            post_args = post_args.encode()
        logger.debug("Logging into %s" % self._login)
        bundle_paths.BundleInstaller().validate_server_cert(
            self._login, self._sslpol)
        # Forward post arguments, including username and password.
        with closing(urlopen(self._login, post_args, URLOPEN_TIMEOUT)) as f:
            root = safe_lxml.parse(f).getroot()
            token = root.xpath("a:id", namespaces=NSMAP)[0].text
        if self.request["output_mode"] == "json":
            self.response.setHeader('content-type', 'application/json')
            sessDict = {"response": {"sessionKey": token}}
            self.response.write(json.dumps(sessDict))
        else:
            # Generate response.
            response = etree.Element("response")
            sessionKey = etree.SubElement(response, "sessionKey")
            sessionKey.text = token
            self.response.setHeader('content-type', 'text/xml')
            self.response.write(etree.tostring(response, pretty_print=True))
        logger.debug("Login successful")
    except HTTPError as e:
        if e.code in [401, 405]:
            # Returning 401 logs off current session
            # Splunkbase returns 405 when only password is submitted
            raise splunk.RESTException(400, e.msg)
        raise splunk.RESTException(e.code, e.msg)
    except Exception as e:
        logger.exception(e)
        raise splunk.AuthenticationFailed
def transactions_for_address(self, bitcoin_address):
    url = "{0}/txs/?address={1}".format(self.base_url, bitcoin_address)
    result = json.loads(urlopen(url).read().decode("utf8"))
    transactions = []
    for tx in result["txs"]:
        transactions.append(tx["txid"])
    return transactions
def log_event(settings, event, source, sourcetype, host, index):
    if event is None:
        sys.stderr.write("ERROR No event provided\n")
        return False
    query = [('source', source), ('sourcetype', sourcetype), ('index', index)]
    if host:
        query.append(('host', host))
    url = '%s/services/receivers/simple?%s' % (settings.get('server_uri'),
                                               urlencode(query))
    try:
        encoded_body = unicode(event).encode('utf-8')
        req = Request(url, encoded_body,
                      {'Authorization': 'Splunk %s'
                       % settings.get('session_key')})
        res = urlopen(req)
        if 200 <= res.code < 300:
            sys.stderr.write(
                "DEBUG receiver endpoint responded with HTTP status=%d\n"
                % res.code)
            return True
        else:
            sys.stderr.write(
                "ERROR receiver endpoint responded with HTTP status=%d\n"
                % res.code)
            return False
    except HTTPError as e:
        sys.stderr.write("ERROR Error sending receiver request: %s\n" % e)
    except URLError as e:
        sys.stderr.write("ERROR Error sending receiver request: %s\n" % e)
    except Exception as e:
        sys.stderr.write("ERROR Error %s\n" % e)
    return False
def get_google_finance_stock_quote(self, ticker_symbol):
    """
    See http://stackoverflow.com/questions/18115997/unicodedecodeerror-utf8-codec-cant-decode-byte-euro-symbol
    for an explanation of the cp1252 encoding.
    """
    url = 'http://finance.google.com/finance/info?q=%s' % ticker_symbol
    try:
        # decode() for 2.x and 3.x compatibility (in 3.x the result
        # without decode() is 'bytes', which neither the join below nor
        # json.loads like)
        lines = urlopen(url).read().decode('utf-8').splitlines()
    except HTTPError:
        # For example, wrong/unknown ticker
        return {}
    google_quote = json.loads(
        ''.join([x for x in lines if x not in ('// [', ']')]), 'cp1252')
    quote = {}
    quote['price'] = google_quote['l']
    quote['change'] = google_quote['c']
    quote['change_percentage'] = google_quote['cp']
    # Values from google finance can be in any currency; so far only
    # EUR and USD are handled.
    if '€' in google_quote['l_cur']:
        currency = Currency.objects.filter(iso_code='EUR')[0]
    else:
        currency = Currency.objects.filter(iso_code='USD')[0]
    quote['currency'] = currency
    return quote
def download_(url, dst):
    """
    @param: url to download file
    @param: dst place to put the file
    """
    file_size = int(urlopen(url).info().get("Content-Length", -1))
    if os.path.exists(dst):
        first_byte = os.path.getsize(dst)
    else:
        first_byte = 0
    if first_byte >= file_size:
        return file_size
    header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
    pbar = tqdm(
        total=file_size,
        initial=first_byte,
        unit="B",
        unit_scale=True,
        desc=url.split("/")[-1],
    )
    req = requests.get(url, headers=header, stream=True)
    with open(get_path_data(dst), "wb") as f:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                pbar.update(1024)
    pbar.close()
def test_build_min_free_space(self):
    store_path = tempfile.mktemp()
    os.mkdir(store_path)
    my_free_size = psutil.disk_usage(store_path).free - (1024 * 256)  # 256K
    client = api.Client(url=url, config_path=tempfile.mktemp(),
                        store_path=store_path,
                        max_size=1024 * 1024 * 2,
                        min_free_size=my_free_size)
    config = client.config()
    client.register()
    generated = client.build()
    self.assertTrue(len(generated) > 0)  # build at least 1 shard
    self.assertTrue(len(generated) < 16)  # stopped because of free space
    result = json.loads(
        urlopen(url + '/api/online/json').read().decode('utf8')
    )
    result = [farmers for farmers in result['farmers']
              if farmers['btc_addr'] == config['payout_address']]
    last_seen = result[0]['last_seen']
    reg_time = result[0]['reg_time']
    result = json.dumps(result, sort_keys=True)
    expected = json.dumps([{
        'height': len(generated),
        'btc_addr': config['payout_address'],
        'last_seen': last_seen,
        'payout_addr': config['payout_address'],
        'reg_time': reg_time,
        'uptime': 100.0
    }], sort_keys=True)
    self.assertEqual(result, expected)
def test_farm_registered(self):
    client = api.Client(url=url, config_path=tempfile.mktemp(),
                        max_size=1024 * 256)  # 256K
    config = client.config()
    client.register()
    before = datetime.datetime.now()
    self.assertTrue(client.farm(delay=2, limit=2))  # check farm returns True
    after = datetime.datetime.now()
    # check that farm did 2 pings with a 2 second delay
    self.assertTrue(datetime.timedelta(seconds=2) <= (after - before))
    result = json.loads(
        urlopen(url + '/api/online/json').read().decode('utf8')
    )
    result = [farmers for farmers in result['farmers']
              if farmers['btc_addr'] == config['payout_address']]
    last_seen = result[0]['last_seen']
    reg_time = result[0]['reg_time']
    result = json.dumps(result, sort_keys=True)
    expected = json.dumps([{
        'height': 2,
        'btc_addr': config['payout_address'],
        'last_seen': last_seen,
        'payout_addr': config['payout_address'],
        'reg_time': reg_time,
        'uptime': 100.0
    }], sort_keys=True)
    self.assertEqual(result, expected)
def test_build(self):
    client = api.Client(url=url, config_path=tempfile.mktemp(),
                        max_size=1024 * 256)  # 256K
    client.register()
    generated = client.build(cleanup=True)
    self.assertTrue(len(generated))
    client = api.Client(url=url, config_path=tempfile.mktemp(),
                        max_size=1024 * 512)  # 512K
    config = client.config()
    client.register()
    generated = client.build(cleanup=True)
    self.assertTrue(len(generated) == 4)
    result = json.loads(
        urlopen(url + '/api/online/json').read().decode('utf8')
    )
    result = [farmers for farmers in result['farmers']
              if farmers['btc_addr'] == config['payout_address']]
    last_seen = result[0]['last_seen']
    reg_time = result[0]['reg_time']
    result = json.dumps(result, sort_keys=True)
    expected = json.dumps([{
        'height': 4,
        'btc_addr': config['payout_address'],
        'last_seen': last_seen,
        'payout_addr': config['payout_address'],
        'reg_time': reg_time,
        'uptime': 100.0
    }], sort_keys=True)
    self.assertEqual(result, expected)
def get_options(cls):
    """
    Linux (OpenVZ) and Windows (KVM) pages are slightly different,
    therefore their pages are parsed by different methods. Windows
    configurations allow a selection of Linux distributions, but not
    vice-versa.

    :return: possible configurations.
    """
    context = ssl._create_unverified_context()
    url = ProxHost.BASE_URL + '/options'
    response = request.urlopen(url, context=context)
    response_json = json.loads(response.read().decode('utf-8'))
    options = []
    for joption in response_json:
        options.append(VpsOption(
            name=joption['name'],
            storage=joption['storage'],
            cores=joption['cores'],
            memory=joption['memory'],
            bandwidth='unmetered',
            connection=joption['connection'],
            price=joption['price'],
            purchase_url=str(joption['vmid'])
        ))
    return list(options)
def send(self):
    response_xml = urlopen(self._url).read()
    if b'error' in response_xml:
        self.response = None
    else:
        self.response = objectify.fromstring(response_xml,
                                             parser=_sdmbuilder_parser)
def post(url, data, timeout=None):
    """Request a URL.

    Args:
        url: The web location we want to retrieve.
        data: A dict of (str, unicode) key/value pairs.
        timeout: float. If this value is specified, use it as the
            definitive timeout (in seconds) for urlopen()
            operations. [Optional]

    Notes:
        If neither `timeout` nor `data['timeout']` is specified, the
        underlying defaults are used.

    Returns:
        A JSON object.
    """
    urlopen_kwargs = {}
    if timeout is not None:
        urlopen_kwargs['timeout'] = timeout
    if InputFile.is_inputfile(data):
        data = InputFile(data)
        request = Request(url, data=data.to_form(), headers=data.headers)
    else:
        data = json.dumps(data)
        request = Request(url, data=data.encode(),
                          headers={'Content-Type': 'application/json'})
    result = urlopen(request, **urlopen_kwargs).read()
    return _parse(result)
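# A hedged usage sketch for post() above; the endpoint URL and payload
# are illustrative assumptions only, not from the source.
result = post('https://api.example.com/sendMessage',   # hypothetical endpoint
              {'chat_id': 12345, 'text': 'hello'},     # dict is JSON-encoded
              timeout=5.0)                             # passed to urlopen()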
def local_jar(cls, url, cache_dir=None):
    if cache_dir is None:
        cache_dir = cls.JAR_CACHE
    # TODO: Verify checksum?
    if _is_service_endpoint(url):
        return url
    elif os.path.exists(url):
        return url
    else:
        cached_jar = os.path.join(cache_dir, os.path.basename(url))
        if os.path.exists(cached_jar):
            _LOGGER.info('Using cached job server jar from %s' % url)
        else:
            _LOGGER.info('Downloading job server jar from %s' % url)
            if not os.path.exists(cache_dir):
                os.makedirs(cache_dir)
            # TODO: Clean up this cache according to some policy.
            try:
                url_read = urlopen(url)
                with open(cached_jar + '.tmp', 'wb') as jar_write:
                    shutil.copyfileobj(url_read, jar_write, length=1 << 20)
                os.rename(cached_jar + '.tmp', cached_jar)
            except URLError as e:
                raise RuntimeError(
                    'Unable to fetch remote job server jar at %s: %s'
                    % (url, e))
        return cached_jar
def get_wan_ip(n=0):
    """
    That IP module sucks. Occasionally it returns an IP address behind
    cloudflare which probably happens when cloudflare tries to proxy your
    web request because it thinks you're trying to DoS. It's better if we
    just run our own infrastructure.
    """
    if n == 5:
        try:
            return myip()
        except:
            return None
    # Fail-safe: use centralized server for IP lookup.
    from .net import forwarding_servers
    for forwarding_server in forwarding_servers:
        url = "http://" + forwarding_server["addr"] + ":"
        url += str(forwarding_server["port"])
        url += forwarding_server["url"]
        url += "?action=get_wan_ip"
        try:
            r = urlopen(url, timeout=5)
            response = r.read().decode("utf-8")
            if is_ip_valid(response):
                return response
        except:
            continue
    time.sleep(1)
    return get_wan_ip(n + 1)
def request(url, tries=3):
    """Wrapper around :func:`urlopen` to AWIS call.

    On failure, will attempt another 2 tries for success.

    **Args:**
        *url*: the AWIS URL to call

        *tries*: number of failed tries allowed before flagging this
        attempt as a failure

    **Returns:**
        the HTTP response value
    """
    failed_requests = 0
    response_value = None
    while failed_requests < tries:
        try:
            log.debug('Request %d of %d: "%s"',
                      (failed_requests + 1), tries, url)
            response = urlopen(url)
            if response.code == 200:
                response_value = response.read()
                break
        except HTTPError as err:
            log.error('Request failed "%s"', err)
            failed_requests += 1
    if failed_requests >= tries:
        log.error('All requests failed')
    return response_value
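# A hedged usage sketch for request() above; the pre-signed AWIS query
# URL is a placeholder assumption, not from the source.
body = request('https://awis.example.com/?Action=UrlInfo&Url=python.org')
if body is None:
    # request() returns None once all retries are exhausted.
    log.error('AWIS lookup failed after retries')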
def _lb_agent_is_haproxy_alive(self, lb_use_tls):
    """ Invoke HAProxy through HTTP monitor_uri and return ZATO_OK if
    HTTP status code is 200. Raise Exception otherwise.
    """
    host = self.config.frontend['front_http_plain']['bind']['address']
    port = self.config.frontend['front_http_plain']['bind']['port']
    path = self.config.frontend['front_http_plain']['monitor_uri']
    url = 'http{}://{}:{}{}'.format('s' if lb_use_tls else '', host, port,
                                    path)
    try:
        conn = urlopen(url)
    except Exception:
        msg = 'Could not open URL `{}`, e:`{}`'.format(url, format_exc())
        logger.error(msg)
        raise Exception(msg)
    else:
        try:
            code = conn.getcode()
            if code == OK:
                return ZATO_OK
            else:
                msg = 'Could not open URL [{url}], HTTP code:[{code}]'.format(
                    url=url, code=code)
                logger.error(msg)
                raise Exception(msg)
        finally:
            conn.close()
def send_webhook_request(url, body, user_agent=None):
    if url is None:
        sys.stderr.write("ERROR No URL provided\n")
        return False
    sys.stderr.write(
        "INFO Sending POST request to url=%s with size=%d bytes payload\n"
        % (url, len(body)))
    sys.stderr.write("DEBUG Body: %s\n" % body)
    try:
        if sys.version_info >= (3, 0) and type(body) == str:
            body = body.encode()
        req = Request(url, body, {
            "Content-Type": "application/json",
            "User-Agent": user_agent
        })
        res = urlopen(req)
        if 200 <= res.code < 300:
            sys.stderr.write(
                "INFO Webhook receiver responded with HTTP status=%d\n"
                % res.code)
            return True
        else:
            sys.stderr.write(
                "ERROR Webhook receiver responded with HTTP status=%d\n"
                % res.code)
            return False
    except HTTPError as e:
        sys.stderr.write("ERROR Error sending webhook request: %s\n" % e)
    except URLError as e:
        sys.stderr.write("ERROR Error sending webhook request: %s\n" % e)
    except ValueError as e:
        sys.stderr.write("ERROR Invalid URL: %s\n" % e)
    return False
def shorten(aUrl):
    tinyurl = 'http://tinyurl.com/api-create.php?url='
    req = urlopen(tinyurl + aUrl)
    data = req.read()  # should be a tiny url
    return data
def getblflags(datasetId, blarr, startTime=None, endTime=None):
    """ Call antenna flag server for given datasetId and return flags
    per baseline. Optional input are startTime and endTime.
    blarr is array of baselines to be flagged (see rfpipe state.blarr)
    that defines structure of returned flag array.
    """
    # set up query to flag server
    query = '?'
    if startTime is not None:
        query += 'startTime={0}&'.format(startTime)
    if endTime is not None:
        query += 'endTime={0}'.format(endTime)
    url = 'https://{0}/{1}/{2}/flags{3}'.format(_host, _antpath, datasetId,
                                                query)
    # call server and parse response
    response_xml = urlopen(url).read()
    response = objectify.fromstring(response_xml, parser=_antflagger_parser)
    # find bad ants and baselines
    badants = set(sorted([int(flag.attrib['antennas'].lstrip('ea'))
                          for flag in response.findall('flag')]))
    flags = np.ones(len(blarr), dtype=int)
    for badant in badants:
        flags *= (badant != blarr).prod(axis=1)
    return flags
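# A hedged illustration of the masking step in getblflags above: blarr is
# assumed to be an (n_baselines, 2) array of antenna-number pairs, and each
# bad antenna zeroes out every baseline it participates in. The values here
# are made up for demonstration.
import numpy as np

blarr = np.array([[1, 2], [1, 3], [2, 3]])  # hypothetical baselines
flags = np.ones(len(blarr), dtype=int)
for badant in {2}:  # suppose antenna ea02 is flagged
    # (badant != blarr) is True where the antenna is absent; the row
    # product is 0 for any baseline containing the bad antenna.
    flags *= (badant != blarr).prod(axis=1)
# flags is now [0, 1, 0]: both baselines containing antenna 2 are flagged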
def _open_remote_file(fn):
    """
    Opens a remote file, trying different locations
    fn (str): the filename
    return (File): the opened File-like from urllib2
    raise HTTPError: in case of failure to find the file
    """
    # Sadly our website has to block requests with user-agent that looks too
    # much like bots and script-kiddies. That includes the default Python
    # user-agent. IOW, it checks that the caller is clever enough to
    # change the user-agent. So we have to show that we are "that" clever...
    headers = {"User-Agent": "Mozilla/5.0 Odemis"}
    for url in VIEWER_ROOT_URLS:
        try:
            web_url = url + fn
            req = Request(web_url, headers=headers)
            web_file = urlopen(req, timeout=10)
            break
        except HTTPError as err:
            if err.getcode() == 404 and url != VIEWER_ROOT_URLS[-1]:
                logging.info(
                    "Opening URL %s failed, will try another address",
                    web_url)
                continue
            raise
    # It should now either have succeeded or raised an exception
    return web_file
def urlopen(self, *args, **kwargs):
    resource = args[0]
    with support.transient_internet(resource):
        r = urllib_request.urlopen(*args, **kwargs)
        try:
            yield r
        finally:
            r.close()
def compute_attributes(self, **kwargs):
    """Build the attribute values for this Project instance."""
    kwargs.update({'media': 'json'})
    url = self.url + '?' + urlencode(kwargs)
    result = load(urlopen(url))
    self.name = result['project']['name']
    self.description = result['project']['description']
def get_shard_stats(es_url):
    stats_url = "{0}/{1}".format(es_url, SHARD_STATS_PATH)
    try:
        response = urlopen(stats_url)
    except HTTPError as exc:
        response = exc
    json_data = json.loads(response.read())
    return json_data
def open(self, request):
    try:
        return urlopen(request)
    except HTTPError:
        return False
    except URLError:
        self.cache_request(request)
        return False
def btc_height(self):
    """Bitcoin height"""
    url = 'https://chain.so/api/v2/get_info/BTC'
    result = json.loads(urlopen(url).read().decode('utf8'))
    if result['status'] == 'success':
        return result['data']['blocks']
    else:
        raise BlockExplorerApiFailed(url)
def download_file(url, outfile):
    """
    Downloads a file from url and saves it to outfile.
    """
    print("Downloading {}".format(url))
    with closing(urlopen(url)) as request:
        with open(outfile, 'wb') as fp:
            shutil.copyfileobj(request, fp)
def get_mt(self, ev):
    syear = time.strftime('%Y', time.gmtime(ev.time))
    url = 'http://geofon.gfz-potsdam.de/data/alerts/%s/%s/mt.txt' % (
        syear, ev.name)
    logger.debug('Opening URL: %s' % url)
    page = urlopen(url).read()
    logger.debug('Received page (%i bytes)' % len(page))
    return self._parse_mt_page(page)
def getOncoprintHTML(self, gene_list, study=None, study_name=None):
    '''returns the HTML for the oncoprint report for the specified
    gene list and study'''
    url = "/".join(self.url.split("/")[0:-1])
    gene_list = ",".join(gene_list)
    command = ("%s/link.do?cancer_study_id=%s&gene_list=%s"
               "&report=oncoprint_html" % (url, study, gene_list))
    return urlopen(command).read()
def lsun_categories(tag):
    """
    Query LSUN_URL and return a list of LSUN categories

    Argument:
        tag (str): version tag, use "latest" for most recent
    """
    f = urlopen(LSUN_URL + 'list.cgi?tag=' + tag)
    return json.loads(f.read())
def test_urllib_request_http(self):
    """
    This site (python-future.org) uses plain http (as of 2014-09-23).
    """
    import urllib.request as urllib_request
    URL = 'http://python-future.org'
    r = urllib_request.urlopen(URL)
    data = r.read()
    self.assertTrue(b'</html>' in data)
def test_urllib_request_http(self):
    """
    This site (amazon.com) uses plain http (as of 2014-04-12).
    """
    import future.moves.urllib.request as urllib_request
    URL = 'http://amazon.com'
    r = urllib_request.urlopen(URL)
    data = r.read()
    self.assertTrue(b'<html>' in data)
def confirms(self, txid):
    try:
        url = "%s/tx/%s" % (self.base_url, b2h_rev(txid))
        result = json.loads(urlopen(url).read().decode("utf8"))
        return result.get("confirmations", 0)
    except HTTPError as ex:
        if ex.code == 404:  # unpublished tx
            return None
        else:
            raise ex
def _run(self, params):
    if params["test"] == "mongodb-insert":
        return connections.mongodb_jobs.tests_inserts.insert(
            {"params": params["params"]}, manipulate=False)
    elif params["test"] == "mongodb-find":
        cursor = connections.mongodb_jobs.tests_inserts.find({"test": "x"})
        return list(cursor)
    elif params["test"] == "mongodb-count":
        return connections.mongodb_jobs.tests_inserts.count()
    elif params["test"] == "mongodb-full-getmore":
        connections.mongodb_jobs.tests_inserts.insert_many(
            [{"a": 1}, {"a": 2}])
        return list(connections.mongodb_jobs.tests_inserts.find(batch_size=1))
    elif params["test"] == "redis-llen":
        return connections.redis.llen(params["params"]["key"])
    elif params["test"] == "redis-lpush":
        return connections.redis.lpush(params["params"]["key"], "xxx")
    elif params["test"] == "urllib2-get":
        fp = urlopen(params["params"]["url"])
        return fp.read()
    elif params["test"] == "urllib2-post":
        # NOTE: on Python 3, urlopen requires a bytes body (b"x=x").
        return urlopen(params["params"]["url"], data="x=x").read()
    elif params["test"] == "requests-get":
        import requests
        return requests.get(params["params"]["url"], verify=False).text
def _refresh(self, http_request):
    refresh_time = datetime.datetime.utcnow()
    metadata_root = os.environ.get(
        'GCE_METADATA_ROOT', 'metadata.google.internal')
    token_url = ('http://{}/computeMetadata/v1/instance/service-accounts/'
                 'default/token').format(metadata_root)
    req = Request(token_url, headers={'Metadata-Flavor': 'Google'})
    token_data = json.loads(urlopen(req).read())
    self.access_token = token_data['access_token']
    self.token_expiry = (refresh_time +
                         datetime.timedelta(seconds=token_data['expires_in']))