def modified_run(self):
    import sys
    try:
        try:
            from urllib2 import HTTPHandler, build_opener
            from urllib2 import urlopen, Request
            from urllib import urlencode
        except ImportError:
            from urllib.request import HTTPHandler, build_opener
            from urllib.request import urlopen, Request
            from urllib.parse import urlencode

        os_ver = platform.system()
        py_ver = "_".join(str(x) for x in sys.version_info)
        now_ver = __version__.replace(".", "_")

        code = "os:{0},py:{1},now:{2}".format(os_ver, py_ver, now_ver)
        action = command_subclass.action
        cid = getnode()
        payload = {
            "v": "1",
            "tid": "UA-61791314-1",
            "cid": str(cid),
            "t": "event",
            "ec": action,
            "ea": code,
        }

        url = "http://www.google-analytics.com/collect"
        data = urlencode(payload).encode("utf-8")
        request = Request(url, data=data)
        request.get_method = lambda: "POST"
        connection = urlopen(request)
    except:
        pass
    orig_run(self)
def commitValues(self, *args, **kwargs):
    """Set values given by kwargs.

    Set a variable by giving its name and its value as a parameter to this
    function (the name is given by its GET parameter in the balancer-manager).

    Example, set lf to 2 and ls to 10:
        worker.commitValues(lf=2, ls=10)
    """
    srv = self.parentServer
    vh = self.parentVHost
    try:
        print("[%s:%s - %s] Applying values %s" % (srv.ip, srv.port, vh.name, kwargs))
    except:
        pass
    if srv is None:
        return False
    url = self.actionURL
    for arg in iter(kwargs):
        val = kwargs[arg]
        if val is not None:
            url += "&%s=%s" % (arg, val)
    # Call the URL to set the given values
    try:
        protocol = "https" if srv.secure else "http"
        req = Request("%s://%s:%s/%s" % (protocol, srv.ip, srv.port, url))
        if vh is not None and vh.name != "":
            req.add_header("Host", vh.name)
        urlopen(req)
    except:
        # Error
        return False
    return True
def get_title(url):
    """Fetches the contents of url and extracts (and utf-8 encodes)
    the contents of <title>"""
    if not url or not url.startswith(('http://', 'https://')):
        return None

    try:
        req = Request(url)
        if g.useragent:
            req.add_header('User-Agent', g.useragent)
        opener = urlopen(req, timeout=15)

        # determine the encoding of the response
        for param in opener.info().getplist():
            if param.startswith("charset="):
                param_name, sep, charset = param.partition("=")
                codec = codecs.getreader(charset)
                break
        else:
            codec = codecs.getreader("utf-8")

        with codec(opener, "ignore") as reader:
            # Attempt to find the title in the first 1kb
            data = reader.read(1024)
            title = extract_title(data)

            # Title not found in the first kb, try searching an additional 10kb
            if not title:
                data += reader.read(10240)
                title = extract_title(data)

        return title
    except:
        return None
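# A minimal standalone sketch of the for/else idiom get_title() relies on for
# charset detection: the else branch runs only when the loop finishes without
# a break, giving a clean "default codec" fallback. (Illustration only, not
# part of the original module.)
import codecs

def pick_reader(params, default="utf-8"):
    for param in params:
        if param.startswith("charset="):
            _, _, charset = param.partition("=")
            codec = codecs.getreader(charset)
            break
    else:
        # no charset parameter seen: fall back to the default encoding
        codec = codecs.getreader(default)
    return codec

# pick_reader(["charset=latin-1"]) -> latin-1 stream reader
# pick_reader([])                  -> utf-8 stream reader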
def call_hipchat(cls, ReturnType, url='', urlv2='', data=True, **kw):
    auth = [('format', 'json'), ('auth_token', hipchat.config.token)]
    if hipchat.config.api_version == 2:
        url = urlv2
    if not data:
        auth.extend(kw.items())
    req = Request(url=hipchat.config.api_url + url + '?%s' % urlencode(auth))
    if data:
        req.add_data(urlencode(kw.items()))
    if hipchat.config.proxy_server and hipchat.config.proxy_type:
        req.set_proxy(hipchat.config.proxy_server, hipchat.config.proxy_type)
    try:
        res = urlopen(req)
    except Exception, e:
        resp = "".join(e.readlines())
        try:
            err_resp = json.loads(resp)
        except ValueError:
            raise Exception(
                "unknown error: %d response was: %s" % (e.getcode(), resp),
            )
        error = err_resp.get("error", {})
        raise Exception(
            "%d %s error: %s" % (
                error.get("code", -1),
                error.get("type", "unknown"),
                error.get("message", "no message"),
            )
        )
def hit_endpoint(self, url, data_dict=None, http_method='GET'):
    """
    A reusable method that actually performs the request to the
    specified Atlas API endpoint.
    """
    if data_dict is None:
        data_dict = {}  # avoid a shared mutable default argument
    if self.verbose == 'true':
        print "HIT_ENDPOINT"
    data_dict.update({"access_token": self.access_token})
    if self.verbose == 'true':
        print "  Added access_token to data_dict (inside hit_endpoint)"
    if self.verbose == 'true':
        print "  Constructing request URL"
    request = Request(url, urllib.urlencode(data_dict))
    if self.verbose == 'true':
        print "  Setting request http_method: %s" % http_method
    request.get_method = lambda: http_method
    try:
        if self.verbose == 'true':
            print "  Opening Request URL: %s?%s" % (request.get_full_url(), request.get_data())
        response = urlopen(request)
    except URLError, e:
        raise SystemExit(e)
def report_now(self, registry=None, timestamp=None):
    if self.autocreate_database and not self._did_create_database:
        self._create_database()
    timestamp = timestamp or int(round(self.clock.time()))
    metrics = (registry or self.registry).dump_metrics()
    post_data = []
    for key, metric_values in metrics.items():
        if not self.prefix:
            table = key
        else:
            table = "%s.%s" % (self.prefix, key)
        values = ",".join(
            "%s=%s" % (k, v if type(v) is not str else '"{}"'.format(v))
            for (k, v) in metric_values.items())
        line = "%s %s %s" % (table, values, timestamp)
        post_data.append(line)
    post_data = "\n".join(post_data)
    path = "/write?db=%s&precision=s" % self.database
    url = "%s://%s:%s%s" % (self.protocol, self.server, self.port, path)
    request = Request(url, post_data.encode("utf-8"))
    if self.username:
        auth = _encode_username(self.username, self.password)
        request.add_header("Authorization", "Basic %s" % auth.decode('utf-8'))
    try:
        response = urlopen(request)
        response.read()
    except URLError as err:
        LOG.warning("Cannot write to %s: %s", self.server, err.reason)
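# Standalone sketch of the InfluxDB line-protocol formatting report_now()
# builds, with hypothetical metric names and values (not from the original
# reporter; dict ordering may vary on older Pythons):
metrics = {"requests": {"count": 42, "mean": 0.37}}
timestamp = 1500000000
prefix = "app"
for key, metric_values in metrics.items():
    table = "%s.%s" % (prefix, key) if prefix else key
    values = ",".join("%s=%s" % (k, v) for k, v in metric_values.items())
    print("%s %s %s" % (table, values, timestamp))
# -> app.requests count=42,mean=0.37 1500000000
# One such line per metric is POSTed to /write?db=<database>&precision=s.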
def put_photo(self, image, message=None, album_id=None, **kwargs):
    """Uploads an image using multipart/form-data

    image=File like object for the image
    message=Caption for your image
    album_id=None posts to /me/photos which uses or creates and uses
    an album for your application.
    """
    object_id = album_id or "me"
    # it would have been nice to reuse self.request;
    # but multipart is messy in urllib
    post_args = {
        'access_token': self.access_token,
        'source': image,
        'message': message,
    }
    post_args.update(kwargs)
    content_type, body = self._encode_multipart_form(post_args)
    req = Request("https://graph.facebook.com/%s/photos" % object_id, data=body)
    req.add_header('Content-Type', content_type)
    try:
        data = urlopen(req).read()
    except HTTPError:
        e = sys.exc_info()[1]  # for compatibility with python 2.X and 3.X
        # Facebook sends OAuth errors as 400, and urllib2 throws an
        # exception; we want a GraphAPIError
        data = e.read()
    try:
        response = _parse_json(data)
        # Raise an error if we got one, but don't freak out if Facebook
        # just gave us a Bool value
        if response and isinstance(response, dict) and response.get("error"):
            raise GraphAPIError(response["error"].get("code", 1),
                                response["error"]["message"])
    except ValueError:
        response = data
    return response
def test_http_doubleslash(self):
    # Checks that the presence of an unnecessary double slash in a url
    # doesn't break anything. Previously, a double slash directly after
    # the host could cause incorrect parsing of the url.
    h = urllib2.AbstractHTTPHandler()
    o = h.parent = MockOpener()

    data = ""
    ds_urls = [
        "http://example.com/foo/bar/baz.html",
        "http://example.com//foo/bar/baz.html",
        "http://example.com/foo//bar/baz.html",
        "http://example.com/foo/bar//baz.html",
    ]

    for ds_url in ds_urls:
        ds_req = Request(ds_url, data)

        # Check whether host is determined correctly if there is no proxy
        np_ds_req = h.do_request_(ds_req)
        self.assertEqual(np_ds_req.unredirected_hdrs["Host"], "example.com")

        # Check whether host is determined correctly if there is a proxy
        ds_req.set_proxy("someproxy:3128", None)
        p_ds_req = h.do_request_(ds_req)
        self.assertEqual(p_ds_req.unredirected_hdrs["Host"], "example.com")
def run_query(query_str):
    # urlencode() already percent-encodes spaces, so no manual
    # space-to-plus replacement is needed (it would mangle literal '+')
    req = Request('{0}?{1}'.format(QUERY_BASE, urlencode({'q': query_str})))
    req.add_header('User-Agent', USER_AGENT)
    try:
        resp = urlopen(req)
    except URLError, e:
        return 0, None
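# Why the manual space replacement was dropped above: urlencode() already
# encodes spaces as '+', and would have turned a pre-inserted '+' into %2B.
# Standalone check (Python 2 urllib, matching the snippet's vintage):
from urllib import urlencode
print urlencode({'q': 'hello world'})  # -> q=hello+world
print urlencode({'q': 'hello+world'})  # -> q=hello%2Bworld (double-encoded)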
def _send_webhook_msg(self, ip, port, payload_str, url_path='',
                      content_len=-1, content_type='application/json',
                      get_method=None):
    headers = {
        'content-type': content_type,
    }

    if not payload_str:
        content_len = None
        payload = None
    else:
        payload = bytes(payload_str, encoding='utf-8')

    if content_len == -1:
        content_len = len(payload)

    if content_len is not None:
        headers['content-length'] = str(content_len)

    url = 'http://{ip}:{port}/{path}'.format(ip=ip, port=port, path=url_path)

    req = Request(url, data=payload, headers=headers)
    if get_method is not None:
        req.get_method = get_method

    return urlopen(req)
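# Hypothetical usage sketch (host, port, and token path are illustrative,
# not from the original test suite): deliver a JSON update to a locally
# running webhook listener and check the HTTP status.
#
#   payload = '{"update_id": 1}'
#   response = self._send_webhook_msg('127.0.0.1', 8443, payload,
#                                     url_path='TOKEN')
#   assert response.getcode() == 200
#
# Passing content_len=-1 (the default) computes Content-Length from the
# payload; passing an explicit value lets a test send a deliberately wrong
# length header.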
def attachment(self, attachid):
    """Get an attachment by attachment_id

    @param attachid: attachment id
    @type  attachid: int

    @return: dict with three keys, 'filename', 'size', 'fd'
    @rtype: dict
    """
    if not self.authenticated and not self.skip_auth:
        self.auth()

    qparams = config.params['attach'].copy()
    qparams['id'] = attachid

    req_params = urlencode(qparams, True)
    req_url = urljoin(self.base, config.urls['attach'])
    req_url += '?' + req_params
    req = Request(req_url, None, config.headers)
    if self.httpuser and self.httppassword:
        base64string = base64.encodestring('%s:%s' % (self.httpuser, self.httppassword))[:-1]
        req.add_header("Authorization", "Basic %s" % base64string)
    resp = self.opener.open(req)
    try:
        content_type = resp.info()['Content-type']
        namefield = content_type.split(';')[1]
        filename = re.search(r'name=\"(.*)\"', namefield).group(1)
        content_length = int(resp.info()['Content-length'], 0)
        return {'filename': filename, 'size': content_length, 'fd': resp}
    except:
        return {}
def test_compare_triples():
    for mime, fext in MIME_TYPES.items():
        dump_path = path.join(DUMP_DIR, path.basename(mime))

        for url in URLs:
            if six.PY2:
                fname = '%s.%s' % (path.basename(urlparse.urlparse(url).path), fext)
            else:
                fname = '%s.%s' % (path.basename(urlparse(url).path), fext)
            fname = path.join(dump_path, fname)

            req = Request(url)
            req.add_header('Accept', mime)
            res = urlopen(req)

            g_fdp.parse(data=res.read(), format=mime)
            g_dump.parse(fname, format=mime)

            both, first, second = graph_diff(g_fdp, g_dump)
            n_first = len(first)
            # n_second = len(second)
            # n_both = len(both)
            assert_equals(
                n_first, 0,
                '{} triple(s) different from reference:\n\n{}===\n{}\n'.format(
                    n_first,
                    first.serialize(format='turtle'),
                    second.serialize(format='turtle')))
def namedcmd(self, cmd):
    """Run command stored in Bugzilla by name.

    @return: Result from the stored command.
    @rtype: list of dicts
    """
    if not self.authenticated and not self.skip_auth:
        self.auth()

    qparams = config.params['namedcmd'].copy()
    # Is there a better way of getting a command with a space in its name
    # to be encoded as foo%20bar instead of foo+bar or foo%2520bar?
    qparams['namedcmd'] = quote(cmd)
    req_params = urlencode(qparams, True)
    req_params = req_params.replace('%25', '%')

    req_url = urljoin(self.base, config.urls['list'])
    req_url += '?' + req_params
    req = Request(req_url, None, config.headers)
    if self.user and self.hpassword:
        base64string = base64.encodestring('%s:%s' % (self.user, self.hpassword))[:-1]
        req.add_header("Authorization", "Basic %s" % base64string)
    resp = self.opener.open(req)

    return self.extractResults(resp)
def open(self):
    request = Request(self.url)
    request.add_header('User-Agent', 'lastfm-lda recommender v.0.0.-1')
    request.add_header('Accept-encoding', 'gzip')
    while True:
        URLLoadListener.num_connections += 1
        response = None
        try:
            response = urlopen(request, timeout=10)
            if response.info().get('Content-Encoding') == 'gzip':
                f = GzipFile(fileobj=StringIO(response.read()))
                result = f.read()
                f.close()
            else:
                result = response.read()
            break
        except Exception, e:
            if self.retries > 2:
                if isinstance(e, BadStatusLine):
                    raise Exception("last.fm server does not respond (%s)" % e)
                raise e
            self.retries += 1
            print self.url
            print "failed with", e
            print "retry #", self.retries
            print
        finally:
            # assumed cleanup (the original snippet truncates here):
            # close the response if one was opened
            if response is not None:
                response.close()
def get(self, bugid):
    """Get an ElementTree representation of a bug.

    @param bugid: bug id
    @type  bugid: int

    @rtype: ElementTree
    """
    if not self.authenticated and not self.skip_auth:
        self.auth()

    qparams = config.params['show'].copy()
    qparams['id'] = bugid

    req_params = urlencode(qparams, True)
    req_url = urljoin(self.base, config.urls['show'])
    req_url += '?' + req_params
    req = Request(req_url, None, config.headers)
    if self.httpuser and self.httppassword:
        base64string = base64.encodestring('%s:%s' % (self.httpuser, self.httppassword))[:-1]
        req.add_header("Authorization", "Basic %s" % base64string)
    resp = self.opener.open(req)

    fd = StringIO(resp.read())
    # workaround for ill-defined XML templates in bugzilla 2.20.2
    parser = ForcedEncodingXMLTreeBuilder(encoding='utf-8')
    etree = ElementTree.parse(fd, parser)
    bug = etree.find('.//bug')
    # compare against None explicitly: an Element with no children is falsy
    if bug is not None and bug.attrib.has_key('error'):
        return None
    else:
        return etree
def openURL(url_base, data, method='Get', cookies=None, username=None, password=None):
    """Function to open URLs - wrapper around urllib2.urlopen, but with
    additional checks for OGC service exceptions and url formatting;
    also handles cookies and simple user/password authentication."""
    url_base = url_base.strip()  # str.strip() returns a new string
    lastchar = url_base[-1]
    if lastchar not in ['?', '&']:
        if url_base.find('?') == -1:
            url_base = url_base + '?'
        else:
            url_base = url_base + '&'

    if username and password:
        # Provide login information in order to use the WMS server
        # Create an OpenerDirector with support for Basic HTTP
        # Authentication...
        passman = HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, url_base, username, password)
        auth_handler = HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(auth_handler)
        openit = opener.open
    else:
        openit = urlopen

    try:
        if method == 'Post':
            req = Request(url_base, data)
        else:
            req = Request(url_base + data)
        if cookies is not None:
            req.add_header('Cookie', cookies)
        u = openit(req)
    except HTTPError, e:
        # Some servers may set the http header to 400 if returning an OGC
        # service exception, or 401 if unauthorised.
        if e.code in [400, 401]:
            raise ServiceException, e.read()
        else:
            raise e
def kickstarter(self, trigger):
    def IntCutter(fullstring, starter, ender):
        stringAndSuffix = fullstring.partition(starter)
        stringpart = str(stringAndSuffix[2]).partition(ender)
        stringsplit = stringpart[0].split(".")
        stringint = stringsplit[0]
        return int(stringint)

    req = Request('http://www.kickstarter.com/projects/sistratton/banshee-the-game-and-feature-film/comments')
    req.add_header('User-agent', 'Mozilla 5.10')
    result = urlopen(req)
    html = result.read()

    people = IntCutter(html, 'itemprop="Project[backers_count]" value="', '">')
    pledge = IntCutter(html, 'Project[pledged]" value="', '">')
    daysleft = IntCutter(html, '<meta property="twitter:data2" content="', '"/>')

    goal = 14500
    togo = goal - pledge
    average = pledge / people
    neededPerPerson = togo / average

    self.IRC.send('PRIVMSG ' + self.MSG_CHANNEL +
                  ' :Banshee Kickstarter pledges so far: ' + str(pledge) +
                  ' GBP. - Left to go: ' + str(togo) +
                  ' GBP. - Backers: ' + str(people) +
                  ' - Average per backer: ' + str(average) +
                  ' GBP. - Additional backers needed pledging average amount: ' +
                  str(neededPerPerson) + ' people. - Days left: ' +
                  str(daysleft) + '/30' + '\r\n')
def fetch_darned(self):
    """Fetch the specified raw dataset from darned.ucc.ie/static/downloads/
    into APP_ROOT/data. Create the './data' directory if APP_ROOT/data
    is not found.

    Returns:
        bool

    Raises:
        URLError: could not connect to the Darned server
    """
    if os.path.isfile(self.saved_abs_path + self.filename):
        sys.stderr.write("{f:s} already exists".format(f=self.filename))
        return False

    req = Request(self.url)
    try:
        response = urlopen(req, timeout=10)
    except URLError, e:
        if hasattr(e, 'reason'):
            sys.stderr.write('We failed to reach a server due to {0}'.format(e.reason))
            raise URLError(", Could not connect " + req.get_full_url())
        elif hasattr(e, 'code'):
            sys.stderr.write('The server couldn\'t fulfill the request\n')
            sys.stderr.write('Error code: {e}'.format(e=e.code))
            raise URLError(", Could not connect " + req.get_full_url())
def _uploadBatch(self, batch):
    """Uploads a batch to the server"""
    url = "%s/ImportXML" % self._url
    self._logger.debug('getting a batch')
    tstart = time.time()
    # get a batch
    self._logger.info('Generating metadata')
    data = self._agent._getMetadata(batch, logger=self._logger)
    self._logger.info('Metadata ready')
    postData = {'xml': data}
    tgen = time.time() - tstart
    req = Request(url)
    # strip the trailing line break that base64.encodestring appends
    cred = base64.encodestring('%s:%s' % (self._username, self._password)).strip()
    req.add_header("Authorization", "Basic %s" % cred)
    try:
        result = urlopen(req, data=urlencode(postData))
    except HTTPError, e:
        self._logger.exception("Status %s: \n %s" % (e.code, e.read()))
        raise Exception('upload failed')
def post(url, data):
    req = Request(url)
    req.add_data(data)
    req.get_method = lambda: 'POST'
    # urlopen, not the builtin open(), is what sends the request
    if not urlopen(req):
        print url
        print data
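# Hedged usage sketch (URL and body are illustrative only): with urllib2,
# attaching data and returning 'POST' from get_method forces a POST request.
#
#   post('http://example.com/api/items', urlencode({'name': 'widget'}))
#
# Note that urlopen() raises HTTPError on 4xx/5xx responses rather than
# returning a falsy value, so the "if not urlopen(req)" branch above is
# effectively dead code; error handling belongs in a try/except instead.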
def get_page_urls(url, user_agent=None):
    req = Request(url)
    if user_agent:
        req.add_header('User-Agent', user_agent)
    response = urlopen(req)
    urls = REGEX_URLS.findall(str(response.read()))
    return set(url[0] for url in urls)
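# Minimal usage sketch (example.com is a placeholder; REGEX_URLS is assumed
# to be a module-level compiled pattern whose first capture group is the URL):
#
#   urls = get_page_urls('http://example.com', user_agent='Mozilla/5.0')
#   for u in sorted(urls):
#       print u
#
# Returning a set() deduplicates URLs that appear more than once on the page.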
def howmuch(loc_param, date_param, apt, pyung):
    res = ''
    data = []
    start = time.time()
    request = Request(url + '&LAWD_CD=' + loc_param + '&DEAL_YMD=' + date_param)
    request.get_method = lambda: 'GET'
    try:
        res_body = urlopen(request).read()
    except:
        # keep the same arity as the success return value below
        return '', [], traceback.format_exc().splitlines()[-1], 0
    end = time.time()
    soup = BeautifulSoup(res_body, 'html.parser')
    items = soup.findAll('item')
    for item in items:
        item = item.text.encode('utf-8')
        item = re.sub('<.*?>', '|', item)
        parsed = item.split('|')
        try:
            #res = parsed[3]+' '+parsed[4]+', '+parsed[7]+'m², '+parsed[9]+'F, '+parsed[1].strip()+'만원\n'
            if parsed[7] == pyung and parsed[4].find(apt) > -1:
                res += ('<tr><td>' + parsed[2] + '/' + parsed[5] + '/' + parsed[6] +
                        '</td><td>' + parsed[3] + ' ' + parsed[4] +
                        '</td><td>' + parsed[7] + '</td><td>' + parsed[9] +
                        '</td><td>' + parsed[1] + '</td></tr>')
                data.append((parsed[2], parsed[5], parsed[6], parsed[1]))
        except IndexError:
            continue
    return res, data, None, int((end - start) * 1000)
def makeRequest(self, method, path, params=None):
    if not params:
        params = {}
    params['key'] = self.api_key
    params['token'] = self.oauth_token

    url = self.base_url + path
    data = None

    if method == 'GET':
        url += '?' + urlencode(params)
    elif method in ['DELETE', 'POST', 'PUT']:
        data = urlencode(params).encode('utf-8')

    request = Request(url)

    if method in ['DELETE', 'PUT']:
        request.get_method = lambda: method

    try:
        if data:
            response = urlopen(request, data=data)
        else:
            response = urlopen(request)
    except HTTPError as e:
        print(e)
        print(e.read())
        result = None
    else:
        result = json.loads(response.read().decode('utf-8'))

    return result
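# Hypothetical usage against a Trello-style REST API (the paths and IDs are
# illustrative; only 'key' and 'token' are added by the method itself):
#
#   board = client.makeRequest('GET', '/1/boards/BOARD_ID')
#   card = client.makeRequest('POST', '/1/cards',
#                             params={'name': 'New card', 'idList': 'LIST_ID'})
#
# GET parameters travel in the query string, while DELETE/POST/PUT parameters
# are form-encoded into the request body. get_method is overridden for
# DELETE/PUT because urllib's Request natively distinguishes only GET
# (no data) from POST (data present).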
def auth(self):
    """Authenticate a session."""
    if self.try_auth():
        return

    # prompt for username if we were not supplied with it
    if not self.user:
        self.log('No username given.')
        self.user = self.get_input('Username: ')

    # prompt for password if we were not supplied with it
    # (this span was masked in the source; reconstructed from the
    # surrounding getpass usage)
    if not self.password:
        self.log('No password given.')
        self.password = getpass.getpass()

    # perform login
    qparams = config.params['auth'].copy()
    qparams['Bugzilla_login'] = self.user
    qparams['Bugzilla_password'] = self.password

    req_url = urljoin(self.base, config.urls['auth'])
    req = Request(req_url, urlencode(qparams), config.headers)
    if self.httpuser and self.httppassword:
        base64string = base64.encodestring('%s:%s' % (self.httpuser, self.httppassword))[:-1]
        req.add_header("Authorization", "Basic %s" % base64string)
    resp = self.opener.open(req)

    if resp.info().has_key('Set-Cookie'):
        self.authenticated = True
        if not self.forget:
            self.cookiejar.save()
            os.chmod(self.cookiejar.filename, 0700)
        return True
    else:
        raise RuntimeError("Failed to login")
def fetch(url, auth=None):
    """Fetch URL, optional with HTTP Basic Authentication."""

    if not (url.startswith('http://') or url.startswith('https://')):
        try:
            with io.open(url, 'r', encoding='utf-8', errors='replace') as fp:
                return u''.join(fp.readlines())
        except OSError as e:
            raise AcrylamidException(e.args[0])

    req = Request(url)
    if auth:
        req.add_header('Authorization', 'Basic ' + b64encode(auth))

    try:
        r = urlopen(req)
    except HTTPError as e:
        raise AcrylamidException(e.msg)

    if r.getcode() == 401:
        # prompt for credentials and retry with Basic auth
        # (this span was masked in the source; reconstructed from context)
        user = input('Username: ')
        passwd = getpass.getpass('Password: ')
        return fetch(url, auth=user + ':' + passwd)
    elif r.getcode() == 200:
        try:
            enc = re.search('charset=(.+);?', r.headers.get('Content-Type', '')).group(1)
        except AttributeError:
            enc = 'utf-8'
        return u'' + r.read().decode(enc)

    raise AcrylamidException('invalid status code %i, aborting.' % r.getcode())
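# Usage sketch (credentials and URL are placeholders): fetch() base64-encodes
# the auth string itself, so the caller passes the raw "user:password" pair.
#
#   text = fetch('https://example.org/export.xml', auth='user:password')
#
# Non-HTTP inputs are treated as local file paths and read directly, which is
# why the function checks the scheme prefix before building a Request.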
def download_checked(cls, url, target_file, expected_digest):
    if os.path.exists(target_file):
        # file already exists, see if we can use it.
        if PBFile.md5_digest(target_file) == expected_digest:
            # local file is ok
            return
        else:
            os.unlink(target_file)

    user_agent = ("pbunder/%s " % (cls.my_version) +
                  "(http://github.com/zeha/pbundler/issues)")
    try:
        req = Request(url)
        req.add_header("User-Agent", user_agent)
        req.add_header("Accept", "*/*")
        with file(target_file, 'wb') as f:
            sock = urlopen(req)
            try:
                f.write(sock.read())
            finally:
                sock.close()
    except Exception as ex:
        raise PBundlerException("Downloading %s failed (%s)" % (url, ex))

    local_digest = PBFile.md5_digest(target_file)
    if local_digest != expected_digest:
        os.unlink(target_file)
        msg = ("Downloading %s failed (MD5 Digest %s did not match expected %s)" %
               (url, local_digest, expected_digest))
        raise PBundlerException(msg)
    else:
        # local file is ok
        return
def try_auth(self):
    """Check whether the user is already authenticated."""
    if self.authenticated:
        return True

    # try seeing if we really need to request login
    if not self.forget:
        try:
            self.cookiejar.load()
        except IOError:
            pass

    req_url = urljoin(self.base, config.urls['auth'])
    req_url += '?GoAheadAndLogIn=1'
    req = Request(req_url, None, config.headers)
    if self.httpuser and self.httppassword:
        base64string = base64.encodestring('%s:%s' % (self.httpuser, self.httppassword))[:-1]
        req.add_header("Authorization", "Basic %s" % base64string)
    resp = self.opener.open(req)
    re_request_login = re.compile(r'<title>.*Log in to Bugzilla</title>')
    if not re_request_login.search(resp.read()):
        self.log('Already logged in.')
        self.authenticated = True
        return True
    return False
def parseFeed(cls, url, lastModified=None, etag=None):
    """Return tuple of feed object, last-modified, etag."""
    req = Request(normalize_url(url))
    if lastModified:
        req.add_header('if-modified-since', lastModified)
    if etag:
        req.add_header('if-none-match', etag)
    resp = None
    try:
        resp = urlopen(req, None, 10)
    except HTTPError as error:
        # HTTP 304 Not Modified raises an exception
        resp = error
    # url of local file returns empty code
    if resp.code and resp.code != 200:
        return None
    feedDoc = etree.parse(resp)
    feedType = None
    for ft in cls._feedTypes:
        if ft.accept(feedDoc):
            feedType = ft
            break
    if not feedType:
        raise ValueError('Cannot handle ' + feedDoc.getroot().tag)
    return (feedType(url, feedDoc),
            resp.headers.get('last-modified'),
            resp.headers.get('etag'))
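# Conditional-GET usage sketch (hypothetical feed URL and cache variables):
# pass the last-modified/etag values from the previous call back in, so the
# server can answer HTTP 304 instead of resending the whole feed. Note that
# parseFeed() returns None (not a tuple) when the feed is unchanged.
#
#   feed, last_mod, etag = cls.parseFeed('http://example.org/feed.xml')
#   ...later...
#   result = cls.parseFeed('http://example.org/feed.xml',
#                          lastModified=last_mod, etag=etag)
#   if result is None:
#       pass  # feed unchanged since last fetch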
def _download_as_child(url, if_modified_since):
    from httplib import HTTPException
    from urllib2 import urlopen, Request, HTTPError, URLError
    try:
        #print "Child downloading", url
        if url.startswith('http:') or url.startswith('https:') or url.startswith('ftp:'):
            req = Request(url)
            if url.startswith('http:') and if_modified_since:
                req.add_header('If-Modified-Since', if_modified_since)
            src = urlopen(req)
        else:
            raise Exception(_('Unsupported URL protocol in: %s') % url)

        try:
            sock = src.fp._sock
        except AttributeError:
            sock = src.fp.fp._sock  # Python 2.5 on FreeBSD

        while True:
            data = sock.recv(256)
            if not data:
                break
            os.write(1, data)

        sys.exit(RESULT_OK)
    except (HTTPError, URLError, HTTPException) as ex:
        if isinstance(ex, HTTPError) and ex.code == 304:
            # Not modified
            sys.exit(RESULT_NOT_MODIFIED)
        print >>sys.stderr, "Error downloading '" + url + "': " + (str(ex) or str(ex.__class__.__name__))
        sys.exit(RESULT_FAILED)
def __init__(self, url, method="GET", headers=None, data=None,
             downloadTo=None, closeConnection=False, proxy=None,
             redirectedFrom=None, unredirectedHeaders=None, **kw):
    """ """
    headers = headers or dict()
    urllib2_Request.__init__(
        self, str(url),
        data=data,
        headers=headers,
        origin_req_host=kw.get("origin_req_host", redirectedFrom),
        unverifiable=kw.get("unverifiable", False),
    )
    Message.__init__(self, url, method, self.headers)
    self.host = self._url.host
    self.port = self._url.port
    self.setProxy(proxy)
    assert isinstance(self.headers, util.InsensitiveDict)
    unredirectedHeaders = unredirectedHeaders or dict()
    self.unredirectedHeaders = util.InsensitiveDict(unredirectedHeaders)
    self.closeConnection = closeConnection is True
    self.downloadTo = downloadTo
    self.redirectedTo = None
    self.redirectedFrom = tuple()
    self.response = defer.Deferred()
def test_conn(request):
    # use a different name for the outgoing urllib2 Request so it does not
    # shadow the Django view's `request` argument
    req = Request('http://oauth.tangyue.com/?aa=aa')
    response = urlopen(req)
    return HttpResponse(response.read())
def update_library(self, ep_obj):
    # Values from config
    if not sickbeard.USE_PYTIVO:
        return False

    host = sickbeard.PYTIVO_HOST
    shareName = sickbeard.PYTIVO_SHARE_NAME
    tsn = sickbeard.PYTIVO_TIVO_NAME

    # There are two more values required, the container and file.
    #
    # container: The share name, show name and season
    #
    # file: The file name
    #
    # Some slicing and dicing of variables is required to get at these values.
    #
    # There might be better ways to arrive at the values, but this is the
    # best I have been able to come up with.
    #

    # Calculated values
    showPath = ep_obj.show.location
    showName = ep_obj.show.name
    rootShowAndSeason = ek.ek(os.path.dirname, ep_obj.location)
    absPath = ep_obj.location

    # Some show names have colons in them which are illegal in a path
    # location, so strip them out. (Are there other characters?)
    showName = showName.replace(":", "")

    root = showPath.replace(showName, "")
    showAndSeason = rootShowAndSeason.replace(root, "")

    container = shareName + "/" + showAndSeason
    file = "/" + absPath.replace(root, "")

    # Finally create the url and make request
    requestUrl = "http://" + host + "/TiVoConnect?" + urlencode(
        {'Command': 'Push', 'Container': container, 'File': file, 'tsn': tsn})

    logger.log(u"pyTivo notification: Requesting " + requestUrl)

    request = Request(requestUrl)

    try:
        response = urlopen(request)  #@UnusedVariable
    except URLError, e:
        if hasattr(e, 'reason'):
            logger.log(u"pyTivo notification: Error, failed to reach a server")
            logger.log(u"Error reason: " + str(e.reason))
            return False
        elif hasattr(e, 'code'):
            logger.log(u"pyTivo notification: Error, the server couldn't fulfill the request")
            logger.log(u"Error code: " + str(e.code))
            return False
from urllib2 import Request, urlopen

values = """
{
    "image": "http://35.154.49.223/image/ankit/ankit_enroll2.jpg",
    "gallery_name": "MyGallery"
}
"""

headers = {
    'Content-Type': 'application/json',
    'app_id': 'fe2b1d88',
    'app_key': '622354d3f6cbcfde77192f290ef6e293'
}
request = Request('https://api.kairos.com/recognize', data=values, headers=headers)

response_body = urlopen(request).read()
#print response_body

b = response_body.split(":")
c = b[3]
d = c.split(",")

a = "Ankit_Sinha"
b = "Anurag_Kumar"

if a in response_body:
    print("Attendance Updated.")
    print("Ankit Sinha Found.")
    print("Confidence: " + d[0])
elif b in response_body:
def getWeather(self):
    # skip if weather-widget is already up to date
    tdelta = datetime.now() - datetime.strptime(
        config.plugins.MetrixWeather.lastUpdated.value, "%Y-%m-%d %H:%M:%S")
    if int(tdelta.seconds) < (config.plugins.MetrixWeather.refreshInterval.value * 60):  # 1=60 for testing purposes
        return

    id = ""
    name = ""
    temp = ""
    temp_max = ""
    temp_min = ""

    cityname = config.plugins.MetrixWeather.weathercity.value
    print "[OMMetrixWeather] lookup for city " + str(cityname)

    language = config.osd.language.value.replace('_', '-')
    if language == 'en-EN':
        language = 'en-US'
    city = "%s" % cityname
    feedurl = "http://weather.service.msn.com/data.aspx?weadegreetype=%s&culture=%s&weasearchstr=%s&src=outlook" % (
        self.getTemp(), language, urllib2_quote(city))

    msnrequest = Request(feedurl, None, std_headers)
    try:
        msnpage = urlopen2(msnrequest)
    except (URLError) as err:
        print '[OMMetrixWeather] Error: Unable to retrieve page - Error code: ', str(err)
        config.plugins.MetrixWeather.lastUpdated.value = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        return

    content = msnpage.read()
    msnpage.close()

    dom = parseString(content)
    currentWeather = dom.getElementsByTagName('weather')[0]

    titlemy = currentWeather.getAttributeNode('weatherlocationname')
    config.plugins.MetrixWeather.currentLocation.value = titlemy.nodeValue
    name = titlemy.nodeValue

    idmy = currentWeather.getAttributeNode('weatherlocationcode')
    id = idmy.nodeValue

    currentWeather = dom.getElementsByTagName('current')[0]
    currentWeatherCode = currentWeather.getAttributeNode('skycode')
    config.plugins.MetrixWeather.currentWeatherCode.value = self.ConvertCondition(currentWeatherCode.nodeValue)
    currentWeatherTemp = currentWeather.getAttributeNode('temperature')
    temp = currentWeatherTemp.nodeValue
    config.plugins.MetrixWeather.currentWeatherTemp.value = currentWeatherTemp.nodeValue
    currentWeatherText = currentWeather.getAttributeNode('skytext')
    config.plugins.MetrixWeather.currentWeatherText.value = currentWeatherText.nodeValue

    n = 1
    currentWeather = dom.getElementsByTagName('forecast')[n]
    currentWeatherCode = currentWeather.getAttributeNode('skycodeday')
    config.plugins.MetrixWeather.forecastTodayCode.value = self.ConvertCondition(currentWeatherCode.nodeValue)
    currentWeatherTemp = currentWeather.getAttributeNode('high')
    temp_max = currentWeatherTemp.nodeValue
    config.plugins.MetrixWeather.forecastTodayTempMax.value = currentWeatherTemp.nodeValue
    currentWeatherTemp = currentWeather.getAttributeNode('low')
    temp_min = currentWeatherTemp.nodeValue
    config.plugins.MetrixWeather.forecastTodayTempMin.value = currentWeatherTemp.nodeValue
    currentWeatherText = currentWeather.getAttributeNode('skytextday')
    config.plugins.MetrixWeather.forecastTodayText.value = currentWeatherText.nodeValue

    currentWeather = dom.getElementsByTagName('forecast')[n + 1]
    currentWeatherCode = currentWeather.getAttributeNode('skycodeday')
    config.plugins.MetrixWeather.forecastTomorrowCode.value = self.ConvertCondition(currentWeatherCode.nodeValue)
    currentWeatherTemp = currentWeather.getAttributeNode('high')
    config.plugins.MetrixWeather.forecastTomorrowTempMax.value = currentWeatherTemp.nodeValue
    currentWeatherTemp = currentWeather.getAttributeNode('low')
    config.plugins.MetrixWeather.forecastTomorrowTempMin.value = currentWeatherTemp.nodeValue
    currentWeatherText = currentWeather.getAttributeNode('skytextday')
    config.plugins.MetrixWeather.forecastTomorrowText.value = currentWeatherText.nodeValue

    config.plugins.MetrixWeather.save()
    configfile.save()
    c = hex(random.randrange(0, 256))
    a = a[2:]
    b = b[2:]
    c = c[2:]
    if len(a) < 2:
        a = "0" + a
    if len(b) < 2:
        b = "0" + b
    if len(c) < 2:
        c = "0" + c
    z = a + b + c
    return "#" + z.upper()

# default config
url = 'http://www.viki.com'
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
html = urlopen(req)
bsObj = BeautifulSoup(html.read(), "html.parser")

# find duplicate links of homepage
links = []
for link in bsObj.findAll("a"):
    links.append(link.get('href'))
links = RiteshKumar(links)

# highlight the duplicate links
duplicateIndex = 0
for duplicateLink in links:
    color = hexCodeColors()
    duplicateIndex = duplicateIndex + 1
with open('result_url_topic_desc_JSON.json', 'w') as f:
    json.dump(rows, f, sort_keys=False, indent=4,
              separators=(',', ': '), encoding='utf-8')
"""

user_input = raw_input("Enter your search string : ")
Google.search1(user_input)  # user search string
#Google.search1('cloud managed services')  # user search string

df2 = pd.DataFrame()
df2 = pd.read_csv('result_url_topic_desc.csv', encoding='utf-8')

phn_1 = []   # store all the extracted phone numbers in a list
mail_1 = []  # store all the extracted e-mail addresses in a list

for row in df2.iterrows():  # Parse through each url in the list.
    try:
        try:
            req1 = Request(row[1]['URL'], headers={
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) '
                              'AppleWebKit/537.36 (KHTML, like Gecko) '
                              'Chrome/55.0.2883.75 Safari/537.36'})
            gcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)  # Bypass SSL certificate verification
            f = urlopen(req1, context=gcontext)
            url_name = f.geturl()  # extract URL name
            s = f.read()
            phone = re.findall(r"((?:\d{3}|\(\d{3}\))?(?:\s|-|\.)?\d{3}(?:\s|-|\.)\d{4})", s)  # Phone regex
            emails = re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,3}", s)  # Email regex
            if len(phone) == 0:
                print("No phone number found.")
                err_msg_phn = "No phone number found."
                phn_1.append((url_name, err_msg_phn))
            else:
                count = 1
                for item in phone:
dnsomaticSuccess = True

# if testing, OR if the IP addresses don't match, OR if it is currently the
# 3AM hour, then update the IP addresses...
if (testing) or ((lookup != myip) or (datetime.datetime.now().hour == 3)):
    # change the DNS entry
    data = {}
    data['hostname'] = hostname
    data['myip'] = myip
    data['wildcard'] = wildcard
    data['mx'] = mx
    data['backmx'] = backmx
    url_values = urlencode(data)
    url = 'https://updates.dnsomatic.com:443/nic/update?' + url_values

    request = Request(url)
    base64string = encodestring(username + ':' + password)[:-1]
    request.add_header("Authorization", "Basic %s" % base64string)
    request.add_header('User-Agent', username + ' - Home User - 1.0')

    htmlFile = urlopen(request)
    htmlData = htmlFile.read()
    htmlFile.close()

    results = htmlData.split()
    if results[0] == 'good':
        logging.info('DNS-O-Matic updated to: ' + results[1])
        print "DNS-O-Matic updated to: " + results[1]
    else:
        logging.info("DNS-O-Matic update failed with error: " + results[0])
from urllib2 import Request, urlopen
from CoinMktAPI import CoinMktAPI

URL = "%s/%s/%s/%s" % (CoinMktAPI.URL, "order/cancel",
                       CoinMktAPI.SessionToken, CoinMktAPI.TradeId)
print "URL : " + URL

# Note: the second positional argument of Request is the request *body*, not
# the method name; passing the string "POST" here sends it as the body, which
# incidentally makes urllib2 issue a POST request.
request = Request(URL, "POST")
response_body = urlopen(request).read()
print response_body
def download(self, source=None, destination=None, skip_download=False):
    '''Download MODIS Level 2 files.

    Kwargs:
    * source (str, optional)
        Full FTP source for MODIS level-2 data to override default settings
    * destination (str, optional)
        Full local path to save the level2 files
    '''
    url = self.getUrl() if source is None else source
    if self.nrt is True:
        otag = 'MODIS_NRT_C' + str(self.collection).strip()
    else:
        otag = 'MODIS_SCI_C' + str(self.collection).strip()
    self.local = os.path.join(
        os.environ['DATADIR'] if destination is None else destination,
        otag, self.product, self.dt2yjd())
    if skip_download is True:
        return

    #------------------------------------------------------------ Print info
    print '[Collection={}; NRT={}; Remote={}; Local={}]'.format(
        str(self.collection), str(self.nrt), str(self.remote), str(self.local))
    #------------------------------------------------------------ Print info

    # Create destination path if necessary
    if not os.path.exists(self.local):
        try:
            os.makedirs(self.local)
        except IOError as e:
            print e
            sys.exit(1)

    # Begin download l2 files
    cnt = 0
    try:
        # print "URL>>>" + url
        r = urlopen(Request(url))
        regex = r'(' + self.product + '.+?.hdf)'
        # Read l2 filenames from manifest file
        for line in r.readlines():
            if '.hdf' in line:
                # filter file with matching regex pattern
                matches = re.findall(regex, line)
                if (len(matches) > 0):
                    remote = url + '/' + matches[-1]
                    local = self.local + '/' + matches[-1]
                    # Download new files
                    if not os.path.exists(local):
                        cnt += 1
                        #print '  Downloading ' + matches[-1]
                        print matches[-1]
                        with closing(urlopen(remote)) as r:
                            with open(local, 'wb') as f:
                                copyfileobj(r, f)
        if (cnt > 0):
            print dt.utcnow().strftime('%T') + ' Download complete.\n'
        else:
            print 'All remote files already in: ' + self.local
    except KeyboardInterrupt:
        print 'Interrupted'
        sys.exit(0)
    except URLError, e:
        print ' ** URLError=' + str(e.reason)
        print '    Hint: switching remote server might help'
        if not os.listdir(self.local):
            # remove local directory if empty
            os.rmdir(self.local)
        self.status[0] = -1
    return (self.status[0])
from urllib2 import Request, urlopen, URLError, HTTPError

req = Request('http://bbs.csdn.net/callmewhy')
try:
    response = urlopen(req)
except URLError, e:
    if hasattr(e, 'code'):
        print 'The server couldn\'t fulfill the request.'
        print 'Error code: ', e.code
    elif hasattr(e, 'reason'):
        print 'We failed to reach a server.'
        print 'Reason: ', e.reason
else:
    print 'No exception was raised.'
    # everything is fine
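# Why 'code' is tested before 'reason' above: HTTPError is a subclass of
# URLError, always carries .code, and in later 2.7 releases also exposes
# .reason. Checking 'code' first therefore routes HTTP-level failures (e.g.
# a 404) to the status-code branch and leaves the .reason branch for
# connection-level failures (DNS lookup errors, refused connections).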
def lambda_handler(event, context):
    print "Running EC2 Scheduler"

    ec2 = boto3.client('ec2')
    cf = boto3.client('cloudformation')
    outputs = {}
    stack_name = context.invoked_function_arn.split(':')[6].rsplit('-', 2)[0]
    response = cf.describe_stacks(StackName=stack_name)
    for e in response['Stacks'][0]['Outputs']:
        outputs[e['OutputKey']] = e['OutputValue']
    ddbTableName = outputs['DDBTableName']

    awsRegions = ec2.describe_regions()['Regions']

    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table(ddbTableName)
    response = table.get_item(Key={'SolutionName': 'EC2Scheduler'})
    item = response['Item']

    # Reading Default Values from DynamoDB
    customTagName = str(item['CustomTagName'])
    customTagLen = len(customTagName)
    defaultStartTime = str(item['DefaultStartTime'])
    defaultStopTime = str(item['DefaultStopTime'])
    defaultTimeZone = 'utc'
    defaultDaysActive = str(item['DefaultDaysActive'])
    sendData = str(item['SendAnonymousData']).lower()
    createMetrics = str(item['CloudWatchMetrics']).lower()
    UUID = str(item['UUID'])
    TimeNow = datetime.datetime.utcnow().isoformat()
    TimeStamp = str(TimeNow)

    # Declare Dicts
    regionDict = {}
    allRegionDict = {}
    regionsLabelDict = {}
    postDict = {}

    for region in awsRegions:
        try:
            # Create connection to the EC2 using Boto3 resources interface
            ec2 = boto3.resource('ec2', region_name=region['RegionName'])
            awsregion = region['RegionName']
            now = datetime.datetime.now().strftime("%H%M")
            nowMax = datetime.datetime.now() - datetime.timedelta(minutes=59)
            nowMax = nowMax.strftime("%H%M")
            nowDay = datetime.datetime.today().strftime("%a").lower()

            # Declare Lists
            startList = []
            stopList = []
            runningStateList = []
            stoppedStateList = []

            # List all instances
            instances = ec2.instances.all()

            print "Creating", region['RegionName'], "instance lists..."

            for i in instances:
                if i.tags != None:
                    for t in i.tags:
                        if t['Key'][:customTagLen] == customTagName:
                            ptag = t['Value'].split(";")

                            # Split out Tag & Set Variables to default
                            default1 = 'default'
                            default2 = 'true'
                            startTime = defaultStartTime
                            stopTime = defaultStopTime
                            timeZone = defaultTimeZone
                            daysActive = defaultDaysActive
                            state = i.state['Name']
                            itype = i.instance_type

                            # Post current state of the instances
                            if createMetrics == 'enabled':
                                if state == "running":
                                    putCloudWatchMetric(region['RegionName'], i.instance_id, 1)
                                if state == "stopped":
                                    putCloudWatchMetric(region['RegionName'], i.instance_id, 0)

                            # Parse tag-value
                            if len(ptag) >= 1:
                                if ptag[0].lower() in (default1, default2):
                                    startTime = defaultStartTime
                                else:
                                    startTime = ptag[0]
                                    stopTime = ptag[0]
                            if len(ptag) >= 2:
                                stopTime = ptag[1]
                            if len(ptag) >= 3:
                                timeZone = ptag[2].lower()
                            if len(ptag) >= 4:
                                daysActive = ptag[3].lower()

                            isActiveDay = False

                            # Days Interpreter
                            if daysActive == "all":
                                isActiveDay = True
                            elif daysActive == "weekdays":
                                weekdays = ['mon', 'tue', 'wed', 'thu', 'fri']
                                if (nowDay in weekdays):
                                    isActiveDay = True
                            else:
                                daysActive = daysActive.split(",")
                                for d in daysActive:
                                    if d.lower() == nowDay:
                                        isActiveDay = True

                            # Append to start list
                            if startTime >= str(nowMax) and startTime <= str(now) and \
                                    isActiveDay == True and state == "stopped":
                                startList.append(i.instance_id)
                                print i.instance_id, " added to START list"
                                if createMetrics == 'enabled':
                                    putCloudWatchMetric(region['RegionName'], i.instance_id, 1)

                            # Append to stop list
                            if stopTime >= str(nowMax) and stopTime <= str(now) and \
                                    isActiveDay == True and state == "running":
                                stopList.append(i.instance_id)
                                print i.instance_id, " added to STOP list"
                                if createMetrics == 'enabled':
                                    putCloudWatchMetric(region['RegionName'], i.instance_id, 0)

                            if state == 'running':
                                runningStateList.append(itype)
                            if state == 'stopped':
                                stoppedStateList.append(itype)

            # Execute Start and Stop Commands
            if startList:
                print "Starting", len(startList), "instances", startList
                ec2.instances.filter(InstanceIds=startList).start()
            else:
                print "No Instances to Start"

            if stopList:
                print "Stopping", len(stopList), "instances", stopList
                ec2.instances.filter(InstanceIds=stopList).stop()
            else:
                print "No Instances to Stop"

            # Build payload for each region
            if sendData == "yes":
                countRunDict = {}
                typeRunDict = {}
                countStopDict = {}
                typeStopDict = {}
                runDictType = {}
                stopDictType = {}

                runDict = dict(Counter(runningStateList))
                for k, v in runDict.iteritems():
                    countRunDict['Count'] = v
                    typeRunDict[k] = countRunDict['Count']

                stopDict = dict(Counter(stoppedStateList))
                for k, v in stopDict.iteritems():
                    countStopDict['Count'] = v
                    typeStopDict[k] = countStopDict['Count']

                runDictType['instance_type'] = typeRunDict
                stopDictType['instance_type'] = typeStopDict

                typeStateSum = {}
                typeStateSum['running'] = runDictType
                typeStateSum['stopped'] = stopDictType

                StateSum = {}
                StateSum['instance_state'] = typeStateSum
                regionDict[awsregion] = StateSum
                allRegionDict.update(regionDict)

        except Exception as e:
            print("Exception: " + str(e))
            continue

    # Build payload for the account
    if sendData == "yes":
        regionsLabelDict['regions'] = allRegionDict
        postDict['Data'] = regionsLabelDict
        postDict['TimeStamp'] = TimeStamp
        postDict['Solution'] = 'SO0002'
        postDict['UUID'] = UUID

        # API Gateway URL to make HTTP POST call
        url = 'https://metrics.awssolutionsbuilder.com/generic'
        data = json.dumps(postDict)
        headers = {'content-type': 'application/json'}
        req = Request(url, data, headers)
        rsp = urlopen(req)
        content = rsp.read()
        rsp_code = rsp.getcode()
        print('Response Code: {}'.format(rsp_code))
name: "UHH2" ) { pullRequest(number: %d) { id } } } """ % (options.id) GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN', None) if not GITHUB_TOKEN: raise RuntimeError("No github token, cannot make API requests") headers = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': 'bearer %s' % GITHUB_TOKEN } blob = json.dumps(dict(query=query.strip())).encode('utf-8') this_request = Request("https://api.github.com/graphql", blob, headers) response = urlopen(this_request) data = json.loads(response.read().decode('utf-8')) pr = data['data']['repository']['pullRequest'] if pr: pr_id = pr['id'] print(pr_id) if options.dumpFilename != "": with open(options.dumpFilename, "a") as f: f.write("export %s=%s\n" % (options.dumpVarname, pr_id)) else: print("No Pull Request number %d" % options.id)
import categoriser
from urllib2 import Request, urlopen, URLError
import json
import os, sys
import codecs
import time

reload(sys)
sys.setdefaultencoding("utf-8")

request = Request('http://juicer.api.bbci.co.uk/articles?q=%22london%22&recent_first=yes&api_key=<API_KEY>')

before = "2016-06-01"
after = "2016-01-01"
key = '<API-KEY>'
source = "&sources%5B%5D=1"

call = ("http://juicer.api.bbci.co.uk/articles?recent_first=yes" + source +
        "&published_after=" + after + "T00:00:00.000Z" +
        "&published_before=" + before + "T00:00:00.000Z" +
        "&size=10" + "&api_key=" + key)
print call

response = urlopen(call)
articleJSON = response.read()
result = json.loads(articleJSON)
hits = int(result['total'])
print hits

try:
    f = open('bbc-dataset-201601-201607.tab', 'a')
    size = 10  # default apparently, getting error with setting size in API call
vt_hunter_downloader_log = 'vt_hunter_downloader_log.txt'


def write_log(filename, line):
    f = open(filename, 'a')
    f.write(line)
    f.close()  # close() must be called, not merely referenced


write_log(hash_log, "\n----------\n{0}\n----------\n".format(datetime.datetime.now()))
write_log(vt_hunter_downloader_log, "\n----------\n{0}\n----------\n".format(datetime.datetime.now()))

# Get the hunting url json contents:
request = Request(virustotal_notification_url)
request_response = urlopen(request).read()
data = json.loads(request_response)

sha_list = []  # Store the Sha's for download
i = 0
for d in data['notifications']:
    # if d['subject'] == 'hancitor':
    try:
        for item in data['notifications'][i]:
            date = data['notifications'][i]['date']
            sha256 = data['notifications'][i]['sha256']
            size = data['notifications'][i]['size']
            ruleset_name = data['notifications'][i]['ruleset_name']
            first_seen = data['notifications'][i]['first_seen']
            AV_positives = data['notifications'][i]['positives']
def http_get(self, url):
    request = Request(url)
    request.add_header("Content-Type", "text/xml;charset=UTF-8")
    response = self.opener.open(request)
    return response
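# Usage sketch (the URL is a placeholder): self.opener is assumed to be an
# OpenerDirector built elsewhere in the class, e.g. with cookie or auth
# handlers attached, which is why the method goes through it rather than
# calling urlopen directly.
#
#   response = client.http_get('http://example.com/service?wsdl')
#   body = response.read()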
def request_factory(path='/'):
    url = 'http://127.0.0.1:5001%s' % path
    headers = {
        'Content-Type': 'application/html'
    }
    return Request(url, data=html_data.encode('utf-8'), headers=headers,
                   method='POST')
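# Usage sketch: build the POST request and submit it (html_data is assumed to
# be defined at module level by the surrounding test code; this snippet is
# Python 3, since Request(..., method=...) does not exist in urllib2).
#
#   from urllib.request import urlopen
#   with urlopen(request_factory('/submit')) as resp:
#       print(resp.getcode())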
class YTTrailer:
    def __init__(self, session):
        self.session = session
        # self.l3cert = etpm.getCert(eTPM.TPMD_DT_LEVEL3_CERT)

    def showTrailer(self, eventname):
        if eventname:
            feeds = self.getYTFeeds(eventname, 1)
            if feeds and len(feeds.entry) >= 1:
                ref = self.setServiceReference(feeds.entry[0])
                if ref:
                    self.session.open(TrailerPlayer, ref)

    def getYTFeeds(self, eventname, max_results):
        yt_service = gdata.youtube.service.YouTubeService()
        # developer key and client id taken from mytube-plugin with
        # permission from acid_burn.
        yt_service.developer_key = 'AI39si4AjyvU8GoJGncYzmqMCwelUnqjEMWTFCcUtK-VUzvWygvwPO-sadNwW5tNj9DDCHju3nnJEPvFy4WZZ6hzFYCx8rJ6Mw'
        yt_service.client_id = 'ytapi-dream-MyTubePlayer-i0kqrebg-0'
        query = gdata.youtube.service.YouTubeVideoQuery()
        if int(config.plugins.yttrailer.best_resolution.value) <= 1:
            shd = "HD"
        else:
            shd = ""
        query.vq = "%s %s Trailer %s" % (eventname, shd, config.plugins.yttrailer.ext_descr.value)
        query.max_results = max_results
        try:
            feeds = yt_service.YouTubeQuery(query)
        except gaierror:
            feeds = None
        return feeds

    def setServiceReference(self, entry):
        url = self.getVideoUrl(entry)
        if url:
            ref = eServiceReference(4097, 0, url)
            ref.setName(entry.media.title.text)
        else:
            ref = None
        return ref

    def getTubeId(self, entry):
        ret = None
        if entry.media.player:
            split = entry.media.player.url.split("=")
            ret = split.pop()
            if ret.startswith('youtube_gdata'):
                tmpval = split.pop()
                if tmpval.endswith("&feature"):
                    tmp = tmpval.split("&")
                    ret = tmp.pop(0)
        return ret

    def getVideoUrl(self, entry):
        std_headers = {
            'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.6) Gecko/20100627 Firefox/3.6.6',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-us,en;q=0.5',
        }

        VIDEO_FMT_PRIORITY_MAP = {
            '18': 4,  # MP4 360p
            '35': 5,  # FLV 480p
            '34': 6,  # FLV 360p
        }

        if int(config.plugins.yttrailer.best_resolution.value) <= 1:
            VIDEO_FMT_PRIORITY_MAP["38"] = 1  # MP4 Original (HD)
            VIDEO_FMT_PRIORITY_MAP["22"] = 3  # MP4 720p (HD)
            if int(config.plugins.yttrailer.best_resolution.value) == 0:
                VIDEO_FMT_PRIORITY_MAP["37"] = 2  # MP4 1080p (HD)

        video_url = None
        video_id = str(self.getTubeId(entry))

        # Getting video webpage
        # URLs for YouTube video pages will change from the format
        # http://www.youtube.com/watch?v=ylLzyHk54Z0 to
        # http://www.youtube.com/watch#!v=ylLzyHk54Z0.
        watch_url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en' % video_id
        watchrequest = Request(watch_url, None, std_headers)
        try:
            print "[YTTrailer] trying to find out if a HD Stream is available", watch_url
            watchvideopage = urlopen2(watchrequest).read()
        except (URLError, HTTPException, socket_error), err:
            print "[YTTrailer] Error: Unable to retrieve watchpage - Error code: ", str(err)
            return video_url

        # Get video info
        for el in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
            info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                        % (video_id, el))
            request = Request(info_url, None, std_headers)
            try:
                infopage = urlopen2(request).read()
                videoinfo = parse_qs(infopage)
                # check each key separately; the original "('a' or 'b') in d"
                # only ever tested the first key
                if 'url_encoded_fmt_stream_map' in videoinfo or 'fmt_url_map' in videoinfo:
                    break
            except (URLError, HTTPException, socket_error), err:
                print "[YTTrailer] Error: unable to download video infopage", str(err)
                return video_url
from urllib2 import urlopen, Request, URLError, HTTPError

try:
    timeout = 2
    url = 'http://10.251.21.176:9001/index.html?processname=elasticsearch&action=start'
    response = urlopen(Request(url), timeout=timeout)
    html = response.read()
except (URLError, HTTPError) as e:
    print e
from urllib2 import urlopen, Request

headers = {'Authorization': 'Token token=eebf8075f1d24f268bb2f15a4f159a95'}
url = "http://www.cepaberto.com/api/v2/ceps.json?cep=40010000"
json = urlopen(Request(url, None, headers=headers)).read()
print json

headers = {'Authorization': 'Token token=eebf8075f1d24f268bb2f15a4f159a95'}
url = "http://www.cepaberto.com/api/v2/cities.json?estado=SP"
json = urlopen(Request(url, None, headers=headers)).read()
print json
def speedtest():
    """Run the full speedtest.net test"""

    global shutdown_event, source
    shutdown_event = threading.Event()

    signal.signal(signal.SIGINT, ctrl_c)

    description = (
        'Command line interface for testing internet bandwidth using '
        'speedtest.net.\n'
        '------------------------------------------------------------'
        '--------------\n'
        'https://github.com/sivel/speedtest-cli')

    parser = ArgParser(description=description)
    # Give optparse.OptionParser an `add_argument` method for
    # compatibility with argparse.ArgumentParser
    try:
        parser.add_argument = parser.add_option
    except AttributeError:
        pass
    parser.add_argument('--bytes', dest='units', action='store_const',
                        const=('bytes', 1), default=('bits', 8),
                        help='Display values in bytes instead of bits. Does '
                             'not affect the image generated by --share')
    parser.add_argument('--share', action='store_true',
                        help='Generate and provide a URL to the speedtest.net '
                             'share results image')
    parser.add_argument('--simple', action='store_true',
                        help='Suppress verbose output, only show basic '
                             'information')
    parser.add_argument('--list', action='store_true',
                        help='Display a list of speedtest.net servers '
                             'sorted by distance')
    parser.add_argument('--server', help='Specify a server ID to test against')
    parser.add_argument('--mini', help='URL of the Speedtest Mini server')
    parser.add_argument('--source', help='Source IP address to bind to')
    parser.add_argument('--version', action='store_true',
                        help='Show the version number and exit')

    options = parser.parse_args()
    if isinstance(options, tuple):
        args = options[0]
    else:
        args = options
    del options

    # Print the version and exit
    if args.version:
        version()

    # If specified bind to a specific IP address
    if args.source:
        source = args.source
        socket.socket = bound_socket

    if not args.simple:
        print_('Retrieving speedtest.net configuration...')
    try:
        config = getConfig()
    except URLError:
        print_('Cannot retrieve speedtest configuration')
        sys.exit(1)

    if not args.simple:
        print_('Retrieving speedtest.net server list...')
    if args.list or args.server:
        servers = closestServers(config['client'], True)
        if args.list:
            serverList = []
            for server in servers:
                line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
                        '[%(d)0.2f km]' % server)
                serverList.append(line)
            # Python 2.7 and newer seem to be ok with the resultant encoding
            # from parsing the XML, but older versions have some issues.
            # This block should detect whether we need to encode or not
            try:
                unicode()
                print_('\n'.join(serverList).encode('utf-8', 'ignore'))
            except NameError:
                print_('\n'.join(serverList))
            except IOError:
                pass
            sys.exit(0)
    else:
        servers = closestServers(config['client'])

    if not args.simple:
        print_('Testing from %(isp)s (%(ip)s)...' % config['client'])

    if args.server:
        try:
            best = getBestServer(filter(lambda x: x['id'] == args.server,
                                        servers))
        except IndexError:
            print_('Invalid server ID')
            sys.exit(1)
    elif args.mini:
        name, ext = os.path.splitext(args.mini)
        if ext:
            url = os.path.dirname(args.mini)
        else:
            url = args.mini
        urlparts = urlparse(url)
        try:
            f = urlopen(args.mini)
        except:
            print_('Invalid Speedtest Mini URL')
            sys.exit(1)
        else:
            text = f.read()
            f.close()
        extension = re.findall('upload_extension: "([^"]+)"', text.decode())
        if not extension:
            for ext in ['php', 'asp', 'aspx', 'jsp']:
                try:
                    f = urlopen('%s/speedtest/upload.%s' % (args.mini, ext))
                except:
                    pass
                else:
                    data = f.read().strip()
                    if (f.code == 200 and
                            len(data.splitlines()) == 1 and
                            re.match('size=[0-9]', data)):
                        extension = [ext]
                        break
        if not urlparts or not extension:
            print_('Please provide the full URL of your Speedtest Mini server')
            sys.exit(1)
        servers = [{
            'sponsor': 'Speedtest Mini',
            'name': urlparts[1],
            'd': 0,
            'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
            'latency': 0,
            'id': 0
        }]
        try:
            best = getBestServer(servers)
        except:
            best = servers[0]
    else:
        if not args.simple:
            print_('Selecting best server based on latency...')
        best = getBestServer(servers)

    if not args.simple:
        # Python 2.7 and newer seem to be ok with the resultant encoding
        # from parsing the XML, but older versions have some issues.
        # This block should detect whether we need to encode or not
        try:
            unicode()
            print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
                    '%(latency)s ms' % best).encode('utf-8', 'ignore'))
        except NameError:
            print_('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
                   '%(latency)s ms' % best)
    else:
        print_('Ping: %(latency)s ms' % best)

    sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
    urls = []
    for size in sizes:
        for i in range(0, 4):
            urls.append('%s/random%sx%s.jpg' %
                        (os.path.dirname(best['url']), size, size))
    if not args.simple:
        print_('Testing download speed', end='')
    dlspeed = downloadSpeed(urls, args.simple)
    if not args.simple:
        print_()
    print_('Download: %0.2f M%s/s' %
           ((dlspeed / 1000 / 1000) * args.units[1], args.units[0]))

    sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
    sizes = []
    for size in sizesizes:
        for i in range(0, 25):
            sizes.append(size)
    if not args.simple:
        print_('Testing upload speed', end='')
    ulspeed = uploadSpeed(best['url'], sizes, args.simple)
    if not args.simple:
        print_()
    print_('Upload: %0.2f M%s/s' %
           ((ulspeed / 1000 / 1000) * args.units[1], args.units[0]))

    if args.share and args.mini:
        print_('Cannot generate a speedtest.net share results image while '
               'testing against a Speedtest Mini server')
    elif args.share:
        dlspeedk = int(round((dlspeed / 1000) * 8, 0))
        ping = int(round(best['latency'], 0))
        ulspeedk = int(round((ulspeed / 1000) * 8, 0))

        # Build the request to send results back to speedtest.net
        # We use a list instead of a dict because the API expects parameters
        # in a certain order
        apiData = [
            'download=%s' % dlspeedk,
            'ping=%s' % ping,
            'upload=%s' % ulspeedk,
            'promo=',
            'startmode=%s' % 'pingselect',
            'recommendedserverid=%s' % best['id'],
            'accuracy=%s' % 1,
            'serverid=%s' % best['id'],
            'hash=%s' % md5(('%s-%s-%s-%s' %
                             (ping, ulspeedk, dlspeedk, '297aae72'))
                            .encode()).hexdigest()]

        req = Request('http://c.speedtest.net/api/api.php',
                      data='&'.join(apiData).encode())
        req.add_header('Referer', 'http://c.speedtest.net/flash/speedtest.swf')
        f = urlopen(req)
        response = f.read()
        code = f.code
        f.close()

        if int(code) != 200:
            print_('Could not submit results to speedtest.net')
            sys.exit(1)

        qsargs = parse_qs(response.decode())
        resultid = qsargs.get('resultid')
        if not resultid or len(resultid) != 1:
            print_('Could not submit results to speedtest.net')
            sys.exit(1)

        print_('Share results: http://c.speedtest.net/result/%s.png' %
               resultid[0])
def download_file_with_cookiejar(self, url, file_count, total, recursion=False):
    # See if we've already downloaded this file and, if so, whether it is the correct size
    download_file = os.path.basename(url).split('?')[0]
    if os.path.isfile(download_file):
        try:
            request = Request(url)
            request.get_method = lambda: 'HEAD'
            response = urlopen(request, timeout=30)
            remote_size = self.get_total_size(response)
            # Check that we were able to derive a size.
            if remote_size:
                local_size = os.path.getsize(download_file)
                if (remote_size < (local_size + (local_size * .01)) and
                        remote_size > (local_size - (local_size * .01))):
                    print(" > Download file {0} exists! \n > Skipping download of {1}. "
                          .format(download_file, url))
                    return None, None
                # Partial file size wasn't the full file size; blow away the chunk and start again
                print(" > Found {0} but it wasn't fully downloaded. Removing file and downloading again."
                      .format(download_file))
                os.remove(download_file)
        except ssl.CertificateError as e:
            print(" > ERROR: {0}".format(e))
            print(" > Could not validate SSL Cert. You may be able to overcome this using the --insecure flag")
            return False, None
        except HTTPError as e:
            if e.code == 401:
                print(" > IMPORTANT: Your user may not have permission to download this type of data!")
            else:
                print(" > Unknown Error, Could not get file HEAD: {0}".format(e))
        except URLError as e:
            print("URL Error (from HEAD): {0}, {1}".format(e.reason, url))
            if "ssl.c" in "{0}".format(e.reason):
                print("IMPORTANT: Remote location may not be accepting your SSL configuration. This is a terminal error.")
            return False, None

    # Attempt HTTPS connection
    try:
        request = Request(url)
        response = urlopen(request, timeout=30)

        # Watch for redirect
        if response.geturl() != url:
            # See if we were redirected BACK to URS for re-auth.
            if 'https://urs.earthdata.nasa.gov/oauth/authorize' in response.geturl():
                if recursion:
                    print(" > Entering seemingly endless auth loop. Aborting. ")
                    return False, None

                # Make this easier. If there is no app_type=401, add it
                new_auth_url = response.geturl()
                if "app_type" not in new_auth_url:
                    new_auth_url += "&app_type=401"

                print(" > While attempting to download {0}....".format(url))
                print(" > Need to obtain new cookie from {0}".format(new_auth_url))
                old_cookies = [cookie.name for cookie in self.cookie_jar]
                opener = build_opener(HTTPCookieProcessor(self.cookie_jar),
                                      HTTPHandler(),
                                      HTTPSHandler(**self.context))
                request = Request(new_auth_url)
                try:
                    response = opener.open(request)
                    for cookie in self.cookie_jar:
                        if cookie.name not in old_cookies:
                            print(" > Saved new cookie: {0}".format(cookie.name))
                            # A little hack to save session cookies
                            if cookie.discard:
                                cookie.expires = int(time.time()) + 60 * 60 * 24 * 30
                                print(" > Saving session Cookie that should have been discarded! ")
                    self.cookie_jar.save(self.cookie_jar_path,
                                         ignore_discard=True,
                                         ignore_expires=True)
                except HTTPError as e:
                    print("HTTP Error: {0}, {1}".format(e.code, url))
                    return False, None

                # Okay, now we have more cookies! Let's try again, recursively!
                print(" > Attempting download again with new cookies!")
                return self.download_file_with_cookiejar(url, file_count, total, recursion=True)

            print(" > 'Temporary' Redirect download @ Remote archive:\n > {0}".format(response.geturl()))

        # Seems to be working
        print("({0}/{1}) Downloading {2}".format(file_count, total, url))

        # Open our local file for writing and build status bar
        tf = tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir='.')
        self.chunk_read(response, tf, report_hook=self.chunk_report)

        # Reset download status
        sys.stdout.write('\n')

        tempfile_name = tf.name
        tf.close()

    # Handle errors
    except HTTPError as e:
        print("HTTP Error: {0}, {1}".format(e.code, url))
        if e.code == 401:
            print(" > IMPORTANT: Your user does not have permission to download this type of data!")
        if e.code == 403:
            print(" > Got a 403 Error trying to download this file. ")
            print(" > You MAY need to log in to this app and agree to a EULA. ")
        return False, None
    except URLError as e:
        print("URL Error (from GET): {0}, {1}, {2}".format(e, e.reason, url))
        if "ssl.c" in "{0}".format(e.reason):
            print("IMPORTANT: Remote location may not be accepting your SSL configuration. This is a terminal error.")
        return False, None
    except socket.timeout as e:
        print(" > timeout requesting: {0}; {1}".format(url, e))
        return False, None
    except ssl.CertificateError as e:
        print(" > ERROR: {0}".format(e))
        print(" > Could not validate SSL Cert. You may be able to overcome this using the --insecure flag")
        return False, None

    # Return the file size
    shutil.copy(tempfile_name, download_file)
    os.remove(tempfile_name)
    file_size = self.get_total_size(response)
    actual_size = os.path.getsize(download_file)
    if file_size is None:
        # We were unable to calculate file size.
        file_size = actual_size
    return actual_size, file_size
def get_page(**kwargs):
    """
    Retrieves page content from a given target URL
    """
    url = kwargs.get("url", None)
    post = kwargs.get("data", None)
    header = kwargs.get("header", None)
    cookie = kwargs.get("cookie", None)
    user_agent = kwargs.get("user_agent", None)
    verbose = kwargs.get("verbose", False)

    headers = {}
    parsed_url = None
    page = None

    if url is None:
        raise Exception("[!] URL cannot be None.")

    try:
        parsed_url = urlsplit(url)
    except:
        raise Exception("[!] Unable to parse URL: %s" % url)

    if user_agent is None:
        user_agent = "%s %s" % (NAME, VERSION)

    if post is None:
        parsed_url = parsed_url._replace(
            query=urlencode(parse_qsl(parsed_url.query)))
        url = urlunsplit(parsed_url)
    else:
        post = urlencode(parse_qsl(post), "POST")

    # Perform HTTP Request
    try:
        headers[HTTP_HEADER.USER_AGENT] = user_agent
        if cookie:
            headers[HTTP_HEADER.COOKIE] = cookie
        if header:
            headers[header.split("=")[0]] = header.split("=", 1)[1]

        req = Request(url, post, headers)
        conn = urlopen(req)

        if not args.write_files and kb.original_response and kb.invalid_response:
            _ = conn.headers.get(HTTP_HEADER.CONTENT_LENGTH, "")
            if _.isdigit():
                _ = int(_)
                if _ - max(len(kb.original_response), len(kb.invalid_response)) > SKIP_RETRIEVE_THRESHOLD:
                    page = "".join(random.choice(string.ascii_letters)
                                   for i in range(_))

        # Get HTTP Response
        if not page:
            page = conn.read()

    except KeyboardInterrupt:
        raise

    except Exception as e:
        if hasattr(e, "read"):
            page = page or e.read()
        if verbose:
            if hasattr(e, "msg"):
                print("[x] Error msg '%s'" % e.msg)
            if hasattr(e, "reason"):
                print("[x] Error reason '%s'" % e.reason)
            if hasattr(e, "message"):
                print("[x] Error message '%s'" % e.message)
            if hasattr(e, "code"):
                print("[x] HTTP error code '%d'" % e.code)
            if hasattr(e, "info"):
                print("[x] Response headers '%s'" % e.info())

    return page
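# Standalone sketch of the bandwidth-saving trick inside get_page above:
# when Content-Length says the response is far larger than any reference
# response we would compare against, skip the transfer entirely and
# fabricate a placeholder body of the same length. (Helper name is
# illustrative, not part of the tool above.)
import random
import string

def placeholder_body(content_length):
    return "".join(random.choice(string.ascii_letters)
                   for _ in range(content_length))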
def check_cookie(self):
    if self.cookie_jar is None:
        print(" > Cookiejar is bunk: {0}".format(self.cookie_jar))
        return False

    # File we know is valid, used to validate the cookie
    file_check = 'https://urs.earthdata.nasa.gov/profile'

    # Apply custom Redirect Handler
    opener = build_opener(HTTPCookieProcessor(self.cookie_jar),
                          HTTPHandler(),
                          HTTPSHandler(**self.context))
    install_opener(opener)

    # Attempt a HEAD request
    request = Request(file_check)
    request.get_method = lambda: 'HEAD'
    try:
        print(" > attempting to download {0}".format(file_check))
        response = urlopen(request, timeout=30)
        resp_code = response.getcode()
        # Make sure we're logged in
        if not self.check_cookie_is_logged_in(self.cookie_jar):
            return False
        # Save the cookiejar
        self.cookie_jar.save(self.cookie_jar_path)
    except HTTPError:
        # If we get this error, it likely means the user has not agreed to the current EULA
        print("\nIMPORTANT: ")
        print("Your user appears to lack permissions to download data from the ASF Datapool.")
        print("\n\nNew users: you must first log into Vertex and accept the EULA. In addition, your Study Area must be set at Earthdata https://urs.earthdata.nasa.gov")
        exit(-1)

    # These return codes indicate the USER has not been approved to download the data
    if resp_code in (300, 301, 302, 303):
        try:
            redir_url = response.info().getheader('Location')  # Python 2
        except AttributeError:
            redir_url = response.getheader('Location')         # Python 3

        # Funky test env:
        if ("vertex-retired.daac.asf.alaska.edu" in redir_url
                and "test" in self.asf_urs4['redir']):
            print("Cough, cough. It's dusty in this test env!")
            return True

        print("Redirect ({0}) occurred, invalid cookie value!".format(resp_code))
        return False

    # These are successes!
    if resp_code in (200, 307):
        return True

    return False
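# Distilled form of the Python 2/3 shim used above to read a response
# header: on Python 2, getheader() lives on the message object returned by
# info(), while on Python 3 it is a method of the response object itself
# and info() returns an email.message.Message with no getheader().
# (Function name is illustrative.)
def get_location_header(response):
    try:
        return response.info().getheader('Location')  # Python 2
    except AttributeError:
        return response.getheader('Location')         # Python 3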
def handle(self, *args, **options):
    bbc_url = 'https://www.bbc.com/portuguese/brasil-45215784'
    req = Request(bbc_url)
    try:
        response = urlopen(req)
    except HTTPError as e:
        print 'The server couldn\'t fulfill the request.'
        print 'Error code: ', e.code
    except URLError as e:
        print 'We failed to reach a server.'
        print 'Reason: ', e.reason
    else:
        data = response.read()
        inicio = data.find('panel-two-content')
        fim = data.rfind('<style>', inicio)
        parte_util = data[inicio:fim]
        print parte_util
        try:
            with transaction.atomic():
                areas = re.findall('<h2>(.*?)</h2>', parte_util)
                print u'Áreas'
                for area in areas:
                    area = area.strip()
                    print area
                    if not Area.objects.filter(nome=area).exists():
                        Area.objects.create(nome=area)

                proposta_areas = re.findall('<h2>(.*?)(?=<h2>|\Z)',
                                            parte_util, re.DOTALL)
                if len(proposta_areas) != 6:
                    raise ValueError('erro na quantidade de areas')

                for proposta_area in proposta_areas:
                    # Look up the current area
                    area_atual = Area.objects.get(
                        nome=proposta_area[:proposta_area.find('</h2>')].strip())

                    # Look up each candidate, creating it if it does not exist
                    candidatos = re.findall('party-content(.*?)(?=</ul>|\Z)',
                                            proposta_area, re.DOTALL)
                    for candidato in candidatos:
                        dados_candidato = re.findall(
                            '<span class="header"><span class="header-bold">(.*?)</span> (.*?)</span>',
                            candidato, re.DOTALL)
                        # print dados_candidato
                        nome_candidato, partido = dados_candidato[0]
                        nome_candidato = nome_candidato.strip()
                        partido = partido.strip()
                        if not Candidato.objects.filter(nome=nome_candidato).exists():
                            Candidato.objects.create(nome=nome_candidato,
                                                     numero=0,
                                                     partido=partido)
                        candidato_atual = Candidato.objects.get(nome=nome_candidato)

                        propostas = re.findall('<li>(.*?)</li>', candidato, re.DOTALL)
                        for proposta in propostas:
                            # Un-escape HTML double quotes
                            proposta = proposta.strip().replace('&quot;', '"')
                            if proposta == 'Sem dados até o momento':
                                continue
                            # print proposta
                            if not Proposta.objects.filter(
                                    texto=proposta,
                                    candidato=candidato_atual).exists():
                                Proposta.objects.create(texto=proposta,
                                                        candidato=candidato_atual,
                                                        area=area_atual)
        except:
            raise

    # <span class="header"><span class="header-bold">(.*?)</span> (.*?)</span>
import json

# Python 2/3 compatible imports, matching the other scripts in this file
try:
    from urllib2 import Request, urlopen
except ImportError:
    from urllib.request import Request, urlopen

# Change the date to a date close to (before) when the SPB crashed
cmd = "https://api.github.com/repos/Bioconductor/Contributions/issues?state=open&per_page=100&since=2019-06-16T00:00:00"

# The commented-out code was for when there could be more than 100 open issues.
# Because we use "since" and only expect a day or two of failures, we don't
# need to loop over pages; if for some reason there are more than 100, then
# implement the loop (Python uses indentation to determine code chunks, so
# adjust indentation accordingly).
issue_rerun = []
#count = 1
#while count <= 1:
#    print(count)
#    cmd = cmd + "&page=" + str(count)
request = Request(cmd)
response = urlopen(request)
res = response.read()
git_dir = json.loads(res)
for k in git_dir:
    issue_rerun.append(k['html_url'])
#count += 1

for i in issue_rerun:
    print(i)
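# A sketch of the pagination loop the comments above anticipate: the GitHub
# issues endpoint caps per_page at 100, so if a failure window ever spans
# more than 100 open issues, walk "page" upward until an empty batch comes
# back. (Function name is illustrative; the endpoint and filters are the
# ones used above.)
def fetch_all_issue_urls(base_cmd):
    urls = []
    page = 1
    while True:
        response = urlopen(Request(base_cmd + "&page=" + str(page)))
        batch = json.loads(response.read())
        if not batch:
            break
        urls.extend(issue['html_url'] for issue in batch)
        page += 1
    return urls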
def get_new_cookie(self):
    # Start by prompting user to input their credentials
    # Another Python2/3 workaround
    # (assumes getpass is imported at module scope)
    try:
        new_username = raw_input("Username: ")
    except NameError:
        new_username = input("Username: ")
    new_password = getpass.getpass(prompt="Password (will not be displayed): ")

    # Build URS4 Cookie request
    auth_cookie_url = (self.asf_urs4['url'] + '?client_id=' + self.asf_urs4['client'] +
                       '&redirect_uri=' + self.asf_urs4['redir'] +
                       '&response_type=code&state=')

    try:
        # python2
        user_pass = base64.b64encode(bytes(new_username + ":" + new_password))
    except TypeError:
        # python3
        user_pass = base64.b64encode(bytes(new_username + ":" + new_password, "utf-8"))
    user_pass = user_pass.decode("utf-8")

    # Authenticate against URS, grab all the cookies
    self.cookie_jar = MozillaCookieJar()
    opener = build_opener(HTTPCookieProcessor(self.cookie_jar),
                          HTTPHandler(),
                          HTTPSHandler(**self.context))
    request = Request(auth_cookie_url,
                      headers={"Authorization": "Basic {0}".format(user_pass)})

    # Watch out for cookie rejection!
    try:
        response = opener.open(request)
    except HTTPError as e:
        if "WWW-Authenticate" in e.headers and \
                "Please enter your Earthdata Login credentials" in e.headers["WWW-Authenticate"]:
            print(" > Username and Password combo was not successful. Please try again.")
            return False
        else:
            # If an error happens here, the user most likely has not confirmed the EULA.
            print("\nIMPORTANT: There was an error obtaining a download cookie!")
            print("Your user appears to lack permission to download data from the ASF Datapool.")
            print("\n\nNew users: you must first log into Vertex and accept the EULA. In addition, your Study Area must be set at Earthdata https://urs.earthdata.nasa.gov")
            exit(-1)
    except URLError as e:
        print("\nIMPORTANT: There was a problem communicating with URS, unable to obtain cookie. ")
        print("Try cookie generation later.")
        exit(-1)

    # Did we get a cookie?
    if self.check_cookie_is_logged_in(self.cookie_jar):
        # COOKIE SUCCESS!
        self.cookie_jar.save(self.cookie_jar_path)
        return True

    # If we aren't successful generating the cookie, nothing will work. Stop here!
    print("WARNING: Could not generate new cookie! Cannot proceed. Please try Username and Password again.")
    print("Response was {0}.".format(response.getcode()))
    print("\n\nNew users: you must first log into Vertex and accept the EULA. In addition, your Study Area must be set at Earthdata https://urs.earthdata.nasa.gov")
    exit(-1)
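# Distilled form of the Python 2/3 base64 shim used above: bytes() accepts
# a bare string on Python 2 but requires an encoding argument on Python 3,
# so the TypeError branch selects the right call. (Function name is
# illustrative; assumes base64 is imported as in the method above.)
def basic_auth_value(username, password):
    credentials = username + ":" + password
    try:
        token = base64.b64encode(bytes(credentials))            # Python 2
    except TypeError:
        token = base64.b64encode(bytes(credentials, "utf-8"))   # Python 3
    return "Basic " + token.decode("utf-8")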
def request_issue_creation(path, arguments, error_message):
    """
    request the creation and create the issue
    """

    # TODO:/ we're gonna go ahead and give you guys another chance
    #if not checksum(path):
    #    lib.output.error(
    #        "It seems you have changed some of the code in the program. We do not accept issues from edited "
    #        "code as we have no way of reliably testing your issue. We recommend that you only use the version "
    #        "that is available on github, no issue will be created for this problem."
    #    )
    #    exit(1)

    question = raw_input("do you want to create an anonymized issue?[y/N]: ")
    if question.lower().startswith("y"):
        if check_version_number(lib.banner.VERSION):
            # gonna read a chunk of it instead of one line
            chunk = 4096
            with open(path) as data:
                identifier = create_identifier(error_message)
                # gotta seek to the beginning of the file since it's already been read `4096` into it
                data.seek(0)
                issue_title = "Unhandled Exception ({})".format(identifier)

            issue_data = {
                "title": issue_title,
                "body": (
                    "Autosploit version: `{}`\n"
                    "OS information: `{}`\n"
                    "Running context: `{}`\n"
                    "Error message: `{}`\n"
                    "Error traceback:\n```\n{}\n```\n"
                    "Metasploit launched: `{}`\n".format(
                        lib.banner.VERSION,
                        platform.platform(),
                        ' '.join(sys.argv),
                        error_message,
                        open(path).read(),
                        lib.settings.MSF_LAUNCHED,
                    )
                )
            }

            _json_data = json.dumps(issue_data)
            if sys.version_info > (3,):  # python 3
                _json_data = _json_data.encode("utf-8")

            if not ensure_no_issue(identifier):
                req = Request(
                    url="https://api.github.com/repos/nullarray/autosploit/issues",
                    data=_json_data,
                    headers={"Authorization": "token {}".format(get_token(lib.settings.TOKEN_PATH))}
                )
                urlopen(req, timeout=10).read()
                lib.output.info(
                    "issue has been generated with the title '{}', at the following "
                    "URL '{}'".format(issue_title, find_url(identifier))
                )
            else:
                lib.output.error(
                    "someone has already created this issue here: {}".format(find_url(identifier))
                )
            try:
                os.remove(path)
            except:
                pass
        else:
            sep = "-" * 35
            lib.output.error(
                "it appears you are not using the current version of AutoSploit. Please update to the newest version "
                "and try again; this can also happen when a new update has been pushed and the cached raw page has "
                "not been updated yet. If you feel this is the latter, please create an issue on AutoSploit's Github "
                "page with the following info:"
            )
            print("{}\n{}\n{}".format(sep, open(path).read(), sep))
    else:
        lib.output.info("the issue has been logged to a file in path: '{}'".format(path))
def download_or_cache(url, sha):
    """
    Get bytes from the given url or local cache.

    Parameters
    ----------
    url : str
        The url to download

    sha : str
        The sha256 of the file

    Returns
    -------
    BytesIO
        The file loaded into memory.
    """
    cache_dir = _get_xdg_cache_dir()

    def get_from_cache(local_fn):
        if cache_dir is None:
            raise Exception("no cache dir")
        cache_filename = os.path.join(cache_dir, local_fn)
        with open(cache_filename, 'rb') as fin:
            buf = BytesIO(fin.read())
        file_sha = get_fd_hash(buf)
        if file_sha != sha:
            return None
        buf.seek(0)
        return buf

    def write_cache(local_fn, data):
        if cache_dir is None:
            raise Exception("no cache dir")
        cache_filename = os.path.join(cache_dir, local_fn)
        makedirs(cache_dir, exist_ok=True)
        if sys.version_info < (3,):
            if os.path.exists(cache_filename):
                raise FileExistsError
            mode = 'wb'
        else:
            mode = 'xb'
        old_pos = data.tell()
        data.seek(0)
        with open(cache_filename, mode=mode) as fout:
            fout.write(data.read())
        data.seek(old_pos)

    try:
        return get_from_cache(sha)
    except Exception:
        pass

    # jQueryUI's website blocks direct downloads from urllib.request's
    # default User-Agent, but not (for example) wget; so I don't feel too
    # bad passing in an empty User-Agent.
    with urlopen(Request(url, headers={"User-Agent": ""})) as req:
        file_contents = BytesIO(req.read())
        file_contents.seek(0)

    file_sha = get_fd_hash(file_contents)
    if file_sha != sha:
        raise Exception(("The download file does not match the "
                         "expected sha. {url} was expected to have "
                         "{sha} but it had {file_sha}").format(
                             sha=sha, file_sha=file_sha, url=url))

    try:
        write_cache(sha, file_contents)
    except Exception:
        pass

    file_contents.seek(0)
    return file_contents
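# get_fd_hash is used above but not defined in this file; a plausible
# minimal version (an assumption, not the project's actual helper) hashes
# a file-like object in chunks and restores the caller's read position
# afterwards, which matches how download_or_cache calls it.
import hashlib

def get_fd_hash(fd):
    digest = hashlib.sha256()
    old_pos = fd.tell()
    fd.seek(0)
    for chunk in iter(lambda: fd.read(1 << 16), b''):
        digest.update(chunk)
    fd.seek(old_pos)
    return digest.hexdigest()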
                self.title, self.season, self.episode, self.starDate, self.airDate)


if __name__ == '__main__':
    # THE ORIGINAL SERIES
    reSeasons = re.compile('<h3><span class=.*?</table>')
    reEpisodes = re.compile('<tr class="vevent".*?</tr>')
    reNumbers = re.compile(r'<th scope="row" id=".*?" style="text-align: center;.*?">(\d+)</th><td>(\d+)</td>')
    reTitleStarDate = re.compile('"<a href=".*?" title=".*?">(.*?)</a>"</td><td>(.*?)</td>')
    reAirDate = re.compile(r'\(<span class=".*?">(.*?)</span>\)')

    request = Request('http://en.wikipedia.org/wiki/List_of_Star_Trek:_The_Original_Series_episodes',
                      headers={'User-Agent': "Magic Browser"})
    response = urlopen(request)
    rawHtml = response.read()
    rawHtml = rawHtml.replace('\n', '')

    episodes = []
    rawSeasons = re.findall(reSeasons, rawHtml)
    season = 0
    for rawSeason in rawSeasons:
        rawEpisodes = re.findall(reEpisodes, rawSeason)
        for rawEpisode in rawEpisodes:
            episode = Episode('Original Series')
            numbers = re.findall(reNumbers, rawEpisode)
            episode.season = str(season)
"data_type": "http_flood_scanner_probe", "blacklist_set_size": blacklist_set_size, "auto_block_ip_set_size": auto_block_ip_set_size, "allowed_requests": allowed_requests, "blocked_requests_all": blocked_requests_all, "blocked_requests_auto_block": blocked_requests_auto_block, "blocked_requests_blacklist": blocked_requests_blacklist, "waf_type": environ['LOG_TYPE'] } } url = 'https://metrics.awssolutionsbuilder.com/generic' data = json.dumps(usage_data) headers = {'content-type': 'application/json'} print("[send_anonymous_usage_data] %s" % data) req = Request(url, data, headers) rsp = urlopen(req) content = rsp.read() rspcode = rsp.getcode() print('[send_anonymous_usage_data] Response Code: {}'.format(rspcode)) print( '[send_anonymous_usage_data] Response Content: {}'.format(content)) print("[send_anonymous_usage_data] End") except Exception, e: print("[send_anonymous_usage_data] Failed to Send Data") #====================================================================================================================== # Lambda Entry Point #======================================================================================================================