Example #1
from urllib.request import Request, build_opener

from PIL import ImageFile


def fetch_url(url, useragent, referer=None, retries=1, dimension=False):
    """Fetch url and return its content (or just enough of an image,
    when dimension is True, for PIL to determine its size).

    clean_url() and chunk_size are assumed to be defined elsewhere in
    this module.
    """
    cur_try = 0
    nothing = None if dimension else (None, None)
    url = clean_url(url)

    if not url.startswith(('http://', 'https://')):
        return nothing

    while True:
        try:
            req = Request(url)
            req.add_header('User-Agent', useragent)
            if referer:
                req.add_header('Referer', referer)

            opener = build_opener()
            open_req = opener.open(req, timeout=5)

            # if we only need the dimension of the image, we may not
            # need to download the entire thing
            if dimension:
                content = open_req.read(chunk_size)
            else:
                content = open_req.read()

            content_type = open_req.headers.get('content-type')

            if not content_type:
                return nothing

            if 'image' in content_type:
                p = ImageFile.Parser()
                new_data = content
                while not p.image and new_data:
                    try:
                        p.feed(new_data)
                    except IOError as e:
                        # PIL failed to load the JPEG codec (often a broken
                        # PIL install); installing via Pillow should fix it.
                        print(
                            '***jpeg misconfiguration! check pillow or pil '
                            'installation on this machine: %s' % str(e))
                        p = None
                        break
                    except ValueError:
                        print('cannot read image format: %s' % url)
                        p = None
                        break
                    except Exception as e:
                        # For some favicon.ico images, the image is so small
                        # that our PIL feed() method fails a length test.
                        # We add a check below for this.
                        """
                        is_favicon = (urls.url_to_filetype(url) == 'ico')
                        if is_favicon:
                            print 'we caught a favicon!: %s' % url
                        else:
                            # import traceback
                            # print traceback.format_exc()
                            print 'PIL feed() failure for image:', url, str(e)
                            raise e
                        """
                        print('Exception: %s' % str(e))
                        p = None
                        break
Example #2
def test_conn(request):
    request = Request('http://oauth.tangyue.com/?aa=aa')
    response = urlopen(request)
    return HttpResponse(response.read())
Example #3
def request_factory(path='/'):
    url = 'http://127.0.0.1:5001%s' % path
    headers = {
        'Content-Type': 'application/html'
    }
    return Request(url, data=html_data.encode('utf-8'), headers=headers, method='POST')
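
A hypothetical use of this factory, assuming a local server is listening on 127.0.0.1:5001 and that html_data (not shown above) is defined at module level:

from urllib.request import urlopen

html_data = "<html><body>hello</body></html>"  # placeholder for the module's real variable
response = urlopen(request_factory('/render'))  # '/render' is an illustrative path
print(response.getcode())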
Example #4
from urllib2 import urlopen, Request
headers = {'Authorization': 'Token token=eebf8075f1d24f268bb2f15a4f159a95'}
url = "http://www.cepaberto.com/api/v2/ceps.json?cep=40010000"
json = urlopen(Request(url, None, headers=headers)).read()
print json

from urllib2 import urlopen, Request
headers = {'Authorization': 'Token token=eebf8075f1d24f268bb2f15a4f159a95'}
url = "http://www.cepaberto.com/api/v2/cities.json?estado=SP"
json = urlopen(Request(url, None, headers=headers)).read()
print json
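
The two calls above are Python 2 (urllib2). A minimal Python 3 sketch of the same request, assuming the same demo token and endpoint:

from urllib.request import urlopen, Request

headers = {'Authorization': 'Token token=eebf8075f1d24f268bb2f15a4f159a95'}
url = "http://www.cepaberto.com/api/v2/ceps.json?cep=40010000"
# read() returns bytes under Python 3, so decode before printing
body = urlopen(Request(url, None, headers=headers)).read().decode('utf-8')
print(body)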
Example #5
from urllib2 import urlopen, Request, URLError, HTTPError

try:
    timeout = 2
    url = 'http://10.251.21.176:9001/index.html?processname=elasticsearch&action=start'
    response = urlopen(Request(url), timeout=timeout)
    html = response.read()
except (URLError, HTTPError) as e:
    print e
Example #6
    def check_cookie(self):

        if self.cookie_jar is None:
            print(" > Cookiejar is bunk: {0}".format(self.cookie_jar))
            return False

        # File we know is valid, used to validate cookie
        file_check = 'https://urs.earthdata.nasa.gov/profile'

        # Apply custom Redirect Handler
        opener = build_opener(HTTPCookieProcessor(self.cookie_jar),
                              HTTPHandler(), HTTPSHandler(**self.context))
        install_opener(opener)

        # Attempt a HEAD request
        request = Request(file_check)
        request.get_method = lambda: 'HEAD'
        try:
            print(" > attempting to download {0}".format(file_check))
            response = urlopen(request, timeout=30)
            resp_code = response.getcode()
            # Make sure we're logged in
            if not self.check_cookie_is_logged_in(self.cookie_jar):
                return False

            # Save cookiejar
            self.cookie_jar.save(self.cookie_jar_path)

        except HTTPError:
            # If we get this error, it likely means the user has not agreed to the current EULA
            print("\nIMPORTANT: ")
            print(
                "Your user appears to lack permissions to download data from the ASF Datapool."
            )
            print(
                "\n\nNew users: you must first log into Vertex and accept the EULA. In addition, your Study Area must be set at Earthdata https://urs.earthdata.nasa.gov"
            )
            exit(-1)

        # These return codes indicate the user has not been approved to download the data
        if resp_code in (300, 301, 302, 303):
            try:
                redir_url = response.info().getheader('Location')
            except AttributeError:
                redir_url = response.getheader('Location')

            #Funky Test env:
            if ("vertex-retired.daac.asf.alaska.edu" in redir_url
                    and "test" in self.asf_urs4['redir']):
                print("Cough, cough. It's dusty in this test env!")
                return True

            print("Redirect ({0}) occured, invalid cookie value!".format(
                resp_code))
            return False

        # These are successes!
        if resp_code in (200, 307):
            return True

        return False
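
The get_method override above is the classic urllib2 way to force a HEAD request. On Python 3.3+ the Request constructor takes a method argument, so an equivalent minimal sketch (same profile URL, cookie handling omitted for brevity) looks like this:

from urllib.request import urlopen, Request

request = Request('https://urs.earthdata.nasa.gov/profile', method='HEAD')
response = urlopen(request, timeout=30)
print(response.getcode())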
Example #7
    def download_file_with_cookiejar(self,
                                     url,
                                     file_count,
                                     total,
                                     recursion=False):
        # see if we've already downloaded this file and whether it is the correct size
        download_file = os.path.basename(url).split('?')[0]
        if os.path.isfile(download_file):
            try:
                request = Request(url)
                request.get_method = lambda: 'HEAD'
                response = urlopen(request, timeout=30)
                remote_size = self.get_total_size(response)
                # Check that we were able to derive a size.
                if remote_size:
                    local_size = os.path.getsize(download_file)
                    if remote_size < (local_size +
                                      (local_size * .01)) and remote_size > (
                                          local_size - (local_size * .01)):
                        print(
                            " > Download file {0} exists! \n > Skipping download of {1}. "
                            .format(download_file, url))
                        return None, None
                    # partial file size wasn't the full file size; let's blow away the chunk and start again
                    print(
                        " > Found {0} but it wasn't fully downloaded. Removing file and downloading again."
                        .format(download_file))
                    os.remove(download_file)

            except ssl.CertificateError as e:
                print(" > ERROR: {0}".format(e))
                print(
                    " > Could not validate SSL Cert. You may be able to overcome this using the --insecure flag"
                )
                return False, None

            except HTTPError as e:
                if e.code == 401:
                    print(
                        " > IMPORTANT: Your user may not have permission to download this type of data!"
                    )
                else:
                    print(" > Unknown Error, Could not get file HEAD: {0}".
                          format(e))

            except URLError as e:
                print("URL Error (from HEAD): {0}, {1}".format(e.reason, url))
                if "ssl.c" in "{0}".format(e.reason):
                    print(
                        "IMPORTANT: Remote location may not be accepting your SSL configuration. This is a terminal error."
                    )
                return False, None

        # attempt https connection
        try:
            request = Request(url)
            response = urlopen(request, timeout=30)

            # Watch for redirect
            if response.geturl() != url:

                # See if we were redirected BACK to URS for re-auth.
                if 'https://urs.earthdata.nasa.gov/oauth/authorize' in response.geturl(
                ):

                    if recursion:
                        print(
                            " > Entering seemingly endless auth loop. Aborting. "
                        )
                        return False, None

                    # make this easier. If there is no app_type=401, add it
                    new_auth_url = response.geturl()
                    if "app_type" not in new_auth_url:
                        new_auth_url += "&app_type=401"

                    print(
                        " > While attempting to download {0}....".format(url))
                    print(" > Need to obtain new cookie from {0}".format(
                        new_auth_url))
                    old_cookies = [cookie.name for cookie in self.cookie_jar]
                    opener = build_opener(HTTPCookieProcessor(self.cookie_jar),
                                          HTTPHandler(),
                                          HTTPSHandler(**self.context))
                    request = Request(new_auth_url)
                    try:
                        response = opener.open(request)
                        for cookie in self.cookie_jar:
                            if cookie.name not in old_cookies:
                                print(" > Saved new cookie: {0}".format(
                                    cookie.name))

                                # A little hack to save session cookies
                                if cookie.discard:
                                    cookie.expires = int(
                                        time.time()) + 60 * 60 * 24 * 30
                                    print(
                                        " > Saving session Cookie that should have been discarded! "
                                    )

                        self.cookie_jar.save(self.cookie_jar_path,
                                             ignore_discard=True,
                                             ignore_expires=True)
                    except HTTPError as e:
                        print("HTTP Error: {0}, {1}".format(e.code, url))
                        return False, None

                    # Okay, now we have more cookies! Let's try again, recursively!
                    print(" > Attempting download again with new cookies!")
                    return self.download_file_with_cookiejar(url,
                                                             file_count,
                                                             total,
                                                             recursion=True)

                print(
                    " > 'Temporary' Redirect download @ Remote archive:\n > {0}"
                    .format(response.geturl()))

            # seems to be working
            print("({0}/{1}) Downloading {2}".format(file_count, total, url))

            # Open our local file for writing and build status bar
            tf = tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir='.')
            self.chunk_read(response, tf, report_hook=self.chunk_report)

            # Reset download status
            sys.stdout.write('\n')

            tempfile_name = tf.name
            tf.close()

        #handle errors
        except HTTPError as e:
            print("HTTP Error: {0}, {1}".format(e.code, url))

            if e.code == 401:
                print(
                    " > IMPORTANT: Your user does not have permission to download this type of data!"
                )

            if e.code == 403:
                print(" > Got a 403 Error trying to download this file.  ")
                print(
                    " > You MAY need to log in this app and agree to a EULA. ")

            return False, None

        except URLError as e:
            print("URL Error (from GET): {0}, {1}, {2}".format(
                e, e.reason, url))
            if "ssl.c" in "{0}".format(e.reason):
                print(
                    "IMPORTANT: Remote location may not be accepting your SSL configuration. This is a terminal error."
                )
            return False, None

        except socket.timeout as e:
            print(" > timeout requesting: {0}; {1}".format(url, e))
            return False, None

        except ssl.CertificateError as e:
            print(" > ERROR: {0}".format(e))
            print(
                " > Could not validate SSL Cert. You may be able to overcome this using the --insecure flag"
            )
            return False, None

        # Return the file size
        shutil.copy(tempfile_name, download_file)
        os.remove(tempfile_name)
        file_size = self.get_total_size(response)
        actual_size = os.path.getsize(download_file)
        if file_size is None:
            # We were unable to calculate file size.
            file_size = actual_size
        return actual_size, file_size
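
The helper get_total_size() referenced above is not part of this excerpt. A plausible stand-in (an assumption, not the author's implementation) simply reads the Content-Length header:

def get_total_size(response):
    # Hypothetical helper: derive the remote size from Content-Length, if present.
    length = response.headers.get('Content-Length')
    return int(length) if length else None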
Example #8
    def _request(self, chunk=None, info_request=False):
        """Do the request.

        Used for fetching information and for fetching data.

        chunk -- specifies which range (part) should be loaded.
        info_request -- specifies if only information should be fetched.
        """
        if self._response is not None:
            return self._response

        if self.url_parts.scheme == 'http':
            max_redirects = 0
            if info_request:
                # allow redirects only for info-requests
                max_redirects = self.source.max_redirects
            req = Request(self.url)

            cookie_processor = HTTPCookieProcessor()

            if self.source.cookie_objects is not None:
                # Use the cookies which were received by previous
                # (info-)requests.
                for cookie in self.source.cookie_objects:
                    cookie_processor.cookiejar.set_cookie(cookie)
            elif len(self.source.cookies) > 0 and info_request:
                # This is the first (info-)request where cookies are
                # used. Use user-defined cookies.
                fcres = FakeCookieResponse(self.source.cookies, self.url)
                cookie_processor.cookiejar.extract_cookies(fcres, req)

            if self.source.referrer != '':
                req.add_header('Referer', self.source.referrer)
            if self.source.user_agent != '':
                req.add_header('User-Agent', self.source.user_agent)

            if chunk is not None:
                start_offset = chunk.offset + chunk.loaded
                req.add_header('Range', 'bytes=' + str(start_offset) + '-')

            opener = build_opener(_LimitedHTTPRedirectHandler(max_redirects),
                                  cookie_processor)
            self._response = opener.open(req, timeout=self.source.timeout)

            if self.source.cookie_objects is None:
                # save cookie objects for later use (e.g. DataSlots)
                cookie_objects = []
                for cookie in cookie_processor.cookiejar:
                    cookie_objects.append(cookie)
                self.source.cookie_objects = cookie_objects

            return self._response

        elif self.url_parts.scheme == 'ftp':
            req = Request(self.url)
            if chunk is not None:
                start_offset = chunk.offset + chunk.loaded
                req.add_header('Offset', str(start_offset))
            opener = build_opener(FTPChunkHandler())
            self._response = opener.open(req, timeout=self.source.timeout)
            return self._response
        else:
            raise URLError('The protocol is not supported.')
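
The Range header used above is what makes chunked, resumable HTTP downloads work. A stand-alone sketch of the same idea (the URL is only a placeholder):

from urllib.request import urlopen, Request

url = 'https://example.com/some/file.bin'  # placeholder URL
req = Request(url)
req.add_header('Range', 'bytes=1024-')  # ask for everything from byte 1024 onward
resp = urlopen(req, timeout=10)
# A server that honours the request answers 206 with a Content-Range header.
print(resp.getcode(), resp.headers.get('Content-Range'))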
Example #9
import json
try:
    from urllib2 import urlopen, Request
except ImportError:
    from urllib.request import urlopen, Request

demo_key = "e1eee2b5677f408da40af8480a5fd5a8"
incidents_url = "https://api.wmata.com/Incidents.svc/json/ElevatorIncidents"
hdrs = {'api_key': demo_key}
request = Request(incidents_url, headers=hdrs)
response = urlopen(request)
raw_response = response.read().decode('utf8')
data = json.loads(raw_response)

incidents = data['ElevatorIncidents']
for i in incidents:
    station_name = i['StationName']
    station_code = i['StationCode']
    print(station_name, station_code)
Example #10
    def _http(self,
              method,
              path,
              request_headers=None,
              response_headers=None,
              json_post=True,
              timeout=None,
              kw=None):
        if kw is None:
            kw = {}
        data = None
        if method == 'GET' and kw:
            path = '%s?%s' % (path, _encode_params(kw))
        if method in ['POST', 'PATCH', 'PUT']:
            if json_post:
                data = _encode_json(kw)
            else:
                data = urllib.parse.urlencode(kw)
            data = bytes(data, 'utf-8')

        if self._default_headers:
            final_headers = self._default_headers.copy()
            if request_headers:
                final_headers.update(request_headers)
        else:
            final_headers = request_headers

        if method == 'GET':
            if final_headers:
                keys = set(key.lower() for key in final_headers)
            else:
                final_headers = {}
                keys = set()

            if 'if-modified-since' in keys and not final_headers[
                    'if-modified-since']:
                del final_headers['if-modified-since']
            elif 'if-modified-since' not in keys and path in LAST_MODIFIED:
                final_headers['if-modified-since'] = LAST_MODIFIED[path]
            if 'if-none-match' in keys and not final_headers['if-none-match']:
                del final_headers['if-none-match']
            elif 'if-none-match' not in keys and path in ETAG:
                final_headers['if-none-match'] = ETAG[path]

        url = '%s%s' % (_URL, path)
        if logger.level > logging.DEBUG:
            logger.info('REQUEST %s %s %s', method, url, final_headers)
        else:
            logger.info('%s REQUEST %s %s %s', '*' * 10, method, url,
                        pformat(final_headers))
        opener = build_opener(HTTPSHandler)
        request = Request(url, data=data, headers=final_headers or {})
        request.get_method = _METHOD_MAP[method]
        if self._authorization:
            request.add_header('Authorization', self._authorization)
        if method in ['POST', 'PATCH', 'PUT']:
            request.add_header('Content-Type',
                               'application/x-www-form-urlencoded')
        try:
            response = opener.open(request, timeout=timeout or TIMEOUT)

            if method == 'GET':
                keys = {key.lower(): key for key in response.headers}
                if 'last-modified' in keys:
                    LAST_MODIFIED[path] = response.headers[
                        keys['last-modified']]
                if 'etag' in keys:
                    ETAG[path] = response.headers[keys['etag']]

            is_json = self._process_resp(response.headers)
            if isinstance(response_headers, dict):
                response_headers.update(dict(response.headers))
            if logger.level > logging.DEBUG:
                logger.info('==> %s', 200)
            else:
                logger.debug('=========> RESPONSE %s %s', 200,
                             pformat(response_headers))
            content = response.read().decode('utf-8')
            # if logger.level <= logging.DEBUG:
            #     logger.debug('CONTENT\n' + '=' * 40)
            #     logger.debug('%s', pformat(_parse_json(content) if is_json else content))
            #     logger.debug('\n' + '=' * 40)
            return _parse_json(content) if is_json else content
        except HTTPError as e:
            is_json = self._process_resp(e.headers)
            if isinstance(response_headers, dict):
                response_headers.update(dict(e.headers))
            if logger.level > logging.DEBUG:
                logger.info('==> %s', e.code)
            else:
                logger.debug('=========> RESPONSE %s %s', e.code,
                             pformat(response_headers))
            content = e.read().decode('utf-8')
            _json = _parse_json(content) if is_json else None
            req = JsonObject(method=method, url=url, headers=final_headers)
            resp = JsonObject(code=e.code,
                              json=_json,
                              content=content,
                              headers=response_headers)
            if e.code == 304:
                raise RequestNotModified(url, req, resp)
            if e.code == 404:
                raise ApiNotFoundError(url, req, resp)
            raise ApiError(url, req, resp)
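
Example #10 layers ETag / Last-Modified caching onto its GET requests. A stripped-down sketch of the same conditional-request idea (the names here are illustrative, not the original module's):

from urllib.request import urlopen, Request
from urllib.error import HTTPError

etags = {}  # url -> ETag seen on a previous response

def conditional_get(url):
    headers = {}
    if url in etags:
        headers['If-None-Match'] = etags[url]
    try:
        resp = urlopen(Request(url, headers=headers))
        if resp.headers.get('ETag'):
            etags[url] = resp.headers['ETag']
        return resp.read()
    except HTTPError as e:
        if e.code == 304:
            return None  # cached copy is still valid
        raise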
Example #11
class MyTubeFeedEntry():
	def __init__(self, feed, entry, favoritesFeed = False):
		self.feed = feed
		self.entry = entry
		self.favoritesFeed = favoritesFeed
		self.thumbnail = {}
		"""self.myopener = MyOpener()
		urllib.urlopen = MyOpener().open
		if config.plugins.mytube.general.useHTTPProxy.value is True:
			proxy = {'http': 'http://'+str(config.plugins.mytube.general.ProxyIP.getText())+':'+str(config.plugins.mytube.general.ProxyPort.value)}
			self.myopener = MyOpener(proxies=proxy)
			urllib.urlopen = MyOpener(proxies=proxy).open
		else:
			self.myopener = MyOpener()
			urllib.urlopen = MyOpener().open"""

	def isPlaylistEntry(self):
		return False

	def getTubeId(self):
		#print "[MyTubeFeedEntry] getTubeId"
		ret = None
		if self.entry.media.player:
			split = self.entry.media.player.url.split("=")
			ret = split.pop()
			if ret.startswith('youtube_gdata'):
				tmpval=split.pop()
				if tmpval.endswith("&feature"):
					tmp = tmpval.split("&")
					ret = tmp.pop(0)
		return ret

	def getTitle(self):
		#print "[MyTubeFeedEntry] getTitle",self.entry.media.title.text
		return self.entry.media.title.text

	def getDescription(self):
		#print "[MyTubeFeedEntry] getDescription"
		if self.entry.media is not None and self.entry.media.description is not None:
			return self.entry.media.description.text
		return "not vailable"

	def getThumbnailUrl(self, index = 0):
		#print "[MyTubeFeedEntry] getThumbnailUrl"
		if index < len(self.entry.media.thumbnail):
			return self.entry.media.thumbnail[index].url
		return None

	def getPublishedDate(self):
		if self.entry.published is not None:
			return self.entry.published.text
		return "unknown"

	def getViews(self):
		if self.entry.statistics is not None:
			return self.entry.statistics.view_count
		return "not available"

	def getDuration(self):
		if self.entry.media is not None and self.entry.media.duration is not None:
			return self.entry.media.duration.seconds
		else:
			return 0

	def getRatingAverage(self):
		if self.entry.rating is not None:
			return self.entry.rating.average
		return 0


	def getNumRaters(self):
		if self.entry.rating is not None:
			return self.entry.rating.num_raters
		return ""

	def getAuthor(self):
		authors = []
		for author in self.entry.author:
			authors.append(author.name.text)
		author = ", ".join(authors)
		return author

	def getUserFeedsUrl(self):
		for author in self.entry.author:
			return author.uri.text

		return False

	def getUserId(self):
		return self.getUserFeedsUrl().split('/')[-1]

	def subscribeToUser(self):
		username = self.getUserId()
		return myTubeService.SubscribeToUser(username)
		
	def addToFavorites(self):
		video_id = self.getTubeId()
		return myTubeService.addToFavorites(video_id)

	def PrintEntryDetails(self):
		EntryDetails = { 'Title': None, 'TubeID': None, 'Published': None, 'Description': None, 'Category': None, 'Tags': None, 'Duration': None, 'Views': None, 'Rating': None, 'Thumbnails': None}
		EntryDetails['Title'] = self.entry.media.title.text
		EntryDetails['TubeID'] = self.getTubeId()
		EntryDetails['Description'] = self.getDescription()
		EntryDetails['Category'] = self.entry.media.category[0].text
		EntryDetails['Tags'] = self.entry.media.keywords.text
		EntryDetails['Published'] = self.getPublishedDate()
		EntryDetails['Views'] = self.getViews()
		EntryDetails['Duration'] = self.getDuration()
		EntryDetails['Rating'] = self.getNumRaters()
		EntryDetails['RatingAverage'] = self.getRatingAverage()
		EntryDetails['Author'] = self.getAuthor()
		# show thumbnails
		thumbnails = []
		for thumbnail in self.entry.media.thumbnail:
			print 'Thumbnail url: %s' % thumbnail.url
			thumbnails.append(str(thumbnail.url))
		EntryDetails['Thumbnails'] = thumbnails
		#print EntryDetails
		return EntryDetails
	
	def removeAdditionalEndingDelimiter(self, data):
		pos = data.find("};")
		if pos != -1:
			data = data[:pos + 1]
		return data
	
	def extractFlashVars(self, data, assets):
		flashvars = {}
		found = False
		
		for line in data.split("\n"):
			if line.strip().find(";ytplayer.config = ") > 0:
				found = True
				p1 = line.find(";ytplayer.config = ") + len(";ytplayer.config = ") - 1
				p2 = line.rfind(";")
				if p1 <= 0 or p2 <= 0:
					continue
				data = line[p1 + 1:p2]
				break
		data = self.removeAdditionalEndingDelimiter(data)
		
		if found:
			data = json.loads(data)
			if assets:
				flashvars = data["assets"]
			else:
				flashvars = data["args"]
		return flashvars

	# link resolving from xbmc youtube plugin
	def getVideoUrl(self):
		VIDEO_FMT_PRIORITY_MAP = {
			'38' : 1, #MP4 Original (HD)
			'37' : 2, #MP4 1080p (HD)
			'22' : 3, #MP4 720p (HD)
			'18' : 4, #MP4 360p
			'35' : 5, #FLV 480p
			'34' : 6, #FLV 360p
		}
		video_url = None
		video_id = str(self.getTubeId())

		links = {}
		watch_url = 'http://www.youtube.com/watch?v=%s&safeSearch=none'%video_id
		watchrequest = Request(watch_url, None, std_headers)
		
		try:
			print "[MyTube] trying to find out if a HD Stream is available",watch_url
			result = urlopen2(watchrequest).read()
		except (URLError, HTTPException, socket.error), err:
			print "[MyTube] Error: Unable to retrieve watchpage - Error code: ", str(err)
			return video_url

		# Get video info
		for el in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
			info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en' % (video_id, el))
			request = Request(info_url, None, std_headers)
			try:
				infopage = urlopen2(request).read()
				videoinfo = parse_qs(infopage)
				if 'url_encoded_fmt_stream_map' in videoinfo or 'fmt_url_map' in videoinfo:
					break
			except (URLError, HTTPException, socket.error), err:
				print "[MyTube] Error: unable to download video infopage",str(err)
				return video_url
Example #12
# Fetch API token from settings.txt
def getToken():
    appSettings = readSettings.loadSettings("../../settings.txt")

    firstSetting = appSettings[0].rstrip()
    return firstSetting


token = getToken()

#Validate token
if not token:
    print "Token not set"
    sys.exit(1)

# Reading and validating command line argument
try:
    email = sys.argv[1]
except IndexError:
    print "Please enter a email address\nUsage: emailWhois.py <emailID> (Eg: file.py [email protected])"
    sys.exit(1)

# whois

headers = {'Authorization': 'Bearer ' + token}
#This API method returns the WHOIS information for the specified email address
request = Request('https://investigate.api.umbrella.com/whois/emails/' + email,
                  headers=headers)

response_body = urlopen(request).read()
print response_body
Example #13
def lambda_handler(event, context):
    # Reading output items from the CF stack
    outputs = {}
    stack_name = context.invoked_function_arn.split(':')[6].rsplit('-', 2)[0]
    response = cf_client.describe_stacks(StackName=stack_name)
    for e in response['Stacks'][0]['Outputs']:
        outputs[e['OutputKey']] = e['OutputValue']
    policy_table_name = outputs['PolicyDDBTableName']
    history_table_name = outputs['HistoryDDBTableName']
    uuid = outputs['UUID']
    policy_table = dynamodb.Table(policy_table_name)
    history_table = dynamodb.Table(history_table_name)

    aws_regions = ec2_client.describe_regions()['Regions']

    response = policy_table.get_item(
        Key={'SolutionName': 'EbsSnapshotScheduler'})
    item = response['Item']
    global snapshot_time, retention_days, time_zone, days_active, custom_tag_name

    # Reading Default Values from DynamoDB
    custom_tag_name = str(item['CustomTagName'])
    custom_tag_length = len(custom_tag_name)
    default_snapshot_time = str(item['DefaultSnapshotTime'])
    default_retention_days = int(item['DefaultRetentionDays'])
    auto_snapshot_deletion = str(item['AutoSnapshotDeletion']).lower()
    default_time_zone = str(item['DefaultTimeZone'])
    default_days_active = str(item['DefaultDaysActive']).lower()
    send_data = str(item['SendAnonymousData']).lower()
    time_iso = datetime.datetime.utcnow().isoformat()
    time_stamp = str(time_iso)
    utc_time = datetime.datetime.utcnow()
    # time_delta must be changed before updating the CWE schedule for Lambda
    time_delta = datetime.timedelta(minutes=4)
    # Declare Dicts
    region_dict = {}
    all_region_dict = {}
    regions_label_dict = {}
    post_dict = {}

    if auto_snapshot_deletion == "yes":
        print "Auto Snapshot Deletion: Enabled"
    else:
        print "Auto Snapshot Deletion: Disabled"

    for region in aws_regions:
        try:
            print "\nExecuting for region %s" % (region['RegionName'])

            # Create connection to the EC2 using boto3 resources interface
            ec2 = boto3.client('ec2', region_name=region['RegionName'])
            ec2_resource = boto3.resource('ec2',
                                          region_name=region['RegionName'])
            aws_region = region['RegionName']

            # Declare Lists
            snapshot_list = []
            agg_snapshot_list = []
            snapshots = []
            retention_period_per_instance = {}

            # Filter Instances for Scheduler Tag
            instances = ec2_resource.instances.all()

            for i in instances:
                if i.tags is not None:
                    for t in i.tags:
                        if t['Key'][:custom_tag_length] == custom_tag_name:
                            tag = t['Value']

                            # Split out Tag & Set Variables to default
                            default1 = 'default'
                            default2 = 'true'
                            snapshot_time = default_snapshot_time
                            retention_days = default_retention_days
                            time_zone = default_time_zone
                            days_active = default_days_active

                            # First value will always be defaults or start_time.
                            parse_tag_values(tag, default1, default2,
                                             default_snapshot_time)

                            tz = pytz.timezone(time_zone)
                            now = utc_time.replace(tzinfo=pytz.utc).astimezone(
                                tz).strftime("%H%M")
                            now_max = utc_time.replace(
                                tzinfo=pytz.utc).astimezone(tz) - time_delta
                            now_max = now_max.strftime("%H%M")
                            now_day = utc_time.replace(
                                tzinfo=pytz.utc).astimezone(tz).strftime(
                                    "%a").lower()
                            active_day = False

                            # Days Interpreter
                            if days_active == "all":
                                active_day = True
                            elif days_active == "weekdays":
                                weekdays = ['mon', 'tue', 'wed', 'thu', 'fri']
                                if now_day in weekdays:
                                    active_day = True
                            else:
                                days_active = days_active.split(",")
                                for d in days_active:
                                    if d.lower() == now_day:
                                        active_day = True

                            # Append to start list
                            if snapshot_time >= str(now_max) and snapshot_time <= str(now) and \
                                            active_day is True:
                                snapshot_list.append(i.instance_id)
                                retention_period_per_instance[
                                    i.instance_id] = retention_days
            deleted_snapshot_count = 0

            if auto_snapshot_deletion == "yes":
                # Purge snapshots that are scheduled for deletion and snapshots that were manually deleted by users.
                for snap in ec2_resource.snapshots.filter(OwnerIds=['self']):
                    snapshots.append(snap.id)
                deleted_snapshot_count = purge_history(ec2_resource, snapshots,
                                                       history_table,
                                                       aws_region)
                if deleted_snapshot_count > 0:
                    print "Number of snapshots deleted successfully:", deleted_snapshot_count
                    deleted_snapshot_count = 0

            # Execute Snapshot Commands
            if snapshot_list:
                print "Taking snapshot of all the volumes for", len(
                    snapshot_list), "instance(s)", snapshot_list
                for instance in ec2_resource.instances.filter(
                        InstanceIds=snapshot_list):
                    if auto_snapshot_deletion == "no":
                        retention_days = "NA"
                    else:
                        for key, value in retention_period_per_instance.iteritems(
                        ):
                            if key == instance.id:
                                retention_days = value
                    new_snapshots = backup_instance(ec2_resource, instance,
                                                    retention_days,
                                                    history_table, aws_region)
                    return_snapshot_list = new_snapshots
                    agg_snapshot_list.extend(return_snapshot_list)
                print "Number of new snapshots created:", len(
                    agg_snapshot_list)
                if agg_snapshot_list:
                    tag_snapshots(ec2, agg_snapshot_list)
            else:
                print "No new snapshots taken."

            # Build payload for each region
            if send_data == "yes":
                del_dict = {}
                new_dict = {}
                current_dict = {}
                all_status_dict = {}
                del_dict['snapshots_deleted'] = deleted_snapshot_count
                new_dict['snapshots_created'] = len(agg_snapshot_list)
                current_dict['snapshots_existing'] = len(snapshots)
                all_status_dict.update(current_dict)
                all_status_dict.update(new_dict)
                all_status_dict.update(del_dict)
                region_dict[aws_region] = all_status_dict
                all_region_dict.update(region_dict)

        except Exception as e:
            print e
            continue

    # Build payload for the account
    if send_data == "yes":
        regions_label_dict['regions'] = all_region_dict
        post_dict['Data'] = regions_label_dict
        post_dict['Data'].update({'Version': '1'})
        post_dict['TimeStamp'] = time_stamp
        post_dict['Solution'] = 'SO0007'
        post_dict['UUID'] = uuid
        # API Gateway URL to make HTTP POST call
        url = 'https://metrics.awssolutionsbuilder.com/generic'
        data = json.dumps(post_dict)
        headers = {'content-type': 'application/json'}
        req = Request(url, data, headers)
        rsp = urlopen(req)
        content = rsp.read()
        rsp_code = rsp.getcode()
        print('Response Code: {}'.format(rsp_code))
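
The metrics POST at the end of this handler passes a str body, which works on Python 2; Python 3's urllib requires bytes. A minimal Python 3 sketch of the same call, with the payload trimmed to an illustrative subset:

import json
from urllib.request import urlopen, Request

url = 'https://metrics.awssolutionsbuilder.com/generic'
payload = {'Solution': 'SO0007', 'Data': {'Version': '1'}}  # illustrative subset of post_dict
data = json.dumps(payload).encode('utf-8')  # bytes, as Python 3 requires
req = Request(url, data=data, headers={'content-type': 'application/json'})
print(urlopen(req).getcode())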
Example #14
getDateAndTime = requests.post('http://challenge.code2040.org/api/dating',
                               token)

print getDateAndTime.text

d = yaml.load(getDateAndTime.text)

timeNot = d['datestamp']
seconds = d['interval']

# Get All substrings that have the dates, convert them into int, put them in time class
time = datetime.datetime(int(timeNot[0:4]), int(timeNot[5:7]),
                         int(timeNot[8:10]), int(timeNot[11:13]),
                         int(timeNot[14:16]), int(timeNot[17:19]))

delta = datetime.timedelta(seconds=seconds)

time = time + delta

isoTime = time.isoformat() + "Z"

sendTo2040 = {
    'token': 'dfd02630f543502ffb464929a71eafce',
    'datestamp': isoTime
}
URL = 'http://challenge.code2040.org/api/dating/validate'

request = Request(URL)  # the POST body is supplied to urlopen() below

request.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(request, json.dumps(sendTo2040))
Example #15
import requests
import datetime
import json

try:
    from urllib2 import urlopen, Request
except ImportError:
    from urllib.request import urlopen, Request

# change date to a date close to (before) the SPB crashed

cmd = "https://api.github.com/repos/Bioconductor/Contributions/issues?state=open&per_page=100&since=2019-06-16T00:00:00"

# The commented-out code below was for when there could be more than 100 open
# issues. Because we use `since` and only expect a day or two of failures, we
# don't need to loop over pages - if for some reason there are more than 100,
# then implement the loop (Python uses indentation to determine code blocks,
# so adjust the indentation if you implement it).

issue_rerun = []

#count=1
#while count <= 1:
#    print(count)
#cmd=cmd+"&page="+str(count)

request = Request(cmd)
response = urlopen(request)
res = response.read()
git_dir = json.loads(res)
for k in git_dir:
    issue_rerun.append(k['html_url'])

#count +=1

for i in issue_rerun:
    print(i)
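
The commented-out counter above hints at paging. If more than 100 issues ever need to be fetched, a loop over the page parameter (a sketch, untested against GitHub rate limits) would look like this:

import json
from urllib.request import urlopen, Request

base = ("https://api.github.com/repos/Bioconductor/Contributions/issues"
        "?state=open&per_page=100&since=2019-06-16T00:00:00")
issue_rerun = []
page = 1
while True:
    res = urlopen(Request(base + "&page=" + str(page))).read()
    batch = json.loads(res)
    if not batch:
        break
    issue_rerun.extend(item['html_url'] for item in batch)
    page += 1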
Example #16
# use the GitHub API to get pull request info
for pr_num in pr_numbers:

    # we need to skip very low numbers of pull requests, for example:
    # - a user wants to contribute a change to E+, so they create a fork/branch
    # - their operations result in a pull request into their own repo, so the counting starts at #1...
    # we're at like 5000+, so if we just skip anything less than 1000, we'll be good.
    if int(pr_num) < 1000:
        continue

    # set the url for this pull request
    github_url = "https://api.github.com/repos/NREL/EnergyPlus/issues/" + pr_num

    # make the request
    try:
        req = Request(github_url, headers={'Authorization': 'token %s' % github_token})
        response = urlopen(req)
        the_page = response.read().decode('utf-8')
    except Exception as e:
        print("ERROR: " + str(e))
        continue

    # read the json response
    j = json.loads(the_page)

    # mine the data
    title = j['title']
    labels = j['labels']
    if len(labels) == 0:
        print("WARNING: No labels on PR #" + pr_num, file=sys.stderr)
    for label in labels:
Example #17
def download_or_cache(url, sha):
    """
    Get bytes from the given url or local cache.

    Parameters
    ----------
    url : str
        The url to download

    sha : str
        The sha256 of the file

    Returns
    -------
    BytesIO
        The file loaded into memory.
    """
    cache_dir = _get_xdg_cache_dir()

    def get_from_cache(local_fn):
        if cache_dir is None:
            raise Exception("no cache dir")
        cache_filename = os.path.join(cache_dir, local_fn)
        with open(cache_filename, 'rb') as fin:
            buf = BytesIO(fin.read())
        file_sha = get_fd_hash(buf)
        if file_sha != sha:
            return None
        buf.seek(0)
        return buf

    def write_cache(local_fn, data):
        if cache_dir is None:
            raise Exception("no cache dir")

        cache_filename = os.path.join(cache_dir, local_fn)
        makedirs(cache_dir, exist_ok=True)
        if sys.version_info < (3, ):
            if os.path.exists(cache_filename):
                raise FileExistsError
            mode = 'wb'
        else:
            mode = 'xb'
        old_pos = data.tell()
        data.seek(0)
        with open(cache_filename, mode=mode) as fout:
            fout.write(data.read())
        data.seek(old_pos)

    try:
        return get_from_cache(sha)
    except Exception:
        pass

    # jQueryUI's website blocks direct downloads from urllib.request's
    # default User-Agent, but not (for example) wget; so I don't feel too
    # bad passing in an empty User-Agent.
    with urlopen(Request(url, headers={"User-Agent": ""})) as req:
        file_contents = BytesIO(req.read())
        file_contents.seek(0)

    file_sha = get_fd_hash(file_contents)

    if file_sha != sha:
        raise Exception(
            ("The download file does not match the "
             "expected sha.  {url} was expected to have "
             "{sha} but it had {file_sha}").format(sha=sha,
                                                   file_sha=file_sha,
                                                   url=url))

    try:
        write_cache(sha, file_contents)
    except Exception:
        pass

    file_contents.seek(0)
    return file_contents
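
get_fd_hash() is referenced above but not included in this excerpt. A plausible stand-in (an assumption, not necessarily the original) hashes the contents of a file-like object with sha256:

import hashlib

def get_fd_hash(fd):
    # Hypothetical helper: sha256 hex digest of a seekable file-like object.
    pos = fd.tell()
    fd.seek(0)
    digest = hashlib.sha256(fd.read()).hexdigest()
    fd.seek(pos)
    return digest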
Example #18
    def _update_request_token(self):
        request_token_url = 'http://api.t.163.com/oauth/request_token'
        request = oauth.OAuthRequest.from_consumer_and_token(self._consumer, http_url=request_token_url)
        request.sign_request(self._sign_method, self._consumer, None)
        resp = urlopen(Request(request_token_url, headers=request.to_header()))
        self.request_token = oauth.OAuthToken.from_string(resp.read())
Example #19
    def get_new_cookie(self):
        # Start by prompting user to input their credentials

        # Another Python2/3 workaround
        try:
            new_username = raw_input("Username: ")
        except NameError:
            new_username = input("Username: ")
        new_password = getpass.getpass(prompt="Password (will not be displayed): ")

        # Build URS4 Cookie request
        auth_cookie_url = self.asf_urs4['url'] + '?client_id=' + self.asf_urs4[
            'client'] + '&redirect_uri=' + self.asf_urs4[
                'redir'] + '&response_type=code&state='

        try:
            #python2
            user_pass = base64.b64encode(
                bytes(new_username + ":" + new_password))
        except TypeError:
            #python3
            user_pass = base64.b64encode(
                bytes(new_username + ":" + new_password, "utf-8"))
            user_pass = user_pass.decode("utf-8")

        # Authenticate against URS, grab all the cookies
        self.cookie_jar = MozillaCookieJar()
        opener = build_opener(HTTPCookieProcessor(self.cookie_jar),
                              HTTPHandler(), HTTPSHandler(**self.context))
        request = Request(
            auth_cookie_url,
            headers={"Authorization": "Basic {0}".format(user_pass)})

        # Watch out for cookie rejection!
        try:
            response = opener.open(request)
        except HTTPError as e:
            if "WWW-Authenticate" in e.headers and "Please enter your Earthdata Login credentials" in e.headers[
                    "WWW-Authenticate"]:
                print(
                    " > Username and Password combo was not successful. Please try again."
                )
                return False
            else:
                # If an error happens here, the user most likely has not confirmed EULA.
                print(
                    "\nIMPORTANT: There was an error obtaining a download cookie!"
                )
                print(
                    "Your user appears to lack permission to download data from the ASF Datapool."
                )
                print(
                    "\n\nNew users: you must first log into Vertex and accept the EULA. In addition, your Study Area must be set at Earthdata https://urs.earthdata.nasa.gov"
                )
                exit(-1)
        except URLError as e:
            print(
                "\nIMPORTANT: There was a problem communicating with URS, unable to obtain cookie. "
            )
            print("Try cookie generation later.")
            exit(-1)

        # Did we get a cookie?
        if self.check_cookie_is_logged_in(self.cookie_jar):
            #COOKIE SUCCESS!
            self.cookie_jar.save(self.cookie_jar_path)
            return True

        # if we aren't successful generating the cookie, nothing will work. Stop here!
        print(
            "WARNING: Could not generate new cookie! Cannot proceed. Please try Username and Password again."
        )
        print("Response was {0}.".format(response.getcode()))
        print(
            "\n\nNew users: you must first log into Vertex and accept the EULA. In addition, your Study Area must be set at Earthdata https://urs.earthdata.nasa.gov"
        )
        exit(-1)
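
The base64 juggling above is the standard way to hand-build an HTTP Basic Authorization header. A compact Python 3 sketch of the same idea (the credentials are placeholders):

import base64
from urllib.request import Request

username, password = "user", "secret"  # placeholders
user_pass = base64.b64encode("{0}:{1}".format(username, password).encode("utf-8")).decode("utf-8")
request = Request("https://urs.earthdata.nasa.gov/oauth/authorize",
                  headers={"Authorization": "Basic {0}".format(user_pass)})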
Example #20
    def update_access_token(self, pin):
        request = oauth.OAuthRequest.from_consumer_and_token(self._consumer, self.request_token, http_url=self._access_token_url, verifier=str(pin))
        request.sign_request(self._sign_method, self._consumer, self.request_token)
        resp = urlopen(Request(self._access_token_url, headers=request.to_header()))
        self.access_token = oauth.OAuthToken.from_string(resp.read())
        return self.access_token
Example #21
    def handle(self, *args, **options):
        bbc_url = 'https://www.bbc.com/portuguese/brasil-45215784'
        req = Request(bbc_url)
        try:
            response = urlopen(req)
        except HTTPError as e:
            print 'The server couldn\'t fulfill the request.'
            print 'Error code: ', e.code
        except URLError as e:
            print 'We failed to reach a server.'
            print 'Reason: ', e.reason
        else:
            data = response.read()
            inicio = data.find('panel-two-content')
            fim = data.rfind('<style>', inicio)
            parte_util = data[inicio:fim]
            print parte_util

            try:
                with transaction.atomic():
                    areas = re.findall('<h2>(.*?)</h2>', parte_util)
                    print u'Áreas'
                    for area in areas:
                        area = area.strip()
                        print area
                        if not Area.objects.filter(nome=area).exists():
                            Area.objects.create(nome=area)

                    proposta_areas = re.findall('<h2>(.*?)(?=<h2>|\Z)',
                                                parte_util, re.DOTALL)
                    if len(proposta_areas) != 6:
                        raise ValueError('erro na quantidade de areas')

                    for proposta_area in proposta_areas:
                        # Look up the current area
                        area_atual = Area.objects.get(
                            nome=proposta_area[:proposta_area.find('</h2>'
                                                                   )].strip())

                        # Look up the candidate and create it if it does not exist
                        candidatos = re.findall(
                            'party-content(.*?)(?=</ul>|\Z)', proposta_area,
                            re.DOTALL)

                        for candidato in candidatos:
                            dados_candidato = re.findall(
                                '<span class="header"><span class="header-bold">(.*?)</span> (.*?)</span>',
                                candidato, re.DOTALL)
                            #                             print dados_candidato
                            nome_candidato, partido = dados_candidato[0]
                            nome_candidato = nome_candidato.strip()
                            partido = partido.strip()
                            if not Candidato.objects.filter(
                                    nome=nome_candidato).exists():
                                Candidato.objects.create(nome=nome_candidato,
                                                         numero=0,
                                                         partido=partido)

                            candidato_atual = Candidato.objects.get(
                                nome=nome_candidato)
                            propostas = re.findall('<li>(.*?)</li>', candidato,
                                                   re.DOTALL)
                            for proposta in propostas:
                                proposta = proposta.strip().replace(
                                    '&quot;', '"')
                                if proposta == 'Sem dados até o momento':
                                    continue
#                                 print proposta
                                if not Proposta.objects.filter(
                                        texto=proposta,
                                        candidato=candidato_atual).exists():
                                    Proposta.objects.create(
                                        texto=proposta,
                                        candidato=candidato_atual,
                                        area=area_atual)
            except:
                raise


#             <span class="header"><span class="header-bold">(.*?)</span> (.*?)</span>
Example #22
                                                self.title, self.season,
                                                self.episode, self.starDate,
                                                self.airDate)

if __name__ == '__main__':
    
    
    # THE ORIGINAL SERIES
    
    reSeasons = re.compile('<h3><span class=.*?</table>')
    reEpisodes = re.compile('<tr class="vevent".*?</tr>')
    reNumbers = re.compile('<th scope="row" id=".*?" style="text-align: center;.*?">(\d+)</th><td>(\d+)</td>')
    reTitleStarDate = re.compile('"<a href=".*?" title=".*?">(.*?)</a>"</td><td>(.*?)</td>')
    reAirDate = re.compile('\(<span class=".*?">(.*?)</span>\)')
    
    request = Request('http://en.wikipedia.org/wiki/List_of_Star_Trek:_The_Original_Series_episodes',
                  headers={'User-Agent': "Magic Browser"})
    response = urlopen(request)
    
    rawHtml = response.read()
    rawHtml = rawHtml.replace('\n', '')
    
    episodes = []
    rawSeasons = re.findall(reSeasons, rawHtml)
    season = 0
    for rawSeason in rawSeasons:
        rawEpisodes = re.findall(reEpisodes, rawSeason)
        for rawEpisode in rawEpisodes:
            episode = Episode('Original Series')
            numbers = re.findall(reNumbers, rawEpisode)
            episode.season = str(season)
            
Example #23
def get_page(**kwargs):
    """
    Retrieves page content from a given target URL
    """

    url = kwargs.get("url", None)
    post = kwargs.get("data", None)
    header = kwargs.get("header", None)
    cookie = kwargs.get("cookie", None)
    user_agent = kwargs.get("user_agent", None)
    verbose = kwargs.get("verbose", False)

    headers = {}
    parsed_url = None
    page = None

    if url is None:
        raise Exception("[!] URL cannot be None.")

    try:
        parsed_url = urlsplit(url)
    except:
        raise Exception("[!] Unable to parse URL: %s" % url)

    if user_agent is None:
        user_agent = "%s %s" % (NAME, VERSION)

    if post is None:
        parsed_url = parsed_url._replace(
            query=urlencode(parse_qsl(parsed_url.query)))
        url = urlunsplit(parsed_url)
    else:
        post = urlencode(parse_qsl(post), "POST")

    # Perform HTTP Request
    try:
        headers[HTTP_HEADER.USER_AGENT] = user_agent

        if cookie:
            headers[HTTP_HEADER.COOKIE] = cookie

        if header:
            headers[header.split("=")[0]] = header.split("=", 1)[1]

        req = Request(url, post, headers)
        conn = urlopen(req)

        if not args.write_files and kb.original_response and kb.invalid_response:
            _ = conn.headers.get(HTTP_HEADER.CONTENT_LENGTH, "")
            if _.isdigit():
                _ = int(_)
                if _ - max(len(kb.original_response), len(
                        kb.invalid_response)) > SKIP_RETRIEVE_THRESHOLD:
                    page = "".join(
                        random.choice(string.letters) for i in xrange(_))

        # Get HTTP Response
        if not page:
            page = conn.read()

    except KeyboardInterrupt:
        raise

    except Exception, e:
        if hasattr(e, "read"):
            page = page or e.read()

        if verbose:
            if hasattr(e, "msg"):
                print("[x] Error msg '%s'" % e.msg)
            if hasattr(e, "reason"):
                print("[x] Error reason '%s'" % e.reason)
            if hasattr(e, "message"):
                print("[x] Error message '%s'" % e.message)
            if hasattr(e, "code"):
                print("[x] HTTP error code '%d'" % e.code)
            if hasattr(e, "info"):
                print("[x] Response headers '%s'" % e.info())
Example #24
def request_issue_creation(path, arguments, error_message):
    """
    request the creation and create the issue
    """

    # TODO:/ we're gonna go ahead and give you guys another chance
    #if not checksum(path):
    #    lib.output.error(
    #        "It seems you have changed some of the code in the program. We do not accept issues from edited "
    #        "code as we have no way of reliably testing your issue. We recommend that you only use the version "
    #        "that is available on github, no issue will be created for this problem."
    #    )
    #    exit(1)

    question = raw_input("do you want to create an anonymized issue?[y/N]: ")
    if question.lower().startswith("y"):
        if check_version_number(lib.banner.VERSION):
            # gonna read a chunk of it instead of one line
            chunk = 4096
            with open(path) as data:
                identifier = create_identifier(error_message)
                # gotta seek to the beginning of the file since it's already been read `4096` into it
                data.seek(0)
                issue_title = "Unhandled Exception ({})".format(identifier)

            issue_data = {
                "title":
                issue_title,
                "body": ("Autosploit version: `{}`\n"
                         "OS information: `{}`\n"
                         "Running context: `{}`\n"
                         "Error mesage: `{}`\n"
                         "Error traceback:\n```\n{}\n```\n"
                         "Metasploit launched: `{}`\n".format(
                             lib.banner.VERSION,
                             platform.platform(),
                             ' '.join(sys.argv),
                             error_message,
                             open(path).read(),
                             lib.settings.MSF_LAUNCHED,
                         ))
            }

            _json_data = json.dumps(issue_data)
            if sys.version_info > (3, ):  # python 3
                _json_data = _json_data.encode("utf-8")

            if not ensure_no_issue(identifier):
                req = Request(
                    url=
                    "https://api.github.com/repos/nullarray/autosploit/issues",
                    data=_json_data,
                    headers={
                        "Authorization":
                        "token {}".format(get_token(lib.settings.TOKEN_PATH))
                    })
                urlopen(req, timeout=10).read()
                lib.output.info(
                    "issue has been generated with the title '{}', at the following "
                    "URL '{}'".format(issue_title, find_url(identifier)))
            else:
                lib.output.error(
                    "someone has already created this issue here: {}".format(
                        find_url(identifier)))
            try:
                os.remove(path)
            except:
                pass
        else:
            sep = "-" * 35
            lib.output.error(
                "it appears you are not using the current version of AutoSploit please update to the newest version "
                "and try again, this can also happen when a new update has been pushed and the cached raw page has "
                "not been updated yet. If you feel this is the later please create and issue on AutoSploits Github "
                "page with the following info:")
            print("{}\n{}\n{}".format(sep, open(path).read(), sep))
    else:
        lib.output.info(
            "the issue has been logged to a file in path: '{}'".format(path))
Exemple #25
0
def speedtest():
    """Run the full speedtest.net test"""

    global shutdown_event, source
    shutdown_event = threading.Event()

    signal.signal(signal.SIGINT, ctrl_c)

    description = (
        'Command line interface for testing internet bandwidth using '
        'speedtest.net.\n'
        '------------------------------------------------------------'
        '--------------\n'
        'https://github.com/sivel/speedtest-cli')

    parser = ArgParser(description=description)
    # Give optparse.OptionParser an `add_argument` method for
    # compatibility with argparse.ArgumentParser
    try:
        parser.add_argument = parser.add_option
    except AttributeError:
        pass
    parser.add_argument('--bytes',
                        dest='units',
                        action='store_const',
                        const=('bytes', 1),
                        default=('bits', 8),
                        help='Display values in bytes instead of bits. Does '
                        'not affect the image generated by --share')
    parser.add_argument('--share',
                        action='store_true',
                        help='Generate and provide a URL to the speedtest.net '
                        'share results image')
    parser.add_argument('--simple',
                        action='store_true',
                        help='Suppress verbose output, only show basic '
                        'information')
    parser.add_argument('--list',
                        action='store_true',
                        help='Display a list of speedtest.net servers '
                        'sorted by distance')
    parser.add_argument('--server', help='Specify a server ID to test against')
    parser.add_argument('--mini', help='URL of the Speedtest Mini server')
    parser.add_argument('--source', help='Source IP address to bind to')
    parser.add_argument('--version',
                        action='store_true',
                        help='Show the version number and exit')

    options = parser.parse_args()
    if isinstance(options, tuple):
        args = options[0]
    else:
        args = options
    del options

    # Print the version and exit
    if args.version:
        version()

    # If specified bind to a specific IP address
    if args.source:
        source = args.source
        socket.socket = bound_socket

    if not args.simple:
        print_('Retrieving speedtest.net configuration...')
    try:
        config = getConfig()
    except URLError:
        print_('Cannot retrieve speedtest configuration')
        sys.exit(1)

    if not args.simple:
        print_('Retrieving speedtest.net server list...')
    if args.list or args.server:
        servers = closestServers(config['client'], True)
        if args.list:
            serverList = []
            for server in servers:
                line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
                        '[%(d)0.2f km]' % server)
                serverList.append(line)
            # Python 2.7 and newer seem to be ok with the resultant encoding
            # from parsing the XML, but older versions have some issues.
            # This block should detect whether we need to encode or not
            try:
                unicode()
                print_('\n'.join(serverList).encode('utf-8', 'ignore'))
            except NameError:
                print_('\n'.join(serverList))
            except IOError:
                pass
            sys.exit(0)
    else:
        servers = closestServers(config['client'])

    if not args.simple:
        print_('Testing from %(isp)s (%(ip)s)...' % config['client'])

    if args.server:
        try:
            best = getBestServer(
                filter(lambda x: x['id'] == args.server, servers))
        except IndexError:
            print_('Invalid server ID')
            sys.exit(1)
    elif args.mini:
        name, ext = os.path.splitext(args.mini)
        if ext:
            url = os.path.dirname(args.mini)
        else:
            url = args.mini
        urlparts = urlparse(url)
        try:
            f = urlopen(args.mini)
        except:
            print_('Invalid Speedtest Mini URL')
            sys.exit(1)
        else:
            text = f.read()
            f.close()
        extension = re.findall('upload_extension: "([^"]+)"', text.decode())
        if not extension:
            for ext in ['php', 'asp', 'aspx', 'jsp']:
                try:
                    f = urlopen('%s/speedtest/upload.%s' % (args.mini, ext))
                except:
                    pass
                else:
                    data = f.read().strip()
                    if (f.code == 200 and len(data.splitlines()) == 1
                            and re.match('size=[0-9]', data)):
                        extension = [ext]
                        break
        if not urlparts or not extension:
            print_('Please provide the full URL of your Speedtest Mini server')
            sys.exit(1)
        servers = [{
            'sponsor': 'Speedtest Mini',
            'name': urlparts[1],
            'd': 0,
            'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
            'latency': 0,
            'id': 0
        }]
        try:
            best = getBestServer(servers)
        except:
            best = servers[0]
    else:
        if not args.simple:
            print_('Selecting best server based on latency...')
        best = getBestServer(servers)

    if not args.simple:
        # Python 2.7 and newer seem to be ok with the resultant encoding
        # from parsing the XML, but older versions have some issues.
        # This block should detect whether we need to encode or not
        try:
            unicode()
            print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
                    '%(latency)s ms' % best).encode('utf-8', 'ignore'))
        except NameError:
            print_('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
                   '%(latency)s ms' % best)
    else:
        print_('Ping: %(latency)s ms' % best)

    sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
    urls = []
    for size in sizes:
        for i in range(0, 4):
            urls.append('%s/random%sx%s.jpg' %
                        (os.path.dirname(best['url']), size, size))
    if not args.simple:
        print_('Testing download speed', end='')
    dlspeed = downloadSpeed(urls, args.simple)
    if not args.simple:
        print_()
    print_('Download: %0.2f M%s/s' %
           ((dlspeed / 1000 / 1000) * args.units[1], args.units[0]))

    sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
    sizes = []
    for size in sizesizes:
        for i in range(0, 25):
            sizes.append(size)
    if not args.simple:
        print_('Testing upload speed', end='')
    ulspeed = uploadSpeed(best['url'], sizes, args.simple)
    if not args.simple:
        print_()
    print_('Upload: %0.2f M%s/s' %
           ((ulspeed / 1000 / 1000) * args.units[1], args.units[0]))

    if args.share and args.mini:
        print_('Cannot generate a speedtest.net share results image while '
               'testing against a Speedtest Mini server')
    elif args.share:
        dlspeedk = int(round((dlspeed / 1000) * 8, 0))
        ping = int(round(best['latency'], 0))
        ulspeedk = int(round((ulspeed / 1000) * 8, 0))

        # Build the request to send results back to speedtest.net
        # We use a list instead of a dict because the API expects parameters
        # in a certain order
        apiData = [
            'download=%s' % dlspeedk,
            'ping=%s' % ping,
            'upload=%s' % ulspeedk, 'promo=',
            'startmode=%s' % 'pingselect',
            'recommendedserverid=%s' % best['id'],
            'accuracy=%s' % 1,
            'serverid=%s' % best['id'],
            'hash=%s' % md5(
                ('%s-%s-%s-%s' %
                 (ping, ulspeedk, dlspeedk, '297aae72')).encode()).hexdigest()
        ]

        req = Request('http://c.speedtest.net/api/api.php',
                      data='&'.join(apiData).encode())
        req.add_header('Referer', 'http://c.speedtest.net/flash/speedtest.swf')
        f = urlopen(req)
        response = f.read()
        code = f.code
        f.close()

        if int(code) != 200:
            print_('Could not submit results to speedtest.net')
            sys.exit(1)

        qsargs = parse_qs(response.decode())
        resultid = qsargs.get('resultid')
        if not resultid or len(resultid) != 1:
            print_('Could not submit results to speedtest.net')
            sys.exit(1)

        print_('Share results: http://c.speedtest.net/result/%s.png' %
               resultid[0])
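
The --share branch above reduces to a form-encoded POST with a Referer header, followed by parse_qs on the response body. A minimal sketch of that pattern in isolation (Python 2); the helper name post_form is illustrative:

from urllib2 import Request, urlopen
from urlparse import parse_qs

def post_form(url, pairs, referer=None):
    # pairs is an ordered list of 'key=value' strings, joined exactly as given
    req = Request(url, data='&'.join(pairs))
    if referer:
        req.add_header('Referer', referer)
    f = urlopen(req)
    try:
        return f.code, parse_qs(f.read())
    finally:
        f.close()

Keeping the parameters in a list rather than a dict preserves their order, which, as the comment in the example notes, the speedtest.net API expects.
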
Exemple #26
0
class YTTrailer:
    def __init__(self, session):
        self.session = session


# 		self.l3cert = etpm.getCert(eTPM.TPMD_DT_LEVEL3_CERT)

    def showTrailer(self, eventname):
        if eventname:
            feeds = self.getYTFeeds(eventname, 1)
            if feeds and len(feeds.entry) >= 1:
                ref = self.setServiceReference(feeds.entry[0])
                if ref:
                    self.session.open(TrailerPlayer, ref)

    def getYTFeeds(self, eventname, max_results):
        yt_service = gdata.youtube.service.YouTubeService()
        # developer key and client id taken from mytube-plugin with permission from acid_burn.
        yt_service.developer_key = 'AI39si4AjyvU8GoJGncYzmqMCwelUnqjEMWTFCcUtK-VUzvWygvwPO-sadNwW5tNj9DDCHju3nnJEPvFy4WZZ6hzFYCx8rJ6Mw'
        yt_service.client_id = 'ytapi-dream-MyTubePlayer-i0kqrebg-0'
        query = gdata.youtube.service.YouTubeVideoQuery()
        if int(config.plugins.yttrailer.best_resolution.value) <= 1:
            shd = "HD"
        else:
            shd = ""
        query.vq = "%s %s Trailer %s" % (
            eventname, shd, config.plugins.yttrailer.ext_descr.value)
        query.max_results = max_results
        try:
            feeds = yt_service.YouTubeQuery(query)
        except gaierror:
            feeds = None
        return feeds

    def setServiceReference(self, entry):
        url = self.getVideoUrl(entry)
        if url:
            ref = eServiceReference(4097, 0, url)
            ref.setName(entry.media.title.text)
        else:
            ref = None
        return ref

    def getTubeId(self, entry):
        ret = None
        if entry.media.player:
            split = entry.media.player.url.split("=")
            ret = split.pop()
            if ret.startswith('youtube_gdata'):
                tmpval = split.pop()
                if tmpval.endswith("&feature"):
                    tmp = tmpval.split("&")
                    ret = tmp.pop(0)
        return ret

    def getVideoUrl(self, entry):
        std_headers = {
            'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.6) Gecko/20100627 Firefox/3.6.6',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-us,en;q=0.5',
        }

        VIDEO_FMT_PRIORITY_MAP = {
            '18': 4,  #MP4 360p
            '35': 5,  #FLV 480p
            '34': 6,  #FLV 360p
        }

        if int(config.plugins.yttrailer.best_resolution.value) <= 1:
            VIDEO_FMT_PRIORITY_MAP["38"] = 1  #MP4 Original (HD)
            VIDEO_FMT_PRIORITY_MAP["22"] = 3  #MP4 720p (HD)

            if int(config.plugins.yttrailer.best_resolution.value) == 0:
                VIDEO_FMT_PRIORITY_MAP["37"] = 2  #MP4 1080p (HD)

        video_url = None
        video_id = str(self.getTubeId(entry))

        # Getting video webpage
        #URLs for YouTube video pages will change from the format http://www.youtube.com/watch?v=ylLzyHk54Z0 to http://www.youtube.com/watch#!v=ylLzyHk54Z0.
        watch_url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en' % video_id
        watchrequest = Request(watch_url, None, std_headers)
        try:
            print "[YTTrailer] trying to find out if a HD Stream is available", watch_url
            watchvideopage = urlopen2(watchrequest).read()
        except (URLError, HTTPException, socket_error), err:
            print "[YTTrailer] Error: Unable to retrieve watchpage - Error code: ", str(
                err)
            return video_url

        # Get video info
        for el in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
            info_url = (
                'http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                % (video_id, el))
            request = Request(info_url, None, std_headers)
            try:
                infopage = urlopen2(request).read()
                videoinfo = parse_qs(infopage)
                if ('url_encoded_fmt_stream_map' in videoinfo
                        or 'fmt_url_map' in videoinfo):
                    break
            except (URLError, HTTPException, socket_error), err:
                print "[YTTrailer] Error: unable to download video infopage", str(
                    err)
                return video_url
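
The snippet is cut off before the format selection, but VIDEO_FMT_PRIORITY_MAP suggests the next step: among the formats the video actually offers, pick the one with the lowest priority value. A minimal sketch of that ranking, assuming the stream URLs have already been extracted from videoinfo into a plain fmt-id-to-URL dict (that extraction is not shown in the original):

def pick_best_fmt(available_fmts, priority_map):
    # available_fmts: dict mapping a fmt id such as '22' to its stream URL
    candidates = [fmt for fmt in available_fmts if fmt in priority_map]
    if not candidates:
        return None
    best = min(candidates, key=lambda fmt: priority_map[fmt])
    return best, available_fmts[best]
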
Exemple #27
0
from urllib2 import Request, urlopen, URLError, HTTPError

req = Request('http://bbs.csdn.net/callmewhy')
try:
    response = urlopen(req)
except HTTPError, e:
    print "The server couldn't fulfill the request."
    print 'Error code:', e.code

except URLError, e:
    print 'We failed to reach a server.'
    print 'Reason:', e.reason
else:
    print 'No exception was raised.'
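
The same pattern under Python 3, where urllib2 was split into urllib.request and urllib.error; this is a direct translation, not part of the original example. Note that HTTPError is a subclass of URLError, so it must be caught first:

from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError

req = Request('http://bbs.csdn.net/callmewhy')
try:
    response = urlopen(req)
except HTTPError as e:
    print("The server couldn't fulfill the request.")
    print('Error code:', e.code)
except URLError as e:
    print('We failed to reach a server.')
    print('Reason:', e.reason)
else:
    print('No exception was raised.')
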
Exemple #28
0
                "data_type": "http_flood_scanner_probe",
                "blacklist_set_size": blacklist_set_size,
                "auto_block_ip_set_size": auto_block_ip_set_size,
                "allowed_requests": allowed_requests,
                "blocked_requests_all": blocked_requests_all,
                "blocked_requests_auto_block": blocked_requests_auto_block,
                "blocked_requests_blacklist": blocked_requests_blacklist,
                "waf_type": environ['LOG_TYPE']
            }
        }

        url = 'https://metrics.awssolutionsbuilder.com/generic'
        data = json.dumps(usage_data)
        headers = {'content-type': 'application/json'}
        print("[send_anonymous_usage_data] %s" % data)
        req = Request(url, data, headers)
        rsp = urlopen(req)
        content = rsp.read()
        rspcode = rsp.getcode()
        print('[send_anonymous_usage_data] Response Code: {}'.format(rspcode))
        print(
            '[send_anonymous_usage_data] Response Content: {}'.format(content))

        print("[send_anonymous_usage_data] End")
    except Exception, e:
        print("[send_anonymous_usage_data] Failed to Send Data")


#======================================================================================================================
# Lambda Entry Point
#======================================================================================================================
import categoriser
from urllib2 import Request, urlopen, URLError
import json
import os,sys
import codecs
import time

reload(sys)
sys.setdefaultencoding("utf-8")

request = Request('http://juicer.api.bbci.co.uk/articles?q=%22london%22&recent_first=yes&api_key=<API_KEY>')

before = "2016-06-01"
after = "2016-01-01"
key = '<API-KEY>'
source = "&sources%5B%5D=1"

call = "http://juicer.api.bbci.co.uk/articles?recent_first=yes" + source + "&published_after=" + after + "T00:00:00.000Z" + "&published_before=" + before + "T00:00:00.000Z" + "&size=10" + "&api_key=" + key
print call

response = urlopen(call)
articleJSON = response.read()
result = json.loads(articleJSON)
hits = int(result['total'])

print hits

try:

	f = open('bbc-dataset-201601-201607.tab', 'a')
	size = 10 # default apparently, getting error with setting size in API call
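
Concatenating the query string by hand, as above, is easy to get wrong; urlencode handles the escaping (including the sources[] brackets) automatically. A minimal sketch of the same request built that way (Python 2); the date values and the <API-KEY> placeholder are copied from the snippet above, not real credentials:

import json
from urllib import urlencode
from urllib2 import urlopen

before, after, key = "2016-06-01", "2016-01-01", "<API-KEY>"
params = {
    'recent_first': 'yes',
    'sources[]': 1,
    'published_after': after + 'T00:00:00.000Z',
    'published_before': before + 'T00:00:00.000Z',
    'size': 10,
    'api_key': key,
}
call = 'http://juicer.api.bbci.co.uk/articles?' + urlencode(params)
result = json.loads(urlopen(call).read())
print(result['total'])
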
Exemple #30
0
else:
    depsonly = None

try:
    device = product[product.index("_") + 1:]
except:
    device = product

if not depsonly:
    print "Device %s not found. Attempting to retrieve device repository from DU Github (http://github.com/jmz-aosp)." % device

repositories = []

page = 1
while not depsonly:
    request = Request("https://api.github.com/users/jmz-aosp/repos?page=%d" %
                      page)
    api_file = os.getenv("HOME") + '/api_token'
    if os.path.isfile(api_file):
        infile = open(api_file, 'r')
        token = infile.readline()
        request.add_header('Authorization', 'token %s' % token.strip())
    result = json.loads(urllib2.urlopen(request).read())
    if len(result) == 0:
        break
    for res in result:
        repositories.append(res)
    page = page + 1

local_manifests = r'.repo/local_manifests'
if not os.path.exists(local_manifests): os.makedirs(local_manifests)