Example 1
    def ensureFileLocal(self, inFilePathOrURL):
        '''
        Takes a file path or URL. Sets self.localFilePath
        to that same path if the file is local, or if it
        is remote but uncompressed. If the file is remote
        and compressed, retrieves it into a local temp file
        and points self.localFilePath at that file; in that
        case the flag self.deleteTempFile is set to True.

        :param inFilePathOrURL: file path or URL to file
        :type inFilePathOrURL: String
        '''
        self.localFilePath = inFilePathOrURL
        self.deleteTempFile = False
        if self.compression == COMPRESSION_TYPE.NO_COMPRESSION:
            return
        # Got compressed file; is it local?
        parseResult = urlparse(inFilePathOrURL)
        if parseResult.scheme == 'file':
            self.localFilePath = parseResult.path
            return
        opener = FancyURLopener()
        # Throws IOError if URL does not exist:
        self.localFilePath = opener.retrieve(inFilePathOrURL)[0]
        self.deleteTempFile = True
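FancyURLopener moved between releases: it lives in urllib on Python 2 and in urllib.request on Python 3, where it has been deprecated since 3.3. The snippets here assume it is already imported; a version-agnostic import, as a sketch:

    try:
        from urllib.request import FancyURLopener  # Python 3 (deprecated since 3.3)
    except ImportError:
        from urllib import FancyURLopener  # Python 2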
Example 2
	def __init__(self):
		try:
			context = ssl._create_unverified_context()
		except AttributeError:
			context = None

		FancyURLopener.__init__(self, context=context)
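A usage sketch for the subclass above, here given the hypothetical name UnverifiedOpener. The AttributeError fallback covers interpreters that predate ssl._create_unverified_context(), which appeared when certificate verification became the default (Python 2.7.9 / 3.4.3). Skipping verification is only reasonable for hosts you trust:

    opener = UnverifiedOpener()  # hypothetical name for the subclass above
    response = opener.open('https://self-signed.example.com/')
    print(response.read()[:80])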
Example 3
    def POST(self):
        # disable nginx buffering
        web.header('X-Accel-Buffering', 'no')

        i = web.input(fast=False)
        # get the app config; if it does not exist, it will be created
        servers = get_servers(i.app_name)
        if not servers:
            servers = ['deploy']
            save_app_option(i.app_name, 'deploy_servers', 'deploy')

        yield "%d:%s" % (logging.INFO, render_ok("Application allowed to deploy those servers"))
        yield "%d:%s" % (logging.INFO, render_ok(','.join(servers)))
        servers = escape_servers(servers)

        result = {}
        data = {'app_name': i.app_name, 'app_url': i.app_url}
        for server in servers:
            url = SUFFIX % server
            try:
                opener = FancyURLopener()
                f = opener.open(url, urlencode(data))
                line = ''  # to avoid NameError for line if f has no output at all.
                for line in iter(f.readline, ''):
                    logger.info(line)
                    yield line
                if not any(word in line for word in ['succeeded', 'failed']):
                    result[server] = 'Failed'
                else:
                    result[server] = 'Succeeded'
            except Exception, e:
                yield "%d:%s" % (logging.ERROR, render_err(str(e)))
                result[server] = 'Failed'
Example 4
def unicode_urlopen(url, accept_lang=None):
    """Returns a *Unicode* file-like object for non-local documents.
    Client must ensure that the URL points to non-binary data. Pass in
    an Accept-Language value to configure the FancyURLopener we
    use."""

    opener = FancyURLopener()

    if accept_lang:
        opener.addheader("Accept-Language", accept_lang)

    # Convert the bytes file-like object returned by urllib
    # (bytes in both Python 2 and Python 3) into a Unicode
    # file-like object with help from StringIO.
    page = opener.open(url)
    encoding = page.headers['content-type']
    encoding = encoding.split('charset=')
    if len(encoding) > 1:
        encoding = encoding[-1]
        page = page.read().decode(encoding)
    else:
        page = page.read()
        encoding = meta_encoding(page) or 'utf8'
        page = page.decode(encoding)

    page = StringIO(page)
    return page
Example 5
 def get(self, url, headers=None):
     o = FancyURLopener()
     if headers:
         for k, v in headers.items():
             o.addheader(k, v)
     self.req = o.open(url)
     return self
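Since the method stores the response on self.req and returns self, calls can be chained; a sketch assuming the method sits on a small client class (the name Client is hypothetical):

    client = Client()
    body = client.get('http://example.com/', headers={'Accept-Language': 'en'}).req.read()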
Example 6
def site_a(site):
    if site[0:7] != 'http://':
        site = 'http://' + site
    opener = FancyURLopener()  # create the page fetcher
    page = opener.open(site)  # a test URL
    html = page.read()  # connect to the server and grab the returned HTML
    # print html  # uncomment to inspect the raw HTML
    soup = BeautifulSoup(html, "lxml")  # strip the HTML tags, keeping only the content
    for script in soup(["script", "style"]):
        script.extract()  # drop the JavaScript and CSS blocks
    conteudo = soup.get_text()

    limpa = ['com', 'br', 'www', 'http']
    site = re.sub(r'[^\w]', " ", site).split()
    novo_site = ''
    for a in site:
        if a not in limpa:
            novo_site += a
    site = novo_site
    file = open('site_w/' + site + '.txt', 'w')
    file.write((conteudo.encode('utf-8')).lower())  # write the cleaned text (no HTML tags, JavaScript, or CSS)
    lista_temas = {
        'esporte': ('futebol', 'bola', 'jogador', 'esporte', 'flamengo',
                    'vasco', 'botafogo', 'fluminense', 'sport'),
        'engenharia': ('engenharia', 'engenharias', 'engineer'),
        'jogos': ('jogo', 'jogos', 'game', 'games')
    }
    tema(lista_temas, site)
Example 7
 def getNaturalRandom(self, min=1, max=49, nbNumbers=6):
     unique = False
     while not unique:
         url_opener = FancyURLopener()
         data = url_opener.open("http://www.random.org/integers/?num=%s&min=%s&max=%s&col=%s&base=10&format=plain&rnd=new" % (nbNumbers, min, max, nbNumbers))
         randList = data.readlines()[0].rstrip('\n').split('\t')
         unique = bool(len(randList) == len(list(set(randList))))
     return sorted([int(i) for i in randList])
Example 8
def utOpen(file):
    # Open file
    if 'http' in file:
        opener = FancyURLopener()
        f = opener.open(file)
    else:
        f = open(file,'rb+')
    return f
Example 9
    def download(self, download_dir):
        dir_util.mkpath(download_dir)
        url = self.installer_url()
        print 'Downloading:', url

        web = FancyURLopener()
        web.retrieve(url, path.join(download_dir, path.basename(url)),
                     display_progress)
Example 10
 def __init__(self, *args):
     FancyURLopener.__init__(self, *args)
     for i, (header, val) in enumerate(self.addheaders):
         if header == "User-Agent":
             del self.addheaders[i]
             break
     self.addheader('User-Agent',
                    'OSMViz/1.0 +http://cbick.github.com/osmviz')
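URLopener seeds addheaders with a single ('User-Agent', self.version) pair, which is why the loop above only has to delete one entry before appending its own. A quick check, with OSMOpener as a hypothetical name for the subclass above:

    opener = OSMOpener()
    print(opener.addheaders)  # [('User-Agent', 'OSMViz/1.0 +http://cbick.github.com/osmviz')]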
Example 11
def utRead(file):
    """ Open file on local or remote system. """
    if 'http' in file:
        opener = FancyURLopener()
        f = opener.open(file)
    else:
        f = open(file,'rb+')
    return f
Example 12
 def fetch_genres(self):
     """
     Grabs genres and returns tuple of genres
     """
     self.genre_url = 'http://www.shoutcast.com/sbin/newxml.phtml'
     self.urlhandler = FancyURLopener()
     self.fd = self.urlhandler.open(self.genre_url)
     self.genre = self.fd.read()
     self.fd.close()
     return self.genre
Example 13
 def fetch_stations(self):
     """
     Grabs the xml list of stations from the shoutcast server
     """
     self.shout_url = 'http://www.shoutcast.com/sbin/newxml.phtml?genre=' + self.genre
     self.urlhandler = FancyURLopener()
     self.fd = self.urlhandler.open(self.shout_url)
     self.stations = self.fd.read()
     self.fd.close()
     return self.stations
Example 14
 def http_error_default(self, url, fp, errcode, errmsg, headers):
     if errcode == 404:
         raise urllib2.HTTPError(url, errcode, errmsg, headers, fp)
     else:
         return FancyURLopener.http_error_default(
             self, url, fp, errcode, errmsg, headers)
Example 15
 def __init__(self, *args, **kwargs):
     FancyURLopener.__init__(self, *args, **kwargs)
     # Headers to add to every request.
     # XXX: IMDb's web server doesn't like urllib-based programs,
     #      so lets fake to be Mozilla.
     #      Wow!  I'm shocked by my total lack of ethic! <g>
     self.set_header('User-agent', 'Mozilla/5.0')
     # XXX: This class is used also to perform "Exact Primary
     #      [Title|Name]" searches, and so by default the cookie is set.
     c_header = 'id=%s; uu=%s' % (_cookie_id, _cookie_uu)
     self.set_header('Cookie', c_header)
Example 16
    def fill_hot_cache( self ):
        bases   = [ 'a', 'g', 'c', 't' ]
        url = self.url + urlencode( self.query )
        url_opener = FancyURLopener( )
        fh = url_opener.open( url )
        hot_rand_handle = SGMLExtractorHandle( fh, [ 'pre', ] )

        hot_cache = fh.read()
        self.hot_cache = hot_cache
        fh.close()
        return self.hot_cache
Example 17
	def __load_photo_page(self, photo_id):
		opener = FancyURLopener()
		res = None
		body = None
		link = photo_page_template % photo_id

		try:
			res = opener.open(link)
			body = res.read()
		except IOError, error:
			print "[!] {0}".format(error.strerror)
Example 18
 def __init__(self, *args, **kwargs):
     self._last_url = u''
     FancyURLopener.__init__(self, *args, **kwargs)
     # Headers to add to every request.
     # XXX: IMDb's web server doesn't like urllib-based programs,
     #      so lets fake to be Mozilla.
     #      Wow!  I'm shocked by my total lack of ethic! <g>
     self.set_header('User-agent', 'Mozilla/5.0')
     # XXX: This class is used also to perform "Exact Primary
     #      [Title|Name]" searches, and so by default the cookie is set.
     c_header = 'id=%s; uu=%s' % (_cookie_id, _cookie_uu)
     self.set_header('Cookie', c_header)
Example 19
	def download (self, download_dir):
		result = path.join (download_dir, self.package_basename)
		if path.exists (result):
			print 'Found install', self.package_basename
		else:
			dir_util.mkpath (download_dir)
			url = "http://www.eiffel-loop.com/download/" + self.package_basename
			print 'Downloading:', url
	
			web = FancyURLopener ()
			web.retrieve (url, result, display_progress)

		return result
Example 20
    def download(self, download_dir):
        result = path.join(download_dir, self.package_basename)
        if path.exists(result):
            print 'Found install', self.package_basename
        else:
            dir_util.mkpath(download_dir)
            url = "http://www.eiffel-loop.com/download/" + self.package_basename
            print 'Downloading:', url

            web = FancyURLopener()
            web.retrieve(url, result, display_progress)

        return result
Example 21
    def __init__(self, ftpproxy=''):
        """RebaseUpdate([ftpproxy]]) -> new RebaseUpdate instance.

        if ftpproxy is not given RebaseUpdate uses the corresponding
        variable from RanaConfig.

        ftpproxy is the proxy to use if any.
        """
        proxy = {'ftp': ftpproxy or ftp_proxy}
        if not Rebase_name:
            raise FtpNameError('Rebase')
        if not proxy['ftp']:
            proxy = {}
        FancyURLopener.__init__(self, proxy)
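URLopener and its subclasses take a proxies mapping of scheme to proxy URL as their first argument, which is what the proxy dict above is built for; a standalone sketch:

    # Route ftp:// requests through a proxy; an empty mapping disables proxying.
    opener = FancyURLopener({'ftp': 'http://proxy.example.com:3128'})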
Example 22
 def __init__(self, *args, **kwargs):
     self._last_url = u""
     FancyURLopener.__init__(self, *args, **kwargs)
     # Headers to add to every request.
     # XXX: IMDb's web server doesn't like urllib-based programs,
     #      so lets fake to be Mozilla.
     #      Wow!  I'm shocked by my total lack of ethic! <g>
     for header in ("User-Agent", "User-agent", "user-agent"):
         self.del_header(header)
     self.set_header("User-Agent", "Mozilla/5.0")
     self.set_header("Accept-Language", "en-us,en;q=0.5")
     # XXX: This class is used also to perform "Exact Primary
     #      [Title|Name]" searches, and so by default the cookie is set.
     c_header = "uu=%s; id=%s" % (_cookie_uu, _cookie_id)
     self.set_header("Cookie", c_header)
Example 23
 def __init__(self, *args, **kwargs):
     self._last_url = u''
     FancyURLopener.__init__(self, *args, **kwargs)
     # Headers to add to every request.
     # XXX: IMDb's web server doesn't like urllib-based programs,
     #      so lets fake to be Mozilla.
     #      Wow!  I'm shocked by my total lack of ethic! <g>
     for header in ('User-Agent', 'User-agent', 'user-agent'):
         self.del_header(header)
     self.set_header('User-Agent', 'Mozilla/5.0')
     self.set_header('Accept-Language', 'en-us,en;q=0.5')
     # XXX: This class is used also to perform "Exact Primary
     #      [Title|Name]" searches, and so by default the cookie is set.
     c_header = 'uu=%s; id=%s' % (_cookie_uu, _cookie_id)
     self.set_header('Cookie', c_header)
Example 24
    def __init__(self, e_mail='', ftpproxy=''):
        """RebaseUpdate([e_mail[, ftpproxy]]) -> new RebaseUpdate instance.

        if e_mail and ftpproxy are not given RebaseUpdate uses the corresponding
        variable from RanaConfig.

        e_mail is the password for the anonymous ftp connection to Rebase.
        ftpproxy is the proxy to use if any."""
        proxy = {'ftp': ftpproxy or ftp_proxy}
        global Rebase_password
        Rebase_password = e_mail or Rebase_password
        if not Rebase_password:
            raise FtpPasswordError('Rebase')
        if not Rebase_name:
            raise FtpNameError('Rebase')
        FancyURLopener.__init__(self, proxy)
Example 25
def check_news(db_conn):
    """
    check_news :: Sqlite3ConnectionData -> Void

    Takes an open Sqlite3 connection
    Checks the Archlinux.org news and prints it if it's new
    """
    br = FancyURLopener()
    response = br.open("http://www.archlinux.org/news/").readlines()
    for a in response:
        if 'title="View: ' in a:
            news = re.findall('">([^<]+)</a>', a)[0]
            break
    if sqlite_manager.is_news_new(db_conn, news):
        sqlite_manager.replace_news(db_conn, news)
        print news
Example 26
def deploy_to_server(data, server):
    opener = FancyURLopener()
    f = opener.open(server, urlencode(data))
    line = ''  # to avoid NameError for line if f has no output at all.
    for line in iter(f.readline, ''):
        try:
            loglevel, line = line.split(':', 1)
            loglevel = int(loglevel)
        except ValueError:
            loglevel = logging.DEBUG
        logger.log(loglevel, "%s", line.rstrip())

    if not any(word in line for word in ['succeeded', 'failed']):
        return 'Failed'
    else:
        return 'Succeeded'
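The loop above expects each line of server output to look like "<level>:<message>" (for example "20:deploying release", since logging.INFO is 20), falling back to DEBUG when a line does not parse. A hypothetical call, with illustrative names and URL:

    data = {'app_name': 'demo', 'app_url': 'http://git.example.com/demo.git'}
    result = deploy_to_server(data, 'http://deploy01.example.com/deploy')
    print(result)  # 'Succeeded' or 'Failed'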
Example 27
    def __init__(self, *args, **kwargs):
        try: self.username = kwargs['username']
        except KeyError: self.username = None
        try: self.password = kwargs['password']
        except KeyError: self.password = None

        # once urllib uses new style classes, or in python 3.0+, use:
        # super(FancyURLopenerMod, self).__init__(*args, **kwargs)
        # till then this will work, but not in python 3.0+:
        FancyURLopener.__init__(self, *args, **kwargs)

        # only try opening the account once
        #self.authtries = 0
        #self.maxauthtries = 3

        self.flag = False
Example 28
def download_ims_image(imsresp):
    inputfilepath = imsresp.getUrl()
    is_via_http = 0
    if 'http' in inputfilepath:
        opener = FancyURLopener()
        is_via_http = 1
        l_file = inputfilepath
        l_filename = l_file.split('/')[-1]
        l_data = opener.open(l_file).read()
        l_file = open(join(FILES_PATH, l_filename), 'wb')
        l_file.write(l_data)
        l_file.close()
        l_temploc = inputfilepath.split('/')[-1]
        inputfilepath = join(FILES_PATH, l_temploc)
    imsresp.setUrl(inputfilepath)
    return imsresp
Example 29
    def __init__(self, e_mail="", ftpproxy=""):
        """RebaseUpdate([e_mail[, ftpproxy]]) -> new RebaseUpdate instance.

        if e_mail and ftpproxy are not given RebaseUpdate uses the corresponding
        variable from RanaConfig.

        e_mail is the password for the anonymous ftp connection to Rebase.
        ftpproxy is the proxy to use if any."""
        proxy = {"ftp": ftpproxy or ftp_proxy}
        global Rebase_password
        Rebase_password = e_mail or Rebase_password
        if not Rebase_password:
            raise FtpPasswordError("Rebase")
        if not Rebase_name:
            raise FtpNameError("Rebase")
        FancyURLopener.__init__(self, proxy)
Example 30
def check_news():
	br = FancyURLopener()
	response = br.open("http://www.archlinux.org/news/").readlines()
	for a in response:
		if 'title="View: ' in a:
			news = re.findall('">([^<]+)</a>',a)[0]
			break

	oldnews = re.findall('NEWS:(.*)\n', open(
		configuration.DATA_FILE, 'r').read()
		)[0]
	if oldnews!=news:
		configuration.backup()
		open(configuration.DATA_FILE,"w").write(
			open(configuration.DATA_FILE+".bak", "r").read().replace(
				"NEWS:"+oldnews, "NEWS:"+news)
				)
Example 31
def download_package(pkg_name, pkg_version):
  file_name, path, hash_algorithm, expected_digest = get_package_info(pkg_name,
      pkg_version)
  if not file_name:
    return False
  if os.path.isfile(file_name) and check_digest(file_name, hash_algorithm,
      expected_digest):
    print('File with matching digest already exists, skipping {0}'.format(file_name))
    return True
  downloader = FancyURLopener()
  pkg_url = '{0}/packages/{1}'.format(PYPI_MIRROR, path)
  print('Downloading {0} from {1}'.format(file_name, pkg_url))
  downloader.retrieve(pkg_url, file_name)
  if check_digest(file_name, hash_algorithm, expected_digest):
    return True
  else:
    print('Hash digest check failed in file {0}.'.format(file_name))
    return False
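check_digest is not shown in this snippet; one plausible implementation, assuming hash_algorithm is a name hashlib accepts (e.g. 'sha256') and expected_digest is a hex string:

    import hashlib

    def check_digest(file_name, hash_algorithm, expected_digest):
        # Hash the file in chunks so large packages are not read into memory at once.
        digest = hashlib.new(hash_algorithm)
        with open(file_name, 'rb') as f:
            for chunk in iter(lambda: f.read(8192), b''):
                digest.update(chunk)
        return digest.hexdigest() == expected_digest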
Example 32
    def __init__(self, *args, **kwargs):
        try:
            self.username = kwargs['username']
        except KeyError:
            self.username = None
        try:
            self.password = kwargs['password']
        except KeyError:
            self.password = None

        # once urllib uses new style classes, or in python 3.0+, use:
        # super(FancyURLopenerMod, self).__init__(*args, **kwargs)
        # till then this will work, but not in python 3.0+:
        FancyURLopener.__init__(self, *args, **kwargs)

        # only try opening the account once
        #self.authtries = 0
        #self.maxauthtries = 3

        self.flag = False
Example 33
 def _getlinesfromurl(self,url):
     err = 0
     strerr = ''
     # Retry URL download a few times.
     for count in range(self.retries):
         if count != 0:
             time.sleep(self.retrysecs)
         try:
             opener = FancyURLopener()
             f = opener.open(url, data='user_name=%s&password=%s&login=Login' % (self.username, self.password))
             rc = 0
             if 'www-authenticate' in f.headers:
                 rc = 1
                 strerr = 'Authentication is required to access %s' % url
             break
         except IOError, (_err, _strerr):
             rc = 1
             print url
             print _strerr
             (err,strerr) = (_err,_strerr)
Example 34
def retrieveCatalog():
    try:
        cache = SimpleCache()
        catalog = cache.get(ADDON_NAME + '.catalog')
        if catalog:
            log("using cached catalog")
        if not catalog:
            log("downloading catalog")
            opener = FancyURLopener()
            f = opener.open(url)
            catalog = json.load(f)
            cache.set(ADDON_NAME + '.catalog',
                      catalog,
                      expiration=datetime.timedelta(hours=12))
        return catalog
    except Exception as e:
        log("error retrieving catalog - " + str(e), xbmc.LOGERROR)
        xbmcgui.Dialog().notification(ADDON_NAME, LANGUAGE(30003), ICON, 4000)
        xbmc.executebuiltin('Action(PreviousMenu)')
        sys.exit(0)
Example 35
def do_http_call(url, variables, do_post):
    """Make the HTTP call.
    Note exceptions can be raised should the HTTP status require it.
    """
    if type(variables) != str:
        variables = urllib.urlencode(variables)

    opener = FancyURLopener()

    if do_post:
        fhandle = opener.open(url, variables)
    else:
        url_call = url + "?" + variables

        fhandle = opener.open(url_call)

    result = fhandle.read()

    fhandle.close()

    return result
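A hypothetical pair of calls against an illustrative endpoint, showing how do_post switches between query-string and request-body encoding:

    # GET: the urlencoded variables are appended to the URL after '?'.
    do_http_call('http://api.example.com/search', {'q': 'test'}, do_post=False)
    # POST: the same variables travel urlencoded in the request body.
    do_http_call('http://api.example.com/search', {'q': 'test'}, do_post=True)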
Example 36
    def __install_gatling(self, gatling_path):
        """
        Installs Gatling.
        Gatling version and download link may be set in config:
        "download-link":"http://domain/resource-{version}.zip"
        "version":"1.2.3"
        """
        dest = os.path.dirname(
            os.path.dirname(os.path.expanduser(gatling_path)))  # ../..
        dest = os.path.abspath(dest)

        try:
            self.__gatling(gatling_path)
            return gatling_path
        except OSError:
            self.log.info("Will try to install Gatling into %s", dest)

        # download gatling
        downloader = FancyURLopener()
        gatling_zip_path = self.engine.create_artifact("gatling-dist", ".zip")
        version = self.settings.get("version", GatlingExecutor.VERSION)
        download_link = self.settings.get("download-link",
                                          GatlingExecutor.DOWNLOAD_LINK)
        download_link = download_link.format(version=version)
        self.log.info("Downloading %s", download_link)
        # TODO: check archive checksum/hash before unzip and run

        try:
            downloader.retrieve(download_link, gatling_zip_path,
                                download_progress_hook)
        except BaseException as e:
            self.log.error("Error while downloading %s", download_link)
            raise e

        self.log.info("Unzipping %s", gatling_zip_path)
        unzip(gatling_zip_path, dest,
              'gatling-charts-highcharts-bundle-' + version)
        os.remove(gatling_zip_path)
        os.chmod(os.path.expanduser(gatling_path), 0o755)
        self.log.info("Installed Gatling successfully")
Example 37
    def __install_grinder(self, grinder_path):
        """
        Installs Grinder.
        Grinder version and download link may be set in config:
        "download-link":"http://domain/resource-{version}.zip"
        "version":"1.2.3"
        """

        dest = os.path.dirname(
            os.path.dirname(os.path.expanduser(grinder_path)))
        if not dest:
            dest = os.path.expanduser("~/grinder-taurus")
        dest = os.path.abspath(dest)
        grinder_full_path = os.path.join(dest, "lib", "grinder.jar")
        try:
            self.__grinder(grinder_full_path)
            return grinder_full_path
        except CalledProcessError:
            self.log.info("Will try to install grinder into %s", dest)

        downloader = FancyURLopener()
        grinder_zip_path = self.engine.create_artifact("grinder-dist", ".zip")
        version = self.settings.get("version", GrinderExecutor.VERSION)
        download_link = self.settings.get("download-link",
                                          GrinderExecutor.DOWNLOAD_LINK)
        download_link = download_link.format(version=version)
        self.log.info("Downloading %s", download_link)

        try:
            downloader.retrieve(download_link, grinder_zip_path,
                                download_progress_hook)
        except BaseException as e:
            self.log.error("Error while downloading %s", download_link)
            raise e

        self.log.info("Unzipping %s", grinder_zip_path)
        unzip(grinder_zip_path, dest, 'grinder-' + version)
        os.remove(grinder_zip_path)
        self.log.info("Installed grinder successfully")
        return grinder_full_path
Example 38
 def _getlinesfromurl(self, url):
     err = 0
     strerr = ''
     # Retry URL download a few times.
     for count in range(self.retries):
         if count != 0:
             time.sleep(self.retrysecs)
         try:
             opener = FancyURLopener()
             f = opener.open(url,
                             data='user_name=%s&password=%s&login=Login' %
                             (self.username, self.password))
             rc = 0
             if 'www-authenticate' in f.headers:
                 rc = 1
                 strerr = 'Authentication is required to access %s' % url
             break
         except IOError, (_err, _strerr):
             rc = 1
             print url
             print _strerr
             (err, strerr) = (_err, _strerr)
Example 39
 def _get_sector_url(self, sector, length):
     start = sector * 2048
     if self._buff:
         self._buff.close()
     opener = FancyURLopener()
     opener.http_error_206 = lambda *a, **k: None
     opener.addheader("Range", "bytes=%d-%d" % (start, start + length - 1))
     self._buff = opener.open(self._url)
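The Range header requests only the byte window [start, start + length - 1], and stubbing http_error_206 keeps the opener's error machinery from intercepting the server's 206 Partial Content reply. The same trick in standalone form, with an illustrative URL:

    # Fetch bytes 2048-4095 of a remote file without downloading the rest.
    opener = FancyURLopener()
    opener.http_error_206 = lambda *args, **kwargs: None  # treat 206 as success
    opener.addheader("Range", "bytes=2048-4095")
    chunk = opener.open("http://example.com/disc.iso").read()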
Example 40
    def __install_grinder(self, grinder_path):
        """
        Installs Grinder.
        Grinder version and download link may be set in config:
        "download-link":"http://domain/resource-{version}.zip"
        "version":"1.2.3"
        """

        dest = os.path.dirname(os.path.dirname(os.path.expanduser(grinder_path)))
        if not dest:
            dest = os.path.expanduser("~/grinder-taurus")
        dest = os.path.abspath(dest)
        grinder_full_path = os.path.join(dest, "lib", "grinder.jar")
        try:
            self.__grinder(grinder_full_path)
            return grinder_full_path
        except CalledProcessError:
            self.log.info("Will try to install grinder into %s", dest)

        downloader = FancyURLopener()
        grinder_zip_path = self.engine.create_artifact("grinder-dist", ".zip")
        version = self.settings.get("version", GrinderExecutor.VERSION)
        download_link = self.settings.get("download-link", GrinderExecutor.DOWNLOAD_LINK)
        download_link = download_link.format(version=version)
        self.log.info("Downloading %s", download_link)

        try:
            downloader.retrieve(download_link, grinder_zip_path, download_progress_hook)
        except BaseException as e:
            self.log.error("Error while downloading %s", download_link)
            raise e

        self.log.info("Unzipping %s", grinder_zip_path)
        unzip(grinder_zip_path, dest, 'grinder-' + version)
        os.remove(grinder_zip_path)
        self.log.info("Installed grinder successfully")
        return grinder_full_path
Example 41
class ShoutcastFeed:
    def __init__(self,
                 genre,
                 min_bitrate=128,
                 cache_ttl=600,
                 cache_dir='/tmp/pyshout_cache'):
        """
        Parses the xml feed and spits out a list of dictionaries with the station info
        keyed by genre. Params are as follows:
        min_bitrate - 128 default, Minimum bitrate filter
        cache_ttl - 600 default, 0 disables, Seconds cache is considered valid
        cache_dir - /tmp/pyshout_cache default, Path to cache directory
        """
        self.min_bitrate = min_bitrate
        self.cache_ttl = cache_ttl
        self.genre = genre
        self.cache_file = cache_dir + '/' + self.genre + '.pickle'
        self.station_list = []

    def fetch_stations(self):
        """
        Grabs the xml list of stations from the shoutcast server
        """
        self.shout_url = 'http://www.shoutcast.com/sbin/newxml.phtml?genre=' + self.genre
        self.urlhandler = FancyURLopener()
        self.fd = self.urlhandler.open(self.shout_url)
        self.stations = self.fd.read()
        self.fd.close()
        return self.stations

    def parse_stations(self):
        ct = None
        if self.cache_ttl:
            ct = cacheTime(self.cache_file)
        if ct:
            try:
                self.station_list = load_cache(self.cache_file)
            except:
                print("Failed to load cache.")
        if not ct or (time.time() - ct) > self.cache_ttl:
            try:
                parseXML = StationParser(self.min_bitrate)
                self.stations = self.fetch_stations()
                parseString(self.stations, parseXML)
                self.station_list = parseXML.station_list
                write_cache(self.cache_file, self.station_list)
            except:
                print("Failed to get a new station list, sorry.")
        return self.station_list
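Typical use of the class above, as a sketch (the genre string must be one the SHOUTcast directory recognizes):

    feed = ShoutcastFeed('Rock', min_bitrate=128)
    for station in feed.parse_stations():
        print(station)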
Example 42
    def __install_gatling(self, gatling_path):
        """
        Installs Gatling.
        Gatling version and download link may be set in config:
        "download-link":"http://domain/resource-{version}.zip"
        "version":"1.2.3"
        """
        dest = os.path.dirname(os.path.dirname(os.path.expanduser(gatling_path)))  # ../..
        dest = os.path.abspath(dest)

        try:
            self.__gatling(gatling_path)
            return gatling_path
        except OSError:
            self.log.info("Will try to install Gatling into %s", dest)

        # download gatling
        downloader = FancyURLopener()
        gatling_zip_path = self.engine.create_artifact("gatling-dist", ".zip")
        version = self.settings.get("version", GatlingExecutor.VERSION)
        download_link = self.settings.get("download-link", GatlingExecutor.DOWNLOAD_LINK)
        download_link = download_link.format(version=version)
        self.log.info("Downloading %s", download_link)
        # TODO: check archive checksum/hash before unzip and run

        try:
            downloader.retrieve(download_link, gatling_zip_path, download_progress_hook)
        except BaseException as e:
            self.log.error("Error while downloading %s", download_link)
            raise e

        self.log.info("Unzipping %s", gatling_zip_path)
        unzip(gatling_zip_path, dest, 'gatling-charts-highcharts-bundle-' + version)
        os.remove(gatling_zip_path)
        os.chmod(os.path.expanduser(gatling_path), 0o755)
        self.log.info("Installed Gatling successfully")
Example 43
class ShoutcastFeed:
    def __init__(self, genre, min_bitrate=128, cache_ttl=600, cache_dir='/tmp/pyshout_cache'):
        """
        Parses the xml feed and spits out a list of dictionaries with the station info
        keyed by genre. Params are as follows:
        min_bitrate - 128 default, Minimum bitrate filter
        cache_ttl - 600 default, 0 disables, Seconds cache is considered valid
        cache_dir - /tmp/pyshout_cache default, Path to cache directory
        """
        self.min_bitrate = min_bitrate
        self.cache_ttl = cache_ttl
        self.genre = genre
        self.cache_file = cache_dir + '/' + self.genre + '.pickle'
        self.station_list = []

    def fetch_stations(self):
        """
        Grabs the xml list of stations from the shoutcast server
        """
        self.shout_url='http://www.shoutcast.com/sbin/newxml.phtml?genre=' + self.genre
        self.urlhandler = FancyURLopener()
        self.fd = self.urlhandler.open(self.shout_url)
        self.stations = self.fd.read()
        self.fd.close()
        return self.stations

    def parse_stations(self):
        ct = None
        if self.cache_ttl:
            ct = cacheTime(self.cache_file)
        if ct:
            try:
                self.station_list = load_cache(self.cache_file)
            except:
                print "Failed to load cache."
        if not ct or (time.time() - ct) > self.cache_ttl:
            try:
                parseXML = StationParser(self.min_bitrate)
                self.stations = self.fetch_stations()
                parseString(self.stations, parseXML)
                self.station_list = parseXML.station_list
                write_cache(self.cache_file, self.station_list)
            except:
                print "Failed to get a new station list, sorry."
        return self.station_list
Example 44
	def __load_page(self, url):
		res = None
		body = None
		opener = FancyURLopener()

		# Clear default User-Agent header which is defined in addheaders.
		opener.addheaders = []
		for key, value in request_headers.iteritems():
			opener.addheader(key, value)
		opener.addheader("Cookie", self.cookie)

		try:
			res = opener.open(url)
			body = res.read()
		except IOError, error:
			logging.error(error.strerror)
Example 45
    def _download_build(self):
        build_url = URL_PREFIX
        if build_number.startswith(('4', '5', '6')):
            build_url += "/ob"
        else:
            build_url += "/sb"
        build_url += "/build/%s" % build_number
        logger.info("Build url is %s" % build_url)

        resource = json.loads(urllib2.urlopen(build_url).read())
        deliverable_url = URL_PREFIX + "/%s" % resource[DELIVERABLE_URL_ATTR]
        infos = json.loads(urllib2.urlopen(deliverable_url).read())
        for info in infos[LIST_ATTR]:
            if info[DOWNLOAD_URL_ATTR].find("VMware-viewagent-x86_64") > 0:
                FancyURLopener(proxies={}).retrieve(info[DOWNLOAD_URL_ATTR],
                                                    INSTALL_FILE)
                logger.info('Download %s to %s SUCCEED' %
                            (info[DOWNLOAD_URL_ATTR], INSTALL_FILE))
Example 46
	def prompt_user_passwd(self, host, realm):

		paths = [
				'alhrc',
				'/etc/alhrc',
			]

		home = os.environ.get('HOME')
		if home is not None:
			paths.append(os.path.join(home, '.alhrc'))

		for path in paths:
			try:
				f = open(path)
			except IOError:
				continue

			match = False
			user = None
			passwd = None

			for line in f:
				if line.startswith('#'):
					continue

				try:
					key, value = line.strip().split()
				except ValueError:
					continue

				if (key == 'Host'):
					match = (value == host)
					user = None
					passwd = None
				elif match and (key == 'User'):
					user = value
				elif match and (key == 'Password'):
					passwd = value

				if match and user and passwd:
					return (user, passwd)

		return FancyURLopener.prompt_user_passwd(self, host, realm)
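The credential files parsed above hold one whitespace-separated key/value pair per line, with '#' starting a comment; a hypothetical ~/.alhrc showing the format:

    # credentials for the ALH server
    Host example.com
    User alice
    Password s3cret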
Example 47
class GenreFeed:
    def __init__(self, cache_ttl=3600, cache_dir='/tmp/pyshout_cache'):
        self.cache_ttl = cache_ttl
        self.cache_file = cache_dir + '/genres.cache'
        self.genre_list = [
            'Sorry, failed to load', '...try again later', 'Rock', 'Pop',
            'Alternative'
        ]

    def fetch_genres(self):
        """
		Grabs genres and returns tuple of genres
		"""
        self.genre_url = 'http://www.shoutcast.com/sbin/newxml.phtml'
        self.urlhandler = FancyURLopener()
        self.fd = self.urlhandler.open(self.genre_url)
        self.genre = self.fd.read()
        self.fd.close()
        return self.genre

    def parse_genres(self):
        ct = None
        if self.cache_ttl:
            ct = cacheTime(self.cache_file)
            try:
                self.genre_list = load_cache(self.cache_file)
            except:
                ct = None
        if not ct or (time.time() - ct) > self.cache_ttl:
            if DEBUG == 1:
                print('Getting fresh feed')
            try:
                parseXML = GenreParse()
                self.genres = self.fetch_genres()
                parseString(self.genres, parseXML)
                self.genre_list = parseXML.genreList
                write_cache(self.cache_file, self.genre_list)
            except:
                print("Failed to get genres from server, sorry.")
        return self.genre_list
Example 48
def fetchURL(url, file='', params=None, headers={}, isBinary=False, encodeURL=True):
	log("> bbbLib.fetchURL() %s isBinary=%s encodeURL=%s" % (url, isBinary, encodeURL))
	if encodeURL:
		safe_url = quote_plus(url,'/:&?=+#@')
	else:
		safe_url = url

	success = False
	data = None
	if not file:
		# create temp file
		file = xbmc.translatePath( "special://temp/temp.html" )

	# remove destination file if exists already
	deleteFile(file)

	# fetch from url
	try:
		opener = FancyURLopener()

		# add headers if supplied
		if not headers.has_key('User-Agent') and not headers.has_key('User-agent'):
			headers['User-Agent'] = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
		for name, value  in headers.items():
			opener.addheader(name, value)

		fn, resp = opener.retrieve(safe_url, file, data=params)
#		print fn, resp

		content_type = resp.get("Content-Type",'').lower()
		# fail if expecting an image but not correct type returned
		if isBinary and (find(content_type,"text") != -1):
			raise "Not Binary"

		opener.close()
		del opener
		urlcleanup()
	except IOError, errobj:
		ErrorCode(errobj)
Example 49
 def __init__(self, username=None, passwd=None, *args, **kw):
     FancyURLopener.__init__(self, *args, **kw)
     self.username = username
     self.passwd = passwd
Example 50
 def __init__(self):
     self.version = 'Audible ADM 6.6.0.19;Windows Vista Service Pack 1 Build 7601'
     FancyURLopener.__init__(self)
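Assigning self.version before calling the base constructor works because URLopener builds its default User-Agent header from self.version. A quick check, with AudibleOpener as a hypothetical name for the subclass above:

    opener = AudibleOpener()
    print(opener.addheaders)  # [('User-Agent', 'Audible ADM 6.6.0.19;Windows Vista Service Pack 1 Build 7601')]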
Example 51
 def http_error_default(self, url, fp, errcode, errmsg, headers):
     if errcode == 404:
         raise urllib2.HTTPError(url, errcode, errmsg, headers, fp)
     else:
         return FancyURLopener.http_error_default(self, url, fp, errcode,
                                                  errmsg, headers)
Example 52
def urlopen_custom(req, rawserver):
    global _urlopener

    if not _urlopener:
        opener = FancyURLopener()
        _urlopener = opener
        #remove User-Agent
        del _urlopener.addheaders[:]

    if not isinstance(req, str):
        #for header in r.headers:
        #    _urlopener.addheaders.append((header, r.headers[header]))
        #return _urlopener.open(r.get_full_url(), r.data)

        # All this has to be done manually, since httplib and urllib 1 and 2
        # add headers to the request that some routers do not accept.
        # A minimal, functional request includes the headers:
        # Content-Length
        # Soapaction
        # I have found the following to be specifically disallowed:
        # User-agent
        # Connection
        # Accept-encoding

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        (scheme, netloc, path, params, query,
         fragment) = urlparse.urlparse(req.get_full_url())

        if not scheme.startswith("http"):
            raise ValueError("UPnP URL scheme is not http: " +
                             req.get_full_url())

        if len(path) == 0:
            path = '/'

        if netloc.count(":") > 0:
            host, port = netloc.split(':', 1)
            try:
                port = int(port)
            except:
                raise ValueError("UPnP URL port is not int: " +
                                 req.get_full_url())
        else:
            host = netloc
            port = 80

        header_str = ''
        data = ''
        method = ''
        header_str = " " + path + " HTTP/1.0\r\n"
        if req.has_data():
            method = 'POST'
            header_str = method + header_str
            header_str += "Content-Length: " + str(len(req.data)) + "\r\n"
            data = req.data + "\r\n"
        else:
            method = 'GET'
            header_str = method + header_str

        header_str += "Host: " + host + ":" + str(port) + "\r\n"

        for header in req.headers:
            header_str += header + ": " + str(req.headers[header]) + "\r\n"

        header_str += "\r\n"
        data = header_str + data

        try:
            rawserver.add_pending_connection(host)
            s.connect((host, port))
        finally:
            rawserver.remove_pending_connection(host)

        s.send(data)
        r = HTTPResponse(s, method=method)
        r.begin()

        r.recv = r.read
        fp = socket._fileobject(r)

        resp = addinfourl(fp, r.msg, req.get_full_url())
        resp.code = r.status
        resp.msg = r.reason

        return resp

    return _urlopener.open(req)