Example #1
File: streamer.py Project: cowlicks/rio
 def get(self, url, headers=None):
     o = FancyURLopener()
     if headers:
         for k, v in headers.items():
             o.addheader(k, v)
     self.req = o.open(url)
     return self
Example #2
File: utilities.py Project: Mekyi/crunchy
def unicode_urlopen(url, accept_lang=None):
    """Returns a *Unicode* file-like object for non-local documents.
    Client must ensure that the URL points to non-binary data. Pass in
    an Accept-Language value to configure the FancyURLopener we
    use."""

    opener = FancyURLopener()

    if accept_lang:
        opener.addheader("Accept-Language", accept_lang)

    # We want to convert the bytes file-like object returned by
    # urllib, which is bytes in both Python 2 and Python 3
    # fortunately, and turn it into a Unicode file-like object
    # with a little help from our StringIO friend.
    page = opener.open(url)
    encoding = page.headers['content-type']
    encoding = encoding.split('charset=')
    if len(encoding) > 1:
        encoding = encoding[-1]
        page = page.read().decode(encoding)
    else:
        page = page.read()
        encoding = meta_encoding(page) or 'utf8'
        page = page.decode(encoding)

    page = StringIO(page)
    return page
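A minimal usage sketch for unicode_urlopen as defined above; the URL and language tag are illustrative, and meta_encoding and StringIO are assumed to be imported elsewhere in utilities.py:

# Hypothetical call: fetch a page as decoded text, asking for German content.
page = unicode_urlopen("http://example.com/", accept_lang="de")
text = page.read()  # page is a StringIO, so read() returns already-decoded text
print(text[:200])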
Example #4
File: deploy.py Project: xiaomen/deploy
    def POST(self):
        # disable nginx buffering
        web.header('X-Accel-Buffering', 'no')

        i = web.input(fast=False)
        # get the app config; create it if it does not exist
        servers = get_servers(i.app_name)
        if not servers:
            servers = ['deploy']
            save_app_option(i.app_name, 'deploy_servers', 'deploy')

        yield "%d:%s" % (logging.INFO, render_ok("Application allowed to deploy those servers"))
        yield "%d:%s" % (logging.INFO, render_ok(','.join(servers)))
        servers = escape_servers(servers)

        result = {}
        data = {'app_name': i.app_name, 'app_url': i.app_url}
        for server in servers:
            url = SUFFIX % server
            try:
                opener = FancyURLopener()
                f = opener.open(url, urlencode(data))
                line = ''  # to avoid NameError for line if f has no output at all.
                for line in iter(f.readline, ''):
                    logger.info(line)
                    yield line
                if not any(word in line for word in ['succeeded', 'failed']):
                    result[server] = 'Failed'
                else:
                    result[server] = 'Succeeded'
            except Exception, e:
                yield "%d:%s" % (logging.ERROR, render_err(str(e)))
                result[server] = 'Failed'
Example #5
def site_a(site):
    if site[0:7] != 'http://':
        site = 'http://' + site
    opener = FancyURLopener()  # create the page "grabber"
    page = opener.open(site)  # a test URL
    html = page.read()  # connects to the server and captures the returned html
    # print html  # if you want to see the raw html
    soup = BeautifulSoup(html, "lxml")  # strip the html tags to keep only the content
    for script in soup(["script", "style"]):
        script.extract()  # remove the Javascript and CSS code
    conteudo = soup.get_text()

    limpa = ['com', 'br', 'www', 'http']
    site = re.sub(r'[^\w]', " ", site).split()
    novo_site = ''
    for a in site:
        if a not in limpa:
            novo_site += a
    site = novo_site
    file = open('site_w/' + site + '.txt', 'w')
    file.write((conteudo.encode('utf-8')).lower())  # write the clean text (no html tags, Javascript or CSS)
    lista_temas = {
        'esporte': ('futebol', 'bola', 'jogador', 'esporte', 'flamengo',
                    'vasco', 'botafogo', 'fluminense', 'sport'),
        'engenharia': ('engenharia', 'engenharias', 'engineer'),
        'jogos': ('jogo', 'jogos', 'game', 'games')
    }
    tema(lista_temas, site)
Example #6
 def getNaturalRandom(self, min=1, max=49, nbNumbers=6):
     unique = False
     while not unique:
         url_opener = FancyURLopener()
         data = url_opener.open("http://www.random.org/integers/?num=%s&min=%s&max=%s&col=%s&base=10&format=plain&rnd=new" % (nbNumbers, min, max, nbNumbers))
         randList = data.readlines()[0].rstrip('\n').split('\t')
         unique = bool(len(randList) == len(list(set(randList))))
     return sorted([int(i) for i in randList])
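random.org's plain-text integer service returns the numbers as one tab-separated line, which is why the code splits on '\t' and retries until all values are distinct. A small sketch of just the parsing step, on an illustrative canned response (no real HTTP call):

body = "5\t12\t23\t31\t40\t44\n"  # illustrative response body
rand_list = body.rstrip('\n').split('\t')
numbers = sorted(int(i) for i in rand_list)  # [5, 12, 23, 31, 40, 44]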
Example #7
def utOpen(file):
    # Open file
    if 'http' in file:
        opener = FancyURLopener()
        f = opener.open(file)
    else:
        f = open(file,'rb+')
    return f
Example #8
 def _get_sector_url(self, sector, length):
     start = sector * 2048
     if self._buff:
         self._buff.close()
     opener = FancyURLopener()
     opener.http_error_206 = lambda *a, **k: None
     opener.addheader("Range", "bytes=%d-%d" % (start, start + length - 1))
     self._buff = opener.open(self._url)
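FancyURLopener treats HTTP 206 (Partial Content) as an error by default, so the no-op http_error_206 handler is what lets the Range request above return only the requested sector bytes. A self-contained sketch of the same trick, with a placeholder URL and byte range:

from urllib import FancyURLopener  # Python 2; urllib.request.FancyURLopener on Python 3

opener = FancyURLopener()
opener.http_error_206 = lambda *a, **k: None   # accept 206 Partial Content
opener.addheader("Range", "bytes=0-2047")      # first 2048 bytes only
chunk = opener.open("http://example.com/disc.iso").read()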
Example #9
def utRead(file):
    """ Open file on local or remote system. """
    if 'http' in file:
        opener = FancyURLopener()
        f = opener.open(file)
    else:
        f = open(file,'rb+')
    return f
Example #10
    def fill_hot_cache( self ):
        bases   = [ 'a', 'g', 'c', 't' ]
        url = self.url + urlencode( self.query )
        url_opener = FancyURLopener( )
        fh = url_opener.open( url )
        hot_rand_handle = SGMLExtractorHandle( fh, [ 'pre', ] )

        hot_cache = fh.read()
        self.hot_cache = hot_cache
        fh.close()
        return self.hot_cache
Example #11
File: parse.py Project: x-cray/votingstats
	def __load_photo_page(self, photo_id):
		opener = FancyURLopener()
		res = None
		body = None
		link = photo_page_template % photo_id

		try:
			res = opener.open(link)
			body = res.read()
		except IOError, error:
			print "[!] {0}".format(error.strerror)
Example #12
def do_http_call(url, variables, do_post):
    """Make the HTTP call.
    Note exceptions can be raised should the HTTP status require it.
    """
    if type(variables) != str:
        variables = urllib.urlencode(variables)

    opener = FancyURLopener()

    if do_post:
        fhandle = opener.open(url, variables)
    else:
        url_call = url + "?" + variables

        fhandle = opener.open(url_call)

    result = fhandle.read()

    fhandle.close()

    return result
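A hedged usage sketch for do_http_call above; the endpoint and parameters are placeholders, and urllib is assumed to be imported at module level:

# GET: the variables are urlencoded into the query string
body = do_http_call("http://example.com/api", {"q": "test", "page": 1}, False)

# POST: the same variables are sent urlencoded in the request body
body = do_http_call("http://example.com/api", {"q": "test"}, True)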
Example #13
class ShoutcastFeed:
    def __init__(self,
                 genre,
                 min_bitrate=128,
                 cache_ttl=600,
                 cache_dir='/tmp/pyshout_cache'):
        """
        Parses the xml feed and spits out a list of dictionaries with the station info
        keyed by genre. Params are as follows:
        min_bitrate - 128 default, Minimum bitrate filter
        cache_ttl - 600 default, 0 disables, Seconds cache is considered valid
        cache_dir - /tmp/pyshout_cache default, Path to cache directory
        """
        self.min_bitrate = min_bitrate
        self.cache_ttl = cache_ttl
        self.genre = genre
        self.cache_file = cache_dir + '/' + self.genre + '.pickle'
        self.station_list = []

    def fetch_stations(self):
        """
        Grabs the xml list of stations from the shoutcast server
        """
        self.shout_url = 'http://www.shoutcast.com/sbin/newxml.phtml?genre=' + self.genre
        self.urlhandler = FancyURLopener()
        self.fd = self.urlhandler.open(self.shout_url)
        self.stations = self.fd.read()
        self.fd.close()
        return self.stations

    def parse_stations(self):
        ct = None
        if self.cache_ttl:
            ct = cacheTime(self.cache_file)
        if ct:
            try:
                self.station_list = load_cache(self.cache_file)
            except:
                print("Failed to load cache.")
        if not ct or (time.time() - ct) > self.cache_ttl:
            try:
                parseXML = StationParser(self.min_bitrate)
                self.stations = self.fetch_stations()
                parseString(self.stations, parseXML)
                self.station_list = parseXML.station_list
                write_cache(self.cache_file, self.station_list)
            except:
                print("Failed to get a new station list, sorry.")
        return self.station_list
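A minimal usage sketch for the class above; the genre value is illustrative, and the cache helpers (cacheTime, load_cache, write_cache) are assumed to be defined in the same module:

feed = ShoutcastFeed('rock', min_bitrate=128, cache_ttl=600)
stations = feed.parse_stations()  # list of station-info dictionaries
for station in stations[:5]:
    print(station)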
Example #14
File: grab.py Project: x-cray/votingstats
	def __load_page(self, url):
		res = None
		body = None
		opener = FancyURLopener()

		# Clear default User-Agent header which is defined in addheaders.
		opener.addheaders = []
		for key, value in request_headers.iteritems():
			opener.addheader(key, value)
		opener.addheader("Cookie", self.cookie)

		try:
			res = opener.open(url)
			body = res.read()
		except IOError, error:
			logging.error(error.strerror)
Example #15
def deploy_to_server(data, server):
    opener = FancyURLopener()
    f = opener.open(server, urlencode(data))
    line = ''  # to avoid NameError for line if f has no output at all.
    for line in iter(f.readline, ''):
        try:
            loglevel, line = line.split(':', 1)
            loglevel = int(loglevel)
        except ValueError:
            loglevel = logging.DEBUG
        logger.log(loglevel, "%s", line.rstrip())

    if not any(word in line for word in ['succeeded', 'failed']):
        return 'Failed'
    else:
        return 'Succeeded'
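deploy_to_server expects the remote endpoint to stream lines of the form "<level>:<message>", matching the "%d:%s" lines yielded in Example #4. A quick sketch of how one such line is handled (the line content is illustrative):

import logging

line = "20:deploy succeeded on host-1\n"
loglevel, msg = line.split(':', 1)
logging.log(int(loglevel), "%s", msg.rstrip())  # 20 == logging.INFO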
Example #16
def download_ims_image(imsresp):
    inputfilepath = imsresp.getUrl()
    is_via_http = 0
    if 'http' in inputfilepath:
        opener = FancyURLopener()
        is_via_http = 1
        l_file = inputfilepath
        l_filename = l_file.split('/')[-1]
        l_data = opener.open(l_file).read()
        l_file = open(join(FILES_PATH, l_filename), 'wb')
        l_file.write(l_data)
        l_file.close()
        l_temploc = inputfilepath.split('/')[-1]
        inputfilepath = join(FILES_PATH, l_temploc)
    imsresp.setUrl(inputfilepath)
    return imsresp
Example #17
def check_news(db_conn):
    """
    check_news :: Sqlite3ConnectionData -> Void

    Takes an open Sqlite3 connection
    Checks the Archlinux.org news and prints it if it's new
    """
    br = FancyURLopener()
    response = br.open("http://www.archlinux.org/news/").readlines()
    for a in response:
        if 'title="View: ' in a:
            news = re.findall('">([^<]+)</a>', a)[0]
            break
    if sqlite_manager.is_news_new(db_conn, news):
            sqlite_manager.replace_news(db_conn, news)
            print news
Example #18
File: archnews.py Project: venam/updater
def check_news():
	br = FancyURLopener()
	response = br.open("http://www.archlinux.org/news/").readlines()
	for a in response:
		if 'title="View: ' in a:
			news = re.findall('">([^<]+)</a>',a)[0]
			break

	oldnews = re.findall('NEWS:(.*)\n', open(
		configuration.DATA_FILE, 'r').read()
		)[0]
	if oldnews!=news:
		configuration.backup()
		open(configuration.DATA_FILE,"w").write(
			open(configuration.DATA_FILE+".bak", "r").read().replace(
				"NEWS:"+oldnews, "NEWS:"+news)
				)
Example #19
class ShoutcastFeed:
    def __init__(self, genre, min_bitrate=128, cache_ttl=600, cache_dir='/tmp/pyshout_cache'):
        """
        Parses the xml feed and spits out a list of dictionaries with the station info
        keyed by genre. Params are as follows:
        min_bitrate - 128 default, Minimum bitrate filter
        cache_ttl - 600 default, 0 disables, Seconds cache is considered valid
        cache_dir - /tmp/pyshout_cache default, Path to cache directory
        """
        self.min_bitrate = min_bitrate
        self.cache_ttl = cache_ttl
        self.genre = genre
        self.cache_file = cache_dir + '/' + self.genre + '.pickle'
        self.station_list = []

    def fetch_stations(self):
        """
        Grabs the xml list of stations from the shoutcast server
        """
        self.shout_url='http://www.shoutcast.com/sbin/newxml.phtml?genre=' + self.genre
        self.urlhandler = FancyURLopener()
        self.fd = self.urlhandler.open(self.shout_url)
        self.stations = self.fd.read()
        self.fd.close()
        return self.stations

    def parse_stations(self):
        ct = None
        if self.cache_ttl:
            ct = cacheTime(self.cache_file)
        if ct:
            try:
                self.station_list = load_cache(self.cache_file)
            except:
                print "Failed to load cache."
        if not ct or (time.time() - ct) > self.cache_ttl:
            try:
                parseXML = StationParser(self.min_bitrate)
                self.stations = self.fetch_stations()
                parseString(self.stations, parseXML)
                self.station_list = parseXML.station_list
                write_cache(self.cache_file, self.station_list)
            except:
                print "Failed to get a new station list, sorry."
        return self.station_list
Example #20
def retrieveCatalog():
    try:
        cache = SimpleCache()
        catalog = cache.get(ADDON_NAME + '.catalog')
        if catalog:
            log("using cached catalog")
        if not catalog:
            log("downloading catalog")
            opener = FancyURLopener()
            f = opener.open(url)
            catalog = json.load(f)
            cache.set(ADDON_NAME + '.catalog',
                      catalog,
                      expiration=datetime.timedelta(hours=12))
        return catalog
    except Exception as e:
        log("error retrieving catalog - " + str(e), xbmc.LOGERROR)
        xbmcgui.Dialog().notification(ADDON_NAME, LANGUAGE(30003), ICON, 4000)
        xbmc.executebuiltin('Action(PreviousMenu)')
        sys.exit(0)
Example #21
 def _getlinesfromurl(self,url):
     err = 0
     strerr = ''
     # Retry URL download a few times.
     for count in range(self.retries):
         if count != 0:
             time.sleep(self.retrysecs)
         try:
             opener = FancyURLopener()
             f = opener.open(url, data='user_name=%s&password=%s&login=Login' % (self.username, self.password))
             rc = 0
             if 'www-authenticate' in f.headers:
                 rc = 1
                 strerr = 'Authentication is required to access %s' % url
             break
         except IOError, (_err, _strerr):
             rc = 1
             print url
             print _strerr
             (err,strerr) = (_err,_strerr)
Example #22
class GenreFeed:
    def __init__(self, cache_ttl=3600, cache_dir='/tmp/pyshout_cache'):
        self.cache_ttl = cache_ttl
        self.cache_file = cache_dir + '/genres.cache'

        self.genre_list = [
            'Sorry, failed to load', '...try again later', 'Rock', 'Pop',
            'Alternative'
        ]

    def fetch_genres(self):
        """
		Grabs genres and returns tuple of genres
		"""
        self.genre_url = 'http://www.shoutcast.com/sbin/newxml.phtml'
        self.urlhandler = FancyURLopener()
        self.fd = self.urlhandler.open(self.genre_url)
        self.genre = self.fd.read()
        self.fd.close()
        return self.genre

    def parse_genres(self):
        ct = None
        if self.cache_ttl:
            ct = cacheTime(self.cache_file)
            try:
                self.genre_list = load_cache(self.cache_file)
            except:
                ct = None
        if not ct or (time.time() - ct) > self.cache_ttl:
            if DEBUG == 1:
                print('Getting fresh feed')
            try:
                parseXML = GenreParse()
                self.genres = self.fetch_genres()
                parseString(self.genres, parseXML)
                self.genre_list = parseXML.genreList
                write_cache(self.cache_file, self.genre_list)
            except:
                print("Failed to get genres from server, sorry.")
        return self.genre_list
Example #23
 def _getlinesfromurl(self, url):
     err = 0
     strerr = ''
     # Retry URL download a few times.
     for count in range(self.retries):
         if count != 0:
             time.sleep(self.retrysecs)
         try:
             opener = FancyURLopener()
             f = opener.open(url,
                             data='user_name=%s&password=%s&login=Login' %
                             (self.username, self.password))
             rc = 0
             if 'www-authenticate' in f.headers:
                 rc = 1
                 strerr = 'Authentication is required to access %s' % url
             break
         except IOError, (_err, _strerr):
             rc = 1
             print url
             print _strerr
             (err, strerr) = (_err, _strerr)
Example #24
class GenreFeed:
    def __init__(self, cache_ttl=3600, cache_dir='/tmp/pyshout_cache'):
        self.cache_ttl = cache_ttl
        self.cache_file = cache_dir + '/genres.cache'
        self.genre_list = ['Sorry, failed to load', '...try again later', 'Rock', 'Pop', 'Alternative']

    def fetch_genres(self):
        """
        Grabs genres and returns tuple of genres
        """
        self.genre_url = 'http://www.shoutcast.com/sbin/newxml.phtml'
        self.urlhandler = FancyURLopener()
        self.fd = self.urlhandler.open(self.genre_url)
        self.genre = self.fd.read()
        self.fd.close()
        return self.genre

    def parse_genres(self):
        ct = None
        if self.cache_ttl:
            ct = cacheTime(self.cache_file)
            try:
                self.genre_list = load_cache(self.cache_file)
            except:
                ct = None
        if not ct or (time.time() - ct) > self.cache_ttl:
            if DEBUG == 1:
                print 'Getting fresh feed'
            try:
                parseXML = GenreParse()
                self.genres = self.fetch_genres()
                parseString(self.genres, parseXML)
                self.genre_list = parseXML.genreList
                write_cache(self.cache_file, self.genre_list)
            except:
                print "Failed to get genres from server, sorry."
        return self.genre_list
Example #25
File: upnp.py Project: Alwnikrotikz/lh-abc
 def open(self, *args):
     f = FancyURLopener.open(self, *args)
     return XML(f)
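The open() override above lives in a FancyURLopener subclass so that every request comes back already parsed. A self-contained sketch of the same pattern, assuming an ElementTree-style XML parser (the original's XML helper may accept a file object directly; here the body is read first):

from urllib import FancyURLopener  # urllib.request.FancyURLopener on Python 3
from xml.etree.ElementTree import XML

class XMLOpener(FancyURLopener):
    # Opens a URL and returns the response body parsed into an Element tree.
    def open(self, *args):
        f = FancyURLopener.open(self, *args)
        return XML(f.read())

# usage (placeholder URL):
# root = XMLOpener().open("http://example.com/device.xml")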
Example #26
File: core.py Project: t-animal/mimms
def run(argv):
  "Run the main mimms program with the given command-line arguments."

  usage = "usage: %prog [options] <url> [filename]"
  #TODO: Deprecated. Use ArgumentParser instead
  parser = OptionParser(
    usage=usage,
    version=("%%prog %s" % VERSION),
    description="mimms is an mms (e.g. mms://) stream downloader")
  parser.add_option(
    "-c", "--clobber",
    action="store_true", dest="clobber",
    help="automatically overwrite an existing file")
  parser.add_option(
    "-r", "--resume",
    action="store_true", dest="resume",
    help="attempt to resume a partially downloaded stream")
  parser.add_option(
    "-b", "--bandwidth",
    type="float", dest="bandwidth",
    help="the desired bandwidth for stream selection in BANDWIDTH bytes/s")
  parser.add_option(
    "-t", "--time",
    type="int", dest="time",
    help="stop downloading after TIME minutes")
  parser.add_option(
    "-v", "--verbose",
    action="store_true", dest="verbose",
    help="print verbose debug messages to stderr")
  parser.add_option(
    "-q", "--quiet",
    action="store_true", dest="quiet",
    help="don't print progress messages to stdout")
  parser.add_option(
    "-s", "--speedup",
    type="int", dest="speedup",
    help="speed up download by downloading SPEEDUP parts simultaneously")
  
  parser.set_defaults(time=0, bandwidth=1e6, speedup=1)
  (options, args) = parser.parse_args(argv)
  if len(args) < 1:
    parser.error("url must be specified")
  elif options.speedup < 1:
    parser.error("speedup must be greater than zero!")
  elif not args[0].startswith("mms") and not args[0].endswith(".asx"):
    parser.error("only mms urls (i.e. mms://, mmst://, mmsh://) and .asx files are supported")
  elif len(args) > 2:
    parser.error("unknown extra arguments: %s" % ' '.join(args[2:]))
  if(args[0].startswith("mms://")):
    options.url = args[0]
  elif(args[0].endswith(".asx")):     #TODO: Determine in a better way if there is a container present
      #Get or download container file
      urlOpener = FancyURLopener({})

      try:
        f = urlOpener.open(args[0])
      except IOError as (errnop, strerror):
        if not options.quiet:
          print
          print >> sys.stderr,"Could not open asx file!: "+strerror
        return

      #Show ALL mms urls present in .asx file
      urls = f.read()
      urls = re.findall("mms[th]?://[^\"]*", urls)
      url = ""

      if (len (urls) == 0):
        if(not options.quiet):
          print
          print >> sys.stderr,"No mms url found in asx file!"
        return
      
      elif (len (urls) > 1):
        print
        print "This asx file contains more than one mms url. \
Please specify which you want to download:"

        i = 0
        for url in urls:
          print "("+str(i)+") "+url
          i += 1

        urlToUse = int(raw_input("Please specify the number which you want to use:\n"))
        if(urlToUse < 0 or urlToUse >= len(urls)):
          if(not options.quiet):
            print 
            print "Index out of range"
          return
      else:
        urlToUse=0

      options.url = urls[urlToUse]

      if(not options.quiet):
        print "Downloading extracted mms uri:"+options.url
Example #27
 def download(self):
     bean = self.bean 
     update = self.update 
     if not bean or not bean.path:            
         return None
      
     opener = FancyURLopener()
     remote = opener.open(bean.path)
     remote_size = 0
     
     if "Content-Length" in remote.headers:
         remote_size = int(remote.headers["Content-Length"])
         bean.size = size2text(remote_size) 
     
     block_size = 4096
     block_count = 0
     
     ext = get_file_extension(bean.path)
     
     path = FC().online_save_to_folder
     if not os.path.isdir(path):
         os.makedirs(path)
         
     if bean.artist:
         bean.artist = bean.artist.replace("/", "-")
         bean.artist = bean.artist.replace("\\", "-")
         to_file = os.path.join(FC().online_save_to_folder, bean.artist, bean.get_display_name() + ext)
         if not os.path.isdir(os.path.dirname(to_file)):
             os.makedirs(os.path.dirname(to_file))             
     else:
         to_file = os.path.join(path, bean.get_display_name() + ext)        
     
     to_file_tmp = to_file + ".tmp"
     
     if os.path.exists(to_file_tmp):
         bean.status = DOWNLOAD_STATUS_INACTIVE
         bean.to_file = to_file
         update(bean)
         return  None
     
     if os.path.exists(to_file):
         bean.status = DOWNLOAD_STATUS_COMPLETED
         bean.to_file = to_file
         update(bean)
         return None
     
     bean.save_to = to_file        
     file = open(to_file_tmp, "wb")
     
     data = True
     
     """begin download"""
     self.bean.status = DOWNLOAD_STATUS_DOWNLOADING
     self.update(self.bean)
     
     
     while data:
         data = remote.read(block_size)
         if data:
             block_count += 1
             file.write(data)
             #time.sleep(0.1)
             persent = block_count * block_size * 100.0 / remote_size
             if block_count % 50 == 0:
                 bean.persent = persent
                 update(bean)
                 
     """update file info on finish"""                    
     
     os.rename(to_file_tmp, to_file)
     bean.status = DOWNLOAD_STATUS_COMPLETED
     bean.to_file = to_file
     bean.persent = 100
     update(bean)
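The heart of the method above is a block-wise copy loop with periodic progress updates; a stripped-down, self-contained sketch of that loop (URL and destination path are placeholders):

from urllib import FancyURLopener  # urllib.request.FancyURLopener on Python 3

opener = FancyURLopener()
remote = opener.open("http://example.com/track.mp3")
total = int(remote.headers.get("Content-Length", 0))

with open("/tmp/track.mp3.tmp", "wb") as out:
    copied = 0
    while True:
        chunk = remote.read(4096)
        if not chunk:
            break
        out.write(chunk)
        copied += len(chunk)
        if total:
            print("%.1f%% downloaded" % (copied * 100.0 / total))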
Example #28
def main(argv=None):  # {{{
    # Separates the URL into a directory and the file or pattern based on the
    # last appearance of '/'.
    if len(sys.argv) > 1:
        pivot = sys.argv[1].rfind("/")
        url = (sys.argv[1])[:pivot]
        pivot += 1
        find = (sys.argv[1])[pivot:]
    else:
        print "******************************************************************************************************************************"
        print "* Invalid input!                                                                                                             *"
        print "*                                                                                                                            *"
        print "* Try: 'DownloadExternalPackage.py url [localFile]'                                                                          *"
        print "*                                                                                                                            *"
        print "* Where 'URL' is the URL with an explicit package name or the URL followed by the truncated package name. And 'localFile' is *"
        print "* the file name (including extension) that you would like to save as.                                                        *"
        print "*                                                                                                                            *"
        print "* Examples:                                                                                                                  *"
        print "*                                                                                                                            *"
        print "* DownloadExternalPackage.py 'http://issm.jpl.nasa.gov/files/externalpackages/petsc-2.3.2-p3.tar.gz' 'petsc-2.3.2-p3.tar.gz' *"
        print "*                                                                                                                            *"
        print "*     This is the old style and the safest way to download a package.                                                        *"
        print "*                                                                                                                            *"
        print "* DownloadExternalPackage.py 'http://issm.jpl.nasa.gov/files/externalpackages/libtool' 'libtool.tar.gz'                      *"
        print "*                                                                                                                            *"
        print "*     This is the new style. For packages like 'Libtool', which we never expect to be using multiple versions, this will     *"
        print "*     download the most recent version and save it as the generic 'libtool.tar.gz'.                                          *"
        print "*                                                                                                                            *"
        print "* DownloadExternalPackage.py 'http://issm.jpl.nasa.gov/files/externalpackages/gsl-1.' 'gsl-1.15.tar.gz'                      *"
        print "*                                                                                                                            *"
        print "*     This is the new style. This is a demonstration of how this script can be used to disambiguate a package name if there  *"
        print "*     are more than once package matching 'gsl-'.                                                                            *"
        print "*                                                                                                                            *"
        print "* DownloadExternalPackage.py 'http://issm.jpl.nasa.gov/files/externalpackages/libtool'                                       *"
        print "*                                                                                                                            *"
        print "*     This is the new style. This will download a package with 'libtool' as a prefix and save it as its canonical name.      *"
        print "*                                                                                                                            *"
        print "*                                                                                                                            *"
        print "******************************************************************************************************************************"

    if len(sys.argv) > 2:
        localFile = sys.argv[2]
        print "Downloaded file will be saved as: " + localFile
    else:
        localFile = None
        print "Downloaded file will saved with the same file name."

    print "Looking for: " + find

    # As an extra precaution, if no extension is given for a particular package
    # such as '.../libtool', then ensure that files found are of appropriate
    # file extensions.
    #
    # WARNING: The external packages directory includes executable binaries with
    # '.exe' extensions. As such, '.exe' is an acceptable suffix, but this is
    # inherently dangerous since this script can be used to download from any
    # valid website. Furthermore, if an individual attempts a "man-in-the-middle"
    # attack, then the user would be capable of downloading executables from
    # an untrusted source.
    pattern = find + "[\w.-]*(\.tar\.gz|tar\.gz2|tgz|zip|exe)?"
    parser = MyHTMLParser(pattern)

    # Creates a 'FancyURL' which allows the script to fail gracefully by catching
    # HTTP error codes 30X and several 40X(where 'X' is a natural number).
    urlObject = FancyURLopener()
    obj = urlObject.open(url)
    parser.feed(obj.read())

    # If a file pattern was used to describe the file that should be downloaded,
    # then there is the potential for multiple file matches. Currently, the script
    # will detect this ambiguity and print out all the matches, while informing
    # the user that he must refine his search.
    #
    # TODO: Prompt the user to select from a list his/her preferred target.
    if len(parser.targets) > 1:
        print "Could not resolve your download due to the number of hits."
        print "Refine your search."
        for i in parser.targets:
            print i

    elif len(parser.targets) == 1:
        print "Found: " + parser.targets[0]
        url += "/" + parser.targets[0]

        if localFile is None:
            if os.path.exists(parser.targets[0]):
                print "File " + parser.targets[
                    0] + " already exists and will not be downloaded..."
            else:
                urllib.urlretrieve(url, parser.targets[0])
                print "File saved as: " + parser.targets[0]
        else:
            if os.path.exists(localFile):
                print "File " + localFile + " already exists and will not be downloaded..."
            else:
                if parser.targets[0] == localFile:
                    print "File found and destination match."
                elif parser.matcher.match(localFile) is not None:
                    print "File found matches destination pattern."
                else:
                    print "WARNING: the file found \'" + parser.targets[
                        0] + "\' does not match \'" + localFile + "\'"
                    print "Ensure the downloaded version is suitable."

                urllib.urlretrieve(url, localFile)
                print "File saved as: " + localFile

    else:
        print "No matches found!"

    obj.close()
Example #29
url = "http://%s.craigslist.org/search/?catAbb=sss&minAsk=%d&maxAsk=%d&query=%s" % (region, minAsk, maxAsk, quote_plus(query))
replaceStrings = [("\n", " "), ("\t", " "), ("  ", " "), ("<p class=\"row\">", delimStart), ("</p>", delimEnd)]
results = []

# Reg-Ex Patterns
patItems = re.compile(r"(?<=%s)([^|]*)(?=%s)" % (delimStart, delimEnd))
patSpan = re.compile(r"(?<=</span>)(.*)(?=<span)")
patUrl = re.compile(r"http[^\"']+(?=[\"'])")
patDate = re.compile("[JFMASOND]{1}[a-z]{2}\s+\d+(?=\s)")
patPrice = re.compile(r"(?<=\$)([^\<]*)(?=<)")
patDesc = re.compile(r"(?<=\">)([^\<]*)(?=<)")

try:
	# Get the HTML
	opener = FancyURLopener({})
	req = opener.open(url)
	html = req.read()
	html = html[html.find(itemStart):]
	html = html[0:html.find(itemEnd)-1]

	# Clean up the HTML
	for pair in replaceStrings:
		while html.find(pair[0]) >= 0:
			html = html.replace(pair[0], pair[1])

	# Parse the results
	for item in re.findall(patItems, html):
		"""
		Example:
		<p class="row">
			<span class="ih" id="images:5Ic5Kf5H23L43I63N1c2ffc52eab0d5a11a35.jpg">&nbsp;</span>
Example #30
File: main.py Project: vincent19376/Grunge
# -*- coding: utf-8 -*-
Example #31
    def download(self):
        bean = self.bean
        update = self.update
        if not bean or not bean.path:
            return None

        opener = FancyURLopener()
        remote = opener.open(bean.path)
        remote_size = 0

        if "Content-Length" in remote.headers:
            remote_size = int(remote.headers["Content-Length"])
            bean.size = size2text(remote_size)

        block_size = 4096
        block_count = 0

        ext = get_file_extension(bean.path)

        path = FC().online_save_to_folder
        if not os.path.isdir(path):
            os.makedirs(path)

        if bean.save_to:
            to_file = os.path.join(bean.save_to, bean.text + ".mp3")
        else:
            to_file = get_bean_download_path(bean, FC().online_save_to_folder)

        if not os.path.exists(os.path.dirname(to_file)):
            os.makedirs(os.path.dirname(to_file))

        to_file_tmp = to_file + ".tmp"

        if os.path.exists(to_file_tmp):
            bean.status = DOWNLOAD_STATUS_INACTIVE
            bean.to_file = to_file
            update(bean)
            return None

        if os.path.exists(to_file):
            bean.status = DOWNLOAD_STATUS_COMPLETED
            bean.to_file = to_file
            update(bean)
            return None

        bean.save_to = to_file
        with file(to_file_tmp, "wb") as tmp_file:
            data = True
            """begin download"""
            self.bean.status = DOWNLOAD_STATUS_DOWNLOADING
            self.bean.path = to_file
            self.update(self.bean)

            while data:
                data = remote.read(block_size)
                if data:
                    block_count += 1
                    tmp_file.write(data)
                    #time.sleep(0.1)
                    persent = block_count * block_size * 100.0 / remote_size
                    if block_count % 50 == 0:
                        bean.persent = persent
                        update(bean)
        time.sleep(0.5)
        """update file info on finish"""
        logging.debug("rename %s - %s" % (to_file_tmp, to_file))
        os.rename(to_file_tmp, to_file)
        bean.status = DOWNLOAD_STATUS_COMPLETED
        bean.to_file = to_file
        bean.persent = 100
        update(bean)
Example #32
    def download(self):
        bean = self.bean 
        update = self.update 
        if not bean or not bean.path:            
            return None
         
        opener = FancyURLopener()
        remote = opener.open(bean.path)
        remote_size = 0
        
        if "Content-Length" in remote.headers:
            remote_size = int(remote.headers["Content-Length"])
            bean.size = size2text(remote_size) 
        
        block_size = 4096
        block_count = 0
        
        ext = get_file_extension(bean.path)
        
        path = FC().online_save_to_folder
        if not os.path.isdir(path):
            os.makedirs(path)
            
        if bean.save_to:
            to_file = os.path.join(bean.save_to, bean.text + ".mp3") 
        else:            
            to_file = get_bean_download_path(bean, FC().online_save_to_folder)
        
        if not os.path.exists(os.path.dirname(to_file)):
                os.makedirs(os.path.dirname(to_file))        
        
        to_file_tmp = to_file + ".tmp"
        
        if os.path.exists(to_file_tmp):
            bean.status = DOWNLOAD_STATUS_INACTIVE
            bean.to_file = to_file
            update(bean)
            return None
        
        if os.path.exists(to_file):
            bean.status = DOWNLOAD_STATUS_COMPLETED
            bean.to_file = to_file
            update(bean)
            return None
        
        bean.save_to = to_file        
        with file(to_file_tmp, "wb") as tmp_file:
            data = True
            
            """begin download"""
            self.bean.status = DOWNLOAD_STATUS_DOWNLOADING
            self.bean.path = to_file
            self.update(self.bean)

            while data:
                data = remote.read(block_size)
                if data:
                    block_count += 1
                    tmp_file.write(data)
                    #time.sleep(0.1)
                    persent = block_count * block_size * 100.0 / remote_size
                    if block_count % 50 == 0:
                        bean.persent = persent
                        update(bean)
        time.sleep(0.5)           
        """update file info on finish"""                    
        logging.debug("rename %s - %s" % (to_file_tmp, to_file))
        os.rename(to_file_tmp, to_file)
        bean.status = DOWNLOAD_STATUS_COMPLETED
        bean.to_file = to_file
        bean.persent = 100
        update(bean)