Example #1
0
def GetImg(file):
    """Download every .jpg image referenced in the given HTML text.

    file -- HTML source to scan for <img src="...jpg..." width ...> tags.
    Each matched URL is downloaded and saved locally as 0.jpg, 1.jpg, ...
    """
    # Capture the src attribute of any .jpg URL (query string allowed).
    urls = re.findall(r'\bsrc="(.*?\.jpg.*?)" width', file)
    for i, url in enumerate(urls):
        # Fixed: urlretrieve (not "urlretrive"), and the index must be
        # converted to str -- the original's i + '.jpg' raised TypeError.
        urllib.urlretrieve(url, str(i) + '.jpg')
Example #2
0
    def Download(self):
        url = 'kmmc.in/wp-content/uploads/2014/01/lesson2.pdf'
        print "Downloading wiht urllib"
        urllib.urlretrive(url,"lesson2.pdf")

        print "Downloading worksheet"
        r = request.get(url)
        with open("lesson2.pdf") as file:
            file.write(r.content)

        # self.address
        pass
Example #3
0
def download_images(img_urls, dest_dir):
  """Given the urls already in the correct order, downloads
  each image into the given directory.
  Gives the images local filenames img0, img1, and so on.
  Creates an index.html in the directory
  with an img tag to show each local image file.
  Creates the directory if necessary.
  """
  # +++your code here+++
	if not os.path.exits(dest_dir):
                os.mkdirs(dest_dir)

        index = file(os.path.join(dest_dir, 'index.html'), 'w')
        index.write('<html><body>\n')
        i=0
        for img in img_url:
                name = 'img%d' % i
                print 'image',img
                urllib.urlretrive(img,os.path.join(dest_dir, name))
                index.write('<img src = "%s">' % (name,))
                i += 1

        index.write('\n</body>,/html>\n'))
        i += 1
Example #4
0
import urllib

# urlopen()
f = urllib.urlopen("www.baidu.com")
firstline = f.readline()

print firstline
# readlines(),fileno(),geturl(),getcode(),info(),close(),read()

# urlretrive()
filename = urllib.urlretrive("https://docs.python.org/2/library/urllib.html")
filename = urllib.urlretrive("https://docs.python.org/2/library/urllib.html", filename="test.html")
print filename[0]
print filename[1]
print filename[2]

# urlcleanup()
urllib.urlcleanup()

# quote(),quote_plus(),unquote(),unquote_plus()
urllib.quote("http://www.baidu.com")
urllib.quote_plus("http://www.baidu.com")

# urlencode()
params = urllib.urlencode({"spam": 1, "eggs": 2, "bacon": 0})
f = urllib.urlopen("http://python.org/query?%s" % params)
print f.read()
f = urllib.urlopen("http://python.org/query", params)
print f.read()
Example #5
0
# Temp  First fetch an image from the internet
import urllib
import exifread

myImage = urllib.urlretrive('https://farm9.staticflickr.com/8367/8588252956_49d28ddb1a_z.jpg')
for tag in tags.keys():
    if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename', 'EXIF MakerNote'):
        print "Key: %s, value %s" % (tag, tags[tag])
Example #6
0
# NOTE(review): the lines below are cheat-sheet pseudo-code, not runnable
# Python -- e.g. the 'w/r/a' mode string, the unbalanced parenthesis in the
# getinfo() line, and the "urlretrive" spelling (correct: urlretrieve) are
# illustrative shorthand. Left byte-identical; only comments translated
# from Croatian to English.
f.close()

#zip
zip = zipfile.ZipFile(open(ime, 'w/r/a'))
raw = zip.read('koji clan zipa')
zip.getinfo('file in zip').date_time/comments/file_size_compress_size)

#input/scanf
a = input('nesto usput ispise')			#takes input as int or 'string' (quotes required around strings)
a = raw_input('za ispisat')				#whatever it receives is stored as a string (no quotes needed)
sys.argv[num]							#arguments taken from the terminal at program launch; index 0 is the program itself

#internet
file = urllib.urlopen(url)				#then use like a file: read(), readlines()...
urllib2.urlopen('http://www.blabla.com')
urllib.urlretrive(url, dest)			#downloads the object at url (jpg, gif, txt...) and saves it to dest

#server (XML-RPC)
server=xmlrpclib.ServerProxy(url)			#connects to the server
print server.system.listMethods()			#prints the methods, i.e. commands the server supports
print server.system.methodHelp(metoda)		#description of the given method
print server.system.methodSignature(metoda)	#gives the input/output types

#dictionary
dict = {}							#initialization
dict['a'] = 'alpha'
dict.keys()							#returns all keys of the dict, e.g. ['a']
dict.values()						#returns all associated values, e.g. ['alpha']
dict.items()						#returns (key, value) tuples, e.g. [('a', 'alpha')]

Example #7
0
# Assumptions:
# Vector Algorithm is used in it
# 1. Melody is monophonic
# 2. Define time class into deferent core class
# Sequence learning Models is a model for it
# Have both Long and Short term memory storage thing in it
#
#	Train it until You get
#	Best Loss so far encountered, Saving Model....
#
#	After that type in
#	python rnn_sample --config_file model/folder/x.config

import urllib
import zipfile
import nottingham_util
import rnn

# Fixed: a scheme is required -- without "http://" urllib treats the
# argument as a local path.
url = "http://www-etud.iro.umontreal.ca/~boulanni/Nottingham.zip"
# Fixed spelling: urlretrieve, not "urlretrive".
urllib.urlretrieve(url, 'dataset.zip')

# Renamed from `zip` to avoid shadowing the builtin zip().
archive = zipfile.ZipFile(r'dataset.zip')
archive.extractall('data')

# Project-specific steps (nottingham_util / rnn modules not shown here).
nottingham_util.create_model()
rnn.train_model()