def getPhotoIDbyTag(tag):
    retries = 0
    photos = None
    while retries < 3:
        try:
            logging.debug(user.id)
            photos = flickr.photos_search(user_id=user.id, auth=all,
                                          tags=tag, tag_mode='any')
            break
        except:
            logging.error("flickr2history: Flickr error while searching ....retrying")
            logging.error(sys.exc_info()[0])
        retries += 1

    if not photos or len(photos) == 0:
        logging.debug("flickr2history: No image in Flickr (yet) with tags %s (possibly deleted in Flickr by user)" % tag)
        return None

    logging.debug("flickr2history: Tag=%s found %d" % (tag, len(photos)))
    while len(photos) > 1:
        logging.debug("flickr2history: Tag %s matches %d images!" % (tag, len(photos)))
        logging.debug("flickr2history: Removing other images")
        try:
            photos.pop().delete()
        except:
            logging.error("flickr2history: Flickr error while deleting duplicate image")
            logging.error(sys.exc_info()[0])

    return photos[0]
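Several of these snippets hand-roll the same three-attempt retry loop around flickr.photos_search. For comparison, here is a minimal, hypothetical retry helper (not part of flickrpy) that factors the pattern out; it assumes only that the wrapped callable raises on failure:

import logging
import sys
import time

def with_retries(fn, attempts=3, delay=1.0):
    # Hypothetical helper, not part of flickrpy: call fn(), retrying on
    # any exception; returns None if every attempt fails, mirroring how
    # getPhotoIDbyTag falls through above.
    for attempt in range(attempts):
        try:
            return fn()
        except Exception:
            logging.error("attempt %d/%d failed: %s",
                          attempt + 1, attempts, sys.exc_info()[0])
            time.sleep(delay)
    return None

# Usage, assuming `flickr`, `user`, and `tag` as in the snippet above:
# photos = with_retries(lambda: flickr.photos_search(
#     user_id=user.id, tags=tag, tag_mode='any'))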
Example 2
def get_urls_for_tags(tags, number):
    # TODO: might need to change this to loop through each tag 
    photos = flickr.photos_search(tags=tags, per_page=number)
    urls = []
    for photo in photos:
        urls.append(photo.getURL(size='Square', urlType='source'))
    return urls
Example 3
def quick_get_urls_for_tags(tags, number):
    photos = flickr.photos_search(tags=tags, per_page=number)
    urls = []
    for photo in photos:
        urls.append('http://photos%s.flickr.com/%s_%s_s.jpg' %\
                    (photo.server, photo.id, photo.secret))
    return urls
Example 4
def get_biased_photos(tag, min_taken_date, max_taken_date):
    #Change Folder Path
    if os.path.isdir(tag):
        pass
    else:
        os.mkdir(tag)
    os.chdir(tag)

    #Run image download
    for page in range(1, 8):
        photos = flickr.photos_search(tags=tag,
                                      page=page,
                                      per_page=500,
                                      tag_mode='all',
                                      sort="interestingness-desc",
                                      min_taken_date=min_taken_date,
                                      max_taken_date=max_taken_date)
        for photo in photos:
            try:
                url = photo.getURL(size='Original', urlType='source')
                urllist.append(url)
                image = urllib.URLopener()
                image.retrieve(url,
                               os.path.basename(urlparse.urlparse(url).path))
                print 'Downloading...', url
            except flickr.FlickrError:
                print 'Link no longer available (!)'
Example 5
def get_photos_for_tags(tags, number=50, unique_owners=False, start_page=0):

    photos = []
    seen_owner = {}
    page = start_page
    need_more = True

    debug("starting to get photos...")

    while (need_more):
        page += 1

        debug("page %s of photo search" % page)

        for p in flickr.photos_search(tags=tags, tag_mode='all', page=page):
            debug("photo id %s" % p.id)

            if unique_owners:

                # silly thing is unicode, although just a number.
                uid = p.owner.id.encode('ascii')
                debug("   photo has uid %s" % uid)

                if seen_owner.has_key(uid): continue
                seen_owner[uid] = 1

            photos.append(p)
            if (len(photos) == number):
                need_more = False
                break

    return photos
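A usage sketch for the helper above, with hypothetical tag values; it assumes `flickr` is configured and `debug` is defined as in the snippet. Note that the loop only exits once `number` photos have been collected, so a tag set with fewer matches would page forever:

# Hypothetical usage: ten 'sunset'+'beach' photos, at most one per owner.
photos = get_photos_for_tags(['sunset', 'beach'], number=10, unique_owners=True)
for p in photos:
    print p.id, p.owner.id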
Example 6
def get_photos(from_date, to_date):
    print "\tRetrieving photos from {date}".format(date=from_date.strftime("%Y-%m-%d"))

    ## PHOTOS on this day
    for photo in flickr.photos_search(auth=True,
                                      user_id='36251685@N00',
                                      min_taken_date=from_date.strftime("%Y-%m-%d"),
                                      max_taken_date=to_date.strftime("%Y-%m-%d"),
                                      media='photos'):

        print "\t\t * Found a photo: {title} {url}".format(title=photo.title, url=photo.url)
        folder = '/home/brenda/Photos/{year}/{month}/{day}'.format(year=from_date.strftime("%Y"), month=from_date.strftime("%m"), day=from_date.strftime("%d"))

        ensure_folder_exists(folder)

        filename = '{folder}/{id}.jpg'.format(folder=folder, id=photo.id)

        if os.path.exists(filename):
            print "\t\t * skipping {filename}".format(filename=filename)
        else:
            largest = photo.getSizes()[-1]
            url = largest['source']
            print "\t\t * Retrieving: {url}".format(url=url)
            print "\t\t * Saving to: {filename}".format(filename=filename)
            (filename, headers) = urllib.urlretrieve(url, filename)
Example 7
def get_urls_for_tags(tag, number):
    """
    :param tag: the tag for which to fetch photo URLs
    :param number: the number of photo URLs to fetch
    :return: list of URLs
    """
    try:
        urls = []
        path_to_directory = "FlickrImageFiles"

        if not os.path.exists(path_to_directory):
            os.makedirs(path_to_directory)

        f = open(path_to_directory + "/" + tag + ".txt", "w")       # open file for writing urls
        photos = flickr.photos_search(tags=tag, per_page=number)    # call the api function

        for photo in photos:
            url = photo.getURL(size='Medium', urlType='source')     # specify the size and source for url
            print("\n" + url)
            f.write("\n" + url)                                     # write url to file
            urls.append(url)                                        # append to list

        f.close()
        return urls

    except Exception as e:
        print(e)
    except KeyboardInterrupt as e:
        print(e)
Example 8
    def get_flickr(self, keyword):
        """ search Flickr for keyword """

        if (flickr.API_KEY == ''):
            raise Exception('Flickr API key not set')

        if (flickr.API_SECRET == ''):
            raise Exception('Flickr API secret not set')

        photos = flickr.photos_search(text=keyword, per_page=20)
        urls = []

        for photo in photos:
            furl = 'http://farm%s.static.flickr.com/%s/%s_%s_s.jpg' % (
                photo.farm, photo.server, photo.id, photo.secret)  # thumbnail URL (unused below)
            url = 'http://www.flickr.com/photos/%s/%s' % (photo.owner.id,
                                                          photo.id)
            urls.append(url)

            i = Item(title=photo.title,
                     url=url,
                     source='flickr',
                     keyword=keyword)
            self.all_data.append(i)

        return self.all_data
Example 11
def create_products(queue):
    """
    Download an image from Flickr for the product on the queue and if
    successful now or previously, create the applicable product records.
    """

    # Close the connection for this process to avoid the issue discussed here:
    # http://groups.google.com/group/django-users/
    # browse_thread/thread/2c7421cdb9b99e48
    connection.close()
    product_options = ProductOption.objects.as_fields()
    while True:

        # Get next set of data from queue.
        data = queue.get()
        if data is None:
            break
        main_category, sub_category, product = data[0], data[1], data[-1]

        # Try and download a product image from Flickr.
        image = join(image_dir, "%s.jpg" % product)
        if exists(image):
            message = "Using already downloaded image for %s" % data
        else:
            try:
                images = flickr.photos_search(tags=[product], per_page=1)
                if not images:
                    raise Exception("No images found")
                url = images[0].getURL(size="Large", urlType="source")
                urlretrieve(url, image)
            except Exception, e:
                message = "Error [%s] for %s" % (e, data)
            else:
                message = "Successfully downloaded image for %s" % data
Example 12
def set_from_tags(tags, title, description, all=True):
    """all=True means include non-public photos"""
    user = flickr.test_login()
    photos = flickr.photos_search(user_id=user.id, auth=all, tags=tags)
    photoset = flickr.Photoset.create(photos[0], title, description)
    photoset.editPhotos(photos)
    return photoset
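A hypothetical call to the helper above, assuming flickrpy credentials are already configured as in Example 17 below:

# Hypothetical usage: gather everything tagged 'rome2012' into one new set.
photoset = set_from_tags(['rome2012'], 'Rome 2012', 'Photos from the trip')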
Example 13
def search(loc):
    # downloading image data
    for page in range(1, 9):
        f = flickr.photos_search(text=name + ' clouds',
                                 per_page=500,
                                 page=page)
        urllist = []  # store a list of what was downloaded

        # downloading images
        for k in f:
            try:
                url = k.getURL(size='Medium', urlType='source')
                urllist.append(url)
                image = urllib.URLopener()
                if not os.path.exists(
                        loc + os.path.basename(urlparse.urlparse(url).path)):
                    image.retrieve(
                        url,
                        loc + os.path.basename(urlparse.urlparse(url).path))
                    print 'downloading:', url
                else:
                    print 'already downloaded ' + url + ' skipping'
            except:
                print 'error on ' + url + ' skipping'
                continue

        # write the list of urls to file (note: rewritten on every page,
        # so only the last page's URLs survive)
        fl = open('urllist.txt', 'w')
        for url in urllist:
            fl.write(url + '\n')
        fl.close()
Example 17
def search_perso():
    flickr.email = '*****@*****.**'
    flickr.password = '******'

    # Do this only the first time
    if (False):
        permission = "read"
        myAuth = flickr.Auth()
        frob = myAuth.getFrob()
        link = myAuth.loginLink(permission, frob)
        raw_input("Please make sure you are logged into Flickr in Firefox")
        firefox = os.popen('firefox \"' + link + '\"')
        raw_input(
            "A Firefox window should have opened. Press enter when you have authorized this program to access your account"
        )
        token = myAuth.getToken(frob)
        f = file('token.txt', 'w')
        f.write(token)
        f.close()

    #user = flickr.people_findByEmail(flickr.email)
    user = flickr.test_login()
    print user.username
    photoList = flickr.photos_search(user.id, True,
                                     '')  #can be used to search by theme

    urls = []
    for photo in photoList:
        urls.append(getURL(photo, 'Large', False))

    for url in urls:
        print url
Example 19
def search(year, month, day, lat=52.52992, lon=13.41157, limit=32, bust=1):
    """
    Return photos for the given date object.
    """
    start = date(year, month, day)
    end = start + timedelta(days=1)
    photos = flickr.photos_search(
        min_taken_date=start.strftime("%Y-%m-%d"),
        max_taken_date=end.strftime("%Y-%m-%d"),
        lat=str(lat),
        lon=str(lon),
        #sort='interestingness-desc',
        #geo_context='2',
        per_page=limit * 2,
        radius='20')
    final = []
    users = set()
    removed = 0
    # Filter multiple photos by same user
    for photo in photos:
        if photo.owner.id not in users:
            final.append(photo)
            users.add(photo.owner.id)
        elif len(photos) - removed <= limit:
            # Don't remove if we'd end up with not enough photos
            final.append(photo)
        else:
            removed += 1

    return [(p.getMedium(), p.title) for p in final]
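For reference, a hypothetical call: the coordinates default to central Berlin, and the return value is a list of (medium-size URL, title) pairs:

# Hypothetical usage: photos taken near the default coordinates on 2015-06-01.
for url, title in search(2015, 6, 1, limit=16):
    print url, title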
Example 22
def deleteAllPics():

    global user

    try:
        user = flickr.test_login()
        logging.debug(user.id)
    except:
        logging.error(sys.exc_info()[0])
        return None

    if (deleteAll.startswith('true') == False):
        return  # check again to be sure whether to go on

    logging.debug('deleteAll: Started Delete')
    retries = 0

    # this may take a very long time!
    while (retries < 3):
        try:
            photos = []
            logging.debug(user.id)
            np = flickr.photos_search_pages(user_id=user.id, auth=all, per_page="500")
            numPages = int(np)
            i = 1
            logging.debug("found %d num pages" % numPages)
            while (numPages > 0):
                spage = str(i)
                photos.extend(flickr.photos_search(user_id=user.id, auth=all, per_page="500", page=spage))
                logging.debug("added page %d, %d pics so far" % (i, len(photos)))

                numPages = numPages - 1
                i = i + 1

            logging.debug("got all %d pics to delete" % len(photos))
            break
        except:
            logging.error("deleteAll: Flickr error while searching ....retrying")
            logging.error(sys.exc_info()[0])

        retries = retries + 1

    if (not photos or len(photos) == 0):
        logging.debug("deleteAll: No files in Flickr to delete")
        return None

    logging.debug("deleteAll: found %d media files to delete" % (len(photos)))
    # note: the loop below stops when one photo remains
    while (len(photos) > 1):
        try:
            photos.pop().delete()
            print "deleting pic"
            logging.debug("deleteAll: Removed one image... %d images to go" % (len(photos)))

        except:
            logging.error("deleteAll: Flickr error while deleting image")
            logging.error(sys.exc_info()[0])

    logging.debug("deleteAll: DONE DELETING - NOTHING ELSE TO DO - EXITING")
    os._exit(1)
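The page-walking above (photos_search_pages for the page count, then photos_search per page) recurs in several of these examples. A minimal generator sketch of the same idea, assuming an authenticated flickrpy `user` as above (the `auth=all` argument from the snippet is omitted):

def iter_all_photos(user, per_page=500):
    # Walk every result page; flickrpy reports the page count via
    # photos_search_pages and serves each page via photos_search(page=...).
    num_pages = int(flickr.photos_search_pages(user_id=user.id,
                                               per_page=str(per_page)))
    for page in range(1, num_pages + 1):
        for photo in flickr.photos_search(user_id=user.id,
                                          per_page=str(per_page),
                                          page=str(page)):
            yield photo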
Example 23
    def query_flickr(self):
        tags = self.query.split(' ')
        #tags = '76228432@N00'
        #photos = flickr.photos_search(user_id=tags, per_page=self.number, sort='relevance')
        photos = flickr.photos_search(tags=tags,
                                      per_page=self.options.nritems,
                                      tag_mode='all',
                                      sort='relevance')
        self._logInfo("Found %d" % len(photos))

        nrimages = 0
        for photo in photos:
            image = None
            nrprefix = "%05d" % nrimages
            # Try sizes from largest to smallest until one exists and fits.
            for size in ('Original', 'Large', 'Medium'):
                try:
                    imageurl = photo.getURL(size=size, urlType='source')
                    image = internetFile(imageurl, nrprefix=nrprefix)
                except flickr.FlickrError:
                    continue  # this size probably does not exist; try the next
                if image is not None and image.discardReason != "too big":
                    break

            if image is None or image.discardReason == "too big":
                self._logDebug("Cannot find proper size of %s" % photo.id)
                continue
            if image.isNotAnImage:
                self._logDebug("Image discarded because: %s" %
                               image.discardReason)
                continue

            image.info.addQueryInfo(('flickr_id', photo.id))
            image.info.addQueryInfo(('query', self.query))
            try:
                context = ' '.join([tag.text for tag in photo.tags])
            except AttributeError:
                context = ''
            image.info.addQueryInfo(('context', context))
            if not self.options.noresize:
                image.resizeArea()
            image.info.addQueryInfo(('callingdetails', self.callingdetails))
            image.info.addQueryInfo(('service', 'flickr'))
            image.saveToDisk(self.options.output)  # Save the image to disk.
            nrimages = nrimages + 1

        self._logInfo("%d images were downloaded!" % nrimages)
Example 24
    def scrapeTag(self, tags, per_page, page=1, sort='interestingness-desc'):
        photos = flickr.photos_search(tags=tags, per_page=per_page, page=page, sort=sort)

        urls = []
        for photo in photos:
            urls.append(self.get_url(photo))

        return urls
Example 25
def main():
	photos = flickr.photos_search(tags="dog",per_page=3)
	urls = []
	for photo in photos:
		urls.append(photo.getURL(size='Medium',urlType='source'))

	print urls
	return render_template("index.html",urls=urls)
Example 27
def get_urls_for_tags(tags, number):
    photos = flickr.photos_search(tags=tags, tag_mode='all', per_page=number)
    urls = []
    for photo in photos:
        try:
            urls.append(photo.getURL(size='Large', urlType='source'))
        except:
            continue
    return urls
Example 28
def search_by_tag(tag, number):
    photoList = flickr.photos_search('', False, tag, '', '', '', '', '', '', '', number, 1)

    urls = []
    for photo in photoList:
        urls.append(getURL(photo, 'Large', False))

    for url in urls:
        print url
Example 30
def search_photos(text, per_page=PER_PAGE):
	photos = []
	_out_log('Searching for "%s" photos...' % text)
	try:
		photos = flickr.photos_search(text=text, per_page=per_page)
	except:
		_out_log('Flickr service timed out; download failed.')
		return
	_out_log('Found %s photos.' % len(photos))
	_down_files(photos, text)
Example 31
def get_urls(tags):
	"""
	parameters = tags: string delimited by commas.
	returns the urls which contain those tags
	"""
	photos = flickr.photos_search(tags=tags)
	urls = []
	for photo in photos:
		urls.append('https://farm%s.staticflickr.com/%s/%s_%s_b.jpg' % (photo.farm, photo.server, photo.id, photo.secret))
	return urls
Example 32
def get_urls_for_tags(tags, number):
    photos = flickr.photos_search(tags=tags, tag_mode='all', per_page=number)
    urls = []
    for photo in photos:
        try:
            # size : size of photo Thumbnail, Small,
            #               Medium, Large, Original
            urls.append(photo.getURL(size='Original', urlType='source'))
        except:
            continue
    return urls
Example 33
	def search_photos(self, text):
		photos = flickr.photos_search(text=text, per_page=9, extras="owner_name,url_sq,url_t,url_s,url_m,url_o")
		r = []
		for photo in photos:
			sizes = photo.getSizes()
			nsizes = {}
			for size in sizes:
				nsizes[size["label"].lower()] = size
			photodata = {"owner": {"username": photo.owner.username, "realname": photo.owner.realname}, "sizes": nsizes}
			r.append(photodata)
		return r
Example 34
    def post(self):
        keyword = self.request.get('keyword')
        photos = flickr.photos_search(tags=keyword)

        if photos:
            p = random.choice(photos)
            url = 'https://farm%s.staticflickr.com/%s/%s_%s.jpg'%(p.farm, p.server, p.id, p.secret)
            print(url)
            self.render('front.html', pic=url)
        else:
            error = "Results Not Found"
            self.render('front.html', error=error)
Example 35
def show_pic(tags, idx):
    photos = flickr.photos_search(tags=tags, per_page=50)
    ppp = choice(photos)
    u = ppp.getURL(size="Medium", urlType="source")
    ph = load_photo(u)
    # ph.show()
    ph.save("pictures/" + folder_name + "/" + str(idx) + ".jpg", "JPEG")
    newtagset = tags[1:]
    newtags = list(set(ppp.tags) - alltags)
    if len(newtags) == 0:
        print "done"
        return newtagset  # nothing new to add; avoid choice() on an empty list
    newtagset.append(choice(newtags).text.encode("utf-8"))
    return newtagset
Example 36
def get_urls_for_tags(tags, number):
    photos = []
    for page_number in range(1, 11):
        photos.append(flickr.photos_search(tags=tags, per_page=number, page=page_number))
    data = list(utilities.traverse(photos))
    urls = []
    for photo in data:
        try:
            urls.append(photo.getURL(size='Large', urlType='source'))
        except:
            continue
    return urls
Example 37
def show_results():
    if request.method == 'POST' and request.form['search']:
        print request.form['search']
        photos = flickr.photos_search(text=request.form['search'])
        url_list = [url.getMedium() for url in photos]

        if not len(url_list):
            flash("Sorry, no photos were found")
            return redirect(url_for('initial'))

        random_index = int(random() * len(url_list))
        photo = url_list[random_index]
        return render_template('result.html', photo=photo)
    return redirect(url_for('initial'))
Example 39
	def retrieve_urls(self, query, number, page):
		print datetime.now().strftime('%Y-%m-%d %H:%M:%S'), ': retrieving photo objects with query ', query, '...'
		try:
			photos = flickr.photos_search(text=query, per_page=number, page=page, sort='relevance')
		except IOError:
			print 'IO error occurred!'
			return []
		urls = []
		for photo in photos:
			try:
				urls.append(photo.getURL(size='Medium', urlType='source'))
			except:
				print 'Query {}: could not get url'.format(query)
				continue
				# log error cases
		return urls
Example 40
    def getCollectionFromFlickr(self,repView):
        coll = pim.ItemCollection(view = repView)
        if self.username:
            flickrUsername = flickr.people_findByUsername(self.username)
            flickrPhotos = flickr.people_getPublicPhotos(flickrUsername.id,10)
            coll.displayName = self.username
        elif self.tag:
            flickrPhotos = flickr.photos_search(tags=self.tag,per_page=10,sort="date-posted-asc")
            coll.displayName = self.tag.displayName
            
        self.sidebarCollection = coll

        for i in flickrPhotos:
            photoItem = getPhotoByFlickrID(repView, i.id)
            if photoItem is None:
                photoItem = FlickrPhoto(photo=i,view=repView,parent=coll)
            coll.add(photoItem)
        repView.commit()
Example 41
    def fillCollectionFromFlickr(self, view, n=16, apiKey=None):
        """
        Fills the collection with photos from the flickr website.
        """
        if apiKey:
            flickr.setLicense(apiKey)

        if self.userName:
            flickrUserName = flickr.people_findByUsername(
                self.userName.encode('utf8'))
            flickrPhotos = flickr.people_getPublicPhotos(flickrUserName.id, n)
        elif self.tag:
            flickrPhotos = flickr.photos_search(tags=self.tag,
                                                per_page=n,
                                                sort="date-posted-desc")
        else:
            # assert on a tuple is always true; assert the condition directly
            assert False, "we should have either a userName or tag"

        # flickrPhotosCollection is a collection of all FlickrPhotos. It has
        # an index named flickerIDIndex which indexes all the photos by
        # their flickrID which makes it easy to quickly lookup any photo by
        # index.
        flickrPhotosCollection = schema.ns('flickr',
                                           view).flickrPhotosCollection
        for flickrPhoto in flickrPhotos:
            """
            If we've already downloaded a photo with this id use it instead.
            """
            photoUUID = flickrPhotosCollection.findInIndex(
                'flickrIDIndex',  # name of Index
                'exact',  # require an exact match
                # compare function
                lambda uuid: cmp(flickrPhoto.id,
                                 view.findValue(uuid, 'flickrID')))

            if photoUUID is None:
                photoItem = FlickrPhoto(photo=flickrPhoto, itsView=view)
            else:
                photoItem = view[photoUUID]

            self.add(photoItem)

        view.commit()
Example 42
    def get(self):
        if self.request.get('method') == 'listkeywords':
            user_object = people_findByUsername(self.request.get('username'))
            data = tags_getListUser(user_object.id)

            template_values = {
                               'nsid' : user_object.id,
                               'keywords' : data,
                               }
            path = os.path.join(os.path.dirname(__file__),TEMPLATE_DIR,TAG_LIST)
        elif self.request.get('method') == 'listphotos':
            data = photos_search(self.request.get('username'),False,self.request.get('tags').encode('utf-8'))

            template_values = {
                               'photos' : data,
                               }
            path = os.path.join(os.path.dirname(__file__),TEMPLATE_DIR,THUMB_LIST)
            
        self.response.out.write(template.render(path, template_values).decode('utf-8'))
Example 44
def add_entry_drink():
    if not session.get('logged_in'):
        abort(401)
    else:
        user_id = session['user_id']
        user_name = get_username(user_id)
        tag = 'creativecommons'
        text = request.form['winery'] + ' ' + request.form['style']
        print 'Searching for: ', text
        photos = flickr.photos_search(text=text, tags=tag)
        urllist = []  #store a list of what was downloaded
        path = ''
        if not photos:
            photo = open("static/default.jpg", "rb").read()
            photobin = sqlite3.Binary(photo)
        else:
            flash('Downloading image, please be patient')
            url = photos[0].getURL(size='Medium', urlType='source')
            urllist.append(url)
            path = os.path.basename(urlparse.urlparse(url).path)
            urllib.URLopener().retrieve(url, path)
            print 'downloading: ', url
            photo = open(path, "rb").read()  # read the file just saved (avoids a second download)
            photobin = sqlite3.Binary(photo)

        db = get_db()
        db.execute(
            'insert into entries (winery, location, vintage, style, vineyard, drank, username, photo) values (?, ?, ?, ?, ?, 0, ?, ?)',
            [
                request.form['winery'], request.form['location'],
                request.form['vintage'], request.form['style'],
                request.form['vineyard'], user_name, photobin
            ])
        db.commit()
        if (path):
            print 'removing photo at: ', path
            os.remove(path)
        flash('New entry was successfully posted')
        return redirect(url_for('show_entries_drink'))
Example 46
def main():
    # downloading image data
    f = flickr.photos_search(tags=tag)
    # create a directory for the files if it doesn't exist
    path = os.path.join(DATA_DIR, tag)
    if not os.path.isdir(path):
        os.makedirs(path)
    os.chdir(path)
    urllist = [] #store a list of what was downloaded
    # downloading images 
    for k in f:
        url = k.getURL(size='Medium', urlType='source') 
        urllist.append(url)
        image = urllib.URLopener()
        image.retrieve(url, os.path.basename(urlparse.urlparse(url).path)) 
        print 'downloading:', url

    # write the list of urls to file 
    fl = open('urllist.txt', 'w') 
    for url in urllist:
        fl.write(url+'\n') 
    fl.close()
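The download loop above relies on urllib.URLopener, which exists only in Python 2. A rough Python 3 equivalent of the same loop is sketched below; this is hypothetical, since flickrpy itself targets Python 2, so it assumes a ported client exposing the same getURL API:

import os
import urllib.request
from urllib.parse import urlparse

urllist = []
for k in f:  # f = flickr.photos_search(tags=tag), as above
    url = k.getURL(size='Medium', urlType='source')
    urllist.append(url)
    filename = os.path.basename(urlparse(url).path)
    urllib.request.urlretrieve(url, filename)  # replaces URLopener().retrieve
    print('downloading:', url)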
Example 48
def get_videos(from_date, to_date):
  """
  VIDEOS on this day
  """
  print "\tRetrieving videos from {date}".format(date=from_date.strftime("%Y-%m-%d"))
  for video in flickr.photos_search(auth=True,
      user_id='36251685@N00',
      min_taken_date=from_date.strftime("%Y-%m-%d"),
      max_taken_date=to_date.strftime("%Y-%m-%d"),
      media='videos'):
        
    print "\t\t * Found a video: {title} {url}".format(title=video.title, url=video.url)
    #print video
    #fetch the photos
    folder = '{year}-{month}'.format(year=from_date.strftime("%Y"), month=from_date.strftime("%m"))
    
    ensure_folder_exists(folder)
    
    largest = video.getSizes()[-1]
    url = largest['source']
    print "\t\t * Getting headers of {url}".format(url=url)

    request = HeadRequest(url)
    response = urllib2.urlopen(request)
    headers = response.info()

    cd = headers['Content-Disposition']
    filename = cd.replace('attachment; filename=', '{folder}/'.format(folder=folder))        

    if os.path.exists(filename):
        print "\t\t * Skipping {filename}".format(filename=filename)
        continue
    print "\t Retrieving {url}".format(url=url)

    (tmp_filename, headers) = urllib.urlretrieve(url)

    #move to real filename
    shutil.move(tmp_filename, filename)
Example 51
import flickr
from flickr import Photoset
import urllib, urlparse
import os
import sys

if len(sys.argv) > 1:
    tag = sys.argv[1]
else:
    print('no tag specified')

# downloading image data
f = flickr.photos_search(tags="city, day",
                         sort="interestingness-desc",
                         tag_mode="all")
#obj = Photoset(id=72157654804104859)
#f = obj.getPopular()
urllist = []  #store a list of what was downloaded

# downloading images
index = 142
for k in f:
    fileName = "Photo_" + str(index)
    index += 1
    path = "/home/oscar/Documents/ISC/8vo/Sistemas_Inteligentes/Flickr/day"
    url = k.getURL(size='Medium', urlType='source')
    count_photo = k.getFavoriteCount()
    print(count_photo)
    urllist.append(url)
    image = urllib.URLopener()
    image.retrieve(url, os.path.join(path, fileName))
Example 52
init_tags = [concept]
tags = []
tags = tags + init_tags
photos = []
concept = concept.replace(',', '+')
i = 1

for tag in tags:
    print "Searching for tags: %s" % tag
    phototags = []

    for p in range(1, numPagesToGet + 1):
        #group = flickr.Group(Gid)
        #photos = group.getPhotos(per_page = 100, page = p)
        photos = flickr.photos_search(tags=tag,
                                      per_page=100,
                                      page=p,
                                      sort='relevance')

        #print "Got page %d" %p

        for photo in photos:
            try:
                url = photo.getURL(urlType='source')
                print url
            except:
                pass

            phototags = photo.__getattr__('tags')

            if phototags and url != None:
                try:
Example 53
#Take user input
print "--- Press <Enter> to stop entering the tags --- \n"
tag=[]
i=0
while 1:
    i+=1
    input_tag=raw_input('Enter tag %d : '%i )
    if input_tag=='':
        break

    tag.append(input_tag)

if not tag:
    print "No tags entered, aborting!"
    exit(0)

images_count=input('Enter the total number of images you want to download : ')

#downloading image data
f = flickr.photos_search(tags=tag, tag_mode='all', per_page=images_count)
urllist = [] #store a list of what was downloaded

#downloading images
for k in f:
    url = k.getURL(size='Medium', urlType='source')
    urllist.append(url) 
    image = urllib.URLopener()
    image.retrieve(url, os.path.basename(urlparse.urlparse(url).path)) 
    print 'downloading:', url
Example 55
# A simple script to query images using the Flickr API
# Author - Nikhil Naik ([email protected])

import flickr  # flickr.py from https://code.google.com/p/flickrpy/
import os

# if you want to search for specific user - obtain  user ID from this link for
# a specific user  : http://idgettr.com/
# in this case I have chosen Werner Kunz, a Boston area photographer -
# http://www.flickr.com/photos/werkunz
id = '35375520@N07'  #'XXXXXXXX@XXX'
print "Searching for Images..."
photos = flickr.photos_search(tags='Boston', user_id=id)
print "Obtaining Image URL and Geolocation Information..."
for photo in photos:
    url = photo.getURL(size='Medium',
                       urlType='source')  # get URL for the image
    loc = photo.getLocation()  # get latitude and longitude
    cmd = 'echo %s,%s >> image_info.csv' % (url, loc)
    os.system(cmd)
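Building the CSV by shelling out with echo works, but it breaks on values containing shell metacharacters. A safer sketch using the csv module, writing the same (url, location) rows to the same file as the snippet above:

import csv

with open('image_info.csv', 'a') as fh:
    writer = csv.writer(fh)
    for photo in photos:
        url = photo.getURL(size='Medium', urlType='source')  # same call as above
        loc = photo.getLocation()
        writer.writerow([url, loc])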
Example 56
def main():
  with open('API key here.txt', 'r') as f:
    lines = f.read().splitlines()
    if len(lines) >= 2:
      flickr.API_KEY = lines[0]
      flickr.API_SECRET = lines[1]
    else:
      print "Insert API_KEY and API_SECRET to 'API key here.txt'"

  # Script parameters
  save_images = True
  image_folder = None
  image_dl_count = None

  parser = argparse.ArgumentParser(description='This script downloads images from Flickr.')
  parser.add_argument('-f', '--folder_name', help='Save folder name', required=True)
  parser.add_argument('-a', '--photo_amount', help='Amount of images to download', required=True)
  args = parser.parse_args()

  image_folder = './' + args.folder_name + '/'
  image_dl_count = int(args.photo_amount)

  print "Parameters:", image_folder, image_dl_count

  new_york_id = 2459115
  helsinki_id = 565346
  rome_id = 721943
  eiffel_coords = [48.85837, 2.294481]
  eduskunta_coords = [60.172538, 24.9333456]
  #eduskunta_coords = [60.102071, 24.555601] # a little bit off for better pictures (behind eduskuntatalo)
  photos = []
  months_to_dl_from = 60
  per_page = min(image_dl_count, 100)
  #for months_back in range(months_to_dl_from * 30):
  if True:
    out_of_photos = False
    #while not out_of_photos and len(photos) < image_dl_count:
    if True:
      #max_taken = datetime.date.today() - datetime.timedelta(days=months_back)#months_back * 30)
      #min_taken = max_taken - datetime.timedelta(days=1)#30)
      page = 0
      photos_found = 0
      while True:
        #page_photos = flickr.photos_search(woe_id=helsinki_id, has_geo=1, per_page=per_page, page=page, min_taken_date=min_taken, max_taken_date=max_taken)
        #page_photos = flickr.photos_search(per_page=per_page, page=page, lat=eiffel_coords[0], lon=eiffel_coords[1], radius=0.1)
        page_photos = flickr.photos_search(per_page=per_page, page=page, lat=eduskunta_coords[0], lon=eduskunta_coords[1], radius=1.0)
        if len(page_photos) == 0:
          out_of_photos = True
          break
        page += 1
        photos_found += len(page_photos)
        for photo in page_photos: # day-wise downloading changes
          if photo not in photos:
            photos.append(photo)
        #photos.extend(page_photos)
        if len(photos) >= image_dl_count:
          break
      print "Found", photos_found #, "photos between", min_taken, max_taken
  print "Found", len(photos), "photos"
  urls = []
  failed_downloads = 0
  if len(photos) > image_dl_count:
    photos = photos[:image_dl_count]
  for i in range(len(photos)):
    try:
      print "Downloading photos... {}/{}\r".format(i+1, len(photos)),
      photo = photos[i]
      url = photo.getMedium()

      tags = []
      if photo.__getattr__('tags') != None:
        tags = [tag.text.encode('utf-8') for tag in photo.__getattr__('tags')]

      title = photo.title.encode('utf-8')
      description = photo.description.encode('utf-8')
      gps = photo.getLocation()
      id = photo.id.encode('utf-8')
      owner = photo.owner.id.encode('utf-8')
      datetaken = photo.datetaken
      if save_images:
        save_image_and_data(image_folder, url, title, id, owner, tags, description, gps, datetaken)
    except KeyboardInterrupt:
      raise
    except Exception as e:
      print "Exception while downloading photo at index {}:".format(i), e
      failed_downloads += 1

  print "Photos:", len(photos), "Successful downloads:", len(photos) - failed_downloads
  # Note: `urls` is never populated above, so this duplicate check is a no-op.
  seen = set()
  uniq = [x for x in urls if x not in seen and not seen.add(x)]
  print "Duplicates: ", len(urls) - len(uniq)
Example 57
import flickr
import urllib, urlparse
import os
import sys
    
tag_list = ['Dance', 'Sing', 'Eat', 'Food', 'Play', 'People', 'Family', 'Dog', 'Cat', 'Animals', 'Car', 'Man', 'Woman', 'Baby', 'Bird', 'Lake', 'Forest', 'Sunshine', 'City', 'Street', 'Building']
page_index = ['1', '2', '3', '4', '5', '6']

for tag in tag_list:
    if not os.path.isdir(tag):
        os.mkdir(tag)
    for pi in page_index:
        # downloading image data
        f = flickr.photos_search(tags=tag, per_page='1', page=pi)
        print len(f)
        urllist = [] #store a list of what was downloaded
        # downloading images
        for k in f:
            try:
                url = k.getURL(size='Large', urlType='source')
            except:
                print 'error'
                continue
            urllist.append(url)
            image = urllib.URLopener()
            image.retrieve(url, os.path.join(tag, os.path.basename(urlparse.urlparse(url).path)))
            print 'downloading:', url
Example 58
from timeit import default_timer as timer

start = timer()

if len(sys.argv) > 2:
  lic = sys.argv[1]
  date = sys.argv[2]
else:
  print 'no license/date specified'
firstattempt = 0
# downloading image data
#f = flickr.people_getPublicPhotos(min_upload_date="2015-03-20", max_upload_date="2015-03-20", license=lic)
while firstattempt < 3:
  try:
    #get the photos
    f = flickr.photos_search(min_upload_date=date, max_upload_date=date, license=lic, per_page="1")
    #get the total pages
    fn = flickr.photos_search_pages(min_upload_date=date, max_upload_date=date, license=lic, per_page="2")
    #loop through the pages
    print 'TOTAL', fn, len(f)
    for z in range(0,int(fn)):
      pageattempts=0
      while pageattempts < 3:
        try:
          f = flickr.photos_search(min_upload_date=date, max_upload_date=date, license=lic, page=z+1, per_page="2")
          #print 'license:', lic
          urllist = [] #store a list of what was downloaded
          fl = open('urllist.txt', 'w')
          fail = open('failed.txt', 'w')
          counter = 0
          attempts = 0
Example 59
INSTAGRAM_CLIENT_ID = '5d56eb1e594c420997c394d1dca7fcea'
INSTAGRAM_CLIENT_SECRET = 'd0d78baa1e4e4f4b8af9fd9588379968'

api = InstagramAPI(client_id=INSTAGRAM_CLIENT_ID,client_secret=INSTAGRAM_CLIENT_SECRET)

cnx = mysql.connector.connect(user='******', password='******', host='galleryhop2.crflf9mu2uwj.us-east-1.rds.amazonaws.com',database='galleryhop2')

cursor = cnx.cursor()

cursor.execute("""select * from galleries""")

geolocator = Nominatim()

coords = []

for row in cursor:
	try:
		location = geolocator.geocode(row[5]+' NYC')
		coords.append((location.latitude,location.longitude))
	except:
		print 'error'

print coords

for i in coords:
	photos = flickr.photos_search(lat=i[0],lon=i[1],per_page=5,radius=0.25)
	for p in photos:
		url = 'https://farm'+p.farm+'.staticflickr.com/'+p.server+'/'+p.id+'_'+p.secret+'.jpg'
		print url