コード例 #1
0
ファイル: download.py プロジェクト: AstroBoy1/Geolocation
# First, you should install flickrapi
# pip install flickrapi

import flickrapi
import urllib.request
from PIL import Image

# Flickr api access key
# cache=True enables flickrapi's client-side response cache.
flickr = flickrapi.FlickrAPI(u'a7abf9679953e2cb03fe748fad23f798',
                             u'ccbbdce75269f4b8',
                             cache=True)

keyword = 'United States'

# photos = flickr.walk(text=keyword,
#                      tag_mode='all',
#                      tags=keyword,
#                      extras='url_c',
#                      per_page=100,  # may be you can try different numbers..
#                      sort='relevance')
# photos = flickr.photos.getPopular()
# Fetch the caller's geotagged photos; extras='url_o' asks for the
# original-size image URL on each result.
# NOTE(review): getWithGeoData operates on the authenticated user's
# photos -- presumably requires an auth token; confirm before running.
photos = flickr.photos.getWithGeoData(privacy_filter=1,
                                      media='photos',
                                      extras='url_o')

# Collect the original-size URL of every returned photo.
# NOTE(review): photo.get('url_o') may be None when Flickr withholds
# the original size; entries are appended unchecked.
urls = []
for i, photo in enumerate(photos):
    print(i)

    url = photo.get('url_o')
    urls.append(url)
コード例 #2
0
 def get_client(self):
     """Build and return a FlickrAPI client from stored credentials."""
     # Key and secret were captured on this object at construction time.
     return flickrapi.FlickrAPI(self.api_key, self.api_secret)
コード例 #3
0
    Notes:
    Album and Set are interchangeable
    User Id and nsid are interchangeable
    
    '''

import requests, json, sys, string, time, flickrapi, pytz, webbrowser
from getFlickrList import searchInFlickr
from datetime import datetime, tzinfo
from classes import Album, Photo

# Hard-coded Flickr API credentials for this script.
key = "6ab5883201c84be19c9ceb0a4f5ba959"
secret = "1d2bcde87f98ed92"

# NOTE: 'global' at module level is a no-op; kept for fidelity.
# format="json" makes API calls return raw JSON byte strings.
global flickrObj
flickrObj = flickrapi.FlickrAPI(key, secret, format="json")


#pid refers to photo id, nsid is the user id
#stores all ids from the files in one list
def get_ids():
    """Read photo ids from the 'all_ids' file, one id per line.

    Returns:
        list[str]: the ids with their trailing newline stripped.
    """
    ids = []
    # 'with' guarantees the handle is closed even if iteration fails
    # (the original used a bare open()/close() pair).
    with open('all_ids', 'r') as f:
        for line in f:
            # rstrip('\n') is safe when the final line has no trailing
            # newline; the old line[:-1] would eat the id's last digit.
            ids.append(line.rstrip('\n'))
    return ids


def get_urls():
コード例 #4
0
# Output directory for the pix2pix-style dataset; exist_ok avoids
# failing when the directory is already present.
os.makedirs('edges2portrait/trainB', exist_ok=True)

# OpenCV DNN face detector loaded from a frozen TensorFlow graph.
facenet = cv2.dnn.readNetFromTensorflow(
    'face_models/opencv_face_detector_uint8.pb',
    'face_models/opencv_face_detector.pbtxt')

# net = cv2.dnn.readNetFromTensorflow('face_models/landmarks_net.pb')

# dlib 68-point facial landmark predictor (model file must exist locally).
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

# Crop size, detector confidence cutoff, and the Flickr search text.
FACE_IMG_SIZE = 256
FACE_THRESHOLD = 0.7
KEYWORD = 'portrait  bldigital'

# Flickr api access key/secret; cache=True enables response caching.
flickr = flickrapi.FlickrAPI('c6a2c45591d4973ff525042472446ca2',
                             '202ffe6f387ce29b',
                             cache=True)

# walk() lazily pages through search results; url_c asks for the
# medium-800 image URL on each photo element.
photos = flickr.walk(
    text=KEYWORD,
    tag_mode='all',
    # tags=KEYWORD,
    extras='url_c',
    per_page=100,
    sort='relevance',
)


def url_to_img(url):
    resp = urllib.request.urlopen(url)
    img = np.asarray(bytearray(resp.read()), dtype=np.uint8)
コード例 #5
0
 def get_flickr(self, api_key, api_secret, token, token_secret):
     """Create a FlickrAPI client authenticated with an OAuth token."""
     # Wrap the token pair; u'write' declares write-level permission.
     access = FlickrAccessToken(token, token_secret, u'write')
     return flickrapi.FlickrAPI(api_key, api_secret, token=access)
コード例 #6
0
ファイル: stream_generator.py プロジェクト: vnisor/cotl
    def run(self):
        # when the throughput number is greater than 3
        # we fake all the data
        pid = 0
        if self.num_per_second > 3:
            while self.ttr > 0:
                for i in range(self.num_per_second):
                    photo_msg = self.simulate_photo_detail(pid)
                    pid = pid + 1
                    print photo_msg
                    self.producer.send_messages(self.topic_name, photo_msg)
                time.sleep(1)
                self.ttr -= 1
            return
        # otherwise we use the data from flickr
        # specifically, the number == 0 indicates a natural mode

        api_num = np.random.randint(0, len(self.api_keys))
        api_key = self.api_keys[api_num]
        api_secret = self.api_secrets[api_num]

        # mantian two dicts to generate streaming data
        # one dict would be enough for small data
        # but as data grows bigger in memory it will crash the program
        pre_dict = {}
        cur_dict = {}
        d_size = 200
        time_runned = 0
        while True:
            flickr = flickrapi.FlickrAPI(api_key, api_secret, format='json')
            try:
                raw_json = flickr.photos.getRecent(per_page='50')
                parsed = json.loads(raw_json.decode('utf-8'))
            except UnicodeDecodeError:
                # flickr api will complain such error, anyway just wait a few seconds
                # and retry
                print "UnicodeDecodeError"
                time.sleep(10)
                continue
            count = 0
            pre_useful = False
            batch_count = 0
            for p in parsed['photos']['photo']:
                pid = str(p['farm']) + str(p['server']) + str(p['id'])
                existed = False
                if pre_dict is not None:
                    if pid in pre_dict:
                        existed = True
                        pre_useful = True
                if pid in cur_dict:
                    existed = True
                if not existed:
                    cur_dict[pid] = True
                    # when the throughput number is <= 3,
                    # and we have alread sent over 3 photos
                    # we no longer send the photos within this second
                    if self.num_per_second != 0 and self.num_per_second <= 3 and batch_count >= self.num_per_second:
                        continue
                    # start a thread and send the message to kafka
                    new_api_num = np.random.randint(0, len(self.api_keys))
                    api_key = self.api_keys[api_num]
                    api_secret = self.api_secrets[api_num]

                    thread = ProduceMsg(p['id'], p['secret'],
                                        self.api_keys[new_api_num],
                                        self.api_secrets[new_api_num],
                                        self.producer, self.topic_name,
                                        self.user_geos)
                    thread.start()
                    #sendout(p['id'], p['secret'])
                    count = count + 1
                    batch_count = batch_count + 1
            if pre_dict is not None:
                if not pre_useful:
                    # print "pre dict no longer useful, deleting.."
                    pre_dict = None
            if len(cur_dict) >= d_size:
                # print "current dict full", len(cur_dict), ", switching.."
                pre_dict = cur_dict
                cur_dict = {}
            # print count, "New come in 1 second"
            time.sleep(1)
            time_runned = time_runned + 1
            # control the time to run
            if self.ttr > 0:
                if time_runned >= self.ttr:
                    return
コード例 #7
0
ファイル: code.py プロジェクト: katiewhan/SearchFlickr
import web
import threading
import flickrapi
import flickrapi.exceptions
import urllib
import os

# Flickr credentials; only the key is passed, so access is unauthenticated.
API_KEY = '7c1331700a70376959f0ac13c4fa09e9'
SECRET = 'b8560eb50a720515'
flickr = flickrapi.FlickrAPI(API_KEY)
# Target directory for downloaded images.
DOWNLOAD = '/downloads/'

# web.py URL routing table: alternating (url-pattern, handler-class-name).
urls = (
    '/',
    'index',
    '/result',
    'result',
    '/next/(.*)',
    'next',
)

# Template renderer rooted at templates/.
render = web.template.render('templates/')


# global function getting and returning search results
def search(param):
    thumbs = []
    orgs = []
    pid = []

    try:
コード例 #8
0
    def __init__(self,
                 key,
                 secret,
                 nsid,
                 basepath,
                 verbose,
                 force):
        """Authenticate against Flickr and download every photo of a user.

        Arguments:
        - `key`: Flickr API key.
        - `secret`: Flickr API secret.
        - `nsid`: Flickr user id whose photos are fetched.
        - `basepath`: root dir; photos go under <basepath>/nsid/<nsid>.
        - `verbose`: verbosity flag (stored inverted below -- see note).
        - `force`: stored on self.force; semantics not visible here.
        """
        # auth
        self.api_key = key
        self.api_secret = secret
        self.nsid = nsid
        self.basepath = os.path.join(basepath, "nsid", self.nsid)
        # NOTE(review): this inverts the caller's flag -- self.verbose is
        # True when verbose=False was passed.  Looks like a bug; confirm
        # against how self.verbose is consumed elsewhere.
        self.verbose = not verbose
        self.force = force

        # Paging/progress state for the download loops below.
        self.photo_count = 0
        self.photo_page = 0
        self.photo_total = 0
        self.per_page = 500

        # parsed-json makes API calls return plain Python dicts.
        self.flickr = flickrapi.FlickrAPI(self.api_key,
                                          self.api_secret,
                                          format='parsed-json')

        # Only do this if we don't have a valid token already
        # (Python 2 code: unicode()/raw_input are Py2 builtins.)
        if not self.flickr.token_valid(perms=unicode('write')):
            # Get a request token
            self.flickr.get_request_token(oauth_callback='oob')
            # Open a browser at the authentication URL. Do this however
            # you want, as long as the user visits that URL.
            authorize_url = self.flickr.auth_url(perms=unicode('write'))
            webbrowser.open_new_tab(authorize_url)
            # Get the verifier code from the user. Do this however you
            # want, as long as the user gives the application the code.
            verifier = unicode(raw_input('Verifier code: '))
            # Trade the request token for an access token
            self.flickr.get_access_token(verifier)

        # dir
        if not os.path.exists(self.basepath):
            os.makedirs(self.basepath)

        # search: this has a 'pages' field with how many photos left
        # accounting per page..max is 500
        extras = "%s,%s,%s,%s,%s,%s,%s,%s," % ("url_o",
                                               "geo",
                                               "tags",
                                               "machine_tags",
                                               "views",
                                               "description",
                                               "date_upload",
                                               "date_taken")

        photos = self.flickr.photos_search(user_id=self.nsid,
                                           per_page=self.per_page,
                                           extras=extras)
        photos = photos['photos']

        # NOTE(review): perpage * pages overestimates the real total
        # unless the last page is full; 'total' in the response would be
        # exact -- confirm before relying on this figure.
        self.photo_total = photos['perpage'] * photos['pages']
        self.photo_page = 0

        # First page: iterate by index up to perpage.
        # NOTE(review): range(perpage) may exceed len(photos['photo'])
        # when fewer than per_page photos exist -- potential IndexError.
        for i in range(photos['perpage']):
            self.photo_count = i
            photo = photos['photo'][i]
            self.get_photo(photo)

        # this page counter is for the next page actually
        for page in range(photos['pages'])[1:]:
            self.photo_page = page
            photos = self.flickr.photos_search(user_id=self.nsid,
                                               page=str(self.photo_page + 1),
                                               per_page=self.per_page,
                                               extras=extras)
            photos = photos['photos']
            print photos['perpage']
            for i in range(len(photos['photo'])):
                self.photo_count = i
                photo = photos['photo'][i]
                self.get_photo(photo)
        return
コード例 #9
0
def auth():
    """Return a FlickrAPI client configured from the Flask app config."""
    cfg = app.config
    api_key = cfg['FLICKR_KEY']
    api_secret = cfg['FLICKR_SECRET']
    # parsed-json => calls return plain dicts instead of etree elements.
    return flickrapi.FlickrAPI(api_key, api_secret, format='parsed-json')
コード例 #10
0
    email_from = config.get("main", "email_from")
    email_to = config.get("main", "email_to")
except:
    print("Missing " + config_file)
    sys.exit(0)

# Suffix marking files that have already been uploaded.
done = ".done"

# First CLI argument is the card's mount point; presumably a directory
# path -- TODO confirm callers always pass one.
sdcard = sys.argv[1] + "/"

logfile = sdcard + "upload-log.txt"

includes = ['*.jpg']  # for files only / case insensitive
excludes = ["*" + done, logfile]  # for dirs and files / case insensitive

# Token-based client: the third positional argument is a pre-obtained
# API token, so no interactive authentication is needed here.
flickr = flickrapi.FlickrAPI(api_key, api_secret, api_token)


def send_mail(send_from,
              send_to,
              subject,
              text,
              files=[],
              server="localhost",
              port=25,
              username='',
              password='',
              isTls=True):
    msg = MIMEMultipart()
    msg['From'] = send_from
    msg['To'] = COMMASPACE.join(send_to)
コード例 #11
0
            if tags[item] > 1:
                print('{} - {}'.format(item.encode('utf-8'), tags[item]))
                writer.writerow([item.encode('utf-8'), tags[item]])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Download a list of tags that are popular on Flickr')
    parser.add_argument(
        '-w', '--workers', type=int, default=-1,
        help="num workers used to download images. -x uses (all - x) cores [-1 default]."
    )

    args = parser.parse_args()

    flickr = flickrapi.FlickrAPI(FLICKR_API_KEY, FLICKR_API_SECRET, format='parsed-json')

    num_images = 0
    page = 1
    tags = defaultdict(int)

    while True:
        photos = flickr.photos.search(sort='interestingness-desc', per_page=IMAGES_PER_PAGE, page=page, content_type=1)['photos']['photo']

        for photo in photos:
            print(num_images)

            try:
                info = flickr.photos.getInfo(photo_id=photo.get('id'))
                num_images += 1
                for tag in info['photo']['tags']['tag']:
コード例 #12
0
import flickrapi
import wget
from os import path, walk, remove
from sys import exit, argv
import json
from argparse import ArgumentParser
from apikeys import *
# use a modern url library to ensure proper SSL connections
import urllib3

# instantiate a flickrAPI object that will give us results in user-friendly JSON
flickr = flickrapi.FlickrAPI(flickrKey, flickrSecret, cache=True, format='parsed-json')
# Without a cached read token we proceed anonymously (Python 2 print).
if not flickr.token_valid(perms='read'):
  print "operating unauthenticated, may not have permission to see all images"
# but not all the functions we need are available in JSON, so here's another object :/
# Default format is etree -- used for calls missing from the JSON API.
flickrEtree = flickrapi.FlickrAPI(flickrKey, flickrSecret, cache=True)

class picinfo(object):
  """This class will store information about images on flickr."""
  picoutdir = 'images'
  def __init__(self,flickrSpec,photosetInfo,photoSize = 'Large'):
    self.photo_size = photoSize
    self.photosetInfo = photosetInfo
    self.flickrSpec = flickrSpec
    self.sourceUrls = self.urlMaker()
    self.filename = self.picoutdir + '/' + self.fnameMaker()
    self.flickrID = flickrSpec['id']
    if path.isfile(self.filename):
      self.localFileExists = True
    else:
      deets = flickr.photos_getInfo(photo_id=flickrSpec['id'])
コード例 #13
0
from django.shortcuts import render
from django.core.paginator import Paginator
import flickrapi
from .models import Preset, FavouritePlaces
from .forms import PresetForm

# Hard-coded Flickr credentials; consider moving into Django settings.
api_key = "02df6aacd6e20bed557b78d7dc1d143a"
secret_api_key = "5a2087a02ab2540c"
flickr = flickrapi.FlickrAPI(api_key, secret_api_key)


# Search function
def search(lat, lon):
    """Return static-image URLs for photos taken near (lat, lon).

    Args:
        lat: latitude to search around.
        lon: longitude to search around.

    Returns:
        list[str]: staticflickr.com JPEG URLs, one per matching photo.
    """
    # Calling Flickr API and Create Photos object.
    # accuracy=11 asks for city-level geo precision.
    obj = flickr.photos.search(api_key=api_key,
                               lat=lat,
                               lon=lon,
                               accuracy=11,
                               format='parsed-json')
    photos = obj['photos']['photo']

    # Build one address per photo.  'photo_id' (not 'id') avoids
    # shadowing the builtin id() as the original did.
    addresses = []
    for photo in photos:
        farm_id = photo['farm']
        server_id = photo['server']
        photo_id = photo['id']
        secret = photo['secret']
        address = f"https://farm{farm_id}.staticflickr.com/{server_id}/{photo_id}_{secret}.jpg"
        addresses.append(address)
    return addresses
コード例 #14
0
ファイル: auth.py プロジェクト: SrgSprinkles/photobooth
import flickrapi
import webbrowser

# Image to upload and the tags to apply to it.
fileToUpload = '/home/pi/photobooth/slides/test_upload.png'  #change to a real filename

tagsToTag = 'photobooth testing'


def toUnicodeOrBust(obj, encoding='utf-8'):
    """Return *obj* decoded to unicode when it is a Python 2 byte string.

    Non-string objects (and already-unicode strings) pass through as-is.
    """
    # Only plain (non-unicode) strings need decoding.
    if isinstance(obj, basestring) and not isinstance(obj, unicode):
        return unicode(obj, encoding)
    return obj


# Credentials come from a 'config' module imported elsewhere in the file.
flickr = flickrapi.FlickrAPI(config.api_key, config.api_secret)

print('Step 1: authenticate')

# Only do this if we don't have a valid token already
if not flickr.token_valid(
        perms=u'write'):  #notice the letter u. It's important.

    # Get a request token
    flickr.get_request_token(oauth_callback='oob')

    # Open a browser at the authentication URL. Do this however
    # you want, as long as the user visits that URL.
    authorize_url = flickr.auth_url(
        perms=u'write')  #notice the letter u. It's important.
    webbrowser.open_new_tab(authorize_url)
コード例 #15
0
 def __init__(self, api_key, api_secret):
     """Store a parsed-json FlickrAPI client on the instance."""
     # parsed-json => API calls return plain Python dicts.
     client = flickrapi.FlickrAPI(api_key, api_secret, format='parsed-json')
     self.flickr = client
コード例 #16
0
import flickrapi
import requests
import sys
import urllib

# Key-only client: unauthenticated, public content only.
api_key = '652dbd597aa5103e20afe8fdf3b4f42e'
flickr = flickrapi.FlickrAPI(api_key, cache=True)

# Fenway from wikipedia 42  20 47 N 71 5 51 W
lat = '42.355056'
lon = '-71.065503'
# NOTE(review): the coordinates above are immediately overwritten here,
# and none of lat/lon/rad/the_date* are used by the active search below.
lat = '42.33'
lon = '-71.6'
rad = 20
the_date = '2013-08-04'
the_date_end = '2013-10-05'
#photos = flickr.photos_search(tags='boston', lat='42.355056', lon='-71.065503', radius='5')
#photos = flickr.photos_search(tags='fenway park')
photos = flickr.photos_search(tags='david ortiz')
#, lat=lat, lon=lon, radius=rad,	min_taken_date=the_date, max_taken_date=the_date_end)

# Download budget for the loop below; n is unused in the visible code.
maxdown = 177700
n = 10000

print "starting..."
for photo in photos[0]:
    if maxdown > 0:
        maxdown = maxdown - 1
        try:
            print photo.attrib['title']
            photoLoc = flickr.photos_geo_getLocation(
コード例 #17
0
import flickrapi
import xml

# Flickr credentials; username appears scrubbed by the scraper.
key = u'a4b698792ceedf048aa579118b4d7ef5'
secret = u'c8f60618dab0ed74'
username = '******'
# OAuth tokens are cached under tests/ so test runs reuse them.
flickr = flickrapi.FlickrAPI(key, secret, token_cache_location='tests/')


def upload_img(img_path):
    """Upload *img_path* privately and return (photo_id, largest_url).

    Args:
        img_path: path of the local image file to upload.

    Returns:
        tuple[str, str]: the new photo's id and the URL of its largest
        available rendition.
    """
    flickr.authenticate_via_browser(perms='write')
    # is_public='0' keeps the uploaded photo private.
    # 'photo_id' (not 'id') avoids shadowing the builtin id().
    photo_id = flickr.upload(img_path, is_public='0')[0].text
    photo = flickr.photos.getSizes(photo_id=photo_id)
    # The last <size> entry is the largest available rendition.
    the_url = photo[0][-1].get('source')
    return photo_id, the_url


def delete_img(img_id):
    """Permanently delete photo *img_id* (requires 'delete' permission)."""
    flickr.authenticate_via_browser(perms='delete')
    flickr.photos.delete(photo_id=img_id)


def replace_img(img_path, img_id):
    """Replace the file behind *img_id* and return (img_id, new_url)."""
    flickr.authenticate_via_browser(perms='write')
    flickr.replace(filename=img_path, photo_id=img_id)
    sizes = flickr.photos.getSizes(photo_id=img_id)
    # The last size entry holds the largest rendition's URL.
    return img_id, sizes[0][-1].get('source')
コード例 #18
0
new_dir_path = './' + dt_now + '/'
os.mkdir(new_dir_path)
n = 0
while n < 5:
    if gazou[n] in model:
        pre = model.most_similar(positive=[gazou[n]])
        text = str(pre)
        result = re.sub(r'[!-~]', "", text)

        n += 1
        num = 0
        while num < 10:
            keyword = gazou[n], pre[num][0]
            if __name__ == '__main__':
                flicker = flickrapi.FlickrAPI(flickr_api_key,
                                              secret_key,
                                              format='parsed-json')
                response = flicker.photos.search(text=keyword,
                                                 per_page=1,
                                                 media='photos',
                                                 sort='relevance',
                                                 safe_search=1,
                                                 extras='url_q,license')
                photos = response['photos']

            for photo in photos['photo']:
                url_q = photo['url_q']
                picnum = (n - 1) * 10 + num
                number = str(picnum)

                savename = "test" + number + ".png"
コード例 #19
0

def check_dir(text):
    """Ensure the Flickr download directory for *text* exists.

    Args:
        text: subdirectory name (a celebrity name in this script)
            appended to the hard-coded base path.
    """
    base = '/home/lumpsum/Desktop/Data Science/Scriptie/Flickr/'
    # exist_ok=True removes the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(base + text, exist_ok=True)


if __name__ == "__main__":
    start = time.time()

    api_key = u'dfdcc6dbe417c37cdc9d2d4fabd690b7'
    api_secret = u'821c300dc6191853'

    flickr = flickrapi.FlickrAPI(api_key, api_secret, cache=True)
    flickr.authenticate_via_browser(perms='read')

    with open('nameListLarge2.txt', 'r') as f:
        celebs = f.readlines()

    celebs = [x.rstrip() for x in celebs]

    for c in celebs:
        check_dir(c)

    # Create a queue to communicate with the worker threads
    queue = Queue()
    # Create 8 worker threads
    for x in range(100):
        worker = DownloadWorker(queue)
コード例 #20
0
# get data from flickr
import flickrapi
# consider the region of lat: 45.395688-45.468072
#                        lon: 12.248631-12.396624

#input API key and secret
flickr = flickrapi.FlickrAPI('c5af07877cc00731fbec3b2fc063a9e4',
                             '4d9ffde5a58411d6',
                             cache=True)

# NOTE(review): if walk() raises, 'photos' stays undefined and the loop
# below will NameError; the except branch only prints and continues.
try:
    # for training data set
    photos = flickr.walk(text='Venice, building',
                         has_geo=1,
                         geo_context=2,
                         extras='geo, url_m')

    # for testing data set
    #photos = flickr.walk(text='Venice, building',has_geo=0,geo_context=2,extras='geo, url_m')
except Exception as e:
    print('Error')

count = 0

# Output file; 'file' shadows the (Py2) builtin name and is never
# closed in the visible code.
file = open("venice_m1.txt", "w+")
#file = open("venice_test.txt","w+")
for photo in photos:
    count += 1
    lat = photo.get('latitude')
    lon = photo.get('longitude')
    url = photo.get('url_m')
コード例 #21
0
ファイル: flickrripper.py プロジェクト: ckmp-code/pywikibot
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: unicode
    """
    def option_value(arg, prefix, prompt):
        """Return the value attached to *arg*, prompting when absent."""
        # A bare '-opt' (len == len(prefix)) carries no value, so ask the
        # user; otherwise the value follows the separator character.
        if len(arg) == len(prefix):
            return pywikibot.input(prompt)
        return arg[len(prefix) + 1:]

    local_args = pywikibot.handle_args(args)

    group_id = ''
    photoset_id = ''
    user_id = ''
    start_id = ''
    end_id = ''
    tags = ''
    addCategory = ''
    removeCategories = False
    autonomous = False
    totalPhotos = 0
    uploadedPhotos = 0

    # Do we mark the images as reviewed right away?
    if config.flickr['review']:
        flickrreview = config.flickr['review']
    else:
        flickrreview = False

    # Set the Flickr reviewer
    if config.flickr['reviewer']:
        reviewer = config.flickr['reviewer']
    elif 'commons' in config.sysopnames['commons']:
        pywikibot.output(config.sysopnames['commons'])
        reviewer = config.sysopnames['commons']['commons']
    elif 'commons' in config.usernames['commons']:
        reviewer = config.usernames['commons']['commons']
    else:
        reviewer = ''

    # Should be renamed to overrideLicense or something like that
    override = ''
    for arg in local_args:
        if arg.startswith('-group_id'):
            group_id = option_value(arg, '-group_id',
                                    'What is the group_id of the pool?')
        elif arg.startswith('-photoset_id'):
            photoset_id = option_value(arg, '-photoset_id',
                                       'What is the photoset_id?')
        elif arg.startswith('-user_id'):
            user_id = option_value(
                arg, '-user_id',
                'What is the user_id of the flickr user?')
        elif arg.startswith('-start_id'):
            start_id = option_value(
                arg, '-start_id',
                'What is the id of the photo you want to start at?')
        elif arg.startswith('-end_id'):
            end_id = option_value(
                arg, '-end_id',
                'What is the id of the photo you want to end at?')
        elif arg.startswith('-tags'):
            tags = option_value(
                arg, '-tags',
                'What is the tag you want to filter out (currently only '
                'one supported)?')
        elif arg == '-flickrreview':
            flickrreview = True
        elif arg.startswith('-reviewer'):
            reviewer = option_value(arg, '-reviewer',
                                    'Who is the reviewer?')
        elif arg.startswith('-override'):
            override = option_value(arg, '-override',
                                    'What is the override text?')
        elif arg.startswith('-addcategory'):
            addCategory = option_value(arg, '-addcategory',
                                       'What category do you want to add?')
        elif arg == '-removecategories':
            removeCategories = True
        elif arg == '-autonomous':
            autonomous = True

    # flickrapi is replaced by the caught exception when its import
    # failed at module load time (pywikibot convention).
    if isinstance(flickrapi, Exception):
        pywikibot.error('This script requires the python flickrapi module. \n'
                        'See: http://stuvel.eu/projects/flickrapi')

    elif not config.flickr['api_key']:
        pywikibot.warning('Flickr api key not found! Get yourself an api key\n'
                          'Any flickr user can get a key at\n'
                          'https://www.flickr.com/services/api/keys/apply/')

    elif user_id or group_id or photoset_id:
        if 'api_secret' in config.flickr and config.flickr['api_secret']:
            flickr = flickrapi.FlickrAPI(config.flickr['api_key'],
                                         config.flickr['api_secret'])
        else:
            pywikibot.output('Accessing public content only')
            flickr = flickrapi.FlickrAPI(config.flickr['api_key'])

        for photo_id in getPhotos(flickr, user_id, group_id, photoset_id,
                                  start_id, end_id, tags):
            uploadedPhotos += processPhoto(flickr, photo_id, flickrreview,
                                           reviewer, override, addCategory,
                                           removeCategories, autonomous)
            totalPhotos += 1
        pywikibot.output('Finished running')
        pywikibot.output('Total photos: ' + str(totalPhotos))
        pywikibot.output('Uploaded photos: ' + str(uploadedPhotos))
コード例 #22
0
def main():
    site = pywikibot.getSite(u'commons', u'commons')
    pywikibot.setSite(site)
    #imagerecat.initLists()

    #Get the api key
    if not config.flickr['api_key']:
        pywikibot.output('Flickr api key not found! Get yourself an api key')
        pywikibot.output(
            'Any flickr user can get a key at http://www.flickr.com/services/api/keys/apply/'
        )
        return

    if 'api_secret' in config.flickr and config.flickr['api_secret']:
        flickr = flickrapi.FlickrAPI(config.flickr['api_key'],
                                     config.flickr['api_secret'])
        (token, frob) = flickr.get_token_part_one(perms='read')
        if not token:  # The user still hasn't authorised this app yet, get_token_part_one() will have spawn a browser window
            pywikibot.input("Press ENTER after you authorized this program")
        flickr.get_token_part_two((token, frob))
    else:
        print 'Accessing public content only'
        flickr = flickrapi.FlickrAPI(config.flickr['api_key'])
    group_id = u''
    photoset_id = u''
    user_id = u''
    start_id = u''
    end_id = u''
    tags = u''
    addCategory = u''
    removeCategories = False
    autonomous = False
    totalPhotos = 0
    uploadedPhotos = 0

    # Do we mark the images as reviewed right away?
    if config.flickr['review']:
        flickrreview = config.flickr['review']
    else:
        flickrreview = False

    # Set the Flickr reviewer
    if config.flickr['reviewer']:
        reviewer = config.flickr['reviewer']
    elif 'commons' in config.sysopnames['commons']:
        print config.sysopnames['commons']
        reviewer = config.sysopnames['commons']['commons']
    elif 'commons' in config.usernames['commons']:
        reviewer = config.usernames['commons']['commons']
    else:
        reviewer = u''

    # Should be renamed to overrideLicense or something like that
    override = u''
    for arg in pywikibot.handleArgs():
        if arg.startswith('-group_id'):
            if len(arg) == 9:
                group_id = pywikibot.input(
                    u'What is the group_id of the pool?')
            else:
                group_id = arg[10:]
        elif arg.startswith('-photoset_id'):
            if len(arg) == 12:
                photoset_id = pywikibot.input(u'What is the photoset_id?')
            else:
                photoset_id = arg[13:]
        elif arg.startswith('-user_id'):
            if len(arg) == 8:
                user_id = pywikibot.input(
                    u'What is the user_id of the flickr user?')
            else:
                user_id = arg[9:]
        elif arg.startswith('-start_id'):
            if len(arg) == 9:
                start_id = pywikibot.input(
                    u'What is the id of the photo you want to start at?')
            else:
                start_id = arg[10:]
        elif arg.startswith('-end_id'):
            if len(arg) == 7:
                end_id = pywikibot.input(
                    u'What is the id of the photo you want to end at?')
            else:
                end_id = arg[8:]
        elif arg.startswith('-tags'):
            if len(arg) == 5:
                tags = pywikibot.input(
                    u'What is the tag you want to filter out (currently only one supported)?'
                )
            else:
                tags = arg[6:]
        elif arg == '-flickrreview':
            flickrreview = True
        elif arg.startswith('-reviewer'):
            if len(arg) == 9:
                reviewer = pywikibot.input(u'Who is the reviewer?')
            else:
                reviewer = arg[10:]
        elif arg.startswith('-override'):
            if len(arg) == 9:
                override = pywikibot.input(u'What is the override text?')
            else:
                override = arg[10:]
        elif arg.startswith('-addcategory'):
            if len(arg) == 12:
                addCategory = pywikibot.input(
                    u'What category do you want to add?')
            else:
                addCategory = arg[13:]
        elif arg == '-removecategories':
            removeCategories = True
        elif arg == '-autonomous':
            autonomous = True

    if user_id or group_id or photoset_id:
        for photo_id in getPhotos(flickr, user_id, group_id, photoset_id,
                                  start_id, end_id, tags):
            uploadedPhotos += processPhoto(flickr, photo_id, flickrreview,
                                           reviewer, override, addCategory,
                                           removeCategories, autonomous)
            totalPhotos += 1
    else:
        usage()
    pywikibot.output(u'Finished running')
    pywikibot.output(u'Total photos: ' + str(totalPhotos))
    pywikibot.output(u'Uploaded photos: ' + str(uploadedPhotos))
コード例 #23
0
ファイル: utils.py プロジェクト: codeaudit/pymooney
def connect_api(dbname, api_key, api_secret):
    """Create an API client for the given image database.

    Args:
        dbname: database identifier; only 'flickr' is supported.
        api_key: credential key for the service.
        api_secret: credential secret for the service.

    Returns:
        A FlickrAPI instance in parsed-json mode.

    Raises:
        ValueError: for an unsupported *dbname* (the original fell
            through and raised UnboundLocalError instead).
    """
    if dbname == 'flickr':
        return flickrapi.FlickrAPI(api_key, api_secret, format='parsed-json')
    raise ValueError('unsupported database: %r' % (dbname,))
コード例 #24
0
def flickrAPI():
    """Return a caching FlickrAPI client using the embedded credentials."""
    # Credentials are baked in; cache=True enables the client-side cache.
    return flickrapi.FlickrAPI(u'382e669299b2ea33fa2288fd7180326a',
                               u'b556d443c16be15e',
                               cache=True)
コード例 #25
0
import flickrapi
import json
import requests
import os
import imghdr

# Flickr credentials and the staticflickr URL template for image files.
api_key = "8eb125a29103001fd80205c899619704"
api_secret = "0456e2f367e5933d"
img_url_500px_format = "https://live.staticflickr.com/{server_id}/{id}_{secret}.jpg"

# Local directory where downloaded images are stored.
save_img_path = "./temp_imgs/"

if not os.path.exists(save_img_path):
    os.makedirs(save_img_path)

# parsed-json => API calls return plain Python dicts.
flickr = flickrapi.FlickrAPI(api_key, api_secret, format='parsed-json')
recent_photos = flickr.photos.getRecent()['photos']['photo']

res_all = []
for photo in recent_photos:
    try:
        print(photo)
        server_id, id, secret, title = photo['server'], photo['id'], photo[
            'secret'], photo['title']

        try:
            loc = flickr.photos.geo.getLocation(photo_id=id)
            latitude = loc['photo']['location']['latitude']
            longitude = loc['photo']['location']['longitude']
        except Exception as e:
            loc = None
コード例 #26
0
    def __init__(self, conf_file):
        """Set up the Flickr client and the profile-picture directory.

        Arguments:
        - `conf_file`: parsed configuration with 'flickr' and
          'directories' sections (presumably dict-like -- confirm
          against the caller).
        """
        # Key-only client: unauthenticated, public content only.
        self.api = flickrapi.FlickrAPI(conf_file['flickr']['api_key'])
        self.photo_dir = conf_file['directories']['profilepics_dir']
コード例 #27
0
import flickrapi

api_key = u'382e669299b2ea33fa2288fd7180326a'
api_secret = u'b556d443c16be15e'
flickr = flickrapi.FlickrAPI(api_key, api_secret)

# Look up the Flickr place ID for the query location.
location = flickr.places.find(query='Wuhan University')

for local in location[0]:
    placeid = local.attrib['place_id']
    print(placeid + "\n" + local.attrib['latitude'] + "\n" +
          local.attrib['longitude'])

# BUG FIX: previously, if flickr.walk() raised, `photos` was left unbound and
# the loop below crashed with NameError. Default to an empty iterable so the
# script degrades gracefully after printing the error.
photos = []
try:
    # `placeid` is the last place found above; walk all photos tagged with it.
    photos = flickr.walk(place_id=placeid, extras='url_c')
except Exception:
    print('error')

for photo in photos:
    url = photo.get('url_c')
    if url is not None:
        print(url)
    else:
        print('url none')
コード例 #28
0
import flickrapi

api_key = u'888d850723d9d8b71858dc1080adc78d'
api_secret = u'96ab0837676e58bf'

# Output file for the crawl results.
# NOTE(review): nothing is ever written to `f` in this excerpt and it is
# never closed -- the example appears truncated.
# Change path for your own
f = open(
    'C:/Users/Dunai/Desktop/VPP_Data_RR/FlickrPanoramaCrawler/CrawlerOuputFile_b.txt',
    'w')

x = 0
# etree format: API calls return ElementTree nodes.
flickr = flickrapi.FlickrAPI(api_key, api_secret, format='etree')
# Walk all photos tagged 'panorama' taken since the given date.
for photo in flickr.walk(
        tags='panorama',
        min_taken_date='2015-01-20',
        #       max_taken_date='2015-08-30'
):
    Server = photo.get('server')
    ID = photo.get('id')
    Farm = photo.get('farm')
    Secret = photo.get('secret')
    # 'b' = large (1024px) size suffix in Flickr's URL scheme.
    size_pic = 'b'

    url = "https://farm" + Farm + '.staticflickr.com/' + Server + '/' + ID + '_' + Secret + '_' + size_pic + '.jpg'

    # Count the people who favorited this photo.
    # NOTE(review): `photo` is rebound here, clobbering the loop variable.
    favorites = flickr.photos.getFavorites(photo_id=ID)
    photo = favorites.getchildren()
    photo1 = photo.pop()
    list_of_favs = photo1.findall('person')
    num_favs = len(list_of_favs)
コード例 #29
0
def get_albums():
    """Build a dict mapping Flickr photoset (album) ids to Album objects.

    For every photo id returned by get_ids(), fetch the photosets that photo
    belongs to; for each set not seen before, collect the owner, album
    URL/name, min/max taken+posted timestamps, album size, and the ratio of
    photos of the species of interest to the album's total photo count.

    Returns:
        dict: {album id (str): Album object}
    """

    #creates flickr object
    flickrObj = flickrapi.FlickrAPI(key, secret, format="json")
    photolist = get_ids()  #list of ids returned from the search on flickr
    albumlist = {}  # {album id: album object}

    #loops through all the photos in the search
    for pid in photolist:
        '''
        #for i in range(0,1):
        pid = photolist[i]
        '''

        # All pools/sets this photo appears in, parsed from the raw JSON bytes.
        all_contexts = json.loads(
            flickrObj.photos.getAllContexts(photo_id=pid).decode(
                encoding='utf-8'))

        #list of all set ids
        if 'set' in all_contexts:  #all_contexts["set"] == True
            sets = all_contexts["set"]

            #loops through all the sets that the photo is in
            for i in sets:
                set_id = i["id"]
                #if the album has not already been processed
                if not (set_id in albumlist):
                    #gets the userid for the owner of the album
                    user = json.loads(
                        flickrObj.photos.getInfo(photo_id=pid).decode(
                            encoding='utf-8'))['photo']['owner']['nsid']
                    #gets list of pictuers in the album
                    photosets = json.loads(
                        flickrObj.photosets.getPhotos(
                            photoset_id=set_id,
                            user_id=user).decode(encoding='utf-8'))
                    #creates album object we are analyzing and sets the album id, album url, and album name
                    newalbum = Album(set_id, user_id=user)
                    add_album_url(newalbum)
                    add_album_name(newalbum)

                    #initializes minimum and maximum posted/taken time
                    first_posted = json.loads(
                        flickrObj.photos.getInfo(
                            photo_id=photosets['photoset']['photo'][0]['id']).
                        decode(encoding='utf-8'))['photo']['dates']['posted']
                    first_taken = json.loads(
                        flickrObj.photos.getInfo(
                            photo_id=photosets['photoset']['photo'][0]['id']).
                        decode(encoding='utf-8'))['photo']['dates']['taken']
                    #converts the text time into unix timestamp
                    # NOTE(review): strftime("%s") is a platform extension
                    # (POSIX only, local timezone) -- not portable; confirm
                    # this runs on the intended OS.
                    mint = int(
                        datetime.strptime(first_taken,
                                          '%Y-%m-%d %H:%M:%S').strftime("%s"))
                    maxt = int(
                        datetime.strptime(first_taken,
                                          '%Y-%m-%d %H:%M:%S').strftime("%s"))
                    minp = int(first_posted)
                    maxp = int(first_posted)

                    #counter in loop which counts number of pictures in album
                    album_size = 0
                    #number of pictures of species of interest in the album
                    num_species = 0

                    #loops through each picture in the photoset creatingi a photo class object for each image
                    for j in photosets['photoset']['photo']:

                        newphoto = Photo(photoId=j['id'])
                        add_photo_url(newphoto)
                        add_photo_description(newphoto)
                        #add_photo_location(newphoto)
                        '''
                            NEED TO BE DONE:
                            make photo_list a list of photo objects (appending newphoto ) instead of a
                            list of photo ids, which it currently is
                            
                            see: line 177
                            '''
                        taken = json.loads(
                            flickrObj.photos.getInfo(photo_id=j['id']).decode(
                                encoding='utf-8'))['photo']['dates']['taken']
                        #converts the text time into unix timestamp
                        taken = int(
                            datetime.strptime(
                                taken, '%Y-%m-%d %H:%M:%S').strftime("%s"))
                        posted = int(
                            json.loads(
                                flickrObj.photos.getInfo(
                                    photo_id=j['id']).decode(encoding='utf-8'))
                            ['photo']['dates']['posted'])

                        #resets the max/min time if its later/earlier respectively
                        if taken < mint:
                            mint = taken
                        if taken > maxt:
                            maxt = taken
                        if posted < minp:
                            minp = posted
                        if posted > maxp:
                            maxp = posted
                        #adds the photo to the photolist attribute of the album object
                        newalbum.photo_list.append(j['id'])

                        album_size += 1

                        #checks to see if picture has tag of species of interest and updates the count
                        if j['id'] in photolist:
                            num_species += 1

                # NOTE(review): this finalization block is OUTSIDE the
                # `if not (set_id in albumlist)` guard, so it also runs for
                # sets that were already processed -- in that case it reuses
                # the stale `newalbum`/counters from a previous iteration
                # (and raises NameError if no album was created yet).
                # Likely intended to be indented one level deeper; confirm.
                #calculates the time range the album has for taken/posted
                newalbum.time_range_posted = int(maxp) - int(minp)
                newalbum.time_range_taken = int(maxt) - int(mint)
                #updates album size
                newalbum.size = album_size
                #calculates species of interest ratio to total number of photos in album
                newalbum.species_ratio = float(num_species) / float(album_size)

                #adds the album to the albumlist
                albumlist[newalbum.sid] = newalbum

    return albumlist
コード例 #30
0
ファイル: flickr.py プロジェクト: nginth/luminance
def flickrAPIUser(username):
    """Return a parsed-json FlickrAPI client authenticated for *username*.

    Relies on the module-level api_key/api_secret credentials.
    """
    options = {'format': 'parsed-json', 'username': username}
    return flickrapi.FlickrAPI(api_key, api_secret, **options)