Example #1
class Shell(cmdln.Cmdln):
    r"""ci2 -- the new Code Intel, a tool for working with source code

    usage:
        ${name} SUBCOMMAND [ARGS...]
        ${name} help SUBCOMMAND

    ${option_list}
    ${command_list}
    ${help_list}
    """
    name = "batchr"
    #XXX There is a bug in cmdln.py alignment when using this. Leave it off
    #    until that is fixed.
    #helpindent = ' '*4

    def _setup(self, opts):
        # flickr auth information:
        self.flickrAPIKey = os.environ['FLICKR_BATCHR_KEY']
        self.flickrSecret = os.environ['FLICKR_BATCHR_SECRET']
        try:
            gui = opts.gui
        except AttributeError: # really wish I could just test "gui" in opts
            gui = False
        self.progress = progressBar("Flickr Shell", indeterminate=True, gui=gui)

        self.progress.update("Logging In...")
        # make a new FlickrAPI instance
        self.fapi = FlickrAPI(self.flickrAPIKey, self.flickrSecret)

        # do the whole whatever-it-takes to get a valid token:
        self.token = self.fapi.getToken(browser="Firefox")


    def do_listsets(self, subcmd, opts, *args):
        """List set ids and set titles (with substring title matches)"""
        self._setup(opts)
        kw = dict(api_key=self.flickrAPIKey,
                  auth_token=self.token)
        rsp = self.fapi.photosets_getList(**kw)
        self.fapi.testFailure(rsp)
        sets = rsp.photosets[0].photoset
        for set in sets:
            title = set.title[0].elementText
            match = True
            if args:
                match = False
                for word in args:
                    if word.lower() in title.lower():
                        match = True
                        break
            if match:
                print set['id']+':', title, '('+set['photos']+ " photos)"
        self.progress.finish()

    @cmdln.option("-b", "--base", dest="base",
                  default="./photos",
                  help="the directory we want to download to")
    @cmdln.option("-F", "--format", dest="format",
                  help="the file layout format to use within the base directory (default is %(year)s/%(month_name)s/%(size)s/%(id)s.jpg",
                  default="%(year)s/%(month_name)s/%(size)s/%(id)s.jpg")
    @cmdln.option("-f", "--force", action="store_true",
                  help="force downloads even if files with same name already exist")
    @cmdln.option("-y", "--year", dest="year", type='int',
                  help="the year we want backed up")
    @cmdln.option("-m", "--month", dest="month", type='int',
                  help="the month we want backed up")
    @cmdln.option("-s", "--size", dest="size", default='-',
                  nargs=1,
                  help="the size we want downloaded: one of: s t m b - o")
    @cmdln.option("-t", "--test", action="store_true",
                  help="Just get the URLs (don't download images) -- implies -l")
    @cmdln.option("--tags",
                  help="Tags (separatead by commas)")
    @cmdln.option("-S", "--set",
                  help="Set id (use listsets command to get them)")
    @cmdln.option("-l", "--list", action="store_true",
                  help="List URLs being downloaded instead of using progress bar")
    @cmdln.option("--gui", action="store_true",
                  help="use Cocoa progress bar on OS X")
    def do_download(self, subcmd, opts):
        """Download images based on search criteria (dates, tags, sets)

        ${cmd_usage}
        ${cmd_option_list}
        Any errors will be printed. Returns the number of errors (i.e.
        exit value is 0 if there are no consistency problems).
        """
        urls = self._search(opts)
        self.progress.reset("Getting Photo Info")
        count = 0
        downloadeds = 0
        bad_ones =  []
        try:
            try:
                for (id, url, original_url, date_taken) in urls:
                    year, month, day = parse_date_taken(date_taken)
                    month_name = calendar.month_name[month]
                    size = pretty_size[opts.size]
                    filename = opts.format % locals()
                    filename = join(opts.base, filename)

                    extra_info = " (%s %s %s)          " % (day, month_name, year)
                    self.progress.update("Phase 2: Downloading: %d/%d " % (count+1, len(urls)),
                                         (count+1)/float(len(urls)),
                                         after_args=(extra_info,))
                    if opts.test:
                        print "Would download (test):", url, "to", filename
                        continue
                    if not os.path.exists(filename):
                        if opts.list:
                            print "Downloading:", url
                        tmpfile = os.path.join(opts.base, 'tempfile.jpg')
                        try:
                            os.makedirs(os.path.dirname(filename))
                        except OSError:
                            pass
                        f, headers = urllib.urlretrieve(url, tmpfile)
                        if (int(headers['Content-Length']) == 2900 and
                            headers['Content-Type'] == 'image/gif'):
                            # first try the original size
                            url = original_url
                            print "GETTING ORIGINAL", url
                            f, headers = urllib.urlretrieve(url, tmpfile)
                        if (int(headers['Content-Length']) == 2900 and
                            headers['Content-Type'] == 'image/gif'):
                            # something's wrong
                            print "SOMETHING's BAD", url
                            os.unlink(tmpfile)
                            bad_ones.append(url)
                        else:  # it's a good one.
                            os.rename(tmpfile, filename)
                            # set the ctime/mtime, just for fun!
                            datetakentime = time.mktime(strptime(date_taken, "%Y-%m-%d  %H:%M:%S"))
                            os.utime(filename, (datetakentime, datetakentime))

                        downloadeds += 1
                    elif opts.list:
                        print "Skipping (cached):", url
                    count += 1
            except KeyboardInterrupt:
                raise
        finally:
            if not opts.list:
                self.progress.update()
            print "Processed %d images" % count
            print "Downloaded %d images" % downloadeds
            if bad_ones:
                print "Some images could not be downloaded:"
                print '\n\t'.join(bad_ones)

    def _search(self, opts):
        base = opts.base
        if not os.path.exists(opts.base):
            print "Make sure the base directory: %s exists first" % opts.base
            sys.exit(2)
        if opts.test:
            opts.list = True
        global rsp # for debugging runs invoked with -i

        self._setup(opts)

        kw = dict(api_key=self.flickrAPIKey,
                  auth_token=self.token,
                  extras="date_taken",
                  sort="date-taken-asc",
                  per_page="500")

        if opts.set:
            kw.update(photoset_id=opts.set)
            search = "Getting photos in set (%s)" % opts.set
            self.progress.update(search)
            print search + ': ',
            sys.stdout.flush()
            photo_accessor = self.fapi.photosets_getPhotos
            rsp = photo_accessor(**kw)
            self.fapi.testFailure(rsp)
            payload = rsp.photoset[0]

        else: # time and/or tag-based searches
            if opts.month:
                year = opts.year or time.localtime(time.time())[0]
                monthStart = datetime.date(year, opts.month, 1)
                monthEnd = monthStart + datetime.timedelta(days=35)
                min_date = monthStart.isoformat()
                max_date = monthEnd.isoformat()
            else:
                startyear = opts.year or 1900
                endyear = opts.year or time.localtime(time.time())[0]+1
                monthStart = datetime.date(startyear, 1, 1)
                monthEnd = datetime.date(endyear+1, 1, 1)
                min_date = monthStart.isoformat()
                max_date = monthEnd.isoformat()
                print min_date, max_date
            if opts.year:
                if opts.month:
                    search = "Searching for photos taken in %s %s" % (calendar.month_name[opts.month], opts.year)
                else:
                    search = "Searching for photos taken in %s" % opts.year
            else:
                search = "Searching for photos (all dates)"
            if opts.tags:
                search += " tagged: " + opts.tags
            kw.update(user_id="me",
                      min_taken_date=min_date,
                      max_taken_date=max_date)
            if opts.tags:
                kw['tags'] = opts.tags

            self.progress.update(search)
            print search + ': ',
            sys.stdout.flush()

            photo_accessor = self.fapi.photos_search
            rsp = photo_accessor(**kw)
            self.fapi.testFailure(rsp)
            payload = rsp.photos[0]

        self.progress.finish()

        pages = payload['pages']
        if pages == '0':
            print "no photos found"
            return []
        num_photos = int(payload['total'])
        print "found %d photos (getting data)" % num_photos
        urls = extract_urls(payload.photo, opts.size)

        self.progress.reset('Flickr Download')
        for page in range(2, int(pages)+1):
            kw['page'] = str(page)
            rsp = photo_accessor(**kw)
            self.fapi.testFailure(rsp)
            if opts.set:
                payload = rsp.photoset[0]
            else:
                payload = rsp.photos[0]
            urls.extend(extract_urls(payload.photo, opts.size))
            self.progress.update("Phase 1: Getting info about batch %d of %d " % (page, int(pages)),
                                 page/float(pages))
        print
        return urls
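
A note on Example #1: it leans on a few module-level helpers (parse_date_taken, extract_urls, pretty_size) that are not shown. Below is a minimal sketch of what those helpers might look like, assuming Flickr's usual datetaken string and static-photo URL layout; the names, values, and URL details are assumptions, not the original module's code.

# Hypothetical helpers assumed by Example #1 (illustration only).
pretty_size = {'s': 'small', 't': 'thumb', 'm': 'medium',
               'b': 'large', '-': 'normal', 'o': 'original'}

def parse_date_taken(date_taken):
    """Split Flickr's 'YYYY-MM-DD HH:MM:SS' datetaken string into ints."""
    date_part = date_taken.split(' ')[0]
    year, month, day = [int(x) for x in date_part.split('-')]
    return year, month, day

def extract_urls(photos, size):
    """Build (id, url, original_url, date_taken) tuples from photo nodes."""
    urls = []
    for p in photos:
        base = "http://farm%s.staticflickr.com/%s/%s_%s" % (
            p['farm'], p['server'], p['id'], p['secret'])
        suffix = ".jpg" if size == '-' else "_%s.jpg" % size
        # a real implementation would need the photo's originalsecret here
        original_url = base + "_o.jpg"
        urls.append((p['id'], base + suffix, original_url, p['datetaken']))
    return urls
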
Example #2
class Offlickr:
    def __init__(
        self,
        key,
        secret,
        httplib=None,
        dryrun=False,
        verbose=False,
    ):
        """Instantiates an Offlickr object
        An API key is needed, as well as an API secret"""

        self.__flickrAPIKey = key
        self.__flickrSecret = secret
        self.__httplib = httplib

        # Get authentication token
        # note we must explicitly select the xmlnode parser to be compatible with FlickrAPI 1.2

        self.fapi = FlickrAPI(self.__flickrAPIKey,
                              self.__flickrSecret,
                              format='xmlnode')
        (token, frob) = self.fapi.get_token_part_one()
        if not token:
            raw_input('Press ENTER after you authorized this program')
        self.fapi.get_token_part_two((token, frob))
        self.token = token
        test_login = self.fapi.test_login()
        uid = test_login.user[0]['id']
        self.flickrUserId = uid
        self.dryrun = dryrun
        self.verbose = verbose

    def __testFailure(self, rsp):
        """Returns whether the previous call was successful"""

        if rsp['stat'] == 'fail':
            print 'Error!'
            return True
        else:
            return False

    def getPhotoList(self, dateLo, dateHi):
        """Returns a list of photo given a time frame"""

        n = 0
        flickr_max = 500
        photos = []

        print 'Retrieving list of photos'
        while True:
            if self.verbose:
                print 'Requesting a page...'
            n = n + 1
            rsp = self.fapi.photos_search(
                api_key=self.__flickrAPIKey,
                auth_token=self.token,
                user_id=self.flickrUserId,
                per_page=str(flickr_max),
                page=str(n),
                min_upload_date=dateLo,
                max_upload_date=dateHi,
            )
            if self.__testFailure(rsp):
                return None
            if rsp.photos[0]['total'] == '0':
                return None
            photos += rsp.photos[0].photo
            if self.verbose:
                print ' %d photos so far' % len(photos)
            if len(photos) >= int(rsp.photos[0]['total']):
                break

        return photos

    def getGeotaggedPhotoList(self, dateLo, dateHi):
        """Returns a list of photo given a time frame"""

        n = 0
        flickr_max = 500
        photos = []

        print 'Retrieving list of photos'
        while True:
            if self.verbose:
                print 'Requesting a page...'
            n = n + 1
            rsp = \
                self.fapi.photos_getWithGeoData(api_key=self.__flickrAPIKey,
                    auth_token=self.token, user_id=self.flickrUserId,
                    per_page=str(flickr_max), page=str(n))
            if self.__testFailure(rsp):
                return None
            if rsp.photos[0]['total'] == '0':
                return None
            photos += rsp.photos[0].photo
            if self.verbose:
                print ' %d photos so far' % len(photos)
            if len(photos) >= int(rsp.photos[0]['total']):
                break

        return photos

    def getPhotoLocation(self, pid):
        """Returns a string containing location of a photo (in XML)"""

        rsp = \
            self.fapi.photos_geo_getLocation(api_key=self.__flickrAPIKey,
                auth_token=self.token, photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        info = doc.xpathEval('/rsp/photo')[0].serialize()
        doc.freeDoc()
        return info

    def getPhotoLocationPermission(self, pid):
        """Returns a string containing location permision for a photo (in XML)"""

        rsp = \
            self.fapi.photos_geo_getPerms(api_key=self.__flickrAPIKey,
                auth_token=self.token, photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        info = doc.xpathEval('/rsp/perms')[0].serialize()
        doc.freeDoc()
        return info

    def getPhotosetList(self):
        """Returns a list of photosets for a user"""

        rsp = self.fapi.photosets_getList(api_key=self.__flickrAPIKey,
                                          auth_token=self.token,
                                          user_id=self.flickrUserId)
        if self.__testFailure(rsp):
            return None
        return rsp.photosets[0].photoset

    def getPhotosetInfo(self, pid, method):
        """Returns a string containing information about a photoset (in XML)"""

        rsp = method(api_key=self.__flickrAPIKey,
                     auth_token=self.token,
                     photoset_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        info = doc.xpathEval('/rsp/photoset')[0].serialize()
        doc.freeDoc()
        return info

    def getPhotoMetadata(self, pid):
        """Returns an array containing containing the photo metadata (as a string), and the format of the photo"""

        if self.verbose:
            print 'Requesting metadata for photo %s' % pid
        rsp = self.fapi.photos_getInfo(api_key=self.__flickrAPIKey,
                                       auth_token=self.token,
                                       photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        metadata = doc.xpathEval('/rsp/photo')[0].serialize()
        doc.freeDoc()
        return [metadata, rsp.photo[0]['originalformat']]

    def getPhotoComments(self, pid):
        """Returns an XML string containing the photo comments"""

        if self.verbose:
            print 'Requesting comments for photo %s' % pid
        rsp = \
            self.fapi.photos_comments_getList(api_key=self.__flickrAPIKey,
                auth_token=self.token, photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        comments = doc.xpathEval('/rsp/comments')[0].serialize()
        doc.freeDoc()
        return comments

    def getPhotoSizes(self, pid):
        """Returns a string with is a list of available sizes for a photo"""

        rsp = self.fapi.photos_getSizes(api_key=self.__flickrAPIKey,
                                        auth_token=self.token,
                                        photo_id=pid)
        if self.__testFailure(rsp):
            return None
        return rsp

    def getOriginalPhoto(self, pid):
        """Returns a URL which is the original photo, if it exists"""

        source = None
        rsp = self.getPhotoSizes(pid)
        if rsp == None:
            return None
        is_video = False
        for s in rsp.sizes[0].size:
            if s['label'] == 'Original':
                source = s['source']
        for s in rsp.sizes[0].size:
            if s['label'] == 'Video Original':
                source = s['source']
                is_video = True
        # is_video is True when a 'Video Original' size was found
        return [source, is_video]

    def __downloadReportHook(
        self,
        count,
        blockSize,
        totalSize,
    ):

        if not self.__verbose:
            return
        p = ((100 * count) * blockSize) / totalSize
        if p > 100:
            p = 100
        print '\r %3d %%' % p,
        sys.stdout.flush()

    def downloadURL(
        self,
        url,
        target,
        filename,
        verbose=False,
    ):
        """Saves a photo in a file"""

        if self.dryrun:
            return
        self.__verbose = verbose
        tmpfile = '%s/%s.TMP' % (target, filename)
        if self.__httplib == 'wget':
            cmd = 'wget -q -t 0 -T 120 -w 10 -c -O %s %s' % (tmpfile, url)
            os.system(cmd)
        else:
            urllib.urlretrieve(url,
                               tmpfile,
                               reporthook=self.__downloadReportHook)
        os.rename(tmpfile, '%s/%s' % (target, filename))
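
A minimal driver sketch for the Offlickr class above, assuming the key and secret are supplied by the caller and that upload dates are passed as Unix-timestamp strings; everything outside the class here is a placeholder, not part of the original script.

# Placeholder usage sketch for Offlickr (key/secret/dates are made up).
if __name__ == '__main__':
    offlickr = Offlickr('YOUR_API_KEY', 'YOUR_API_SECRET', verbose=True)

    # photos uploaded in a given window (Unix timestamps, as strings)
    photos = offlickr.getPhotoList('1230768000', '1262304000')
    if photos is None:
        print 'No photos found in that date range'
    else:
        for p in photos:
            url, is_video = offlickr.getOriginalPhoto(p['id'])
            if url:
                offlickr.downloadURL(url, '/tmp', '%s.jpg' % p['id'],
                                     verbose=True)
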
Example #3
class Shell(cmdln.Cmdln):
    r"""ci2 -- the new Code Intel, a tool for working with source code

    usage:
        ${name} SUBCOMMAND [ARGS...]
        ${name} help SUBCOMMAND

    ${option_list}
    ${command_list}
    ${help_list}
    """
    name = "batchr"

    #XXX There is a bug in cmdln.py alignment when using this. Leave it off
    #    until that is fixed.
    #helpindent = ' '*4

    def _setup(self, opts):
        # flickr auth information:
        self.flickrAPIKey = os.environ['FLICKR_BATCHR_KEY']
        self.flickrSecret = os.environ['FLICKR_BATCHR_SECRET']
        try:
            gui = opts.gui
        except AttributeError:  # really wish I could just test "gui" in opts
            gui = False
        self.progress = progressBar("Flickr Shell",
                                    indeterminate=True,
                                    gui=gui)

        self.progress.update("Logging In...")
        # make a new FlickrAPI instance
        self.fapi = FlickrAPI(self.flickrAPIKey, self.flickrSecret)

        # do the whole whatever-it-takes to get a valid token:
        self.token = self.fapi.getToken(browser="Firefox")

    def do_listsets(self, subcmd, opts, *args):
        """List set ids and set titles (with substring title matches)"""
        self._setup(opts)
        kw = dict(api_key=self.flickrAPIKey, auth_token=self.token)
        rsp = self.fapi.photosets_getList(**kw)
        self.fapi.testFailure(rsp)
        sets = rsp.photosets[0].photoset
        for set in sets:
            title = set.title[0].elementText
            match = True
            if args:
                match = False
                for word in args:
                    if word.lower() in title.lower():
                        match = True
                        break
            if match:
                print set['id'] + ':', title, '(' + set['photos'] + " photos)"
        self.progress.finish()

    @cmdln.option("-b",
                  "--base",
                  dest="base",
                  default="./photos",
                  help="the directory we want to download to")
    @cmdln.option(
        "-F",
        "--format",
        dest="format",
        help=
        "the file layout format to use within the base directory (default is %(year)s/%(month_name)s/%(size)s/%(id)s.jpg",
        default="%(year)s/%(month_name)s/%(size)s/%(id)s.jpg")
    @cmdln.option(
        "-f",
        "--force",
        action="store_true",
        help="force downloads even if files with same name already exist")
    @cmdln.option("-y",
                  "--year",
                  dest="year",
                  type='int',
                  help="the year we want backed up")
    @cmdln.option("-m",
                  "--month",
                  dest="month",
                  type='int',
                  help="the month we want backed up")
    @cmdln.option("-s",
                  "--size",
                  dest="size",
                  default='-',
                  nargs=1,
                  help="the size we want downloaded: one of: s t m b - o")
    @cmdln.option(
        "-t",
        "--test",
        action="store_true",
        help="Just get the URLs (don't download images) -- implies -l")
    @cmdln.option("--tags", help="Tags (separatead by commas)")
    @cmdln.option("-S",
                  "--set",
                  help="Set id (use listsets command to get them)")
    @cmdln.option(
        "-l",
        "--list",
        action="store_true",
        help="List URLs being downloaded instead of using progress bar")
    @cmdln.option("--gui",
                  action="store_true",
                  help="use Cocoa progress bar on OS X")
    def do_download(self, subcmd, opts):
        """Download images based on search criteria (dates, tags, sets)

        ${cmd_usage}
        ${cmd_option_list}
        Any errors will be printed. Returns the number of errors (i.e.
        exit value is 0 if there are no consistency problems).
        """
        urls = self._search(opts)
        self.progress.reset("Getting Photo Info")
        count = 0
        downloadeds = 0
        bad_ones = []
        try:
            try:
                for (id, url, original_url, date_taken) in urls:
                    year, month, day = parse_date_taken(date_taken)
                    month_name = calendar.month_name[month]
                    size = pretty_size[opts.size]
                    filename = opts.format % locals()
                    filename = join(opts.base, filename)

                    extra_info = " (%s %s %s)          " % (day, month_name,
                                                            year)
                    self.progress.update("Phase 2: Downloading: %d/%d " %
                                         (count + 1, len(urls)),
                                         (count + 1) / float(len(urls)),
                                         after_args=(extra_info, ))
                    if opts.test:
                        print "Would download (test):", url, "to", filename
                        continue
                    if not os.path.exists(filename):
                        if opts.list:
                            print "Downloading:", url
                        tmpfile = os.path.join(opts.base, 'tempfile.jpg')
                        try:
                            os.makedirs(os.path.dirname(filename))
                        except OSError:
                            pass
                        f, headers = urllib.urlretrieve(url, tmpfile)
                        if (int(headers['Content-Length']) == 2900
                                and headers['Content-Type'] == 'image/gif'):
                            # first try the original size
                            url = original_url
                            print "GETTING ORIGINAL", url
                            f, headers = urllib.urlretrieve(url, tmpfile)
                        if (int(headers['Content-Length']) == 2900
                                and headers['Content-Type'] == 'image/gif'):
                            # something's wrong
                            print "SOMETHING's BAD", url
                            os.unlink(tmpfile)
                            bad_ones.append(url)
                        else:  # it's a good one.
                            os.rename(tmpfile, filename)
                            # set the ctime/mtime, just for fun!
                            datetakentime = time.mktime(
                                strptime(date_taken, "%Y-%m-%d  %H:%M:%S"))
                            os.utime(filename, (datetakentime, datetakentime))

                        downloadeds += 1
                    elif opts.list:
                        print "Skipping (cached):", url
                    count += 1
            except KeyboardInterrupt:
                raise
        finally:
            if not opts.list:
                self.progress.update()
            print "Processed %d images" % count
            print "Downloaded %d images" % downloadeds
            if bad_ones:
                print "Some images could not be downloaded:"
                print '\n\t'.join(bad_ones)

    def _search(self, opts):
        base = opts.base
        if not os.path.exists(opts.base):
            print "Make sure the base directory: %s exists first" % opts.base
            sys.exit(2)
        if opts.test:
            opts.list = True
        global rsp  # for debugging runs invoked with -i

        self._setup(opts)

        kw = dict(api_key=self.flickrAPIKey,
                  auth_token=self.token,
                  extras="date_taken",
                  sort="date-taken-asc",
                  per_page="500")

        if opts.set:
            kw.update(photoset_id=opts.set)
            search = "Getting photos in set (%s)" % opts.set
            self.progress.update(search)
            print search + ': ',
            sys.stdout.flush()
            photo_accessor = self.fapi.photosets_getPhotos
            rsp = photo_accessor(**kw)
            self.fapi.testFailure(rsp)
            payload = rsp.photoset[0]

        else:  # time and/or tag-based searches
            if opts.month:
                year = opts.year or time.localtime(time.time())[0]
                monthStart = datetime.date(year, opts.month, 1)
                monthEnd = monthStart + datetime.timedelta(days=35)
                min_date = monthStart.isoformat()
                max_date = monthEnd.isoformat()
            else:
                startyear = opts.year or 1900
                endyear = opts.year or time.localtime(time.time())[0] + 1
                monthStart = datetime.date(startyear, 1, 1)
                monthEnd = datetime.date(endyear + 1, 1, 1)
                min_date = monthStart.isoformat()
                max_date = monthEnd.isoformat()
                print min_date, max_date
            if opts.year:
                if opts.month:
                    search = "Searching for photos taken in %s %s" % (
                        calendar.month_name[opts.month], opts.year)
                else:
                    search = "Searching for photos taken in %s" % opts.year
            else:
                search = "Searching for photos (all dates)"
            if opts.tags:
                search += " tagged: " + opts.tags
            kw.update(user_id="me",
                      min_taken_date=min_date,
                      max_taken_date=max_date)
            if opts.tags:
                kw['tags'] = opts.tags

            self.progress.update(search)
            print search + ': ',
            sys.stdout.flush()

            photo_accessor = self.fapi.photos_search
            rsp = photo_accessor(**kw)
            self.fapi.testFailure(rsp)
            payload = rsp.photos[0]

        self.progress.finish()

        pages = payload['pages']
        if pages == '0':
            print "no photos found"
            return []
        num_photos = int(payload['total'])
        print "found %d photos (getting data)" % num_photos
        urls = extract_urls(payload.photo, opts.size)

        self.progress.reset('Flickr Download')
        for page in range(2, int(pages) + 1):
            kw['page'] = str(page)
            rsp = photo_accessor(**kw)
            self.fapi.testFailure(rsp)
            if opts.set:
                payload = rsp.photoset[0]
            else:
                payload = rsp.photos[0]
            urls.extend(extract_urls(payload.photo, opts.size))
            self.progress.update(
                "Phase 1: Getting info about batch %d of %d " %
                (page, int(pages)), page / float(pages))
        print
        return urls
Example #4
class Offlickr:

    def __init__(
        self,
        key,
        secret,
        httplib=None,
        dryrun=False,
        verbose=False,
        ):
        """Instantiates an Offlickr object
        An API key is needed, as well as an API secret"""

        self.__flickrAPIKey = key
        self.__flickrSecret = secret
        self.__httplib = httplib

        # Get authentication token
        # note we must explicitly select the xmlnode parser to be compatible with FlickrAPI 1.2

        self.fapi = FlickrAPI(self.__flickrAPIKey, self.__flickrSecret,
                              format='xmlnode')
        (token, frob) = self.fapi.get_token_part_one()
        if not token:
            raw_input('Press ENTER after you authorized this program')
        self.fapi.get_token_part_two((token, frob))
        self.token = token
        test_login = self.fapi.test_login()
        uid = test_login.user[0]['id']
        self.flickrUserId = uid
        self.dryrun = dryrun
        self.verbose = verbose

    def __testFailure(self, rsp):
        """Returns whether the previous call was successful"""

        if rsp['stat'] == 'fail':
            print 'Error!'
            return True
        else:
            return False

    def getPhotoList(self, dateLo, dateHi):
        """Returns a list of photo given a time frame"""

        n = 0
        flickr_max = 500
        photos = []

        print 'Retrieving list of photos'
        while True:
            if self.verbose:
                print 'Requesting a page...'
            n = n + 1
            rsp = self.fapi.photos_search(
                api_key=self.__flickrAPIKey,
                auth_token=self.token,
                user_id=self.flickrUserId,
                per_page=str(flickr_max),
                page=str(n),
                min_upload_date=dateLo,
                max_upload_date=dateHi,
                )
            if self.__testFailure(rsp):
                return None
            if rsp.photos[0]['total'] == '0':
                return None
            photos += rsp.photos[0].photo
            if self.verbose:
                print ' %d photos so far' % len(photos)
            if len(photos) >= int(rsp.photos[0]['total']):
                break

        return photos

    def getGeotaggedPhotoList(self, dateLo, dateHi):
        """Returns a list of photo given a time frame"""

        n = 0
        flickr_max = 500
        photos = []

        print 'Retrieving list of photos'
        while True:
            if self.verbose:
                print 'Requesting a page...'
            n = n + 1
            rsp = \
                self.fapi.photos_getWithGeoData(api_key=self.__flickrAPIKey,
                    auth_token=self.token, user_id=self.flickrUserId,
                    per_page=str(flickr_max), page=str(n))
            if self.__testFailure(rsp):
                return None
            if rsp.photos[0]['total'] == '0':
                return None
            photos += rsp.photos[0].photo
            if self.verbose:
                print ' %d photos so far' % len(photos)
            if len(photos) >= int(rsp.photos[0]['total']):
                break

        return photos

    def getPhotoLocation(self, pid):
        """Returns a string containing location of a photo (in XML)"""

        rsp = \
            self.fapi.photos_geo_getLocation(api_key=self.__flickrAPIKey,
                auth_token=self.token, photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        info = doc.xpathEval('/rsp/photo')[0].serialize()
        doc.freeDoc()
        return info

    def getPhotoLocationPermission(self, pid):
        """Returns a string containing location permision for a photo (in XML)"""

        rsp = \
            self.fapi.photos_geo_getPerms(api_key=self.__flickrAPIKey,
                auth_token=self.token, photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        info = doc.xpathEval('/rsp/perms')[0].serialize()
        doc.freeDoc()
        return info

    def getPhotosetList(self):
        """Returns a list of photosets for a user"""

        rsp = self.fapi.photosets_getList(api_key=self.__flickrAPIKey,
                auth_token=self.token, user_id=self.flickrUserId)
        if self.__testFailure(rsp):
            return None
        return rsp.photosets[0].photoset

    def getPhotosetInfo(self, pid, method):
        """Returns a string containing information about a photoset (in XML)"""

        rsp = method(api_key=self.__flickrAPIKey,
                     auth_token=self.token, photoset_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        info = doc.xpathEval('/rsp/photoset')[0].serialize()
        doc.freeDoc()
        return info

    def getPhotoMetadata(self, pid):
        """Returns an array containing containing the photo metadata (as a string), and the format of the photo"""

        if self.verbose:
            print 'Requesting metadata for photo %s' % pid
        rsp = self.fapi.photos_getInfo(api_key=self.__flickrAPIKey,
                auth_token=self.token, photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        metadata = doc.xpathEval('/rsp/photo')[0].serialize()
        doc.freeDoc()
        return [metadata, rsp.photo[0]['originalformat']]

    def getPhotoComments(self, pid):
        """Returns an XML string containing the photo comments"""

        if self.verbose:
            print 'Requesting comments for photo %s' % pid
        rsp = \
            self.fapi.photos_comments_getList(api_key=self.__flickrAPIKey,
                auth_token=self.token, photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        comments = doc.xpathEval('/rsp/comments')[0].serialize()
        doc.freeDoc()
        return comments

    def getPhotoSizes(self, pid):
        """Returns a string with is a list of available sizes for a photo"""

        rsp = self.fapi.photos_getSizes(api_key=self.__flickrAPIKey,
                auth_token=self.token, photo_id=pid)
        if self.__testFailure(rsp):
            return None
        return rsp

    def getOriginalPhoto(self, pid):
        """Returns a URL which is the original photo, if it exists"""

        source = None
        rsp = self.getPhotoSizes(pid)
        if rsp == None:
            return None
        is_video = False
        for s in rsp.sizes[0].size:
            if s['label'] == 'Original':
                source = s['source']
        for s in rsp.sizes[0].size:
            if s['label'] == 'Video Original':
                source = s['source']
                is_video = True
        # is_video is True when a 'Video Original' size was found
        return [source, is_video]

    def __downloadReportHook(
        self,
        count,
        blockSize,
        totalSize,
        ):

        if not self.__verbose:
            return
        p = ((100 * count) * blockSize) / totalSize
        if p > 100:
            p = 100
        print '\r %3d %%' % p,
        sys.stdout.flush()

    def downloadURL(
        self,
        url,
        target,
        filename,
        verbose=False,
        ):
        """Saves a photo in a file"""

        if self.dryrun:
            return
        self.__verbose = verbose
        tmpfile = '%s/%s.TMP' % (target, filename)
        if self.__httplib == 'wget':
            cmd = 'wget -q -t 0 -T 120 -w 10 -c -O %s %s' % (tmpfile,
                    url)
            os.system(cmd)
        else:
            urllib.urlretrieve(url, tmpfile,
                               reporthook=self.__downloadReportHook)
        os.rename(tmpfile, '%s/%s' % (target, filename))
Example #5
class TransFlickr: 

  extras = "original_format,date_upload,last_update"

  def __init__(self, browserName):
    self.fapi = FlickrAPI(flickrAPIKey, flickrSecret)
    self.user_id = ""
    # proceed with auth
    # TODO use auth.checkToken function if available, 
    # and wait after opening browser.
    print "Authorizing with flickr..."
    log.info("authorizing with flickr...")
    try:
      self.authtoken = self.fapi.getToken(browser=browserName)
    except:
      print ("Can't retrieve token from browser %s" % browserName)
      print ("\tIf you're behind a proxy server,"
             " first set http_proxy environment variable.")
      print "\tPlease close all your browser windows, and try again"
      log.error(format_exc())
      log.error("can't retrieve token from browser %s", browserName)
      sys.exit(-1)
    if self.authtoken == None:
      print "Unable to authorize (reason unknown)"
      log.error('not able to authorize; exiting')
      sys.exit(-1)
        #Add some authorization checks here(?)
    print "Authorization complete."
    log.info('authorization complete')
    
  def uploadfile(self, filepath, taglist, bufData, mode):
    #Set public 4(always), 1(public). Public overwrites f&f.
    public = mode&1
    #Set friends and family 4(always), 2(family), 1(friends).
    friends = mode>>3 & 1
    family = mode>>4 & 1
      #E.g. 745 - 4:No f&f, but 5:public
      #E.g. 754 - 5:friends, but not public
      #E.g. 774 - 7:f&f, but not public

    log.info("uploading file %s", filepath)
    log.info("  data length: %s", len(bufData))
    log.info("  taglist: %s", taglist)
    log.info("  permissions: family %s, friends %s, public %s",
             family, friends, public)
    filename = os.path.splitext(os.path.basename(filepath))[0]
    rsp = self.fapi.upload(filename=filepath, jpegData=bufData,
          title=filename,
          tags=taglist,
          is_public=public and "1" or "0",
          is_friend=friends and "1" or "0",
          is_family=family and "1" or "0")

    if rsp is None:
      log.error("response None from attempt to write file %s", filepath)
      log.error("will attempt recovery...")
      recent_rsp = None
      trytimes = 2
      while(trytimes):
        log.info("sleeping for 3 seconds...")
        time.sleep(3)
        trytimes -= 1
        # Keep on trying to retrieve the recently uploaded photo, till we
        # actually get the information, or the function throws an exception.
        while(recent_rsp is None or not recent_rsp):
          recent_rsp = self.fapi.photos_recentlyUpdated(
              auth_token=self.authtoken, min_date='1', per_page='1')
        
        pic = recent_rsp.photos[0].photo[0]
        log.info('we are looking for %s', filename)
        log.info('most recently updated pic is %s', pic['title'])
        if filename == pic['title']:
          id = pic['id']
          log.info("file %s uploaded with photoid %s", filepath, id)
          return id
      log.error("giving up; upload of %s appears to have failed", filepath)
      return None
    else:
      id = rsp.photoid[0].elementText
      log.info("file %s uploaded with photoid %s", filepath, id)
      return id

  def put2Set(self, set_id, photo_id):
    log.info("uploading photo %s to set id %s", photo_id, set_id)
    rsp = self.fapi.photosets_addPhoto(auth_token=self.authtoken, 
                                       photoset_id=set_id, photo_id=photo_id)
    if rsp:
      log.info("photo uploaded to set")
    else:
      log.error(rsp.errormsg)
  
  def createSet(self, path, photo_id):
    log.info("creating set %s with primary photo %s", path, photo_id)
    path, title = os.path.split(path)
    rsp = self.fapi.photosets_create(auth_token=self.authtoken, 
                                     title=title, primary_photo_id=photo_id)
    if rsp:
      log.info("created set %s", title)
      return rsp.photoset[0]['id']
    else:
      log.error(rsp.errormsg)
  
  def deleteSet(self, set_id):
    log.info("deleting set %s", set_id)
    if str(set_id)=="0":
      log.info("ignoring attempt to delete set wtih set_id 0 (a locally "
        "created set that has not yet acquired an id via uploading")
      return
    rsp = self.fapi.photosets_delete(auth_token=self.authtoken, 
                                     photoset_id=set_id)
    if rsp:
      log.info("deleted set %s", set_id)
    else:
      log.error(rsp.errormsg)
  
  def getPhotoInfo(self, photoId):
    log.debug("id: %s", photoId)
    rsp = self.fapi.photos_getInfo(auth_token=self.authtoken, photo_id=photoId)
    if not rsp:
      log.error("can't retrieve information about photo %s; got error %s",
                photoId, rsp.errormsg)
      return None
    #XXX: should see if there's some other 'format' option we can fall back to.
    try: format = rsp.photo[0]['originalformat']
    except KeyError: format = 'jpg'
    perm_public = rsp.photo[0].visibility[0]['ispublic']
    perm_family = rsp.photo[0].visibility[0]['isfamily']
    perm_friend = rsp.photo[0].visibility[0]['isfriend']
    if perm_public == '1':
      mode = 0755
    else:
      b_cnt = 4
      if perm_family == '1':
        b_cnt += 2
      if perm_friend == '1':
        b_cnt += 1
      mode = "07" + str(b_cnt) + "4"
      mode = int(mode)
      
    if hasattr(rsp.photo[0],'permissions'):
      permcomment = rsp.photo[0].permissions[0]['permcomment']
      permaddmeta = rsp.photo[0].permissions[0]['permaddmeta']
    else:
      permcomment = permaddmeta = [None]
      
    commMeta = '%s%s' % (permcomment,permaddmeta) # Required for chmod.
    desc = rsp.photo[0].description[0].elementText
    title = rsp.photo[0].title[0].elementText
    if hasattr(rsp.photo[0].tags[0], "tag"):
      taglist = [ a.elementText for a in rsp.photo[0].tags[0].tag ]
    else:
      taglist = []
    license = rsp.photo[0]['license']
    owner = rsp.photo[0].owner[0]['username']
    ownerNSID = rsp.photo[0].owner[0]['nsid']
    url = rsp.photo[0].urls[0].url[0].elementText
    posted = rsp.photo[0].dates[0]['posted']
    lastupdate = rsp.photo[0].dates[0]['lastupdate']
    return (format, mode, commMeta, desc, title, taglist, 
            license, owner, ownerNSID, url, int(posted), int(lastupdate))

  def setPerm(self, photoId, mode, comm_meta="33"):
    log.debug("id: %s, mode: %s, comm_meta=%s", photoId, mode, comm_meta)
    public = mode&1 #Set public 4(always), 1(public). Public overwrites f&f
    #Set friends and family 4(always), 2(family), 1(friends) 
    friends = mode>>3 & 1
    family = mode>>4 & 1
    if len(comm_meta)<2: 
      # This works around a "string index out of range" bug, caused
      # because some photos may not have a comm_meta value set.
      comm_meta="33"
    rsp = self.fapi.photos_setPerms(auth_token=self.authtoken,
                                    is_public=str(public),
                                    is_friend=str(friends), 
                                    is_family=str(family), 
                                    perm_comment=comm_meta[0],
                                    perm_addmeta=comm_meta[1], 
                                    photo_id=photoId)
    if not rsp:
      log.error("couldn't set permission for photo %s; got error %s",
                photoId, rsp.errormsg)
      return False
    log.info("permissions have been set for photo %s", photoId)
    return True

  def setTags(self, photoId, tags):
    log.debug("id: %s, tags: %s", photoId, tags)
    templist = [ '"%s"'%(a,) for a in string.split(tags, ',')] + ['flickrfs']
    tagstring = ' '.join(templist)
    rsp = self.fapi.photos_setTags(auth_token=self.authtoken, 
                                   photo_id=photoId, tags=tagstring)
    if not rsp:
      log.error("couldn't set tags for %s; got error %s",
                photoId, rsp.errormsg)
      return False
    return True
  
  def setMeta(self, photoId, title, desc):
    log.debug("id: %s, title: %s, desc: %s", photoId, title, desc)
    rsp = self.fapi.photos_setMeta(auth_token=self.authtoken, 
                                   photo_id=photoId, title=title, 
                                   description=desc)
    if not rsp:
      log.error("couldn't set meta info for photo %s; got error",
                photoId, rsp.errormsg)
      return False
    return True

  def getLicenses(self):
    log.debug("started")
    rsp = self.fapi.photos_licenses_getInfo()
    if not rsp:
      log.error("couldn't retrieve licenses; got error %s", rsp.errormsg)
      return None
    licenseDict = {}
    for l in rsp.licenses[0].license:
      licenseDict[l['id']] = l['name']
    keys = licenseDict.keys()
    keys.sort()
    sortedLicenseList = []
    for k in keys:
      # Add tuple of license key, and license value.
      sortedLicenseList.append((k, licenseDict[k]))
    return sortedLicenseList
    
  def setLicense(self, photoId, license):
    log.debug("id: %s, license: %s", photoId, license)
    rsp = self.fapi.photos_licenses_setLicense(auth_token=self.authtoken, 
                                               photo_id=photoId, 
                                               license_id=license)
    if not rsp:
      log.error("couldn't set license info for photo %s; got error %s",
                photoId, rsp.errormsg)
      return False
    return True

  def getPhoto(self, photoId):
    log.debug("id: %s", photoId)
    rsp = self.fapi.photos_getSizes(auth_token=self.authtoken, 
                                    photo_id=photoId)
    if not rsp:
      log.error("error while trying to retrieve size information"
                " for photo %s", photoId)
      return None
    buf = ""
    for a in rsp.sizes[0].size:
      if a['label']=='Original':
        try:
          f = urllib2.urlopen(a['source'])
          buf = f.read()
        except:
          log.error("exception in getPhoto")
          log.error(format_exc())
          return ""
    if not buf:
      f = urllib2.urlopen(rsp.sizes[0].size[-1]['source'])
      buf = f.read()
    return buf

  def removePhotofromSet(self, photoId, photosetId):
    log.debug("id: %s, setid: %s", photoId, photosetId)
    rsp = self.fapi.photosets_removePhoto(auth_token=self.authtoken, 
                                          photo_id=photoId, 
                                          photoset_id=photosetId)
    if rsp:
      log.info("photo %s removed from set %s", photoId, photosetId)
    else:
      log.error(rsp.errormsg)
      
    
  def getBandwidthInfo(self):
    log.debug("retrieving bandwidth information")
    rsp = self.fapi.people_getUploadStatus(auth_token=self.authtoken)
    if not rsp:
      log.error("can't retrieve bandwidth information; got error %s",
        rsp.errormsg)
      return (None,None)
    bw = rsp.user[0].bandwidth[0]
    log.debug("max bandwidth: %s, bandwidth used: %s", bw['max'], bw['used'])
    return (bw['max'], bw['used'])

  def getUserId(self):
    log.debug("entered")
    rsp = self.fapi.auth_checkToken(api_key=flickrAPIKey, 
                                    auth_token=self.authtoken)
    if not rsp:
      log.error("unable to get userid; got error %s", rsp.errormsg)
      return None
    usr = rsp.auth[0].user[0]
    log.info("got NSID %s", usr['nsid'])
    #Set self.user_id to this value
    self.user_id = usr['nsid']
    return usr['nsid']

  def getPhotosetList(self):
    log.debug("entered")
    if self.user_id is "":
      self.getUserId() #This will set the value of self.user_id
    rsp = self.fapi.photosets_getList(auth_token=self.authtoken, 
                                      user_id=self.user_id)
    if not rsp:
      log.error("error getting photoset list; got error %s", rsp.errormsg)
      return []
    if not hasattr(rsp.photosets[0], "photoset"):
      log.info("no sets found for userid %s", self.user_id)
      return []
    else:
      log.info("%s sets found for userid %s",
          len(rsp.photosets[0].photoset), self.user_id)
    return rsp.photosets[0].photoset

  def parseInfoFromPhoto(self, photo, perms=None):
    info = {}
    info['id'] = photo['id']
    info['title'] = photo['title'].replace('/', '_')
    # Some pics don't contain originalformat attribute, so set it to jpg by default.
    try:
      info['format'] = photo['originalformat']
    except KeyError:
      info['format'] = 'jpg'

    try:
      info['dupload'] = photo['dateupload']
    except KeyError:
      info['dupload'] = '0'

    try:
      info['dupdate'] = photo['lastupdate']
    except KeyError:
      info['dupdate'] = '0'
    
    info['perms'] = perms
    return info

  def parseInfoFromFullInfo(self, id, fullInfo):
    info = {}
    info['id'] = id
    info['title'] = fullInfo[4]
    info['format'] = fullInfo[0]
    info['dupload'] = fullInfo[10]
    info['dupdate'] = fullInfo[11]
    info['mode'] = fullInfo[1]
    return info

  def getPhotosFromPhotoset(self, photoset_id):
    log.debug("set id: %s", photoset_id)
    photosPermsMap = {}
    # I'm not utilizing the value part of this dictionary. It's arbitrarily
    # set to i.
    for i in range(0,3):
      page = 1
      while True:
        rsp = self.fapi.photosets_getPhotos(auth_token=self.authtoken,
                                            photoset_id=photoset_id, 
                                            extras=self.extras, 
                                            page=str(page),
                                            privacy_filter=str(i))
        if not rsp:
          break
        if not hasattr(rsp.photoset[0], 'photo'):
          log.error("photoset %s doesn't have attribute photo", rsp.photoset[0]['id'])
          break
        for p in rsp.photoset[0].photo:
          photosPermsMap[p] = str(i)
        page += 1
        if page > int(rsp.photoset[0]['pages']): break
      if photosPermsMap: break
    return photosPermsMap
            
  def getPhotoStream(self, user_id):
    log.debug("userid: %s", user_id)
    retList = []
    pageNo = 1
    maxPage = 1
    while pageNo<=maxPage:
      log.info("retreiving page number %s of %s", pageNo, maxPage) 
      rsp = self.fapi.photos_search(auth_token=self.authtoken, 
                                    user_id=user_id, per_page="500", 
                                    page=str(pageNo), extras=self.extras)
      if not rsp:
        log.error("can't retrive photos from your stream; got error %s",
            rsp.errormsg)
        return retList
      if not hasattr(rsp.photos[0], 'photo'):
        log.error("photos.search response doesn't have attribute photos; "
            "returning list acquired so far")
        return retList
      for a in rsp.photos[0].photo:
        retList.append(a)
      maxPage = int(rsp.photos[0]['pages'])
      pageNo = pageNo + 1
    return retList
 
  def getTaggedPhotos(self, tags, user_id=None):
    log.debug("tags: %s user_id: %s", tags, user_id)
    kw = kwdict(auth_token=self.authtoken, tags=tags, tag_mode="all", 
                extras=self.extras, per_page="500")
    if user_id is not None: 
      kw = kwdict(user_id=user_id, **kw)
    rsp = self.fapi.photos_search(**kw)
    log.debug("search for photos with tags %s has been"
              " successfully finished" % tags)
    if not rsp:
      log.error("couldn't search for the photos; got error %s", rsp.errormsg)
      return
    if not hasattr(rsp.photos[0], 'photo'):
      return []
    return rsp.photos[0].photo
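
Example #5 (from flickrfs) references a kwdict helper and packs Flickr visibility into a Unix-style octal mode. The sketch below shows one plausible shape for the helper and a worked decoding of the mode bits; kwdict here is an assumption, not the original flickrfs code.

# Assumed shape of the kwdict helper used in getTaggedPhotos.
def kwdict(**kwargs):
    return kwargs

# Decoding the permission mode used by uploadfile/setPerm: with an octal
# mode such as 0754, bit 0 carries "public" and bits 3-4 carry
# "friends"/"family" (compare the comments in uploadfile above).
mode = 0754
public = mode & 1        # -> 0 (not public)
friends = mode >> 3 & 1  # -> 1 (visible to friends)
family = mode >> 4 & 1   # -> 0 (not visible to family)
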
Example #6
# flickr auth information:
flickrSecret = "3fbf7144be7eca28"                  # shared "secret"

flickrAPIKey = "f8aa9917a9ae5e44a87cae657924f42d"  # API key
# make a new FlickrAPI instance
fapi = FlickrAPI(flickrAPIKey, flickrSecret)

# do the whole whatever-it-takes to get a valid token:
token = fapi.getToken(browser="/usr/bin/firefox")

# get my favorites
rsp = fapi.favorites_getList(api_key=flickrAPIKey,auth_token=token)
fapi.testFailure(rsp)

#print 'Photosets: '
print fapi.photosets_getList(api_key=flickrAPIKey, auth_token=token)
rsp = fapi.photosets_getList(api_key=flickrAPIKey, auth_token=token)
fapi.testFailure(rsp)
#print photoSets
#print ', '.join([str(set.title) for set in photoSets])

#person = fapi.flickr_people_getInfo(user_id="tuxmann")
#print person.username

# and print them
if hasattr(rsp.photosets[0], "photoset"):
	print 'yeup!'
else:
	print 'nope'

for a in rsp.photosets[0].photoset:
    print a.title[0].elementText
Example #7
class Importer(object):

    def __init__(self):
        self.flickr = FlickrAPI(FLICKR_KEY)

    def get_photosets(self, username, filename=None):
        filename = filename or username+'.json'
        if os.path.exists(filename):
            print "Looks like we already have information about your photos."
            if raw_input("Refresh? (y/n): ").lower().startswith('n'):
                return deserialize(open(filename).read())

        print "Downloading information about your photos."
        if '@' in username:
            response = self.flickr.people_findByEmail(find_email=username)
        else:
            response = self.flickr.people_findByUsername(username=username)
        nsid = response[0].get('nsid')

        response = self.flickr.photosets_getList(user_id=nsid)
        photosets = []
        photo_ids = []
        for ps in response[0]:
            photoset = {'id': ps.get('id'),
                        'title': ps[0].text,
                        'description': ps[1].text,
                        'photos':[]}
            photos_response = self.flickr.photosets_getPhotos(photoset_id=photoset['id'],
                                                              extras='url_o')
            for pxml in photos_response[0]:
                photo = {'id':pxml.get('id'),
                         'title':pxml.get('title')}
                photoset['photos'].append(photo)
                photo_ids.append(photo['id'])
            print photoset['title'],'-',len(photoset['photos']),'photos'
            photosets.append(photoset)

        # get photos not in photosets
        photos_response = self.flickr.photos_search(user_id=nsid, per_page=500)
        photoset = {'id':'stream',
                    'title':'Flickr Stream',
                    'description':'Photos from my flickr stream',
                    'photos':[]}
        for pxml in photos_response[0]:
            photo = {'id':pxml.get('id'),
                     'title':pxml.get('title')}
            if photo['id'] not in photo_ids:
                photoset['photos'].append(photo)
                photo_ids.append(photo['id'])
        if photoset['photos']:
            print photoset['title'],'-',len(photoset['photos']),'photos'
            photosets.append(photoset)

        f = open(filename, "w")
        f.write(serialize(photosets))
        f.close()
        return photosets

    def download_images(self, photosets, directory):
        print "Downloading your photos"
        if not os.path.exists(directory):
            os.mkdir(directory)
        default = None
        for photoset in photosets:
            dirpath = os.path.join(directory, photoset['id']+' - '+photoset['title'])
            if not os.path.exists(dirpath):
                os.mkdir(dirpath)
            for photo in photoset['photos']:
                filename = os.path.join(dirpath, photo['id']+'.jpg')
                if os.path.exists(filename):
                    if default is None:
                        print "Photo", photo['id'], "has already been downloaded."
                        default = raw_input("Download again? (y/n/Y/N) (capital to not ask again): ")
                    if default == 'n':
                        default = None
                        continue
                    elif default == 'N':
                        continue
                    elif default == 'y':
                        default = None

                if not photo.get('url'):
                    try:
                        sizes_response = self.flickr.photos_getSizes(photo_id=photo['id'])
                    except:
                        print "Failed to download photo:", photo['id'], '... sorry!'
                    else:
                        photo['url'] = sizes_response[0][-1].get('source')
                if photo.get('url'):
                    print "Downloading", photo['title'], 'from', photo['url']
                    remote = urllib2.urlopen(photo['url'])
                    f.write(remote.read())
                    f.close()
                    remote.close()

    def upload_images(self, photosets, directory):
        client = DivvyshotClient()
        for photoset in photosets:
            event_data = client.create_event(name=photoset['title'],
                                             description=photoset['description'])
            event_path = '/api/v2/json/event/%s/photo/' % event_data['url_slug']
            for photo in photoset['photos']:
                print "Uploading", photo['title']
                filename = os.path.join(directory, photoset['id']+' - '+photoset['title'], photo['id']+'.jpg')
                if not os.path.exists(filename):
                    print "Looks like photo",photo['id'],'did not get downloaded.'
                    continue
                photo_data = client.create_photo(event_data['url_slug'], filename)
                photo_data = client.update_photo(photo_data['url_slug'], name=photo['title'])
                print "Finished uploading", photo_data['name']
                os.remove(filename)

    def do_import(self):
        username = raw_input("Your flickr username/email: ")
        # Step 1: grab the list of photos from flickr
        photosets = self.get_photosets(username)
        # Step 2: download the images from flickr
        self.download_images(photosets, username)
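        # Step 3: upload the images to Divvyshot and remove the local copies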
        self.upload_images(photosets, username)
Example #8
0
from xml.etree import ElementTree as ET
from flickrapi import FlickrAPI
import pyperclip
class keys:
    apikey = u"6ac162e31528ac235870856eadce38d1"
    apisecret = u"1c01580225bb447c"

print('Logging in to flickr')
flickr = FlickrAPI(keys.apikey, keys.apisecret)

# ------------------------------------------------------------------------------

flickr.authenticate_via_browser(perms='delete')

ID = "151892798@N03"

sets = flickr.photosets_getList(user_id=ID, format='etree')

def embed(album_id):
    ALBUM = flickr.photosets.getPhotos(user_id=ID, photoset_id=album_id)

    print('Product code: ', ALBUM[0].get('title'))
    total = int(ALBUM[0].get('total'))

    empty = ''
    for i in range(total):
        D = flickr.photos.getInfo(photo_id=ALBUM[0][i].get('id'))
        ID_SECRET = str(D[0].get('originalsecret'))
        title = ALBUM[0][i].get('title')
        title = title.replace("-"," ")
        title = title.replace("_", " ")
        title = title.replace("copy","")
Example #9
0
import os
import sys
import urllib
import libxml2

from flickrapi import FlickrAPI


class Offlickr:
    def __init__(self,
                 key,
                 secret,
                 uid,
                 httplib=None,
                 browser="lynx",
                 verbose=False):
        """Instantiates an Offlickr object
        An API key is needed, as well as an API secret and a user id.
        A browser can be specified to be used for authorizing the program
        to access the user account."""
        self.__flickrAPIKey = key
        self.__flickrSecret = secret
        self.__httplib = httplib
        # Get authentication token
        self.fapi = FlickrAPI(self.__flickrAPIKey, self.__flickrSecret)
        self.token = self.fapi.getToken(browser=browser)
        self.flickrUserId = uid
        self.verbose = verbose

    def __testFailure(self, rsp):
        """Returns whether the previous call was successful"""
        if rsp['stat'] == "fail":
            print "Error!"
            return True
        else:
            return False

    def getPhotoList(self, dateLo, dateHi):
        """Returns a list of photo given a time frame"""
        n = 0
        flickr_max = 500
        photos = []

        print "Retrieving list of photos"
        while True:
            if self.verbose:
                print "Requesting a page..."
            n = n + 1
            rsp = self.fapi.photos_search(
                api_key=self.__flickrAPIKey,
                auth_token=self.token,
                user_id=self.flickrUserId,
                per_page=str(flickr_max),  # Max allowed by Flickr
                page=str(n),
                min_upload_date=dateLo,
                max_upload_date=dateHi)
            if self.__testFailure(rsp):
                return None
            if rsp.photos[0]['total'] == '0':
                return None
            photos += rsp.photos[0].photo
            if self.verbose:
                print " %d photos so far" % len(photos)
            if len(photos) >= int(rsp.photos[0]['total']):
                break

        return photos

    def getPhotosetList(self):
        """Returns a list of photosets for a user"""

        rsp = self.fapi.photosets_getList(api_key=self.__flickrAPIKey,
                                          auth_token=self.token,
                                          user_id=self.flickrUserId)
        if self.__testFailure(rsp):
            return None
        return rsp.photosets[0].photoset

    def getPhotosetInfo(self, pid, method):
        """Returns a string containing information about a photoset (in XML)"""
        rsp = method(api_key=self.__flickrAPIKey,
                     auth_token=self.token,
                     photoset_id=pid)
        if self.__testFailure(rsp):
            return None
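        # parse the raw XML response and keep only the <photoset> element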
        doc = libxml2.parseDoc(rsp.xml)
        info = str(doc.xpathEval("/rsp/photoset")[0])
        doc.freeDoc()
        return info

    def getPhotoMetadata(self, pid):
        """Returns an array containing containing the photo metadata (as a string), and the format of the photo"""
        if self.verbose:
            print "Requesting metadata for photo %s" % pid
        rsp = self.fapi.photos_getInfo(api_key=self.__flickrAPIKey,
                                       auth_token=self.token,
                                       photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        metadata = doc.xpathEval("/rsp/photo")[0].serialize()
        doc.freeDoc()
        return [metadata, rsp.photo[0]['originalformat']]

    def getPhotoComments(self, pid):
        """Returns an XML string containing the photo comments"""
        if self.verbose:
            print "Requesting comments for photo %s" % pid
        rsp = self.fapi.photos_comments_getList(api_key=self.__flickrAPIKey,
                                                auth_token=self.token,
                                                photo_id=pid)
        if self.__testFailure(rsp):
            return None
        doc = libxml2.parseDoc(rsp.xml)
        comments = doc.xpathEval("/rsp/comments")[0].serialize()
        doc.freeDoc()
        return comments

    def getPhotoSizes(self, pid):
        """Returns a string with is a list of available sizes for a photo"""
        rsp = self.fapi.photos_getSizes(api_key=self.__flickrAPIKey,
                                        auth_token=self.token,
                                        photo_id=pid)
        if self.__testFailure(rsp):
            return None
        return rsp

    def getOriginalPhoto(self, pid):
        """Returns a URL which is the original photo, if it exists"""
        source = None
        rsp = self.getPhotoSizes(pid)
        if rsp is None:
            return None
        for s in rsp.sizes[0].size:
            if s['label'] == 'Original':
                source = s['source']
        return source

    def __downloadReportHook(self, count, blockSize, totalSize):
        if not self.__verbose:
            return
        p = 100 * count * blockSize / totalSize
        if (p > 100):
            p = 100
        print "\r %3d %%" % p,
        sys.stdout.flush()

    def downloadURL(self, url, target, filename, verbose=False):
        """Saves a photo in a file"""
        self.__verbose = verbose
        tmpfile = "%s/%s.TMP" % (target, filename)
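        # download to a temporary .TMP file first, then rename it into place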
        if self.__httplib == 'wget':
            cmd = 'wget -q -t 0 -T 120 -w 10 -c -O %s %s' % (tmpfile, url)
            os.system(cmd)
        else:
            urllib.urlretrieve(url,
                               tmpfile,
                               reporthook=self.__downloadReportHook)
        os.rename(tmpfile, "%s/%s" % (target, filename))