Example #1
0
 def upload(self, oid: str, file_like, metadata=None) -> OInfo:
     """Upload file_like to the existing object identified by oid.

     Returns the resulting OInfo. Raises CloudFileNotFoundError when oid
     looks like a path (starts with the separator) or when no object with
     that oid exists — upload never creates new objects.
     """
     # Guard against callers passing a path where an OID is expected.
     if oid.startswith(self.sep):
         msg = "Called upload with a path instead of an OID: %s" % oid
         raise CloudFileNotFoundError(msg)
     # The target must already exist; otherwise report not-found.
     if self.exists_oid(oid):
         return self._upload(oid, file_like, metadata)
     raise CloudFileNotFoundError(oid)
Example #2
0
        def __exit__(self, ty, ex, tb):  # pylint: disable=too-many-branches
            """Release the provider mutex, then translate any pending
            exception from the with-block into the Cloud* error hierarchy.

            Exceptions that match none of the clauses below reach the final
            handler and are passed; since __exit__ returns a falsy value,
            the original exception still propagates out of the with-block.
            """
            self.__box._mutex.__exit__(ty, ex, tb)

            if ex:
                try:
                    # Re-raise so each SDK error can be matched by type.
                    raise ex
                except (TimeoutError,):
                    self.__box.disconnect()
                    raise CloudDisconnectedError("disconnected on timeout")
                except BoxOAuthException as e:
                    self.__box.disconnect()
                    raise CloudTokenError("oauth fail %s" % e)
                except BoxNetworkException as e:
                    # BUG FIX: previously called self.__box.disconenct()
                    # (typo), which raised AttributeError and masked the
                    # real network error instead of disconnecting cleanly.
                    self.__box.disconnect()
                    raise CloudDisconnectedError("disconnected %s" % e)
                except BoxValueError:
                    raise CloudFileNotFoundError()
                except BoxAPIException as e:
                    # Map interesting status/code pairs; anything else is
                    # logged and left to propagate via the falsy return.
                    if e.status == 400 and e.code == 'folder_not_empty':
                        raise CloudFileExistsError()
                    if e.status == 404 and e.code == 'not_found':
                        raise CloudFileNotFoundError()
                    if e.status == 404 and e.code == 'trashed':
                        raise CloudFileNotFoundError()
                    if e.status == 405 and e.code == 'method_not_allowed':
                        raise PermissionError()
                    if e.status == 409 and e.code == 'item_name_in_use':
                        raise CloudFileExistsError()
                    if e.status == 400 and e.code == 'invalid_grant':
                        raise CloudTokenError()
                    log.exception("unknown box exception: \n%s", e)
                except CloudException:
                    raise
                except Exception:
                    pass  # this will not swallow the exception, because this is in a context manager
Example #3
0
    def mkdir(self, path) -> str:
        """Create a folder at path (idempotently) and return its oid.

        If a folder already exists at path its oid is returned; if a
        non-folder occupies path, CloudFileExistsError propagates. Handles
        losing a creation race by re-checking what now exists at path.
        """
        info = self.info_path(path)
        if info and info.otype == DIRECTORY:
            return info.oid
        log.debug("MKDIR ---------------- path=%s", path)
        with self._api() as client:  # gives us the client we can use in the exception handling block
            try:
                with self._api():  # only for exception translation inside the try
                    parent, base = self.split(path)
                    log.debug("MKDIR ---------------- parent=%s base=%s", parent, base)
                    parent_object: BoxItem = self._get_box_object(client, path=parent, object_type=DIRECTORY)
                    if parent_object is None:
                        raise CloudFileNotFoundError()
                    child_object: BoxFolder = parent_object.create_subfolder(base)
                    self.__cache.mkdir(path, child_object.object_id)
                    log.debug("MKDIR ---------------- path=%s oid=%s", path, child_object.object_id)

                    return child_object.object_id
            except CloudFileExistsError as e:
                # Lost a race: something now exists at path. If it turns out
                # to be a folder, treat the mkdir as a success.
                self.__cache.delete(path=path)
                try:
                    box_object = self._get_box_object(client, path=path, object_type=DIRECTORY, strict=False)
                except Exception:
                    raise e  # the re-check itself failed; report the original conflict
                if box_object is None or box_object.object_type != 'folder':
                    raise
                return box_object.object_id
            except Exception:
                # Any other failure invalidates whatever may be cached for path.
                self.__cache.delete(path=path)
                raise
Example #4
0
    def rename(self, oid, path) -> str:  # pylint: disable=too-many-branches
        """Rename/move the object identified by oid to path; return the new id.

        Renaming a folder over an empty folder deletes the target and
        retries. Raises CloudFileNotFoundError when oid or the new parent
        doesn't exist, CloudFileExistsError on a genuine conflict.
        """
        self.__cache.delete(path=path)
        try:
            with self._api() as client:
                box_object: BoxItem = self._get_box_object(client, oid=oid, object_type=NOTKNOWN, strict=False)  # todo: get object_type from cache
                if box_object is None:
                    self.__cache.delete(oid=oid)
                    raise CloudFileNotFoundError()
                info = self._box_get_oinfo(client, box_object)
                if info.path:
                    old_path = info.path
                else:
                    old_path = self._box_get_path(client, box_object)
                old_parent, _ignored_old_base = self.split(old_path)
                new_parent, new_base = self.split(path)
                if new_parent == old_parent:
                    # Same parent folder: a plain rename suffices.
                    try:
                        with self._api():
                            retval = box_object.rename(new_base)
                    except CloudFileExistsError:
                        if box_object.object_type == 'file':
                            raise
                        # are we renaming a folder over another empty folder?
                        box_conflict = self._get_box_object(client, path=path, object_type=NOTKNOWN, strict=False)  # todo: get type from cache

                        # should't happen... we just got a FEx error, and we're not moving
                        if box_conflict is None:  # pragma: no cover
                            raise
                        items = self._box_get_items(client, box_conflict, new_parent)
                        if box_conflict.object_type == 'folder' and len(items) == 0:
                            box_conflict.delete()
                        else:
                            raise
                        # Conflict cleared; retry the rename from scratch.
                        return self.rename(oid, path)
                else:
                    # Different parent: this is a move (optionally renaming too).
                    new_parent_object = self._get_box_object(client, path=new_parent, object_type=DIRECTORY, strict=False)
                    if new_parent_object is None:
                        raise CloudFileNotFoundError()
                    if new_parent_object.object_type != 'folder':
                        raise CloudFileExistsError()

                    retval = box_object.move(parent_folder=new_parent_object, name=new_base)
                self.__cache.rename(old_path, path)
                return retval.id
        except Exception:
            # Drop any stale cache entry for this oid before re-raising.
            self.__cache.delete(oid=oid)
            raise
Example #5
0
 def upload(self, oid, file_like, metadata=None) -> OInfo:
     """Replace the contents of the file identified by oid with file_like.

     Returns the updated OInfo. Raises CloudFileNotFoundError if no object
     with that oid exists, CloudFileExistsError if oid names a non-file.
     """
     with self._api() as client:
         target: BoxItem = self._get_box_object(client, oid=oid, object_type=FILE, strict=False)
         if target is None:
             raise CloudFileNotFoundError()
         if target.object_type != 'file':
             raise CloudFileExistsError()
         updated = target.update_contents_with_stream(file_like)
         return self._box_get_oinfo(client, updated)
Example #6
0
 def create(self, path, file_like, metadata=None) -> OInfo:
     """Create a new file at path from file_like and return its OInfo.

     Caches the new object id under path. Raises CloudFileNotFoundError
     when the parent folder does not exist.
     """
     with self._api() as client:
         parent, base = self.split(path)
         folder = self._get_box_object(client, path=parent, object_type=DIRECTORY)
         if folder is None:
             raise CloudFileNotFoundError()
         # TODO: implement preflight_check on the upload_stream() call
         uploaded: BoxFile = folder.upload_stream(file_stream=file_like, file_name=base)
         log.debug("caching id %s for file %s", uploaded.object_id, path)
         self.__cache.create(path, uploaded.object_id)
         return self._box_get_oinfo(client, uploaded, parent_path=parent)
Example #7
0
 def download(self, oid, file_like):
     """Stream the file identified by oid into file_like; return its OInfo.

     Raises CloudFileNotFoundError when the API reports no such file.
     """
     result = self._api('files_download', oid)
     if not result:
         raise CloudFileNotFoundError()
     meta, content = result
     # Copy the response body in provider-sized chunks.
     for chunk in content.iter_content(self.upload_block_size):
         file_like.write(chunk)
     return OInfo(
         otype=FILE,
         oid=oid,
         hash=meta.content_hash,
         path=meta.path_display,
         size=meta.size,
         mtime=self._mtime_from_metadata(meta),
     )
Example #8
0
 def _unsafe_get_box_object_from_path(self, client: Client,  # pylint: disable=too-many-locals
                                      path: str,
                                      object_type: OType,
                                      strict: bool,
                                      use_cache: bool) -> Optional[BoxItem]:
     """Resolve path to a BoxItem by locating it inside its parent folder.

     NOTE(review): "_unsafe" presumably means the caller must already hold
     the _api/exception-translation context — confirm against callers.
     Returns None when the parent doesn't exist. Raises
     CloudFileNotFoundError when the parent exists but the child doesn't,
     and CloudFileExistsError when the parent is not a folder or a strict
     type check fails.
     """
     assert isinstance(client, Client)
     assert object_type in (FILE, DIRECTORY)
     if path in ('/', ''):  # pragma: no cover
         # no cover because the tests always use a test root
         root: BoxItem = client.root_folder()
         root = self._unsafe_box_object_populate(client, root)
         return root
     if use_cache:
         cached_oid = self.__cache.get_oid(path)
         if cached_oid:
             cached_type = self.__cache.get_type(path=path) or NOTKNOWN
             return self._get_box_object(client, oid=cached_oid, object_type=cached_type, strict=strict, use_cache=use_cache)
     parent, base = self.split(path)
     cached_parent_oid = None
     if use_cache:
         cached_parent_oid = self.__cache.get_oid(parent)
     parent_object: Optional[BoxFolder]
     if cached_parent_oid is not None:
         parent_object = self._get_box_object(client, oid=cached_parent_oid, object_type=DIRECTORY, strict=strict)
     else:
         parent_object = self._get_box_object(client, path=parent, object_type=DIRECTORY, strict=strict)
         if parent_object:
             self.__cache.set_oid(parent, parent_object.object_id, DIRECTORY)
     if not parent_object:
         return None
     if parent_object.object_type != 'folder':
         raise CloudFileExistsError
     # First check the item_collection that came with the parent object:
     # cheap, but it may not include recently created children.
     collection = parent_object.item_collection
     collection_entries = list(collection['entries'])
     entry, found_type = self.__look_for_name_in_collection_entries(client, base, collection_entries, object_type,
                                                                    strict)
     if not entry:
         start = time.monotonic()
         # the next line is very slow for big folders.
         # limit=5000 speeds it up because it lowers the number of pages
         # Is there a way to confirm the non-existence of a file that doesn't involve
         # getting every item in the parent's folder? maybe limiting the fields would speed this up...
         entries = self._box_get_items(client, parent_object, parent)
         log.debug("done getting %s, %s", parent, time.monotonic() - start)
         entry, found_type = self.__look_for_name_in_collection_entries(client, base, entries, object_type, strict)
     if not entry:
         raise CloudFileNotFoundError()
     if strict and found_type != object_type:
         raise CloudFileExistsError()
     return self._get_box_object(client, oid=entry.object_id, object_type=found_type, strict=strict)
Example #9
0
    def listdir(self, oid) -> Generator[DirInfo, None, None]:
        """Yield a DirInfo for each child of the folder identified by oid.

        Raises CloudFileNotFoundError when oid does not name an existing
        folder.
        """
        with self._api() as client:
            folder = self._get_box_object(client, oid=oid, object_type=DIRECTORY)
            if folder is None:
                raise CloudFileNotFoundError()
            parent_path = self._box_get_path(client, folder)
            # Don't use folder.item_collection['entries'] here: the SDK
            # caches it, so newly created children could be missing.
            children: List[BoxItem] = self._box_get_items(client, folder, parent_path, page_size=self._listdir_page_size)
        for child in children:
            info = None
            with self._api() as client:
                # Apparently, get_box_object by path returns dicts and by oid returns objects?
                if type(child) is dict:
                    raise NotImplementedError
                info = self._box_get_dirinfo(client, child, parent_path)
            if info is not None:
                yield info
Example #10
0
    def _get_parent_id(self, path, use_cache=False):
        """Return the oid of path's parent folder, or None for an empty path.

        With use_cache=True a cached id is returned without refreshing.
        Raises CloudFileNotFoundError when the parent does not exist.
        """
        if not path:
            return None

        parent = self.split(path)[0]

        # Splitting made no progress: we're at the root (self-parenting).
        if parent == path:
            return self._cached_id(parent)

        oid = self._cached_id(parent)
        if not (use_cache and oid):
            # Refresh the parent path from the provider: it may have been
            # renamed, or its case may differ from what we were given.
            info = self.info_path(parent)
            if not info:
                raise CloudFileNotFoundError("parent %s must exist" % parent)
            # Looking up by the refreshed path also re-caches it.
            oid = self._cached_id(info.path)
        return oid
Example #11
0
 def download(self, oid, file_like):
     """Write the contents of the file identified by oid into file_like.

     Raises CloudFileNotFoundError when no object with that oid exists.
     """
     with self._api() as client:
         source: BoxItem = self._get_box_object(client, oid=oid, object_type=FILE)
         if source is None:
             raise CloudFileNotFoundError()
         source.download_to(writeable_stream=file_like)
Example #12
0
    def _events(self, cursor, path=None, recursive=True, save_cursor=True):  # pylint: disable=too-many-branches, too-many-statements, too-many-locals
        """Yield Event objects for changes under path (or the root) from cursor.

        Deletions need special handling because dropbox doesn't report the
        id of a deleted entry; it is reconstructed from the revision
        history. Updates self.__cursor when save_cursor is true and the
        result carries a cursor.
        """
        if path and path != "/":
            info = self.info_path(path)
            if not info:
                raise CloudFileNotFoundError(path)
            oid = info.oid
        else:
            oid = self._root_id

        for res in _FolderIterator(self._api,
                                   oid,
                                   recursive=recursive,
                                   cursor=cursor):
            exists = True

            log.debug("event %s", res)

            if self._is_rtmp(res.path_display):
                continue

            if isinstance(res, files.DeletedMetadata):
                # dropbox doesn't give you the id that was deleted
                # we need to get the ids of every revision
                # then find out which one was the latest before the deletion time
                # then get the oid for that

                otype = FILE
                try:
                    revs = self._api('files_list_revisions',
                                     res.path_lower,
                                     limit=10)
                except NotAFileError:
                    # No revision history: the deleted entry was a folder.
                    oid = None
                    otype = DIRECTORY

                if otype == FILE:
                    if revs is None:
                        # dropbox will give a 409 conflict if the revision history was deleted
                        # instead of raising an error, this gets converted to revs==None
                        log.info("revs is none for %s %s", oid, path)
                        continue

                    log.debug("revs %s", revs)
                    deleted_time = revs.server_deleted
                    if deleted_time is None:  # not really sure why this happens, but this event isn't useful without it
                        log.error("revs %s has no deleted time?", revs)
                        continue
                    # Pick the latest revision at or before the deletion time.
                    latest_time = None
                    for ent in revs.entries:
                        assert ent.server_modified is not None
                        if ent.server_modified <= deleted_time and \
                                (latest_time is None or ent.server_modified >= latest_time):
                            oid = ent.id
                            latest_time = ent.server_modified
                    if not oid:
                        log.error(
                            "skipping deletion %s, because we don't know the oid",
                            res)
                        continue

                exists = False
                ohash = None
            elif isinstance(res, files.FolderMetadata):
                otype = DIRECTORY
                ohash = None
                oid = res.id
            else:
                otype = FILE
                ohash = res.content_hash
                oid = res.id

            path = res.path_display
            event = Event(otype, oid, path, ohash, exists, time.time())
            yield event
            if save_cursor and getattr(res, "cursor", False):
                self.__cursor = res.cursor
Example #13
0
    def _real_api(self, client, mutex, method, *args, **kwargs):  # pylint: disable=too-many-branches, too-many-statements
        """Invoke client.<method>(*args, **kwargs) under mutex, translating
        dropbox SDK exceptions into provider-neutral Cloud* errors.

        Raises CloudDisconnectedError when there is no client or the
        connection drops, CloudTokenError on auth failure, and the various
        CloudFileNotFound/Exists/Cursor/Temporary errors depending on the
        specific ApiError; an unrecognized ApiError becomes CloudException.
        """
        log.debug("_api: %s (%s)", method, debug_args(args, kwargs))

        with mutex:
            if not client:
                raise CloudDisconnectedError("currently disconnected")

            try:
                return getattr(client, method)(*args, **kwargs)
            except exceptions.AuthError:
                self.disconnect()
                raise CloudTokenError()
            except exceptions.ApiError as e:
                inside_error: Union[files.LookupError, files.WriteError]

                if isinstance(e.error,
                              (files.ListFolderError, files.GetMetadataError,
                               files.ListRevisionsError)):
                    if e.error.is_path() and isinstance(
                            e.error.get_path(), files.LookupError):
                        inside_error = e.error.get_path()
                        if inside_error.is_malformed_path():
                            log.debug(
                                'Malformed path when executing %s(%s %s) : %s',
                                *debug_args(method, args, kwargs, e))
                            raise CloudFileNotFoundError(
                                'Malformed path when executing %s(%s)' %
                                debug_args(method, kwargs))
                        if inside_error.is_not_found():
                            log.debug('File not found %s(%s %s) : %s',
                                      *debug_args(method, args, kwargs, e))
                            raise CloudFileNotFoundError(
                                'File not found when executing %s(%s)' %
                                debug_args(method, kwargs))
                        if inside_error.is_not_folder():
                            log.debug(
                                'Expected folder is actually a file when executing %s(%s %s) : %s',
                                *debug_args(method, args, kwargs, e))
                            raise CloudFileExistsError(
                                'Expected folder is actually a file when executing %s(%s %s)'
                                % debug_args(method, args, kwargs))

                if isinstance(e.error, sharing.SharedFolderAccessError):
                    raise CloudFileNotFoundError(str(e))

                if isinstance(e.error, files.UploadError):
                    if e.error.is_path() and isinstance(
                            e.error.get_path(), files.UploadWriteFailed):
                        inside_error = e.error.get_path()
                        write_error = inside_error.reason
                        if write_error.is_insufficient_space():
                            log.debug('out of space %s(%s %s) : %s',
                                      *debug_args(method, args, kwargs, e))
                            raise CloudOutOfSpaceError(
                                'Out of space when executing %s(%s)' %
                                debug_args(method, kwargs))
                        if write_error.is_conflict():
                            raise CloudFileExistsError(
                                'Conflict when executing %s(%s)' %
                                debug_args(method, kwargs))

                if isinstance(e.error, files.DownloadError):
                    if e.error.is_path() and isinstance(
                            e.error.get_path(), files.LookupError):
                        inside_error = e.error.get_path()
                        if inside_error.is_not_found():
                            raise CloudFileNotFoundError(
                                "Not found when executing %s(%s)" %
                                debug_args(method, kwargs))

                if isinstance(e.error, files.DeleteError):
                    if e.error.is_path_lookup():
                        inside_error = e.error.get_path_lookup()
                        if inside_error.is_not_found():
                            log.debug('file not found %s(%s %s) : %s',
                                      *debug_args(method, args, kwargs, e))
                            raise CloudFileNotFoundError(
                                'File not found when executing %s(%s)' %
                                debug_args(method, kwargs))

                if isinstance(e.error, files.RelocationError):
                    if e.error.is_from_lookup():
                        inside_error = e.error.get_from_lookup()
                        if inside_error.is_not_found():
                            log.debug('file not found %s(%s %s) : %s',
                                      *debug_args(method, args, kwargs, e))
                            raise CloudFileNotFoundError(
                                'File not found when executing %s(%s,%s)' %
                                debug_args(method, args, kwargs))
                    if e.error.is_to():
                        inside_error = e.error.get_to()
                        if inside_error.is_conflict():
                            raise CloudFileExistsError(
                                'File already exists when executing %s(%s)' %
                                debug_args(method, kwargs))

                    if e.error.is_duplicated_or_nested_paths():
                        raise CloudFileExistsError(
                            'Duplicated or nested path %s(%s)' %
                            debug_args(method, kwargs))

                if isinstance(e.error, files.CreateFolderError):
                    if e.error.is_path() and isinstance(
                            e.error.get_path(), files.WriteError):
                        inside_error = e.error.get_path()
                        if inside_error.is_conflict():
                            raise CloudFileExistsError(
                                'File already exists when executing %s(%s)' %
                                debug_args(method, kwargs))

                if isinstance(e.error, files.ListFolderContinueError):
                    # all list-folder-continue errors should cause a cursor reset
                    # these include the actual "is_reset" and, of course a is_path (fnf)
                    # and also is_other which can secretly contain a reset as well
                    raise CloudCursorError("Cursor reset request")

                if isinstance(e.error, files.ListRevisionsError):
                    if e.error.is_path():
                        inside_error = e.error.get_path()
                        if inside_error.is_not_file():
                            raise NotAFileError(str(e))

                if isinstance(e.error, files.ListFolderLongpollError):
                    raise CloudCursorError(
                        "cursor invalidated during longpoll")

                raise CloudException(
                    "Unknown exception when executing %s(%s,%s): %s" %
                    debug_args(method, args, kwargs, e))
            except (exceptions.InternalServerError, exceptions.RateLimitError,
                    requests.exceptions.ReadTimeout):
                raise CloudTemporaryError()
            except dropbox.stone_validators.ValidationError as e:
                log.debug("f*ed up api error: %s", e)
                # The SDK validates ids/paths client-side; treat the two
                # known "bad identifier" messages as file-not-found.
                if "never created" in str(e):
                    raise CloudFileNotFoundError()
                if "did not match" in str(e):
                    log.warning("oid error %s", e)
                    raise CloudFileNotFoundError()
                raise
            except requests.exceptions.ConnectionError as e:
                log.error('api error handled exception %s:%s', "dropbox",
                          e.__class__.__name__)
                self.disconnect()
                raise CloudDisconnectedError()
Example #14
0
    def listdir(self, oid) -> Generator[GDriveInfo, None, None]:  # pylint: disable=too-many-branches, too-many-locals
        """Yield a GDriveInfo for each non-trashed child of the folder oid.

        Pages through files.list; at the root the query also includes items
        shared with the user. Raises CloudFileNotFoundError when oid itself
        is gone (as opposed to merely empty or inaccessible).
        """
        if oid == self._root_id:
            query = f"'{oid}' in parents or sharedWithMe"
        else:
            query = f"'{oid}' in parents"
        page_token = None
        done = False
        while not done:
            try:
                res = self._api(
                    'files',
                    'list',
                    q=query,
                    spaces='drive',
                    fields=
                    'files(id, md5Checksum, parents, name, mimeType, trashed, shared, \
                                headRevisionId, capabilities, appProperties, modifiedTime, size), nextPageToken',
                    pageToken=page_token,
                    includeItemsFromAllDrives=True,
                    supportsAllDrives=True)
                page_token = res.get('nextPageToken', None)
                if not page_token:
                    done = True
            except CloudFileNotFoundError:
                # Only propagate when the oid itself no longer exists.
                if self._info_oid(oid):
                    return
                log.debug("listdir oid gone %s", oid)
                raise

            if not res or not res['files']:
                # An empty result from an existing folder just means "no children".
                if self.exists_oid(oid):
                    return
                raise CloudFileNotFoundError(oid)

            log.debug("listdir got res %s", res)

            for ent in res['files']:
                fid = ent['id']
                if fid == oid:
                    continue
                pids = ent.get('parents', [])
                if not pids and ent.get('shared'):
                    pids = self._resolve_missing_parent(ent)
                fhash = ent.get('md5Checksum')
                name = ent['name']
                shared = ent['shared']
                readonly = not ent['capabilities']['canEdit']
                trashed = ent.get('trashed', False)
                mtime = ent.get('modifiedTime')
                mtime = mtime and self._parse_time(mtime)
                size = int(ent.get('size', 0))
                if ent.get('mimeType') == self._folder_mime_type:
                    otype = DIRECTORY
                else:
                    otype = FILE
                if not trashed:
                    yield GDriveInfo(otype,
                                     fid,
                                     fhash,
                                     None,
                                     shared=shared,
                                     readonly=readonly,
                                     pids=pids,
                                     name=name,
                                     mtime=mtime,
                                     size=size)
Example #15
0
    def rename(self, oid, path):  # pylint: disable=too-many-locals, too-many-branches
        """Rename/move the object identified by oid to path; return oid.

        Renaming a file over itself (e.g. to change case) is allowed;
        renaming over an empty folder deletes that folder first. Raises
        CloudFileNotFoundError when oid or path's parent doesn't exist and
        CloudFileExistsError on a conflicting target.
        """
        # Use cache to get parent id, no need to hit info_path twice
        possible_conflict = self.info_path(path)
        pid = self._get_parent_id(path, use_cache=True)

        add_pids = [pid]
        if pid == 'root':  # pragma: no cover
            # cant ever get hit from the tests due to test root
            add_pids = [self._root_id]

        info = self._info_oid(oid)
        if info is None:
            log.debug("can't rename, oid doesn't exist %s", debug_sig(oid))
            raise CloudFileNotFoundError(oid)
        remove_pids = info.pids
        old_path = info.path

        _, name = self.split(path)
        appProperties = self._prep_app_properties(pid)
        body = {'name': name, 'appProperties': appProperties}

        if possible_conflict:
            if FILE in (info.otype, possible_conflict.otype):
                if possible_conflict.oid != oid:  # it's OK to rename a file over itself, frex, to change case
                    raise CloudFileExistsError(path)
            else:
                if possible_conflict.oid != oid:
                    try:
                        next(self.listdir(possible_conflict.oid))
                        raise CloudFileExistsError(
                            "Cannot rename over non-empty folder %s" % path)
                    except StopIteration:
                        # Folder is empty, rename over it no problem
                        if possible_conflict.oid != oid:  # delete the target if we're not just changing case
                            self.delete(possible_conflict.oid)

        if not old_path:
            # Path not cached for this oid: recover it from the id map.
            for cpath, coid in list(self._ids.items()):
                if coid == oid:
                    old_path = cpath

        if add_pids == remove_pids:
            # Parent unchanged: send empty strings to skip reparenting.
            add_pids_str = ""
            remove_pids_str = ""
        else:
            add_pids_str = ",".join(add_pids)
            remove_pids_str = ",".join(remove_pids)

        self._api('files',
                  'update',
                  body=body,
                  fileId=oid,
                  addParents=add_pids_str,
                  removeParents=remove_pids_str,
                  fields='id')

        if old_path:
            # TODO: this will break if the kids are cached but not the parent folder, I'm not convinced that can
            #   actually be the case at this point in the code, so, no need to fix until that can be established
            for cpath, coid in list(self._ids.items()):
                relative = self.is_subpath(old_path, cpath)
                if relative:
                    new_cpath = self.join(path, relative)
                    self._ids.pop(cpath)
                    self._ids[new_cpath] = coid

        log.debug("renamed %s -> %s", debug_sig(oid), body)

        return oid
Example #16
0
    def _api(self, resource, method, *args, **kwargs):  # pylint: disable=arguments-differ, too-many-branches, too-many-statements
        """Execute a google-api-client call and translate its failures into
        provider-neutral Cloud* errors.

        resource/method select e.g. client.files().list(...); the special
        resource 'media' takes the request object as the first positional
        arg. Raises CloudDisconnectedError, CloudTokenError,
        CloudOutOfSpaceError, CloudFileExists/NotFoundError, PermissionError
        or CloudTemporaryError depending on the HTTP status and reason.
        """
        if not self._client:
            raise CloudDisconnectedError("currently disconnected")

        with self._mutex:
            try:
                if resource == 'media':
                    res = args[0]
                    args = args[1:]
                else:
                    res = getattr(self._client, resource)()

                meth = getattr(res, method)(*args, **kwargs)

                # Media/download requests are returned unexecuted; everything
                # else is executed here.
                if resource == 'media' or (resource == 'files'
                                           and method == 'get_media'):
                    ret = meth
                else:
                    ret = meth.execute()
                log.debug("api: %s (%s) -> %s", method,
                          debug_args(args, kwargs), ret)

                return ret
            except SSLError as e:
                if "WRONG_VERSION" in str(e):
                    # httplib2 used by google's api gives this weird error for no discernable reason
                    raise CloudTemporaryError(str(e))
                raise
            except google.auth.exceptions.RefreshError:
                self.disconnect()
                raise CloudTokenError("refresh error")
            except HttpError as e:
                log.debug("api: %s (%s) -> %s", method,
                          debug_args(args, kwargs), e.resp.status)
                if str(e.resp.status) == '416':
                    raise GDriveFileDoneError()

                if str(e.resp.status) == '413':
                    raise CloudOutOfSpaceError('Payload too large')

                if str(e.resp.status) == '409':
                    raise CloudFileExistsError('Another user is modifying')

                if str(e.resp.status) == '404':
                    raise CloudFileNotFoundError(
                        'File not found when executing %s.%s(%s)' %
                        debug_args(resource, method, kwargs))

                reason = self._get_reason_from_http_error(e)

                if str(e.resp.status) == '403' and str(
                        reason) == 'storageQuotaExceeded':
                    raise CloudOutOfSpaceError("Storage storageQuotaExceeded")

                if str(e.resp.status) == '401':
                    self.disconnect()
                    raise CloudTokenError("Unauthorized %s" % reason)

                if str(e.resp.status) == '403' and str(
                        reason) == 'parentNotAFolder':
                    raise CloudFileExistsError("Parent Not A Folder")

                if str(e.resp.status) == '403' and str(
                        reason) == 'insufficientFilePermissions':
                    raise PermissionError("PermissionError")

                if (str(e.resp.status) == '403' and reason in (
                        'userRateLimitExceeded', 'rateLimitExceeded', 'dailyLimitExceeded')) \
                        or str(e.resp.status) == '429':
                    raise CloudTemporaryError("rate limit hit")

                # At this point, _should_retry_response() returns true for error codes >=500, 429, and 403 with
                #  the reason 'userRateLimitExceeded' or 'rateLimitExceeded'. 403 without content, or any other
                #  response is not retried. We have already taken care of some of those cases above, but we call this
                #  below to catch the rest, and in case they improve their library with more conditions. If we called
                #  meth.execute() above with a num_retries argument, all this retrying would happen in the google api
                #  library, and we wouldn't have to think about retries.
                should_retry = _should_retry_response(e.resp.status, e.content)
                if should_retry:
                    raise CloudTemporaryError("unknown error %s" % e)
                log.error("Unhandled %s error %s", e.resp.status, reason)
                raise
            except (TimeoutError, HttpLib2Error):
                self.disconnect()
                raise CloudDisconnectedError("disconnected on timeout")
            except ConnectionResetError:
                raise CloudTemporaryError(
                    "An existing connection was forcibly closed by the remote host"
                )