Code example #1
def stat(self, filename):
    """
    Returns:
      file info for a finalized file with given filename."""
    blob_key = files_blobstore.get_blob_key(filename)
    file_info = datastore.Get(
        datastore.Key.from_path(api_blobstore.BLOB_INFO_KIND, str(blob_key),
                                namespace=''))
    if file_info is None:
        # raise_error raises the error itself, so this branch never returns.
        raise raise_error(
            file_service_pb.FileServiceErrors.EXISTENCE_ERROR_MEATADATA_NOT_FOUND,
            filename)
    return file_info
Code example #2
 def stat(self, filename):
   """
   Returns:
     file info for a finalized file with given filename."""
   blob_key = files_blobstore.get_blob_key(filename)
   file_info = datastore.Get(
       datastore.Key.from_path(api_blobstore.BLOB_INFO_KIND, str(blob_key),
           namespace=''))
   if file_info is None:
     raise raise_error(
         file_service_pb.FileServiceErrors.EXISTENCE_ERROR_MEATADATA_NOT_FOUND,
         filename)
   return file_info
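This stub-level stat() backs the public Files API. A minimal usage sketch through that API, assuming the module-level files.stat() wrapper and a finalized blobstore file (the payload is illustrative):

from google.appengine.api import files

filename = files.blobstore.create(mime_type='text/plain')
with files.open(filename, 'a') as f:
    f.write('hello')
files.finalize(filename)

# files.stat() ends up in a stat() implementation like the one above and
# raises for missing or non-finalized files.
info = files.stat(filename)
print(info.st_size)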
Code example #3
def _write_blob(data, content_type=None, filename=None):
  """Creates a test blob and returns its BlobInfo."""
  kwargs = {}
  if content_type:
    kwargs['mime_type'] = content_type
  if filename:
    kwargs['_blobinfo_uploaded_filename'] = filename
  output_filename = files.blobstore.create(**kwargs)
  with files.open(output_filename, 'a') as outfile:
    outfile.write(data)
  files.finalize(output_filename)
  blob_key = files_blobstore.get_blob_key(output_filename)
  blob_info = blobstore.BlobInfo.get(blob_key)
  return blob_info
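A short sketch of how this test helper might be used; the payload and filename below are illustrative:

blob_info = _write_blob('some payload',
                        content_type='text/plain',
                        filename='payload.txt')
assert blob_info.size == len('some payload')
assert blob_info.content_type == 'text/plain'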
Code example #4
def delete(*filenames):
    """Permanently delete files.

  Delete on non-finalized/non-existent files is a no-op.

  Args:
    filenames: finalized file names as strings. filename should has format
      "/gs/bucket/filename" or "/blobstore/blobkey".

  Raises:
    InvalidFileNameError: Raised when any filename is not of valid format or
      not a finalized name.
    IOError: Raised if any problem occurs contacting the backend system.
  """

    from google.appengine.api.files import blobstore as files_blobstore
    from google.appengine.api.files import gs
    from google.appengine.ext import blobstore

    blobkeys = []

    for filename in filenames:
        if not isinstance(filename, str):
            raise InvalidArgumentError(
                'Filename should be a string, but is %s(%r)' %
                (filename.__class__.__name__, filename))
        if filename.startswith(files_blobstore._BLOBSTORE_DIRECTORY):
            __checkIsFinalizedName(filename)
            blobkey = files_blobstore.get_blob_key(filename)
            if blobkey:
                blobkeys.append(blobkey)
        elif filename.startswith(gs._GS_PREFIX):
            __checkIsFinalizedName(filename)
            blobkeys.append(blobstore.create_gs_key(filename))
        else:
            raise InvalidFileNameError(
                'Filename should start with /%s or /%s' %
                (files_blobstore._BLOBSTORE_DIRECTORY, gs._GS_PREFIX))

    try:
        blobstore.delete(blobkeys)
    except Exception as e:
        raise IOError('Blobstore failure.', e)
Code example #5
File: file.py  Project: KronnyEC/cliques
def delete(*filenames):
  """Permanently delete files.

  Deleting non-finalized or non-existent files is a no-op.

  Args:
    filenames: finalized file names as strings. Each filename should have
      the format "/gs/bucket/filename" or "/blobstore/blobkey".

  Raises:
    InvalidFileNameError: Raised when any filename is not of valid format or
      not a finalized name.
    IOError: Raised if any problem occurs contacting the backend system.
  """

  from google.appengine.api.files import blobstore as files_blobstore
  from google.appengine.api.files import gs
  from google.appengine.ext import blobstore

  blobkeys = []

  for filename in filenames:
    if not isinstance(filename, basestring):
      raise InvalidArgumentError('Filename should be a string, but is %s(%r)' %
                                 (filename.__class__.__name__, filename))
    if filename.startswith(files_blobstore._BLOBSTORE_DIRECTORY):
      __checkIsFinalizedName(filename)
      blobkey = files_blobstore.get_blob_key(filename)
      if blobkey:
        blobkeys.append(blobkey)
    elif filename.startswith(gs._GS_PREFIX):
      __checkIsFinalizedName(filename)
      blobkeys.append(blobstore.create_gs_key(filename))
    else:
      raise InvalidFileNameError('Filename should start with /%s or /%s' %
                                 (files_blobstore._BLOBSTORE_DIRECTORY,
                                 gs._GS_PREFIX))

  try:
    blobstore.delete(blobkeys)
  except Exception as e:
    raise IOError('Blobstore failure.', e)
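A hedged sketch of a call mixing both supported name formats; the blob key and bucket path below are placeholders, not real values:

delete('/blobstore/placeholder-blob-key',
       '/gs/my-bucket/reports/2013.csv')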
Code example #6
File: repository.py  Project: earthreader/ergae
def put_slot(key, iterable):
    # Stream the chunks into a new blobstore file, then swap the stored blob
    # for the slot at `key` inside a cross-group (XG) transaction.
    db_key = make_db_key(key)
    filename = create(mime_type='text/xml')
    size = 0
    with fopen(filename, 'ab') as f:
        for chunk in iterable:
            f.write(chunk)
            size += len(chunk)
    finalize(filename)
    blob_key = get_blob_key(filename)
    blob_info = BlobInfo.get(blob_key)
    assert blob_info.size == size, (
        'blob_info.size = {0!r}, size = {1!r}'.format(blob_info.size, size)
    )
    assert isinstance(blob_info, BlobInfo)
    now = datetime.datetime.utcnow()
    cache_key = make_cache_key(key)
    list_cache_key = make_cache_key(key[:-1])

    def txn():
        delete(cache_key, namespace='slot')
        delete(list_cache_key, namespace='list')
        slot = Slot.get(db_key)
        if slot is None:
            slot = Slot(
                depth=len(key),
                key=db_key,
                blob=blob_info,
                updated_at=now
            )
        else:
            assert isinstance(slot.blob, BlobInfo)
            slot.blob.delete()
            slot.blob = blob_info
            slot.updated_at = now
        slot.put()
        delete(list_cache_key, namespace='list')

    run_in_transaction_options(create_transaction_options(xg=True), txn)
    defer(push_to_dropbox, db_key, now)
Code example #7
File: repository.py  Project: whigg/ergae
def put_slot(key, iterable):
    db_key = make_db_key(key)
    filename = create(mime_type='text/xml')
    size = 0
    with fopen(filename, 'ab') as f:
        for chunk in iterable:
            f.write(chunk)
            size += len(chunk)
    finalize(filename)
    blob_key = get_blob_key(filename)
    blob_info = BlobInfo.get(blob_key)
    assert blob_info.size == size, (
        'blob_info.size = {0!r}, size = {1!r}'.format(blob_info.size, size))
    assert isinstance(blob_info, BlobInfo)
    now = datetime.datetime.utcnow()
    cache_key = make_cache_key(key)
    list_cache_key = make_cache_key(key[:-1])

    def txn():
        delete(cache_key, namespace='slot')
        delete(list_cache_key, namespace='list')
        slot = Slot.get(db_key)
        if slot is None:
            slot = Slot(depth=len(key),
                        key=db_key,
                        blob=blob_info,
                        updated_at=now)
        else:
            assert isinstance(slot.blob, BlobInfo)
            slot.blob.delete()
            slot.blob = blob_info
            slot.updated_at = now
        slot.put()
        delete(list_cache_key, namespace='list')

    run_in_transaction_options(create_transaction_options(xg=True), txn)
    defer(push_to_dropbox, db_key, now)
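A sketch of driving put_slot(); make_cache_key(key[:-1]) suggests key is a sequence of path segments, so the call below assumes that (segments and payload are illustrative):

put_slot(['feeds', 'example.xml'],
         ['<feed xmlns="http://www.w3.org/2005/Atom">', '</feed>'])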
Code example #8
File: repository.py  Project: earthreader/ergae
def pull_from_dropbox():
    # Walk the Dropbox delta feed and mirror each changed path into a Slot
    # entity; file bodies go to blobstore, and small ones are also cached.
    client = get_dropbox_client()
    if client is None:
        return
    path_prefix = get_config('dropbox_path')
    cursor = get_config('dropbox_delta_cursor')
    last_sync = get_config('dropbox_last_sync') or datetime.datetime(2000, 1, 1)
    first = cursor is None
    if first:
        set_config('dropbox_sync_progress', (0, 1))
    entries = []
    while True:
        result = client.delta(cursor, path_prefix=path_prefix.rstrip('/'))
        entries.extend(
            (path, metadata)
            for path, metadata in result['entries']
        )
        cursor = result['cursor']
        set_config('dropbox_delta_cursor', cursor)
        if not result['has_more']:
            break
    for i, (path, metadata) in enumerate(entries):
        repo_key = path[len(path_prefix):].split('/')
        cache_key = make_cache_key(repo_key)
        list_cache_key = make_cache_key(repo_key[:-1])
        if not repo_key or any(not part for part in repo_key):
            continue
        db_key = make_db_key(repo_key)
        if metadata:
            rev = metadata['rev']
            modified_at = parse_rfc2822(metadata['modified'])
            last_sync = max(modified_at, last_sync)
            if metadata['is_dir']:
                blob_info = None
                cache_value = 'D'
            else:
                filename = create(mime_type='text/xml')
                cache_value = None
                cache_buffer = ['F']
                dst_size = 0
                with fopen(filename, 'ab') as dst:
                    for offset in xrange(0, metadata['bytes'],
                                         INCOMING_BYTES_LIMIT):
                        # Fetch the next window of at most
                        # INCOMING_BYTES_LIMIT bytes starting at `offset`.
                        src = client.get_file(path,
                                              rev=rev,
                                              start=offset,
                                              length=INCOMING_BYTES_LIMIT)
                        while True:
                            chunk = src.read(10240)
                            if chunk:
                                dst_size += len(chunk)
                                dst.write(chunk)
                                if dst_size < CACHE_BYTES_LIMIT:
                                    cache_buffer.append(chunk)
                            else:
                                break
                    if dst_size < CACHE_BYTES_LIMIT:
                        cache_value = ''.join(cache_buffer)
                        del cache_buffer
                finalize(filename)
                blob_key = get_blob_key(filename)
                blob_info = BlobInfo.get(blob_key)

            def txn():
                delete(cache_key, namespace='slot')
                delete(list_cache_key, namespace='list')
                slot = Slot.get(db_key)
                if slot is None:
                    slot = Slot(
                        depth=len(repo_key),
                        key=db_key,
                        blob=blob_info,
                        rev=rev,
                        updated_at=modified_at,
                        synced_at=modified_at
                    )
                else:
                    if slot.blob is not None:
                        slot.blob.delete()
                    slot.blob = blob_info
                    slot.rev = rev
                    slot.updated_at = modified_at
                    slot.synced_at = modified_at
                slot.put()
                if cache_value is not None:
                    put(cache_key, cache_value, namespace='slot')
                delete(list_cache_key, namespace='list')
            run_in_transaction_options(create_transaction_options(xg=True),
                                       txn)
        else:
            slot = Slot.get(db_key)
            if slot is not None:
                slot.delete()
                delete(cache_key, namespace='slot')
        delete(list_cache_key, namespace='list')
        if first:
            set_config('dropbox_sync_progress', (i + 1, len(entries)))
    set_config('dropbox_last_sync', last_sync)
Code example #9
File: repository.py  Project: whigg/ergae
def pull_from_dropbox():
    client = get_dropbox_client()
    if client is None:
        return
    path_prefix = get_config('dropbox_path')
    cursor = get_config('dropbox_delta_cursor')
    last_sync = get_config('dropbox_last_sync') or datetime.datetime(
        2000, 1, 1)
    first = cursor is None
    if first:
        set_config('dropbox_sync_progress', (0, 1))
    entries = []
    while True:
        result = client.delta(cursor, path_prefix=path_prefix.rstrip('/'))
        entries.extend(
            (path, metadata) for path, metadata in result['entries'])
        cursor = result['cursor']
        set_config('dropbox_delta_cursor', cursor)
        if not result['has_more']:
            break
    for i, (path, metadata) in enumerate(entries):
        repo_key = path[len(path_prefix):].split('/')
        cache_key = make_cache_key(repo_key)
        list_cache_key = make_cache_key(repo_key[:-1])
        if not repo_key or any(not part for part in repo_key):
            continue
        db_key = make_db_key(repo_key)
        if metadata:
            rev = metadata['rev']
            modified_at = parse_rfc2822(metadata['modified'])
            last_sync = max(modified_at, last_sync)
            if metadata['is_dir']:
                blob_info = None
                cache_value = 'D'
            else:
                filename = create(mime_type='text/xml')
                cache_value = None
                cache_buffer = ['F']
                dst_size = 0
                with fopen(filename, 'ab') as dst:
                    for offset in xrange(0, metadata['bytes'],
                                         INCOMING_BYTES_LIMIT):
                        # Fetch the next window of at most
                        # INCOMING_BYTES_LIMIT bytes starting at `offset`.
                        src = client.get_file(path,
                                              rev=rev,
                                              start=offset,
                                              length=INCOMING_BYTES_LIMIT)
                        while True:
                            chunk = src.read(10240)
                            if chunk:
                                dst_size += len(chunk)
                                dst.write(chunk)
                                if dst_size < CACHE_BYTES_LIMIT:
                                    cache_buffer.append(chunk)
                            else:
                                break
                    if dst_size < CACHE_BYTES_LIMIT:
                        cache_value = ''.join(cache_buffer)
                        del cache_buffer
                finalize(filename)
                blob_key = get_blob_key(filename)
                blob_info = BlobInfo.get(blob_key)

            def txn():
                delete(cache_key, namespace='slot')
                delete(list_cache_key, namespace='list')
                slot = Slot.get(db_key)
                if slot is None:
                    slot = Slot(depth=len(repo_key),
                                key=db_key,
                                blob=blob_info,
                                rev=rev,
                                updated_at=modified_at,
                                synced_at=modified_at)
                else:
                    if slot.blob is not None:
                        slot.blob.delete()
                    slot.blob = blob_info
                    slot.rev = rev
                    slot.updated_at = modified_at
                    slot.synced_at = modified_at
                slot.put()
                if cache_value is not None:
                    put(cache_key, cache_value, namespace='slot')
                delete(list_cache_key, namespace='list')

            run_in_transaction_options(create_transaction_options(xg=True),
                                       txn)
        else:
            slot = Slot.get(db_key)
            if slot is not None:
                slot.delete()
                delete(cache_key, namespace='slot')
        delete(list_cache_key, namespace='list')
        if first:
            set_config('dropbox_sync_progress', (i + 1, len(entries)))
    set_config('dropbox_last_sync', last_sync)
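pull_from_dropbox() takes no arguments and tracks its own cursor and progress in config, so a periodic job can simply enqueue it. A sketch assuming the deferred library (the scheduling itself is an assumption):

from google.appengine.ext import deferred

deferred.defer(pull_from_dropbox)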