Example #1
0
def files_shares(root, path_):
    """Create (or refresh) a public share link for the file at path_.

    Raises a 404 if the path does not resolve to a file (blob), and a 406
    if the requested expiration delay is out of range. Returns the link
    data used for later retrieval of the shared file.
    """
    tools.validate_root_or_abort(root)

    try:
        stored_object = database.get_stored_object(root, path_)
    except database.MissingNodeException:
        raise BasicError(404, E_FILE_NOT_FOUND)
    else:
        # Only blobs (files) can be shared; isinstance also covers the None
        # case, matching the check style of the other endpoints.
        if not isinstance(stored_object, models.BlobLink):
            raise BasicError(404, E_FILE_NOT_FOUND)

    # Validate the expiration delay (in days) before touching the database,
    # so that an invalid request leaves no side effects behind.
    expire_days = int(request.args.get('expire_days', '1'))
    if not (0 < expire_days <= 10000):
        raise BasicError(406, E_EXPIRE_DAYS(0, 10000))

    # Start by purging the table of all expired uploads.
    _purge_expired_links()

    # If we already have a reference to this file, no need to recreate one.
    db_ref = _find_ref_by_key(_make_file_ref(stored_object))
    if not db_ref:
        db_ref = _create_blob_ref(root, path_, stored_object)

    # Regardless of the fact that the reference previously existed, its expiry
    # is reset.
    db_ref.blob = stored_object
    db_ref.expires = datetime.utcnow() + timedelta(days=expire_days)
    g.db_session.commit()

    # Return the reference for this file, used for retrieval.
    return make_link_data(db_ref)
Example #2
0
def fileops_createfolder():
    """Create a new directory, mimicking Dropbox's /fileops/create_folder."""
    # Root and path data are passed as POST data rather than URL args.
    root = request.form['root']
    path_ = request.form['path']
    tools.validate_root_or_abort(root)
    tools.validate_path_or_abort(path_)

    # Every file operation is materialized by a new Commit object so that
    # the change history remains tracked.
    try:
        commit = database.create_commit(root)
        ref_node, new_node = database.copy_hierarchy(root, path_, commit.root)
    except database.MissingNodeException:
        raise BasicError(404, E_NON_EXISTING_DESTINATION_PATH)

    # Dropbox answers a 403 when the directory already exists: do the same.
    dirname = tools.split_path(path_)[-1]
    previous = ref_node and ref_node.sub_trees.get(dirname)
    if previous and not previous.is_deleted:
        raise BasicError(403, E_DIR_ALREADY_EXISTS)

    # Attach the freshly created directory and persist the commit.
    new_link = models.TreeLink(tree=models.Tree(), path=dirname)
    new_node.sub_trees[dirname] = new_link
    database.store_commit(root, commit)
    return metadata.make_metadata(root, path_, new_link)
Example #3
0
def commit_chunked_upload(root, path_):
    """Commit a previously chunk-uploaded file to its final location.

    Raises a 400 when the provided upload_id is unknown. On success the
    temporary upload file and its database record are removed, and the
    result of the underlying put operation is returned.
    """
    tools.validate_root_or_abort(root)

    # A missing object for /commit_chunked_upload gives a 400.
    upload_id = request.form['upload_id']
    db_upload = _find_existing_upload(upload_id)
    if not db_upload:
        raise BasicError(400, 'Unknown upload_id {0}'.format(upload_id))

    # Start by purging the table of all expired uploads.
    _purge_expired_uploads()

    # We retrieve a file-like object to the uploaded data, and use this as an
    # input stream to a regular file upload request.
    data_stream = file_store.retrieve_chunk_stream(upload_id)

    try:
        # Install a SHA1 hash calculating stream on top of an AES encrypting
        # stream: this computes the file's hash as it is encrypted and
        # written to disk.
        crypt_key = g.user.dbuser.password[:32]
        enc_stream = stream.AESEncryptionStream(data_stream, crypt_key)
        hash_stream = stream.ChecksumCalcStream(enc_stream)
        put_result = put.do_put(root, path_, hash_stream, hash_stream.hash,
                                enc_stream.IV)
    finally:
        # Always release the input stream, even when the store fails:
        # the original code leaked it on a do_put exception.
        data_stream.close()

    # Delete the temporary upload file.
    file_store.remove_chunked_upload(upload_id)

    # If we got here, it means that the storage is successful and the database
    # object can now be safely deleted.
    g.db_session.delete(db_upload)
    g.db_session.commit()
    return put_result
Example #4
0
def files_search(root, path_):
    """Search for files matching 'query' below the directory at path_."""
    tools.validate_root_or_abort(root)

    # Reject queries that are too short before doing any database work.
    query = request.args['query']
    if len(query) < 3:
        raise BasicError(400, E_QUERY_LEN(3))

    # Resolve the object acting as the root of the search; it must exist
    # and be a directory.
    try:
        search_root = database.get_stored_object(root, path_)
    except database.MissingNodeException:
        raise BasicError(404, E_SEARCH_DIR_NOT_FOUND)
    if not isinstance(search_root, models.TreeLink):
        raise BasicError(404, E_SEARCH_PATH_NOT_A_DIR)

    # Maximum number of results to fetch (up to 1000).
    file_limit = int(request.args.get('file_limit', '1000'))
    if not (0 < file_limit <= 1000):
        raise BasicError(406, E_FILE_LIMIT(0, 1000))

    # Same URL parameters as the metadata call, except that listing is
    # explicitly disabled.
    kwargs = {
        'list': False,
        'include_deleted': get_boolean_arg(request.args, 'include_deleted')
    }

    # Remark: we cannot use flask.jsonify here (through our usual api_endpoint
    # decorator), see http://flask.pocoo.org/docs/security/#json-security.
    matches = list(_gen_metadata(search_root, root, path_, query, **kwargs))
    return json.dumps(matches), 200, {'content-type': 'application/json'}
Example #5
0
def files_revisions(root, path_):
    """List up to 'rev_limit' revisions of the file at path_."""
    tools.validate_root_or_abort(root)

    # The requested object must exist and be a file (blob).
    try:
        head = database.get_stored_object(root, path_)
    except database.MissingNodeException:
        raise BasicError(404, E_FILE_NOT_FOUND)
    if not isinstance(head, models.BlobLink):
        raise BasicError(404, E_FILE_NOT_FOUND)

    # Maximum number of file revision to fetch (up to 1000).
    rev_limit = int(request.args.get('rev_limit', '10'))
    if not (0 < rev_limit <= 1000):
        raise BasicError(406, E_REV_LIMIT(0, 1000))

    def _revisions(node):
        # Walk the parent chain, most recent revision first.
        while node:
            yield node
            node = node.parent

    # Remark: we cannot use flask.jsonify here (through our usual api_endpoint
    # decorator), see http://flask.pocoo.org/docs/security/#json-security.
    selected = itertools.islice(_revisions(head), rev_limit)
    output = [metadata.make_metadata(root, path_, rev) for rev in selected]
    return json.dumps(output), 200, {'content-type': 'application/json'}
Example #6
0
def fileops_copy():
    """Copy a file or directory, mimicking Dropbox's /fileops/copy."""
    root, from_path, to_path = _get_params()
    tools.validate_root_or_abort(root)

    # Every copy operation is materialized by a new Commit object so that
    # the change history remains tracked.
    commit = database.create_commit(root)
    _, obj_copy = do_copy(commit.root, root, from_path, to_path)

    # Persist the commit and describe the newly created object.
    database.store_commit(root, commit)
    return metadata.make_metadata(root, to_path, obj_copy)
Example #7
0
File: get.py  Project: icecrime/datastore
def files_get(root, path_):
    """Download the file at path_, streaming its content to the client."""
    tools.validate_root_or_abort(root)

    # Resolve the database representation of the requested file, translating
    # a missing node into a 404.
    try:
        dbobject = database.get_stored_object(root, path_)
    except database.MissingNodeException:
        raise BasicError(404, E_FILE_NOT_FOUND)

    # Fetch the actual disk object from the file store and send it back as
    # a file, along with its metadata.
    fmdata = metadata.make_metadata(root, path_, dbobject)
    blob_stream = file_store.retrieve_blob_stream(root, dbobject.hash)
    return _send_file(blob_stream, path_, fmdata, dbobject.iv)
Example #8
0
def fileops_move():
    """Move a file or directory, mimicking Dropbox's /fileops/move."""
    root, from_path, to_path = _get_params()
    tools.validate_root_or_abort(root)

    # A move boils down to a copy followed by a delete of the source.
    commit = database.create_commit(root)
    _, obj_copy = copy.do_copy(commit.root, root, from_path, to_path)

    # Drop the source object from the new commit tree.
    source = database.get_stored_object(root, from_path, commit.root)
    delete.recursive_delete(source)

    # Persist the commit and describe the object at its new location.
    database.store_commit(root, commit)
    return metadata.make_metadata(root, to_path, obj_copy)
Example #9
0
File: put.py  Project: icecrime/datastore
def files_put(root, path_):
    """Store the request body as the file at path_, encrypting on the fly."""
    tools.validate_root_or_abort(root)

    # Chain an AES256 encrypting stream on the request: every posted byte is
    # encrypted as it is read, and the IV generated in the process is later
    # inserted into the database. The user's hashed password serves as the
    # encryption key: if a user believes it was compromised and changes his
    # password, all the files are made unusable.
    crypt_key = g.user.dbuser.password[:32]
    enc_stream = stream.install_stream(stream.AESEncryptionStream, crypt_key)

    # Chain a SHA1 hash calculating stream as well, so that the file's hash
    # is computed while it is written to disk.
    hash_stream = stream.install_stream(stream.ChecksumCalcStream)

    # Fall back on request.data when no raw request stream is available.
    data_stream = request.stream or request.data
    return do_put(root, path_, data_stream, hash_stream.hash, enc_stream.IV)
Example #10
0
def fileops_delete():
    """Delete a file or directory, mimicking Dropbox's /fileops/delete."""
    root, path_ = _get_params()
    tools.validate_root_or_abort(root)
    commit = database.create_commit(root)

    # Resolve the stored object (either a blob or a tree link), translating
    # a missing node into a 404.
    try:
        target = database.get_stored_object(root, path_, commit.root)
    except database.MissingNodeException:
        raise BasicError(404, E_FILE_NOT_FOUND)

    # Deleting an object which is already deleted is an error as well.
    if target.is_deleted:
        raise BasicError(404, E_ALREADY_DELETED)

    # Recursively mark the object as deleted, then commit the transaction.
    recursive_delete(target)
    database.store_commit(root, commit)
    return metadata.make_metadata(root, path_, target)
Example #11
0
def files_metadata(root, path_):
    """Return the metadata of the object at path_, honoring client caching."""
    tools.validate_root_or_abort(root)

    try:
        stored_object = database.get_stored_object(root, path_)
    except database.MissingNodeException:
        raise BasicError(404, E_FILE_NOT_FOUND)

    # When the hash provided by the client compares equal to the one we have
    # just generated, answer a 304 (Not Modified) with no body.
    params = _get_url_params()
    metadata = make_metadata(root, path_, stored_object, **params)
    if request.args.get("hash") == metadata.get("hash", ""):
        return Response(status=304)

    # Little hack here: files_metadata cannot be decorated as a json api
    # endpoint because it may answer a 304 without any data. This tiny
    # decorated closure does the job when there is data to return.
    @decorators.api_endpoint
    def _json_metadata_return(result):
        return result

    return _json_metadata_return(metadata)
Example #12
0
def _get_params():
    """Extract and validate 'root' and 'path' from the POST form data."""
    # Root and path are carried as POST data rather than URL arguments.
    root, path_ = tools.get_params(request.form, 'root', 'path')
    tools.validate_root_or_abort(root)
    tools.validate_path_or_abort(path_)
    return root, path_
Example #13
0
 def get_response(self, environ):
     """Return an error response when self.root is invalid, else None.

     Implicitly returns None when validation succeeds, letting the caller
     proceed with normal request handling.
     """
     try:
         tools.validate_root_or_abort(self.root)
     # 'except X as e' works on Python 2.6+ and Python 3, unlike the
     # legacy 'except X, e' form which is a syntax error on Python 3.
     except BasicError as e:
         return e.format_response()
Example #14
0
def _get_params():
    """Extract and validate 'root', 'from_path' and 'to_path' form fields."""
    params = tools.get_params(request.form, 'root', 'from_path', 'to_path')
    root, from_path, to_path = params
    tools.validate_root_or_abort(root)
    tools.validate_path_or_abort(from_path)
    tools.validate_path_or_abort(to_path)
    return params