Example 1
def archive(project, job_id):
    """Redirects to the actual job archive.

    This is done via a redirect, so that templates offering a download link do
    not need to know the eventual storage backend URL. This makes them render
    faster, and the storage backend is only contacted when needed.
    """

    from pillar.api.file_storage_backends import default_storage_backend

    from . import ARCHIVE_JOB_STATES
    from .sdk import Job

    api = pillar_api()
    job = Job.find(job_id, api=api)

    if job['status'] not in ARCHIVE_JOB_STATES:
        raise wz_exceptions.PreconditionFailed('Job is not archived')

    archive_blob_name = job.archive_blob_name
    if not archive_blob_name:
        raise wz_exceptions.NotFound('Job has no archive')

    bucket = default_storage_backend(project._id)
    blob = bucket.get_blob(archive_blob_name)
    archive_url = blob.get_url(is_public=False)

    return redirect(archive_url, code=303)
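The function above relies on a few module-level imports. A minimal sketch of what they would look like; the exact import paths are assumptions based on a typical Pillar/Flamenco layout, not confirmed by the example:

# Assumed module-level imports (paths are guesses):
import werkzeug.exceptions as wz_exceptions    # PreconditionFailed, NotFound
from flask import redirect                     # issues the HTTP 303 response
from pillar.web.system_util import pillar_api  # assumed helper returning an SDK client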
Example 2
def storage(project_id: ObjectId) -> file_storage_backends.Bucket:
    """Return the storage bucket for this project.

    For now this returns a bucket in the default storage backend, since
    individual projects do not have a 'storage backend' setting (this is
    set per file, not per project).
    """

    return file_storage_backends.default_storage_backend(str(project_id))
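A hypothetical usage sketch for storage(), writing an in-memory payload through the returned bucket. It requires a configured Pillar application; the project id and blob name below are made up:

import io
from bson import ObjectId

bucket = storage(ObjectId('5a4b3c2d1e0f9a8b7c6d5e4f'))  # made-up project id
blob = bucket.blob('renders/preview.txt')                # hypothetical blob name
blob.create_from_file(io.BytesIO(b'hello'), file_size=5,
                      content_type='text/plain')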
Example 3
def upload_and_process(local_file: typing.Union[io.BytesIO, typing.BinaryIO],
                       uploaded_file: werkzeug.datastructures.FileStorage,
                       project_id: str):
    # Figure out the file size, as we need to pass this in explicitly to GCloud.
    # Otherwise it always uses os.fstat(file_obj.fileno()).st_size, which isn't
    # supported by a BytesIO object (even though it does have a fileno
    # attribute).
    if isinstance(local_file, io.BytesIO):
        file_size = len(local_file.getvalue())
    else:
        file_size = os.fstat(local_file.fileno()).st_size

    # Check the file size again, now that we know its size for sure.
    assert_file_size_allowed(file_size)

    # Create file document in MongoDB.
    file_id, internal_fname, status = create_file_doc_for_upload(
        project_id, uploaded_file)

    # Copy the file into storage.
    bucket = default_storage_backend(project_id)
    blob = bucket.blob(internal_fname)
    blob.create_from_file(local_file,
                          file_size=file_size,
                          content_type=uploaded_file.mimetype)

    log.debug(
        'Marking uploaded file id=%s, fname=%s, '
        'size=%i as "queued_for_processing"', file_id, internal_fname,
        file_size)
    update_file_doc(file_id,
                    status='queued_for_processing',
                    file_path=internal_fname,
                    length=blob.size,
                    content_type=uploaded_file.mimetype)

    log.debug('Processing uploaded file id=%s, fname=%s, size=%i', file_id,
              internal_fname, blob.size)
    process_file(bucket, file_id, local_file)

    # Local processing is done; close the local file so that it is removed.
    if local_file is not None:
        local_file.close()

    log.debug('Handled uploaded file id=%s, fname=%s, size=%i, status=%i',
              file_id, internal_fname, blob.size, status)

    # Status is 200 if the file already existed, and 201 if it was newly
    # created.
    # TODO: add a link to a thumbnail in the response.
    return dict(status='ok', file_id=str(file_id), status_code=status)
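The isinstance() check at the top exists because the GCloud client obtains the size via os.fstat(file_obj.fileno()), which a BytesIO cannot support. A self-contained sketch of that failure mode and the fallback used above:

import io
import os

buf = io.BytesIO(b'abc')
try:
    size = os.fstat(buf.fileno()).st_size  # fileno() itself raises for BytesIO
except io.UnsupportedOperation:
    size = len(buf.getvalue())             # fall back to the in-memory length
assert size == 3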
Example 4
def create_upload_zip(project_id: str, storage_path: str,
                      zip_path: str) -> str:
    """Uploads the ZIP file to the storage backend.

    Also stores the link to the ZIP in the job document.

    Returns the name of the storage blob the ZIP is stored in.
    """

    import itertools
    import zipfile
    import secrets
    import datetime

    from pillar.api.file_storage_backends import default_storage_backend

    log.info('Creating ZIP %s', zip_path)
    spath = pathlib.Path(storage_path)
    zpath = pathlib.Path(zip_path)

    with zipfile.ZipFile(zip_path, mode='w',
                         compression=zipfile.ZIP_DEFLATED) as outfile:
        for fpath in itertools.chain(spath.glob('*.gz'), spath.glob('*.json')):
            outfile.write(fpath, fpath.name)

    bucket = default_storage_backend(project_id)
    blob = bucket.blob(f'flamenco-jobs/{zpath.name}')
    while blob.exists():
        log.warning(
            'Blob %s already exists, changing filename to something more unique.',
            blob)
        today = datetime.date.today()
        uniquifier = f'{today:%Y-%m-%d}-{secrets.token_hex(4)}'
        new_zipname = f'{zpath.stem}-{uniquifier}{zpath.suffix}'
        blob = bucket.blob(f'flamenco-jobs/{new_zipname}')

    log.info('Uploading ZIP %s to %s', zpath, blob)

    file_size = zpath.stat().st_size
    with zpath.open(mode='rb') as stream_to_upload:
        blob.create_from_file(stream_to_upload,
                              file_size=file_size,
                              content_type='application/zip')

    return blob.name
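A hypothetical call, assuming the job's *.gz and *.json files were already written to the storage path (all values below are made up):

blob_name = create_upload_zip(
    '5a4b3c2d1e0f9a8b7c6d5e4f',        # made-up project id
    '/var/flamenco/storage/job-1234',  # made-up path holding the job's files
    '/tmp/job-1234.zip')               # local path where the ZIP is written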
Example 5
def remove_deleted_files():
    """Iterate over every _deleted file, remove file from the system and delete doc."""
    from pillar.api.file_storage_backends import default_storage_backend
    from pillar.api.file_storage_backends.local import LocalBlob

    file_collection = current_app.db()['files']
    files = file_collection.find({'_deleted': True})

    for f in files:
        bucket = default_storage_backend(f['project'])
        blob = bucket.get_blob(f['name'])
        if not isinstance(blob, LocalBlob):
            log.info('Skipping non-local blob %s', f['name'])
            continue
        file_abspath = blob.abspath()
        log.info('Removing %s from filesystem', file_abspath)
        try:
            file_abspath.unlink()
        except OSError:
            log.error('File %s not found', file_abspath)
        log.info('Deleting file document for file %s', f['_id'])
        file_collection.delete_one({'_id': f['_id']})
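The loop only reads a handful of fields from each file document; a sketch of the minimal shape it expects (the ids and name are made up, and real documents carry more fields):

from bson import ObjectId

doc = {
    '_id': ObjectId('5a4b3c2d1e0f9a8b7c6d5e4f'),
    'project': ObjectId('5a4b3c2d1e0f9a8b7c6d5e40'),
    'name': 'some-blob-name.png',  # the blob name within the project bucket
    '_deleted': True,
}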
Example 6
def after_inserting_project(project, db_user):
    from pillar.auth import UserClass

    project_id = project['_id']
    user_id = db_user['_id']

    # Create a project-specific admin group (with name matching the project id)
    result, _, _, status = current_app.post_internal('groups',
                                                     {'name': str(project_id)})
    if status != 201:
        log.error('Unable to create admin group for new project %s: %s',
                  project_id, result)
        return abort_with_error(status)

    admin_group_id = result['_id']
    log.debug('Created admin group %s for project %s', admin_group_id,
              project_id)

    # Assign the current user to the group
    db_user.setdefault('groups', []).append(admin_group_id)

    result, _, _, status = current_app.patch_internal(
        'users', {'groups': db_user['groups']}, _id=user_id)
    if status != 200:
        log.error(
            'Unable to add user %s as member of admin group %s for new project %s: %s',
            user_id, admin_group_id, project_id, result)
        return abort_with_error(status)
    log.debug('Made user %s member of group %s', user_id, admin_group_id)

    # Assign the group to the project with admin rights
    owner_user = UserClass.construct('', db_user)
    is_admin = authorization.is_admin(owner_user)
    world_permissions = ['GET'] if is_admin else []
    permissions = {
        'world': world_permissions,
        'users': [],
        'groups': [
            {'group': admin_group_id,
             'methods': DEFAULT_ADMIN_GROUP_PERMISSIONS[:]},
        ],
    }

    def with_permissions(node_type):
        copied = copy.deepcopy(node_type)
        copied['permissions'] = permissions
        return copied

    # Assign permissions to the project itself, as well as to the node_types
    project['permissions'] = permissions
    project['node_types'] = [
        with_permissions(node_type_group),
        with_permissions(node_type_asset),
        with_permissions(node_type_comment),
        with_permissions(node_type_texture),
        with_permissions(node_type_group_texture),
    ]

    # Admin users may use whatever URL they want; everyone else (and projects
    # without an explicit URL) gets a generated one.
    if not is_admin or not project.get('url'):
        if project.get('category', '') == 'home':
            project['url'] = 'home'
        else:
            project['url'] = 'p-{!s}'.format(project_id)

    # Initialize storage using the default specified in STORAGE_BACKEND
    default_storage_backend(str(project_id))

    # Commit the changes directly to MongoDB; a PUT is not allowed yet,
    # as the project doesn't have a valid permission structure.
    projects_collection = current_app.data.driver.db['projects']
    result = projects_collection.update_one(
        {'_id': project_id}, {'$set': remove_private_keys(project)})
    if result.matched_count != 1:
        log.error('Unable to update project %s: %s', project_id,
                  result.raw_result)
        abort_with_error(500)
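For a non-admin project owner, the permission structure assigned above would look roughly like this. The group id is made up, and DEFAULT_ADMIN_GROUP_PERMISSIONS is defined elsewhere in Pillar; the method list below is only an assumption about its contents:

from bson import ObjectId

permissions = {
    'world': [],   # ['GET'] instead when the creating user is an admin
    'users': [],
    'groups': [
        {'group': ObjectId('5a4b3c2d1e0f9a8b7c6d5e4f'),
         'methods': ['GET', 'PUT', 'POST', 'DELETE']},  # assumed defaults
    ],
}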