Example #1
def create_file_doc_for_upload(project_id, uploaded_file):
    """Creates a secure filename and a document in MongoDB for the file.

    The (project_id, filename) tuple should be unique. If such a document already
    exists, it is updated with the new file.

    :param uploaded_file: file from request.files['form-key']
    :type uploaded_file: werkzeug.datastructures.FileStorage
    :returns: a tuple (file_id, filename, status), where 'filename' is the internal
            filename used on GCS.
    """

    project_id = ObjectId(project_id)

    # Generate a unique internal name for the file, keeping only its
    # extension; this avoids name collisions within the project.
    _, ext = os.path.splitext(uploaded_file.filename)
    internal_filename = uuid.uuid4().hex + ext

    # For now, we don't support overwriting files, and create a new one every time.
    # # See if we can find a pre-existing file doc.
    # files = current_app.data.driver.db['files']
    # file_doc = files.find_one({'project': project_id,
    #                            'name': internal_filename})
    file_doc = None

    # TODO: at some point do name-based and content-based content-type sniffing.
    new_props = {
        'filename': uploaded_file.filename,
        'content_type': uploaded_file.mimetype,
        'length': uploaded_file.content_length,
        'project': project_id,
        'status': 'uploading'
    }

    if file_doc is None:
        # Create a file document on MongoDB for this file.
        file_doc = create_file_doc(name=internal_filename, **new_props)
        file_fields, _, _, status = current_app.post_internal(
            'files', file_doc)
    else:
        file_doc.update(new_props)
        file_fields, _, _, status = current_app.put_internal(
            'files', remove_private_keys(file_doc))

    if status not in (200, 201):
        log.error(
            'Unable to create new file document in MongoDB, status=%i: %s',
            status, file_fields)
        raise wz_exceptions.InternalServerError()

    log.debug(
        'Created file document %s for uploaded file %s; internal name %s',
        file_fields['_id'], uploaded_file.filename, internal_filename)

    return file_fields['_id'], internal_filename, status
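
A minimal usage sketch for the function above, assuming a Flask request context; the blueprint, route, and 'file' form key are hypothetical and not part of the original code.

from flask import Blueprint, jsonify, request

blueprint = Blueprint('file_storage', __name__)

@blueprint.route('/<project_id>/upload', methods=['POST'])
def upload_file(project_id: str):
    uploaded_file = request.files['file']
    file_id, internal_filename, status = create_file_doc_for_upload(
        project_id, uploaded_file)
    # A real view would stream the file to storage next; here we only
    # report the created document.
    return jsonify({'file_id': str(file_id),
                    'internal_filename': internal_filename}), status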
Example #2
def edit_comment(user_id, node_id, patch):
    """Edits a single comment.

    Doesn't do permission checking; users are allowed to edit their own
    comment, and this is not something you want to revoke anyway. Admins
    can edit all comments.
    """

    # Find the node. We need to fetch some more info than we use here, so that
    # we can pass this stuff to Eve's patch_internal; that way the validation &
    # authorisation system has enough info to work.
    nodes_coll = current_app.data.driver.db['nodes']
    node = nodes_coll.find_one(node_id)
    if node is None:
        log.warning('User %s wanted to patch non-existent node %s',
                    user_id, node_id)
        raise wz_exceptions.NotFound('Node %s not found' % node_id)

    if node['user'] != user_id and not authorization.user_has_role('admin'):
        raise wz_exceptions.Forbidden('You can only edit your own comments.')

    node = remove_private_keys(node)
    node['properties']['content'] = patch['content']
    node['properties']['attachments'] = patch.get('attachments', {})
    # Use Eve to PUT this node, as that also updates the etag and we want to replace attachments.
    r, _, _, status = current_app.put_internal('nodes',
                                               node,
                                               concurrency_check=False,
                                               _id=node_id)
    if status != 200:
        log.error('Error %i editing comment %s for user %s: %s', status,
                  node_id, user_id, r)
        raise wz_exceptions.InternalServerError('Internal error %i from Eve' %
                                                status)
    else:
        log.info('User %s edited comment %s', user_id, node_id)

    # Fetch the new content, so the client can show these without querying again.
    node = nodes_coll.find_one(node_id,
                               projection={
                                   'properties.content': 1,
                                   'properties._content_html': 1,
                               })
    return status, node
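
A hedged sketch of wiring edit_comment to a PATCH endpoint; the route and the get_current_user_id() helper are assumptions, not part of the original code.

from flask import jsonify, request

@blueprint.route('/comments/<node_id>', methods=['PATCH'])
def patch_comment(node_id: str):
    patch = request.get_json()
    if not patch or 'content' not in patch:
        raise wz_exceptions.BadRequest('Patch must have a "content" key.')
    status, node = edit_comment(get_current_user_id(), ObjectId(node_id), patch)
    # Serialising ObjectId/datetime values in 'node' is glossed over here.
    return jsonify(node), status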
Example #3
def _update_project(project):
    """Updates a project in the database, or SystemExit()s.

    :param project: the project data, should be the entire project document
    :type: dict
    :return: the project
    :rtype: dict
    """

    from pillar.api.utils import remove_private_keys

    project_id = ObjectId(project['_id'])
    project = remove_private_keys(project)
    result, _, _, status_code = current_app.put_internal('projects',
                                                         project,
                                                         _id=project_id)

    if status_code != 200:
        raise RuntimeError(f"Can't update project {project_id}, "
                           f"status {status_code} with issues: {result}")
Example #4
def process_file(bucket: Bucket, file_id: typing.Union[str, ObjectId],
                 local_file: tempfile._TemporaryFileWrapper):
    """Process the file by creating thumbnails, sending to Zencoder, etc.

    :param file_id: '_id' key of the file
    :param local_file: locally stored file, or None if no local processing is
    needed.
    """

    file_id = ObjectId(file_id)

    # Fetch the src_file document from MongoDB.
    files = current_app.data.driver.db['files']
    src_file = files.find_one(file_id)
    if not src_file:
        log.warning('process_file(%s): no such file document found, ignoring.',
                    file_id)
        return
    src_file = utils.remove_private_keys(src_file)

    # Update the 'format' field from the content type.
    # TODO: overrule the content type based on file extension & magic numbers.
    mime_category, src_file['format'] = src_file['content_type'].split('/', 1)

    # Only allow video encoding when the user has the correct capability.
    if not current_user.has_cap('encode-video') and mime_category == 'video':
        if src_file['format'].startswith('x-'):
            xified = src_file['format']
        else:
            xified = 'x-' + src_file['format']

        src_file['content_type'] = 'application/%s' % xified
        mime_category = 'application'
        log.info('Not processing video file %s for non-video-encoding user',
                 file_id)

    # Run the required processor, based on the MIME category.
    processors: typing.Mapping[str, typing.Callable] = {
        'image': _process_image,
        'video': _process_video,
    }

    try:
        processor = processors[mime_category]
    except KeyError:
        log.info(
            "POSTed file %s was of type %r, which isn't "
            "thumbnailed/encoded.", file_id, mime_category)
        src_file['status'] = 'complete'
    else:
        log.debug('process_file(%s): marking file status as "processing"',
                  file_id)
        src_file['status'] = 'processing'
        update_file_doc(file_id, status='processing')

        try:
            processor(bucket, file_id, local_file, src_file)
        except Exception:
            log.warning(
                'process_file(%s): error when processing file, '
                'resetting status to '
                '"queued_for_processing"',
                file_id,
                exc_info=True)
            update_file_doc(file_id, status='queued_for_processing')
            return

    # Update the original file with additional info, e.g. image resolution
    r, _, _, status = current_app.put_internal('files', src_file, _id=file_id)
    if status not in (200, 201):
        log.warning(
            'process_file(%s): status %i when saving processed file '
            'info to MongoDB: %s', file_id, status, r)
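
The dispatch pattern above in isolation: a mapping from MIME category to processor, with KeyError as the "no processor registered" signal. A standalone sketch with stand-in processors.

import typing

def _thumbnail(name: str) -> str:
    return f'thumbnailed {name}'

def _encode(name: str) -> str:
    return f'queued {name} for encoding'

PROCESSORS: typing.Mapping[str, typing.Callable[[str], str]] = {
    'image': _thumbnail,
    'video': _encode,
}

def process(content_type: str, name: str) -> str:
    # The MIME category is everything before the first slash.
    mime_category, _, _ = content_type.partition('/')
    try:
        processor = PROCESSORS[mime_category]
    except KeyError:
        return f'{name} is {content_type}, nothing to do'
    return processor(name)

assert process('image/png', 'pic.png') == 'thumbnailed pic.png'
assert process('application/pdf', 'doc.pdf').endswith('nothing to do')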
Example #5
def create_home_project(user_id, write_access):
    """Creates a home project for the given user.

    :param user_id: the user ID of the owner
    :param write_access: whether the user has full write access to the home project.
    :type write_access: bool
    :returns: the project
    :rtype: dict
    """

    log.info('Creating home project for user %s', user_id)
    overrides = {
        'category': 'home',
        'url': 'home',
        'summary': HOME_PROJECT_SUMMARY,
        'description': HOME_PROJECT_DESCRIPTION
    }

    # Maybe the user has a deleted home project.
    proj_coll = current_app.data.driver.db['projects']
    deleted_proj = proj_coll.find_one({
        'user': user_id,
        'category': 'home',
        '_deleted': True
    })
    if deleted_proj:
        log.info('User %s has a deleted project %s, restoring', user_id,
                 deleted_proj['_id'])
        project = deleted_proj
    else:
        log.debug('User %s does not have a deleted project', user_id)
        project = proj_utils.create_new_project(project_name='Home',
                                                user_id=ObjectId(user_id),
                                                overrides=overrides)

    # Re-validate the authentication token, so that the put_internal call sees the
    # new group created for the project.
    authentication.validate_token(force=True)

    # There are a few things in the on_insert_projects hook we need to adjust.

    # Ensure that the project is private, even for admins.
    project['permissions']['world'] = []

    # Set up the correct node types. No need to set permissions for them,
    # as the inherited project permissions are fine.
    from pillar.api.node_types.group import node_type_group
    from pillar.api.node_types.asset import node_type_asset
    # from pillar.api.node_types.text import node_type_text
    from pillar.api.node_types.comment import node_type_comment

    # For non-subscribers: take away write access from the admin group,
    # and grant it to certain node types.
    project['permissions']['groups'][0]['methods'] = home_project_permissions(
        write_access)

    # Everybody should be able to comment on anything in this project.
    # This allows people to comment on shared images and see comments.
    node_type_comment = assign_permissions(node_type_comment,
                                           subscriber_methods=['GET', 'POST'],
                                           world_methods=['GET'])

    project['node_types'] = [
        node_type_group,
        node_type_asset,
        # node_type_text,
        node_type_comment,
    ]

    result, _, _, status = current_app.put_internal(
        'projects', utils.remove_private_keys(project), _id=project['_id'])
    if status != 200:
        log.error('Unable to update home project %s for user %s: %s',
                  project['_id'], user_id, result)
        raise wz_exceptions.InternalServerError(
            'Unable to update home project')
    project.update(result)

    # Create the Blender Sync node, with explicit write permissions on the node itself.
    create_blender_sync_node(project['_id'],
                             project['permissions']['groups'][0]['group'],
                             user_id)

    return project
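
A hedged "get or create" sketch around create_home_project; the wrapper name and the _deleted filter shape are assumptions, not Pillar API.

def home_project_for(user_id, write_access: bool) -> dict:
    proj_coll = current_app.data.driver.db['projects']
    project = proj_coll.find_one({'user': user_id,
                                  'category': 'home',
                                  '_deleted': {'$ne': True}})
    if project is not None:
        return project
    return create_home_project(user_id, write_access)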
Example #6
def zencoder_notifications():
    """

    See: https://app.zencoder.com/docs/guides/getting-started/notifications#api_version_2

    """
    if current_app.config['ENCODING_BACKEND'] != 'zencoder':
        log.warning('Received notification from Zencoder but app not configured for Zencoder.')
        return abort(403)

    if not current_app.config['DEBUG']:
        # If we are in production, look for the Zencoder header secret
        try:
            notification_secret_request = request.headers[
                'X-Zencoder-Notification-Secret']
        except KeyError:
            log.warning('Received Zencoder notification without secret.')
            return abort(401)
        # If the header is found, check it against the one in the config.
        notification_secret = current_app.config['ZENCODER_NOTIFICATIONS_SECRET']
        if notification_secret_request != notification_secret:
            log.warning('Received Zencoder notification with incorrect secret.')
            return abort(401)

    # Cast request data into a dict
    data = request.get_json()

    if log.isEnabledFor(logging.DEBUG):
        from pprint import pformat
        log.debug('Zencoder job JSON: %s', pformat(data))

    files_collection = current_app.data.driver.db['files']
    # Find the file object based on processing backend and job_id
    zencoder_job_id = data['job']['id']
    lookup = {'processing.backend': 'zencoder',
              'processing.job_id': str(zencoder_job_id)}
    file_doc = files_collection.find_one(lookup)
    if not file_doc:
        log.warning('Unknown Zencoder job id %r', zencoder_job_id)
        # Return 200 OK when debugging so that Zencoder stops retrying;
        # in production return 404 so that it does keep retrying, which is
        # what we want there.
        return "Not found, but that's okay.", 200 if current_app.config['DEBUG'] else 404

    file_id = ObjectId(file_doc['_id'])
    # Remove internal keys (so that we can run put internal)
    file_doc = utils.remove_private_keys(file_doc)

    # Update processing status
    job_state = data['job']['state']
    file_doc['processing']['status'] = job_state

    if job_state == 'failed':
        log.warning('Zencoder job %i for file %s failed.', zencoder_job_id, file_id)
        # Log what Zencoder told us went wrong.
        for output in data['outputs']:
            if not any('error' in key for key in output):
                continue
            log.warning('Errors for output %s:', output['url'])
            for key in output:
                if 'error' in key:
                    log.info('    %s: %s', key, output[key])

        file_doc['status'] = 'failed'
        current_app.put_internal('files', file_doc, _id=file_id)
        return "You failed, but that's okay.", 200

    log.info('Zencoder job %s for file %s completed with status %s.', zencoder_job_id, file_id,
             job_state)

    # For every variation encoded, try to update the file object
    root, _ = os.path.splitext(file_doc['file_path'])

    for output in data['outputs']:
        video_format = output['format']
        # Change the zencoder 'mpeg4' format to 'mp4' used internally
        video_format = 'mp4' if video_format == 'mpeg4' else video_format

        # Find a variation matching both format and resolution.
        variation = next((v for v in file_doc['variations']
                          if v['format'] == video_format
                          and v['width'] == output['width']), None)
        # Fall back to a variation matching just the format
        if variation is None:
            variation = next((v for v in file_doc['variations']
                              if v['format'] == video_format), None)
        if variation is None:
            log.warning('Unable to find variation for video format %s for file %s',
                        video_format, file_id)
            continue

        # Rename the file to include the now-known size descriptor.
        size = size_descriptor(output['width'], output['height'])
        new_fname = '{}-{}.{}'.format(root, size, video_format)

        # Rename on Google Cloud Storage
        try:
            rename_on_gcs(file_doc['project'],
                          '_/' + variation['file_path'],
                          '_/' + new_fname)
        except Exception:
            log.warning('Unable to rename GCS blob %r to %r. Keeping old name.',
                        variation['file_path'], new_fname, exc_info=True)
        else:
            variation['file_path'] = new_fname

        # TODO: calculate md5 on the storage
        variation.update({
            'height': output['height'],
            'width': output['width'],
            'length': output['file_size_in_bytes'],
            'duration': data['input']['duration_in_ms'] / 1000,
            'md5': output['md5_checksum'] or '',  # they don't do MD5 for GCS...
            'size': size,
        })

    file_doc['status'] = 'complete'

    # Force an update of the links on the next load of the file.
    file_doc['link_expires'] = datetime.datetime.now(tz=tz_util.utc) - datetime.timedelta(days=1)

    current_app.put_internal('files', file_doc, _id=file_id)

    return '', 204
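
The two-stage variation lookup above, in isolation: prefer an exact (format, width) match, then fall back to a format-only match. A standalone sketch.

def find_variation(variations, video_format, width):
    exact = next((v for v in variations
                  if v['format'] == video_format and v['width'] == width),
                 None)
    if exact is not None:
        return exact
    return next((v for v in variations if v['format'] == video_format), None)

variations = [{'format': 'mp4', 'width': 1280}, {'format': 'mp4', 'width': 1920}]
assert find_variation(variations, 'mp4', 1920)['width'] == 1920
assert find_variation(variations, 'mp4', 640)['width'] == 1280  # format-only fallback
assert find_variation(variations, 'webm', 640) is None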
Example #7
def zencoder_notifications():
    """

    See: https://app.zencoder.com/docs/guides/getting-started/notifications#api_version_2

    """
    if current_app.config['ENCODING_BACKEND'] != 'zencoder':
        log.warning('Received notification from Zencoder but app not configured for Zencoder.')
        return abort(403)

    if not current_app.config['DEBUG']:
        # If we are in production, look for the Zencoder header secret
        try:
            notification_secret_request = request.headers[
                'X-Zencoder-Notification-Secret']
        except KeyError:
            log.warning('Received Zencoder notification without secret.')
            return abort(401)
        # If the header is found, check it against the one in the config.
        notification_secret = current_app.config['ZENCODER_NOTIFICATIONS_SECRET']
        if notification_secret_request != notification_secret:
            log.warning('Received Zencoder notification with incorrect secret.')
            return abort(401)

    # Cast request data into a dict
    data = request.get_json()

    if log.isEnabledFor(logging.DEBUG):
        from pprint import pformat
        log.debug('Zencoder job JSON: %s', pformat(data))

    files_collection = current_app.data.driver.db['files']
    # Find the file object based on processing backend and job_id
    zencoder_job_id = data['job']['id']
    lookup = {'processing.backend': 'zencoder',
              'processing.job_id': str(zencoder_job_id)}
    file_doc = files_collection.find_one(lookup)
    if not file_doc:
        log.warning('Unknown Zencoder job id %r', zencoder_job_id)
        # Return 200 OK when debugging so that Zencoder stops retrying;
        # in production return 404 so that it does keep retrying, which is
        # what we want there.
        return "Not found, but that's okay.", 200 if current_app.config['DEBUG'] else 404

    file_id = ObjectId(file_doc['_id'])
    # Remove internal keys (so that we can run put internal)
    file_doc = utils.remove_private_keys(file_doc)

    # Update processing status
    job_state = data['job']['state']
    file_doc['processing']['status'] = job_state

    if job_state == 'failed':
        log.warning('Zencoder job %s for file %s failed: %s', zencoder_job_id, file_id,
                    json.dumps(data, sort_keys=True, indent=4))

        file_doc['status'] = 'failed'
        current_app.put_internal('files', file_doc, _id=file_id)

        # This is 'okay' because we handled the Zencoder notification properly.
        return "You failed, but that's okay.", 200

    log.info('Zencoder job %s for file %s completed with status %s.', zencoder_job_id, file_id,
             job_state)

    # For every variation encoded, try to update the file object
    storage_name, _ = os.path.splitext(file_doc['file_path'])
    nice_name, _ = os.path.splitext(file_doc['filename'])

    bucket_class = Bucket.for_backend(file_doc['backend'])
    bucket = bucket_class(str(file_doc['project']))

    for output in data['outputs']:
        video_format = output['format']
        # Change the zencoder 'mpeg4' format to 'mp4' used internally
        video_format = 'mp4' if video_format == 'mpeg4' else video_format

        # Find a variation matching both format and resolution.
        variation = next((v for v in file_doc['variations']
                          if v['format'] == video_format
                          and v['width'] == output['width']), None)
        # Fall back to a variation matching just the format
        if variation is None:
            variation = next((v for v in file_doc['variations']
                              if v['format'] == video_format), None)
        if variation is None:
            log.warning('Unable to find variation for video format %s for file %s',
                        video_format, file_id)
            continue

        # Rename the file to include the now-known size descriptor.
        size = size_descriptor(output['width'], output['height'])
        new_fname = f'{storage_name}-{size}.{video_format}'

        # Rename the file on the storage.
        blob = bucket.blob(variation['file_path'])
        try:
            new_blob = bucket.rename_blob(blob, new_fname)
            new_blob.update_filename(f'{nice_name}-{size}.{video_format}')
        except Exception:
            log.warning('Unable to rename blob %r to %r. Keeping old name.',
                        blob, new_fname, exc_info=True)
        else:
            variation['file_path'] = new_fname

        # TODO: calculate md5 on the storage
        variation.update({
            'height': output['height'],
            'width': output['width'],
            'length': output['file_size_in_bytes'],
            'duration': data['input']['duration_in_ms'] / 1000,
            'md5': output['md5_checksum'] or '',  # they don't do MD5 for GCS...
            'size': size,
        })

    file_doc['status'] = 'complete'

    # Force an update of the links on the next load of the file.
    file_doc['link_expires'] = utils.utcnow() - datetime.timedelta(days=1)

    r, _, _, status = current_app.put_internal('files', file_doc, _id=file_id)
    if status != 200:
        log.error('unable to save file %s after Zencoder notification: %s', file_id, r)
        return json.dumps(r), 500

    return '', 204
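
The production secret check factored into a pure helper, as a sketch; using hmac.compare_digest avoids leaking the secret through string-comparison timing.

import hmac

def zencoder_secret_ok(headers, expected_secret: str) -> bool:
    received = headers.get('X-Zencoder-Notification-Secret')
    if received is None:
        return False
    return hmac.compare_digest(received, expected_secret)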
Example #8
def upsert_user(db_user):
    """Inserts/updates the user in MongoDB.

    Retries a few times when there are uniqueness issues in the username.

    :returns: the user's database ID and the status of the PUT/POST.
        The status is 201 on insert, and 200 on update.
    :rtype: (ObjectId, int)
    """

    if 'subscriber' in db_user.get('groups', []):
        # The group list should contain only ObjectIDs; a role name like
        # 'subscriber' sneaking in indicates a bug upstream.
        log.error('Non-ObjectID string found in user.groups: %s', db_user)
        raise wz_exceptions.InternalServerError(
            'Non-ObjectID string found in user.groups: %s' % db_user)

    if not db_user['full_name']:
        # Blender ID doesn't need a full name, but we do.
        db_user['full_name'] = db_user['username']

    r = {}
    for retry in range(5):
        if '_id' in db_user:
            # Update the existing user
            attempted_eve_method = 'PUT'
            db_id = db_user['_id']
            r, _, _, status = current_app.put_internal(
                'users', remove_private_keys(db_user), _id=db_id)
            if status == 422:
                log.error(
                    'Status %i trying to PUT user %s with values %s, should not happen! %s',
                    status, db_id, remove_private_keys(db_user), r)
        else:
            # Create a new user, retry for non-unique usernames.
            attempted_eve_method = 'POST'
            r, _, _, status = current_app.post_internal('users', db_user)

            if status not in {200, 201}:
                log.error('Status %i trying to create user with values %s: %s',
                          status, db_user, r)
                raise wz_exceptions.InternalServerError()

            db_id = r['_id']
            db_user.update(r)  # update with database/eve-generated fields.

        if status == 422:
            # Probably non-unique username, so retry a few times with different usernames.
            log.info('Error creating new user: %s', r)
            username_issue = r.get('_issues', {}).get('username', '')
            if 'not unique' in username_issue:
                # Retry
                db_user['username'] = make_unique_username(db_user['email'])
                continue

        # Saving was successful, or at least didn't break on a non-unique username.
        break
    else:
        log.error('Unable to create new user %s: %s', db_user, r)
        raise wz_exceptions.InternalServerError()

    if status not in (200, 201):
        log.error('internal response from %s to Eve: %r %r',
                  attempted_eve_method, status, r)
        raise wz_exceptions.InternalServerError()

    return db_id, status
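
The for/else retry idiom above, in isolation: the else clause runs only when the loop was never broken out of, i.e. every attempt hit a conflict. A standalone sketch with injectable save/fix callables.

def save_with_retries(save, fix_conflict, attempts: int = 5) -> int:
    for _ in range(attempts):
        status = save()
        if status == 422:   # validation conflict, e.g. non-unique username
            fix_conflict()
            continue
        break               # saved, or failed in a way a retry won't fix
    else:
        raise RuntimeError(f'all {attempts} attempts hit a conflict')
    return status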