Example #1
def tagged(tag=''):
    """Return all tagged nodes of public projects as JSON."""
    from pillar.auth import current_user

    # We explicitly register the tagless endpoint to raise a 404, otherwise the PATCH
    # handler on /api/nodes/<node_id> will return a 405 Method Not Allowed.
    if not tag:
        raise wz_exceptions.NotFound()

    # Build the (cached) list of tagged nodes
    agg_list = _tagged(tag)

    for node in agg_list:
        if node['properties'].get('duration_seconds'):
            duration_secs = node['properties']['duration_seconds']
            node['properties']['duration'] = datetime.timedelta(seconds=duration_secs)

        if node.get('_created') is not None:
            node['pretty_created'] = pretty_date(node['_created'])

    # If the user is anonymous, no more information is needed and we return
    if current_user.is_anonymous:
        return jsonify(agg_list)

    # If the user is authenticated, attach view_progress for video assets
    view_progress = current_user.nodes['view_progress']
    for node in agg_list:
        node_id = str(node['_id'])
        # View progress is only attached for nodes of type 'asset' with
        # content_type 'video', and only when the video is already in the
        # current user's watched list.
        if node_id in view_progress:
            node['view_progress'] = view_progress[node_id]

    return jsonify(agg_list)
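The comment about the tagless endpoint only makes sense together with the URL routing. A minimal registration sketch, assuming a Flask blueprint; the blueprint name and URL prefix are assumptions, not taken from the code above:

from flask import Blueprint

# Hypothetical blueprint; Pillar's actual registration may differ.
blueprint_api = Blueprint('nodes_api', __name__, url_prefix='/api/nodes')

# Registering the tagless rule as well makes /api/nodes/tagged/ reach
# tagged(tag='') and raise a clean 404. Without it, the request would fall
# through to the /api/nodes/<node_id> PATCH handler, which answers GET
# requests with 405 Method Not Allowed.
blueprint_api.add_url_rule('/tagged/', view_func=tagged)
blueprint_api.add_url_rule('/tagged/<tag>', view_func=tagged)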
Example #2
def search_user():
    searchword = request.args.get('q', '')
    terms = _term_filters(request.args)
    page_idx = _page_index(request.args.get('page', 0))
    # 'result' is the raw Elasticsearch output.
    # We need to filter fields in the case of user objects.

    try:
        result = queries.do_user_search(searchword, terms, page_idx)
    except elk_ex.ElasticsearchException as ex:
        resp = jsonify({'_message': str(ex)})
        resp.status_code = 500
        return resp

    return jsonify(result)
Example #3
def task_update_batch(manager: dict, task_updates):
    """Handle task updates from the Manager and respond with actions for the Manager.

    This endpoint receives batched task updates from the Manager, and handles
    those (for example by failing a job when too many tasks failed).

    In the response there are a few other pieces of data that basically
    indicate actions to be performed by the manager (cancelling tasks, sending
    task log files).
    """
    from pillar.api.utils import jsonify

    manager_id = manager['_id']
    total_modif_count, handled_update_ids = handle_task_update_batch(manager_id, task_updates)

    # Check which tasks are in state 'cancel-requested', as those need to be sent back.
    # This MUST be done after we run the task update batch, as just-changed task statuses
    # should be taken into account.
    tasks_to_cancel = tasks_cancel_requested(manager_id)

    response = {'modified_count': total_modif_count,
                'handled_update_ids': handled_update_ids}
    if tasks_to_cancel:
        response['cancel_task_ids'] = list(tasks_to_cancel)

    # TODO(Sybren): expose the fact that there are other types of actions to
    # perform by the Manager in a different way, for example via a Redis or
    # RabbitMQ channel.
    upload_task_file_queue = manager.get('upload_task_file_queue')
    if upload_task_file_queue:
        response['upload_task_file_queue'] = upload_task_file_queue

    return jsonify(response)
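For illustration, a sketch of how a Manager might consume this response; the key names come from the code above, everything else is hypothetical:

def handle_batch_response(resp_json: dict) -> None:
    # The prints stand in for the Manager's real actions.
    print('Server applied', resp_json['modified_count'], 'modifications')
    for task_id in resp_json.get('cancel_task_ids', []):
        print('Cancelling task', task_id)
    for task_id in resp_json.get('upload_task_file_queue', []):
        print('Uploading log file of task', task_id)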
Example #4
    def pillar_error_handler(self, error_ob):

        # 'error_ob' can be any exception. If it's not a Werkzeug exception,
        # handle it as a 500.
        if not hasattr(error_ob, 'code'):
            error_ob.code = 500
        if not hasattr(error_ob, 'description'):
            error_ob.description = str(error_ob)

        if request.full_path.startswith('/%s/' % self.config['URL_PREFIX']):
            from pillar.api.utils import jsonify
            # This is an API request, so respond in JSON.
            return jsonify(
                {
                    '_status': 'ERR',
                    '_code': error_ob.code,
                    '_message': error_ob.description,
                },
                status=error_ob.code)

        # See whether we should return an embedded page or a regular one.
        if request.is_xhr:
            fname = 'errors/%i_embed.html' % error_ob.code
        else:
            fname = 'errors/%i.html' % error_ob.code

        # Also handle the case where we didn't create a template for this error.
        try:
            return render_template(
                fname, description=error_ob.description), error_ob.code
        except TemplateNotFound:
            self.log.warning('Error template %s for code %i not found', fname,
                             error_ob.code)
            return render_template('errors/500.html'), error_ob.code
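How this handler is hooked up is not shown; a minimal wiring sketch, assuming the class is a Flask application subclass and with a representative (not authoritative) list of status codes:

    def register_error_handlers(self):
        # Hypothetical wiring; Pillar's real registration may cover other codes.
        for code in (403, 404, 412, 500):
            self.register_error_handler(code, self.pillar_error_handler)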
Example #5
def latest_comments():
    latest = latest_nodes(
        {
            'node_type': 'comment',
            'properties.status': 'published'
        }, {
            'project': 1,
            'parent': 1,
            'user': 1,
            'properties.content': 1,
            'node_type': 1,
            'properties.status': 1,
            'properties.is_reply': 1
        }, has_public_project, 10)

    # Embed the comments' parents.
    nodes = current_app.data.driver.db['nodes']
    parents = {}
    for comment in latest:
        parent_id = comment['parent']

        if parent_id in parents:
            comment['parent'] = parents[parent_id]
            continue

        parent = nodes.find_one(parent_id)
        parents[parent_id] = parent
        comment['parent'] = parent

    embed_project(latest)
    embed_user(latest)

    return jsonify({'_items': latest})
Example #6
def share_node(node_id):
    """Shares a node, or returns sharing information."""

    node_id = str2id(node_id)
    nodes_coll = current_app.data.driver.db['nodes']

    node = nodes_coll.find_one({'_id': node_id},
                               projection={
                                   'project': 1,
                                   'node_type': 1,
                                   'short_code': 1
                               })
    if not node:
        raise wz_exceptions.NotFound('Node %s does not exist.' % node_id)

    check_permissions('nodes', node, request.method)

    log.info('Sharing node %s', node_id)

    short_code = node.get('short_code')
    status = 200

    if not short_code:
        if request.method == 'POST':
            short_code = generate_and_store_short_code(node)
            make_world_gettable(node)
            status = 201
        else:
            return '', 204

    return jsonify(eve_hooks.short_link_info(short_code), status=status)
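eve_hooks.short_link_info() is not shown in this example. Based on the fields used here, a plausible, purely hypothetical shape of its payload:

def short_link_info_sketch(short_code: str) -> dict:
    short_link_base = 'https://example.com/r/'  # made-up configuration value
    return {
        'short_code': short_code,
        'short_link': short_link_base + short_code,
    }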
Example #7
    def patch_rna_overrides(self, job_id: bson.ObjectId, patch: dict):
        """Update the RNA overrides of this render job, and re-queue dependent tasks.

        Note that once a job has RNA overrides, the RNA overrides task cannot
        be deleted. If such task deletion were possible, it would still not
        delete the RNA override file and effectively keep the old overrides in
        place. Having an empty overrides file is better.
        """
        self.assert_job_access(job_id)

        rna_overrides = patch.get('rna_overrides') or []
        if not all(isinstance(override, str) for override in rna_overrides):
            log.info('User %s wants to PATCH job %s to set RNA overrides, but not all '
                     'overrides are strings', current_user_id(), job_id)
            raise wz_exceptions.BadRequest(f'"rna_overrides" should be a list of strings,'
                                           f' not {rna_overrides!r}')

        result = rna_overrides_mod.validate_rna_overrides(rna_overrides)
        if result:
            msg, line_num = result
            log.info('User %s tries PATCH to update RNA overrides of job %s but has '
                     'error %r in override %d',
                     current_user_id(), job_id, msg, line_num)

            return jsonify({
                'validation_error': {
                    'message': msg,
                    'line_num': line_num,
                }
            }, status=422)

        log.info('User %s uses PATCH to update RNA overrides of job %s to %d overrides',
                 current_user_id(), job_id, len(rna_overrides))
        current_flamenco.job_manager.api_update_rna_overrides(job_id, rna_overrides)
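From the way `result` is unpacked above, `validate_rna_overrides()` returns None for valid overrides and a `(message, line_num)` tuple otherwise. A sketch of that contract; the actual validation rule is an assumption:

import typing


def validate_rna_overrides(
        rna_overrides: typing.List[str],
) -> typing.Optional[typing.Tuple[str, int]]:
    for line_num, override in enumerate(rna_overrides, start=1):
        # Hypothetical rule; the real validator parses each override expression.
        if '=' not in override:
            return f'missing assignment in {override!r}', line_num
    return None  # all overrides are valid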
Example #8
def texture_libraries():
    from . import blender_cloud_addon_version

    # Use Eve method so that we get filtering on permissions for free.
    # This gives all the projects that contain the required node types.
    # Allow changes; request.args is an ImmutableMultiDict by default.
    request.args = MultiDict(request.args)
    request.args.setlist(eve_config.QUERY_PROJECTION, [TL_PROJECTION])
    request.args.setlist(eve_config.QUERY_SORT, [TL_SORT])

    # Determine whether to return HDRi projects too, based on the version
    # of the Blender Cloud Addon. If the addon version is None, we're dealing
    # with a version of the BCA that's so old it doesn't send its version along.
    addon_version = blender_cloud_addon_version()
    return_hdri = addon_version is not None and addon_version >= FIRST_ADDON_VERSION_WITH_HDRI
    log.debug('User %s has Blender Cloud Addon version %s; return_hdri=%s',
              current_user_id(), addon_version, return_hdri)

    accept_as_library = functools.partial(has_texture_node,
                                          return_hdri=return_hdri)

    # Construct eve-like response.
    projects = list(keep_fetching_texture_libraries(accept_as_library))
    result = {
        '_items': projects,
        '_meta': {
            'max_results': len(projects),
            'page': 1,
            'total': len(projects),
        }
    }

    return utils.jsonify(result)
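The version comparison above suggests addon versions are ordered values; a sketch assuming they are tuples of ints parsed from a request header (the header name and the constant's value are assumptions):

import typing

from flask import request

FIRST_ADDON_VERSION_WITH_HDRI = (1, 4, 0)  # hypothetical value


def blender_cloud_addon_version() -> typing.Optional[typing.Tuple[int, ...]]:
    """Parse a '1.4.0'-style version header; None for very old addons."""
    header = request.headers.get('Blender-Cloud-Addon')  # assumed header name
    if not header:
        return None
    try:
        return tuple(int(part) for part in header.split('.'))
    except ValueError:
        return None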
Example #9
def latest_comments():
    latest = latest_nodes(
        {
            'node_type': 'comment',
            'properties.status': 'published'
        }, {
            'parent': 1,
            'user.full_name': 1,
            'properties.content': 1,
            'node_type': 1,
            'properties.status': 1,
            'properties.is_reply': 1
        }, 10)

    # Embed the comments' parents.
    # TODO: move to aggregation pipeline.
    nodes = current_app.data.driver.db['nodes']
    parents = {}
    for comment in latest:
        parent_id = comment['parent']

        if parent_id in parents:
            comment['parent'] = parents[parent_id]
            continue

        parent = nodes.find_one(parent_id)
        parents[parent_id] = parent
        comment['parent'] = parent

    return jsonify({'_items': latest})
Example #10
def urler(project_id):
    """Returns the URL of any project."""

    project_id = str2id(project_id)
    project = mongo.find_one_or_404('projects',
                                    project_id,
                                    projection={'url': 1})
    return jsonify({'_id': project_id, 'url': project['url']})
Example #11
def search_nodes():
    searchword = request.args.get('q', '')
    project_id = request.args.get('project', '')
    terms = _term_filters()
    page_idx = _page_index()

    result = queries.do_node_search(searchword, terms, page_idx, project_id)
    return jsonify(result)
Example #12
def search_user_admin():
    """
    User search over all fields.
    """

    searchword = request.args.get('q', '')
    terms = _term_filters()
    page_idx = _page_index()

    try:
        result = queries.do_user_search_admin(searchword, terms, page_idx)
    except elk_ex.ElasticsearchException as ex:
        resp = jsonify({'_message': str(ex)})
        resp.status_code = 500
        return resp

    return jsonify(result)
Example #13
def sync_avatar():
    """Fetch the user's avatar from Blender ID and save to storage.

    This is an API-like endpoint, in the sense that it returns JSON.
    It's here in this file to have it close to the endpoint that
    serves the only page that calls on this endpoint.
    """

    new_url = pillar.api.users.avatar.sync_avatar(current_user.user_id)
    if not new_url:
        return jsonify({'_message': 'Your avatar could not be updated'})
    return new_url  # the new avatar URL, as a plain-text response
Example #14
def home_project():
    """Fetches the home project, creating it if necessary.

    Eve projections are supported, but at least the following fields must be present:
        'permissions', 'category', 'user'
    """
    from pillar.auth import current_user

    user_id = current_user.user_id
    roles = current_user.roles

    log.debug('Possibly creating home project for user %s with roles %s',
              user_id, roles)
    if HOME_PROJECT_USERS and not HOME_PROJECT_USERS.intersection(roles):
        log.debug('User %s is not a subscriber, not creating home project.',
                  user_id)
        return 'No home project', 404

    # Create the home project before we do the Eve query. This costs an extra round-trip
    # to the database, but makes it easier to do projections correctly.
    if not has_home_project(user_id):
        write_access = write_access_with_roles(roles)
        create_home_project(user_id, write_access)

    resp, _, _, status, _ = get('projects', category='home', user=user_id)
    if status != 200:
        return utils.jsonify(resp), status

    if resp['_items']:
        project = resp['_items'][0]
    else:
        log.warning(
            'Home project for user %s not found, while we just created it! Could be '
            'due to projections and other arguments on the query string: %s',
            user_id, request.query_string)
        return 'No home project', 404

    return utils.jsonify(project), status
Example #15
def create_project(overrides=None):
    """Creates a new project."""

    if request.mimetype == 'application/json':
        project_name = request.json['name']
    else:
        project_name = request.form['project_name']
    user_id = current_user.user_id

    project = utils.create_new_project(project_name, user_id, overrides)

    # Return the project in the response.
    loc = url_for('projects|item_lookup', _id=project['_id'])
    return jsonify(project, status=201, headers={'Location': loc})
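A usage sketch with Flask's test client; the URL below is an assumption, not taken from the code above:

def test_create_project(client):
    # 'client' is a Flask test client for an app that routes this view;
    # '/api/p/create' is a hypothetical URL for the endpoint above.
    resp = client.post('/api/p/create', data={'project_name': 'My Project'})
    assert resp.status_code == 201
    assert 'Location' in resp.headers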
Example #16
def patch_post(node_id, patch):
    assert_is_valid_patch(node_id, patch)
    user_id = authentication.current_user_id()

    if patch['op'] in COMMENT_VOTING_OPS:
        nodes_coll = current_app.db()['nodes']
        node = nodes_coll.find_one({'_id': node_id})

        old_rating = rating_difference(node)
        result, node = vote_comment(user_id, node_id, patch)
        new_rating = rating_difference(node)

        # Update the user karma based on the rating differences.
        karma_increase = (new_rating - old_rating) * POST_VOTE_WEIGHT
        if karma_increase != 0:
            node_user_id = nodes_coll.find_one({'_id': node_id},
                                               projection={'user': 1})['user']

            users_collection = current_app.db()['users']
            db_fieldname = f'extension_props_public.{EXTENSION_NAME}.karma'

            users_collection.find_one_and_update(
                {'_id': node_user_id},
                {'$inc': {db_fieldname: karma_increase}},
                projection={db_fieldname: 1},
            )

        # Fetch the full node for updating hotness and reindexing
        # TODO (can be improved by making a partial update)
        node = nodes_coll.find_one({'_id': node['_id']})
        update_hot(node)
        nodes_coll.update_one(
            {'_id': node['_id']},
            {'$set': {
                'properties.hot': node['properties']['hot']
            }})

        algolia_index_post_save(node)
    else:
        return abort(403)

    return jsonify({
        '_status': 'OK',
        'result': result,
        'properties': node['properties']
    })
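rating_difference() is not shown in this example; a plausible implementation, assuming Pillar-style vote counters on the node (the field names are an assumption):

def rating_difference(node: dict) -> int:
    props = node.get('properties', {})
    # Assumed counter fields; the real helper may aggregate votes differently.
    return props.get('rating_positive', 0) - props.get('rating_negative', 0)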
Example #17
def patch_comment(node_id, patch):
    assert_is_valid_patch(node_id, patch)
    user_id = authentication.current_user_id()

    if patch['op'] in COMMENT_VOTING_OPS:
        result, node = vote_comment(user_id, node_id, patch)
    else:
        assert patch['op'] == 'edit', 'Invalid patch operation %s' % patch['op']
        result, node = edit_comment(user_id, node_id, patch)

    return jsonify({'_status': 'OK',
                    'result': result,
                    'properties': node['properties']
                    })
Example #18
def attach_task_log(manager_id: ObjectId, _, task_id: str):
    """Store the POSTed task log as a file in the storage backend.

    Also updates the task itself to have a reference to the file.
    """
    # We only want to deal with GZipped files.
    if 'logfile' not in request.files:
        raise wz_exceptions.BadRequest("Missing uploaded file named 'logfile'")
    uploaded_file: werkzeug.datastructures.FileStorage = request.files['logfile']
    if not uploaded_file.filename.endswith('.gz'):
        # The test HTTP client doesn't support setting per-part headers.
        raise wz_exceptions.BadRequest('GZIP your file!')

    # De-queue now; if the task or project doesn't exist, the Manager shouldn't be asked again.
    task_oid = str2id(task_id)
    current_flamenco.manager_manager.dequeue_task_log_request(manager_id, task_oid)

    # Check whether this Manager may attach to this Task.
    tasks_coll = current_flamenco.db('tasks')
    task = tasks_coll.find_one({'_id': task_oid, 'manager': manager_id})
    if not task:
        raise wz_exceptions.NotFound('No such task exists')

    proj_coll = current_app.db('projects')
    project = proj_coll.find_one({'_id': task['project'], '_deleted': {'$ne': True}},
                                 projection={'url': True})
    if not project:
        log.warning('attach_task_log(%s, %s): project %s does not exist!',
                    manager_id, task_id, task['project'])
        raise wz_exceptions.NotFound(f'Project for task {task_oid} does not exist')

    preexisting = current_flamenco.task_manager.api_attach_log(task, uploaded_file)

    resp = jsonify({'_message': 'ok'}, status=200 if preexisting else 201)
    resp.headers['Location'] = url_for(
        'flamenco.tasks.perproject.download_task_log_file',
        project_url=project['url'], task_id=task_id)
    return resp
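On the Manager side, the upload could look like this sketch. The 'logfile' field name and the gzip requirement come from the code above; the URL path and the lack of authentication are assumptions:

import gzip
import io

import requests  # assumed Manager-side dependency


def upload_task_log(server_url: str, task_id: str, log_text: str) -> int:
    log_gz = gzip.compress(log_text.encode('utf-8'))
    files = {'logfile': (f'{task_id}.log.gz', io.BytesIO(log_gz),
                         'application/gzip')}
    resp = requests.post(f'{server_url}/tasks/{task_id}/log', files=files)
    return resp.status_code  # 201 on first upload, 200 if a log already existed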
Example #19
def update_download_count(post_id):
    """Update download count for the Post.

    This function is called from dillo.web.posts.routes.download_file.
    """

    current_app.db('nodes').update_one({'_id': ObjectId(post_id)}, {
        '$inc': {
            'properties.downloads_total': 1,
            'properties.downloads_latest': 1
        }
    })

    log.debug('Updated download count for post %s', post_id)
    return jsonify({'status': 'OK'})
Example #20
def search_user():
    searchword = request.args.get('q', '')
    terms = _term_filters()
    page_idx = _page_index()
    # 'result' is the raw Elasticsearch output.
    # We need to filter fields in the case of user objects.

    try:
        result = queries.do_user_search(searchword, terms, page_idx)
    except elk_ex.ElasticsearchException as ex:
        resp = jsonify({'_message': str(ex)})
        resp.status_code = 500
        return resp

    # Filter out sensitive fields: we only need objectID, username and full_name.
    hits = result.get('hits', {})

    new_hits = []

    for hit in hits.get('hits', []):
        source = hit['_source']
        single_hit = {
            '_source': {
                'objectID': source.get('objectID'),
                'username': source.get('username'),
                'full_name': source.get('full_name'),
            }
        }

        new_hits.append(single_hit)

    # replace search result with safe subset
    result['hits']['hits'] = new_hits

    return jsonify(result)
Example #21
def project_quotas(project_id):
    """Returns information about the project's limits."""

    # Check that the user has GET permissions on the project itself.
    project = mongo.find_one_or_404('projects', project_id)
    check_permissions('projects', project, 'GET')

    file_size_used = utils.project_total_file_size(project_id)

    info = {
        'file_size_quota': None,  # TODO: implement this later.
        'file_size_used': file_size_used,
    }

    return jsonify(info)
Example #22
def multi_search_nodes():
    if len(request.args) != 1:
        log.info(f'Expected 1 argument, received {len(request.args)}')

    json_obj = request.json
    q = []
    for row in json_obj:
        q.append({
            'query': row.get('q', ''),
            'project_id': row.get('project', ''),
            'terms': _term_filters(row),
            'page': _page_index(row.get('page', 0))
        })

    result = queries.do_multi_node_search(q)
    return jsonify(result)
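The POSTed JSON body is a list of search descriptors. The field names come from the loop above; the values are invented for illustration:

example_payload = [
    {'q': 'tree', 'project': '5a0c1b9e8f2d4c3b2a1f0e9d', 'page': 0},
    {'q': 'rock'},  # 'project' and 'page' fall back to their defaults
]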
Example #23
def latest_assets():
    latest = latest_nodes(
        {
            'node_type': 'asset',
            'properties.status': 'published'
        }, {
            'name': 1,
            'node_type': 1,
            'parent': 1,
            'picture': 1,
            'properties.status': 1,
            'properties.content_type': 1,
            'permissions.world': 1
        }, 12)

    return jsonify({'_items': latest})
Example #24
def create_new():
    """Creates a new Organization, owned by the currently logged-in user."""

    user_id = current_user.user_id
    log.info('Creating new organization for user %s', user_id)

    name = request.form['name']
    seat_count = int(request.form['seat_count'])

    org_doc = current_app.org_manager.create_new_org(name, user_id, seat_count)

    org_id = str(org_doc['_id'])
    url = url_for('.view_embed', organization_id=org_id)
    resp = jsonify({'_id': org_id, 'location': url})
    resp.headers['Location'] = url

    return resp, 201
Example #25
def task_update_batch(manager_id, task_updates):
    from pillar.api.utils import jsonify

    total_modif_count, handled_update_ids = handle_task_update_batch(
        manager_id, task_updates)

    # Check which tasks are in state 'cancel-requested', as those need to be sent back.
    # This MUST be done after we run the task update batch, as just-changed task statuses
    # should be taken into account.
    tasks_to_cancel = tasks_cancel_requested(manager_id)

    response = {
        'modified_count': total_modif_count,
        'handled_update_ids': handled_update_ids
    }
    if tasks_to_cancel:
        response['cancel_task_ids'] = list(tasks_to_cancel)

    return jsonify(response)
Example #26
    def patch_set_username(self, user_id: bson.ObjectId, patch: dict):
        """Updates a user's username."""
        if user_id != current_user.user_id:
            log.info('User %s tried to change username of user %s',
                     current_user.user_id, user_id)
            raise wz_exceptions.Forbidden(
                'You may only change your own username')

        new_username = patch['username']
        log.info('User %s uses PATCH to set username to %r',
                 current_user.user_id, new_username)

        users_coll = current_app.db('users')
        db_user = users_coll.find_one({'_id': user_id})
        db_user['username'] = new_username

        # Save via Eve to check the schema and trigger update hooks.
        response, _, _, status = current_app.put_internal(
            'users', remove_private_keys(db_user), _id=user_id)

        return jsonify(response), status
Example #27
def latest_assets():
    latest = latest_nodes(
        {
            'node_type': 'asset',
            'properties.status': 'published'
        }, {
            'name': 1,
            'project': 1,
            'user': 1,
            'node_type': 1,
            'parent': 1,
            'picture': 1,
            'properties.status': 1,
            'properties.content_type': 1,
            'permissions.world': 1
        }, has_public_project, 12)

    embed_user(latest)
    embed_project(latest)

    return jsonify({'_items': latest})
Example #28
def get_video_progress(video_id: str):
    """Return video progress information.

    Either a `204 No Content` is returned (no information stored),
    or a `200 OK` with JSON from Eve's 'users' schema, from the key
    video.view_progress.<video_id>.
    """

    # Validation of the video ID; raises a BadRequest when it's not an ObjectId.
    # This isn't strictly necessary, but it makes this function behave
    # symmetrically to the set_video_progress() function.
    utils.str2id(video_id)

    users_coll = current_app.db('users')
    user_doc = users_coll.find_one(current_user.user_id, projection={'nodes.view_progress': True})
    try:
        progress = user_doc['nodes']['view_progress'][video_id]
    except KeyError:
        return '', 204
    if not progress:
        return '', 204

    return utils.jsonify(progress)
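A plausible shape of the stored progress document. Only the nodes.view_progress.<video_id> path is taken from the code above; the inner field names are an assumption:

example_user_doc = {
    'nodes': {
        'view_progress': {
            '5a0c1b9e8f2d4c3b2a1f0e9d': {  # video node ID, invented
                'progress_in_sec': 263.0,
                'progress_in_percent': 42,
                'done': False,
            },
        },
    },
}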
Example #29
def get_posts():
    """Fetch list of paginated posts.

    If the user is logged in, show posts based on the followed communities.
    Supported query parameters:

    - page
    - sorting (hot, top, new)
    - filters (tags, development_status)

    We limit the number of results to 15 (config PAGINATION_DEFAULT_POSTS) per request.
    Together with data and metadata, we also return 'facets' which act as 'filters'
    when performing requests to this endpoint.
    """

    # Validate query parameters and define sorting and pagination
    page, sorting, filters, community_id, title = validate_query_strings(
        flask.request)
    pagination_default = current_app.config['PAGINATION_DEFAULT_POSTS']
    skip = pagination_default * (page - 1)

    pipeline = [
        # Find all Dillo posts that are published and not deleted
        # Optionally, require posts from communities that the user follows
        # or from the DEFAULT_FOLLOWED_COMMUNITY_IDS list
        {
            '$match': {
                'node_type': 'dillo_post',
                'properties.status': 'published',
                '_deleted': {
                    '$ne': True
                }
            }
        },
        # Sort them by most recent (or by hot)
        {
            '$sort': sorting
        },
        # Create facets
        # Store total document count in metadata
        # Further process the posts in data
        {
            '$facet': {
                'metadata': [{
                    '$count': 'total'
                }, {
                    '$addFields': {
                        'page': page
                    }
                }],
                'data': [
                    {
                        '$skip': skip
                    },
                    {
                        '$limit': pagination_default
                    },
                    # Aggregate the project
                    {
                        '$lookup': {
                            'from': 'projects',
                            'localField': 'project',
                            'foreignField': '_id',
                            'as': 'project'
                        }
                    },
                    {
                        '$unwind': '$project'
                    },
                    # Aggregate the user
                    {
                        '$lookup': {
                            'from': 'users',
                            'localField': 'user',
                            'foreignField': '_id',
                            'as': 'user'
                        }
                    },
                    {
                        '$unwind': '$user'
                    },
                    {
                        '$project': {
                            'name': 1,
                            'properties': 1,
                            'picture': 1,
                            '_created': 1,
                            'project.url': 1,
                            'project.name': 1,
                            'user': '******'
                        }
                    },
                ]
            }
        },
    ]

    if community_id:
        # If a community is specified, show only posts that belong to it
        add_filter_community(pipeline, community_id)
    else:
        # We are not viewing a community, use the aggregated communities
        add_communities_filter(pipeline)

    for filter_key, filter_value in filters.items():
        pipeline[0]['$match'][f'properties.{filter_key}'] = {
            '$in': filter_value
        }

    if title:
        pipeline[0]['$match']['$text'] = {'$search': title}

    # Add default facets, as well as community-specific facets
    add_facets_to_pipeline(pipeline, community_id)

    nodes_coll = current_app.db('nodes')
    # Because of the $facet stage, the aggregation yields exactly one document.
    posts_cursor = list(nodes_coll.aggregate(pipeline=pipeline))[0]

    # Set default values for metadata (in case no result was retrieved)
    metadata = {'total': 0, 'page': 1}
    if posts_cursor['metadata']:
        metadata = posts_cursor['metadata'][0]  # only the first element of the list

    docs = {
        'metadata': metadata,
        'data': posts_cursor['data'],
        'filters': filters,
    }

    add_facets_to_response(docs, posts_cursor, community_id)

    return jsonify(docs)
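For reference, the single document produced by the $facet stage has this shape (values invented):

posts_cursor_example = {
    'metadata': [{'total': 42, 'page': 1}],  # empty list when nothing matches
    'data': [],  # up to PAGINATION_DEFAULT_POSTS aggregated post documents
}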
Example #30
def get_depsgraph(manager_id, request_json):
    """Returns the dependency graph of all tasks assigned to the given Manager.

    Use the HTTP header X-Flamenco-If-Updated-Since to limit the dependency
    graph to tasks that have been modified since that timestamp.
    """

    import dateutil.parser
    from pillar.api.utils import jsonify, bsonify
    from flamenco import current_flamenco
    from flamenco.utils import report_duration

    modified_since = request.headers.get('X-Flamenco-If-Updated-Since')

    with report_duration(log, 'depsgraph query'):
        tasks_coll = current_flamenco.db('tasks')
        jobs_coll = current_flamenco.db('jobs')

        # Get runnable jobs first, as non-runnable jobs are not interesting.
        # Note that jobs going from runnable to non-runnable should have their
        # tasks set to cancel-requested, which is communicated to the Manager
        # through a different channel.
        jobs = jobs_coll.find({
            'manager': manager_id,
            'status': {'$in': DEPSGRAPH_RUNNABLE_JOB_STATUSES}},
            projection={'_id': 1},
        )
        job_ids = [job['_id'] for job in jobs]
        if not job_ids:
            log.debug('Returning empty depsgraph')
            return '', 204  # empty response

        log.debug('Requiring jobs to be in %s', job_ids)
        task_query = {
            'manager': manager_id,
            'status': {'$nin': ['active']},
            'job': {'$in': job_ids},
        }

        if modified_since is None:
            # "Clean slate" query.
            task_query['status'] = {'$in': DEPSGRAPH_CLEAN_SLATE_TASK_STATUSES}
        else:
            # Not clean slate, just give all updated tasks assigned to this manager.
            log.debug('Modified-since header: %s', modified_since)
            modified_since = dateutil.parser.parse(modified_since)
            task_query['_updated'] = {'$gt': modified_since}
            task_query['status'] = {'$in': DEPSGRAPH_MODIFIED_SINCE_TASK_STATUSES}
            log.debug('Querying all tasks changed since %s', modified_since)

        cursor = tasks_coll.find(task_query)
        depsgraph = list(cursor)

    if len(depsgraph) == 0:
        log.debug('Returning empty depsgraph')
        if modified_since is not None:
            return '', 304  # Not Modified
    else:
        log.info('Returning depsgraph of %i tasks', len(depsgraph))

    # Update the task status in the database to move queued tasks to claimed-by-manager.
    # This also erases the link to any previously uploaded log files, to ensure the
    # log is fresh and represents the current execution of the task.
    task_query['status'] = 'queued'
    tasks_coll.update_many(task_query, {
        '$set': {'status': 'claimed-by-manager'},
        '$unset': {'log_file': True},
    })

    # Update the returned task statuses. Unfortunately Mongo doesn't support
    # find_and_modify() on multiple documents.
    for task in depsgraph:
        if task['status'] == 'queued':
            task['status'] = 'claimed-by-manager'

    # Must be a dict to convert to BSON.
    respdoc = {
        'depsgraph': depsgraph,
    }
    if request.accept_mimetypes.best == 'application/bson':
        resp = bsonify(respdoc)
    else:
        resp = jsonify(respdoc)

    if depsgraph:
        last_modification = max(task['_updated'] for task in depsgraph)
        log.debug('Last modification was %s', last_modification)
        # We need a format that can handle sub-second precision, which is not provided by the
        # HTTP date format (RFC 1123). This means that we can't use the Last-Modified header, as
        # it may be incorrectly interpreted and rewritten by HaProxy, Apache or other software
        # in the path between client & server.
        resp.headers['X-Flamenco-Last-Updated'] = last_modification.isoformat()
        resp.headers['X-Flamenco-Last-Updated-Format'] = 'ISO-8601'
    return resp
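A sketch of a Manager polling this endpoint incrementally. The header names and status codes come from the code above; the HTTP method, URL path and lack of authentication are assumptions:

import typing

import requests  # assumed Manager-side dependency


def poll_depsgraph(base_url: str, since: typing.Optional[str] = None):
    headers = {}
    if since is not None:
        headers['X-Flamenco-If-Updated-Since'] = since
    resp = requests.get(f'{base_url}/depsgraph', headers=headers)
    if resp.status_code in (204, 304):
        return [], since  # no jobs, or nothing modified since the last poll
    resp.raise_for_status()
    # Use this value as 'since' on the next poll.
    next_since = resp.headers['X-Flamenco-Last-Updated']
    return resp.json()['depsgraph'], next_since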