Example #1
def search_node(auth, **kwargs):
    """

    """
    # Get arguments
    node = Node.load(request.json.get('nodeId'))
    include_public = request.json.get('includePublic')
    size = float(request.json.get('size', '5').strip())
    page = request.json.get('page', 0)
    query = request.json.get('query', '').strip()

    start = (page * size)
    if not query:
        return {'nodes': []}

    # Build ODM query
    title_query = Q('title', 'icontains', query)
    not_deleted_query = Q('is_deleted', 'eq', False)
    visibility_query = Q('contributors', 'eq', auth.user)
    no_folders_query = Q('is_folder', 'eq', False)
    if include_public:
        visibility_query = visibility_query | Q('is_public', 'eq', True)
    odm_query = title_query & not_deleted_query & visibility_query & no_folders_query

    # Exclude current node from query if provided
    if node:
        nin = [node._id] + node.node_ids
        odm_query = (
            odm_query &
            Q('_id', 'nin', nin)
        )

    nodes = Node.find(odm_query)
    count = nodes.count()
    pages = math.ceil(count / size)
    validate_page_num(page, pages)

    return {
        'nodes': [
            _serialize_node_search(each)
            for each in islice(nodes, start, start + size)
            if each.contributors
        ],
        'total': count,
        'pages': pages,
        'page': page
    }
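A note on the pagination above: the result window is simply [page * size, page * size + size) and the page count is ceil(count / size). The following standalone sketch (a hypothetical paginate helper, not part of the codebase) reproduces that arithmetic with integer sizes so it also satisfies islice's integer-bounds requirement:

import math
from itertools import islice

def paginate(items, page=0, size=5):
    # Illustrative only: slice a sequence the same way search_node pages its results.
    size = int(size)                                  # islice needs integer bounds
    count = len(items)
    pages = int(math.ceil(count / float(size)))       # float() keeps the ceil correct under Py2 division
    start = page * size
    return list(islice(items, start, start + size)), count, pages

# paginate(list(range(12)), page=2, size=5) -> ([10, 11], 12, 3)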
Example #2
def search_node(auth, **kwargs):
    """

    """
    # Get arguments
    node = AbstractNode.load(request.json.get('nodeId'))
    include_public = request.json.get('includePublic')
    size = float(request.json.get('size', '5').strip())
    page = request.json.get('page', 0)
    query = request.json.get('query', '').strip()

    start = (page * size)
    if not query:
        return {'nodes': []}

    # Exclude current node from query if provided
    nin = [node.id] + list(node._nodes.values_list('pk', flat=True)) if node else []

    can_view_query = Q(_contributors=auth.user)
    if include_public:
        can_view_query = can_view_query | Q(is_public=True)

    nodes = (AbstractNode.objects
        .filter(
            can_view_query,
            title__icontains=query,
            is_deleted=False)
        .exclude(id__in=nin)
        .exclude(type='osf.collection')
        .exclude(type='osf.quickfilesnode'))

    count = nodes.count()
    pages = math.ceil(count / size)
    validate_page_num(page, pages)

    return {
        'nodes': [
            _serialize_node_search(each)
            for each in islice(nodes, start, start + size)
            if each.contributors
        ],
        'total': count,
        'pages': pages,
        'page': page
    }
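The Django version expresses visibility as a composable Q object: membership in _contributors, OR-ed with is_public=True when public results are requested, then passed to .filter() alongside the title and is_deleted conditions. A reduced sketch of that pattern (hypothetical visible_nodes_q helper; field names taken from the example above):

from django.db.models import Q

def visible_nodes_q(user, include_public=False):
    # Illustrative only: build the same visibility filter used by search_node.
    can_view = Q(_contributors=user)             # nodes the user contributes to
    if include_public:
        can_view = can_view | Q(is_public=True)  # ...or nodes anyone can see
    return can_view

# Applied as, e.g.:
# AbstractNode.objects.filter(visible_nodes_q(auth.user, include_public=True),
#                             title__icontains=query, is_deleted=False)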
Example #3
def search_node(auth, **kwargs):
    """

    """
    # Get arguments
    node = AbstractNode.load(request.json.get('nodeId'))
    include_public = request.json.get('includePublic')
    size = float(request.json.get('size', '5').strip())
    page = request.json.get('page', 0)
    query = request.json.get('query', '').strip()

    start = (page * size)
    if not query:
        return {'nodes': []}

    # Exclude current node from query if provided
    nin = [node.id] + list(node._nodes.values_list('pk', flat=True)) if node else []

    can_view_query = Q(_contributors=auth.user)
    if include_public:
        can_view_query = can_view_query | Q(is_public=True)

    nodes = (AbstractNode.objects
        .filter(
            can_view_query,
            title__icontains=query,
            is_deleted=False)
        .exclude(id__in=nin)
        .exclude(type='osf.collection')
        .exclude(type='osf.quickfilesnode'))

    count = nodes.count()
    pages = math.ceil(count / size)
    validate_page_num(page, pages)

    return {
        'nodes': [
            _serialize_node_search(each)
            for each in islice(nodes, start, start + size)
            if each.contributors
        ],
        'total': count,
        'pages': pages,
        'page': page
    }
Example #4
def search_node(auth, **kwargs):
    """

    """
    # Get arguments
    node = AbstractNode.load(request.json.get('nodeId'))
    include_public = request.json.get('includePublic')
    size = float(request.json.get('size', '5').strip())
    page = request.json.get('page', 0)
    query = request.json.get('query', '').strip()

    start = (page * size)
    if not query:
        return {'nodes': []}

    # Build ODM query
    title_query = Q('title', 'icontains', query)
    not_deleted_query = Q('is_deleted', 'eq', False)
    visibility_query = Q('contributors', 'eq', auth.user)
    if include_public:
        visibility_query = visibility_query | Q('is_public', 'eq', True)
    odm_query = title_query & not_deleted_query & visibility_query

    # Exclude current node from query if provided
    nin = [node.id] + list(node._nodes.values_list('pk',
                                                   flat=True)) if node else []

    nodes = AbstractNode.find(odm_query).exclude(id__in=nin).exclude(
        type='osf.collection')
    count = nodes.count()
    pages = math.ceil(count / size)
    validate_page_num(page, pages)

    return {
        'nodes': [
            _serialize_node_search(each)
            for each in islice(nodes, start, start + size) if each.contributors
        ],
        'total': count,
        'pages': pages,
        'page': page
    }
Example #5
def search_node(auth, **kwargs):
    """

    """
    # Get arguments
    node = Node.load(request.json.get("nodeId"))
    include_public = request.json.get("includePublic")
    size = float(request.json.get("size", "5").strip())
    page = request.json.get("page", 0)
    query = request.json.get("query", "").strip()

    start = page * size
    if not query:
        return {"nodes": []}

    # Build ODM query
    title_query = Q("title", "icontains", query)
    not_deleted_query = Q("is_deleted", "eq", False)
    visibility_query = Q("contributors", "eq", auth.user)
    no_folders_query = Q("is_folder", "eq", False)
    if include_public:
        visibility_query = visibility_query | Q("is_public", "eq", True)
    odm_query = title_query & not_deleted_query & visibility_query & no_folders_query

    # Exclude current node from query if provided
    if node:
        nin = [node._id] + node.node_ids
        odm_query = odm_query & Q("_id", "nin", nin)

    nodes = Node.find(odm_query)
    count = nodes.count()
    pages = math.ceil(count / size)
    validate_page_num(page, pages)

    return {
        "nodes": [_serialize_node_search(each) for each in islice(nodes, start, start + size) if each.contributors],
        "total": count,
        "pages": pages,
        "page": page,
    }
Example #6
def _get_logs(node, count, auth, page=0):
    """

    :param Node node:
    :param int count:
    :param auth:
    :return list: List of serialized logs,
            boolean: if there are more logs

    """
    logs_set = node.get_aggregate_logs_queryset(auth)
    total = logs_set.count()
    pages = math.ceil(total / float(count))
    validate_page_num(page, pages)

    start = page * count
    stop = start + count
    logs = [
        serialize_log(log, auth=auth, anonymous=has_anonymous_link(node, auth))
        for log in logs_set[start:stop]
    ]

    return logs, total, pages
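The explicit float(count) cast in the pages calculation matters under Python 2, where total / count would otherwise floor before math.ceil ever runs and a final partial page would be dropped. A quick arithmetic-only illustration:

import math

total, per_page = 25, 10

# Integer division (Python 2 semantics for `/` on ints): 25 // 10 == 2,
# so ceil(2) == 2 and the third, partial page is lost.
pages_floored = int(math.ceil(total // per_page))        # 2

# With the float cast used in _get_logs: 25 / 10.0 == 2.5, ceil -> 3.
pages_correct = int(math.ceil(total / float(per_page)))  # 3

assert (pages_floored, pages_correct) == (2, 3)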
Example #7
def search_contributor(query,
                       page=0,
                       size=10,
                       exclude=None,
                       current_user=None):
    """Search for contributors to add to a project using elastic search. Request must
    include JSON data with a "query" field.

    :param query: The substring of the username to search for
    :param page: For pagination, the page number to use for results
    :param size: For pagination, the number of results per page
    :param exclude: A list of User objects to exclude from the search
    :param current_user: A User object of the current user

    :return: Dict with a 'users' list (each entry containing the ID, full name,
        most recent employment and education, and profile_image URL of an OSF
        user), plus 'total', 'pages', and 'page' pagination fields

    """
    start = (page * size)
    items = re.split(r'[\s-]+', query)
    exclude = exclude or []
    normalized_items = []
    for item in items:
        try:
            normalized_item = six.u(item)
        except TypeError:
            normalized_item = item
        normalized_item = unicodedata.normalize('NFKD',
                                                normalized_item).encode(
                                                    'ascii', 'ignore')
        normalized_items.append(normalized_item)
    items = normalized_items

    query = '  AND '.join('{}*~'.format(re.escape(item)) for item in items) + \
            ''.join(' NOT id:"{}"'.format(excluded._id) for excluded in exclude)

    results = search(build_query(query, start=start, size=size),
                     index=INDEX,
                     doc_type='user')
    docs = results['results']
    pages = math.ceil(results['counts'].get('user', 0) / size)
    validate_page_num(page, pages)

    users = []
    for doc in docs:
        # TODO: use utils.serialize_user
        user = OSFUser.load(doc['id'])

        # Bail out before touching attributes on a user that failed to load
        if user is None:
            logger.error('Could not load user {0}'.format(doc['id']))
            continue

        if current_user and current_user._id == user._id:
            n_projects_in_common = -1
        elif current_user:
            n_projects_in_common = current_user.n_projects_in_common(user)
        else:
            n_projects_in_common = 0
        if user.is_active:  # exclude merged, unregistered, etc.
            current_employment = None
            education = None

            if user.jobs:
                current_employment = user.jobs[0]['institution']

            if user.schools:
                education = user.schools[0]['institution']

            users.append({
                'fullname': doc['user'],
                'id': doc['id'],
                'employment': current_employment,
                'education': education,
                'social': user.social_links,
                'n_projects_in_common': n_projects_in_common,
                'profile_image_url': profile_image_url(
                    settings.PROFILE_IMAGE_PROVIDER,
                    user,
                    use_ssl=True,
                    size=settings.PROFILE_IMAGE_MEDIUM,
                ),
                'profile_url': user.profile_url,
                'registered': user.is_registered,
                'active': user.is_active
            })

    return {
        'users': users,
        'total': results['counts']['total'],
        'pages': pages,
        'page': page,
    }
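The query string handed to Elasticsearch above is assembled from accent-stripped name fragments, each turned into a fuzzy prefix term (*~), with excluded contributor IDs appended as NOT id:"..." clauses. A standalone sketch of that string construction (hypothetical build_user_query helper, assuming Python 3 text strings so the six.u step is unnecessary; the real code still sends the result through build_query and search):

import re
import unicodedata

def build_user_query(query, exclude_ids=()):
    # Illustrative only: mirror search_contributor's fuzzy-prefix query string.
    items = re.split(r'[\s-]+', query)
    normalized = [
        unicodedata.normalize('NFKD', item).encode('ascii', 'ignore').decode('ascii')
        for item in items
    ]
    return (
        '  AND '.join('{}*~'.format(re.escape(item)) for item in normalized) +
        ''.join(' NOT id:"{}"'.format(_id) for _id in exclude_ids)
    )

# build_user_query('José-María Pérez', exclude_ids=['abc12'])
# -> 'Jose*~  AND Maria*~  AND Perez*~ NOT id:"abc12"'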
Example #8
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
    """Search for contributors to add to a project using elastic search. Request must
    include JSON data with a "query" field.

    :param query: The substring of the username to search for
    :param page: For pagination, the page number to use for results
    :param size: For pagination, the number of results per page
    :param exclude: A list of User objects to exclude from the search
    :param current_user: A User object of the current user

    :return: Dict with a 'users' list (each entry containing the ID, full name,
        most recent employment and education, and profile_image URL of an OSF
        user), plus 'total', 'pages', and 'page' pagination fields

    """
    start = (page * size)
    items = re.split(r'[\s-]+', query)
    exclude = exclude or []
    normalized_items = []
    for item in items:
        try:
            normalized_item = six.u(item)
        except TypeError:
            normalized_item = item
        normalized_item = unicodedata.normalize('NFKD', normalized_item).encode('ascii', 'ignore')
        normalized_items.append(normalized_item)
    items = normalized_items

    query = '  AND '.join('{}*~'.format(re.escape(item)) for item in items) + \
            ''.join(' NOT id:"{}"'.format(excluded._id) for excluded in exclude)

    results = search(build_query(query, start=start, size=size), index=INDEX, doc_type='user')
    docs = results['results']
    pages = math.ceil(results['counts'].get('user', 0) / size)
    validate_page_num(page, pages)

    users = []
    for doc in docs:
        # TODO: use utils.serialize_user
        user = OSFUser.load(doc['id'])

        # Bail out before touching attributes on a user that failed to load
        if user is None:
            logger.error('Could not load user {0}'.format(doc['id']))
            continue

        if current_user and current_user._id == user._id:
            n_projects_in_common = -1
        elif current_user:
            n_projects_in_common = current_user.n_projects_in_common(user)
        else:
            n_projects_in_common = 0
        if user.is_active:  # exclude merged, unregistered, etc.
            current_employment = None
            education = None

            if user.jobs:
                current_employment = user.jobs[0]['institution']

            if user.schools:
                education = user.schools[0]['institution']

            users.append({
                'fullname': doc['user'],
                'id': doc['id'],
                'employment': current_employment,
                'education': education,
                'social': user.social_links,
                'n_projects_in_common': n_projects_in_common,
                'profile_image_url': profile_image_url(settings.PROFILE_IMAGE_PROVIDER,
                                                       user,
                                                       use_ssl=True,
                                                       size=settings.PROFILE_IMAGE_MEDIUM),
                'profile_url': user.profile_url,
                'registered': user.is_registered,
                'active': user.is_active
            })

    return {
        'users': users,
        'total': results['counts']['total'],
        'pages': pages,
        'page': page,
    }
Example #9
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
    """Search for contributors to add to a project using elastic search. Request must
    include JSON data with a "query" field.

    :param query: The substring of the username to search for
    :param page: For pagination, the page number to use for results
    :param size: For pagination, the number of results per page
    :param exclude: A list of User objects to exclude from the search
    :param current_user: A User object of the current user

    :return: Dict with a "users" list (each entry containing the ID, full name,
        most recent employment and education, and gravatar URL of an OSF user),
        plus "total", "pages", and "page" pagination fields

    """
    start = page * size
    items = re.split(r"[\s-]+", query)
    exclude = exclude or []
    normalized_items = []
    for item in items:
        try:
            normalized_item = six.u(item)
        except TypeError:
            normalized_item = item
        normalized_item = unicodedata.normalize("NFKD", normalized_item).encode("ascii", "ignore")
        normalized_items.append(normalized_item)
    items = normalized_items

    query = "  AND ".join("{}*~".format(re.escape(item)) for item in items) + "".join(
        ' NOT id:"{}"'.format(excluded._id) for excluded in exclude
    )

    results = search(build_query(query, start=start, size=size), index=INDEX, doc_type="user")
    docs = results["results"]
    pages = math.ceil(results["counts"].get("user", 0) / size)
    validate_page_num(page, pages)

    users = []
    for doc in docs:
        # TODO: use utils.serialize_user
        user = User.load(doc["id"])

        # Bail out before touching attributes on a user that failed to load
        if user is None:
            logger.error("Could not load user {0}".format(doc["id"]))
            continue

        if current_user and current_user._id == user._id:
            n_projects_in_common = -1
        elif current_user:
            n_projects_in_common = current_user.n_projects_in_common(user)
        else:
            n_projects_in_common = 0
        if user.is_active:  # exclude merged, unregistered, etc.
            current_employment = None
            education = None

            if user.jobs:
                current_employment = user.jobs[0]["institution"]

            if user.schools:
                education = user.schools[0]["institution"]

            users.append(
                {
                    "fullname": doc["user"],
                    "id": doc["id"],
                    "employment": current_employment,
                    "education": education,
                    "n_projects_in_common": n_projects_in_common,
                    "gravatar_url": gravatar(user, use_ssl=True, size=settings.PROFILE_IMAGE_MEDIUM),
                    "profile_url": user.profile_url,
                    "registered": user.is_registered,
                    "active": user.is_active,
                }
            )

    return {"users": users, "total": results["counts"]["total"], "pages": pages, "page": page}