Code example #1
def update_collected_metadata(cgm_id, collection_id=None, index=None, op='update'):
    index = index or settings.ELASTIC_INDEX

    if settings.USE_CELERY:
        enqueue_task(search_engine.update_cgm_async.s(cgm_id, collection_id=collection_id, op=op, index=index))
    else:
        search_engine.update_cgm_async(cgm_id, collection_id=collection_id, op=op, index=index)
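A note on the pattern used throughout these examples: every call site builds a Celery signature with .s(...) rather than calling the task. A signature bundles the task with its arguments without executing anything, so it can be handed to enqueue_task and sent to the broker later. A minimal standalone illustration (the add task here is hypothetical):

from celery import shared_task

@shared_task
def add(x, y):
    return x + y

sig = add.s(2, 3)    # a Signature: task plus bound arguments; nothing runs yet
sig.apply_async()    # now it is sent to the broker
add.s(2, 3).delay()  # equivalent shorthand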
Code example #2
File: search.py Project: CenterForOpenScience/osf.io
def update_collected_metadata(cgm_id, collection_id=None, index=None, op='update'):
    index = index or settings.ELASTIC_INDEX

    if settings.USE_CELERY:
        enqueue_task(search_engine.update_cgm_async.s(cgm_id, collection_id=collection_id, op=op, index=index))
    else:
        search_engine.update_cgm_async(cgm_id, collection_id=collection_id, op=op, index=index)
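None of these helpers call apply_async directly; they go through enqueue_task, which in osf.io appears to defer queued signatures until the surrounding request finishes, so that database writes are committed before workers run (several snippets below say as much in their comments). A minimal sketch of that idea, assuming a request-scoped queue and a teardown hook; the names below are illustrative, not osf.io's exact API:

import threading

_local = threading.local()

def _queue():
    if not hasattr(_local, 'tasks'):
        _local.tasks = []
    return _local.tasks

def enqueue_task(signature):
    """Defer a Celery signature until the current request is torn down."""
    if signature not in _queue():
        _queue().append(signature)

def flush_queued_tasks():
    """Call from a request-teardown hook: send everything that was queued."""
    tasks, _local.tasks = _queue(), []
    for signature in tasks:
        signature.apply_async()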
Code example #3
    def save(self, *args, **kwargs):
        first_save = self.id is None
        if self.is_bookmark_collection:
            if first_save and self.creator.collection_set.filter(
                    is_bookmark_collection=True,
                    deleted__isnull=True).exists():
                raise IntegrityError(
                    'Each user cannot have more than one Bookmark collection.')
            if self.title != 'Bookmarks':
                # Bookmark collections are always named 'Bookmarks'
                self.title = 'Bookmarks'
        saved_fields = self.get_dirty_fields() or []
        ret = super(Collection, self).save(*args, **kwargs)

        if first_save:
            # Set defaults for M2M
            content_type = ContentType.objects.filter(
                app_label='osf',
                model__in=['abstractnode', 'collection', 'preprint'])

            self.collected_types.add(*content_type)

            # Set up initial permissions
            self.update_group_permissions()
            self.get_group(ADMIN).user_set.add(self.creator)

        elif 'is_public' in saved_fields:
            from website.collections.tasks import on_collection_updated
            enqueue_task(on_collection_updated.s(self._id))

        return ret
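The first_save / get_dirty_fields() combination in this save override comes from the django-dirtyfields mixin: get_dirty_fields() reports which fields changed since the instance was loaded, so post-save side effects only fire on real transitions. A toy model showing the shape of the API (the model and field names are illustrative):

from dirtyfields import DirtyFieldsMixin
from django.db import models

class Article(DirtyFieldsMixin, models.Model):
    title = models.CharField(max_length=200)
    is_public = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        first_save = self.pk is None
        dirty = self.get_dirty_fields()  # e.g. {'is_public': <old value>}
        super().save(*args, **kwargs)
        if not first_save and 'is_public' in dirty:
            pass  # visibility changed: enqueue a follow-up task here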
Code example #4
def update_status_on_delete(node):
    from website.identifiers.tasks import update_ezid_metadata_on_change

    for preprint in node.preprints.all():
        enqueue_task(update_ezid_metadata_on_change.s(preprint._id, status='unavailable'))

    if node.get_identifier('doi'):
        enqueue_task(update_ezid_metadata_on_change.s(node._id, status='unavailable'))
Code example #5
    def save(self, *args, **kwargs):
        first_save = not bool(self.pk)
        saved_fields = self.get_dirty_fields() or []
        ret = super(PreprintService, self).save(*args, **kwargs)

        if (not first_save and 'is_published' in saved_fields) or self.is_published:
            enqueue_task(on_preprint_updated.s(self._id))
        return ret
Code example #6
File: preprint_service.py Project: adlius/osf.io
    def save(self, *args, **kwargs):
        first_save = not bool(self.pk)
        saved_fields = self.get_dirty_fields() or []
        ret = super(PreprintService, self).save(*args, **kwargs)

        if (not first_save and 'is_published' in saved_fields) or self.is_published:
            enqueue_task(on_preprint_updated.s(self._id))
        return ret
Code example #7
File: views.py Project: CenterForOpenScience/osf.io
    def get_object(self):
        try:
            submission = ChronosSubmission.objects.get(publication_id=self.kwargs['submission_id'])
        except ChronosSubmission.DoesNotExist:
            raise NotFound
        else:
            if submission.modified < chronos_submission_stale_time():
                enqueue_task(update_submissions_status_async.s([submission.id]))
            self.check_object_permissions(self.request, submission)
            return submission
Code example #8
def update_user(user, index=None, async_update=True):
    index = index or settings.ELASTIC_INDEX
    if async_update:
        user_id = user.id
        if settings.USE_CELERY:
            enqueue_task(search_engine.update_user_async.s(user_id, index=index))
        else:
            search_engine.update_user_async(user_id, index=index)
    else:
        search_engine.update_user(user, index=index)
Code example #9
File: search.py Project: CenterForOpenScience/osf.io
def update_user(user, index=None, async_update=True):
    index = index or settings.ELASTIC_INDEX
    if async_update:
        user_id = user.id
        if settings.USE_CELERY:
            enqueue_task(search_engine.update_user_async.s(user_id, index=index))
        else:
            search_engine.update_user_async(user_id, index=index)
    else:
        search_engine.update_user(user, index=index)
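Note that update_user passes user.id into the signature rather than the user object: Celery arguments must survive serialization, and re-fetching inside the worker guarantees the indexed document reflects committed state. The task side presumably looks roughly like this (the body is an assumption inferred from the call sites above):

from celery import shared_task

@shared_task
def update_user_async(user_id, index=None):
    from osf.models import OSFUser
    user = OSFUser.objects.get(id=user_id)  # re-fetch fresh, committed state
    update_user(user, index=index, async_update=False)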
Code example #10
File: views.py Project: jwalz/osf.io
    def get_object(self):
        try:
            submission = ChronosSubmission.objects.get(publication_id=self.kwargs['submission_id'])
        except ChronosSubmission.DoesNotExist:
            raise NotFound
        else:
            if submission.modified < chronos_submission_stale_time():
                enqueue_task(update_submissions_status_async.s([submission.id]))
            self.check_object_permissions(self.request, submission)
            return submission
Code example #11
File: search.py Project: CenterForOpenScience/osf.io
def update_preprint(preprint, index=None, bulk=False, async_update=True, saved_fields=None):
    kwargs = {
        'index': index,
        'bulk': bulk
    }
    if async_update:
        preprint_id = preprint._id
        # We need the transaction to be committed before trying to run celery tasks.
        if settings.USE_CELERY:
            enqueue_task(search_engine.update_preprint_async.s(preprint_id=preprint_id, **kwargs))
        else:
            search_engine.update_preprint_async(preprint_id=preprint_id, **kwargs)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_preprint(preprint, **kwargs)
Code example #12
def update_preprint(preprint, index=None, bulk=False, async_update=True, saved_fields=None):
    kwargs = {
        'index': index,
        'bulk': bulk
    }
    if async_update:
        preprint_id = preprint._id
        # We need the transaction to be committed before trying to run celery tasks.
        if settings.USE_CELERY:
            enqueue_task(search_engine.update_preprint_async.s(preprint_id=preprint_id, **kwargs))
        else:
            search_engine.update_preprint_async(preprint_id=preprint_id, **kwargs)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_preprint(preprint, **kwargs)
Code example #13
File: listeners.py Project: kch8qx/osf.io
def after_register(src, dst, user):
    """Blinker listener for registration initiations. Enqueqes a chain
    of archive tasks for the current node and its descendants

    :param src: Node being registered
    :param dst: registration Node
    :param user: registration initiator
    """
    # Prevent circular import with app.py
    from website.archiver import tasks

    archiver_utils.before_archive(dst, user)
    if dst.root != dst:  # if not top-level registration
        return
    archive_tasks = [tasks.archive(job_pk=t.archive_job._id) for t in dst.node_and_primary_descendants()]
    handlers.enqueue_task(celery.chain(archive_tasks))
Code example #14
File: views.py Project: CenterForOpenScience/osf.io
    def get_default_queryset(self):
        user = get_user_auth(self.request).user
        preprint_contributors = Preprint.load(self.kwargs['preprint_id'])._contributors
        queryset = ChronosSubmission.objects.filter(preprint__guids___id=self.kwargs['preprint_id'])

        # Get the list of stale submissions and queue a task to update them
        update_list_id = queryset.filter(
            modified__lt=chronos_submission_stale_time(),
        ).values_list('id', flat=True)
        if len(update_list_id) > 0:
            enqueue_task(update_submissions_status_async.s(list(update_list_id)))

        # If the user is a contributor on this preprint, show all submissions
        # Otherwise, only show submissions in status 3 or 4 (accepted or published)
        if user and preprint_contributors.filter(id=user.id).exists():
            return queryset
        else:
            return queryset.filter(status__in=[3, 4])
Code example #15
def update_node(node, index=None, bulk=False, async_update=True, saved_fields=None):
    kwargs = {
        'index': index,
        'bulk': bulk
    }
    if async_update:
        node_id = node._id
        # We need the transaction to be committed before trying to run celery tasks.
        # For example, when updating a Node's privacy, is_public must be True in the
        # database in order for the method that updates the Node's elastic search document
        # to run correctly.
        if settings.USE_CELERY:
            enqueue_task(search_engine.update_node_async.s(node_id=node_id, **kwargs))
        else:
            search_engine.update_node_async(node_id=node_id, **kwargs)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_node(node, **kwargs)
Code example #16
def update_group(group,
                 index=None,
                 bulk=False,
                 async_update=True,
                 saved_fields=None,
                 deleted_id=None):
    kwargs = {'index': index, 'bulk': bulk, 'deleted_id': deleted_id}
    if async_update:
        # We need the transaction to be committed before trying to run celery tasks.
        if settings.USE_CELERY:
            enqueue_task(
                search_engine.update_group_async.s(group_id=group._id,
                                                   **kwargs))
        else:
            search_engine.update_group_async(group_id=group._id, **kwargs)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_group(group, **kwargs)
Code example #17
File: search.py Project: CenterForOpenScience/osf.io
def update_node(node, index=None, bulk=False, async_update=True, saved_fields=None):
    kwargs = {
        'index': index,
        'bulk': bulk
    }
    if async_update:
        node_id = node._id
        # We need the transaction to be committed before trying to run celery tasks.
        # For example, when updating a Node's privacy, is_public must be True in the
        # database in order for the method that updates the Node's elastic search document
        # to run correctly.
        if settings.USE_CELERY:
            enqueue_task(search_engine.update_node_async.s(node_id=node_id, **kwargs))
        else:
            search_engine.update_node_async(node_id=node_id, **kwargs)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_node(node, **kwargs)
Code example #18
def after_register(src, dst, user):
    """Blinker listener for registration initiations. Enqueqes a chain
    of archive tasks for the current node and its descendants

    :param src: Node being registered
    :param dst: registration Node
    :param user: registration initiator
    """
    # Prevent circular import with app.py
    from website.archiver import tasks
    archiver_utils.before_archive(dst, user)
    if dst.root != dst:  # if not top-level registration
        return
    archive_tasks = [
        tasks.archive(job_pk=t.archive_job._id)
        for t in dst.node_and_primary_descendants()
    ]
    handlers.enqueue_task(celery.chain(archive_tasks))
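celery.chain links the per-descendant archive tasks so they execute sequentially, each starting only after the previous one finishes. A standalone illustration of the same shape (the step task is hypothetical):

from celery import chain, shared_task

@shared_task
def step(previous=None):
    return 'done'

# Like archive_tasks above: a list of signatures joined into one workflow
# that the broker runs strictly in order, feeding each result forward.
workflow = chain(step.s(), step.s(), step.s())
workflow.apply_async()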
Code example #19
File: views.py Project: jwalz/osf.io
    def get_default_queryset(self):
        user = get_user_auth(self.request).user
        preprint_contributors = Preprint.load(self.kwargs['preprint_id'])._contributors
        queryset = ChronosSubmission.objects.filter(preprint__guids___id=self.kwargs['preprint_id'])

        # Get the list of stale submissions and queue a task to update them
        update_list_id = queryset.filter(
            modified__lt=chronos_submission_stale_time(),
        ).values_list('id', flat=True)
        if len(update_list_id) > 0:
            enqueue_task(update_submissions_status_async.s(list(update_list_id)))

        # If the user is a contributor on this preprint, show all submissions
        # Otherwise, only show submissions in status 3 or 4 (accepted or published)
        if user and preprint_contributors.filter(id=user.id).exists():
            return queryset
        else:
            return queryset.filter(status__in=[3, 4])
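chronos_submission_stale_time() serves as a cutoff: any submission modified before it is treated as stale and refreshed asynchronously. It presumably computes "now minus a staleness window"; a hedged sketch of that idea (the window constant is an assumption, osf.io keeps the real value in its settings):

from datetime import timedelta
from django.utils import timezone

CHRONOS_STALE_SECONDS = 60 * 60  # illustrative one-hour window

def chronos_submission_stale_time():
    # Anything with modified < this timestamp is due for a status refresh.
    return timezone.now() - timedelta(seconds=CHRONOS_STALE_SECONDS)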
Code example #20
File: preprint_service.py Project: adlius/osf.io
    def set_published(self, published, auth, save=False):
        if not self.node.has_permission(auth.user, ADMIN):
            raise PermissionsError('Only admins can publish a preprint.')

        if self.is_published and not published:
            raise ValueError('Cannot unpublish preprint.')

        self.is_published = published

        if published:
            if not (self.node.preprint_file and self.node.preprint_file.node == self.node):
                raise ValueError('Preprint node is not a valid preprint; cannot publish.')
            if not self.provider:
                raise ValueError('Preprint provider not specified; cannot publish.')
            if not self.subjects.exists():
                raise ValueError('Preprint must have at least one subject to be published.')
            self.date_published = timezone.now()
            self.node._has_abandoned_preprint = False

            self.node.add_log(
                action=NodeLog.PREPRINT_INITIATED,
                params={
                    'preprint': self._id
                },
                auth=auth,
                save=False,
            )

            if not self.node.is_public:
                self.node.set_privacy(
                    self.node.PUBLIC,
                    auth=None,
                    log=True
                )

            # This should be called after all fields for EZID metadata have been set
            enqueue_task(get_and_set_preprint_identifiers.s(self._id))

        if save:
            self.node.save()
            self.save()
Code example #21
    def save(self, *args, **kwargs):
        first_save = self.id is None
        if self.is_bookmark_collection:
            if first_save and self.creator.collection_set.filter(is_bookmark_collection=True, deleted__isnull=True).exists():
                raise IntegrityError('Each user cannot have more than one Bookmark collection.')
            if self.title != 'Bookmarks':
                # Bookmark collections are always named 'Bookmarks'
                self.title = 'Bookmarks'
        saved_fields = self.get_dirty_fields() or []
        ret = super(Collection, self).save(*args, **kwargs)

        if first_save:
            # Set defaults for M2M
            self.collected_types = ContentType.objects.filter(app_label='osf', model__in=['abstractnode', 'collection', 'preprint'])
            # Set up initial permissions
            self.update_group_permissions()
            self.get_group('admin').user_set.add(self.creator)

        elif 'is_public' in saved_fields:
            from website.collections.tasks import on_collection_updated
            enqueue_task(on_collection_updated.s(self._id))

        return ret
Code example #22
File: __init__.py Project: jwalz/osf.io
def authenticate(user, access_token, response, user_updates=None):
    data = session.data if session._get_current_object() else {}
    data.update({
        'auth_user_username': user.username,
        'auth_user_id': user._primary_key,
        'auth_user_fullname': user.fullname,
        'auth_user_access_token': access_token,
    })
    print_cas_log(
        f'Finalizing authentication - data updated: user=[{user._id}]',
        LogLevel.INFO)
    enqueue_task(
        update_user_from_activity.s(user._id,
                                    timezone.now().timestamp(),
                                    cas_login=True,
                                    updates=user_updates))
    print_cas_log(
        f'Finalizing authentication - user update queued: user=[{user._id}]',
        LogLevel.INFO)
    response = create_session(response, data=data)
    print_cas_log(
        f'Finalizing authentication - session created: user=[{user._id}]',
        LogLevel.INFO)
    return response
Code example #23
File: search.py Project: CenterForOpenScience/osf.io
def update_contributors_async(user_id):
    """Async version of update_contributors above"""
    if settings.USE_CELERY:
        enqueue_task(search_engine.update_contributors_async.s(user_id))
    else:
        search_engine.update_contributors_async(user_id)
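The recurring settings.USE_CELERY branch is what keeps these helpers usable without a broker: a Celery task object is still a plain callable, so invoking it directly runs the body synchronously in-process, while .s(...) builds the deferred form. For example (hypothetical task):

from celery import shared_task

@shared_task
def ping(value):
    return value

ping('hello')    # synchronous: executes right here and returns 'hello'
ping.s('hello')  # asynchronous path: a signature suitable for enqueue_task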
Code example #24
def before_request():
    # TODO: Fix circular import
    from framework.auth.core import get_user
    from framework.auth import cas
    from framework.utils import throttle_period_expired
    Session = apps.get_model('osf.Session')

    # Central Authentication Server Ticket Validation and Authentication
    ticket = request.args.get('ticket')
    if ticket:
        service_url = furl.furl(request.url)
        service_url.args.pop('ticket')
        # Attempt to authenticate with CAS, and return a proper redirect response
        return cas.make_response_from_ticket(ticket=ticket,
                                             service_url=service_url.url)

    if request.authorization:
        user = get_user(email=request.authorization.username,
                        password=request.authorization.password)
        # Create an empty session
        # TODO: Shouldn't need to create a session for Basic Auth
        user_session = Session()
        set_session(user_session)

        if user:
            user_addon = user.get_addon('twofactor')
            if user_addon and user_addon.is_confirmed:
                otp = request.headers.get('X-OSF-OTP')
                if otp is None or not user_addon.verify_code(otp):
                    # Two-factor OTP code missing or invalid.
                    user_session.data[
                        'auth_error_code'] = http_status.HTTP_401_UNAUTHORIZED
                    return
            user_session.data['auth_user_username'] = user.username
            user_session.data['auth_user_fullname'] = user.fullname
            if user_session.data.get('auth_user_id',
                                     None) != user._primary_key:
                user_session.data['auth_user_id'] = user._primary_key
                user_session.save()
        else:
            # Invalid key: Not found in database
            user_session.data[
                'auth_error_code'] = http_status.HTTP_401_UNAUTHORIZED
        return

    cookie = request.cookies.get(settings.COOKIE_NAME)
    if cookie:
        try:
            session_id = itsdangerous.Signer(
                settings.SECRET_KEY).unsign(cookie)
            user_session = Session.load(session_id) or Session(_id=session_id)
        except itsdangerous.BadData:
            return
        if not throttle_period_expired(user_session.created,
                                       settings.OSF_SESSION_TIMEOUT):
            # Update date last login when making non-api requests
            from framework.auth.tasks import update_user_from_activity
            if user_session.data.get(
                    'auth_user_id') and 'api' not in request.url:
                enqueue_task(
                    update_user_from_activity.s(
                        user_session.data.get('auth_user_id'),
                        timezone.now().timestamp(),
                        cas_login=False))
            set_session(user_session)
        else:
            remove_session(user_session)
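The cookie branch above leans on itsdangerous: the session id stored in the cookie is signed with the server secret, and unsign() verifies the signature before returning the original value, raising BadData on tampering. In isolation:

import itsdangerous

signer = itsdangerous.Signer('server-secret-key')  # illustrative key
cookie = signer.sign('session-id-123')             # b'session-id-123.<signature>'
try:
    session_id = signer.unsign(cookie)             # b'session-id-123'
except itsdangerous.BadData:
    session_id = None  # tampered or malformed cookie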
Code example #25
File: search.py Project: adlius/osf.io
    return search_engine.search(query, index=index, doc_type=doc_type, raw=raw)

@requires_search
def update_node(node, index=None, bulk=False, async_update=True, saved_fields=None):
    kwargs = {
        'index': index,
        'bulk': bulk
    }
    if async_update:
        node_id = node._id
        # We need the transaction to be committed before trying to run celery tasks.
        # For example, when updating a Node's privacy, is_public must be True in the
        # database in order for the method that updates the Node's elastic search document
        # to run correctly.
        if settings.USE_CELERY:
            enqueue_task(search_engine.update_node_async.s(node_id=node_id, **kwargs))
        else:
            search_engine.update_node_async(node_id=node_id, **kwargs)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_node(node, **kwargs)

@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
    index = index or settings.ELASTIC_INDEX
    search_engine.bulk_update_nodes(serialize, nodes, index=index)

@requires_search
def delete_node(node, index=None):
    index = index or settings.ELASTIC_INDEX
    doc_type = node.project_or_component
Code example #26
def update_contributors_async(user_id):
    """Async version of update_contributors above"""
    if settings.USE_CELERY:
        enqueue_task(search_engine.update_contributors_async.s(user_id))
    else:
        search_engine.update_contributors_async(user_id)
Code example #27
File: search.py Project: digideskio/osf.io
@requires_search
def search(query, index=None, doc_type=None):
    index = index or settings.ELASTIC_INDEX
    return search_engine.search(query, index=index, doc_type=doc_type)

@requires_search
def update_node(node, index=None, bulk=False, async_update=True):
    if async_update:
        node_id = node._id
        # We need the transaction to be committed before trying to run celery tasks.
        # For example, when updating a Node's privacy, is_public must be True in the
        # database in order for the method that updates the Node's elastic search document
        # to run correctly.
        if settings.USE_CELERY:
            enqueue_task(search_engine.update_node_async.s(node_id=node_id, index=index, bulk=bulk))
        else:
            search_engine.update_node_async(node_id=node_id, index=index, bulk=bulk)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_node(node, index=index, bulk=bulk)

@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
    index = index or settings.ELASTIC_INDEX
    search_engine.bulk_update_nodes(serialize, nodes, index=index)

@requires_search
def delete_node(node, index=None):
    index = index or settings.ELASTIC_INDEX
    doc_type = node.project_or_component
Code example #28
File: listeners.py Project: adlius/osf.io
def update_status_on_delete(node):
    from website.preprints.tasks import update_ezid_metadata_on_change

    for preprint in node.preprints.all():
        enqueue_task(update_ezid_metadata_on_change.s(preprint, status='unavailable'))
Code example #29
File: views.py Project: jwalz/osf.io
    def put(self, request, *args, **kwargs):
        provider_id = kwargs['provider_id']
        provider = get_object_or_error(RegistrationProvider, provider_id,
                                       request)
        if not provider.allow_bulk_uploads:
            return JsonResponse(
                {'errors': [{
                    'type': 'bulkUploadNotAllowed'
                }]},
                status=405,
                content_type='application/vnd.api+json; application/json',
            )
        user_id = self.request.user._id
        file_size_limit = BULK_SETTINGS['DEFAULT_BULK_LIMIT'] * 10000
        file_obj = request.data['file']

        if file_obj.size > file_size_limit:
            return JsonResponse(
                {'errors': [{
                    'type': 'sizeExceedsLimit'
                }]},
                status=413,
                content_type='application/vnd.api+json; application/json',
            )

        if file_obj.content_type != 'text/csv':
            return JsonResponse(
                {'errors': [{
                    'type': 'invalidFileType'
                }]},
                status=413,
                content_type='application/vnd.api+json; application/json',
            )

        file_md5 = self.get_hash(file_obj)
        if RegistrationBulkUploadJob.objects.filter(
                payload_hash=file_md5).exists():
            return JsonResponse(
                {'errors': [{
                    'type': 'bulkUploadJobExists'
                }]},
                status=409,
                content_type='application/vnd.api+json; application/json',
            )
        try:
            upload = BulkRegistrationUpload(file_obj, provider_id)
            upload.validate()
            errors = upload.errors
        except InvalidHeadersError as e:
            invalid_headers = [
                str(detail) for detail in e.detail['invalid_headers']
            ]
            missing_headers = [
                str(detail) for detail in e.detail['missing_headers']
            ]
            return JsonResponse(
                {
                    'errors': [{
                        'type': 'invalidColumnId',
                        'invalidHeaders': invalid_headers,
                        'missingHeaders': missing_headers
                    }]
                },
                status=400,
                content_type='application/vnd.api+json; application/json',
            )
        except DuplicateHeadersError as e:
            duplicate_headers = [
                str(detail) for detail in e.detail['duplicate_headers']
            ]
            return JsonResponse(
                {
                    'errors': [{
                        'type': 'duplicateColumnId',
                        'duplicateHeaders': duplicate_headers
                    }]
                },
                status=400,
                content_type='application/vnd.api+json; application/json',
            )
        except FileUploadNotSupportedError:
            return JsonResponse(
                {'errors': [{
                    'type': 'fileUploadNotSupported'
                }]},
                status=400,
                content_type='application/vnd.api+json; application/json',
            )
        except NotFound:
            return JsonResponse(
                {'errors': [{
                    'type': 'invalidSchemaId'
                }]},
                status=404,
                content_type='application/vnd.api+json; application/json',
            )

        if errors:
            return JsonResponse(
                {'errors': errors},
                status=400,
                content_type='application/vnd.api+json; application/json',
            )
        parsed = upload.get_parsed()
        enqueue_task(
            prepare_for_registration_bulk_creation.s(file_md5,
                                                     user_id,
                                                     provider_id,
                                                     parsed,
                                                     dry_run=False))
        return Response(status=204)
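self.get_hash(file_obj) deduplicates uploads by payload digest before any parsing happens. A plausible implementation (this helper's body is an assumption) reads the file in chunks so large CSVs never need to fit in memory:

import hashlib

def get_hash(file_obj, chunk_size=8192):
    md5 = hashlib.md5()
    for chunk in iter(lambda: file_obj.read(chunk_size), b''):
        md5.update(chunk)
    file_obj.seek(0)  # leave the file readable for the parser afterwards
    return md5.hexdigest()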
Code example #30
File: listeners.py Project: nakajimahiroyuki/osf.io
def update_status_on_delete(node):
    from website.preprints.tasks import update_ezid_metadata_on_change

    for preprint in node.preprints.all():
        enqueue_task(
            update_ezid_metadata_on_change.s(preprint, status='unavailable'))
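Contrast this fork with code example #4, which passes preprint._id instead of the model instance. With Celery's default JSON serializer only primitives survive the trip to the broker, so the id-passing form is the one that generalizes:

# Preferred: serialize the primitive id; the worker re-loads the model.
enqueue_task(update_ezid_metadata_on_change.s(preprint._id, status='unavailable'))

# Fragile: a model instance requires pickle serialization and can carry
# stale state into the worker.
enqueue_task(update_ezid_metadata_on_change.s(preprint, status='unavailable'))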
Code example #31
    def save(self, *args, **kwargs):
        saved_fields = super(PreprintService, self).save(*args, **kwargs)
        if saved_fields:
            enqueue_task(on_preprint_updated.s(self._id))
Code example #32
def update_status_on_delete(node):
    from website.identifiers.tasks import update_doi_metadata_on_change

    if node.get_identifier('doi'):
        enqueue_task(update_doi_metadata_on_change.s(node._id))
Code example #33
File: search.py Project: lambroisie/osf.io
def search(query, index=None, doc_type=None):
    index = index or settings.ELASTIC_INDEX
    return search_engine.search(query, index=index, doc_type=doc_type)


@requires_search
def update_node(node, index=None, bulk=False, async_update=True):
    if async_update:
        node_id = node._id
        # We need the transaction to be committed before trying to run celery tasks.
        # For example, when updating a Node's privacy, is_public must be True in the
        # database in order for the method that updates the Node's elastic search document
        # to run correctly.
        if settings.USE_CELERY:
            enqueue_task(
                search_engine.update_node_async.s(node_id=node_id,
                                                  index=index,
                                                  bulk=bulk))
        else:
            search_engine.update_node_async(node_id=node_id,
                                            index=index,
                                            bulk=bulk)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_node(node, index=index, bulk=bulk)


@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
    index = index or settings.ELASTIC_INDEX
    search_engine.bulk_update_nodes(serialize, nodes, index=index)
Code example #34
def search(query, index=None, doc_type=None, raw=None):
    index = index or settings.ELASTIC_INDEX
    return search_engine.search(query, index=index, doc_type=doc_type, raw=raw)


@requires_search
def update_node(node, index=None, bulk=False, async_update=True, saved_fields=None):
    kwargs = {'index': index, 'bulk': bulk}
    if async_update:
        node_id = node._id
        # We need the transaction to be committed before trying to run celery tasks.
        # For example, when updating a Node's privacy, is_public must be True in the
        # database in order for the method that updates the Node's elastic search document
        # to run correctly.
        if settings.USE_CELERY:
            enqueue_task(
                search_engine.update_node_async.s(node_id=node_id, **kwargs))
        else:
            search_engine.update_node_async(node_id=node_id, **kwargs)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_node(node, **kwargs)


@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
    index = index or settings.ELASTIC_INDEX
    search_engine.bulk_update_nodes(serialize, nodes, index=index)


@requires_search
def delete_node(node, index=None):
Code example #35
def update_status_on_delete(node):
    from website.identifiers.tasks import update_doi_metadata_on_change

    if node.get_identifier('doi'):
        enqueue_task(update_doi_metadata_on_change.s(node._id, status='unavailable'))
Code example #36
File: search.py Project: scooley/osf.io
def search(query, index=None, doc_type=None, raw=None):
    index = index or settings.ELASTIC_INDEX
    return search_engine.search(query, index=index, doc_type=doc_type, raw=raw)


@requires_search
def update_node(node, index=None, bulk=False, async_update=True, saved_fields=None):
    kwargs = {'index': index, 'bulk': bulk}
    if async_update:
        node_id = node._id
        # We need the transaction to be committed before trying to run celery tasks.
        # For example, when updating a Node's privacy, is_public must be True in the
        # database in order for the method that updates the Node's elastic search document
        # to run correctly.
        if settings.USE_CELERY:
            enqueue_task(
                search_engine.update_node_async.s(node_id=node_id, **kwargs))
        else:
            search_engine.update_node_async(node_id=node_id, **kwargs)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_node(node, **kwargs)


@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
    index = index or settings.ELASTIC_INDEX
    search_engine.bulk_update_nodes(serialize, nodes, index=index)


@requires_search
def delete_node(node, index=None):
Code example #37
File: search.py Project: baylee-d/osf.io
    return search_engine.search(query, index=index, doc_type=doc_type, raw=raw)

@requires_search
def update_node(node, index=None, bulk=False, async_update=True, saved_fields=None):
    kwargs = {
        'index': index,
        'bulk': bulk
    }
    if async_update:
        node_id = node._id
        # We need the transaction to be committed before trying to run celery tasks.
        # For example, when updating a Node's privacy, is_public must be True in the
        # database in order for the method that updates the Node's elastic search document
        # to run correctly.
        if settings.USE_CELERY:
            enqueue_task(search_engine.update_node_async.s(node_id=node_id, **kwargs))
        else:
            search_engine.update_node_async(node_id=node_id, **kwargs)
    else:
        index = index or settings.ELASTIC_INDEX
        return search_engine.update_node(node, **kwargs)

@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
    index = index or settings.ELASTIC_INDEX
    search_engine.bulk_update_nodes(serialize, nodes, index=index)

@requires_search
def delete_node(node, index=None):
    index = index or settings.ELASTIC_INDEX
    doc_type = node.project_or_component
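Finally, many of the search.py snippets above are wrapped in @requires_search. The decorator's body is not shown anywhere in this listing; presumably it guards against an unconfigured search backend. A minimal sketch of that idea (the actual behavior in osf.io may differ):

import functools

search_engine = None  # set at startup when a backend is configured

def requires_search(func):
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        if search_engine is None:
            return None  # no backend available: degrade to a no-op
        return func(*args, **kwargs)
    return wrapped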