def formset_valid(self, formset):
        # Save but don't commit so the _objects properties are populated
        formset.save(commit=False)
        # Get lists of objects
        updated = [n[0] for n in formset.changed_objects]
        created = formset.new_objects
        deleted = formset.deleted_objects

        for ss in created:
            # Set data not available through form
            ss.proposed = True
            ss.registration_authority = self.review.registration_authority
            ss.review = self.review

        if created:
            # Bulk save created
            MDR.SupersedeRelationship.objects.bulk_create(created)
        if updated:
            # Bulk save updated
            bulk_update(updated, batch_size=500)
        if deleted:
            # Bulk delete
            ids = [i.id for i in deleted]
            MDR.SupersedeRelationship.proposed_objects.filter(
                id__in=ids).delete()

        # Redirect to supersedes info page
        return HttpResponseRedirect(
            reverse('aristotle_reviews:request_supersedes',
                    args=[self.review.id]))
Example #2
def add_datetime_to_audio_files(apps, schema_editor):
    """
    """
    db_alias = schema_editor.connection.alias
    audio_file_model = apps.get_model('koe', 'AudioFile')

    audio_files = audio_file_model.objects.using(db_alias).all()

    # Strip the leading slash from MEDIA_URL so the template joins cleanly under BASE_DIR
    slashed_url = os.path.join(settings.MEDIA_URL, 'audio/wav/{}', '{}.wav')
    unslashed_url = slashed_url[1:]
    wav_path_template = os.path.join(settings.BASE_DIR, unslashed_url)

    sys.stdout.write('\n')
    sys.stdout.write('\tAdding timestamp to {} AudioFiles...'.format(
        len(audio_files)))

    for audio_file in audio_files:
        if audio_file.original is None:
            database_id = audio_file.database.id
            file_name = audio_file.name
        else:
            database_id = audio_file.original.database.id
            file_name = audio_file.original.name
        file_path = wav_path_template.format(database_id, file_name)

        if os.path.isfile(file_path):
            last_modif_timestamp = os.path.getmtime(file_path)
            last_modif_datetime = datetime.datetime.utcfromtimestamp(
                last_modif_timestamp)
        else:
            last_modif_datetime = datetime.datetime.utcfromtimestamp(0)
        audio_file.added = pytz.utc.localize(last_modif_datetime)

    bulk_update(audio_files, update_fields=['added'], batch_size=10000)
Example #3
    def post(self, request):
        # Use a default so a missing key returns None instead of raising KeyError
        submissions_id = request.data.copy().pop('submissions', None)
        if not submissions_id:
            return Response("Parameter error, submissions is required",
                            status=HTTP_400_BAD_REQUEST)

        submissions = Submission.objects.filter(ID__in=submissions_id).exclude(
            Q(result=JudgeStatus.PENDING) | Q(result=JudgeStatus.JUDGING))
        for submission in submissions:
            submission.info = []
            submission.compile_error_info = None
            submission.time_cost = None
            submission.memory_cost = None
            submission.score = None
            submission.result = JudgeStatus.PENDING
        bulk_update(submissions,
                    update_fields=[
                        'info', 'compile_error_info', 'time_cost',
                        'memory_cost', 'score', 'result'
                    ])
        submissions_id = []
        for submission in submissions:
            submissions_id.append(submission.ID)
            RedisQueue.put('queue:submission', submission.ID)
        return Response(submissions_id, status=HTTP_200_OK)
Example #4
    def post_handle(self):
        kwargs = {"desc": "Linking authorship", "unit": "bills"}
        for url, authors in tqdm(self.post_handle_cache.items(), **kwargs):
            bill = Bill.objects.get(url=url)
            for author in authors.split(","):
                politician = get_politician(author.strip())
                if politician:
                    bill.authors.add(politician)
            bill.save()

        get_politician.cache_clear()
        self.post_handle_cache = None

        kwargs = {"desc": "Counting keywords", "unit": "politicians"}
        politicians = tuple(Politician.objects.exclude(bills=None))
        for politician in tqdm(politicians, **kwargs):
            counter = Counter()
            for bill in politician.bills.all():
                counter.update(bill.keywords)

            politician.bill_keywords = tuple({
                "keyword": keyword,
                "total": total
            } for keyword, total in counter.most_common())

        bulk_update(politicians, update_fields=("bill_keywords", ))
Example #5
def add_registration_files_count(state, *args, **kwargs):
    """
    Caches registration files count on Registration object.

    Importing Registration model outside of this method to take advantage of files
    relationship for speed purposes in this migration.  If this model changes significantly,
    this migration may have to be modified in the future so it runs on an empty db.
    """
    registrations = Registration.objects.filter(is_deleted=False).filter(
        files__type='osf.osfstoragefile',
        files__deleted_on__isnull=True
    ).annotate(
        annotated_file_count=Count('files')
    )
    progress_bar = tqdm(total=registrations.count())
    registrations_to_update = []

    for registration in registrations:
        progress_bar.update(1)
        registration.files_count = registration.annotated_file_count
        registrations_to_update.append(registration)

    bulk_update(registrations_to_update, update_fields=['files_count'], batch_size=5000)
    logger.info('Populated `files_count` on a total of {} registrations'.format(len(registrations_to_update)))
    progress_bar.close()
Example #6
def update_comment_root_target(state, *args, **kwargs):
    Comment = state.get_model('osf', 'comment')
    comments = Comment.objects.exclude(
        is_deleted=True).select_related('root_target')
    logger.info('{} comments to check'.format(comments.count()))
    comments_to_update = []
    for comment in comments:
        if comment.root_target:
            root_target_ctype = comment.root_target.content_type
            root_target_model_cls = state.get_model(
                root_target_ctype.app_label, root_target_ctype.model)
            root_target = root_target_model_cls.objects.get(
                pk=comment.root_target.object_id)
            if hasattr(root_target, 'is_deleted') and root_target.is_deleted:
                logger.info(
                    '{} is deleted. Setting Comment {} root_target to None'.
                    format(root_target, comment.pk))
                comment.root_target = None
                comments_to_update.append(comment)
            if hasattr(root_target, 'deleted') and root_target.deleted:
                logger.info(
                    '{} is deleted. Setting Comment {} root_target to None'.
                    format(root_target, comment.pk))
                comment.root_target = None
                comments_to_update.append(comment)
    bulk_update(comments_to_update, update_fields=['root_target'])
    logger.info('Total comments migrated: {}'.format(len(comments_to_update)))
Example #7
    def _set_(cls, objs, attr, value):
        """
        A shortcut to set a value (or list of values) on an attribute across multiple objects
        :param objs: an array or QuerySet of objects
        :param attr: name of the attr
        :param value: value to set
        :return:
        """
        if not isinstance(objs, QuerySet):
            ids = [x.id for x in objs]
            preserved = Case(
                *[When(id=id, then=pos) for pos, id in enumerate(ids)])
            objs = objs[0].__class__.objects.filter(
                id__in=ids).order_by(preserved)

        if isinstance(value, list):
            for obj, val in zip(objs, value):
                setattr(obj, attr, val)
            bulk_update(objs, update_fields=[attr], batch_size=10000)
        elif len(objs) == 1:
            obj = objs[0]
            setattr(obj, attr, value)
            obj.save()
        else:
            objs.update(**{attr: value})
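A minimal usage sketch for the helper above; the BulkTools owner class and the Segment model with a label field are hypothetical stand-ins, not names from the original code:

# Hypothetical model and owner class, shown only to illustrate _set_
segments = Segment.objects.filter(reviewed=False)

# Scalar value for several objects: falls through to a single queryset UPDATE
BulkTools._set_(segments, 'label', 'unreviewed')

# One value per object: objects are re-fetched in input order, zipped with the
# values, then written in one pass with bulk_update()
BulkTools._set_(list(segments), 'label', ['intro', 'verse', 'chorus'])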
Example #8
    def include_charactere(self, request, queryset):
        import re

        # dados = [
        #     dict(
        #         new_classification=queryset_planodecontas.new_classification) for queryset_planodecontas in PlanoDeContas.objects.all()
        #     # '1', '101', '10101',
        #     # '1010110', '101011010', '10101101010',
        #     # '1010110101000015', '1.01.01.10.10.10.0001-6', '1.0101.10.10.10.0001-7',
        #     # '1.01.01.10.10.10.0002-1', '1.01.0110.10.10.0002-2', '1.01.01.10.10.1000023',
        # ]
        padrao = re.compile(
            r'(\d)(?:\.?(\d{2})(?:\.?(\d{2})(?:\.?(\d{2})(?:\.?(\d{2})(?:\.?(\d{2})(?:\.?(\d{4})-?(\d))?)?)?)?)?)?'
        )

        def formatar(dado):
            res = padrao.search(dado)
            if res:
                res = tuple(filter(lambda v: v is not None, res.groups()))
                if len(res) > 6:
                    return '.'.join(res[:6]) + '.' + '-'.join(res[6:])
                return '.'.join(res[:6])

        dados = []
        for plano in PlanoDeContas.objects.all():
            plano.classification = formatar(plano.classification)
            dados.append(plano)

        bulk_update(dados)
Example #9
def add_registration_files_count(state, *args, **kwargs):
    """
    Caches registration files count on Registration object.

    Importing Registration model outside of this method to take advantage of files
    relationship for speed purposes in this migration.  If this model changes significantly,
    this migration may have to be modified in the future so it runs on an empty db.
    """
    Registration = state.get_model('osf', 'registration')
    registrations = Registration.objects.filter(is_deleted=False,
                                                files_count__isnull=True)
    BaseFileNode = state.get_model('osf', 'BaseFileNode')
    ContentType = state.get_model('contenttypes', 'ContentType')
    content_type = ContentType.objects.get(app_label='osf',
                                           model='abstractnode')
    registrations_to_update = []

    for registration in registrations:
        registration_files = BaseFileNode.objects.filter(
            target_object_id=registration.id,
            target_content_type=content_type,
            type='osf.osfstoragefile',
            deleted_on__isnull=True,
        )
        registration.files_count = registration_files.count()
        registrations_to_update.append(registration)

    bulk_update(registrations_to_update,
                update_fields=['files_count'],
                batch_size=5000)
    logger.info(
        'Populated `files_count` on a total of {} registrations'.format(
            len(registrations_to_update)))
Example #10
    def anonymize(self):
        print('Updating started')
        for anonym_cls in list(self.anonym_models.values()):

            if not anonym_cls.get_fields_names():
                continue

            queryset = anonym_cls.Meta.queryset.only(
                *anonym_cls.get_fields_names()
            )

            print('\nGenerating fake values for model "{}"'.format(
                queryset.model.__name__
            ))

            i = 0
            total = queryset.count()
            # Slice the queryset into batches of ANONYMIZER_SELECT_BATCH_SIZE;
            # the final None bound picks up the last partial batch
            for j in list(range(0, total, settings.ANONYMIZER_SELECT_BATCH_SIZE)) + [None]:
                sub_set = queryset.order_by('pk')[i:j]
                for model in sub_set:
                    i += 1

                    for name in anonym_cls.get_fields_names():
                        if getattr(model, name) or anonym_cls.Meta.fill_empty:
                            setattr(model, name, next(
                                getattr(anonym_cls, name))
                            )

                bulk_update(sub_set,
                            batch_size=settings.ANONYMIZER_UPDATE_BATCH_SIZE,
                            update_fields=anonym_cls.get_fields_names())
        print('\n\nUpdating finished')
Example #11
    def handle(self, *args, **options):
        results = tuple(self.politicians_and_results())
        kwargs = {"desc": "Election results", "total": len(results), "unit": "results"}
        with tqdm(**kwargs) as progress_bar:
            for bulk in ipartition(results, 4096):
                bulk = tuple(self.serialize_bulk(bulk))
                bulk_update(bulk, update_fields=("election_history",))
                progress_bar.update(len(bulk))
Example #12
    def put(self, request):
        data = {x['id']: x['order'] for x in json.loads(request.body)}
        L = []
        for x in self.order_model_class.objects.all():
            x.order = data[x.pk]
            L.append(x)
        bulk_update(L, update_fields=['order'])
        return JsonResponse({'message': 'ok'})
Example #13
def bulk_sync(new_models, key_fields, filters, batch_size=None):
    """ Combine bulk create, update, and delete.  Make the DB match a set of in-memory objects.

    `new_models`: Django ORM objects that are the desired state.  They may or may not have `id` set.
    `key_fields`: Identifying attribute name(s) to match up `new_models` items with database rows.  If a foreign key
            is being used as a key field, be sure to pass the `fieldname_id` rather than the `fieldname`.
    `filters`: Q() filters specifying the subset of the database to work in.
    `batch_size`: passes through to Django `bulk_create.batch_size` and `bulk_update.batch_size`, and controls
            how many objects are created/updated per SQL query.

    """
    db_class = new_models[0].__class__

    with transaction.atomic():
        objs = db_class.objects.all()
        if filters:
            objs = objs.filter(filters)
        objs = objs.only("pk", *key_fields).select_for_update()

        def get_key(obj):
            return tuple(getattr(obj, k) for k in key_fields)

        obj_dict = {get_key(obj): obj for obj in objs}

        new_objs = []
        existing_objs = []
        for new_obj in new_models:
            old_obj = obj_dict.pop(get_key(new_obj), None)
            if old_obj is None:
                # This is a new object, so create it.
                # Make sure the primary key field is clear.
                new_obj.pk = None
                new_objs.append(new_obj)
            else:
                new_obj.id = old_obj.id
                existing_objs.append(new_obj)

        db_class.objects.bulk_create(new_objs, batch_size=batch_size)

        bulk_update(existing_objs, batch_size=batch_size)

        # delete stale ones...
        objs.filter(pk__in=[_.pk for _ in list(obj_dict.values())]).delete()

        assert len(existing_objs) == len(new_models) - len(new_objs)

        stats = {
            "created": len(new_objs),
            "updated": len(new_models) - len(new_objs),
            "deleted": len(obj_dict)
        }

        logger.debug("{}: {} created, {} updated, {} deleted.".format(
            db_class.__name__, stats["created"], stats["updated"],
            stats["deleted"]))

    return {"stats": stats}
Example #15
    def update_tasks(self, event, tasks):
        from django_celery_events import models

        backend_tasks = [
            models.Task(pk=task.backend_obj.pk, queue=task.queue)
            for task in tasks
        ]
        bulk_update(backend_tasks, update_fields=['queue'])
        event.backend_obj.save()
Example #16
def update_hit_count_or_like(hit_or_like_desc_menu_queryset, weight):
    """
    조회수, 좋아요에 대해서 TOP 5 순위를 매겨서, 점수를 생성하고 합한다.
    """
    exists, score_sum, result_menu_queryset = calc_score(
        hit_or_like_desc_menu_queryset, weight)
    if exists:
        bulk_update(result_menu_queryset, update_fields=['score'])
    return score_sum
Example #17
def quote_folder_paths(state, schema):
    try:
        NodeSettings = state.get_model('addons_googledrive', 'nodesettings')
        targets = NodeSettings.objects.filter(folder_path__isnull=False)
    except LookupError:
        return
    for obj in targets:
        obj.folder_path = quote(obj.folder_path.encode('utf-8'))
    bulk_update(targets, update_fields=['folder_path'])
Example #18
    def update(self, queryset, all_validated_data):
        concrete_fields = set(
            f.name for f in self.child.Meta.model._meta.concrete_fields
        )

        all_validated_data_by_id = {}

        properties_to_update = set()

        for obj in all_validated_data:
            obj_id = self.child.id_value_lookup(obj)
            obj = self.child.remove_id_values(obj)
            if obj.keys():
                all_validated_data_by_id[obj_id] = obj
                properties_to_update.update(obj.keys())

        properties_to_update = properties_to_update.intersection(concrete_fields)

        # this method is handed a queryset that has been pre-filtered
        # to the specific instance ids in question, by `create_from_updates` on the bulk update mixin
        objects_to_update = queryset.only(*properties_to_update)

        updated_objects = []

        updated_keys = set()

        for obj in objects_to_update:
            # Coerce to string as some ids are of the UUID class
            obj_id = self.child.id_value_lookup(obj)
            obj_validated_data = all_validated_data_by_id.get(obj_id)

            # If no valid data was passed back then this will be None
            if obj_validated_data is not None:

                # Reset the child serializer changes attribute
                self.child.changes = []
                # use model serializer to actually update the model
                # in case that method is overwritten

                instance = self.child.update(obj, obj_validated_data)
                # If the update method does not return an instance for some reason
                # do not try to run further updates on the model, as there is no
                # object to update.
                if instance:
                    updated_objects.append(instance)
                    updated_keys.add(obj_id)
                    # Collect any registered changes from this run of the loop
                    self.changes.extend(self.child.changes)

        if len(all_validated_data_by_id) != len(updated_keys):
            self.missing_keys = set(all_validated_data_by_id.keys())\
                .difference(updated_keys)

        bulk_update(updated_objects, update_fields=properties_to_update)

        return updated_objects
Example #19
def sync_node_assessment_items(node, original):  # noqa C901
    node_assessment_items = {}

    for ai in node.assessment_items.all():
        node_assessment_items[ai.assessment_id] = ai

    node.extra_fields = original.extra_fields
    files_to_delete = []
    files_to_create = []

    ai_to_update = []

    for source_ai in original.assessment_items.all():
        ai_id = source_ai.assessment_id
        node_ai = node_assessment_items.get(ai_id)
        if not node_ai:
            node_ai = copy.copy(source_ai)
            node_ai.id = None
            node_ai.contentnode_id = node.id
            node_ai.save()
            node.changed = True
        else:
            for field in assessment_item_fields:
                setattr(node_ai, field, getattr(source_ai, field))
            if node_ai.has_changes():
                ai_to_update.append(node_ai)
            node_assessment_items.pop(ai_id)
        node_ai_files = {}
        if node_ai.id is not None:
            for file in node_ai.files.all():
                node_ai_files[file.checksum] = file
        for file in source_ai.files.all():
            if file.checksum not in node_ai_files:
                file.id = None
                file.assessment_item_id = node_ai.id
                files_to_create.append(file)
            else:
                node_ai_files.pop(file.checksum)
        files_to_delete.extend([f.id for f in node_ai_files.values()])

    ai_to_delete = [a.id for a in node_assessment_items.values()]
    if ai_to_delete:
        AssessmentItem.objects.filter(id__in=ai_to_delete).delete()
        node.changed = True

    if ai_to_update:
        bulk_update(ai_to_update, update_fields=assessment_item_fields)
        node.changed = True

    if files_to_delete:
        File.objects.filter(id__in=files_to_delete).delete()
        node.changed = True

    if files_to_create:
        File.objects.bulk_create(files_to_create)
        node.changed = True
Example #20
def transfer_forked_date(state, schema):
    """
    If the most recent node log is forking, transfer that log's date to the node's last_logged field
    """
    newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
    nodes = Node.objects.filter(is_fork=True).annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
    for node in nodes:
        node.last_logged = node.logs.first().date

    bulk_update(nodes, update_fields=['last_logged'])
Example #21
def update_order_count_score(all_menu, order_desc_menu_id, weight):
    """
    주문수 TOP 5 순위를 매겨서, 점수를 생성하고 합한다.
    """
    order_desc_menu_queryset = all_menu.filter(id__in=order_desc_menu_id)
    exists, score_sum, result_menu_queryset = calc_score(
        order_desc_menu_queryset, weight)
    if exists:
        bulk_update(result_menu_queryset, update_fields=['score'])
    return score_sum
Example #22
def get_latest_rates():
    logger.info("RUN CELERY TASK - Get latest exchange rates from openexchangerates.org")
    response = requests.get(f'https://openexchangerates.org/api/latest.json?app_id={APP_ID}')
    if response.ok:
        rates = Rate.objects.select_related('from_currency').select_related('to_currency').all()
        for rate in rates:
            rate.rate = get_rate(response.text, rate.from_currency.code, rate.to_currency.code)
        bulk_update(rates, update_fields=['rate'])
        logger.info("OK")
    else:
        logger.critical('No response data from openexchangerates.org! Please try later!')
Example #23
def ordered_formset_save(formset, item, model_to_add_field, ordering_field):
    # Save a formset created with the above factory

    # Do this to ensure we are saving reversion records for the item, not just the values
    item.save()
    # Save the formset without committing so we have access to deleted_objects and save_m2m
    formset.save(commit=False)

    new = []
    for obj in formset.new_objects:
        # Loop through the forms so we can add the order value to the ordering field
        # ordered_forms does not contain forms marked for deletion
        setattr(obj, model_to_add_field, item)

        # If this item is a subclass of MPTT (like a FrameworkDimension) let MPTT order the values automatically.
        # They are ordered by name in alphabetical order by default.
        # Check the FrameworkDimension model to see the 'order_insertion_by' option of MPTT.
        # TODO: We need to write some tests for this functionality.
        if issubclass(formset.model, MPTTModel):
            obj.save()
        else:
            new.append(obj)

    if new:
        formset.model.objects.bulk_create(new)

    changed = []
    for record in formset.changed_objects:
        # record is a tuple with obj and form changed_data
        obj = record[0]
        setattr(obj, model_to_add_field, item)
        if issubclass(formset.model, MPTTModel):
            obj.save()
        else:
            changed.append(obj)

    if changed:
        bulk_update(changed, batch_size=500)

    if formset.deleted_objects:
        if hasattr(formset.model.objects, 'bulk_delete'):
            formset.model.objects.bulk_delete(formset.deleted_objects)
        else:
            # Backup just in case wrong manager is being used
            formset.model.objects.filter(
                id__in=[i.id for i in formset.deleted_objects]).delete()

    if issubclass(formset.model, MPTTModel):
        formset.model.objects.rebuild()

    # Save any m2m relations on the objects (not actually needed yet)
    formset.save_m2m()
Example #24
    def post_handle(self):
        assets = tuple(self.assets_per_politician_per_year())
        kwargs = {
            "desc": f"Calculating {Asset._meta.verbose_name} per year/politician",
            "total": len(assets),
            "unit": "politician",
        }
        with tqdm(**kwargs) as progress_bar:
            for bulk in ipartition(assets, 4096):
                bulk = tuple(self.serialize_bulk(bulk))
                bulk_update(bulk, update_fields=["asset_history"])
                progress_bar.update(len(bulk))
Example #25
    def delete(self):
        super().delete()
        f = []
        c = []
        for i, x in enumerate(FacilitatorOrder.objects.all().order_by('order'),
                              1):
            x.order = i
            f.append(x)
            c.append(x)

        bulk_update(f, update_fields=['order'])
        bulk_update(c, update_fields=['order'])
Example #26
def untransfer_forked_date(state, schema):
    """
    Reverse mig.

    Revert the last logged date of nodes whose last log is forking to the previous log's date
    """
    newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
    nodes = Node.objects.filter(is_fork=True).annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
    for node in nodes:
        node.last_logged = node.logs.order_by('-date')[1].date

    bulk_update(nodes, update_fields=['last_logged'])
Example #27
def transfer_forked_date(state, schema):
    """
    If the most recent node log is forking, transfer that log's date to the node's last_logged field
    """
    newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
    nodes = Node.objects.filter(is_fork=True).annotate(
        latest_log=Subquery(newest.values('action')[:1])).filter(
            latest_log='node_forked')
    for node in nodes:
        node.last_logged = node.logs.first().date

    bulk_update(nodes, update_fields=['last_logged'])
Example #28
    def test_bulk_update_objects(self, Factory, django_assert_num_queries):
        objects = []
        ids = range(0, 5)
        for id in ids:
            objects.append(Factory())
        try:
            dtfield = [x.name for x in objects[0]._meta.get_fields() if isinstance(x, DateTimeField)][0]
        except IndexError:
            pytest.skip('Thing doesn\'t have a DateTimeField')
        for obj in objects:
            setattr(obj, dtfield, timezone.now())
        with django_assert_num_queries(1):
            bulk_update(objects)
Example #29
    def link_campaign(self, year):
        kwargs = {
            "desc": str(year),
            "total": Candidate.objects.campaign(year).exclude(voter_id=None).count(),
            "unit": "links",
        }
        with tqdm(**kwargs) as progress_bar:
            for bulk in ipartition(self.linked_candidates(year), 4096):
                bulk_update(bulk, update_fields=("politician",))
                progress_bar.update(len(bulk))
Example #30
    def remove_charactere(self, request, queryset_planodecontas):

        queryset_planodecontas = PlanoDeContas.objects.all()

        # Clean the data: strip dashes and dots from the classification
        for planodecontas_obj in queryset_planodecontas:
            dado_a_limpar = planodecontas_obj.classification
            dado_limpo = dado_a_limpar.replace("-", "").replace(".", "")
            planodecontas_obj.classification = dado_limpo

        bulk_update(queryset_planodecontas,
                    update_fields=['classification'],
                    batch_size=5000)
Example #31
def new_invoice(created, instance, **kwargs):
    if instance.status == 1:
        time_to_check = datetime.utcnow() + timedelta(days=5)
        from .tasks import check_invoice_status
        check_invoice_status.apply_async((instance.id,), eta=time_to_check)
    if instance.status == 0 and len(instance.invoice_lines.all()) != 0:
        to_update = []
        from .tasks import send_notification
        send_notification.delay(instance.id)
        for product in instance.invoice_lines.all():
            product.on_transition = True
            to_update.append(product)
        bulk_update(to_update, update_fields=['on_transition'])
Example #32
    def get(self, request):
        ping_cache_helper = PingCacheHelper()
        ping_keys = cache.get('ping_keys', [])
        rpid = request.GET.get('rpid')
        if rpid:
            ping_keys = [ping_cache_helper.get_key(rpid)]

        rpids_ping_map = {}
        for ping_key in ping_keys:
            ping_data = cache.get(ping_key)
            if not ping_data:
                continue
            rpid = ping_data['rpid']
            rpids_ping_map[rpid] = ping_data

        rpids = []
        invalidated_rpids = []
        raspberry_pis = RaspberryPi.objects.filter(
            rpid__in=rpids_ping_map.keys()).prefetch_related('lead')
        ec2_instances = EC2Instance.objects.filter(
            rpid__in=rpids_ping_map.keys()).select_related('lead')
        ec2_instances_map = {}
        for ec2_instance in ec2_instances:
            ec2_instances_map[ec2_instance.rpid] = ec2_instance
        for raspberry_pi in raspberry_pis:
            ping_data = rpids_ping_map.get(raspberry_pi.rpid)
            rpid = ping_data['rpid']
            rpids.append(rpid)
            ec2_instance = ec2_instances_map.get(rpid)
            self.process_ping_data(ping_data, raspberry_pi, ec2_instance)

            if not ping_cache_helper.is_data_consistent(
                    ping_data,
                    raspberry_pi=raspberry_pi,
                    ec2_instance=ec2_instance,
            ):
                ping_cache_helper.delete(rpid)
                invalidated_rpids.append(rpid)

        bulk_update(raspberry_pis,
                    update_fields=[
                        'ip_address', 'first_seen', 'first_tested',
                        'online_since_date', 'last_seen', 'version'
                    ])
        bulk_update(ec2_instances,
                    update_fields=['last_troubleshoot', 'tunnel_up_date'])
        return JsonResponse({
            'rpids': rpids,
            'invalidated': invalidated_rpids,
            'result': True,
        })
Example #33
def untransfer_forked_date(state, schema):
    """
    Reverse mig.

    Revert the last logged date of nodes whose last log is forking to the previous log's date
    """
    newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
    nodes = Node.objects.filter(is_fork=True).annotate(
        latest_log=Subquery(newest.values('action')[:1])).filter(
            latest_log='node_forked')
    for node in nodes:
        node.last_logged = node.logs.order_by('-date')[1].date

    bulk_update(nodes, update_fields=['last_logged'])
Example #34
def conference_submissions(**kwargs):
    """Return data for all OSF4M submissions.

    The total number of submissions for each meeting is calculated and cached
    in the Conference.num_submissions field.
    """
    conferences = Conference.objects.filter(is_meeting=True)
    #  TODO: Revisit this loop, there has to be a way to optimize it
    for conf in conferences:
        # For efficiency, we filter by tag first, then node
        # instead of doing a single Node query
        tags = Tag.objects.filter(system=False, name__iexact=conf.endpoint).values_list('pk', flat=True)
        nodes = AbstractNode.objects.filter(tags__in=tags, is_public=True, is_deleted=False)
        # Cache the number of submissions
        conf.num_submissions = nodes.count()
    bulk_update(conferences, update_fields=['num_submissions'])
    return {'success': True}
Example #35
def update_comment_root_target(state, *args, **kwargs):
    Comment = state.get_model('osf', 'comment')
    comments = Comment.objects.exclude(is_deleted=True).select_related('root_target')
    logger.info('{} comments to check'.format(comments.count()))
    comments_to_update = []
    for comment in comments:
        if comment.root_target:
            root_target_ctype = comment.root_target.content_type
            root_target_model_cls = state.get_model(root_target_ctype.app_label, root_target_ctype.model)
            root_target = root_target_model_cls.objects.get(pk=comment.root_target.object_id)
            if hasattr(root_target, 'is_deleted') and root_target.is_deleted:
                logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
                comment.root_target = None
                comments_to_update.append(comment)
            if hasattr(root_target, 'deleted') and root_target.deleted:
                logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
                comment.root_target = None
                comments_to_update.append(comment)
    bulk_update(comments_to_update, update_fields=['root_target'])
    logger.info('Total comments migrated: {}'.format(len(comments_to_update)))
Example #36
    def bulk_get_file_nodes_from_wb_resp(self, files_list):
        """Takes a list of file data from wb response, touches/updates metadata for each, and returns list of file objects.
        This function mirrors all the actions of get_file_node_from_wb_resp except the create and updates are done in bulk.
    The bulk_update and bulk_create do not call the base class update and create, so the actions of those
    functions are done here where needed.
        """
        node = self.get_node(check_object_permissions=False)
        content_type = ContentType.objects.get_for_model(node)

        objs_to_create = defaultdict(list)
        file_objs = []

        for item in files_list:
            attrs = item['attributes']
            base_class = BaseFileNode.resolve_class(
                attrs['provider'],
                BaseFileNode.FOLDER if attrs['kind'] == 'folder'
                else BaseFileNode.FILE,
            )

            # mirrors BaseFileNode get_or_create
            try:
                file_obj = base_class.objects.get(target_object_id=node.id, target_content_type=content_type, _path='/' + attrs['path'].lstrip('/'))
            except base_class.DoesNotExist:
                # create method on BaseFileNode appends provider, bulk_create bypasses this step so it is added here
                file_obj = base_class(target=node, _path='/' + attrs['path'].lstrip('/'), provider=base_class._provider)
                objs_to_create[base_class].append(file_obj)
            else:
                file_objs.append(file_obj)

            file_obj.update(None, attrs, user=self.request.user, save=False)

        bulk_update(file_objs)

        for base_class in objs_to_create:
            base_class.objects.bulk_create(objs_to_create[base_class])
            file_objs += objs_to_create[base_class]

        return file_objs