Example #1
    def post(self, request):
        if not (is_active_superuser(request) and request.access.has_permission('broadcasts.admin')):
            return self.respond(status=401)

        validator = AdminBroadcastValidator(data=request.DATA)
        if not validator.is_valid():
            return self.respond(validator.errors, status=400)

        result = validator.object

        with transaction.atomic():
            broadcast = Broadcast.objects.create(
                title=result['title'],
                message=result['message'],
                link=result['link'],
                is_active=result.get('isActive') or False,
                date_expires=result.get('expiresAt'),
            )
            logger.info('broadcasts.create', extra={
                'ip_address': request.META['REMOTE_ADDR'],
                'user_id': request.user.id,
                'broadcast_id': broadcast.id,
            })

        if result.get('hasSeen'):
            try:
                with transaction.atomic():
                    BroadcastSeen.objects.create(
                        broadcast=broadcast,
                        user=request.user,
                    )
            except IntegrityError:
                pass

        return self.respond(self._serialize_objects(broadcast, request))
Example #2
def create_contributions(apps, schema_editor):
    Word = apps.get_model("linguistics", "Word")
    Template = apps.get_model("linguistics", "Template")
    Contribution = apps.get_model("linguistics", "Contribution")

    for word in Word.objects.all():
        if word.author_id is None:
            continue

        if word.state == 1:
            continue

        with transaction.atomic():
            try:
                Contribution.objects.create(
                    account_id=word.author_id, state=word.state, type=0, source=0, entity_id=word.id
                )
            except IntegrityError:
                # skip rows that violate a constraint (most likely an already-existing contribution)
                pass

    for template in Template.objects.all():
        if template.author_id is None:
            continue

        if template.state == 1:
            continue

        with transaction.atomic():
            try:
                Contribution.objects.create(
                    account_id=template.author_id, state=template.state, type=1, source=0, entity_id=template.id
                )
            except IntegrityError:
                # skip rows that violate a constraint (most likely an already-existing contribution)
                pass
Example #3
def install_vocabularies():
    Vocabulary.objects.all().delete()
    VocabularyClass.objects.all().delete()
    VocabularyProperty.objects.all().delete()

    # load vocabularies
    vocabularies_fp = open(os.path.join(os.path.dirname(__file__), 'data/vocabularies.json'))
    data = json.load(vocabularies_fp)
    n_of_vocabularies = 0
    with transaction.atomic():  # atomic transactions vastly improve performance
        for v in data:
            vocabulary = Vocabulary.objects.create(pk=v['pk'], category=v['fields']['category'],
                                                   version=v['fields']['version'], votes=v['fields']['votes'],
                                                   originalUrl=v['fields']['originalUrl'],
                                                   description=v['fields']['description'], title=v['fields']['title'],
                                                   downloads=v['fields']['downloads'],
                                                   lodRanking=v['fields']['lodRanking'],
                                                   preferredNamespacePrefix=v['fields']['preferredNamespacePrefix'],
                                                   datePublished=v['fields']['datePublished'],
                                                   downloadUrl=v['fields']['downloadUrl'], score=v['fields']['score'],
                                                   uploader=User.objects.get(pk=v['fields']['uploader']),
                                                   dateModified=v['fields']['dateModified'],
                                                   dateCreated=v['fields']['dateCreated'],
                                                   preferredNamespaceUri=v['fields']['preferredNamespaceUri'],
                                                   example=v['fields']['example'], prevent_default_make=True)
            n_of_vocabularies += 1
            vocabulary.prevent_default_make = False  # reset to false so it can be updated
    vocabularies_fp.close()

    # load classes
    classes_fp = open(os.path.join(os.path.dirname(__file__), 'data/classes.json'))
    data = json.load(classes_fp)
    n_of_classes = 0
    with transaction.atomic():
        for c in data:
            VocabularyClass.objects.create(pk=c['pk'], description=c['fields']['description'],
                                           uri=c['fields']['uri'], label=c['fields']['label'],
                                           vocabulary=Vocabulary.objects.get(pk=c['fields']['vocabulary']))
            n_of_classes += 1
    classes_fp.close()

    # load properties
    properties_fp = open(os.path.join(os.path.dirname(__file__), 'data/properties.json'))
    data = json.load(properties_fp)
    n_of_properties = 0
    with transaction.atomic():
        for p in data:
            VocabularyProperty.objects.create(pk=p['pk'], description=p['fields']['description'],
                                              uri=p['fields']['uri'],
                                              vocabulary=Vocabulary.objects.get(pk=p['fields']['vocabulary']),
                                              label=p['fields']['label'], domain=p['fields']['domain'],
                                              range=p['fields']['range'], parent_uri=p['fields']['parent_uri'])
            n_of_properties += 1
    properties_fp.close()

    return HttpResponse(json.dumps({
        'n_of_vocabularies': n_of_vocabularies,
        'n_of_classes': n_of_classes,
        'n_of_properties': n_of_properties
    }), status=200, content_type="application/json")
Example #4
 def kill(self):
     with transaction.atomic():
         p = type(self).objects.get(pk=self.pk)
         p.not_needed_anymore = True
         p.save()
     save_lives = not self.finished
     logger().info("Killing pool #{}".format(self.id))
     if self.appliances:
         for appliance in self.appliances:
             if (
                     save_lives and appliance.ready and appliance.leased_until is None
                     and appliance.marked_for_deletion is False
                     and not appliance.managed_providers):
                 with transaction.atomic():
                     with appliance.kill_lock:
                         appliance.appliance_pool = None
                         appliance.datetime_leased = None
                         appliance.save()
                     self.total_count -= 1
                     if self.total_count < 0:
                         self.total_count = 0  # Protection against stupidity
                     self.save()
                 appliance.set_status(
                     "The appliance was taken out of dying pool {}".format(self.id))
             else:
                 Appliance.kill(appliance)
     else:
         # No appliances, so just delete it
         self.delete()
Example #5
    def test__init(self):
        rated_by = TestUser()

        # all fields
        rcpe1 = TestRecipe()
        r1 = Rating.objects.create(recipe=rcpe1, rated_by=rated_by, vote=1)
        self.assertIsNotNone(r1.last_updated)

        # recipe and rated_by should be unique
        with transaction.atomic():
            self.assertRaises(IntegrityError, Rating.objects.create, recipe=rcpe1, rated_by=rated_by, vote=1)
        self.assert_(Rating.objects.create(recipe=rcpe1, rated_by=TestUser(), vote=1))
        self.assert_(Rating.objects.create(recipe=TestRecipe(), rated_by=rated_by, vote=-1))

        # recipe is required
        with transaction.atomic():
            self.assertRaises(IntegrityError, Rating.objects.create, rated_by=rated_by, vote=-1)

        # rated_by is required
        with transaction.atomic():
            self.assertRaises(IntegrityError, Rating.objects.create, recipe=rcpe1, vote=1)

        # vote is required
        with transaction.atomic():
            self.assertRaises(IntegrityError, Rating.objects.create, recipe=TestRecipe(), rated_by=TestUser())
Example #6
    def test_hooks_cleared_after_successful_commit(self, track):
        with atomic():
            track.do(1)
        with atomic():
            track.do(2)

        track.assert_done([1, 2])  # not [1, 1, 2]
Example #7
 def test_executes_only_after_final_transaction_committed(self, track):
     with atomic():
         with atomic():
             track.do(1)
             track.assert_notified([])
         track.assert_notified([])
     track.assert_done([1])
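Both hook tests above presumably register callbacks through transaction.on_commit() inside the atomic blocks, which is why nothing runs until the outermost block commits. A minimal sketch of that pattern, with an illustrative Tracker class standing in for the track fixture:

from django.db import transaction

class Tracker:
    def __init__(self):
        self.done = []

    def do(self, number):
        # queue a callback; it runs only after the outermost atomic block
        # commits, and is discarded if the transaction rolls back
        transaction.on_commit(lambda: self.done.append(number))

track = Tracker()
with transaction.atomic():
    with transaction.atomic():
        track.do(1)  # nothing has run yet
# the outer block has committed here, so track.done == [1]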
Example #8
File: tests.py Project: 6ft/django
 def test_wrap_callable_instance(self):
     # Regression test for #20028
     class Callable(object):
         def __call__(self):
             pass
     # Must not raise an exception
     transaction.atomic(Callable())
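The regression test above only checks that atomic() accepts a callable instance without raising; in practice atomic is used as a context manager (as in most examples here) or as a decorator. A minimal sketch of the decorator form, with a hypothetical transfer_points helper:

from django.db import transaction

@transaction.atomic
def transfer_points(sender, receiver, amount):
    # sender and receiver are illustrative model instances with a "points" field;
    # if either save() fails, the whole block is rolled back together
    sender.points -= amount
    receiver.points += amount
    sender.save()
    receiver.save()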
Example #9
File: tests.py Project: 6ft/django
 def test_nested_commit_commit(self):
     with transaction.atomic():
         Reporter.objects.create(first_name="Tintin")
         with transaction.atomic():
             Reporter.objects.create(first_name="Archibald", last_name="Haddock")
     self.assertQuerysetEqual(Reporter.objects.all(),
             ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
Example #10
File: tests.py Project: ar45/django
 def test_float_validates_object(self):
     instance = FloatModel(size=2.5)
     # Try setting float field to unsaved object
     instance.size = instance
     with transaction.atomic():
         with self.assertRaises(TypeError):
             instance.save()
     # Set value to valid and save
     instance.size = 2.5
     instance.save()
     self.assertTrue(instance.id)
     # Set field to object on saved instance
     instance.size = instance
     msg = (
         "Tried to update field model_fields.FloatModel.size with a model "
         "instance, <FloatModel: FloatModel object>. Use a value "
         "compatible with FloatField."
     )
     with transaction.atomic():
         with self.assertRaisesMessage(TypeError, msg):
             instance.save()
     # Try setting field to object on retrieved object
     obj = FloatModel.objects.get(pk=instance.id)
     obj.size = obj
     with self.assertRaises(TypeError):
         obj.save()
Example #11
def link_kamervragen_and_antwoorden():
    logger.info('BEGIN')
    from django.db.utils import IntegrityError
    kamerantwoorden = Kamerantwoord.objects.all()
    for kamerantwoord in kamerantwoorden:
        kamervragen = Kamervraag.objects.filter(vraagnummer=kamerantwoord.vraagnummer)
        if kamervragen:
            kamervraag = kamervragen[0]
            try:
                with transaction.atomic():
                    kamervraag.kamerantwoord = kamerantwoord
                    kamervraag.save()
            except IntegrityError as error:
                logger.error('kamervraag: ' + str(kamervraag.id))
                logger.error('kamerantwoord: ' + str(kamerantwoord.id))
                logger.exception(error)

    mededelingen = KamervraagMededeling.objects.all()
    for mededeling in mededelingen:
        kamervragen = Kamervraag.objects.filter(vraagnummer=mededeling.vraagnummer)
        if kamervragen:
            kamervraag = kamervragen[0]
            try:
                with transaction.atomic():
                    mededeling.kamervraag = kamervraag
                    mededeling.save()
            except IntegrityError as error:
                logger.error('mededeling: ' + str(mededeling.id))
                logger.error('kamervraag: ' + str(kamervraag.id))
                logger.exception(error)
    logger.info('END')
Example #12
    def get_model_object( self, obj_type, **conditions ):
        """(DBTools, <arbitrary Django model object>) -> 
        <unsaved arbitrary Django model object> |  
        <saved arbitrary Django model object> | Error
        """
        try:
            with transaction.atomic():
                return obj_type.objects.get( **conditions )
        except IntegrityError:
            try:
                with transaction.atomic():
                    return obj_type.objects.create( **conditions )
            except IntegrityError:
                if not len( obj_type.objects.filter( **conditions ) ):
                    raise NonUniqueError(  )
                elif len( obj_type.objects.filter( **conditions ) ) > 1:
                    raise ClassNotFoundError(  )
                else:
                    raise ObjectUnreadyError()
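        # NOTE: every path above either returns or raises (a DoesNotExist from
        # .get() would propagate uncaught), so the fallback block below is unreachable.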




        if not len( obj_type.objects.filter( **conditions ) ):
            return obj_type( **conditions )
        elif len( obj_type.objects.filter( **conditions ) ) == 1:
            return obj_type.objects.get( **conditions )
        else:
            raise NonUniqueError(  )
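The helper above hand-rolls a get-then-create with nested atomic blocks; most of it can usually be expressed with Django's built-in get_or_create, which covers the duplicate-insert race when wrapped in atomic. A minimal sketch under that assumption (the custom error classes are omitted):

from django.db import IntegrityError, transaction

def get_model_object(obj_type, **conditions):
    try:
        with transaction.atomic():
            obj, _created = obj_type.objects.get_or_create(**conditions)
            return obj
    except IntegrityError:
        # a concurrent insert won the race between the lookup and the create;
        # the row exists now, so a plain get should succeed
        return obj_type.objects.get(**conditions)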
Example #13
File: tests.py Project: 01-/django
    def test_implicit_savepoint_rollback(self):
        """MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""

        other_thread_ready = threading.Event()

        def other_thread():
            try:
                with transaction.atomic():
                    Reporter.objects.create(id=1, first_name="Tintin")
                    other_thread_ready.set()
                    # We cannot synchronize the two threads with an event here
                    # because the main thread locks. Sleep for a little while.
                    time.sleep(1)
                    # 2) ... and this line deadlocks. (see below for 1)
                    Reporter.objects.exclude(id=1).update(id=2)
            finally:
                # This is the thread-local connection, not the main connection.
                connection.close()

        other_thread = threading.Thread(target=other_thread)
        other_thread.start()
        other_thread_ready.wait()

        with self.assertRaisesMessage(OperationalError, 'Deadlock found'):
            # Double atomic to enter a transaction and create a savepoint.
            with transaction.atomic():
                with transaction.atomic():
                    # 1) This line locks... (see above for 2)
                    Reporter.objects.create(id=1, first_name="Tintin")

        other_thread.join()
Example #14
    def test_merged_rollback_rollback(self):
        with six.assertRaisesRegex(self, Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
        self.assertAtomicSignalCalls(
            # Enter atomic transaction block.
            enter_block_atomic_signal_call_sequence(True) +

            # Create Reporter.
            create_model_atomic_signal_call_sequence() +

            # Enter nested atomic transaction block.
            enter_block_atomic_signal_call_sequence(False, savepoint=False) +

            # Create Reporter.
            create_model_atomic_signal_call_sequence() +

            # Leave nested atomic transaction block.
            leave_block_atomic_signal_call_sequence(False, True,
                                                    savepoint=False) +

            # Leave atomic transaction block.
            leave_block_atomic_signal_call_sequence(True, False)
        )
Example #15
    def test_prevent_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            sid = transaction.savepoint()
            # trigger a database error inside an inner atomic without savepoint
            with self.assertRaises(DatabaseError):
                with transaction.atomic(savepoint=False):
                    connection.cursor().execute(
                        "SELECT no_such_col FROM transactions_reporter"
                    )
            # prevent atomic from rolling back since we're recovering manually
            self.assertTrue(transaction.get_rollback())
            transaction.set_rollback(False)
            transaction.savepoint_rollback(sid)
        self.assertQuerysetEqual(Reporter.objects.all(),
                                 ['<Reporter: Tintin>'])
        self.assertAtomicSignalCalls(
            # Enter atomic transaction block.
            enter_block_atomic_signal_call_sequence(True) +

            # Create Reporter.
            create_model_atomic_signal_call_sequence() +

            # Enter and leave atomic transaction block.
            enter_block_atomic_signal_call_sequence(False, savepoint=False) +
            leave_block_atomic_signal_call_sequence(False, False,
                                                    savepoint=False) +

            # Leave atomic transaction with recovered rollback.
            leave_block_atomic_signal_call_sequence(True, True)
        )
Example #16
    def judge(self):
        self.submission.judge_start_time = int(time.time() * 1000)

        with transaction.atomic():
            judge_server = self.choose_judge_server()

            # If no suitable judge server is available, put the submission into the waiting queue until one is free
            if not judge_server:
                JudgeWaitingQueue.objects.create(submission_id=self.submission.id, time_limit=self.time_limit,
                                                 memory_limit=self.memory_limit, test_case_id=self.test_case_id)
                return

            judge_server.use_judge_instance()

        try:
            s = TimeoutServerProxy("http://" + judge_server.ip + ":" + str(judge_server.port), timeout=20)

            data = s.run(judge_server.token, self.submission.id, self.submission.language,
                         self.submission.code, self.time_limit, self.memory_limit, self.test_case_id)
            # compile error
            if data["code"] == 1:
                self.submission.result = result["compile_error"]
                self.submission.info = data["data"]["error"]
            # system error
            elif data["code"] == 2:
                self.submission.result = result["system_error"]
                self.submission.info = data["data"]["error"]
            elif data["code"] == 0:
                self.submission.result = data["data"]["result"]
                self.submission.info = json.dumps(data["data"]["info"])
                self.submission.accepted_answer_time = data["data"]["accepted_answer_time"]
        except Exception as e:
            self.submission.result = result["system_error"]
            self.submission.info = str(e)
        finally:
            with transaction.atomic():
                judge_server.release_judge_instance()

            self.submission.judge_end_time = int(time.time() * 1000)
            self.submission.save()

        if self.submission.contest_id:
            self.update_contest_problem_status()
        else:
            self.update_problem_status()

        with transaction.atomic():
            waiting_submissions = JudgeWaitingQueue.objects.select_for_update().all()
            if waiting_submissions.exists():
                # avoid a circular import
                from submission.tasks import _judge

                waiting_submission = waiting_submissions.first()

                submission = Submission.objects.get(id=waiting_submission.submission_id)
                waiting_submission.delete()

                _judge.delay(submission, time_limit=waiting_submission.time_limit,
                             memory_limit=waiting_submission.memory_limit,
                             test_case_id=waiting_submission.test_case_id, is_waiting_task=True)
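Note that the select_for_update() queryset in the last block only works because it is evaluated inside transaction.atomic(): Django raises TransactionManagementError if select_for_update() runs while the connection is in autocommit mode, and the row locks on JudgeWaitingQueue are held until that atomic block commits.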
Example #17
    def test_merged_commit_commit(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic(savepoint=False):
                Reporter.objects.create(first_name="Archibald",
                                        last_name="Haddock")
        self.assertQuerysetEqual(Reporter.objects.all(),
                                 ['<Reporter: Archibald Haddock>',
                                  '<Reporter: Tintin>'])
        self.assertAtomicSignalCalls(
            # Enter atomic transaction block.
            enter_block_atomic_signal_call_sequence(True) +

            # Create Reporter.
            create_model_atomic_signal_call_sequence() +

            # Enter nested atomic transaction block.
            enter_block_atomic_signal_call_sequence(False, savepoint=False) +

            # Create Reporter.
            create_model_atomic_signal_call_sequence() +

            # Leave nested atomic transaction block with caught exception.
            leave_block_atomic_signal_call_sequence(False, True,
                                                    savepoint=False) +

            # Leave atomic transaction block with exception.
            leave_block_atomic_signal_call_sequence(True, True)
        )
Example #18
def updatejobparams(name=None, form=None, forcelevel=None):
    if form is None or name is None:
        return
    if 'descr' in form.changed_data:
        # print "Update description"
        with transaction.atomic():
            updateDIRJobDescr(name=name, descr=form.cleaned_data['descr'])
    if 'storage' in form.changed_data:
        # print "Update storage"
        with transaction.atomic():
            updateJobStorage(name=name, storage=form.cleaned_data['storage'])
    if 'client' in form.changed_data:
        # print "Update client"
        with transaction.atomic():
            updateJobClient(name=name, client=form.cleaned_data['client'])
    if 'backupsch' in form.changed_data or 'starttime' in form.changed_data or \
       'scheduleweek' in form.changed_data or 'schedulemonth' in form.changed_data or \
       'backuprepeat' in form.changed_data or 'backuplevel' in form.changed_data:
        # update Schedule
        with transaction.atomic():
            updateJobSchedule(jobname=name, data=form.cleaned_data, forcelevel=forcelevel)
    if 'retention' in form.changed_data:
        # update retention
        with transaction.atomic():
            updateJobRetention(name=name, retention=form.cleaned_data['retention'])
Example #19
    def do_update(self, limit):
        indexupdates = set()
        unit_ids = set()
        source_unit_ids = set()

        # Grab all updates from the database
        with transaction.atomic():
            updates = IndexUpdate.objects.filter(to_delete=False)
            for update in updates[:limit].iterator():
                indexupdates.add(update.pk)
                unit_ids.add(update.unitid)

                if update.source:
                    source_unit_ids.add(update.unitid)

        # Filter matching units
        units = Unit.objects.filter(
            id__in=unit_ids
        )
        source_units = Unit.objects.filter(
            id__in=source_unit_ids
        )

        # Update index
        update_index(units, source_units)

        # Delete processed updates
        with transaction.atomic():
            IndexUpdate.objects.filter(id__in=indexupdates).delete()
Example #20
    def test_hooks_cleared_after_successful_commit(self):
        with transaction.atomic():
            self.do(1)
        with transaction.atomic():
            self.do(2)

        self.assertDone([1, 2])  # not [1, 1, 2]
Example #21
 def test_executes_only_after_final_transaction_committed(self):
     with transaction.atomic():
         with transaction.atomic():
             self.do(1)
             self.assertNotified([])
         self.assertNotified([])
     self.assertDone([1])
Example #22
def purseentry_edit_batch_save(request):
    id_list = request.POST.get('id_list')
    id_list = id_list.split(',')

    with transaction.atomic():
        for billentry_id in id_list:
            form = PurseEntryEditBatchForm(request.POST)
            if not form.is_valid():
                return JsonResponse({'success': False, 'message': 'Validation failed! ' + get_form_error_msg(form)})
            material = form.data.get('material')
            amount = form.data.get('amount')
            if amount.strip() == '':
                amount = 0
            qty = form.data.get('qty')
            if qty.strip() == '':
                qty = 0

            material_id = None
            try:
                material_id = Materiel.objects.filter(bur_num=material)[0].bur_id
            except:
                pass

            bill_date = form.data.get('bill_date')

            with transaction.atomic():
                PurseEntry.objects.filter(pk=billentry_id).update(
                    material_id=material_id,
                    amount=amount,
                    qty=qty,
                )
                purse_service.markedit_by_bill_entry_id(billentry_id)
                purse_service.update_bill_date(bill_entry_id=billentry_id, bill_date=bill_date)
    return JsonResponse({'success': True, 'message': 'Saved successfully!'})
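Each inner transaction.atomic() in the loop above becomes a savepoint inside the outer transaction, so an error caught around one entry would discard only that entry's changes while keeping earlier iterations. A minimal illustration of that savepoint behaviour, using a hypothetical Row model with a unique field:

from django.db import IntegrityError, transaction

def import_rows(rows):
    # Row is an illustrative model with a unique constraint
    with transaction.atomic():              # one outer transaction
        for data in rows:
            try:
                with transaction.atomic():  # inner block = savepoint
                    Row.objects.create(**data)
            except IntegrityError:
                # only this row's savepoint is rolled back; rows created in
                # earlier iterations remain part of the outer transaction
                continue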
Example #23
    def save_avatar(cls, relation, type, avatar=None, filename=None):
        from sentry.models import File

        if avatar:
            with transaction.atomic():
                photo = File.objects.create(
                    name=filename,
                    type=cls.FILE_TYPE,
                )
                if isinstance(avatar, six.string_types):
                    avatar = BytesIO(avatar)
                photo.putfile(avatar)
        else:
            photo = None

        with transaction.atomic():
            instance, created = cls.objects.get_or_create(**relation)
            if instance.file and photo:
                instance.file.delete()

            if photo:
                instance.file = photo
                instance.ident = uuid4().hex

            instance.avatar_type = [i for i, n in cls.AVATAR_TYPES if n == type][0]

            instance.save()

        if photo and not created:
            instance.clear_cached_photos()

        return instance
Example #24
def addfriend(user_id, receiver_id):

    try:
        with transaction.atomic():
            # get user
            u = UserDAO.get_user_by_id(user_id)
            r = UserDAO.get_user_by_id(receiver_id)
    except Exception:
        # one of the users could not be loaded
        return "fail"

    # object create
    new_friend = {"userid": u, "friendid": receiver_id, "datetime": datetime.datetime.now()}
    # addfriend_request = Addfriend.object.create(new_friend)
    message = {"userid": r, "type": "3", "content": u.nickname + " wants to add you"}
    # addfriend_message = Message.objects.create(message)

    # object commit
    try:
        with transaction.atomic():
            result = AddFriendDAO.new_addfriend(**new_friend)

            # add receiver's message
            MessageDAO.new_message(**message)

    except Exception:
        result = "fail"

    return result
Example #25
    def delete(self, request, args=None):

        '''

        :argument Delete the database connection, together with all related work-order records, execution records, and permission-table entries

        :return: success or fail

        '''

        try:
            with transaction.atomic():
                con_id = DatabaseList.objects.filter(connection_name=args).first()
                work_id = SqlOrder.objects.filter(bundle_id=con_id.id).first()
                with transaction.atomic():
                    SqlRecord.objects.filter(workid=work_id).delete()
                    SqlOrder.objects.filter(bundle_id=con_id.id).delete()
                    DatabaseList.objects.filter(connection_name=args).delete()
                per = grained.objects.all().values('username', 'permissions')
                for i in per:
                    for c in i['permissions']:
                        if isinstance(i['permissions'][c], list) and c != 'diccon':
                            i['permissions'][c] = list(filter(lambda x: x != args, i['permissions'][c]))
                    grained.objects.filter(username=i['username']).update(permissions=i['permissions'])
            return Response('Database connection deleted!')
        except Exception as e:
            CUSTOM_ERROR.error(f'{e.__class__.__name__}: {e}')
            return HttpResponse(status=500)
Example #26
def delete_user_threads(sender, **kwargs):
    recount_forums = set()
    recount_threads = set()

    for thread in batch_delete(sender.thread_set.all(), 50):
        recount_forums.add(thread.forum_id)
        with transaction.atomic():
            thread.delete()

    for post in batch_delete(sender.post_set.all(), 50):
        recount_forums.add(post.forum_id)
        recount_threads.add(post.thread_id)
        with transaction.atomic():
            post.delete()

    if recount_threads:
        changed_threads_qs = Thread.objects.filter(id__in=recount_threads)
        for thread in batch_update(changed_threads_qs, 50):
            thread.synchronize()
            thread.save()

    if recount_forums:
        for forum in Forum.objects.filter(id__in=recount_forums):
            forum.synchronize()
            forum.save()
Example #27
def record_new_project(project, user, **kwargs):
    if not user.is_authenticated():
        user = None

    try:
        with transaction.atomic():
            OrganizationOnboardingTask.objects.create(
                organization=project.organization,
                task=OnboardingTask.FIRST_PROJECT,
                user=user,
                status=OnboardingTaskStatus.COMPLETE,
                project_id=project.id,
                date_completed=timezone.now(),
            )
    except IntegrityError:
        try:
            with transaction.atomic():
                OrganizationOnboardingTask.objects.create(
                    organization=project.organization,
                    task=OnboardingTask.SECOND_PLATFORM,
                    user=user,
                    status=OnboardingTaskStatus.PENDING,
                    project_id=project.id,
                    date_completed=timezone.now(),
                )
        except IntegrityError:
            pass
Example #28
def addperms(request, rolename):
    group = get_object_or_404(Group, name=rolename)
    status = [False, False, '']
    if request.method == 'POST':
        perms = get_system_permissions_filtered(group)
        form = RolesAddpermForm(perms=perms, data=request.POST)
        if form.is_valid():
            permname = form.cleaned_data['perms']
            if permname.startswith('addallperms'):
                (n, applabel) = permname.split('_')
                status = [True, True, applabel.capitalize()]
                allperms = Permission.objects\
                    .filter(content_type__model='permissions', content_type__app_label=applabel)\
                    .exclude(codename__icontains='_permissions')
                with transaction.atomic():
                    for perm in allperms:
                        group.permissions.add(perm)
                    group.save()
            else:
                with transaction.atomic():
                    status = [True, False, permname]
                    perm = get_object_or_404(Permission, codename=permname, content_type__model='permissions')
                    group.permissions.add(perm)
                    group.save()
        else:
            status = [False, False, "Cannot validate a form: %s" % form.errors]
    return JsonResponse(status, safe=False)
Example #29
def _update_xblocks_cache(course_key):
    """
    Calculate the XBlock cache data for a course and update the XBlockCache table.
    """
    from .models import XBlockCache
    blocks_data = _calculate_course_xblocks_data(course_key)

    def update_block_cache_if_needed(block_cache, block_data):
        """ Compare block_cache object with data and update if there are differences. """
        paths = _paths_from_data(block_data['paths'])
        if block_cache.display_name != block_data['display_name'] or not paths_equal(block_cache.paths, paths):
            log.info(u'Updating XBlockCache with usage_key: %s', unicode(block_cache.usage_key))
            block_cache.display_name = block_data['display_name']
            block_cache.paths = paths
            block_cache.save()

    with transaction.atomic():
        block_caches = XBlockCache.objects.filter(course_key=course_key)
        for block_cache in block_caches:
            block_data = blocks_data.pop(unicode(block_cache.usage_key), None)
            if block_data:
                update_block_cache_if_needed(block_cache, block_data)

    for block_data in blocks_data.values():
        with transaction.atomic():
            paths = _paths_from_data(block_data['paths'])
            log.info(u'Creating XBlockCache with usage_key: %s', unicode(block_data['usage_key']))
            block_cache, created = XBlockCache.objects.get_or_create(usage_key=block_data['usage_key'], defaults={
                'course_key': course_key,
                'display_name': block_data['display_name'],
                'paths': paths,
            })

            if not created:
                update_block_cache_if_needed(block_cache, block_data)
Example #30
    def test_delete_recipe(self):
        Recipe.objects.all().delete()
        vodka_id = Ingredient.objects.get(name='Vodka').id
        oj_id = Ingredient.objects.get(name='Orange Juice').id

        screwdriver_recipe = {
            'name': 'Screwdriver',
            'instructions': [
                'Fill glass with ice',
                'Add 2 parts Orange Juice',
                'Add 1 part Vodka',
                'Shake well'
            ],
            'ingredients': {
                vodka_id: 1,
                oj_id: 2
            }
        }

        recipes = self.profile.create_recipe(screwdriver_recipe)
        screwdriver = recipes.get(name='Screwdriver')

        profile = self.profile.delete_recipe(screwdriver.id)

        with self.assertRaises(Recipe.DoesNotExist):
            with transaction.atomic():
                profile.created_recipes.get(id=screwdriver.id)

        with self.assertRaises(Recipe.DoesNotExist):
            with transaction.atomic():
                Recipe.objects.get(id=screwdriver.id)

        recipes = Recipe.objects.all()
        self.assertEqual(0, recipes.count())
Example #31
 def perform_bulk_destroy(self, objects):
     with transaction.atomic():
         for o in objects:
             if hasattr(o, "snapshot"):
                 o.snapshot()
             self.perform_destroy(o)
Example #32
    def save(self, project_id, raw=False, assume_normalized=False):
        # Normalize if needed
        if not self._normalized:
            if not assume_normalized:
                self.normalize()
            self._normalized = True

        data = self._data

        project = Project.objects.get_from_cache(id=project_id)
        project._organization_cache = Organization.objects.get_from_cache(
            id=project.organization_id
        )

        # Check to make sure we're not about to do a bunch of work that's
        # already been done if we've processed an event with this ID. (This
        # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
        # there's a race condition between here and when the event is actually
        # saved, but it's an improvement. See GH-7677.)
        try:
            event = Event.objects.get(project_id=project.id, event_id=data["event_id"])
        except Event.DoesNotExist:
            pass
        else:
            # Make sure we cache on the project before returning
            event._project_cache = project
            logger.info(
                "duplicate.found",
                exc_info=True,
                extra={
                    "event_uuid": data["event_id"],
                    "project_id": project.id,
                    "platform": data.get("platform"),
                    "model": Event.__name__,
                },
            )
            return event

        # Pull out the culprit
        culprit = self.get_culprit()

        # Pull the toplevel data we're interested in
        level = data.get("level")

        # TODO(mitsuhiko): this code path should be gone by July 2018.
        # This is going to be fine because no code actually still depends
        # on integers here.  When we need an integer it will be converted
        # into one later.  Old workers used to send integers here.
        if level is not None and isinstance(level, six.integer_types):
            level = LOG_LEVELS[level]

        transaction_name = data.get("transaction")
        logger_name = data.get("logger")
        release = data.get("release")
        dist = data.get("dist")
        environment = data.get("environment")
        recorded_timestamp = data.get("timestamp")

        # We need to swap out the data with the one internal to the newly
        # created event object
        event = self._get_event_instance(project_id=project_id)
        self._data = data = event.data.data

        event._project_cache = project

        date = event.datetime
        platform = event.platform
        event_id = event.event_id

        if transaction_name:
            transaction_name = force_text(transaction_name)

        # Right now the event type is the signal to skip the group. This
        # is going to change a lot.
        if event.get_event_type() == "transaction":
            issueless_event = True
        else:
            issueless_event = False

        # Some of the data that are toplevel attributes are duplicated
        # into tags (logger, level, environment, transaction).  These are
        # different from legacy attributes which are normalized into tags
        # ahead of time (site, server_name).
        setdefault_path(data, "tags", value=[])
        set_tag(data, "level", level)
        if logger_name:
            set_tag(data, "logger", logger_name)
        if environment:
            set_tag(data, "environment", environment)
        if transaction_name:
            set_tag(data, "transaction", transaction_name)

        if release:
            # don't allow a conflicting 'release' tag
            pop_tag(data, "release")
            release = Release.get_or_create(project=project, version=release, date_added=date)
            set_tag(data, "sentry:release", release.version)

        if dist and release:
            dist = release.add_dist(dist, date)
            # don't allow a conflicting 'dist' tag
            pop_tag(data, "dist")
            set_tag(data, "sentry:dist", dist.name)
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            pop_tag(data, "user")
            set_tag(data, "sentry:user", event_user.tag_value)

        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        grouping_config = load_grouping_config(
            get_grouping_config_dict_for_event_data(data, project)
        )
        normalize_stacktraces_for_grouping(data, grouping_config)

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    if get_tag(data, key) is None:
                        set_tag(data, key, value)

        for path, iface in six.iteritems(event.interfaces):
            for k, v in iface.iter_tags():
                set_tag(data, k, v)
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.path, None)

        # The active grouping config was put into the event in the
        # normalize step before.  We now also make sure that the
        # fingerprint was set to '{{ default }}' just in case someone
        # removed it from the payload.  The call to get_hashes will then
        # look at `grouping_config` to pick the right parameters.
        data["fingerprint"] = data.get("fingerprint") or ["{{ default }}"]
        apply_server_fingerprinting(data, get_fingerprinting_config_for_project(project))

        # Here we try to use the grouping config that was requested in the
        # event.  If that config has since been deleted (because it was an
        # experimental grouping config) we fall back to the default.
        try:
            hashes = event.get_hashes()
        except GroupingConfigNotFound:
            data["grouping_config"] = get_grouping_config_dict_for_project(project)
            hashes = event.get_hashes()

        data["hashes"] = hashes

        # we want to freeze not just the metadata and type in but also the
        # derived attributes.  The reason for this is that we push this
        # data into kafka for snuba processing and our postprocessing
        # picks up the data right from the snuba topic.  For most usage
        # however the data is dynamically overridden by Event.title and
        # Event.location (See Event.as_dict)
        materialized_metadata = self.materialize_metadata()
        event_metadata = materialized_metadata["metadata"]
        data.update(materialized_metadata)
        data["culprit"] = culprit

        # index components into ``Event.message``
        # See GH-3248
        event.message = self.get_search_message(event_metadata, culprit)
        received_timestamp = event.data.get("received") or float(event.datetime.strftime("%s"))

        if not issueless_event:
            # The group gets the same metadata as the event when it's flushed but
            # additionally the `last_received` key is set.  This key is used by
            # _save_aggregate.
            group_metadata = dict(materialized_metadata)
            group_metadata["last_received"] = received_timestamp
            kwargs = {
                "platform": platform,
                "message": event.message,
                "culprit": culprit,
                "logger": logger_name,
                "level": LOG_LEVELS_MAP.get(level),
                "last_seen": date,
                "first_seen": date,
                "active_at": date,
                "data": group_metadata,
            }

            if release:
                kwargs["first_release"] = release

            try:
                group, is_new, is_regression = self._save_aggregate(
                    event=event, hashes=hashes, release=release, **kwargs
                )
            except HashDiscarded:
                event_discarded.send_robust(project=project, sender=EventManager)

                metrics.incr(
                    "events.discarded",
                    skip_internal=True,
                    tags={"organization_id": project.organization_id, "platform": platform},
                )
                raise
            else:
                event_saved.send_robust(project=project, event_size=event.size, sender=EventManager)
            event.group = group
        else:
            group = None
            is_new = False
            is_regression = False
            event_saved.send_robust(project=project, event_size=event.size, sender=EventManager)

        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        environment = Environment.get_or_create(project=project, name=environment)

        if group:
            group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
                group_id=group.id,
                environment_id=environment.id,
                defaults={"first_release": release if release else None},
            )
        else:
            is_new_group_environment = False

        if release:
            ReleaseEnvironment.get_or_create(
                project=project, release=release, environment=environment, datetime=date
            )

            ReleaseProjectEnvironment.get_or_create(
                project=project, release=release, environment=environment, datetime=date
            )

            if group:
                grouprelease = GroupRelease.get_or_create(
                    group=group, release=release, environment=environment, datetime=date
                )

        counters = [(tsdb.models.project, project.id)]

        if group:
            counters.append((tsdb.models.group, group.id))

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters, timestamp=event.datetime, environment_id=environment.id)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
        ]

        if group:
            frequencies.append(
                (tsdb.models.frequent_environments_by_group, {group.id: {environment.id: 1}})
            )

            if release:
                frequencies.append(
                    (tsdb.models.frequent_releases_by_group, {group.id: {grouprelease.id: 1}})
                )
        if frequencies:
            tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        if group:
            UserReport.objects.filter(project=project, event_id=event_id).update(
                group=group, environment=environment
            )

        # save the event
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            logger.info(
                "duplicate.found",
                exc_info=True,
                extra={
                    "event_uuid": event_id,
                    "project_id": project.id,
                    "group_id": group.id if group else None,
                    "model": Event.__name__,
                },
            )
            return event

        if event_user:
            counters = [
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,))
            ]

            if group:
                counters.append(
                    (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,))
                )

            tsdb.record_multi(counters, timestamp=event.datetime, environment_id=environment.id)

        if release:
            if is_new:
                buffer.incr(
                    ReleaseProject,
                    {"new_groups": 1},
                    {"release_id": release.id, "project_id": project.id},
                )
            if is_new_group_environment:
                buffer.incr(
                    ReleaseProjectEnvironment,
                    {"new_issues_count": 1},
                    {
                        "project_id": project.id,
                        "release_id": release.id,
                        "environment_id": environment.id,
                    },
                )

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send_robust(project=project, event=event, sender=Project)

        eventstream.insert(
            group=group,
            event=event,
            is_new=is_new,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
            # We are choosing to skip consuming the event back
            # in the eventstream if it's flagged as raw.
            # This means that we want to publish the event
            # through the event stream, but we don't care
            # about post processing and handling the commit.
            skip_consume=raw,
        )

        metrics.timing("events.latency", received_timestamp - recorded_timestamp)

        metrics.timing("events.size.data.post_save", event.size)

        return event
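The single atomic block in this save() is opened with an explicit alias, transaction.atomic(using=router.db_for_write(Event)), which pins the transaction to whichever connection the database router chooses for Event writes. A minimal sketch of the using argument, assuming a configured "analytics" database alias and an illustrative model instance:

from django.db import transaction

def record_hit(page):
    # the transaction is opened on the "analytics" connection, not the default one
    with transaction.atomic(using="analytics"):
        page.hits += 1
        page.save(using="analytics")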
Example #33
        def _add_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK fieldname in join table for the source object
            # target_field_name: the PK fieldname in join table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.

            # If there aren't any objects, there is nothing to do.
            from django.db.models import Model
            if objs:
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        if not router.allow_relation(obj, self.instance):
                            raise ValueError(
                                'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                (obj, self.instance._state.db, obj._state.db)
                            )
                        fk_val = self.through._meta.get_field(
                            target_field_name).get_foreign_related_value(obj)[0]
                        if fk_val is None:
                            raise ValueError(
                                'Cannot add "%r": the value for field "%s" is None' %
                                (obj, target_field_name)
                            )
                        new_ids.add(fk_val)
                    elif isinstance(obj, Model):
                        raise TypeError(
                            "'%s' instance expected, got %r" %
                            (self.model._meta.object_name, obj)
                        )
                    else:
                        new_ids.add(obj)

                db = router.db_for_write(self.through, instance=self.instance)
                vals = (self.through._default_manager.using(db)
                        .values_list(target_field_name, flat=True)
                        .filter(**{
                            source_field_name: self.related_val[0],
                            '%s__in' % target_field_name: new_ids,
                        }))
                new_ids = new_ids - set(vals)

                with transaction.atomic(using=db, savepoint=False):
                    if self.reverse or source_field_name == self.source_field_name:
                        # Don't send the signal when we are inserting the
                        # duplicate data row for symmetrical reverse entries.
                        signals.m2m_changed.send(
                            sender=self.through, action='pre_add',
                            instance=self.instance, reverse=self.reverse,
                            model=self.model, pk_set=new_ids, using=db,
                        )

                    # Add the ones that aren't there already
                    self.through._default_manager.using(db).bulk_create([
                        self.through(**{
                            '%s_id' % source_field_name: self.related_val[0],
                            '%s_id' % target_field_name: obj_id,
                        })
                        for obj_id in new_ids
                    ])

                    if self.reverse or source_field_name == self.source_field_name:
                        # Don't send the signal when we are inserting the
                        # duplicate data row for symmetrical reverse entries.
                        signals.m2m_changed.send(
                            sender=self.through, action='post_add',
                            instance=self.instance, reverse=self.reverse,
                            model=self.model, pk_set=new_ids, using=db,
                        )
Example #34
    def create(self, validated_data):
        active_state_signature_data = validated_data.pop('debit_signature')
        wallet = validated_data.pop('wallet')
        recipient = validated_data.pop('recipient')

        # get current eon
        current_eon = LocalViewInterface.latest().eon_number()

        # transfer eon should be the current eon number
        if validated_data.pop('eon_number') != current_eon:
            raise serializers.ValidationError(
                detail='', code=ErrorCode.EON_NUMBER_OUT_OF_SYNC)

        # TODO refactor this such that the recipient is only locked after the sender's details are verified
        wallets = sorted([wallet, recipient], key=lambda w: w.trail_identifier)
        with RootCommitment.read_write_lock(
                suffix=current_eon, auto_renewal=False), wallets[0].lock(
                    auto_renewal=False), wallets[1].lock(auto_renewal=False):
            if RootCommitment.objects.filter(eon_number=current_eon +
                                             1).exists():
                raise serializers.ValidationError(
                    detail='', code=ErrorCode.EON_NUMBER_OUT_OF_SYNC)

            transfer = Transfer(wallet=wallet,
                                amount=validated_data.pop('amount'),
                                eon_number=current_eon,
                                recipient=recipient,
                                nonce=validated_data.pop('nonce'),
                                passive=True)

            wallet_view_context = WalletTransferContext(wallet=wallet,
                                                        transfer=transfer)
            recipient_view_context = WalletTransferContext(wallet=recipient,
                                                           transfer=transfer)

            # Minimal SLA
            if not wallet.is_sla_exempt() and not recipient.is_sla_exempt():
                if not wallet.has_valid_sla():
                    sender_transfers_list = wallet_view_context.authorized_transfers_list(
                        only_appended=False, force_append=True)
                    if len(sender_transfers_list) > settings.SLA_THRESHOLD:
                        raise serializers.ValidationError(
                            detail='',
                            code=ErrorCode.DEBIT_WALLET_EXCEEDED_SLA)
                elif not recipient.has_valid_sla():
                    recipient_transfers_list = recipient_view_context.authorized_transfers_list(
                        only_appended=False, force_append=True)
                    if len(recipient_transfers_list) > settings.SLA_THRESHOLD:
                        raise serializers.ValidationError(
                            detail='',
                            code=ErrorCode.CREDIT_WALLET_EXCEEDED_SLA)

            # Ensure sender log consistency
            can_append_to_sender_log = wallet_view_context.can_schedule_transfer(
            )
            if can_append_to_sender_log is not True:
                raise serializers.ValidationError(
                    detail='Sender: {}'.format(can_append_to_sender_log),
                    code=ErrorCode.DEBIT_WALLET_CANNOT_ADD_TRANSACTION)

            # Ensure recipient log consistency
            can_append_to_recipient_log = recipient_view_context.can_schedule_transfer(
            )
            if can_append_to_recipient_log is not True:
                raise serializers.ValidationError(
                    detail='Recipient: {}'.format(can_append_to_recipient_log),
                    code=ErrorCode.CREDIT_WALLET_CANNOT_ADD_TRANSACTION)

            # Ensure transfer consistency
            can_spend, currently_available_funds = wallet_view_context.can_send_transfer(
                current_eon_number=current_eon,
                using_only_appended_funds=False)
            if can_spend is not True:
                raise serializers.ValidationError(
                    detail=can_spend, code=ErrorCode.DEBIT_WALLET_OVERSPENDING)

            # Validate data
            concise_balance_marker_signature_data = validated_data.pop(
                'debit_balance_signature')
            concise_balance_marker_amount = validated_data.pop('debit_balance')

            if concise_balance_marker_amount > currently_available_funds - transfer.amount:
                raise serializers.ValidationError(
                    detail='',
                    code=ErrorCode.DEBIT_WALLET_BALANCE_MARKER_EXCEED_BALANCE)

            concise_balance_marker = MinimumAvailableBalanceMarker(
                wallet=wallet,
                eon_number=transfer.eon_number,
                amount=concise_balance_marker_amount)
            concise_balance_marker_checksum = hex_value(
                concise_balance_marker.checksum())
            concise_balance_marker_signature = Signature(
                wallet=transfer.wallet,
                checksum=concise_balance_marker_checksum,
                value=concise_balance_marker_signature_data.get('value'))
            if not concise_balance_marker_signature.is_valid():
                raise serializers.ValidationError(
                    detail='', code=ErrorCode.INVALID_DEBIT_BALANCE_SIGNATURE)

            tx_set_tree = wallet_view_context.optimized_authorized_transfers_tree()
            tx_set_hash = hex_value(tx_set_tree.root_hash())
            transfer_index = tx_set_tree.merkle_tree_nonce_map.get(
                transfer.nonce)
            transfer_proof = tx_set_tree.proof(transfer_index)

            highest_spendings, highest_gains = wallet_view_context.off_chain_actively_sent_received_amounts(
                eon_number=transfer.eon_number, only_appended=False)
            active_state = ActiveState(wallet=wallet,
                                       updated_spendings=highest_spendings +
                                       transfer.amount,
                                       updated_gains=highest_gains,
                                       tx_set_hash=tx_set_hash,
                                       tx_set_proof_hashes=transfer_proof,
                                       tx_set_index=transfer_index,
                                       eon_number=transfer.eon_number)

            checksum = hex_value(active_state.checksum())
            active_state_signature = Signature(
                wallet=transfer.wallet,
                checksum=checksum,
                value=active_state_signature_data.get('value'))
            if not active_state_signature.is_valid():
                raise serializers.ValidationError(
                    detail='', code=ErrorCode.INVALID_DEBIT_SIGNATURE)

            transfer.position = recipient_view_context.off_chain_passively_received_amount(
                eon_number=transfer.eon_number, only_appended=False)

            # locking context covers saving the state as well to make sure checkpoint creation is consistent
            with transaction.atomic():
                Signature.objects.bulk_create(
                    [concise_balance_marker_signature, active_state_signature])

                concise_balance_marker.signature = concise_balance_marker_signature
                concise_balance_marker.save()

                active_state.wallet_signature = active_state_signature
                active_state.operator_signature = active_state.sign_active_state(
                    settings.HUB_OWNER_ACCOUNT_ADDRESS,
                    settings.HUB_OWNER_ACCOUNT_KEY)
                active_state.save()

                transfer.sender_active_state = active_state
                transfer.sender_balance_marker = concise_balance_marker
                # cache transfer index in sender active set
                transfer.sender_merkle_index = transfer_index
                # transfer.sender_merkle_root_cache = tx_set_hash
                # cache active set merkle mountains height array and hash array for recipient active set
                transfer.sender_merkle_hash_cache, transfer.sender_merkle_height_cache = tx_set_tree.merkle_cache_stacks()
                transfer.complete = True
                transfer.appended = True
                transfer.processed = True
                transfer.save()

        if transfer.appended:
            operator_celery.send_task('auditor.tasks.on_transfer_confirmation',
                                      args=[transfer.id])

        return transfer
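The block above keeps the signature, balance-marker, active-state and transfer writes in a single transaction.atomic() so checkpoint creation never observes a half-saved transfer, and the Celery confirmation task is only dispatched after the block exits. A minimal sketch of a related idiom, transaction.on_commit(), which defers the dispatch until the outermost transaction has actually committed (Order, OrderItem and notify_order_created are hypothetical):

from django.db import transaction

def create_order(customer, items, notify_order_created):
    # A sketch: Order/OrderItem are hypothetical models and
    # notify_order_created stands in for a Celery .delay() call.
    with transaction.atomic():
        order = Order.objects.create(customer=customer)
        OrderItem.objects.bulk_create(
            [OrderItem(order=order, **item) for item in items])
        # runs only once the outermost transaction commits, so a rollback
        # can never leave a task pointing at a missing row
        transaction.on_commit(lambda: notify_order_created(order.pk))
    return order

on_commit callbacks are simply discarded if the transaction rolls back, which is usually the behaviour you want for task dispatch.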
Example #35
0
def sync(remote_pk, repository_pk):
    """
    Sync Collections with ``remote_pk``, and save a new RepositoryVersion for ``repository_pk``.

    Args:
        remote_pk (str): The remote PK.
        repository_pk (str): The repository PK.

    Raises:
        ValueError: If the remote does not specify a URL to sync or a ``whitelist`` of Collections
            to sync.

    """
    remote = CollectionRemote.objects.get(pk=remote_pk)
    repository = Repository.objects.get(pk=repository_pk)

    if not remote.url:
        raise ValueError(_("A CollectionRemote must have a 'url' specified to synchronize."))

    if not remote.whitelist:
        raise ValueError(_("A CollectionRemote must have a 'whitelist' specified to synchronize."))

    repository_spec_strings = remote.whitelist.split(' ')

    def nowhere(*args, **kwargs):
        pass

    collections_pks = []

    with RepositoryVersion.create(repository) as new_version:
        with tempfile.TemporaryDirectory() as temp_ansible_path:

            # workaround: mazer logs errors without this dir  https://pulp.plan.io/issues/4999
            os.mkdir(temp_ansible_path + os.sep + 'ansible_collections')

            galaxy_context = GalaxyContext(
                collections_path=temp_ansible_path,
                server={
                    'url': remote.url,
                    'ignore_certs': False,
                },
            )

            install_repository_specs_loop(
                display_callback=nowhere,
                galaxy_context=galaxy_context,
                repository_spec_strings=repository_spec_strings,
            )

            content_walk_generator = os.walk(temp_ansible_path)
            for dirpath, dirnames, filenames in content_walk_generator:
                if 'MANIFEST.json' in filenames:
                    with open(dirpath + os.path.sep + 'MANIFEST.json') as manifest_file:
                        manifest_data = json.load(manifest_file)
                    info = manifest_data['collection_info']
                    filename = '{namespace}-{name}-{version}'.format(
                        namespace=info['namespace'],
                        name=info['name'],
                        version=info['version'],
                    )
                    tarfile_path = temp_ansible_path + os.path.sep + filename + '.tar.gz'
                    with tarfile.open(name=tarfile_path, mode='w|gz') as newtar:
                        newtar.add(dirpath, arcname=filename)

                    with transaction.atomic():
                        collection, created = Collection.objects.get_or_create(
                            namespace=info['namespace'],
                            name=info['name'],
                            version=info['version']
                        )

                        if created:
                            artifact = Artifact.init_and_validate(newtar.name)
                            artifact.save()

                            ContentArtifact.objects.create(
                                artifact=artifact,
                                content=collection,
                                relative_path=collection.relative_path,
                            )

                            collections_pks.append(collection)

        collections = Collection.objects.filter(pk__in=collections_pks)
        new_version.add_content(collections)
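sync() above commits each discovered collection in its own small transaction.atomic() block, so the Collection row, its Artifact and the ContentArtifact link land together and an interrupted sync leaves only whole collections behind. Roughly the same per-item shape, with hypothetical Item and Blob models:

from django.db import transaction

def import_items(discovered):
    # one short transaction per item: a crash mid-import loses at most the
    # item currently being written, never a half-linked one
    imported = []
    for info in discovered:
        with transaction.atomic():
            item, created = Item.objects.get_or_create(name=info['name'],
                                                       version=info['version'])
            if created:
                Blob.objects.create(item=item, path=info['path'])
            imported.append(item.pk)
    return imported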
Example #36
0
def view(request):
    ex = None
    data = {'title': u'Sesiones de Caja'}
    addUserData(request, data)

    usuario = request.user
    if not data['es_cajero']:
        return HttpResponseRedirect("/?info=El usuario no tiene permisos como cajero")
    data['cajero'] = cajero = usuario.cajero_set.all()[0]

    if request.method == 'POST':
        action = request.POST['action']

        if action == 'abrirsesion':
            f = SesionCajaForm(request.POST)
            if f.is_valid():
                try:
                    with transaction.atomic():
                        sc = SesionCaja(cajero=cajero,
                                        fecha=datetime.now(),
                                        fondo=f.cleaned_data['fondo'],
                                        abierta=True)
                        sc.save()
                        salva_auditoria(request, sc, ACCION_MODIFICAR, 'Abierta sesion de caja: ' + sc.__str__())
                        return ok_json()

                except Exception:
                    return bad_json(error=1)
            else:
                return bad_json(error=1)

        elif action == 'closesesion':
            sc = SesionCaja.objects.get(pk=request.POST['id'])
            f = CierreSesionCajaForm(request.POST)
            if f.is_valid():
                try:
                    with transaction.atomic():
                        sc.bill100 = f.cleaned_data['bill100']
                        sc.bill50 = f.cleaned_data['bill50']
                        sc.bill20 = f.cleaned_data['bill20']
                        sc.bill10 = f.cleaned_data['bill10']
                        sc.bill5 = f.cleaned_data['bill5']
                        sc.bill2 = f.cleaned_data['bill2']
                        sc.bill1 = f.cleaned_data['bill1']
                        sc.enmonedas = f.cleaned_data['enmonedas']
                        sc.cheque = f.cleaned_data['cheque']
                        sc.deposito = f.cleaned_data['deposito']
                        sc.transferencia = f.cleaned_data['transferencia']
                        sc.abierta = False
                        sc.fechacierre = datetime.now()
                        sc.horacierre = datetime.now().time()
                        sc.save()
                        salva_auditoria(request, sc, ACCION_MODIFICAR, 'Cerrada sesion de caja: {}'.format(sc))
                        return ok_json()

                except Exception:
                    return bad_json(error=1)
            else:
                return bad_json(error=1)

        return bad_json(error=0)

    else:
        if 'action' in request.GET:
            action = request.GET['action']
            if action == 'addsesion':
                try:
                    data['title'] = u'Abrir Sesión de Caja'
                    data['form'] = SesionCajaForm(initial={'fondo': '0.00'})
                    return render_to_response("caja/adicionarbs.html", data)
                except Exception as ex:
                    pass

            elif action == 'closesesion':
                try:
                    data['title'] = u'Cierre Sesión de Caja'
                    data['sesion'] = SesionCaja.objects.get(pk=request.GET['id'])
                    data['form'] = CierreSesionCajaForm(initial={'bill100': 0,
                                                                 'bill50': 0,
                                                                 'bill20': 0,
                                                                 'bill10': 0,
                                                                 'bill5': 0,
                                                                 'bill2': 0,
                                                                 'bill1': 0,
                                                                 'enmonedas': 0.00,
                                                                 'cheque': 0.00,
                                                                 'deposito': 0.00,
                                                                 'transferencia': 0.00,
                                                                 'total': 0.00})
                    return render_to_response("caja/cerrarsesionbs.html", data)
                except Exception as ex:
                    pass

            return HttpResponseRedirect(url_back(request, mensaje_excepcion(ex.args[0])))

        else:
            sesiones = cajero.sesiones_caja()
            paging = MiPaginador(sesiones, 30)
            p = 1
            try:
                if 'page' in request.GET:
                    p = int(request.GET['page'])
                sesionespagina = paging.page(p)
            except:
                sesionespagina = paging.page(1)
            data['paging'] = paging
            data['rangospaging'] = paging.rangos_paginado(p)
            data['page'] = sesionespagina
            data['sesiones'] = sesionespagina.object_list
            return render_to_response("caja/sesionesbs.html", data)
Example #37
0
def process_free_opinion_result(self, row_pk, cnt):
    """Process a single result from the free opinion report"""
    result = PACERFreeDocumentRow.objects.get(pk=row_pk)
    result.court = Court.objects.get(pk=map_pacer_to_cl_id(result.court_id))
    result.case_name = harmonize(result.case_name)
    result.case_name_short = cnt.make_case_name_short(result.case_name)
    row_copy = copy.copy(result)
    # If we don't do this, the doc's date_filed becomes the docket's
    # date_filed. Bad.
    delattr(row_copy, 'date_filed')
    # If we don't do this, we get the PACER court id and it crashes
    delattr(row_copy, 'court_id')
    # If we don't do this, the id of result tries to smash that of the docket.
    delattr(row_copy, 'id')
    try:
        with transaction.atomic():
            docket = lookup_and_save(row_copy)
            if not docket:
                msg = "Unable to create docket for %s" % result
                logger.error(msg)
                result.error_msg = msg
                result.save()
                self.request.callbacks = None
                return
            docket.blocked, docket.date_blocked = get_blocked_status(docket)
            docket.save()

            de, de_created = DocketEntry.objects.update_or_create(
                docket=docket,
                entry_number=result.document_number,
                defaults={
                    'date_filed': result.date_filed,
                    'description': result.description,
                })
            rd, rd_created = RECAPDocument.objects.update_or_create(
                docket_entry=de,
                document_number=result.document_number,
                attachment_number=None,
                defaults={
                    'pacer_doc_id': result.pacer_doc_id,
                    'document_type': RECAPDocument.PACER_DOCUMENT,
                    'is_free_on_pacer': True,
                })
    except IntegrityError as e:
        msg = "Raised IntegrityError: %s" % e
        logger.error(msg)
        if self.request.retries == self.max_retries:
            result.error_msg = msg
            result.save()
            return
        raise self.retry(exc=e)
    except DatabaseError as e:
        msg = "Unable to complete database transaction:\n%s" % e
        logger.error(msg)
        result.error_msg = msg
        result.save()
        self.request.callbacks = None
        return

    if not rd_created and rd.is_available:
        # The item already exists and is available. Fantastic, mark it as free,
        # and call it a day.
        rd.is_free_on_pacer = True
        rd.save()
        result.delete()
        self.request.callbacks = None
        return

    return {
        'result': result,
        'rd_pk': rd.pk,
        'pacer_court_id': result.court_id
    }
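process_free_opinion_result() wraps the docket, entry and document writes in one transaction.atomic() block and catches IntegrityError and DatabaseError outside it, so the transaction is already rolled back by the time the task decides whether to record the error or retry. A condensed sketch of that retry shape (Row is a hypothetical model):

from celery import shared_task
from django.db import IntegrityError, transaction

@shared_task(bind=True, max_retries=3)
def build_rows(self, key):
    # all-or-nothing: the handler below sees a clean, fully rolled-back
    # connection before deciding to give up or retry
    try:
        with transaction.atomic():
            parent = Row.objects.create(key=key)
            Row.objects.create(key=key + ':child', parent=parent)
    except IntegrityError as exc:
        if self.request.retries >= self.max_retries:
            return {'error': str(exc)}
        raise self.retry(exc=exc)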
Example #38
0
def handle_outdated_resources(plan):
    """
    Marks repositories, importers and distributors which are no longer present in Pulp2.

    Deletes Publications and Distributions which are no longer present in Pulp2.

    Args:
        plan(MigrationPlan): A Migration Plan
    """
    RepoSetup.mark_changed_relations()
    RepoSetup.finalize()

    for plugin_plan in plan.get_plugin_plans():
        inplan_repos = plugin_plan.get_repositories()

        # filter by repo type and by the repos specified in a plan
        repos_to_consider = plan.type_to_repo_ids[plugin_plan.type]
        repos_to_consider = set(inplan_repos).intersection(repos_to_consider)

        mongo_repo_q = mongo_Q(repo_id__in=repos_to_consider)
        mongo_repo_obj_ids = set(str(i.id) for i in Repository.objects(mongo_repo_q).only('id'))

        repo_type_q = Q(pulp2_repo_type=plugin_plan.type)
        inplan_repo_q = Q(pulp2_object_id__in=mongo_repo_obj_ids)
        Pulp2Repository.objects.filter(repo_type_q).exclude(inplan_repo_q).update(not_in_plan=True)

        # Mark removed or excluded importers
        inplan_imp_repos = plugin_plan.get_importers_repos()
        mongo_imp_q = mongo_Q(repo_id__in=inplan_imp_repos)
        mongo_imp_obj_ids = set(str(i.id) for i in Importer.objects(mongo_imp_q).only('id'))
        imp_types = plugin_plan.migrator.importer_migrators.keys()

        imp_type_q = Q(pulp2_type_id__in=imp_types)
        inplan_imp_q = Q(pulp2_object_id__in=mongo_imp_obj_ids)
        Pulp2Importer.objects.filter(imp_type_q).exclude(inplan_imp_q).update(not_in_plan=True)

        # Mark removed or excluded distributors
        inplan_dist_repos = plugin_plan.get_distributors_repos()
        mongo_dist_q = mongo_Q(repo_id__in=inplan_dist_repos)
        mongo_dist_obj_ids = set(str(i.id) for i in Distributor.objects(mongo_dist_q).only('id'))
        dist_types = plugin_plan.migrator.distributor_migrators.keys()

        dist_type_q = Q(pulp2_type_id__in=dist_types)
        inplan_dist_q = Q(pulp2_object_id__in=mongo_dist_obj_ids)
        Pulp2Distributor.objects.filter(dist_type_q).exclude(inplan_dist_q).update(not_in_plan=True)

    # Delete old Publications/Distributions which are no longer present in Pulp2.

    # It's critical to remove Distributions to avoid base_path overlap.
    # It makes the migration logic easier if we remove old Publications as well.

    # Delete criteria:
    #     - pulp2distributor is no longer in plan
    #     - pulp2repository content changed (repo.is_migrated=False) or it is no longer in plan

    repos_with_old_distributions_qs = Pulp2Repository.objects.filter(
        Q(is_migrated=False) | Q(not_in_plan=True)
    )

    old_dist_query = Q(pulp3_distribution__isnull=False) | Q(pulp3_publication__isnull=False)
    old_dist_query &= Q(pulp2_repos__in=repos_with_old_distributions_qs) | Q(not_in_plan=True)

    with transaction.atomic():
        pulp2distributors_with_old_distributions_qs = Pulp2Distributor.objects.filter(
            old_dist_query
        )

        pulp2distributors_with_old_distributions_qs.update(
            is_migrated=False
        )

        # If publication is shared by multiple distributions, on the corresponding distributors
        # flip the flag to false so the affected distributions will be updated with the new
        # publication
        Pulp2Distributor.objects.filter(
            pulp3_publication__in=Publication.objects.filter(
                pulp2distributor__in=pulp2distributors_with_old_distributions_qs
            )
        ).update(is_migrated=False)

        # Delete outdated publications
        Publication.objects.filter(
            pulp2distributor__in=pulp2distributors_with_old_distributions_qs).delete()

        # Delete outdated distributions
        BaseDistribution.objects.filter(
            pulp2distributor__in=pulp2distributors_with_old_distributions_qs).delete()

        # Remove relations to the pulp2repository in case the relation changed.
        # Pulp2Distributors with is_migrated=false is handled and re-added properly at
        # migration stage.
        # NOTE: this needs to be removed last, the queries above use this relation.
        not_migrated_dists = Pulp2Distributor.objects.filter(is_migrated=False).only('pulp_id')
        Pulp2Distributor.pulp2_repos.through.objects.filter(
            pulp2distributor__in=not_migrated_dists
        ).delete()
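handle_outdated_resources() runs its dependent update() and delete() querysets inside a single transaction.atomic() block, and deletes the relation rows last because the earlier queries still rely on them. A small sketch of the same ordering concern with hypothetical Report and Link models:

from django.db import transaction

def retire_reports(stale_owner_ids):
    # readers see either the old state or the fully retired one, never
    # something in between
    with transaction.atomic():
        stale_links = Link.objects.filter(owner_id__in=stale_owner_ids)
        # flip the flag first: this query walks through the Link rows...
        Report.objects.filter(links__in=stale_links).update(is_current=False)
        # ...so the relation rows are only removed once nothing needs them
        stale_links.delete()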
Example #39
0
    def _save_aggregate(self, event, hashes, release, **kwargs):
        project = event.project

        # attempt to find a matching hash
        all_hashes = self._find_hashes(project, hashes)

        try:
            existing_group_id = next(h[0] for h in all_hashes if h[0])
        except StopIteration:
            existing_group_id = None

        # XXX(dcramer): this has the opportunity to create duplicate groups
        # it should be resolved by the hash merging function later but this
        # should be better tested/reviewed
        if existing_group_id is None:
            kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen'])
            with transaction.atomic():
                short_id = project.next_short_id()
                group, group_is_new = Group.objects.create(project=project,
                                                           short_id=short_id,
                                                           **kwargs), True
        else:
            group = Group.objects.get(id=existing_group_id)

            group_is_new = False

        # If all hashes are brand new we treat this event as new
        is_new = False
        new_hashes = [h[1] for h in all_hashes if h[0] is None]
        if new_hashes:
            affected = GroupHash.objects.filter(
                project=project,
                hash__in=new_hashes,
                group__isnull=True,
            ).update(group=group, )

            if affected != len(new_hashes):
                self._ensure_hashes_merged(group, new_hashes)
            elif group_is_new and len(new_hashes) == len(all_hashes):
                is_new = True

        # XXX(dcramer): it's important this gets called **before** the aggregate
        # is processed as otherwise values like last_seen will get mutated
        can_sample = should_sample(
            event.data.get('received') or float(event.datetime.strftime('%s')),
            group.data.get('last_received')
            or float(group.last_seen.strftime('%s')),
            group.times_seen,
        )

        if not is_new:
            is_regression = self._process_existing_aggregate(
                group=group,
                event=event,
                data=kwargs,
                release=release,
            )
        else:
            is_regression = False

        # Determine if we've sampled enough data to store this event
        if is_new or is_regression:
            is_sample = False
        else:
            is_sample = can_sample

        tsdb.incr_multi([
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ],
                        timestamp=event.datetime)

        tsdb.record_frequency_multi([
            (tsdb.models.frequent_projects_by_organization, {
                project.organization_id: {
                    project.id: 1,
                },
            }),
            (tsdb.models.frequent_issues_by_project, {
                project.id: {
                    group.id: 1,
                },
            }),
        ],
                                    timestamp=event.datetime)

        return group, is_new, is_regression, is_sample
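_save_aggregate() allocates the project's next short_id and creates the Group inside one transaction.atomic() block, so a failure between the two steps cannot leak or reuse an id. A minimal sketch of that allocation pattern using select_for_update() and hypothetical Counter and Ticket models:

from django.db import transaction

def create_ticket(project_id):
    with transaction.atomic():
        # lock the counter row so two concurrent creators cannot read the
        # same value; the increment and the new row commit together
        counter = Counter.objects.select_for_update().get(project_id=project_id)
        counter.value += 1
        counter.save(update_fields=['value'])
        return Ticket.objects.create(project_id=project_id,
                                     number=counter.value)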
Example #40
0
    def set_session(self, session):
        with transaction.atomic():
            RoomToSession.objects.filter(room_name=self.name).delete()
            if session:
                RoomToSession.objects.create(room_name=self.name,
                                             session=session)
Example #41
0
    def test_fk_assignment_and_related_object_cache(self):
        # Tests of ForeignKey assignment and the related-object cache (see #6886).

        p = Parent.objects.create(name="Parent")
        c = Child.objects.create(name="Child", parent=p)

        # Look up the object again so that we get a "fresh" object.
        c = Child.objects.get(name="Child")
        p = c.parent

        # Accessing the related object again returns the exactly same object.
        self.assertIs(c.parent, p)

        # But if we kill the cache, we get a new object.
        del c._parent_cache
        self.assertIsNot(c.parent, p)

        # Assigning a new object results in that object getting cached immediately.
        p2 = Parent.objects.create(name="Parent 2")
        c.parent = p2
        self.assertIs(c.parent, p2)

        # Assigning None succeeds if field is null=True.
        p.bestchild = None
        self.assertIsNone(p.bestchild)

        # bestchild should still be None after saving.
        p.save()
        self.assertIsNone(p.bestchild)

        # bestchild should still be None after fetching the object again.
        p = Parent.objects.get(name="Parent")
        self.assertIsNone(p.bestchild)

        # Assigning None will not fail: Child.parent is null=False.
        setattr(c, "parent", None)

        # You also can't assign an object of the wrong type here
        with self.assertRaises(ValueError):
            setattr(c, "parent", First(id=1, second=1))

        # You can assign None to Child.parent during object creation.
        Child(name='xyzzy', parent=None)

        # But when trying to save a Child with parent=None, the database will
        # raise IntegrityError.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Child.objects.create(name='xyzzy', parent=None)

        # Creation using keyword argument should cache the related object.
        p = Parent.objects.get(name="Parent")
        c = Child(parent=p)
        self.assertIs(c.parent, p)

        # Creation using keyword argument and unsaved related instance (#8070).
        p = Parent()
        msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
        with self.assertRaisesMessage(ValueError, msg):
            Child.objects.create(parent=p)

        msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
        with self.assertRaisesMessage(ValueError, msg):
            ToFieldChild.objects.create(parent=p)

        # Creation using attname keyword argument and an id will cause the
        # related object to be fetched.
        p = Parent.objects.get(name="Parent")
        c = Child(parent_id=p.id)
        self.assertIsNot(c.parent, p)
        self.assertEqual(c.parent, p)
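The assertRaises(IntegrityError) combined with transaction.atomic() above is the standard way to expect a database error inside a TestCase: the test itself already runs in a transaction, and an IntegrityError would normally leave that outer transaction unusable, so the failing statement is confined to a savepoint by giving it its own atomic() block. Roughly (Child stands in for any model with a null=False foreign key):

from django.db import IntegrityError, transaction
from django.test import TestCase

class NullParentTests(TestCase):
    def test_parent_is_required(self):
        # the inner atomic() creates a savepoint, so the IntegrityError
        # does not poison the transaction TestCase wrapped around the test
        with self.assertRaises(IntegrityError), transaction.atomic():
            Child.objects.create(name='orphan', parent=None)
        # the connection is still usable afterwards
        self.assertEqual(Child.objects.count(), 0)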
Example #42
0
def pre_migrate_all_without_content(plan):
    """
    Pre-migrate repositories, relations to their contents, importers and distributors.

    Look at the last updated times in the pulp2to3 tables for repositories/importers/distributors:
     * pulp2_last_unit_added or pulp2_last_unit_removed for repositories
     * pulp2_last_updated for importers and distributors

    Query empty-never-had-content repos (can't filter them out in any way) and repos for which
    there were:
     * content changes since the last run
     * importer changes since the last run
     * distributor changes since the last run

    Query in order of last_unit_added for the case when pre-migration is interrupted before we are
    done with repositories.

    Args:
        plan(MigrationPlan): A Migration Plan
    """

    _logger.debug('Pre-migrating Pulp 2 repositories')

    with ProgressReport(message='Processing Pulp 2 repositories, importers, distributors',
                        code='processing.repositories', total=0) as pb:

        for plugin_plan in plan.get_plugin_plans():
            repos = plugin_plan.get_repositories()
            importers_repos = plugin_plan.get_importers_repos()
            distributors_repos = plugin_plan.get_distributors_repos()

            importer_types = list(plugin_plan.migrator.importer_migrators.keys())
            distributor_migrators = plugin_plan.migrator.distributor_migrators
            distributor_types = list(distributor_migrators.keys())

            # figure out which repos/importers/distributors have been updated since the last run
            epoch = datetime.utcfromtimestamp(0)
            repo_type_q = Q(pulp2_repo_type=plugin_plan.type)
            imp_type_q = Q(pulp2_type_id__in=importer_types)
            dist_type_q = Q(pulp2_type_id__in=distributor_types)

            plugin_pulp2repos = Pulp2Repository.objects.filter(repo_type_q)
            repo_premigrated_last_by_added = plugin_pulp2repos.aggregate(
                Max('pulp2_last_unit_added')
            )['pulp2_last_unit_added__max'] or epoch
            repo_premigrated_last_by_removed = plugin_pulp2repos.aggregate(
                Max('pulp2_last_unit_removed')
            )['pulp2_last_unit_removed__max'] or epoch
            imp_premigrated_last = Pulp2Importer.objects.filter(imp_type_q).aggregate(
                Max('pulp2_last_updated')
            )['pulp2_last_updated__max'] or epoch
            dist_premigrated_last = Pulp2Distributor.objects.filter(dist_type_q).aggregate(
                Max('pulp2_last_updated')
            )['pulp2_last_updated__max'] or epoch

            is_content_added_q = mongo_Q(last_unit_added__gte=repo_premigrated_last_by_added)
            is_content_removed_q = mongo_Q(last_unit_removed__gte=repo_premigrated_last_by_removed)
            is_new_enough_repo_q = is_content_added_q | is_content_removed_q
            is_empty_repo_q = mongo_Q(last_unit_added__exists=False)
            is_new_enough_imp_q = mongo_Q(last_updated__gte=imp_premigrated_last)
            is_new_enough_dist_q = mongo_Q(last_updated__gte=dist_premigrated_last)
            repo_repo_id_q = mongo_Q(repo_id__in=repos)
            imp_repo_id_q = mongo_Q(repo_id__in=importers_repos)
            dist_repo_id_q = mongo_Q(repo_id__in=distributors_repos)

            updated_importers = Importer.objects(
                imp_repo_id_q & is_new_enough_imp_q
            ).only('repo_id')
            updated_imp_repos = set(imp.repo_id for imp in updated_importers)
            updated_distributors = Distributor.objects(
                dist_repo_id_q & is_new_enough_dist_q
            ).only('repo_id')
            updated_dist_repos = set(dist.repo_id for dist in updated_distributors)
            updated_impdist_repos = updated_imp_repos | updated_dist_repos

            mongo_updated_repo_q = repo_repo_id_q & (is_new_enough_repo_q | is_empty_repo_q)
            mongo_updated_imp_dist_repo_q = mongo_Q(repo_id__in=updated_impdist_repos)

            mongo_repo_qs = Repository.objects(
                mongo_updated_repo_q | mongo_updated_imp_dist_repo_q
            ).order_by('last_unit_added')

            pb.total += mongo_repo_qs.count()
            pb.save()

            for repo_data in mongo_repo_qs.only('id',
                                                'repo_id',
                                                'last_unit_added',
                                                'last_unit_removed',
                                                'description'):
                repo_id = repo_data.repo_id
                with transaction.atomic():
                    if repo_id in repos:
                        pre_migrate_repo(repo_data, plan.repo_id_to_type)
                    if repo_id in importers_repos:
                        pre_migrate_importer(repo_id, importer_types)
                    if repo_id in distributors_repos:
                        pre_migrate_distributor(repo_id, distributor_migrators)
                    pb.increment()
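pre_migrate_all_without_content() works incrementally: it derives a watermark from the newest timestamps it has already recorded, only queries Pulp 2 records newer than that, processes them oldest-first, and commits each one together with its progress tick in its own transaction.atomic() block so an interrupted run resumes cleanly. A rough sketch of that watermark-plus-per-item-commit shape (SourceRecord, MirrorRecord, migrate_one and progress are hypothetical):

from datetime import datetime

from django.db import transaction
from django.db.models import Max

EPOCH = datetime(1970, 1, 1)

def premigrate_changed(migrate_one, progress):
    # watermark: anything updated since the newest record we already hold
    watermark = MirrorRecord.objects.aggregate(
        Max('source_last_updated'))['source_last_updated__max'] or EPOCH
    pending = SourceRecord.objects.filter(
        last_updated__gte=watermark).order_by('last_updated')
    for record in pending:
        # one record per transaction, oldest first, so an interrupted run
        # picks up at the correct watermark next time
        with transaction.atomic():
            migrate_one(record)
            progress.increment()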
Example #43
0
def update_api(request):
    """
    Update an existing API definition.
    project_id: project ID
    api_id: API ID
    first_group_id: first-level group ID
    second_group_id: second-level group ID
    name: API name
    httpType: HTTP/HTTPS
    requestType: request method
    address: request address
    headDict: request headers
    requestParameterType: request parameter format
    requestList: list of request parameters
    responseList: list of response parameters
    mockStatus: mock HTTP status
    code: mock code
    description: description
    :return:
    """
    data = json.loads(request.body)
    if not data["project_id"] or not data["first_group_id"] or not data["name"] or not data["httpType"] or not \
            data["requestType"] or not data["address"] or not data["requestParameterType"] or not data["status"] or \
            not data["api_id"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if not isinstance(data["project_id"], int) or not isinstance(data["first_group_id"], int) or \
            not isinstance(data["api_id"], int):
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if data["status"] not in ["True", "False"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if not isinstance(data["project_id"], int) or not isinstance(
            data["first_group_id"], int):
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if data["httpType"] not in ["HTTP", "HTTPS"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if data["requestType"] not in ["POST", "GET", "PUT", "DELETE"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if data["requestParameterType"] not in ["form-data", "raw", "Restful"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    obj = Project.objects.filter(id=data["project_id"])
    if obj:
        obm = ApiInfo.objects.filter(id=data["api_id"],
                                     project=data["project_id"])
        if obm:
            obi = ApiInfo.objects.filter(
                name=data["name"],
                project=data["project_id"]).exclude(id=data["api_id"])
            if len(obi) == 0:
                try:
                    with transaction.atomic():
                        first_group = ApiGroupLevelFirst.objects.filter(
                            id=data["first_group_id"],
                            project=data["project_id"])
                        if len(first_group) == 0:
                            return JsonResponse(
                                code_msg=GlobalStatusCode.group_not_exist())
                        if data["first_group_id"] and data["second_group_id"]:
                            if not isinstance(data["second_group_id"], int):
                                return JsonResponse(code_msg=GlobalStatusCode.
                                                    parameter_wrong())
                            second_group = ApiGroupLevelSecond.objects.filter(
                                id=data["second_group_id"],
                                apiGroupLevelFirst=data["first_group_id"])
                            if len(second_group) == 0:
                                return JsonResponse(code_msg=GlobalStatusCode.
                                                    group_not_exist())
                            try:
                                obm.update(
                                    project=Project.objects.get(
                                        id=data["project_id"]),
                                    apiGroupLevelFirst=ApiGroupLevelFirst.
                                    objects.get(id=data["first_group_id"]),
                                    apiGroupLevelSecond=ApiGroupLevelSecond.
                                    objects.get(id=data["second_group_id"]),
                                    name=data["name"],
                                    httpType=data["httpType"],
                                    requestType=data["requestType"],
                                    apiAddress=data["address"],
                                    requestParameterType=data[
                                        "requestParameterType"],
                                    mockCode=data["mockStatus"],
                                    data=data["code"],
                                    status=data["status"],
                                    userUpdate=User.objects.get(
                                        id=request.user.pk),
                                    description=data["description"])
                            except KeyError:
                                return JsonResponse(code_msg=GlobalStatusCode.
                                                    parameter_wrong())
                        else:
                            return JsonResponse(
                                code_msg=GlobalStatusCode.parameter_wrong())
                        if len(data["headDict"]):
                            _list = []
                            for j in data["headDict"]:
                                try:
                                    _list.append(j["id"])
                                except KeyError:
                                    pass
                            parameter = ApiHead.objects.filter(
                                api=data["api_id"])
                            for n in parameter:
                                if n.pk not in _list:
                                    n.delete()
                            for i in data["headDict"]:
                                if i["name"]:
                                    try:
                                        ApiHead.objects.filter(id=i["id"], api=data["api_id"]).\
                                            update(name=i["name"], value=i["value"])
                                    except KeyError:
                                        ApiHead(api=ApiInfo.objects.get(
                                            id=data["api_id"]),
                                                name=i["name"],
                                                value=i["value"]).save()
                        if data["requestParameterType"] == "form-data":
                            ApiParameterRaw.objects.filter(
                                api=data["api_id"]).delete()
                            if len(data["requestList"]):
                                _list = []
                                for j in data["requestList"]:
                                    try:
                                        _list.append(j["id"])
                                    except KeyError:
                                        pass
                                parameter = ApiParameter.objects.filter(
                                    api=data["api_id"])
                                for n in parameter:
                                    if n.pk not in _list:
                                        n.delete()
                                for i in data["requestList"]:
                                    try:
                                        if i["name"]:
                                            try:
                                                ApiParameter.objects.filter(id=i["id"], api=data["api_id"]).\
                                                    update(name=i["name"], value=i["value"], required=i["required"],
                                                           restrict=i["restrict"], _type=i["_type"],
                                                           description=i["description"])
                                            except KeyError:
                                                ApiParameter(
                                                    api=ApiInfo.objects.get(
                                                        id=data["api_id"]),
                                                    name=i["name"],
                                                    value=i["value"],
                                                    required=i["required"],
                                                    _type=i["_type"],
                                                    description=i[
                                                        "description"]).save()
                                    except KeyError:
                                        logging.exception("Error")
                                        return JsonResponse(
                                            code_msg=GlobalStatusCode.
                                            parameter_wrong())

                        else:
                            ApiParameterRaw.objects.filter(
                                api=data["api_id"]).delete()
                            ApiParameter.objects.filter(
                                api=data["api_id"]).delete()
                            if data["requestList"]:
                                ApiParameterRaw(
                                    api=ApiInfo.objects.get(id=data["api_id"]),
                                    data=data["requestList"]).save()

                        if len(data["responseList"]):
                            _list = []
                            for j in data["responseList"]:
                                try:
                                    _list.append(j["id"])
                                except KeyError:
                                    pass
                            parameter = ApiResponse.objects.filter(
                                api=data["api_id"])
                            for n in parameter:
                                if n.pk not in _list:
                                    n.delete()
                            for i in data["responseList"]:
                                if i["name"]:
                                    try:
                                        ApiResponse.objects.filter(id=i["id"], api=data["api_id"]).\
                                            update(name=i["name"], value=i["value"], required=i["required"],
                                                   _type=i["_type"],
                                                   description=i["description"])
                                    except KeyError:
                                        ApiResponse(
                                            api=ApiInfo.objects.get(
                                                id=data["api_id"]),
                                            name=i["name"],
                                            value=i["value"],
                                            required=i["required"],
                                            _type=i["_type"],
                                            description=i["description"]).save(
                                            )
                            record_dynamic(data["project_id"], "修改", "接口",
                                           "修改接口“%s”" % data["name"])
                        api_record = ApiOperationHistory(
                            apiInfo=ApiInfo.objects.get(id=data["api_id"]),
                            user=User.objects.get(id=request.user.pk),
                            description="修改接口\"%s\"" % data["name"])
                        api_record.save()
                        return JsonResponse(
                            code_msg=GlobalStatusCode.success())
                except Exception as e:
                    logging.exception("ERROR")
                    logging.error(e)
                    return JsonResponse(code_msg=GlobalStatusCode.fail())
            else:
                return JsonResponse(code_msg=GlobalStatusCode.api_is_exist())
        else:
            return JsonResponse(code_msg=GlobalStatusCode.api_not_exist())
    else:
        return JsonResponse(code_msg=GlobalStatusCode.project_not_exist())
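One subtlety in update_api() above: returning a JsonResponse from inside the transaction.atomic() block commits whatever has already been written, because only an exception triggers a rollback. When a validation failure is discovered after some writes, the usual pattern is to raise inside the block and translate the exception into the error response outside it. A small sketch with hypothetical Record and Field models:

from django.db import transaction

class ValidationFailed(Exception):
    pass

def update_record(record_id, payload):
    try:
        with transaction.atomic():
            record = Record.objects.select_for_update().get(pk=record_id)
            record.name = payload['name']
            record.save(update_fields=['name'])
            if not payload.get('fields'):
                # raising rolls the name change back; a plain `return`
                # here would have committed it
                raise ValidationFailed('fields is required')
            Field.objects.filter(record=record).delete()
            Field.objects.bulk_create(
                [Field(record=record, **f) for f in payload['fields']])
    except ValidationFailed as exc:
        return {'ok': False, 'error': str(exc)}
    return {'ok': True}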
Example #44
0
    def save(self, project, raw=False):
        from sentry.tasks.post_process import index_event_tags

        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        level = data.pop('level')

        culprit = data.pop('culprit', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        fingerprint = data.pop('fingerprint', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)
        environment = data.pop('environment', None)

        # unused
        time_spent = data.pop('time_spent', None)

        if not culprit:
            culprit = generate_culprit(data, platform=platform)

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'message': message,
            'platform': platform,
        }

        event = Event(project_id=project.id,
                      event_id=event_id,
                      data=data,
                      time_spent=time_spent,
                      datetime=date,
                      **kwargs)

        tags = data.get('tags') or []
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))
        if release:
            # TODO(dcramer): we should ensure we create Release objects
            tags.append(('sentry:release', release))
        if environment:
            tags.append(('environment', environment))

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags,
                                      event,
                                      _with_transaction=False)
            if added_tags:
                tags.extend(added_tags)

        event_user = self._get_event_user(project, data)
        if event_user:
            tags.append(('sentry:user', event_user.tag_value))

        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        data['fingerprint'] = fingerprint or ['{{ default }}']

        # Get rid of ephemeral interface data
        for interface_class, _ in iter_interfaces():
            interface = interface_class()
            if interface.ephemeral:
                data.pop(interface.get_path(), None)

        # prioritize fingerprint over checksum as its likely the client defaulted
        # a checksum whereas the fingerprint was explicit
        if fingerprint:
            hashes = map(md5_from_hash,
                         get_hashes_from_fingerprint(event, fingerprint))
        elif checksum:
            hashes = [checksum]
        else:
            hashes = map(md5_from_hash, get_hashes_for_event(event))

        # TODO(dcramer): temp workaround for complexity
        data['message'] = message
        event_type = eventtypes.get(data.get('type', 'default'))(data)

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'data': {
                'last_received':
                event.data.get('received')
                or float(event.datetime.strftime('%s')),
                'type':
                event_type.key,
                # we cache the events metadata on the group to ensure its
                # accessible in the stream
                'metadata':
                event_type.get_metadata(),
            },
        })

        # TODO(dcramer): temp workaround for complexity
        del data['message']

        if release:
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            group_kwargs['first_release'] = release

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event, hashes=hashes, release=release, **group_kwargs)

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic(using=router.db_for_write(EventMapping)):
                EventMapping.objects.create(project=project,
                                            group=group,
                                            event_id=event_id)
        except IntegrityError:
            self.logger.info('Duplicate EventMapping found for event_id=%s',
                             event_id,
                             exc_info=True)
            return event

        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(group=group)

        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info('Duplicate Event found for event_id=%s',
                                 event_id,
                                 exc_info=True)
                return event

            index_event_tags.delay(
                project_id=project.id,
                group_id=group.id,
                event_id=event.id,
                tags=tags,
            )

        if event_user:
            tsdb.record_multi((
                (tsdb.models.users_affected_by_group, group.id,
                 (event_user.tag_value, )),
                (tsdb.models.users_affected_by_project, project.id,
                 (event_user.tag_value, )),
            ),
                              timestamp=event.datetime)

        if is_new and release:
            buffer.incr(Release, {'new_groups': 1}, {
                'id': release.id,
            })

        safe_execute(Group.objects.add_tags,
                     group,
                     tags,
                     _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send(project=project,
                                          group=group,
                                          sender=Project)

            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info(
                'Raw event passed; skipping post process for event_id=%s',
                event_id)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
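save() above opens each atomic() block on the database the router selects for the model being written (transaction.atomic(using=router.db_for_write(EventMapping))) and treats a duplicate-key IntegrityError as already done, which keeps the create idempotent under retries. A compact sketch of that shape with a hypothetical Mapping model:

import logging

from django.db import IntegrityError, router, transaction

logger = logging.getLogger(__name__)

def record_mapping(event_id, group_id):
    try:
        # open the transaction on whichever database the router sends
        # Mapping writes to; a duplicate insert is treated as success
        with transaction.atomic(using=router.db_for_write(Mapping)):
            Mapping.objects.create(event_id=event_id, group_id=group_id)
    except IntegrityError:
        logger.info('Duplicate Mapping found for event_id=%s', event_id)
        return False
    return True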
Example #45
0
    def _perform_update_iteration(self):
        """Performs a single iteration of the database update
        """

        # Retrieve 500 job executions that need to be updated and get job IDs
        job_ids = set()
        for job_exe in JobExecution.objects.filter(status__isnull=False).only('id', 'job_id')[:500]:
            job_ids.add(job_exe.job_id)

        # Retrieve all job executions for those jobs in sorted order
        job_exe_count = 0
        current_job_id = None
        current_exe_num = 1
        exe_num_dict = {}  # {exe_num: [job_exe.id]}
        job_exe_end_models = []
        job_exe_output_models = []
        job_exe_qry = JobExecution.objects.select_related('job').filter(job_id__in=job_ids)
        for job_exe in job_exe_qry.defer('resources', 'configuration', 'stdout', 'stderr').order_by('job_id', 'id'):
            job_exe_count += 1
            if job_exe.job_id == current_job_id:
                current_exe_num += 1
            else:
                current_job_id = job_exe.job_id
                current_exe_num = 1

            # This job_exe model needs to be updated with its exe_num
            if current_exe_num in exe_num_dict:
                exe_num_dict[current_exe_num].append(job_exe.id)
            else:
                exe_num_dict[current_exe_num] = [job_exe.id]

            if job_exe.status in ['COMPLETED', 'FAILED', 'CANCELED']:
                # Create corresponding job_exe_end model
                job_exe_end = JobExecutionEnd()
                job_exe_end.job_exe_id = job_exe.id
                job_exe_end.job_id = job_exe.job_id
                job_exe_end.job_type_id = job_exe.job.job_type_id
                job_exe_end.exe_num = current_exe_num

                # Create task results from job_exe task fields
                task_list = []
                if job_exe.pre_started:
                    pre_task_dict = {'task_id': '%s_%s' % (job_exe.get_cluster_id(), 'pre'), 'type': 'pre',
                                     'was_launched': True, 'was_started': True,
                                     'started': datetime_to_string(job_exe.pre_started)}
                    if job_exe.pre_completed:
                        pre_task_dict['ended'] = datetime_to_string(job_exe.pre_completed)
                    if job_exe.pre_exit_code is not None:
                        pre_task_dict['exit_code'] = job_exe.pre_exit_code
                    task_list.append(pre_task_dict)
                if job_exe.job_started:
                    job_task_dict = {'task_id': '%s_%s' % (job_exe.get_cluster_id(), 'job'), 'type': 'main',
                                     'was_launched': True, 'was_started': True,
                                     'started': datetime_to_string(job_exe.job_started)}
                    if job_exe.job_completed:
                        job_task_dict['ended'] = datetime_to_string(job_exe.job_completed)
                    if job_exe.job_exit_code is not None:
                        job_task_dict['exit_code'] = job_exe.job_exit_code
                    task_list.append(job_task_dict)
                if job_exe.post_started:
                    post_task_dict = {'task_id': '%s_%s' % (job_exe.get_cluster_id(), 'post'), 'type': 'post',
                                      'was_launched': True, 'was_started': True,
                                      'started': datetime_to_string(job_exe.post_started)}
                    if job_exe.post_completed:
                        post_task_dict['ended'] = datetime_to_string(job_exe.post_completed)
                    if job_exe.post_exit_code is not None:
                        post_task_dict['exit_code'] = job_exe.post_exit_code
                    task_list.append(post_task_dict)
                task_results = TaskResults({'tasks': task_list})

                job_exe_end.task_results = task_results.get_dict()
                job_exe_end.status = job_exe.status
                job_exe_end.error_id = job_exe.error_id
                job_exe_end.node_id = job_exe.node_id
                job_exe_end.queued = job_exe.queued
                job_exe_end.started = job_exe.started
                job_exe_end.seed_started = task_results.get_task_started('main')
                job_exe_end.seed_ended = task_results.get_task_ended('main')
                job_exe_end.ended = job_exe.ended
                job_exe_end_models.append(job_exe_end)

            if job_exe.status == 'COMPLETED':
                # Create corresponding job_exe_output model
                job_exe_output = JobExecutionOutput()
                job_exe_output.job_exe_id = job_exe.id
                job_exe_output.job_id = job_exe.job_id
                job_exe_output.job_type_id = job_exe.job.job_type_id
                job_exe_output.exe_num = current_exe_num
                job_exe_output.output = job_exe.results
                job_exe_output_models.append(job_exe_output)

        # Update/create models in an atomic transaction
        with transaction.atomic():
            for exe_num, job_exe_ids in exe_num_dict.items():
                JobExecution.objects.filter(id__in=job_exe_ids).update(exe_num=exe_num, status=None, error_id=None,
                                                                       command_arguments=None, environment=None,
                                                                       cpus_scheduled=None, mem_scheduled=None,
                                                                       disk_out_scheduled=None,
                                                                       disk_total_scheduled=None, pre_started=None,
                                                                       pre_completed=None, pre_exit_code=None,
                                                                       job_started=None, job_completed=None,
                                                                       job_exit_code=None, job_metrics=None,
                                                                       post_started=None, post_completed=None,
                                                                       post_exit_code=None, stdout=None, stderr=None,
                                                                       results_manifest=None, results=None, ended=None,
                                                                       last_modified=None)
            JobExecutionEnd.objects.bulk_create(job_exe_end_models)
            JobExecutionOutput.objects.bulk_create(job_exe_output_models)

        logger.info('Updated %d job executions', job_exe_count)
        self._updated_job_exe += job_exe_count
        percent = (float(self._updated_job_exe) / float(self._total_job_exe)) * 100.00
        print('Completed %s of %s job executions (%.1f%%)' % (self._updated_job_exe, self._total_job_exe, percent))
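_perform_update_iteration() builds every JobExecutionEnd and JobExecutionOutput model in memory first and only then opens a single transaction.atomic() block for the batched update() and bulk_create() calls, so the transaction stays short while the whole batch remains all-or-nothing. In outline, with hypothetical Record and Summary models:

from collections import defaultdict

from django.db import transaction

def roll_up(batch_size=500):
    # do the expensive reading and object construction outside the
    # transaction; only the batched writes below hold it open
    summaries = []
    ids_by_status = defaultdict(list)
    for record in Record.objects.filter(summarized=False)[:batch_size]:
        ids_by_status[record.status].append(record.id)
        summaries.append(Summary(record_id=record.id, status=record.status))
    with transaction.atomic():
        for status, ids in ids_by_status.items():
            Record.objects.filter(id__in=ids).update(summarized=True,
                                                     final_status=status)
        Summary.objects.bulk_create(summaries)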
Example #46
0
def harvest(self,
            log_id=None,
            ignore_disabled=False,
            ingest=True,
            exhaust=True,
            superfluous=False,
            force=False,
            limit=None):
    """Complete the harvest of the given HarvestLog or next the next available HarvestLog.

    Args:
        log_id (int, optional): Harvest the given log. Defaults to None.
            If the given log cannot be locked, the task will retry indefinitely.
            If the given log belongs to a disabled or deleted Source or SourceConfig, the task will fail.
        ingest (bool, optional): Whether or not to start the full ingest process for harvested data. Defaults to True.
        exhaust (bool, optional): Whether or not to start another harvest task if one is found. Defaults to True.
            Used to prevent a backlog of harvests. If we have a valid job, spin off another task to eat through
            the rest of the queue.
        superfluous (bool, optional): Re-ingest Rawdata that we've already collected. Defaults to False.
        ignore_disabled (bool, optional): Harvest even if the Source or SourceConfig is disabled. Defaults to False.
        force (bool, optional): Passed through to the harvester; also propagated to any spawned harvest tasks. Defaults to False.
        limit (int, optional): Maximum number of data to harvest. Defaults to None (no limit).

    """
    qs = HarvestLog.objects.all()

    if log_id is not None:
        logger.debug('Loading harvest log %d', log_id)
        qs = qs.filter(id=log_id)
    else:
        logger.debug(
            'log_id was not specified, searching for an available log.')

        if not ignore_disabled:
            qs = qs.exclude(source_config__disabled=True).exclude(
                source_config__source__is_deleted=True)

        qs = qs.filter(
            status__in=HarvestLog.READY_STATUSES,
            end_date__lte=timezone.now().date(),
            source_config__harvest_after__lte=timezone.now().time(),
        ).unlocked('source_config')

    with qs.lock_first('source_config') as log:
        if log is None and log_id is None:
            logger.warning('No HarvestLogs are currently available')
            return None

        if log is None and log_id is not None:
            # If an id was given to us, we should have gotten a log
            log = HarvestLog.objects.get(id=log_id)  # Force the failure
            raise Exception('Failed to load {} but then found {!r}.'.format(
                log_id, log))  # Should never be reached

        if self.request.id:
            # Additional attributes for the celery backend
            # Allows for better analytics of currently running tasks
            self.update_state(
                meta={
                    'log_id': log.id,
                    'source': log.source_config.source.long_title,
                    'source_config': log.source_config.label,
                })

            log.task_id = self.request.id
            HarvestLog.objects.filter(id=log.id).update(
                task_id=self.request.id)

        if log.completions > 0 and log.status == HarvestLog.STATUS.succeeded and not superfluous:
            log.skip(HarvestLog.SkipReasons.duplicated)
            logger.warning(
                '%r has already been harvested. Force a re-run with superfluous=True',
                log)
            return None
        elif log.completions > 0 and log.status == HarvestLog.STATUS.succeeded:
            logger.info(
                '%r has already been harvested. Re-running superfluously', log)

        if log.harvester_version < log.source_config.harvester.version:
            # If a harvest log has an outdated harvester_version but has not been run before, we can go ahead and upgrade it.
            # Otherwise, mark it obsolete and skip it.
            if log.completions > 0:
                log.skip(HarvestLog.SkipReasons.obsolete)
                logger.warning(
                    '%r is outdated but has previously completed, skipping...',
                    log)
                return None

            try:
                # Attempt to upgrade the log
                with transaction.atomic():
                    log.harvester_version = log.source_config.harvester.version
                    log.save()
            except IntegrityError:
                # Sometimes a new harvest log will already be generated for one reason or another.
                # We can safely mark this log obsolete
                log.skip(HarvestLog.SkipReasons.obsolete)
                logger.warning(
                    'A newer version of %r already exists, skipping...', log)
                return None

            logger.warning(
                '%r has been updated to the latest harvester version, %s', log,
                log.harvester_version)

        if exhaust and log_id is None:
            if force:
                logger.warning('propagating force=True until queue exhaustion')

            logger.debug('Spawning another harvest task')
            res = harvest.apply_async(self.request.args, self.request.kwargs)
            logger.info('Spawned %r', res)

        logger.info('Harvesting %r', log)

        try:
            for datum in log.source_config.get_harvester().harvest_from_log(
                    log, force=force, ignore_disabled=ignore_disabled,
                    limit=limit):
                if ingest and (datum.created or superfluous):
                    transform.apply_async((datum.id, ))
        except HarvesterConcurrencyError as e:
            # If log_id has been specified there's a chance that
            # the advisory lock was not, in fact, acquired. If so retry indefinitely to preserve
            # existing functionality
            # Use random to add jitter to help break up locking issues
            # Kinda hacky: allow a stupidly large number of retries, as there is no option for infinite retries
            raise self.retry(
                exc=e,
                max_retries=99999,
                countdown=(random.random() + 1) *
                min(settings.CELERY_RETRY_BACKOFF_BASE**self.request.retries,
                    60 * 15))
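The retry countdown at the end of harvest() is an exponential backoff capped at 15 minutes, scaled by a random factor in [1, 2) for jitter. As a small stand-alone sketch, assuming CELERY_RETRY_BACKOFF_BASE is a small constant such as 2:

import random

def retry_countdown(retries, backoff_base=2, cap=60 * 15):
    # (random.random() + 1) is in [1, 2), so the countdown is 1-2x the backoff
    return (random.random() + 1) * min(backoff_base ** retries, cap)

# roughly 1-2 seconds on the first attempt, 15-30 minutes once the cap is reached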
Example #47
0
def task(request):
    user_id = request.session['user_id']
    if request.method == 'POST': # This part will only execute after submitting an annotation answer for a given data instance
        data_class_id = request.POST['data_class_id']
        data_instance = request.POST['DataInstance']
        task_id = request.POST['task_id']
        try:
            with transaction.atomic():
                annotating_data_instance = MediaDataInstance.objects.get(taskID_id=task_id, media=data_instance)
                # the system will check whether the data instance belongs to this user, for extra accuracy.
                if (annotating_data_instance.WhoIsViewing==user_id) and (annotating_data_instance.IsViewing==True) and (annotating_data_instance.NumberOfAnnotations < Task.objects.get(id=task_id).requiredNumofAnnotations):  # for extra protection; can be removed if necessary
                    # create data annotation result and save it in the database
                    data_annotation_result = DataAnnotationResult(TaskID=Task.objects.get(id=task_id),
                                                                  DataInstance=annotating_data_instance,
                                                                  ClassID=data_class_id,
                                                                  UserID=user_id)
            
                    data_annotation_result.save()
                    # release the lock for submitted data instance
                    annotating_data_instance.IsViewing = False
                    annotating_data_instance.WhoIsViewing = 0
                    annotating_data_instance.NumberOfAnnotations += 1
                    annotating_data_instance.save()
                    return redirect('/DoDataAnnotationTask/Task?task_id=' + str(task_id))
                else:
                    return redirect('/DoDataAnnotationTask/Task?task_id=' + str(task_id))
        except DatabaseError:
            print('DatabaseError in task() annotation submission Image Data Annotation')
            return redirect('/UserManagement/MyTasks/')
    else: # this part is responsible for giving a data instance to annotate. It executes after clicking a task, submitting an annotation, or skipping a data instance
        try:
            task_id = request.GET['task_id']
            if len(ContributorTask.objects.filter(User_id=user_id, Task_id=task_id)) == 0:  # check whether this user is registered for this task
                return redirect('/UserManagement/MyTasks/')
            data_instance_annotation_times = int(Task.objects.get(id=task_id).requiredNumofAnnotations)  # Take the required number of annotations for the task
            annotated_data_instances = DataAnnotationResult.objects.filter(TaskID_id=task_id, UserID=user_id).order_by('-LastUpdate')  # Take the annotations already submitted for this task by this user, so the same user is not given the same data instance again.
            data_instances_to_exclude = []  # add those data instances to this list
            for i in annotated_data_instances:
                data_instances_to_exclude += [i.DataInstance.id]
            try:  # there is an option to skip data instances; skipped data instances are added to the exclusion list so the annotator will not get them again.
                skip_instance=request.GET['skip_instance']
                skip_instance_object = MediaDataInstance.objects.get(taskID_id=task_id, media=skip_instance)
                data_instances_to_exclude += [skip_instance_object.id]
                skip_instance_request =True
            except:
                skip_instance_request =False
            try:
                with transaction.atomic():
                    data_annotation = MediaDataInstance.objects.filter(taskID_id=task_id,IsViewing=False,NumberOfAnnotations__lt=data_instance_annotation_times).exclude(id__in=data_instances_to_exclude) 
                    # take data instances that are not currently being viewed and whose number of annotations is less than the required number, excluding the data instances already annotated by this user
                    if len(data_annotation) > 0:
                        data_instance = random.choice(data_annotation)  # choose one data instance randomly and put a lock on it before handing it out.
                        data_instance_about_to_annotate = MediaDataInstance.objects.get(taskID_id=task_id, media=data_instance.media)
                        data_instance_about_to_annotate.IsViewing=True
                        data_instance_about_to_annotate.WhoIsViewing=user_id
                        data_instance_about_to_annotate.save()
                        if len(annotated_data_instances) > 0:
                            return render(request, 'DoDataAnnotationTask/DataAnnotationTask.html', {'data_instance_available': True,
                                                                                                    'task_object': Task.objects.get(id=task_id),
                                                                                                    'data_classes': Cateogary.objects.filter(taskID_id=task_id),
                                                                                                    'data_instance': data_instance,
                                                                                                    'first_name':Profile.objects.get(user=request.user).first_name,
                                                                                                    'task_id': task_id,
                                                                                                    'annotated_data_instances_available': True,
                                                                                                    'annotated_data_instances': annotated_data_instances})
                        else:
                            return render(request, 'DoDataAnnotationTask/DataAnnotationTask.html', {'data_instance_available': True,
                                                                                                    'task_object': Task.objects.get(id=task_id),
                                                                                                    'data_classes': Cateogary.objects.filter(taskID_id=task_id),
                                                                                                    'data_instance': data_instance,
                                                                                                    'first_name':Profile.objects.get(user=request.user).first_name,
                                                                                                    'task_id': task_id,
                                                                                                    'annotated_data_instances_available': False})
                    elif len(data_annotation)==0 and skip_instance_request: # if the contributor wants to skip the only remaining data instance, the same data instance will be given again
                        data_instance = MediaDataInstance.objects.get(taskID_id=task_id, id=skip_instance_object.id)
                        data_instance_about_to_annotate = data_instance
                        data_instance_about_to_annotate.IsViewing=True
                        data_instance_about_to_annotate.WhoIsViewing=user_id
                        data_instance_about_to_annotate.save()
                        if len(annotated_data_instances) > 0:
                            return render(request, 'DoDataAnnotationTask/DataAnnotationTask.html', {'data_instance_available': True,
                                                                                                    'task_object': Task.objects.get(id=task_id),
                                                                                                    'data_classes': Cateogary.objects.filter(taskID_id=task_id),
                                                                                                    'data_instance': data_instance,
                                                                                                    'first_name':Profile.objects.get(user=request.user).first_name,
                                                                                                    'task_id': task_id,
                                                                                                    'annotated_data_instances_available': True,
                                                                                                    'annotated_data_instances': annotated_data_instances})
                        else:
                            return render(request, 'DoDataAnnotationTask/DataAnnotationTask.html', {'data_instance_available': True,
                                                                                                    'task_object': Task.objects.get(id=task_id),
                                                                                                    'data_classes': Cateogary.objects.filter(taskID_id=task_id),
                                                                                                    'data_instance': data_instance,
                                                                                                    'first_name':Profile.objects.get(user=request.user).first_name,
                                                                                                    'task_id': task_id,
                                                                                                    'annotated_data_instances_available': False})
                    else: # if all the data instances have been annotated, the system will automatically change the task status to 'completed'
                        remaining_data_instances = MediaDataInstance.objects.filter(taskID_id=task_id,NumberOfAnnotations__lt=data_instance_annotation_times)
                        if len(remaining_data_instances)==0:
                            completed_task = Task.objects.get(id=task_id)
                            if completed_task.status=='inprogress':
                                completed_task.status = 'completed'
                                completed_task.save()
                        if len(annotated_data_instances) > 0:
                            return render(request, 'DoDataAnnotationTask/DataAnnotationTask.html', {'data_instance_available': False,
                                                                                                    'task_object': Task.objects.get(id=task_id),
                                                                                                    'task_id': task_id,
                                                                                                    'first_name':Profile.objects.get(user=request.user).first_name,
                                                                                                    'annotated_data_instances_available': True,
                                                                                                    'annotated_data_instances': annotated_data_instances})
                        else:
                            return render(request, 'DoDataAnnotationTask/DataAnnotationTask.html', {'data_instance_available': False,
                                                                                                    'task_object': Task.objects.get(id=task_id),
                                                                                                    'task_id': task_id,
                                                                                                    'first_name':Profile.objects.get(user=request.user).first_name,
                                                                                                    'annotated_data_instances_available': False,})
            except DatabaseError:
                print('DatabaseError in task() giving data instance to annotator Image data annotation')
                return redirect('/UserManagement/MyTasks/')
        except:
            print('Failure in task() Image data annotation ')
            return redirect('/UserManagement/MyTasks/')
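The branch above that hands out a data instance claims it by flipping IsViewing and WhoIsViewing inside transaction.atomic(). A condensed sketch of that claim step, reusing the same model fields; select_for_update() is added here as one way to make the claim race-free, whereas the original relies on the IsViewing flag alone:

import random
from django.db import transaction

def claim_instance(task_id, user_id, required_annotations, exclude_ids):
    with transaction.atomic():
        candidates = list(
            MediaDataInstance.objects.select_for_update()
            .filter(taskID_id=task_id, IsViewing=False,
                    NumberOfAnnotations__lt=required_annotations)
            .exclude(id__in=exclude_ids))
        if not candidates:
            return None
        instance = random.choice(candidates)
        instance.IsViewing = True
        instance.WhoIsViewing = user_id
        instance.save()
        return instance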
Example #48
0
def add_api(request):
    """
    Add a new API definition.
    project_id: project ID
    first_group_id: first-level group ID
    second_group_id: second-level group ID
    name: API name
    httpType: HTTP/HTTPS
    requestType: request method
    address: request address
    headDict: request headers
    requestParameterType: request parameter format
    requestList: list of request parameters
    responseList: list of response parameters
    mockStatus: mock HTTP status
    code: mock code
    description: description
    :return:
    """
    data = json.loads(request.body)
    if not data["project_id"] or not data["first_group_id"] or not data["name"] or not data["httpType"] or not \
            data["requestType"] or not data["address"] or not data["requestParameterType"] or not data["status"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if data["status"] not in ["True", "False"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if not isinstance(data["project_id"], int) or not isinstance(
            data["first_group_id"], int):
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if data["httpType"] not in ["HTTP", "HTTPS"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if data["requestType"] not in ["POST", "GET", "PUT", "DELETE"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    if data["requestParameterType"] not in ["form-data", "raw", "Restful"]:
        return JsonResponse(code_msg=GlobalStatusCode.parameter_wrong())
    obj = Project.objects.filter(id=data["project_id"])
    if obj:
        obi = ApiInfo.objects.filter(name=data["name"],
                                     project=data["project_id"])
        if obi:
            return JsonResponse(code_msg=GlobalStatusCode.name_repetition())
        else:
            try:
                with transaction.atomic():
                    first_group = ApiGroupLevelFirst.objects.filter(
                        id=data["first_group_id"], project=data["project_id"])
                    if len(first_group) == 0:
                        return JsonResponse(
                            code_msg=GlobalStatusCode.group_not_exist())
                    if data["first_group_id"] and data["second_group_id"]:
                        if not isinstance(data["second_group_id"], int):
                            return JsonResponse(
                                code_msg=GlobalStatusCode.parameter_wrong())
                        second_group = ApiGroupLevelSecond.objects.filter(
                            id=data["second_group_id"],
                            apiGroupLevelFirst=data["first_group_id"])
                        if len(second_group) == 0:
                            return JsonResponse(
                                code_msg=GlobalStatusCode.group_not_exist())
                        try:
                            oba = ApiInfo(
                                project=Project.objects.get(
                                    id=data["project_id"]),
                                apiGroupLevelFirst=ApiGroupLevelFirst.objects.
                                get(id=data["first_group_id"]),
                                apiGroupLevelSecond=ApiGroupLevelSecond.
                                objects.get(id=data["second_group_id"]),
                                name=data["name"],
                                httpType=data["httpType"],
                                status=data["status"],
                                requestType=data["requestType"],
                                apiAddress=data["address"],
                                requestParameterType=data[
                                    "requestParameterType"],
                                mockCode=data["mockStatus"],
                                data=data["code"],
                                userUpdate=User.objects.get(
                                    id=request.user.pk),
                                description=data["desc"])
                        except KeyError:
                            return JsonResponse(
                                code_msg=GlobalStatusCode.parameter_wrong())
                    else:
                        return JsonResponse(
                            code_msg=GlobalStatusCode.parameter_wrong())
                    oba.save()
                    if len(data["headDict"]):
                        for i in data["headDict"]:
                            try:
                                if i["name"]:
                                    ApiHead(api=ApiInfo.objects.get(id=oba.pk),
                                            name=i["name"],
                                            value=i["value"]).save()
                            except KeyError:
                                logging.exception("Error")
                                return JsonResponse(GlobalStatusCode.fail())
                    if data["requestParameterType"] == "form-data":
                        if len(data["requestList"]):
                            for i in data["requestList"]:
                                try:
                                    # i = i.replace("true", "True").replace("false", "False")
                                    if i["name"]:
                                        ApiParameter(
                                            api=ApiInfo.objects.get(id=oba.pk),
                                            name=i["name"],
                                            value=i["value"],
                                            required=i["required"],
                                            _type=i["_type"],
                                            restrict=i["restrict"],
                                            description=i["description"]).save(
                                            )
                                except KeyError:
                                    logging.exception("Error")
                                    return JsonResponse(
                                        code_msg=GlobalStatusCode.fail())
                    else:
                        if len(data["requestList"]):
                            ApiParameterRaw(api=ApiInfo.objects.get(id=oba.pk),
                                            data=data["requestList"]).save()
                    if len(data["responseList"]):
                        for i in data["responseList"]:
                            try:
                                # i = i.replace("true", "True").replace("false", "False")
                                if i["name"]:
                                    ApiResponse(
                                        api=ApiInfo.objects.get(id=oba.pk),
                                        name=i["name"],
                                        value=i["value"],
                                        required=i["required"],
                                        _type=i["_type"],
                                        description=i["description"]).save()
                            except KeyError:
                                logging.exception("Error")
                                return JsonResponse(
                                    code_msg=GlobalStatusCode.fail())
                    record_dynamic(data["project_id"], "新增", "接口",
                                   "新增接口“%s”" % data["name"])
                    api_record = ApiOperationHistory(
                        apiInfo=ApiInfo.objects.get(id=oba.pk),
                        user=User.objects.get(id=request.user.pk),
                        description="新增接口\"%s\"" % data["name"])
                    api_record.save()
                    return JsonResponse(data={"api_id": oba.pk},
                                        code_msg=GlobalStatusCode.success())
            except Exception as e:
                logging.exception("error")
                logging.error(e)
                return JsonResponse(code_msg=GlobalStatusCode.fail())
    else:
        return JsonResponse(code_msg=GlobalStatusCode.project_not_exist())
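add_api expects a JSON body carrying the fields described in the docstring. A hypothetical payload that would pass the validation above; the field names come from the checks in the code, while the values are purely illustrative:

example_payload = {
    "project_id": 1,
    "first_group_id": 2,
    "second_group_id": 3,
    "name": "login",
    "httpType": "HTTP",
    "requestType": "POST",
    "address": "/api/login",
    "status": "True",
    "requestParameterType": "form-data",
    "headDict": [{"name": "Content-Type", "value": "application/json"}],
    "requestList": [{"name": "username", "value": "demo", "required": True,
                     "_type": "String", "restrict": "", "description": ""}],
    "responseList": [{"name": "token", "value": "", "required": True,
                      "_type": "String", "description": ""}],
    "mockStatus": "200",
    "code": "{}",
    "desc": "login endpoint",
}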
Example #49
0
    def handle(self, *args, **options):
        if settings.ARCHIVES_SEARCH_PLAINTEXT:
            proto = "http"
        else:
            proto = "https"
        r = requests.get('{0}://{1}/listinfo/'.format(
            proto, settings.ARCHIVES_SEARCH_SERVER))
        j = r.json()
        allgroups = list(set([l['group'] for l in j]))
        with transaction.atomic():
            curs = connection.cursor()

            # Add any groups necessary
            curs.execute(
                "INSERT INTO lists_mailinglistgroup (groupname, sortkey) SELECT n,50 FROM UNNEST(%s) n(n) WHERE NOT EXISTS (SELECT 1 FROM lists_mailinglistgroup WHERE groupname=n) RETURNING groupname",
                (allgroups, ))
            for n, in curs.fetchall():
                print "Added group %s" % n

            # Add and update lists
            for l in j:
                curs.execute(
                    "SELECT id FROM lists_mailinglist WHERE listname=%s",
                    (l['name'], ))
                if curs.rowcount == 0:
                    curs.execute(
                        "INSERT INTO lists_mailinglist (listname, group_id, active, description, shortdesc) VALUES (%s, (SELECT id FROM lists_mailinglistgroup WHERE groupname=%s), %s, %s, %s)",
                        (l['name'], l['group'], l['active'], l['description'],
                         l['shortdesc']))
                    print "Added list %s" % l['name']
                else:
                    curs.execute(
                        "UPDATE lists_mailinglist SET group_id=(SELECT id FROM lists_mailinglistgroup WHERE groupname=%s), active=%s, description=%s, shortdesc=%s WHERE listname=%s AND NOT (group_id=(SELECT id FROM lists_mailinglistgroup WHERE groupname=%s) AND active=%s AND description=%s AND shortdesc=%s) RETURNING listname",
                        (
                            l['group'],
                            l['active'],
                            l['description'],
                            l['shortdesc'],
                            l['name'],
                            l['group'],
                            l['active'],
                            l['description'],
                            l['shortdesc'],
                        ))
                    for n, in curs.fetchall():
                        print "Updated list %s" % n

            # Delete any lists that shouldn't exist anymore (this is safe because we don't keep any data about them,
            # so they are trivial to add back)
            curs.execute(
                "DELETE FROM lists_mailinglist WHERE NOT listname=ANY(%s) RETURNING listname",
                ([l['name'] for l in j], ))
            for n, in curs.fetchall():
                print "Deleted list %s" % n
            # Delete listgroups
            curs.execute(
                "DELETE FROM lists_mailinglistgroup WHERE NOT groupname=ANY(%s) RETURNING groupname",
                (allgroups, ))
            for n, in curs.fetchall():
                print "Deleted group %s" % n

            if options['dryrun']:
                raise CommandError("Dry run, rolling back")
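The --dryrun handling above works because transaction.atomic() rolls back everything in the block when an exception escapes it, so raising CommandError after the writes undoes them. A minimal sketch of that dry-run pattern, with sync_lists as a hypothetical stand-in for the SQL above:

from django.core.management.base import BaseCommand, CommandError
from django.db import transaction

class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument('--dryrun', action='store_true')

    def handle(self, *args, **options):
        with transaction.atomic():
            self.sync_lists()  # hypothetical helper doing the inserts/updates/deletes
            if options['dryrun']:
                # the exception escapes the atomic block, so everything rolls back
                raise CommandError("Dry run, rolling back")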
Example #50
0
    def post(self, request, *args, **kwargs):
        if self.keyword_form.is_valid():
            with transaction.atomic():
                self.keyword.keyword = self.keyword_form.cleaned_data[
                    'keyword']
                self.keyword.description = self.keyword_form.cleaned_data[
                    'description']
                self.keyword.delimiter = self.keyword_form.cleaned_data[
                    'delimiter']
                self.keyword.override_open_sessions = self.keyword_form.cleaned_data[
                    'override_open_sessions']

                self.keyword.initiator_doc_type_filter = []
                if self.keyword_form.cleaned_data[
                        'allow_keyword_use_by'] == 'users':
                    self.keyword.initiator_doc_type_filter.append(
                        'CommCareUser')
                if self.keyword_form.cleaned_data[
                        'allow_keyword_use_by'] == 'cases':
                    self.keyword.initiator_doc_type_filter.append(
                        'CommCareCase')

                self.keyword.save()

                self.keyword.keywordaction_set.all().delete()
                if self.keyword_form.cleaned_data[
                        'sender_content_type'] != NO_RESPONSE:
                    app_id, form_unique_id = split_combined_id(
                        self.keyword_form.
                        cleaned_data['sender_app_and_form_unique_id'])
                    self.keyword.keywordaction_set.create(
                        recipient=KeywordAction.RECIPIENT_SENDER,
                        action=self.keyword_form.
                        cleaned_data['sender_content_type'],
                        message_content=self.keyword_form.
                        cleaned_data['sender_message'],
                        app_id=app_id,
                        form_unique_id=form_unique_id,
                    )
                if self.process_structured_message:
                    app_id, form_unique_id = split_combined_id(
                        self.keyword_form.
                        cleaned_data['structured_sms_app_and_form_unique_id'])
                    self.keyword.keywordaction_set.create(
                        recipient=KeywordAction.RECIPIENT_SENDER,
                        action=KeywordAction.ACTION_STRUCTURED_SMS,
                        app_id=app_id,
                        form_unique_id=form_unique_id,
                        use_named_args=self.keyword_form.
                        cleaned_data['use_named_args'],
                        named_args=self.keyword_form.
                        cleaned_data['named_args'],
                        named_args_separator=self.keyword_form.
                        cleaned_data['named_args_separator'],
                    )
                if self.keyword_form.cleaned_data[
                        'other_recipient_content_type'] != NO_RESPONSE:
                    app_id, form_unique_id = split_combined_id(
                        self.keyword_form.
                        cleaned_data['other_recipient_app_and_form_unique_id'])
                    self.keyword.keywordaction_set.create(
                        recipient=self.keyword_form.
                        cleaned_data['other_recipient_type'],
                        recipient_id=self.keyword_form.
                        cleaned_data['other_recipient_id'],
                        action=self.keyword_form.
                        cleaned_data['other_recipient_content_type'],
                        message_content=self.keyword_form.
                        cleaned_data['other_recipient_message'],
                        app_id=app_id,
                        form_unique_id=form_unique_id,
                    )

                return HttpResponseRedirect(
                    reverse(KeywordsListView.urlname, args=[self.domain]))
        return self.get(request, *args, **kwargs)
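The view above saves the keyword, deletes its keywordaction_set and recreates the actions inside one transaction.atomic() block, so no reader ever observes a keyword stripped of its actions. A condensed sketch of that save-then-rebuild-children pattern, assuming a keyword object with the same related manager:

from django.db import transaction

def replace_keyword_actions(keyword, new_actions):
    # new_actions: a list of kwargs dicts, one per KeywordAction row
    with transaction.atomic():
        keyword.save()
        keyword.keywordaction_set.all().delete()
        for action_kwargs in new_actions:
            keyword.keywordaction_set.create(**action_kwargs)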
Example #51
0
    def insert_plaintext(self, hashfile):

        with transaction.atomic():
            locked = False
            while not locked:
                try:
                    # Lock: prevent cracked file from being processed
                    hashfile_lock = Lock.objects.select_for_update().filter(hashfile_id=hashfile.id, lock_ressource="hashfile")[0]

                    locked = True
                except OperationalError as e:
                    continue

        hashfile_path = os.path.join(os.path.dirname(__file__), "..", "Files", "Hashfiles", hashfile.hashfile)
        if os.path.exists(hashfile_path):
            try:
                batch_create_list = []
                for index, line in enumerate(open(hashfile_path, encoding='utf-8')):
                    if index < hashfile.cracked_count:
                        continue

                    line = line.strip()
                    password = line.split(":")[-1]
                    if hashfile.username_included:
                        username = line.split(":")[0]
                        password_hash = ""
                    else:
                        username = None
                        password_hash = ""

                    pass_len, pass_charset, _, pass_mask, _ = analyze_password(password)

                    h = Hash(
                            hashfile=hashfile,
                            hash_type=hashfile.hash_type,
                            username=username,
                            password=password,
                            hash=password_hash,
                            password_len=pass_len,
                            password_charset=pass_charset,
                            password_mask=pass_mask,
                    )
                    batch_create_list.append(h)

                    if len(batch_create_list) >= 100000:
                        hashfile.line_count += len(batch_create_list)
                        hashfile.cracked_count += len(batch_create_list)
                        while len(batch_create_list) != 0:
                            Hash.objects.bulk_create(batch_create_list[:1000])
                            batch_create_list = batch_create_list[1000:]
                        hashfile.save()

                hashfile.line_count += len(batch_create_list)
                hashfile.cracked_count += len(batch_create_list)
                while len(batch_create_list) != 0:
                    Hash.objects.bulk_create(batch_create_list[:1000])
                    batch_create_list = batch_create_list[1000:]
                hashfile.save()

            except Exception as e:
                traceback.print_exc()

            # Cracked-file processing is over, remove lock
            del hashfile_lock
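insert_plaintext accumulates Hash rows and flushes them with bulk_create in slices of 1000, checkpointing the hashfile counters every 100,000 lines. A minimal sketch of that chunked-insert idea; unlike the original it does the bookkeeping once at the end:

def bulk_insert_hashes(hashfile, rows, chunk_size=1000):
    # insert in small slices so no single INSERT statement gets too large
    for start in range(0, len(rows), chunk_size):
        Hash.objects.bulk_create(rows[start:start + chunk_size])
    hashfile.line_count += len(rows)
    hashfile.save()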
Example #52
0
def Collector__delete(self):
    # NOTE: Original Django code. Only changes are marked as ADDED.

    # sort instance collections
    for model, instances in self.data.items():
        self.data[model] = sorted(instances, key=attrgetter("pk"))

    # if possible, bring the models in an order suitable for databases that
    # don't support transactions or cannot defer constraint checks until the
    # end of a transaction.
    self.sort()
    # number of objects deleted for each model label
    deleted_counter = Counter()

    with transaction.atomic(using=self.using, savepoint=False):
        # ADDED: Chunk deletion of larger amounts of related objects (+6 lines)
        for obj_queryset in self.chunk_deletes:
            exists = True
            while exists:
                values_list = obj_queryset.values_list('pk', flat=True)[:DELETION_MAX_CHUNK]
                obj_queryset.model.objects.filter(pk__in=values_list).delete()
                exists = obj_queryset.exists()

        # send pre_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.pre_delete.send(
                    sender=model, instance=obj, using=self.using
                )

        # fast deletes
        for qs in self.fast_deletes:
            count = qs._raw_delete(using=self.using)
            deleted_counter[qs.model._meta.label] += count

        # update fields
        for model, instances_for_fieldvalues in self.field_updates.items():
            for (field, value), instances in instances_for_fieldvalues.items():
                query = sql.UpdateQuery(model)
                query.update_batch([obj.pk for obj in instances],
                                   {field.name: value}, self.using)

        # reverse instance collections
        for instances in self.data.values():
            instances.reverse()

        # delete instances
        for model, instances in self.data.items():
            query = sql.DeleteQuery(model)
            pk_list = [obj.pk for obj in instances]
            count = query.delete_batch(pk_list, self.using)
            deleted_counter[model._meta.label] += count

            if not model._meta.auto_created:
                for obj in instances:
                    signals.post_delete.send(
                        sender=model, instance=obj, using=self.using
                    )

    # update collected instances
    for model, instances_for_fieldvalues in self.field_updates.items():
        for (field, value), instances in instances_for_fieldvalues.items():
            for obj in instances:
                setattr(obj, field.attname, value)
    for model, instances in self.data.items():
        for instance in instances:
            setattr(instance, model._meta.pk.attname, None)
    return sum(deleted_counter.values()), dict(deleted_counter)
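The block marked ADDED deletes large related querysets in slices of DELETION_MAX_CHUNK primary keys before the stock collector logic runs. The same idea as a stand-alone helper:

def delete_in_chunks(queryset, chunk_size=10000):
    # repeatedly delete one slice of primary keys until the queryset is empty,
    # so each DELETE statement stays bounded in size
    while queryset.exists():
        pks = list(queryset.values_list('pk', flat=True)[:chunk_size])
        queryset.model.objects.filter(pk__in=pks).delete()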
Example #53
0
def setup_lang(sender, **kwargs):
    """Hook for creating basic set of languages on database migration."""
    if sender.label == 'lang':
        with transaction.atomic():
            Language.objects.setup(False)
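setup_lang reads like a post_migrate receiver: it takes a sender (an AppConfig) plus keyword arguments and checks sender.label. A hedged sketch of how such a hook is typically wired up, assuming the receiver belongs to an app labelled 'lang':

from django.apps import AppConfig
from django.db.models.signals import post_migrate

# from .setup import setup_lang  # import from wherever the receiver is defined

class LangConfig(AppConfig):
    name = 'lang'  # the label checked inside setup_lang

    def ready(self):
        # connect without a sender; setup_lang itself filters on sender.label
        post_migrate.connect(setup_lang)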
Example #54
0
    def compare_potfile(self, hashfile, potfile=None):
        if not potfile:
            potfile = self.get_potfile()

        with transaction.atomic():
            locked = False
            while not locked:
                try:
                    # Lock: lock all the potfiles so that only one instance of hashcat runs at a time; the --left option eats a lot of RAM...
                    potfile_locks = list(Lock.objects.select_for_update().filter(lock_ressource="potfile"))
                    # Lock: prevent hashes file from being processed
                    hashfile_lock = Lock.objects.select_for_update().filter(hashfile_id=hashfile.id, lock_ressource="hashfile")[0]

                    locked = True
                except OperationalError as e:
                    continue

        hashfile_path = os.path.join(os.path.dirname(__file__), "..", "Files", "Hashfiles", hashfile.hashfile)

        # trick to allow multiple instances of hashcat
        session_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12))

        cracked_file = tempfile.NamedTemporaryFile(delete=False)

        # is there a way to combine --show and --remove in hashcat?

        # Get cracked hashes
        cmd_line = [self.get_binary(), '--show', '-m', str(hashfile.hash_type), hashfile_path, '-o', cracked_file.name, '--session', session_name]
        cmd_line += ['--outfile-format', '3']
        if potfile:
            cmd_line += ['--potfile-path', potfile]
        print("%s: Command: %s" % (hashfile.name, " ".join(cmd_line)))
        p = subprocess.Popen(cmd_line, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        p.wait()

        # Remove cracked hashes from list
        f = tempfile.NamedTemporaryFile(delete=False)
        f.close()
        cmd_line = [self.get_binary(), '--left', '-m', str(hashfile.hash_type), hashfile_path, '-o', f.name, '--session', session_name]
        cmd_line += ['--outfile-format', '1']
        if potfile:
            cmd_line += ['--potfile-path', potfile]
        print("%s: Command: %s" % (hashfile.name, " ".join(cmd_line)))
        p = subprocess.Popen(cmd_line, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        p.wait()

        copyfile(f.name, hashfile_path)
        os.remove(f.name)

        # hashcat over, remove lock on potfile and hashfile
        del potfile_locks
        del hashfile_lock

        if os.path.exists(cracked_file.name):

            start = time.perf_counter()

            cursor = connection.cursor()
            tmp_table_name = "tmp_table_%s" % ''.join(random.choice(string.ascii_lowercase+string.digits) for i in range(10))
            try:
                # create temporary table
                cursor.execute("BEGIN;")
                cursor.execute("CREATE TEMPORARY TABLE " + tmp_table_name + " (hash char(190) PRIMARY KEY, password varchar(190) NOT NULL, pass_len INTEGER, pass_charset varchar(190), pass_mask varchar(190));")
                cursor.execute("SET unique_checks=0;")

                bulk_insert_list = []
                nb_insert = 0
                for index, line in enumerate(open(cracked_file.name, encoding='utf-8')):
                    line = line.strip()
                    password = line.split(":")[-1]
                    password_hash = ":".join(line.split(":")[0:-1])

                    pass_len, pass_charset, _, pass_mask, _ = analyze_password(password)

                    bulk_insert_list += [password_hash, password, pass_len, pass_charset, pass_mask]
                    nb_insert += 1

                    if nb_insert >= 1000:
                        cursor.execute("INSERT INTO " + tmp_table_name + " VALUES " + ", ".join(["(%s, %s, %s, %s, %s)"]*nb_insert) + ";", bulk_insert_list)
                        bulk_insert_list = []
                        nb_insert = 0

                    # inserting into the table every 100K rows prevents MySQL from raising "The number of locks exceeds the lock table size"
                    if index % 100000 == 0:
                        print(index)
                        cursor.execute("UPDATE " + tmp_table_name + " b JOIN Hashcat_hash a ON a.hash = b.hash AND a.hash_type=%s SET a.password = b.password, a.password_len = b.pass_len, a.password_charset = b.pass_charset, a.password_mask = b.pass_mask;", [hashfile.hash_type])
                        cursor.execute("DELETE FROM " + tmp_table_name + ";")
                        cursor.execute("COMMIT;")

                if len(bulk_insert_list) != 0:
                    cursor.execute("INSERT INTO " + tmp_table_name + " VALUES " + ", ".join(["(%s, %s, %s, %s, %s)"]*nb_insert) + ";", bulk_insert_list)

                cursor.execute("UPDATE " + tmp_table_name + " b JOIN Hashcat_hash a ON a.hash = b.hash AND a.hash_type=%s SET a.password = b.password, a.password_len = b.pass_len, a.password_charset = b.pass_charset, a.password_mask = b.pass_mask;", [hashfile.hash_type])
                cursor.execute("COMMIT;")
            except Exception as e:
                traceback.print_exc()
            finally:
                cursor.execute("SET unique_checks=1;")
                cursor.execute("DROP TABLE %s;" % tmp_table_name)
                cursor.execute("COMMIT;")
                cursor.close()

                hashfile.cracked_count = Hash.objects.filter(hashfile_id=hashfile.id, password__isnull=False).count()
                hashfile.save()

            end = time.perf_counter()
            print("Update password time: %fs" % (end-start,))

            os.remove(cracked_file.name)
Example #55
0
    def post(self, request):
        """
        Creates a new CCX course for a given Master Course.

        Args:
            request (Request): Django request object.

        Return:
            A JSON serialized representation a newly created CCX course.
        """
        master_course_id = request.data.get('master_course_id')
        master_course_object, master_course_key, error_code, http_status = get_valid_course(
            master_course_id,
            advanced_course_check=True
        )
        if master_course_object is None:
            return Response(
                status=http_status,
                data={
                    'error_code': error_code
                }
            )

        # validating the rest of the input
        valid_input, field_errors = get_valid_input(request.data)
        if field_errors:
            return Response(
                status=status.HTTP_400_BAD_REQUEST,
                data={
                    'field_errors': field_errors
                }
            )

        try:
            coach = User.objects.get(email=valid_input['coach_email'])
        except User.DoesNotExist:
            return Response(
                status=status.HTTP_404_NOT_FOUND,
                data={
                    'error_code': 'coach_user_does_not_exist'
                }
            )

        with transaction.atomic():
            ccx_course_object = CustomCourseForEdX(
                course_id=master_course_object.id,
                coach=coach,
                display_name=valid_input['display_name'])
            ccx_course_object.save()

            # Make sure start/due are overridden for entire course
            start = TODAY().replace(tzinfo=pytz.UTC)
            override_field_for_ccx(ccx_course_object, master_course_object, 'start', start)
            override_field_for_ccx(ccx_course_object, master_course_object, 'due', None)

            # Enforce a static limit for the maximum amount of students that can be enrolled
            override_field_for_ccx(
                ccx_course_object,
                master_course_object,
                'max_student_enrollments_allowed',
                valid_input['max_students_allowed']
            )

            # Hide anything that can show up in the schedule
            hidden = 'visible_to_staff_only'
            for chapter in master_course_object.get_children():
                override_field_for_ccx(ccx_course_object, chapter, hidden, True)
                for sequential in chapter.get_children():
                    override_field_for_ccx(ccx_course_object, sequential, hidden, True)
                    for vertical in sequential.get_children():
                        override_field_for_ccx(ccx_course_object, vertical, hidden, True)

            # make the coach user a coach on the master course
            make_user_coach(coach, master_course_key)

            # pull the ccx course key
            ccx_course_key = CCXLocator.from_course_locator(master_course_object.id, ccx_course_object.id)
            # enroll the coach in the newly created ccx
            email_params = get_email_params(
                master_course_object,
                auto_enroll=True,
                course_key=ccx_course_key,
                display_name=ccx_course_object.display_name
            )
            enroll_email(
                course_id=ccx_course_key,
                student_email=coach.email,
                auto_enroll=True,
                email_students=True,
                email_params=email_params,
            )
            # assign coach role for the coach to the newly created ccx
            assign_coach_role_to_ccx(ccx_course_key, coach, master_course_object.id)

        serializer = self.get_serializer(ccx_course_object)
        return Response(
            status=status.HTTP_201_CREATED,
            data=serializer.data
        )
Example #56
0
    def insert_hashes(self, hashfile):

        with transaction.atomic():
            locked = False
            while not locked:
                try:
                    # Lock: prevent cracked file from being processed
                    hashfile_lock = Lock.objects.select_for_update().filter(hashfile_id=hashfile.id, lock_ressource="hashfile")[0]

                    locked = True
                except OperationalError as e:
                    continue

        hashfile_path = os.path.join(os.path.dirname(__file__), "..", "Files", "Hashfiles", hashfile.hashfile)
        if os.path.exists(hashfile_path):


            try:
                # 1 - import hashfile to database
                print("importing hashfile to database")

                start = time.perf_counter()

                batch_create_list = []
                hash_count = 0
                for index, line in enumerate(open(hashfile_path, encoding='utf-8')):
                    try:
                        line = line.strip()
                        if hashfile.username_included:
                            username = line.split(":")[0]
                            password_hash = ":".join(line.split(":")[1:])
                        else:
                            username = None
                            password_hash = line
                    except IndexError:
                        continue

                    h = Hash(
                            hashfile=hashfile,
                            hash_type=hashfile.hash_type,
                            username=username,
                            hash=password_hash,
                            password=None,
                            password_len=None,
                            password_charset=None,
                            password_mask=None,
                    )
                    batch_create_list.append(h)

                    if len(batch_create_list) >= 100000:
                        print(index)
                        hashfile.line_count += len(batch_create_list)
                        while len(batch_create_list) != 0:
                            Hash.objects.bulk_create(batch_create_list[:1000])
                            batch_create_list = batch_create_list[1000:]
                        hashfile.save()

                hashfile.line_count += len(batch_create_list)
                while len(batch_create_list) != 0:
                    Hash.objects.bulk_create(batch_create_list[:1000])
                    batch_create_list = batch_create_list[1000:]
                hashfile.save()

                end = time.perf_counter()
                print("Inserted hashes in : %fs" % (end-start,))

                # 2 - if usernames are included in the hashfile, delete the file and create one with only the hashes;
                #     --username takes a lot of RAM with hashcat, so this method is better when processing huge hashfiles

                start = time.perf_counter()

                if hashfile.username_included:
                    os.remove(hashfile_path)

                    tmpfile_name = ''.join([random.choice(string.ascii_lowercase) for i in range(16)])
                    tmpfile_path = os.path.join(os.path.dirname(__file__), "..", "Files", "tmp", tmpfile_name)

                    cursor = connection.cursor()
                    cursor.execute("SELECT DISTINCT hash FROM Hashcat_hash WHERE hashfile_id=%s INTO OUTFILE %s", [hashfile.id, tmpfile_path])
                    cursor.close()

                    copyfile(tmpfile_path, hashfile_path)
                    os.remove(tmpfile_path)

                end = time.perf_counter()
                print("Wrote hashfile on disk in : %fs" % (end-start,))

            except Exception as e:
                traceback.print_exc()
        else:
            print("Error: hashfile doesn't exists")

        # Hashfile processing is over, remove lock
        del hashfile_lock
Example #57
0
def mark_order_as_failed(order, charge_failure_reason):
    logger.info('mark_order_as_failed',
                order=order.order_id,
                charge_failure_reason=charge_failure_reason)
    with transaction.atomic():
        order.mark_as_failed(charge_failure_reason)
Example #58
0
    def patch(self, request, ccx_course_id=None):
        """
        Modifies a CCX course.

        Args:
            request (Request): Django request object.
            ccx_course_id (string): URI element specifying the CCX course location.
        """
        ccx_course_object, ccx_course_key, error_code, http_status = self.get_object(ccx_course_id, is_ccx=True)
        if ccx_course_object is None:
            return Response(
                status=http_status,
                data={
                    'error_code': error_code
                }
            )

        master_course_id = request.data.get('master_course_id')
        if master_course_id is not None and unicode(ccx_course_object.course_id) != master_course_id:
            return Response(
                status=status.HTTP_403_FORBIDDEN,
                data={
                    'error_code': 'master_course_id_change_not_allowed'
                }
            )

        valid_input, field_errors = get_valid_input(request.data, ignore_missing=True)
        if field_errors:
            return Response(
                status=status.HTTP_400_BAD_REQUEST,
                data={
                    'field_errors': field_errors
                }
            )

        with transaction.atomic():
            # update the display name
            if 'display_name' in valid_input:
                ccx_course_object.display_name = valid_input['display_name']
            # check if the coach has changed and in case update it
            old_coach = None
            if 'coach_email' in valid_input:
                try:
                    coach = User.objects.get(email=valid_input['coach_email'])
                except User.DoesNotExist:
                    return Response(
                        status=status.HTTP_404_NOT_FOUND,
                        data={
                            'error_code': 'coach_user_does_not_exist'
                        }
                    )
                if ccx_course_object.coach.id != coach.id:
                    old_coach = ccx_course_object.coach
                    ccx_course_object.coach = coach
            ccx_course_object.save()
            # update the overridden field for the maximum amount of students
            if 'max_students_allowed' in valid_input:
                override_field_for_ccx(
                    ccx_course_object,
                    ccx_course_object.course,
                    'max_student_enrollments_allowed',
                    valid_input['max_students_allowed']
                )
            # if the coach has changed, update the permissions
            if old_coach is not None:
                # get the master course key and master course object
                master_course_object, master_course_key, _, _ = get_valid_course(unicode(ccx_course_object.course_id))
                # make the new ccx coach a coach on the master course
                make_user_coach(coach, master_course_key)
                # enroll the coach in the ccx
                email_params = get_email_params(
                    master_course_object,
                    auto_enroll=True,
                    course_key=ccx_course_key,
                    display_name=ccx_course_object.display_name
                )
                enroll_email(
                    course_id=ccx_course_key,
                    student_email=coach.email,
                    auto_enroll=True,
                    email_students=True,
                    email_params=email_params,
                )
                # enroll the coach to the newly created ccx
                assign_coach_role_to_ccx(ccx_course_key, coach, master_course_object.id)

        return Response(
            status=status.HTTP_204_NO_CONTENT,
        )
Example #59
0
def refund_item(item, reason):
    logger.info('refund_item', item=item.ticket_id)
    stripe_refund = stripe_integration.refund_item(item)
    with transaction.atomic():
        Refund.objects.create_for_item(item, reason, stripe_refund.id,
                                       stripe_refund.created)
Example #60
0
def mark_order_as_errored_after_charge(order, charge_id):
    logger.info('mark_order_as_errored_after_charge',
                order=order.order_id,
                charge_id=charge_id)
    with transaction.atomic():
        order.mark_as_errored_after_charge(charge_id)