def parse_corp_clients(csheet, work):
    """Create one Affected record per data row of the corporate-clients sheet.

    Data rows start at index 3 (rows 0-2 are presumably headers — confirm
    against the spreadsheet template).  Each Affected is linked to *work*
    and saved immediately.

    Raises:
        IntegrityError: when check_empty_nit() reports that at least one
            client row is missing its NIT.  NOTE(review): the message is
            stored on ``__cause__`` as a plain string, which is
            non-standard Python — callers apparently read ``e.__cause__``
            instead of ``str(e)``; left unchanged for compatibility.
    """
    if check_empty_nit(csheet):
        for i in range(3, csheet.nrows):
            aff = Affected()
            aff.work = work
            aff.name = csheet.cell(i, 1).value
            aff.office = csheet.cell(i, 3).value
            aff.service = csheet.cell(i, 5).value
            aff.capacity = csheet.cell(i, 6).value
            # NIT column is numeric in the sheet; coerce float cell value to int.
            aff.nit = int(csheet.cell(i, 9).value)
            aff.save()
    else:
        e = IntegrityError()
        e.__cause__ = "Uno o mas clientes no tienen nit"
        raise e
def save(self, *args, **kwargs):
    """Validate and persist this InstanceModel, then emit instance_model_saved.

    Pops two private kwargs before delegating to the parent save:
      initial    -- when True, skip all integrity validation and the signal.
      creator_id -- forwarded on the instance_model_saved signal.

    Raises:
        IntegrityError: on any of the per-model-type validation failures
            below (required field missing, primitive with a unicode
            representation, malformed Boolean/Char/Integer/Date/DateTime/
            File values).
    """
    # Normalise empty-ish representation to NULL.
    if not self.unicode_representation:
        self.unicode_representation = None
    initial = kwargs.pop('initial', False)
    creator_id = kwargs.pop('creator_id', None)
    if not initial:
        if settings.METAMODEL['DEBUG']:
            # Check integrity: every non-multiple, non-nullable field must
            # carry a value.
            for instance_field in self.fields.select_related():
                if not instance_field.field.multiple and \
                        not instance_field.field.nullable:
                    if instance_field.value is None:
                        raise IntegrityError('{} is a required field of {}'
                                             ''.format(
                                                 instance_field.field.name,
                                                 self.model))
        self.unicode_representation = self.get_unicode_representation()
        if not self.is_model_primitive():
            # Non-primitive models keep an ordering value: numeric when it
            # parses as Decimal, otherwise its string form.
            ordering_value = self.compute_ordering_value()
            try:
                ordering_value = Decimal(str(ordering_value))
                self.decimal_value = ordering_value
            except InvalidOperation:
                self.unicode_value = str(ordering_value)
        if not self.unicode_value:
            self.unicode_value = None
        if self.is_model_primitive() and \
                self.unicode_representation is not None:
            raise IntegrityError('Primitive values cannot have unicode '
                                 'representations')
        # Per-primitive-type checks: each branch validates one value slot
        # and blanks the other.
        if self.model.name == 'BooleanField':
            if self.decimal_value not in [0, 1]:
                raise IntegrityError
            self.unicode_value = None
        if self.model.name == 'CharField':
            # NOTE(review): both isinstance checks test `str` — almost
            # certainly a Python 2 `str`/`unicode` pair that collapsed
            # during a py3 port; the duplicate is redundant but harmless.
            if not isinstance(self.unicode_value, str) \
                    and not isinstance(self.unicode_value, str):
                raise IntegrityError
            self.decimal_value = None
        if self.model.name == 'IntegerField':
            if self.decimal_value is None:
                raise IntegrityError
            self.unicode_value = None
        if self.model.name == 'DateField':
            if self.decimal_value is None:
                raise IntegrityError
            try:
                # Dates are stored as proleptic-Gregorian ordinals.
                date.fromordinal(self.decimal_value)
            except ValueError:
                raise IntegrityError
            self.unicode_value = None
        if self.model.name == 'DateTimeField':
            if self.decimal_value is None:
                raise IntegrityError
            try:
                # Datetimes are stored as seconds since the Unix epoch;
                # the addition is a pure range check.
                epoch = datetime.utcfromtimestamp(0)
                epoch + timedelta(seconds=int(self.decimal_value))
            except (OverflowError, ValueError):
                raise IntegrityError
            self.unicode_value = None
        if self.model.name == 'FileField':
            if not isinstance(self.unicode_value, str):
                raise IntegrityError
            self.decimal_value = None
        # NOTE(review): second 'IntegerField' branch — duplicates the one
        # above with a stricter type check; both run on every save of an
        # IntegerField instance.  Left as-is pending confirmation.
        if self.model.name == 'IntegerField':
            if not isinstance(self.decimal_value, int) and \
                    not isinstance(self.decimal_value, Decimal):
                raise IntegrityError
            self.unicode_value = None
    # `created` must be captured before super().save() assigns an id.
    created = not bool(self.id)
    result = super(InstanceModel, self).save(*args, **kwargs)
    if not initial:
        instance_model_saved.send(sender=self.__class__,
                                  instance_model=self,
                                  created=created,
                                  creator_id=creator_id)
    return result
def __init__(self, stage, *args, **kwargs):
    """Build the per-stage fight form: problem selection, participant
    selection, and one grade field per voting juror and per role.

    Raises:
        IntegrityError: if *stage* is not at its expected position in the
            fight preview (stage numbering got out of sync).
    """
    super(StageForm, self).__init__(*args, **kwargs)
    # Cross-check this stage against the precomputed fight preview.
    prev = _fightpreview(stage.fight)[stage.order - 1]
    if prev['pk'] != stage.pk:
        raise IntegrityError("Stage number integrity Error")
    self.fields['rejections'] = forms.ModelMultipleChoiceField(
        queryset=Problem.objects.filter(tournament=stage.fight.round.tournament),
        required=False,
        widget=Select2MultipleWidget)  # , number__in=prev['free']
    self.fields['presented'] = forms.ModelChoiceField(
        queryset=Problem.objects.filter(tournament=stage.fight.round.tournament),
        widget=Select2Widget,
        required=False)  # , number__in=prev['free']
    if stage.presented:
        self.fields['presented'].initial = stage.presented_id
    self.fields['rejections'].initial = list(
        stage.rejections.all().values_list('pk', flat=True))
    # Reporter / opponent / reviewer pickers, each limited to the matching
    # team's student members.
    self.fields['rep'] = forms.ModelChoiceField(
        queryset=stage.rep_attendance.team.teammember_set(manager="students")
        .prefetch_related('attendee__active_user__user').all(),
        widget=Select2Widget, required=False)
    if stage.reporter:
        self.fields['rep'].initial = stage.rep_attendance.active_person
    self.fields['opp'] = forms.ModelChoiceField(
        queryset=stage.opp_attendance.team.teammember_set(manager="students")
        .prefetch_related('attendee__active_user__user').all(),
        widget=Select2Widget, required=False)
    if stage.opponent:
        self.fields['opp'].initial = stage.opp_attendance.active_person
    self.fields['rev'] = forms.ModelChoiceField(
        queryset=stage.rev_attendance.team.teammember_set(manager="students")
        .prefetch_related('attendee__active_user__user').all(),
        widget=Select2Widget, required=False)
    if stage.reviewer:
        self.fields['rev'].initial = stage.rev_attendance.active_person
    self.grades = []
    # Tab indexes start at 30; rep/opp/rev columns are offset by 0/50/100
    # so keyboard tabbing walks down one role column at a time.
    index = 30
    for js in stage.fight.jurorsession_set(manager="voting").all():
        j = {'name': js.juror.attendee.full_name, 'pk': js.pk}
        f = forms.IntegerField(min_value=1, max_value=10, required=False)
        self.fields['grade-%d-rep' % (js.pk,)] = f
        f.widget.attrs['tabindex'] = index
        f.jurorsession = js
        f.attendance = stage.rep_attendance
        try:
            # Pre-fill with an existing grade if this juror already voted.
            # NOTE(review): bare except silently hides anything beyond the
            # expected DoesNotExist.
            f.initial_obj = stage.rep_attendance.jurorgrade_set.get(juror_session=js)
            f.initial = int(f.initial_obj.grade)
        except:
            pass
        f = forms.IntegerField(min_value=1, max_value=10, required=False)
        self.fields['grade-%d-opp' % (js.pk,)] = f
        f.widget.attrs['tabindex'] = index + 50
        f.jurorsession = js
        f.attendance = stage.opp_attendance
        try:
            f.initial_obj = stage.opp_attendance.jurorgrade_set.get(juror_session=js)
            f.initial = int(f.initial_obj.grade)
        except:
            pass
        f = forms.IntegerField(min_value=1, max_value=10, required=False)
        self.fields['grade-%d-rev' % (js.pk,)] = f
        f.widget.attrs['tabindex'] = index + 100
        f.jurorsession = js
        f.attendance = stage.rev_attendance
        try:
            f.initial_obj = stage.rev_attendance.jurorgrade_set.get(juror_session=js)
            f.initial = int(f.initial_obj.grade)
        except:
            pass
        index += 1
        self.grades.append(j)
def save(self, *args, **kwargs):
    """Persist the group/phase link, refusing mismatched competitions.

    Raises:
        IntegrityError: if the group and the phase belong to different
            competitions.
    """
    same_competition = self.group.competition == self.phase.competition
    if not same_competition:
        raise IntegrityError("Group and Phase competition must be the same")
    super(SubmissionResultGroupPhase, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
    """Save the field, rejecting computed score definitions.

    Raises:
        IntegrityError: if the referenced scoredef is itself computed —
            a computed score may not be built from another computed field.
    """
    is_computed = self.scoredef.computed
    if is_computed is True:
        raise IntegrityError(
            "Cannot use a computed field for a computed score")
    super(SubmissionComputedScoreField, self).save(*args, **kwargs)
def test_raises_error_creating_integration_feature(self, mock_create, mock_log):
    """A DB IntegrityError during feature creation is logged, not raised."""
    # Simulate the database rejecting the insert.
    mock_create.side_effect = IntegrityError()
    self.creator.call()
    # The failure must be swallowed and reported through the logger.
    mock_log.assert_called_with(sentry_app="nulldb", error_message="")
def save(self, **kwargs):
    """Validate ``data_type`` against the TYPES choices, then save.

    Raises:
        IntegrityError: if ``data_type`` is set but ``(data_type,
            data_type)`` is not a TYPES entry.  NOTE(review): this assumes
            every TYPES choice uses the same string for value and label —
            confirm against the TYPES definition.
    """
    if self.data_type and (self.data_type, self.data_type) not in self.TYPES:
        raise IntegrityError('Invalid data type')
    # Bug fix: forward **kwargs to the parent save — the original called
    # super().save() with no arguments, silently discarding options such
    # as update_fields or using.
    super(Concept, self).save(**kwargs)
def check_constraints(self, table_names=None):
    """
    Check each table name in `table_names` for rows with invalid foreign
    key references. This method is intended to be used in conjunction with
    `disable_constraint_checking()` and `enable_constraint_checking()`, to
    determine if rows with invalid references were entered while
    constraint checks were off.

    Raises IntegrityError describing the first violation found; returns
    None when every reference is valid.
    """
    if self.features.supports_pragma_foreign_key_check:
        # Fast path: let SQLite itself enumerate the violations.
        with self.cursor() as cursor:
            if table_names is None:
                violations = cursor.execute(
                    'PRAGMA foreign_key_check').fetchall()
            else:
                violations = chain.from_iterable(
                    cursor.execute(
                        'PRAGMA foreign_key_check(%s)'
                        % self.ops.quote_name(table_name)).fetchall()
                    for table_name in table_names)
            # See https://www.sqlite.org/pragma.html#pragma_foreign_key_check
            for table_name, rowid, referenced_table_name, foreign_key_index in violations:
                # foreign_key_list columns 3-4 are the local ("from") and
                # referenced ("to") column names.
                foreign_key = cursor.execute(
                    'PRAGMA foreign_key_list(%s)'
                    % self.ops.quote_name(table_name)).fetchall(
                    )[foreign_key_index]
                column_name, referenced_column_name = foreign_key[3:5]
                primary_key_column_name = self.introspection.get_primary_key_column(
                    cursor, table_name)
                # Resolve the offending row's PK and the dangling value.
                primary_key_value, bad_value = cursor.execute(
                    'SELECT %s, %s FROM %s WHERE rowid = %%s' % (
                        self.ops.quote_name(primary_key_column_name),
                        self.ops.quote_name(column_name),
                        self.ops.quote_name(table_name),
                    ),
                    (rowid,),
                ).fetchone()
                raise IntegrityError(
                    "The row in table '%s' with primary key '%s' has an "
                    "invalid foreign key: %s.%s contains a value '%s' that "
                    "does not have a corresponding value in %s.%s."
                    % (table_name, primary_key_value, table_name,
                       column_name, bad_value, referenced_table_name,
                       referenced_column_name))
    else:
        # Fallback for SQLite builds without the PRAGMA: emulate the
        # check with one LEFT JOIN query per foreign-key column.
        with self.cursor() as cursor:
            if table_names is None:
                table_names = self.introspection.table_names(cursor)
            for table_name in table_names:
                primary_key_column_name = self.introspection.get_primary_key_column(
                    cursor, table_name)
                if not primary_key_column_name:
                    continue
                relations = self.introspection.get_relations(
                    cursor, table_name)
                for column_name, (referenced_column_name,
                                  referenced_table_name) in relations:
                    # A row violates the constraint when its FK column is
                    # non-NULL but matches nothing in the referenced table.
                    cursor.execute("""
                        SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                        LEFT JOIN `%s` as REFERRED
                        ON (REFERRING.`%s` = REFERRED.`%s`)
                        WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
                        """ % (
                        primary_key_column_name, column_name, table_name,
                        referenced_table_name, column_name,
                        referenced_column_name, column_name,
                        referenced_column_name,
                    ))
                    for bad_row in cursor.fetchall():
                        raise IntegrityError(
                            "The row in table '%s' with primary key '%s' has an "
                            "invalid foreign key: %s.%s contains a value '%s' that "
                            "does not have a corresponding value in %s.%s." % (
                                table_name, bad_row[0], table_name,
                                column_name, bad_row[1],
                                referenced_table_name,
                                referenced_column_name,
                            ))
def integrity_error_side_effect(username="******"):
    # Mock side-effect helper: always fail with an IntegrityError.
    # The ``username`` argument exists only to match the mocked signature
    # and is ignored.
    raise IntegrityError()
def check_in(self):
    """Record the attendee's check-in timestamp, exactly once.

    Raises:
        IntegrityError: if a check-in time is already recorded.
    """
    already_checked_in = bool(self.checked_in)
    if already_checked_in:
        raise IntegrityError("attendee is already checked in")
    self.checked_in = timezone.now()
def delete_group_from_stormpath(sender, instance, **kwargs):
    """Signal handler: delete the remote Stormpath group matching the
    local group being deleted.

    Raises:
        IntegrityError: wrapping any StormpathError raised by the remote
            API call.
    """
    try:
        matches = APPLICATION.groups.search({'name': instance.name})
        matches[0].delete()
    except StormpathError as e:
        raise IntegrityError(e)
def parse_questions(file, course_users, all_topics, host):
    """Import questions from a JSON fixture into Question/Distractor rows.

    Each question is created inside its own transaction; any
    IntegrityError (bad image payload, no correct answer, DB constraint)
    rolls that question back, drops its partially-collected distractors,
    and continues with the next one.

    Returns the list of successfully created Distractor objects.
    """
    # Images are served from <host>/static.
    host = merge_url_parts([_format(host), _format("static")])
    distractors = []
    with open(file) as data_file:
        data = json.load(data_file)
        questions = data["questions"]
        counter = 0
        for q in questions:
            try:
                with transaction.atomic():
                    distractor_count = 0
                    counter = counter + 1
                    if q["explanation"]["content"] is None:
                        q["explanation"]["content"] = " "
                    # Difficulty/quality stats are seeded with random
                    # values; author is a random course user.
                    question = Question(content=q["question"]["content"],
                                        explanation=q["explanation"]["content"],
                                        difficulty=randrange(0, 5),
                                        quality=randrange(0, 5),
                                        difficultyCount=randrange(0, 100),
                                        qualityCount=randrange(0, 100),
                                        author=choice(course_users))
                    question.save()
                    d = decode_images(question.id, question,
                                      q["question"]["payloads"], "q", host)
                    if not d:
                        raise IntegrityError("Invalid Question Image")
                    d = decode_images(question.id, question,
                                      q["explanation"]["payloads"], "e", host)
                    if not d:
                        raise IntegrityError("Invalid Explanation Image")
                    q_topics = q["topics"]
                    for topic in q_topics:
                        # Linear scan for the matching topic by name.
                        # NOTE(review): if no topic matches, idx ends at
                        # len(all_topics) and the indexing below raises
                        # IndexError (not IntegrityError) — confirm topics
                        # are guaranteed to exist.
                        idx = 0
                        while idx < len(all_topics):
                            if topic["name"] == all_topics[idx].name:
                                break
                            idx += 1
                        question.topics.add(all_topics[idx])
                    question.save()
                    _response_choices = ["A", "B", "C", "D"]
                    # At least one response must be flagged correct.
                    if True not in [
                            q["responses"][i].get("isCorrect", False)
                            for i in _response_choices
                    ]:
                        raise IntegrityError("No correct answer for question")
                    for i in _response_choices:
                        response = q["responses"][i]
                        if response["content"] is None:
                            response["content"] = " "
                        distractor = Distractor(content=response["content"],
                                                response=i,
                                                isCorrect=response["isCorrect"],
                                                question=question)
                        distractor.save()
                        distractor_count += 1
                        d = decode_images(distractor.id, distractor,
                                          response["payloads"], "d", host)
                        if not d:
                            raise IntegrityError("Invalid Distractor Image")
                        distractors.append(distractor)
            except IntegrityError as e:
                # Roll back the bookkeeping: drop this question's
                # partially-appended distractors from the result list.
                distractors = distractors[:len(distractors) - distractor_count]
                print("Invalid question: " + str(counter))
    return distractors
def save(self, *args, **kwargs):
    """Validate the date range, apply day-off accounting on confirmation,
    then save.

    Raises:
        IntegrityError: if ``date_from`` is after ``date_to``.
    """
    if self.date_from > self.date_to:
        # Fixed grammar of the error message (was "must be less, than").
        raise IntegrityError('date_from must be less than date_to')
    if self.status == mch.STATUS_CONFIRMED:
        # Confirming the request consumes the employee's day-off balance.
        self.subtract_day_off()
    super().save(*args, **kwargs)
def assertSize(self):
    """Verify the timestamp vector and the data have equal length.

    Raises:
        IntegrityError: when the two lengths differ.
    """
    expected_length = len(self)
    if len(self.timestamp) != expected_length:
        raise IntegrityError("Timestamp and data not equally long")
def authenticate(self, **credentials):
    """
    Handles authentication of a user from the given credentials.
    Credentials must be a combination of 'request' and 'google_user'.
    If any other combination of credentials are given then we raise a
    TypeError, see authenticate() in django.contrib.auth.__init__.py.
    """
    User = get_user_model()
    if not issubclass(User, GaeAbstractBaseUser):
        raise ImproperlyConfigured(
            "djangae.contrib.auth.backends.AppEngineUserAPI requires AUTH_USER_MODEL to be a "
            " subclass of djangae.contrib.auth.base.GaeAbstractBaseUser.")
    if len(credentials) != 1:
        # Django expects a TypeError if this backend cannot handle the
        # given credentials
        raise TypeError()
    google_user = credentials.get('google_user', None)
    if google_user:
        user_id = google_user.user_id()
        email = google_user.email().lower()
        try:
            # Happy path: user already known by Google user id.
            return User.objects.get(username=user_id)
        except User.DoesNotExist:
            try:
                existing_user = User.objects.get(
                    email=BaseUserManager.normalize_email(email))
            except User.DoesNotExist:
                # Nobody with that email either — brand new user.
                return User.objects.create_user(user_id, email)
            # If the existing user was precreated, update and reuse it
            if existing_user.username is None:
                if (getattr(settings, 'DJANGAE_ALLOW_USER_PRE_CREATION', False) or
                        # Backwards compatibility, remove before 1.0
                        getattr(settings, 'ALLOW_USER_PRE_CREATION', False)):
                    # Convert the pre-created User object so that the user
                    # can now login via Google Accounts, and ONLY via
                    # Google Accounts.
                    existing_user.username = user_id
                    existing_user.last_login = timezone.now()
                    existing_user.save()
                    return existing_user
                # There's a precreated user but user precreation is
                # disabled.  This will fail with an integrity error
                from django.db import IntegrityError
                raise IntegrityError(
                    "GAUTH: Found existing User with email=%s and username=None, "
                    "but user precreation is disabled." % email)
            # There is an existing user with this email address, but it is
            # tied to a different Google user id.  As we treat the user id
            # as the primary identifier, not the email address, we leave
            # the existing user in place and blank its email address (as
            # the email field is unique), then create a new user with the
            # new user id.
            else:
                logging.info(
                    "GAUTH: Creating a new user with an existing email address "
                    "(User(email=%s, pk=%s))" % (email, existing_user.pk))
                with self.atomic(**self.atomic_kwargs):
                    existing_user = User.objects.get(pk=existing_user.pk)
                    existing_user.email = None
                    existing_user.save()
                return User.objects.create_user(user_id, email)
    else:
        # Django expects to be able to pass in whatever credentials it
        # has, and for you to raise a TypeError if they mean nothing to
        # you
        raise TypeError()
def save(self, *args, **kwargs):
    """Persist the series after validating its sampling step.

    Raises:
        IntegrityError: when ``step`` is zero or negative.
    """
    step_value = self.step
    if step_value <= 0:
        raise IntegrityError("Step must be > 0")
    super(RegularTimeSeries, self).save(*args, **kwargs)
def create(self, validated_data):
    """
    This function exists becuase it's the only function this serializer is
    allowed to do, AND we've got some caveats about doing it.  Mainly
    verifying that it's a valid word etc.

    Basically, we go through and try to do everything. If there's an
    error, it will mostly be a DoesNotExist, and mostly we want to throw
    that back for the frontend to deal with.

    Returns the created WordList row.  Raises IntegrityError for invalid
    or repeated words (depending on the play's missed/repeats settings)
    and PermissionDenied for anonymous requests.
    """
    errormessage = ""
    # puzzleplay is the universal play object, with player=None
    puzzleplay = validated_data['puzzle']
    # play is THIS USER'S play object. We'll need this to check what rules
    # this user is playing by.
    try:
        try:
            play = models.Play.objects.filter(
                puzzle=puzzleplay.puzzle,
                player=self.context['request'].user.player)[0]
        except AttributeError:
            # Anonymous user has no .player attribute.
            raise PermissionDenied
    except IndexError:
        # There is no play record for this user/puzzle combination.
        # Create it.
        play = models.Play(puzzle=puzzleplay.puzzle,
                           player=self.context['request'].user.player)
        play.save()
    try:
        # Here, we get the correct word record for this word. But note the
        # roundabout method. This is to make sure that it exists FOR THIS
        # PUZZLE. Simply finding it isn't enough.
        word = models.WordList.objects.filter(
            play=puzzleplay,
            word__word=validated_data['word']
        )[0].word
    except IndexError:
        errormessage = "invalid word. Not on this puzzle or not a word."
        if play.missed:
            # Track missed words by adding a wordlist record with no word.
            word = None
        else:
            raise IntegrityError(errormessage)
    # Create the wordlist object.
    wordlist = models.WordList(word=word, play=play,
                               foundtime=validated_data['foundtime'])
    try:
        wordlist.save()
    except IntegrityError:
        errormessage = "Not Unique. Word alread found."
        # The user entered the same word again.
        if play.repeats:
            # Track repeated words by adding a wordlist record with no
            # word.
            wordlist.word = None
            # This "save" call should never get a uniqueness check failed,
            # because null!=null (according to SQL)
            wordlist.save()
        else:
            raise IntegrityError(errormessage)
    # Lie about it: If word=null, raise an integrityError
    if(wordlist.word is None):
        raise IntegrityError(errormessage)
    return wordlist
class CVReferencesModel(models.Model):
    """
    Intermediate class for CharVersion -> CharVersion semantic references.
    We have an entry here whenever a CharVersion refers to another
    CharVersion (and effectively locks the target!)

    Constraints:
    source != target (restriction may be removed, but we check it for now)
    source.owner == target.owner
    target.edit_mode in ALLOWED_REFERENCE_TARGETS (list defined in EditModes.py)

    Furthermore, CVReference has some "owner" that is responsible for
    creating/deleting it, depending on ref_type. For ref_type OVERWRITE,
    this is the source itself, which must have
    edit_mode.is_overwriter() == True.

    We would like to enforce these checks at the DB level, but cannot do
    so with Django.
    """

    class Meta(MyMeta):
        pass

    class ReferenceType(models.IntegerChoices):
        OVERWRITE = 1

    # The referring CharVersion; deleting it deletes the reference.
    source: CharVersionModel = models.ForeignKey(
        CharVersionModel, on_delete=models.CASCADE,
        related_name='references_from')
    # The referenced CharVersion; PROTECT keeps it alive while referenced.
    target: CharVersionModel = models.ForeignKey(
        CharVersionModel, on_delete=models.PROTECT,
        related_name='references_to')
    reason_str: str = models.CharField(max_length=200, blank=False, null=False)
    ref_type: int = models.IntegerField(choices=ReferenceType.choices,
                                        default=ReferenceType.OVERWRITE.value)
    objects: MANAGER_TYPE[CVReferencesModel]

    def check_validity(self) -> None:
        """
        Checks validity constraints for this CVReference (assumed to be in
        sync with DB). We cannot include these constraints at the DB level
        with django because it involves joins.
        Indicates failure by raising an IntegrityError.
        """
        if self.source == self.target:
            # Self-references would be doable, but they would require
            # special handling and it's not worth the pain.
            raise IntegrityError("CharVersion reference to itself")
        if self.source.owner != self.target.owner:
            raise IntegrityError(
                "CharVersion references only allowed with the same Char model")
        if self.target.edit_mode not in ALLOWED_REFERENCE_TARGETS:
            # Message fix: "being references" -> "being referenced".
            raise IntegrityError(
                "CharVersion reference to target that does not allow being referenced"
            )
        if self.ref_type == self.ReferenceType.OVERWRITE.value:
            if self.source.edit_mode.is_overwriter() is False:
                # Message fix: removed the duplicated "but but".
                raise IntegrityError(
                    "CharVersion references overwrite target, but source has wrong type"
                )
            if type(self).objects.filter(target=self.target).count() != 1:
                raise IntegrityError(
                    "CharVersion references overwrite target, but target has other references to it."
                )

    @classmethod
    def check_reference_validity_for_char_version(
            cls, /, char_version: CharVersionModel) -> None:
        """
        Checks validity constraints for all References involving
        char_version (assumed to be in sync with DB)
        """
        for source_references in cls.objects.filter(source=char_version):
            source_references.check_validity()
        for target_references in cls.objects.filter(target=char_version):
            target_references.check_validity()
        # An overwriter must have exactly one OVERWRITE reference.
        if char_version.edit_mode.is_overwriter() and \
                cls.objects.filter(source=char_version,
                                   ref_type=cls.ReferenceType.OVERWRITE.value).count() != 1:
            raise IntegrityError("Invalid number of overwrite targets")
def save(self, *args, **kwargs):
    """Save only when the subject and level share the same scheme.

    Raises:
        IntegrityError: when the two schemes differ.
    """
    # Guard clause instead of if/else: reject mismatches up front.
    if self.subject.scheme != self.level.scheme:
        raise IntegrityError("schemes didn't match")
    return super(Teach, self).save(*args, **kwargs)
def raise_exception():
    # Test helper: unconditionally raise a database IntegrityError.
    raise IntegrityError()
def delete(self, *args, **kwargs):
    """Delete this range unless a benefit or condition still depends on it.

    Raises:
        IntegrityError: when any dependent benefit or condition exists.
    """
    has_dependents = (self.benefit_set.exists()
                      or self.condition_set.exists())
    if has_dependents:
        raise IntegrityError(
            _('Can not delete range with a dependent benefit or condition.'))
    return super().delete(*args, **kwargs)
def save(self, *args, **kwargs):
    """Save after verifying the score definition and the group belong to
    the same competition.

    Raises:
        IntegrityError: on a competition mismatch.
    """
    if self.scoredef.competition != self.group.competition:
        # Message fix: corrected the "compeition" typo and named the
        # entities the code actually compares (scoredef vs. group, not
        # phase).
        raise IntegrityError(
            "Score Def competition and group competition must be the same")
    super(SubmissionScoreDefGroup, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
    """don't create a request for a registered email"""
    creating = not self.id
    if creating and User.objects.filter(email=self.email).exists():
        raise IntegrityError()
    super().save(*args, **kwargs)
def save(self, *args, **kwargs):
    """Save the score, refusing manually-assigned values on computed
    score definitions.

    Raises:
        IntegrityError: if the score definition is computed and a value
            was supplied.
    """
    # Bug fix: the original tested the bare name ``value`` (a NameError
    # at runtime); the instance attribute ``self.value`` was intended.
    if self.scoredef.computed is True and self.value:
        raise IntegrityError("Score is computed. Cannot assign a value")
    super(SubmissionScore, self).save(*args, **kwargs)
def save(self, userprofile, commit=True):
    """Convert a plain UserProfile into a MemberProfile.

    Django cannot change a model's concrete subclass in place, so this
    routes every relationship through a temporary 'marysuec' profile:
    clone A -> marysuec, move relations A -> marysuec, delete A, create
    the MemberProfile A' from the form, move relations marysuec -> A',
    delete marysuec.

    Raises:
        IntegrityError: if commit=False is requested (unsupported here)
            or the profile is already a MemberProfile.
    """
    if commit is False:
        raise IntegrityError(
            'Saving logic complicated, commit must be enabled')
    if userprofile.is_member():
        raise IntegrityError('Model is already MemberProfile')
    # 1. clone profile
    uniqname = userprofile.uniqname
    marysuec = userprofile
    marysuec_user = userprofile.user
    marysuec_user.username = '******'
    # Clearing id/pk forces an INSERT of the cloned auth user.
    marysuec_user.id = None
    marysuec_user.pk = None
    marysuec_user.save()
    marysuec.user = marysuec_user
    # 2. change uniqname to marysuec
    marysuec.uniqname = 'marysuec'
    marysuec.save()
    # Re-fetch the original profile (marysuec now shadows it in memory).
    userprofile = UserProfile.objects.get(uniqname=uniqname)
    # 3. reassign all relationships of interest from profile A to marysuec
    nepp = userprofile.noneventprojectparticipant_set.all().distinct()
    shifts = userprofile.event_attendee.all().distinct()
    announcement_blurbs = userprofile.announcementblurb_set.all().distinct()
    waitlist_slot = userprofile.waitlistslot_set.all().distinct()
    itembring = userprofile.usercanbringpreferreditem_set.all().distinct()
    praise_giver = userprofile.praise_giver.all().distinct()
    praise_receiver = userprofile.praise_recipient.all().distinct()
    prefs = userprofile.userpreference_set.all().distinct()
    background_check = userprofile.backgroundcheck_set.all().distinct()
    for n in nepp:
        n.participant = marysuec
        n.save()
    for s in shifts:
        s.attendees.add(marysuec)
        s.attendees.remove(userprofile)
    for a in announcement_blurbs:
        a.contacts.add(marysuec)
        a.contacts.remove(userprofile)
    for w in waitlist_slot:
        w.user = marysuec
        w.save()
    for item in itembring:
        item.user = marysuec
        item.save()
    for p in praise_giver:
        p.giver = marysuec
        p.save()
    for p in praise_receiver:
        p.recipient = marysuec
        p.save()
    for p in prefs:
        p.user = marysuec
        p.save()
    for b in background_check:
        b.member = marysuec
        b.save()
    # 4. delete profile A
    userprofile.delete()
    # 5. create profile A'
    m = super(ConvertNonMemberToMemberForm, self).save(commit=False)
    m.uniqname = uniqname
    m.user = User.objects.get(username=uniqname)
    # Carry over the identity fields from the clone.
    m.nickname = marysuec.nickname
    m.first_name = marysuec.first_name
    m.middle_name = marysuec.middle_name
    m.last_name = marysuec.last_name
    m.suffix = marysuec.suffix
    m.maiden_name = marysuec.maiden_name
    m.title = marysuec.title
    # 6. save profile A'
    m.save()
    # 7. reassign all relationships from profile marysuec to A'
    for n in nepp:
        n.participant = m
        n.save()
    for s in shifts:
        s.attendees.add(m)
        s.attendees.remove(marysuec)
    for a in announcement_blurbs:
        a.contacts.add(m)
        a.contacts.remove(marysuec)
    for w in waitlist_slot:
        w.user = m
        w.save()
    for item in itembring:
        item.user = m
        item.save()
    for p in praise_giver:
        p.giver = m
        p.save()
    for p in praise_receiver:
        p.recipient = m
        p.save()
    for p in prefs:
        p.user = m
        p.save()
    for b in background_check:
        b.member = m
        b.save()
    # 8. delete marysuec
    marysuec.delete()
    marysuec_user.delete()
def assign_jobs():
    """
    Check all jobs against all available devices and assign only if all
    conditions are met.
    This routine needs to remain fast, so has to manage local cache
    variables of device status but still cope with a job queue over 1,000
    and a device matrix of over 100. The main load is in
    find_device_for_job as *all* jobs in the queue must be checked at each
    tick. (A job far back in the queue may be the only job which exactly
    matches the most recent devices to become available.)
    When viewing the logs of these operations, the device will be Idle
    when Assigning to a Submitted job. That job may be for a device_type
    or a specific device (typically health checks use a specific device).
    The device will be Reserved when Assigned to a Submitted job on that
    device - the type will not be mentioned. The total number of assigned
    jobs and devices will be output at the end of each tick. Finally, the
    reserved device is removed from the local cache of available devices.
    Warnings are emitted if the device states are not as expected, before
    or after assignment.
    """
    # FIXME: once scheduler daemon is disabled, implement as in
    # share/zmq/assign.[dia|png]
    # FIXME: Make the forced health check constraint explicit
    # evaluate the testjob query set using list()
    logger = logging.getLogger('dispatcher-master')
    _validate_queue()
    jobs = list(get_job_queue())
    if not jobs:
        return
    assigned_jobs = []
    reserved_devices = []
    # this takes a significant amount of time when under load, only do it
    # once per tick
    devices = list(get_available_devices())
    logger.debug("[%d] devices available", len(devices))
    logger.debug("[%d] jobs in the queue", len(jobs))
    # a forced health check can be assigned even if the device is not in
    # the list of idle devices.
    for job in jobs:  # pylint: disable=too-many-nested-blocks
        # this needs to stay as a tight loop to cope with load
        device = find_device_for_job(job, devices)
        # slower steps as assignment happens less often than the checks
        if device:
            if not _validate_idle_device(job, device):
                if device in devices:
                    devices.remove(device)
                    logger.debug(
                        "Removing %s from the list of available devices",
                        str(device.hostname))
                continue
            logger.info("Assigning %s for %s", device, job)
            # avoid catching exceptions inside atomic (exceptions are slow
            # too)
            # https://docs.djangoproject.com/en/1.7/topics/db/transactions/#controlling-transactions-explicitly
            if AuthToken.objects.filter(user=job.submitter).count():
                job.submit_token = AuthToken.objects.filter(
                    user=job.submitter).first()
            else:
                job.submit_token = AuthToken.objects.create(user=job.submitter)
            try:
                # Make this sequence atomic
                with transaction.atomic():
                    job.actual_device = device
                    job.save()
                    device.current_job = job
                    # implicit device save in state_transition_to()
                    chk = device.state_transition_to(
                        Device.RESERVED,
                        message="Reserved for job %s" % job.display_id,
                        job=job, master=True)
                    if not chk:
                        # Raising inside atomic() rolls the whole
                        # reservation back.
                        raise IntegrityError(
                            'Unable to create device state transition.')
            except IntegrityError:
                # Retry in the next call to _assign_jobs
                logger.warning(
                    "Transaction failed for job %s, device %s",
                    job.display_id, device.hostname)
            assigned_jobs.append(job.id)
            reserved_devices.append(device.hostname)
            logger.info("Assigned %s to %s", device, job)
            if device in devices:
                logger.debug(
                    "Removing %s from the list of available devices",
                    str(device.hostname))
                devices.remove(device)
    # re-evaluate the devices query set using list() now that the job loop
    # is complete
    devices = list(get_available_devices())
    postprocess = _validate_non_idle_devices(reserved_devices, devices)
    if postprocess and reserved_devices:
        logger.debug(
            "All queued jobs checked, %d devices reserved and validated",
            len(reserved_devices))
    logger.info("Assigned %d jobs on %s devices",
                len(assigned_jobs), len(reserved_devices))
def get_current_term():
    """Return the term of the single CurrentTerm row.

    Raises:
        IntegrityError: unless exactly one CurrentTerm object exists.
    """
    terms = CurrentTerm.objects.all()
    if terms.count() != 1:
        raise IntegrityError('There must be exactly 1 current term object')
    only_term = terms[0]
    return only_term.current_term
def save(self, revision=True, *args, **kwargs):
    """
    Handles the saving/updating of a Publishable instance.

    Arguments:
    revision - if True, a new version of this Publishable will be created.

    Raises:
        IntegrityError: on slug collision, conflicting head, or
            conflicting revision_id.
    """
    if revision:
        # If this is a revision, set it to be the head of the list and
        # increment the revision id
        self.head = True
        self.revision_id += 1
        # NOTE(review): previous_revision is fetched but never used here.
        previous_revision = self.get_previous_revision()
        if not self.is_parent():
            # If this is a revision, delete the old head of the list.
            type(self).objects.filter(parent=self.parent,
                                      head=True).update(head=False)
        # Clear the instance id to force Django to save a new instance.
        # Both fields (pk, id) required for this to work -- something to
        # do with model inheritance
        self.pk = None
        self.id = None
        # New version is unpublished by default
        self.is_published = False
    # Raise integrity error if instance with given slug already exists.
    if type(self).objects.filter(slug=self.slug).exclude(
            parent=self.parent).exists():
        raise IntegrityError("%s with slug '%s' already exists." %
                             (type(self).__name__, self.slug))
    # Set created_at to current time, but only for first version
    if not self.created_at:
        self.created_at = timezone.now()
    self.updated_at = timezone.now()
    if revision:
        self.updated_at = timezone.now()
        # Check that there is only one 'head'
        if self.is_conflicting_head():
            raise IntegrityError("%s with head=True already exists." %
                                 (type(self).__name__,))
        # Check that there is only one version with this revision_id
        if self.is_conflicting_revision_id():
            # Bug fix: the % arguments were swapped, producing messages
            # like "3 with revision_id=Article already exists."; the class
            # name belongs first, the revision id second.
            raise IntegrityError("%s with revision_id=%s already exists." %
                                 (type(self).__name__, self.revision_id))
    super(Publishable, self).save(*args, **kwargs)
    # Update the parent foreign key
    if not self.parent:
        self.parent = self
        super(Publishable, self).save(update_fields=['parent'])
    if revision:
        # Set latest version for all articles
        type(self).objects.filter(parent=self.parent).update(
            latest_version=self.revision_id)
        self.latest_version = self.revision_id
    return self
def create_uploaded_persons_tasks(data): """ Create persons and tasks from upload data. """ # Quick sanity check. if any([row.get('errors') for row in data]): raise InternalError('Uploaded data contains errors, cancelling upload') persons_created = [] tasks_created = [] events = set() with transaction.atomic(): for row in data: try: row_repr = ('{personal} {family} {username} <{email}>, ' '{role} at {event}').format(**row) fields = {key: row[key] for key in Person.PERSON_UPLOAD_FIELDS} fields['username'] = row['username'] if row['person_exists'] and row['existing_person_id']: # we should use existing Person p = Person.objects.get(pk=row['existing_person_id']) elif row['person_exists'] and not row['existing_person_id']: # we should use existing Person p = Person.objects.get( personal=fields['personal'], family=fields['family'], username=fields['username'], email=fields['email'], ) else: # we should create a new Person without any email provided p = Person(**fields) p.save() persons_created.append(p) if row['event'] and row['role']: e = Event.objects.get(slug=row['event']) r = Role.objects.get(name=row['role']) # if the number of learners attending the event changed, # we should update ``event.attendance`` if row['role'] == 'learner': events.add(e) t, created = Task.objects.get_or_create(person=p, event=e, role=r) if created: tasks_created.append(t) except IntegrityError as e: raise IntegrityError('{0} (for "{1}")'.format( str(e), row_repr)) except ObjectDoesNotExist as e: raise ObjectDoesNotExist('{0} (for "{1}")'.format( str(e), row_repr)) return persons_created, tasks_created
def _find_required_section(msheet, label, error_message):
    """Return the row index of *label* in column 1 of *msheet*.

    Raises IntegrityError(error_message) if the section is missing (or is
    found at row 0, matching the original truthiness test).
    """
    row = column_value_search(1, label, msheet)
    if not row:
        raise IntegrityError(error_message)
    return row


def parse_minutegram(msheet, csheet, sw, user):
    """Parse a 'minutegram' workbook into a Work with its plans and clients.

    Arguments:
        msheet - minutegram worksheet (xlrd sheet).
        csheet - corporate-clients worksheet, forwarded to
            parse_corp_clients.
        sw - source work-like object whose scheduling fields are copied
            onto the new Work.
        user - logged-in user to record as the work's creator (may be
            falsy).

    Raises:
        IntegrityError: on any structural or validation problem.  BUG FIX:
            the original code assigned a *string* to ``e.__cause__``,
            which raises TypeError in Python 3 ("exception cause must
            derive from BaseException"); the messages are now passed to
            the IntegrityError constructor instead.
    """
    work = Work()
    if msheet.cell(0, 7).value == '':
        raise IntegrityError("El trabajo no tiene numero")
    work.number = msheet.cell(0, 7).value

    # Locate every mandatory section of the document up front.
    drow = _find_required_section(
        msheet, 'DESCRIPCION TP:',
        "El documento no tiene seccion DESCRIPCION TP")
    jrow = _find_required_section(
        msheet, 'JUSTIFICACION: ',
        "El documento no tiene seccion JUSTIFICACION")
    orow = _find_required_section(
        msheet, 'OBSERVACIONES:',
        "El documento no tiene seccion OBSERVACIONES")
    wprow = _find_required_section(
        msheet, 'PLAN DE TRABAJO (MINUTOGRAMA):',
        "El documento no tiene seccion PLAN DE TRABAJO")
    cprow = _find_required_section(
        msheet, 'PLAN DE CONTINGENCIA / ROLLBACK:',
        "El documento no tiene seccion PLAN DE CONTINGENCIA / ROLLBACK")

    # Copy the scheduling/classification data from the source work.
    work.ticketArea = sw.ticketArea
    work.department = sw.department
    work.municipality = sw.municipality
    work.impact = sw.impact
    work.ticketCause = sw.ticketCause
    work.initialDate = sw.initialDate
    work.finalDate = sw.finalDate
    work.outboundDate = sw.outboundDate
    work.createdDate = datetime.date.today()
    work.affectTime = sw.affectTime
    work.rollbackTime = sw.rollbackTime

    now = timezone.make_aware(datetime.datetime.now(),
                              timezone.get_default_timezone())

    # If the cause's time lapse is in hours it must go to an internal
    # area, never an external one.
    cause_type = sw.ticketCause.timeLapseType
    area_type = sw.ticketArea.type
    if cause_type == Cause.HOURS and area_type == Area.INTERN:
        limit = now + datetime.timedelta(days=1,
                                         hours=sw.ticketCause.internTimeLapse)
        if limit <= sw.initialDate:
            work.limitResponseDate = limit
        else:
            raise IntegrityError("El tiempo maximo de respuesta de los clientes es mas tarde que la fecha de inicio del trabajo")
    elif cause_type == Cause.HOURS and area_type == Area.EXTERN:
        raise IntegrityError("La Causa del ticket no puede asignarse a un area externa")
    elif cause_type == Cause.DAYS and area_type == Area.INTERN:
        limit = now + datetime.timedelta(days=1 + sw.ticketCause.internTimeLapse)
        if limit <= sw.initialDate:
            work.limitResponseDate = limit
        else:
            raise IntegrityError("El tiempo maximo de respuesta de los clientes es mas tarde que la fecha de inicio del trabajo")
    elif cause_type == Cause.DAYS and area_type == Area.EXTERN:
        # BUG FIX: the original repeated the ``Area.INTERN`` condition
        # here, making this branch unreachable even though it uses
        # ``externTimeLapse``.
        limit = now + datetime.timedelta(days=1 + sw.ticketCause.externTimeLapse)
        if limit <= sw.initialDate:
            work.limitResponseDate = limit
        else:
            raise IntegrityError("El tiempo maximo de respuesta de los clientes es mas tarde que la fecha de inicio del trabajo")

    # Record the logged-in user as the creator.
    if user:
        work.userCreator = user

    work.description = msheet.cell(drow + 1, 1).value
    work.justification = msheet.cell(jrow + 1, 1).value
    work.observations = msheet.cell(orow + 1, 1).value

    # Reuse an existing group with this number (this is a reprogramming:
    # cancel every previous work and invalidate its acceptances), or
    # create a fresh group.  BUG FIX: the original used a bare ``except:``
    # that also swallowed errors raised while cancelling previous works;
    # only the missing-group case is handled now.
    try:
        group = WorkGroup.objects.get(number=work.number)
    except WorkGroup.DoesNotExist:
        group = WorkGroup()
        group.number = work.number
        group.save()
    else:
        for w in group.work_set.all():
            w.state = Work.CANCELED
            for acc in w.acceptance_set.all():
                acc.valid = False
                acc.save()
            w.save()
        work.programmed = Work.REPROGRAMMED
    work.group = group
    work.save()

    # Load work plans: the rows between the work-plan header and the
    # contingency-plan header.
    for i in range(wprow + 2, cprow):
        if not check_line(i, 2, 6, msheet):
            raise IntegrityError("Alguno de los planes de trabajo tiene un campo vacio")
        wp = WorkPlan()
        wp.work = work
        wp.initialDate = xldate_as_datetime(msheet.cell(i, 2).value, 0)
        wp.finalDate = xldate_as_datetime(msheet.cell(i, 3).value, 0)
        # Keep only the (hour, minute, second) part of the Excel date.
        wp.affectation = datetime.time(*(xldate_as_tuple(msheet.cell(i, 4).value, 0))[3:])
        wp.activity = msheet.cell(i, 5).value
        wp.save()

    # Load contingency plans.  NOTE(review): the end bound ``drow - 1``
    # implies the DESCRIPCION TP section follows the contingency plans in
    # this layout -- confirm against a sample workbook.
    for i in range(cprow + 2, drow - 1):
        if not check_line(i, 2, 6, msheet):
            raise IntegrityError("Alguno de los planes de contingencia tiene un campo vacio")
        cp = ContingencyPlan()
        cp.work = work
        cp.initialDate = xldate_as_datetime(msheet.cell(i, 2).value, 0)
        cp.finalDate = xldate_as_datetime(msheet.cell(i, 3).value, 0)
        cp.affectation = datetime.time(*(xldate_as_tuple(msheet.cell(i, 4).value, 0))[3:])
        cp.activity = msheet.cell(i, 5).value
        cp.save()

    parse_corp_clients(csheet, work)
def __init__(self, msg, protected_objects):
    """Initialize the error, remembering which objects blocked the action.

    Arguments:
        msg - human-readable error message.
        protected_objects - the related objects that caused the error;
            exposed on the instance for callers to inspect.
    """
    self.protected_objects = protected_objects
    # Resolved TODO: Python 2.4 support is long gone, so delegate to the
    # base class via zero-argument super() (Python 3).
    super().__init__(msg, protected_objects)
def save(self, *args, **kwargs):
    """Save the instance, enforcing that only one may ever exist.

    Raises:
        IntegrityError: if a different instance of this model is already
            stored.
    """
    model = self.__class__
    # Any stored row other than ourselves violates the single-instance
    # rule.  For an unsaved instance self.pk is None, and
    # exclude(pk=None) matches every saved row -- same outcome as the
    # original count()/get() pair, but in one query and without crashing
    # with MultipleObjectsReturned if the table somehow holds two rows.
    if model.objects.exclude(pk=self.pk).exists():
        raise IntegrityError('Can only create 1 %s instance' % model.__name__)
    super(SingleInstanceMixin, self).save(*args, **kwargs)