def save_headwords(taskId):
    '''
    Creates a new load for a Spelling task and one RawPiece per submitted
    headword; returns the dumped raw pieces.

    Fixes: removed a leftover debug `print r` statement (Python 2 print,
    also a syntax error under Python 3) and replaced the try/del dance
    with dict.pop().
    '''
    task = m.Task.query.get(taskId)
    if not task:
        raise InvalidUsage(_('task {0} not found').format(taskId), 404)
    if task.taskType != 'Spelling':
        raise InvalidUsage(
            _('task {0} has unexpected task type').format(taskId))
    data = MyForm(Field(
        'headwords',
        is_mandatory=True,
    )).get_data()
    # TODO: createdBy is hard-coded; presumably should be the current
    # user's id — confirm before changing
    load = m.Load(taskId=taskId, createdBy=699)
    SS.add(load)
    SS.flush()  # obtain load.loadId for the context strings below
    rawPieces = []
    for i, r in enumerate(data['headwords']):
        assemblyContext = 'L_%05d_%05d' % (load.loadId, i)
        allocationContext = 'L_%05d' % load.loadId
        # 'meta' is not a RawPiece constructor argument; discard if present
        r.pop('meta', None)
        rawPiece = m.RawPiece(taskId=taskId, loadId=load.loadId,
            assemblyContext=assemblyContext,
            allocationContext=allocationContext, words=1, **r)
        rawPieces.append(rawPiece)
        SS.add(rawPiece)
    SS.flush()
    return jsonify(rawPieces=m.RawPiece.dump(rawPieces))
def assign_batch_to_user(batchId, userId):
    ''' assigns the given batch to the given user and starts a new lease '''
    batch = m.Batch.query.get(batchId)
    if not batch:
        raise InvalidUsage(_('batch {0} not found').format(batchId), 404)
    user = m.User.query.get(userId)
    if not user:
        raise InvalidUsage(_('user {0} not found').format(userId), 404)
    # TODO: perform more checks according to policy
    if policy.active_worker_only:
        activeWorkers = m.TaskWorker.query.filter_by(
            taskId=batch.taskId,
        ).filter_by(
            subTaskId=batch.subTaskId,
        ).filter_by(
            userId=userId,
        ).filter_by(
            removed=False,
        ).count()
        if activeWorkers == 0:
            raise InvalidUsage(
                _('user {0} is not working on sub task {1}').format(
                    userId, batch.subTaskId))
    # TODO: change time from naive to timezone aware
    grantedAt = datetime.utcnow().replace(tzinfo=pytz.utc)
    batch.leaseGranted = grantedAt
    batch.leaseExpires = grantedAt + batch.subTask.defaultLeaseLife
    batch.user = user
    SS.flush()
    return jsonify({
        'message': _('batch {0} has been assigned to user {1}, expires at {2}').format(
            batchId, user.userName, batch.leaseExpires),
        'batch': m.Batch.dump(batch),
    })
def create_label_group(labelSetId):
    ''' creates a new label group '''
    labelSet = m.LabelSet.query.get(labelSetId)
    if labelSet is None:
        raise InvalidUsage(
            _('label set {0} not found').format(labelSetId), 404)
    form = MyForm(
        Field('name', is_mandatory=True, validators=[
            validators.is_string,
            (check_label_group_name_uniqueness, (labelSetId, None)),
        ]),
        Field('dropDownDisplay', default=False, validators=[
            validators.is_bool,
        ]),
        Field('isMandatory', default=False, validators=[
            validators.is_bool,
        ]),
    )
    # NOTE: get_data() presumably injects labelSetId from the URL view
    # args — confirm against MyForm's behavior
    labelGroup = m.LabelGroup(**form.get_data())
    SS.add(labelGroup)
    SS.flush()
    return jsonify({
        'message': _('created label group {0} successfully').format(labelGroup.name),
        'labelGroup': m.LabelGroup.dump(labelGroup),
    })
def create_language(self):
    '''
    Handles an EDM 'create language' notification: if a language with the
    same name already exists locally, applies the incoming values as
    changes; otherwise inserts a new language row.

    Fix: use items() instead of iteritems() for consistency with the
    sibling update_language/update_country handlers (and Python 3
    compatibility).
    '''
    desc = json.loads(self.Message)
    data = util.edm.decode_changes('Language', desc['changes'])
    current_app.logger.info(
        'a language is being created using {}'.format(data))
    try:
        lang = m.Language.query.filter(m.Language.name == data['name']).one()
        # language already exists: apply incoming values as changes
        current_app.logger.info(
            'found language {}, applying changes {}'.format(lang.name, data))
        changes = {}
        for k, v in data.items():
            try:
                if getattr(lang, k) != v:
                    setattr(lang, k, v)
                    changes[k] = v
            except AttributeError:
                # skip fields the local model doesn't have
                continue
        current_app.logger.debug('actual changes {}'.format(changes))
        SS.flush()
        SS.commit()
    except sqlalchemy.orm.exc.NoResultFound:
        SS.rollback()
        lang = m.Language(**data)
        SS.add(lang)
        SS.flush()
        SS.commit()
def create_sub_task_rate_record(subTaskId):
    '''
    Creates a new pay-rate record for the given sub task, attributed to
    the current user.

    Fix: the not-found error now returns HTTP 404, consistent with the
    other resource-lookup handlers in this module.
    '''
    subTask = m.SubTask.query.get(subTaskId)
    if not subTask:
        raise InvalidUsage(_('sub task {0} not found').format(subTaskId), 404)
    data = MyForm(
        Field('rateId', is_mandatory=True, validators=[]),
        Field('multiplier', is_mandatory=True,
            normalizer=lambda data, key, value: float(value),
            validators=[
                (validators.is_number, (), dict(min_value=0)),
            ]),
        Field('bonus', default=None, validators=[
            (validators.is_number, (), dict(ge=0)),
        ]),
    ).get_data()
    me = session['current_user']
    subTaskRate = m.SubTaskRate(taskId=subTask.taskId, updatedBy=me.userId,
        **data)
    SS.add(subTaskRate)
    SS.flush()  # obtain subTaskRate.subTaskRateId for the message
    return jsonify({
        'message': _('created sub task rate {0} successfully').format(
            subTaskRate.subTaskRateId),
        'subTaskRate': m.SubTaskRate.dump(subTaskRate),
    })
def create_alphabet():
    ''' creates a new alphabet '''
    form = MyForm(
        Field('name', is_mandatory=True, validators=[
            validators.non_blank,
            check_name_uniqueness,
        ]),
        Field('description'),
        Field('dialectId', is_mandatory=True, validators=[
            validators.is_number,
            check_dialect_existence,
        ]),
        Field('url', default=lambda: None),
    )
    alphabet = m.Alphabet(**form.get_data())
    SS.add(alphabet)
    SS.flush()
    return jsonify({
        'message': _('created alphabet {0} successfully').format(alphabet.name),
        'alphabet': m.Alphabet.dump(alphabet),
    })
def create_error_type():
    ''' creates a new error type '''
    toFloat = lambda data, key, value: float(value)
    data = MyForm(
        Field('name', is_mandatory=True, validators=[
            validators.non_blank,
            check_name_uniqueness,
        ]),
        Field('errorClassId', is_mandatory=True, validators=[
            check_error_class_existence,
        ]),
        Field('defaultSeverity', is_mandatory=True, normalizer=toFloat,
            validators=[
                (validators.is_number, (), dict(max_value=1, min_value=0)),
            ]),
    ).get_data()
    errorType = m.ErrorType(**data)
    SS.add(errorType)
    SS.flush()
    return jsonify({
        'message': _('created error type {0} successfully').format(errorType.name),
        'errorType': m.ErrorType.dump(errorType),
    })
def save_stats(self):
    '''
    Persists the collected statistics for this sub task: daily subtotals
    (replacing existing rows), per-user metrics with their error and
    abnormal-usage detail rows, and the sub task's summary columns.

    Fixes: the sanity-check assert compared the source dict with '=='
    where identity ('is') is intended — two distinct dicts can compare
    equal (e.g. both empty); iteritems() replaced by items() for Python 3
    compatibility (behavior-identical on Python 2).
    '''
    # update dailysubtasktotals: replace all existing rows for this sub task
    SS.bind.execute(
        m.DailySubtotal.__table__.delete(
            m.DailySubtotal.subTaskId == self.subTask.subTaskId))
    for (subTaskId, userId, workDate), c in self.per_user_per_day.items():
        entry = m.DailySubtotal(subTaskId=subTaskId, userId=userId,
            totalDate=workDate, amount=c.itemCount, words=c.unitCount)
        SS.add(entry)
    # update subtaskmetrics/abnormalusage/subtaskmetricerrors
    # TODO: optionally delete existing entries from above 3 tables
    # TODO: configure server_default for lastUpdated
    now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    for source in [self.per_user_per_interval, self.per_user]:
        for (subTaskId, userId, workIntervalId), c in source.items():
            # per-interval entries carry no subTaskId, per-user entries
            # carry no workIntervalId; use identity to tell them apart
            assert ((source is self.per_user_per_interval
                    and subTaskId is None)
                or (source is self.per_user and workIntervalId is None))
            metric = m.SubTaskMetric(userId=userId,
                workIntervalId=workIntervalId, subTaskId=subTaskId,
                itemCount=c.itemCount, unitCount=c.unitCount,
                workRate=c.workRate, accuracy=c.accuracy, lastUpdated=now)
            SS.add(metric)
            SS.flush()  # obtain metric.metricId for the child rows below
            for errorTypeId, occurences in c.flaggedErrors.items():
                entry = m.SubTaskMetricErrorEntry(metricId=metric.metricId,
                    errorTypeId=errorTypeId, occurences=occurences)
                SS.add(entry)
            for tagId, degree in c.abnormalTagUsage.items():
                entry = m.AbnormalUsageEntry(metricId=metric.metricId,
                    tagId=tagId, labelId=None, degree=degree)
                SS.add(entry)
            for labelId, degree in c.abnormalLabelUsage.items():
                entry = m.AbnormalUsageEntry(metricId=metric.metricId,
                    tagId=None, labelId=labelId, degree=degree)
                SS.add(entry)
    # update subtasks summary columns
    self.subTask.meanAmount = self.sub_task_stats.meanAmount
    self.subTask.maxAmount = self.sub_task_stats.maxAmount
    self.subTask.accuracy = self.sub_task_stats.accuracy
    self.subTask.maxWorkRate = self.sub_task_stats.maxWorkRate
    self.subTask.medianWorkRate = self.sub_task_stats.medianWorkRate
def save_record(self):
    ''' persists this SNS message; raises RuntimeError if it was saved before '''
    existing = m.SnsMessageRecord.query.get(self.MessageId)
    if existing:
        raise RuntimeError(
            _('message {} has been saved already').format(self.MessageId))
    record = m.SnsMessageRecord(
        messageId=self.MessageId,
        messageType=self.Type,
        body=self.body,
    )
    SS.add(record)
    SS.flush()
    current_app.logger.debug('record of message {} has been saved'.format(
        self.MessageId))
def update_label(labelSetId, labelId):
    '''
    Updates label settings; only fields whose value actually changes are
    applied and reported back.

    Fixes: the not-found raise referenced the misspelled name `lableId`,
    which would crash with NameError instead of returning 404; key
    iteration now snapshots the keys before mutating the dict (required
    on Python 3, harmless on Python 2).
    '''
    labelSet = m.LabelSet.query.get(labelSetId)
    if not labelSet:
        raise InvalidUsage(
            _('label set {0} not found').format(labelSetId), 404)
    label = m.Label.query.get(labelId)
    if not label or label.labelSetId != labelSetId:
        raise InvalidUsage(_('label {0} not found').format(labelId), 404)
    data = MyForm(
        Field('name', validators=[
            (check_label_name_uniqueness, (labelSetId, labelId)),
        ]),
        Field('description'),
        Field('shortcutKey', validators=[
            (validators.is_string, (), dict(length=1)),
            check_label_shortcut_key_non_space,
            (check_label_shortcut_key_uniqueness, (labelSetId, labelId)),
        ]),
        Field('extract', validators=[
            validators.non_blank,
            (check_label_extract_uniqueness, (labelSetId, labelId)),
        ]),
        Field('labelGroupId', validators=[
            (check_label_group_existence, (labelSetId, )),
        ]),
        Field('enabled', validators=[
            validators.is_bool,
        ]),
    ).get_data()
    # data['labelSetId'] = labelSetId
    # drop fields whose value does not change so 'updatedFields' is accurate
    for key in list(data.keys()):
        value = data[key]
        if getattr(label, key) != value:
            setattr(label, key, value)
        else:
            del data[key]
    SS.flush()
    return jsonify({
        'message': _('updated label {0} successfully').format(labelId),
        'updatedFields': list(data.keys()),
        'label': m.Label.dump(label),
    })
def create_new_alphabet_rule(alphabetId):
    ''' creates a new rule under the given alphabet '''
    if m.Alphabet.query.get(alphabetId) is None:
        raise InvalidUsage(_('alphabet {0} not found').format(alphabetId), 404)
    data = MyForm(
        Field('name'),
        Field('type'),
        Field('description'),
    ).get_data()
    rule = m.Rule(**data)
    rule.alphabetId = alphabetId
    SS.add(rule)
    SS.flush()
    return jsonify(rule=m.Rule.dump(rule))
def submit_sheet(sheetId):
    '''
    Finalizes an answer sheet for the current user.

    Flow: ownership check, then status checks (expired/finished/
    should-expire), then completeness check; on success stamps
    tFinishedAt and, for auto-scorable sheets, marks it immediately.
    Raises InvalidUsage for every rejection path.
    '''
    sheet = m.Sheet.query.get(sheetId)
    if not sheet:
        raise InvalidUsage(_('sheet {0} not found').format(sheetId), 404)
    me = session['current_user']
    if sheet.userId != me.userId:
        raise InvalidUsage(
            _('you are not the owner of sheet {0}').format(sheetId))
    now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    if sheet.status == m.Sheet.STATUS_EXPIRED:
        raise InvalidUsage(_('sheet {0} has expired already').format(sheetId))
    elif sheet.status == m.Sheet.STATUS_FINISHED:
        raise InvalidUsage(_('sheet {0} is finished already').format(sheetId))
    elif sheet.status == m.Sheet.STATUS_SHOULD_EXPIRE:
        # sheet passed its deadline but was never stamped: persist the
        # expiry timestamp before rejecting the submission
        # if sheet.tExpiresBy < now:
        sheet.tExpiredAt = now
        SS.flush()
        SS.commit()
        raise InvalidUsage(_('sheet {0} has expired already').format(sheetId))
    # a sheet is finished only when every entry has an answer attached
    finished = all([entry.answerId != None for entry in sheet.entries])
    if not finished:
        raise InvalidUsage(_('sheet {0} is not finished').format(sheetId))
    sheet.tFinishedAt = now
    SS.flush()
    # TODO: define autoScoring property on sheet
    autoScoring = all([i.question.autoScoring for i in sheet.entries])
    if autoScoring:
        passed = mark_answer_sheet(sheet)
        # NOTE(review): _() is called here with the test's configured
        # message and, as fallback, with multiple arguments — presumably
        # a project-specific gettext wrapper; confirm its signature
        if passed:
            message = _(sheet.test.messageSuccess) or _(
                'Congratulations, you passed!',
                'Your score is {0}.').format(sheet.score)
        else:
            message = _(sheet.test.messageFailure) or _(
                'Sorry, you failed.',
                'Your score is {0}.').format(sheet.score)
    else:
        # manual marking: no score available yet
        message = _(
            'Thank you for taking the test!',
            'The translation supervisor will be marking your test in next 7-10 working days,',
            'and your result will then be available in AppenOnline.')
    return jsonify({
        'message': message,
    })
def unassign_batch(batchId):
    '''
    Removes the current assignee of the given batch, if any.

    Fix: None comparison uses 'is not None' instead of '!= None'.
    '''
    batch = m.Batch.query.get(batchId)
    if not batch:
        raise InvalidUsage(_('batch {0} not found').format(batchId), 404)
    # TODO: check policy
    # TODO: update history?
    if batch.userId is not None:
        batch.unassign()
        message = _('batch {0} has been un-assigned').format(batchId)
    else:
        message = _('batch {0} is not assigned to anyone').format(batchId)
    SS.flush()
    return jsonify({
        'message': message,
        'batch': m.Batch.dump(batch),
    })
def create_pool():
    ''' creates a new question pool (with its questions) from an upload '''
    data = MyForm(
        Field('name', is_mandatory=True, validators=[
            validators.non_blank,
            (check_pool_name_uniqueness, (None, )),
        ]),
        Field('meta', is_mandatory=True, default='{}',
            normalizer=normalize_pool_meta_data),
        Field('taskTypeId', is_mandatory=True, validators=[
            validators.is_number,
            check_task_type_existence,
        ]),
        Field('autoScoring', is_mandatory=True,
            normalizer=normalize_bool_literal,
            validators=[
                validators.is_bool,
            ]),
        Field('tagSetId', validators=[
            check_tag_set_existence,
        ]),
        Field('dataFile', is_mandatory=True),
        Field('questions', is_mandatory=True, default=[],
            normalizer=load_questions),
    ).get_data(is_json=False)
    questions = data.pop('questions')
    data.pop('dataFile')  # the raw upload is not a Pool column
    pool = m.Pool(**data)
    SS.add(pool)
    for questionData in questions:
        pool.questions.append(m.Question(**questionData))
    SS.flush()
    return jsonify({
        'pool': m.Pool.dump(pool, context={'level': 0}),
    })
def submit_answer(sheetId):
    ''' records an answer for one entry of the given answer sheet '''
    sheet = m.Sheet.query.get(sheetId)
    if sheet is None:
        raise InvalidUsage(_('sheet {0} not found').format(sheetId), 404)
    now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    if sheet.tExpiresBy < now:
        # stamp and persist the expiry before rejecting
        sheet.tExpiredAt = now
        SS.flush()
        SS.commit()
        raise InvalidUsage(_('sheet {0} has expired already').format(sheetId))
    me = session['current_user']
    if sheet.userId != me.userId:
        raise InvalidUsage(
            _('you are not the owner of sheet {0}').format(sheetId))
    data = MyForm(
        Field('sheetEntryId', is_mandatory=True, validators=[
            (check_sheet_entry_existence, (sheetId, )),
        ]),
        Field('answer', is_mandatory=True, validators=[
            validators.is_string,
            check_answer,
        ]),
    ).get_data(with_view_args=False)
    answer = m.Answer(**data)
    SS.add(answer)
    SS.flush()  # populate answer.answerId
    assert answer.answerId
    # TODO: define relationship on SheetEntry
    sheetEntry = m.SheetEntry.query.get(data['sheetEntryId'])
    sheetEntry.answerId = answer.answerId
    return jsonify({
        'message': _('created answer {0} successfully').format(answer.answerId),
        'answer': m.Answer.dump(answer),
    })
def populate_rework_sub_task_from_extract(subTaskId):
    '''
    Loads a transcription extract file into a rework sub task and records
    a content event. A validation-only request returns before loading.

    Fixes: 'not x == y' replaced with the direct 'x != y'; the current
    user was fetched from the session twice (as `fakeUser` and `me`) —
    now fetched once.
    '''
    subTask = m.SubTask.query.get(subTaskId)
    if not subTask:
        raise InvalidUsage(_('sub task {0} not found').format(subTaskId), 404)
    if subTask.workType != m.WorkType.REWORK:
        raise InvalidUsage(
            _('work type {0} not supported').format(m.WorkType.REWORK))
    data = MyForm(
        Field('srcSubTaskId', ),
        Field('dataFile', is_mandatory=True, validators=[
            validators.is_file,
        ]),
        Field('validation', default='false',
            normalizer=normalize_bool_literal,
            validators=[
                validators.is_bool,
            ]),
    ).get_data(is_json=False)
    if data['validation']:
        # validation-only request: nothing is loaded
        return jsonify(message=_('data file validated'))
    srcSubTask = m.SubTask.query.get(data['srcSubTaskId'])
    dstSubTask = subTask
    me = session['current_user']  # also acts as the fake loading user
    tx_loader = TxLoader(subTask.taskId)
    result = tx_loader.load_tx_file(data['dataFile'], srcSubTask, me,
        dstSubTask)
    itemCount = result['itemCount']
    now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    # add rework content event
    content_event = m.SubTaskContentEvent(subTaskId=subTaskId, isAdding=True,
        tProcessedAt=now, itemCount=itemCount, operator=me.userId)
    SS.add(content_event)
    SS.flush()
    return jsonify(
        message=_('okay'),
        event=m.SubTaskContentEvent.dump(content_event),
    )
def create_qa_batches(self, qaSubTask, userId, intervalId, samples,
        priority=5):
    '''
    Creates QA batches from sampled work entry ids, one single-page batch
    per page-sized chunk; batches exclude the sampled user (notUserId).
    '''
    if not samples:
        return
    for chunk in self.paginate(samples, qaSubTask.maxPageSize):
        batch = m.Batch(taskId=qaSubTask.taskId,
            subTaskId=qaSubTask.subTaskId, notUserId=userId,
            workIntervalId=intervalId, priority=priority)
        page = m.Page(pageIndex=0)
        batch.pages.append(page)
        for memberIndex, workEntryId in enumerate(chunk):
            memberEntry = m.PageMemberEntry(memberIndex=memberIndex)
            memberEntry.workEntryId = workEntryId
            page.memberEntries.append(memberEntry)
        SS.add(batch)
    SS.flush()
def submit_marking(sheetId):
    ''' records marking results for each entry of the given answer sheet '''
    sheet = m.Sheet.query.get(sheetId)
    if sheet is None:
        raise InvalidUsage(_('sheet {0} not found').format(sheetId), 404)
    # TODO: add policy check to enable/disable re-marking and/or marking of expired sheets
    data = MyForm(
        Field('moreAttempts', is_mandatory=True, validators=[
            validators.is_bool,
        ]),
        Field('comment', validators=[
            validators.is_string,
        ]),
        Field('markings', is_mandatory=True,
            normalizer=normalize_marking_data),
        Field('score', is_mandatory=True, default=0,
            normalizer=calculate_sheet_score),
    ).get_data()
    # TODO: define relationship marking on SheetEntry
    me = session['current_user']
    markings = data.pop('markings')
    for entry, markingData in zip(sheet.entries, markings):
        marking = m.Marking(**markingData)
        marking.sheetEntryId = entry.sheetEntryId
        marking.scorerId = me.userId
        SS.add(marking)
        SS.flush()  # populate marking.markingId
        entry.markingId = marking.markingId
    for key in ('moreAttempts', 'comment', 'score'):
        setattr(sheet, key, data[key])
    return jsonify({
        'message': _('marked sheet {0} successfully').format(sheetId),
        'sheet': m.Sheet.dump(sheet),
    })
def update_label_group(labelSetId, labelGroupId):
    '''
    Updates label group settings; only fields whose value actually
    changes are applied and reported back.

    Fix: the loop mutates `data` while iterating it — iterate over a
    snapshot of the keys (required on Python 3, harmless on Python 2).
    '''
    labelSet = m.LabelSet.query.get(labelSetId)
    if not labelSet:
        raise InvalidUsage(
            _('label set {0} not found').format(labelSetId), 404)
    labelGroup = m.LabelGroup.query.get(labelGroupId)
    if not labelGroup or labelGroup.labelSetId != labelSetId:
        raise InvalidUsage(
            _('label group {0} not found').format(labelGroupId), 404)
    data = MyForm(
        Field('name', validators=[
            validators.is_string,
            (check_label_group_name_uniqueness, (labelSetId, labelGroupId)),
        ]),
        Field('dropDownDisplay', validators=[
            validators.is_bool,
        ]),
        Field('isMandatory', validators=[
            validators.is_bool,
        ]),
    ).get_data()
    # drop fields whose value does not change so 'updatedFields' is accurate
    for key in list(data.keys()):
        value = data[key]
        if getattr(labelGroup, key) != value:
            setattr(labelGroup, key, value)
        else:
            del data[key]
    SS.flush()
    return jsonify({
        'message': _('updated label group {0} successfully').format(labelGroup.name),
        'labelGroup': m.LabelGroup.dump(labelGroup),
        'updatedFields': list(data.keys()),
    })
def create_error_class():
    ''' creates a new error class '''
    form = MyForm(
        Field('name', is_mandatory=True,
            validators=[validators.non_blank, check_name_uniqueness]),
    )
    errorClass = m.ErrorClass(**form.get_data())
    SS.add(errorClass)
    SS.flush()
    return jsonify({
        'message': _('created error class {0} successfully').format(errorClass.name),
        'errorClass': m.ErrorClass.dump(errorClass),
    })
def create_label(labelSetId):
    ''' creates a new label '''
    if m.LabelSet.query.get(labelSetId) is None:
        raise InvalidUsage(
            _('label set {0} not found').format(labelSetId), 404)
    data = MyForm(
        Field('name', is_mandatory=True, validators=[
            (check_label_name_uniqueness, (labelSetId, None)),
        ]),
        Field('description'),
        Field('shortcutKey', validators=[
            (validators.is_string, (), dict(length=1)),
            check_label_shortcut_key_non_space,
            (check_label_shortcut_key_uniqueness, (labelSetId, None)),
        ]),
        Field('extract', is_mandatory=True, validators=[
            validators.non_blank,
            (check_label_extract_uniqueness, (labelSetId, None)),
        ]),
        Field('labelGroupId', validators=[
            (check_label_group_existence, (labelSetId, )),
        ]),
    ).get_data()
    label = m.Label(**data)
    SS.add(label)
    SS.flush()
    return jsonify({
        'message': _('created label {0} successfully').format(label.name),
        'label': m.Label.dump(label),
    })
def dismiss_all_batches(subTaskId):
    '''
    Deletes every batch (with its pages and member entries) of the given
    sub task, recording a tracking event and a content-removal event.
    '''
    subTask = m.SubTask.query.get(subTaskId)
    if subTask is None:
        raise InvalidUsage(_('sub task {0} not found').format(subTaskId), 404)
    batches = m.Batch.query.filter_by(subTaskId=subTaskId).all()
    itemCount = 0
    for batch in batches:
        for page in batch.pages:
            itemCount += len(page.memberEntries)
            for memberEntry in page.memberEntries:
                SS.delete(memberEntry)
            SS.delete(page)
        SS.delete(batch)
    me = session['current_user']
    now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    # add tracking information
    tracking_event = m.TrackingEvent(
        eventType='unbatch_all',
        userId=me.userId,
        tTriggeredAt=now,
        hostIp=request.environ['REMOTE_ADDR'],
        details=dict(
            taskId=subTask.taskId,
            subTaskId=subTaskId,
            count=len(batches),
        ),
    )
    SS.add(tracking_event)
    # add rework content event
    content_event = m.SubTaskContentEvent(
        subTaskId=subTaskId,
        isAdding=False,
        tProcessedAt=now,
        itemCount=itemCount,
        operator=me.userId,
    )
    SS.add(content_event)
    SS.flush()
    return jsonify({
        'message': _('deleted {0} batches from sub task {1}').format(
            len(batches), subTaskId),
    })
def start_or_resume_test(testId):
    '''
    Resumes the current user's active answer sheet for the given test, or
    starts a new one with freshly generated questions.

    Fix: the not-found error now returns HTTP 404, consistent with the
    other resource-lookup handlers.
    '''
    me = session['current_user']
    test = m.Test.query.get(testId)
    if not test:
        raise InvalidUsage(_('test {0} not found').format(testId), 404)
    if not test.isEnabled:
        raise InvalidUsage(_('test {0} is not enabled').format(testId))
    # TODO: need to find out ids of languages current user speaks
    languageIds = [1, 2, 3, 4]
    record = TestManager.report_eligibility(test, me, languageIds)
    if not record.get('url'):
        raise InvalidUsage(
            _('user {0} is not eligible for test {1}').format(
                me.userId, testId))
    sheets = m.Sheet.query.filter_by(testId=testId).filter_by(
        userId=me.userId).order_by(m.Sheet.nTimes.desc()).all()
    # NOTE: uncomment this block if nTimes needs to be fixed automatically
    # for i, sheet in enumerate(sheets):
    #     if sheet.nTimes != i:
    #         sheet.nTimes = i
    # NOTE(review): sheets are ordered by nTimes descending but [-1]
    # selects the last element, i.e. the lowest nTimes — confirm this is
    # intended (vs. resuming the most recent attempt)
    if sheets and sheets[-1].status == m.Sheet.STATUS_ACTIVE:
        return jsonify(sheetId=sheets[-1].sheetId)
    now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    sheet = m.Sheet(userId=me.userId, testId=testId, nTimes=len(sheets),
        tStartedAt=now, tExpiresBy=(now + test.timeLimit))
    SS.add(sheet)
    SS.flush()  # populate sheet.sheetId for the entries below
    for i, question in enumerate(TestManager.generate_questions(test)):
        entry = m.SheetEntry(sheetId=sheet.sheetId, index=i,
            questionId=question.questionId)
        sheet.entries.append(entry)
    return jsonify(sheetId=sheet.sheetId)
def update_sub_task_worker_settings(subTaskId, userId):
    ''' updates (or creates) the per-worker settings of the given sub task '''
    subTask = m.SubTask.query.get(subTaskId)
    if subTask is None:
        raise InvalidUsage(_('sub task {0} not found').format(subTaskId), 404)
    user = m.User.query.get(userId)
    if user is None:
        raise InvalidUsage(_('user {0} not found').format(userId), 404)
    if not subTask._workType.workable:
        raise InvalidUsage("no workers allowed for this sub task", 400)
    toFloat = lambda data, key, value: float(value)
    data = MyForm(
        Field('hasReadInstructions', validators=[
            validators.is_bool,
        ]),
        Field('isNew', validators=[
            validators.is_bool,
        ]),
        Field('paymentFactor', normalizer=toFloat, validators=[
            (validators.is_number, (), dict(min_value=0)),
        ]),
        Field('removed', validators=[
            validators.is_bool,
        ]),
    ).get_data()
    worker = m.TaskWorker.query.get((userId, subTask.taskId, subTaskId))
    if worker is None:
        # NOTE(review): userId/subTaskId are presumably injected into
        # data by get_data() from the URL view args — confirm
        worker = m.TaskWorker(taskId=subTask.taskId, **data)
        SS.add(worker)
    else:
        for key, value in data.items():
            setattr(worker, key, value)
    SS.flush()
    return jsonify({
        'worker': m.TaskWorker.dump(worker),
    })
def update_country(self):
    '''
    Applies an EDM country-change notification to the local database;
    when the country does not exist locally it is fetched from EDM and
    inserted.
    '''
    desc = json.loads(self.Message)
    iso3 = desc['iso3']
    try:
        country = m.Country.query.filter(m.Country.iso3 == iso3).one()
        data = util.edm.decode_changes('Country', desc['changes'])
        current_app.logger.info('found country {}, applying changes {}'.format(
            country.name, data))
        applied = {}
        for field, value in data.items():
            try:
                if getattr(country, field) != value:
                    setattr(country, field, value)
                    applied[field] = value
            except AttributeError:
                # skip fields the local model doesn't know about
                continue
        current_app.logger.debug('actual changes {}'.format(applied))
        SS.flush()
        SS.commit()
    except sqlalchemy.orm.exc.NoResultFound:
        SS.rollback()
        current_app.logger.info(
            'country {} not found, get country from edm'.format(iso3))
        result = util.edm.get_country(iso3)
        country = m.Country(
            name=result['name_eng'],
            iso2=result['iso2'],
            iso3=iso3,
            isoNum=result['iso_num'],
            internet=result['internet'],
            active=result['active'],
        )
        SS.add(country)
        SS.flush()
        SS.commit()
        current_app.logger.info('country {} is added locally'.format(
            country.name))
    return
def update_language(self):
    '''
    Applies an EDM language-change notification to the local database;
    when the language does not exist locally it is fetched from EDM and
    inserted.
    '''
    desc = json.loads(self.Message)
    iso3 = desc['iso3']
    try:
        lang = m.Language.query.filter(m.Language.iso3 == iso3).one()
        data = util.edm.decode_changes('Language', desc['changes'])
        current_app.logger.info(
            'found language {}, applying changes {}'.format(lang.name, data))
        applied = {}
        for field, value in data.items():
            try:
                if getattr(lang, field) != value:
                    setattr(lang, field, value)
                    applied[field] = value
            except AttributeError:
                # skip fields the local model doesn't know about
                continue
        current_app.logger.debug('actual changes {}'.format(applied))
        SS.flush()
        SS.commit()
    except sqlalchemy.orm.exc.NoResultFound:
        SS.rollback()
        current_app.logger.info(
            'language {} not found, get language from edm'.format(iso3))
        result = util.edm.get_language(iso3)
        lang = m.Language(
            name=result['name_eng'],
            iso2=result['iso2'],
            iso3=iso3,
            active=result['active'],
        )
        SS.add(lang)
        SS.flush()
        SS.commit()
        current_app.logger.info('language {} is added locally'.format(
            lang.name))
    return
def load_tx_file(self, filespec, srcSubTask, fakeUser, dstSubTask):
    '''
    Loads utterances from a tx extract file: creates one fake work entry
    (attributed to fakeUser) per utterance under srcSubTask, applies its
    labels, then batches the raw pieces into dstSubTask.
    '''
    self.load_raw_piece_ids()
    seen = OrderedDict()  # ordered de-duplication of raw piece ids
    # create fake work entries
    for utt in iter_utts(filespec):
        rawPieceId, assemblyContext = self.get_utt_info(utt)
        labelIds = self.get_applied_labels(utt)
        result = self.tx_parser.parse(utt['TRANSCRIPTION'])
        seen[rawPieceId] = rawPieceId
        entry = m.WorkEntry(
            rawPieceId=rawPieceId,
            taskId=self.taskId,
            result=result,
            subTaskId=srcSubTask.subTaskId,
            batchId=-1,
            pageId=-1,
            workTypeId=srcSubTask.workTypeId,
            userId=fakeUser.userId,
        )
        SS.add(entry)
        SS.flush()  # populate entry.entryId for the labels below
        for labelId in labelIds:
            SS.add(m.AppliedLabel(entryId=entry.entryId, labelId=labelId))
    rawPieceIds = list(seen.keys())
    # populate destination rework sub task
    batches = Batcher.batch(dstSubTask, rawPieceIds)
    for batch in batches:
        SS.add(batch)
    return {
        'itemCount': len(rawPieceIds),
        'batches': batches,
    }
def migrate_project(projectId):
    ''' migrates project from pdb database '''
    candidate = m.PdbProject.query.get(projectId)
    if candidate is None:
        raise InvalidUsage(_('project {0} not found').format(projectId), 404)
    project = m.Project.query.get(projectId)
    if project is not None:
        message = _('project {0} already exists').format(projectId)
    else:
        project = m.Project(projectId=candidate.projectId,
            name=candidate.name,
            _migratedByUser=session['current_user'])
        SS.add(project)
        SS.flush()
        message = _('project {0} successfully migrated').format(projectId)
        # as an alternative, we do following to trigger flush()
        #project = m.Project.query.get(projectId)
    return jsonify({
        'message': message,
        'project': m.Project.dump(project),
    })
def calculate_payment(self, payrollId):
    '''
    Computes one CalculatedPayment per payable (interval, user) pair,
    links each payable event to its payment, and returns the payments.

    Fixes: removed a no-op `try/except: raise` wrapper around the country
    ratio lookup (a bare except that immediately re-raises changes
    nothing); `!= None` replaced with `is not None`; the user row was
    fetched twice — now fetched once.
    '''
    payments = []
    for interval, userId, events in self.iter_payable_items():
        qa = self.get_qa_result(interval, userId)
        paymentFactor = self.get_payment_factor(userId)
        itemCount = len(events)
        unitCount = 0
        totalAmount = 0.0
        for event in events:
            rate = self.get_pay_rate(event.created)
            if not rate:
                # no rate configured at event time: event is not payable
                continue
            units = self.get_units(event.rawPieceId, event.workEntryId)
            unitCount += units
            full_amount = ((units if self.subTask.payByUnit else 1)
                * rate.multiplier * paymentFactor)
            real_amount = self.adjust_by_accuracy(rate, qa['accuracy'],
                full_amount)
            totalAmount += real_amount
        # add bonus if configured
        if self.subTask.bonus is not None and self.subTask.bonus > 0:
            totalAmount *= (1.0 + self.subTask.bonus)
        user = m.User.query.get(userId)
        # adjust amount according to country ratio if required
        if self.subTask.useWorkRate:
            # NOTE: raises if user.countryId is null
            ratio = CountryRatioLookupTable.get_ratio(user.countryId)
            totalAmount *= ratio
        if user.paymentType in (
                #m.User.PAYMENT_TYPE_CLIENT,
                m.User.PAYMENT_TYPE_INTERNAL,
                ):
            # internal users are not paid
            totalAmount = 0
        cp = m.CalculatedPayment(
            payrollId=payrollId,
            workIntervalId=interval.workIntervalId,
            userId=userId,
            taskId=self.subTask.taskId,
            subTaskId=self.subTask.subTaskId,
            itemCount=itemCount,
            unitCount=unitCount,
            qaedItemCount=qa['qaedItemCount'],
            qaedUnitCount=qa['qaedUnitCount'],
            accuracy=qa['accuracy'],
            amount=totalAmount,
            originalAmount=totalAmount,
            receipt=None,
            #updated=False,
        )
        SS.add(cp)
        SS.flush()  # populate cp.calculatedPaymentId for back-links
        for event in events:
            event.calculatedPaymentId = cp.calculatedPaymentId
        payments.append(cp)
    return payments
def save_work_entry(batchId):
    '''
    Saves one work entry for a page member of the given batch.

    Two paths: AUDIO_CHECKING tasks store performance/recording feedback
    and metadata alongside the entry; all other task types store a result
    with optional labels and (for QA work) errors. Both paths finish by
    recording a PayableEvent and returning the saved entry and event.
    '''
    batch = m.Batch.query.get(batchId)
    if not batch:
        raise InvalidUsage(_('batch {0} not found').format(batchId))
    me = session['current_user']
    if batch.userId != me.userId:
        raise InvalidUsage(
            _('batch {0} is not owned by user {1}').format(batchId, me.userId))
    if batch.isExpired:
        raise InvalidUsage(_('batch {0} has expired already').format(batchId))
    subTask = batch.subTask
    # identify which page member this submission is for
    common_data = MyForm(
        Field('pageId', is_mandatory=True, validators=[
            (check_page_existence, (batchId, )),
        ]),
        Field('memberIndex', is_mandatory=True, validators=[
            check_member_existence,
        ]),
    ).get_data()
    memberEntry = m.PageMemberEntry.query.get(
        (common_data['pageId'], common_data['memberIndex']))
    assert memberEntry  # validators above guarantee existence
    ipAddress = request.environ['REMOTE_ADDR']
    # audio checking tasks
    if batch.task.is_type(TaskType.AUDIO_CHECKING):
        # validate expected data
        data = MyForm(
            Field("performance", is_mandatory=True, validators=[
                simple_validators.is_dict(
                    mandatory_keys=["metaValues"],
                    optional_keys=["comment", "flags"]),
            ]),
            Field("recordings", is_mandatory=True, validators=[
                simple_validators.is_dict(
                    key_validator=Recording.check_exists),
            ]),
        ).get_data()
        performance = Performance.query.get(memberEntry.rawPieceId)
        rawPieceId = memberEntry.rawPieceId
        # update performance metadata
        meta_values = process_received_metadata(
            data["performance"]["metaValues"],
            performance.recording_platform.performance_meta_categories,
            expect_saved_value=True)
        resolve_new_metadata(performance, meta_values, me,
            AudioCheckingChangeMethod.WORK_PAGE)
        # performance feedback entry
        if data["performance"].get("comment") or data["performance"].get(
                "flags"):
            entry = performance.add_feedback(
                me, AudioCheckingChangeMethod.WORK_PAGE,
                data["performance"].get("comment"),
                data["performance"].get("flags", []))
            db.session.add(entry)
        # all recording feedback entries
        for recording_id, recording_data in data["recordings"].items():
            recording = Recording.query.get(recording_id)
            # every submitted recording must belong to this performance
            assert recording.performance == performance
            if recording_data.get("comment") or recording_data.get("flags"):
                entry = recording.add_feedback(
                    me, AudioCheckingChangeMethod.WORK_PAGE,
                    recording_data.get("comment"),
                    recording_data.get("flags", []))
                db.session.add(entry)
        # add new Entry; the raw request payload is stored as the result
        # NOTE(review): this branch uses db.session while the other uses
        # SS — presumably aliases of the same session; confirm
        newEntry = m.BasicWorkEntry(
            **{
                'rawPieceId': rawPieceId,
                "result": json.dumps(request.json),
                'batchId': batchId,
                'taskId': batch.taskId,
                'subTaskId': batch.subTaskId,
                'workTypeId': batch.subTask.workTypeId,
                'userId': me.userId,
                'pageId': common_data['pageId']
            })
        db.session.add(newEntry)
        db.session.flush()
        # no QA done
        qaedEntryId = None
        qaedEntry = None
        qaedUserId = None
    else:
        # other task types
        data = MyForm(
            Field('task', is_mandatory=True, default=batch.task),
            Field('result', is_mandatory=True, default=''),
            Field('labels', is_mandatory=True, default=[],
                normalizer=normalize_label_ids),
            Field('errors', is_mandatory=True, default=[],
                normalizer=normalize_error_type_ids),
        ).get_data()
        if subTask.workType == m.WorkType.QA:
            # QA entries point back at the entry being reviewed
            qaedEntryId = memberEntry.workEntryId
            qaedEntry = m.WorkEntry.query.get(qaedEntryId)
            qaedUserId = qaedEntry.userId
            rawPieceId = qaedEntry.rawPieceId
            taskErrorTypeById = dict([(i.errorTypeId, i)
                for i in m.TaskErrorType.query.filter_by(
                    taskId=batch.taskId).all()])
        else:
            qaedEntryId = None
            qaedEntry = None
            qaedUserId = None
            rawPieceId = memberEntry.rawPieceId
            taskErrorTypeById = {}
            # errors only apply to QA work
            data['errors'] = []
        # add new Entry
        newEntry = m.BasicWorkEntry(
            **{
                # 'entryId': None, # TBD
                # 'created': None, # auto-fill
                'result': data['result'],
                'rawPieceId': rawPieceId,
                'batchId': batchId,
                'taskId': batch.taskId,
                'subTaskId': batch.subTaskId,
                'workTypeId': batch.subTask.workTypeId,
                'userId': me.userId,
                # 'notes': None,
                'qaedUserId': qaedUserId,
                'qaedEntryId': qaedEntryId,
                'pageId': common_data['pageId']
            })
        SS.add(newEntry)
        SS.flush()  # populate newEntry.entryId for labels/errors below
        # add labels
        for labelId in data['labels']:
            SS.add(m.AppliedLabel(entryId=newEntry.entryId, labelId=labelId))
        # add errors, (if applicable)
        for errorTypeId in data['errors']:
            e = taskErrorTypeById.get(errorTypeId, None)
            SS.add(
                m.AppliedError(entryId=newEntry.entryId,
                    errorTypeId=e.errorTypeId, severity=e.severity))
    # add payable event
    is_qa = subTask.workType == m.WorkType.QA
    event = m.PayableEvent(
        **{
            # 'eventId': None, # TBD
            'userId': me.userId,
            'taskId': batch.taskId,
            'subTaskId': batch.subTaskId,
            # 'created': None, # auto-fill
            'batchId': batchId,
            'pageId': common_data['pageId'],
            # QA events are keyed by the reviewed entry, not a raw piece
            'rawPieceId': None if is_qa else rawPieceId,
            'workEntryId': qaedEntryId,
            'calculatedPaymentId': None, # explicitly set to NULL
            'localConnection': is_local_address(ipAddress),
            'ipAddress': ipAddress,
            'ratio': 1.0,
        })
    SS.add(event)
    SS.flush()
    # re-fetch through the polymorphic WorkEntry mapper before dumping
    newEntry = m.WorkEntry.query.get(newEntry.entryId)
    return jsonify(
        entry=m.WorkEntry.dump(newEntry),
        event=m.PayableEvent.dump(event),
    )