def commit_allocations(request):
    """AJAX endpoint: commit a batch of question/answerer allocations.

    Expects a POST with parallel lists 'question_ids[]' and 'allocs[]'
    (each alloc is a comma-separated list of answerer user ids), plus
    'batch_id' and 'price'. Charges the asker for the batch, creates an
    Assignment per (question, answerer) pair, marks the batch allocated
    and triggers market-stat logging / dynamic repricing when enabled.

    Returns a JSON response with 'status' ('success'/'failure') and 'msg'.
    Non-AJAX or non-POST requests fall through and return None, as before.
    """
    if request.is_ajax() and request.method == 'POST':
        user = request.user
        question_ids = request.POST.getlist('question_ids[]', False)
        allocs = request.POST.getlist('allocs[]', False)
        batch_id = request.POST.get('batch_id', False)
        price = request.POST.get('price', False)
        domain_id = None
        response = dict()
        if not all([allocs, question_ids, batch_id, price]):
            response['status'] = 'failure'
            response['msg'] = 'Missing inputs.'
            return dict_to_json_response(response)
        if len(question_ids) != len(allocs):
            response['status'] = 'failure'
            response['msg'] = 'Numbers of questions and allocations do not match.'
            return dict_to_json_response(response)
        batch = Batch.objects.get(pk=int(batch_id))
        try:
            user.pay_out(batch, float(price))
        except InsufficientFundsException as e:
            response['status'] = 'failure'
            response['msg'] = str(e)  # TODO: change this.
            return dict_to_json_response(response)
        # Walk the parallel question/allocation lists in lockstep.
        for qid, alloc in zip(question_ids, allocs):
            q = BaseQuestion.objects.get(pk=int(qid))
            if domain_id is None:
                domain_id = q.domain_id
            for uid in alloc.split(','):
                assn = Assignment()
                assn.answerer = User.objects.get(pk=int(uid))
                assn.question = q
                assn.agreed_price = get_overview_record(assn.answerer, q.domain)['price']
                # BUGFIX: the field is named 'completed' everywhere else in
                # this file; 'complete' silently set a non-model attribute,
                # so the flag was never persisted.
                assn.completed = False
                assn.save()
                # remove after initial demo
                profile = assn.answerer.get_profile()
                profile.has_been_assigned = True
                profile.save()
        batch.is_allocated = True
        batch.save()
        # TODO: get rid of this.
        assert domain_id is not None
        if settings.LOG_MARKET_STATS:
            log_market_stats(domain_id)
        if settings.DYNPRICING:
            # update prices in level records
            dp.update_prices()
            # update prices in allocation stems
            allc.update_prices(domain_id)
        response['status'] = 'success'
        response['msg'] = 'Committed successfully. Redirecting...'
        return dict_to_json_response(response)
def handle_noargs(self, **options): print 'creating data-tamer domain...' domain = Domain() domain.short_name = 'data-tamer' domain.long_name = 'Data Tamer Questions' domain.description = 'Schema mapping questions over golf course data.' domain.save() print 'creating levels...' for x in range(1, 4): level = Level(domain=domain, level_number=x, confidence_upper_bound=(.6 + x*.1)) level.save() level = Level(domain=domain, level_number=4, confidence_upper_bound=1) level.save() print 'creating fake asker...' asker = User.objects.create_user('ask', '', 'test') asker.save() profile = UserProfile(user=asker, user_class='ASK', bank_balance=1000) profile.save() answerers = [] print 'creating fake answerers...' expected_percent_correct = {} for name in generate_names(): u = User.objects.create_user(name, '', 'test') answerers.append(u) expected_percent_correct[u] = get_expected_correct_rate() print 'creating wolfgang...' wg = User.objects.create_user('wolfgang', '', 'test') answerers.append(wg) expected_percent_correct[wg] = 1 for user in answerers: user.save() profile = UserProfile(user=user, user_class='ANS', bank_balance=0) profile.save() temp_acc = TempAccuracy(user=user, accuracy=expected_percent_correct[user]) temp_acc.save() e = Expertise(user=profile, domain=domain, question_quota=40000) e.save() print 'creating a phony data tamer application' app = Application() app.name = 'data-tamer' app.db_alias = settings.TAMER_DB app.ui_url = settings.TAMER_URL app.save() print 'creating training question type' train = QuestionType() train.long_name = 'Training questions' train.short_name = 'training' train.app = app train.question_class = ContentType.objects.get(app_label='ui', model='nrtrainingquestion') train.answer_class = ContentType.objects.get(app_label='ui', model='nrtraininganswer') train.review_class = ContentType.objects.get(app_label='ui', model='nrtrainingreview') train.save() print 'creating automated schema map question type' schemamap = QuestionType() 
schemamap.long_name = 'Schema mapping questions' schemamap.short_name = 'schemamap' schemamap.app = app schemamap.question_class = ContentType.objects.get(app_label='ui', model='schemamapquestion') schemamap.answer_class = ContentType.objects.get(app_label='ui', model='schemamapanswer') schemamap.review_class = ContentType.objects.get(app_label='ui', model='schemamapreview') schemamap.save() generate_and_insert(10) print 'prime dynamic pricing alogorithm...' dp.update_prices() print 'deleting market history' cur = connection.cursor() cur.execute('delete from market_snap;') cur.connection.commit() print 'creating initial market log' log_market_stats(domain.id) if USE_FAKE_ACCURACY: print 'done' return print 'creating some phony questions...' for x in range(0, NUMBER_OF_QUESTIONS): question = NRTrainingQuestion(asker=asker, domain=domain, question_type=train) question.save() choice_set = [] number_of_choices = random.randint(3, 6) correct_choice_idxs = [x for x in range(0, number_of_choices)] # select the choices that are "true" with frequency specified above for x in range(0, number_of_choices): rand = random.random() if rand > AVERAGE_PERCENT_TRUE: correct_choice_idxs.remove(x) # make the choice rows for x in range(0, number_of_choices): choice = NRTrainingChoice(question=question) choice.save() choice_set.append(choice) # need to make a copy as we will be randomly assigning answerers to questions # and removing them as we go to ensure that the same user isn't assigned # twice to the same question. copy_of_answerers = list(answerers) # modify the range of random.randint to get differently-sized allocations. 
for x in range(0, random.randint(3,6)): answerer = random.choice(copy_of_answerers) copy_of_answerers = filter(lambda a: a.id != answerer.id, answerers) assgn = Assignment(answerer=answerer, question=question, completed=False) assgn.save() # now have the user supply answers to questions for x in range(0, number_of_choices): choice = choice_set[x] # flip weighted coin to determine correctness is_correct=False rand = random.random() if rand <= expected_percent_correct[answerer]: is_correct=True is_match = False if is_correct and x in correct_choice_idxs: is_match = True answer = NRTrainingAnswer(answerer=answerer, question=question, confidence=1, authority=1) answer.save() assgn.completed = True assgn.save() # here, every "answer" entry is reviewed immediately after it is # submitted. as the answers are submitted in bulk, this will never # happen in a real deployment review = NRTrainingReview(reviewer=asker, answer=answer, is_correct=is_correct, confidence=1, authority=1) review.save() print 'sending training questions three weeks into the past...' cur = connection.cursor() cur.execute('select * from time_warp_training_questions()') cur.connection.commit() #create alloc stems and initial prices # print 'setting level prices...' # update_prices() print 'done!'
def do_batch(reprice=False): batch_mu = CONFIG['batch_size_mean'] batch_sigma = CONFIG['batch_size_stddev'] min_price_prob = CONFIG['min_price_prob'] conf_mu = CONFIG['confidence_req_mean'] conf_sigma = CONFIG['confidence_req_stddev'] price_mu = CONFIG['max_price_mean'] price_sigma = CONFIG['max_price_stddev'] # if random.random() < min_price_prob: # method = 'min_price' # crit = sigmoid(random.gauss(conf_mu, conf_sigma)) # else: # method = 'max_conf' # crit = random.gauss(price_mu, price_sigma) batch_size = int(max([1, math.ceil(random.gauss(batch_mu, batch_sigma))])) # get answerers assgns = db.create_assignments(DOMAIN.id, .9, batch_size, 'min_price') if not assgns: print "failing over to max_conf" assgns = db.create_assignments(DOMAIN.id, 10000, batch_size, 'max_conf') if not assgns: return # create questions questions = {} for x in range(batch_size): q = NRTrainingQuestion(asker=ASKER, domain=DOMAIN, question_type=TYPE) q.save() true = NRTrainingChoice(question=q) true.save() false = NRTrainingChoice(question=q) false.save() assgn = assgns[x] ALLOCATIONS.append(assgn.get_dict()) for u in assgn.members: keymatch = lambda z: z.id == u user_rec = filter(keymatch, USERS.keys())[0] user_acc = USERS[user_rec] a = Assignment(answerer=user_rec, question=q, completed=True) a.save() chosen = false if random.random() <= user_acc: chosen = true answer = NRTrainingAnswer(answerer=user_rec, question=q, confidence=1., authority=1.) answer.save() review = NRTrainingReview(reviewer=ASKER, answer=answer, is_correct=(chosen == true), confidence=1., authority=1.) review.save() if reprice: dp.update_prices() allocs.update_prices(DOMAIN.id) db.log_market_stats(DOMAIN.id) take_system_snapshot() return True