def admin_preview_task(request, experiment_id, override, hit_id=None):
    if hit_id:
        hit = get_object_or_404(MtHit, id=hit_id)
    else:
        hits = MtHit.objects \
            .filter(hit_type__experiment_id=experiment_id) \
            .order_by('-num_assignments_completed', '?')[:1]
        try:
            hit = hits[0]
        except IndexError:
            try:
                e = Experiment.objects.get(id=experiment_id)
                return html_error_response(
                    request,
                    'There are no HITs created for this experiment yet. '
                    'Experiment id: %s, slug: "%s", title: "%s".' % (
                        e.id, e.slug, e.new_hit_settings.title))
            except Experiment.DoesNotExist:
                return html_error_response(
                    request,
                    'This experiment does not exist. Experiment id: %s.' %
                    experiment_id)
    return external_task(request, experiment_id=experiment_id, hit=hit,
                         override=override)
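# A minimal URL-wiring sketch for the preview view above.  The regexes and URL
# names are illustrative assumptions, not the project's actual routes; only the
# view signature (experiment_id, override, optional hit_id) is taken from the
# code above.
#
#     from django.conf.urls import url
#
#     urlpatterns = [
#         url(r'^admin/preview/(?P<experiment_id>\d+)/(?P<override>\w+)/$',
#             admin_preview_task, name='admin-preview-task'),
#         url(r'^admin/preview/(?P<experiment_id>\d+)/(?P<override>\w+)/'
#             r'(?P<hit_id>[^/]+)/$',
#             admin_preview_task, name='admin-preview-task-hit'),
#     ]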
def external_task_browser_check(request):
    if request.method == "GET":
        valid_browser = False
        if 'HTTP_USER_AGENT' in request.META:
            ua = user_agent_parser.Parse(request.META['HTTP_USER_AGENT'])
            if ua['user_agent']['family'].lower() in ('firefox', 'chrome'):
                device = ua['device']
                if 'is_mobile' not in device or not device['is_mobile']:
                    valid_browser = True
        if not valid_browser:
            return html_error_response(
                request, '''
                This task requires Google Chrome.
                <br/><br/>
                <a class="btn" href="http://www.google.com/chrome/"
                   target="_blank">Get Google Chrome</a>
                ''')
    return None
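# For reference, ua_parser's user_agent_parser.Parse() returns a nested dict
# roughly like the following (values illustrative):
#
#     {'user_agent': {'family': 'Chrome', 'major': '46', ...},
#      'os': {'family': ..., ...},
#      'device': {'family': 'Other', ...},
#      'string': '<raw user agent>'}
#
# Depending on the ua_parser version, the device dict may or may not include an
# 'is_mobile' flag; the check above deliberately treats a missing flag as
# "not a mobile device".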
def external_task_prepare_context(request, experiment_id, hit, override):
    """ Fetch hit, experiment, assignment, worker, etc.  Returns either a
    dictionary on success, or a response (or exception) if there is some
    error. """

    # obtain HIT
    if hit is None:
        if 'hitId' not in request.GET:
            if request.user.is_staff:
                return html_error_response(
                    request, 'HIT ID missing from GET parameters')
            else:
                raise Http404
        hit_id = request.GET['hitId']
        try:
            hit = MtHit.objects \
                .select_related(
                    'hit_type__experiment',
                    'hit_type__experiment_settings',
                    'hit_type__requirements') \
                .get(id=hit_id)
        except MtHit.DoesNotExist:
            # if this HIT cannot be found, tell Amazon about it
            if (override is None and not request.user.is_staff and
                    'assignmentId' in request.GET and
                    'workerId' in request.GET and
                    'turkSubmitTo' in request.GET):
                expire_hit_task.delay(hit_id)
            raise Http404

    # obtain experiment
    experiment = hit.hit_type.experiment
    if experiment.id != int(experiment_id):
        if request.user.is_staff:
            return html_error_response(
                request, 'Experiment ID (%s) does not match HIT (%s)' % (
                    experiment_id, experiment.id))
        else:
            raise Http404

    # obtain worker and assignment
    worker = get_or_create_mturk_worker_from_request(request)
    assignment_dirty = False
    if worker and 'assignmentId' in request.GET:
        assignment, _ = MtAssignment.objects.get_or_create(
            id=request.GET['assignmentId'],
            defaults={'hit': hit, 'worker': worker})
        if assignment.hit != hit or assignment.worker != worker:
            assignment.hit = hit
            assignment.worker = worker
            assignment_dirty = True
    else:
        assignment = None
        worker = None

    # obtain worker info specific to the experiment and worker
    if experiment and worker:
        experiment_worker, _ = ExperimentWorker.objects.get_or_create(
            experiment=experiment, worker=worker)
    else:
        experiment_worker = None

    # don't let blocked workers perform our tasks
    if (worker and worker.blocked) or \
            (experiment_worker and experiment_worker.blocked):
        message = ("Your submissions are too low quality. "
                   "Please stop doing our tasks.")
        if experiment_worker and experiment_worker.blocked_reason:
            message += "<br/><br/>" + experiment_worker.blocked_reason
        elif worker and worker.blocked_reason:
            message += "<br/><br/>" + worker.blocked_reason
        return html_error_response(request, message)

    # fetch contents
    hit_contents = fetch_hit_contents(hit)
    if override and 'publishable' in request.GET:
        hit_contents = [x for x in hit_contents if x and x.publishable()]
    if not hit.num_contents or not hit_contents:
        # (also test hit.num_contents since it is only set after the last
        # content is added)
        return html_error_response(
            request, "Somehow there are no items in this HIT.")

    # fetch test (sentinel) contents
    if experiment_worker:
        if assignment.num_test_contents is None:
            n = experiment.test_contents_per_assignment
            if n > 0:
                # select new test contents from the set of possible contents
                # (that the user has not already answered)
                test_content_wrappers = experiment.test_contents.all() \
                    .exclude(responses__experiment_worker=experiment_worker) \
                    .order_by('-priority')[:n]
                # register chosen items with assignment
                assignment.test_contents.add(*test_content_wrappers)
            else:
                test_content_wrappers = []
            assignment.num_test_contents = len(test_content_wrappers)
            assignment_dirty = True
        elif assignment.num_test_contents > 0:
            # re-fetch existing contents
            test_content_wrappers = assignment.test_contents.all()
        else:
            test_content_wrappers = []

        # fetch objects from inside the wrappers
        if test_content_wrappers:
            test_contents = fetch_content_tuples([
                (x.content_type_id, x.object_id)
                for x in test_content_wrappers
            ])
        else:
            test_contents = []
    else:
        test_contents = []
        test_content_wrappers = []

    # shuffle together (some tasks may sort contents again in javascript)
    contents = hit_contents + test_contents
    random.shuffle(contents)

    # prepare context data
    context = {
        'hit': hit,
        'assignment': assignment,
        'worker': worker,
        'experiment': experiment,
        'experiment_id': experiment_id,
        'experiment_worker': experiment_worker,
        'slug': experiment.slug,
        'hit_contents': hit_contents,
        'test_content_wrappers': test_content_wrappers,
        'test_contents': test_contents,
        'contents': contents,
        'num_contents': len(contents),
        'num_contents_predicted': (
            len(hit_contents) + experiment.test_contents_per_assignment),
        'override': override,
    }
    if len(contents) == 1:
        context['content'] = contents[0]
    if experiment.version >= 2:
        # old experiments (version 1) don't use this
        context['contents_json'] = json.dumps(
            [c.get_entry_dict() for c in contents])
    # list of ids as json
    context['content_id_json'] = json.dumps(
        [{'id': c.id} for c in contents])

    # requirements
    for req in hit.hit_type.requirements.values('name', 'value'):
        context[req['name']] = req['value']

    if assignment_dirty:
        assignment.save()

    return context
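# Illustrative caller sketch, not the project's real external_task() view
# (that view lives elsewhere in this app).  It only demonstrates the contract
# documented in the docstring above: a dict means success, anything else is an
# already-built error response.  The template path is a placeholder assumption.
def _example_external_task(request, experiment_id, hit=None, override=None):
    context = external_task_prepare_context(
        request, experiment_id, hit, override)
    if not isinstance(context, dict):
        # html_error_response() already produced an HttpResponse
        return context
    return render(request, 'mturk/external_task.html', context)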
def task_quality(request, dataset_id='all'):
    # replace this with a fetch from your database
    if request.method == 'POST':
        data = request.REQUEST
        if u'results' not in data:
            return json_error_response(u'No results')

        results = json.loads(data[u'results'])
        time_ms = json.loads(data[u'time_ms'])
        time_active_ms = json.loads(data[u'time_active_ms'])
        ids = results.keys()

        user, _ = UserProfile.objects.get_or_create(user=request.user)
        PersonSegmentationQuality.mturk_submit(
            user, PersonSegmentation.objects.filter(id__in=ids),
            results, time_ms, time_active_ms, data[u'version'])
        return json_success_response()
    else:
        segmentations_filter = {'qualities': None}
        if dataset_id != 'all':
            dataset_id = int(dataset_id)
            segmentations_filter = dict_union(
                segmentations_filter,
                {'task__person__photo__dataset_id': dataset_id})

        segmentations = PersonSegmentation.objects.filter(
            **segmentations_filter)
        if segmentations:
            # pick random not-yet-annotated pictures
            # contents = [segmentations[np.random.randint(len(segmentations))]]
            contents = sample(segmentations, min(50, segmentations.count()))
            context = {
                # the current task
                u'contents_json': json.dumps(
                    [c.get_entry_dict() for c in contents]),
                u'content_id_json': json.dumps(
                    [{'id': c.id} for c in contents]),
                u'contents': contents,
                # if 'true', ask the user to take a feedback survey at the end
                # and promise payment to complete it.  Must be 'true' or 'false'.
                u'ask_for_feedback': 'false',
                # feedback_bonus is the payment in dollars that we promise
                # users for completing feedback
                u'feedback_bonus': 0.0,
                # template containing html for the instructions
                u'instructions':
                    'segmentation/experiments/quality_segmentation_inst_content.html',
                u'content_thumb_template':
                    'segmentation/experiments/quality_segmentation_thumb.html',
            }
            external_task_extra_context('segment_quality', context)
            return render(
                request,
                u'segmentation/experiments/quality_segmentation.html',
                context)
        else:
            return html_error_response(request, 'All segmentations are marked.')
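# Example of the POST payload task_quality() expects (all values are
# illustrative assumptions; the exact per-item result format is defined by the
# client-side task code and PersonSegmentationQuality.mturk_submit()):
#
#     results        = '{"1532": true, "1533": false}'  # keyed by PersonSegmentation id
#     time_ms        = '{"1532": 41230, "1533": 9800}'
#     time_active_ms = '{"1532": 38000, "1533": 9100}'
#     version        = '1.0'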
def task_segment(request, dataset_id='all', part=False):
    # replace this with a fetch from your database
    if request.method == 'POST':
        data = request.REQUEST
        if u'results' not in data:
            return json_error_response(u'No results')

        results = json.loads(data[u'results'])
        time_ms = json.loads(data[u'time_ms'])
        time_active_ms = json.loads(data[u'time_active_ms'])
        ids = results.keys()

        user = UserProfile.objects.get(user=request.user)
        PersonSegmentation.mturk_submit(
            user, PersonSegmentationTask.objects.filter(id__in=ids),
            results, time_ms, time_active_ms, data[u'version'])
        return json_success_response()
    else:
        response = external_task_browser_check(request)
        if response:
            return response

        task_filter = {'responses__isnull': True}
        if dataset_id != 'all':
            dataset_id = int(dataset_id)
            task_filter = dict_union(
                task_filter, {'person__photo__dataset_id': dataset_id})
        task_filter = dict_union(task_filter, {'part__isnull': not part})

        # only include tasks that have not received any response yet; tasks
        # with responses are usually the ones turkers worked on first.
        tasks = (
            PersonSegmentationTask.objects.filter(**task_filter)
            # .exclude(responses__qualities__isnull=True)
            # .exclude(responses__qualities__correct=True)
        )

        if part:
            instructions = u'segmentation/experiments/segment_part_person_inst_content.html'
            template = u'segmentation/experiments/segment_part_person.html'
        else:
            instructions = u'segmentation/experiments/segment_person_inst_content.html'
            template = u'segmentation/experiments/segment_person.html'

        if tasks:
            # pick a random not-yet-annotated picture
            contents = sample(tasks, min(1, tasks.count()))
            context = {
                # the current task
                u'contents_json': json.dumps(
                    [c.get_entry_dict(True) for c in contents]),
                u'content_id_json': json.dumps(
                    [{'id': c.id} for c in contents]),
                u'contents': contents,
                # if 'true', ask the user to take a feedback survey at the end
                # and promise payment to complete it.  Must be 'true' or 'false'.
                u'ask_for_feedback': 'false',
                # feedback_bonus is the payment in dollars that we promise
                # users for completing feedback
                u'feedback_bonus': 0.02,
                # template containing html for the instructions
                u'instructions': instructions,
            }
            return render(request, template, context)
        else:
            return html_error_response(
                request, 'All images are already segmented.')