Example #1
def bsdf_wd_vote(request):
    """ Record an admin score for a 'wd'-version BSDF label. """
    v = 'wd'
    id = request.POST['id']
    score = request.POST['score']
    bsdf_model = get_model('bsdfs', 'ShapeBsdfLabel_' + v)
    bsdf_model.objects.filter(id=id).update(admin_score=score)
    return json_success_response()
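All of these examples end by calling json_success_response() (and several call json_error_response(...)), but neither helper appears in the listing. A minimal sketch of what they plausibly look like, assuming a simple JSON envelope; the exact field names are a guess:

import json

from django.http import HttpResponse


def json_response(data):
    # Serialize data and wrap it in an HTTP 200 JSON response.
    return HttpResponse(json.dumps(data), content_type='application/json')


def json_success_response():
    # Hypothetical success envelope; the real helper is not shown here.
    return json_response({'result': 'success'})


def json_error_response(message):
    # Hypothetical error envelope carrying a human-readable message.
    return json_response({'result': 'error', 'message': message})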
Example #2
def external_task_tutorial(request, context):
    """ Handle tutorials.  On a GET, decide whether to serve up a tutorial.
    On a POST, record that the tutorial was completed, then the client will
    refresh.  Returns either a response or None. """

    # unpack some variables
    experiment, worker, override = [
        context[k] for k in ['experiment', 'worker', 'override']
    ]

    if (request.method == "GET" and experiment.has_tutorial
            and (override == "tut" or not is_preview_request(request))):

        show_tutorial = (override == "tut" or
                         not context['experiment_worker'].tutorial_completed)
        if show_tutorial:
            context.update(csrf(request))
            template_name = experiment.template_name()
            return render(request, '%s_tut.html' % template_name, context)

    elif (request.method == "POST" and override is None
          and 'tutorial_complete' in request.POST
          and request.POST['tutorial_complete'] == 'true'):

        ew_id = context['experiment_worker'].id
        ExperimentWorker.objects.filter(id=ew_id) \
            .update(tutorial_completed=True)
        return json_success_response()

    return None
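is_preview_request is also undefined in this listing. Mechanical Turk signals a HIT preview by setting the assignmentId query parameter to the sentinel ASSIGNMENT_ID_NOT_AVAILABLE, so a plausible sketch (treating a missing id as a preview is an assumption):

def is_preview_request(request):
    # Guess at the helper: MTurk marks previews with a sentinel
    # assignmentId; treat a missing id the same way.
    assignment_id = request.GET.get('assignmentId',
                                    'ASSIGNMENT_ID_NOT_AVAILABLE')
    return assignment_id == 'ASSIGNMENT_ID_NOT_AVAILABLE'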
Example #3
def admin_example_ajax(request):
    """ Mark an ExperimentExample as good or not good (admin AJAX). """
    if 'id' not in request.POST or 'good' not in request.POST:
        return json_error_response(
            'Must have both "id" and "good" in POST params')

    id = request.POST['id']
    good = (request.POST['good'].lower() == 'true')
    updated = ExperimentExample.objects \
        .filter(id=id).update(good=good)

    if updated:
        return json_success_response()
    else:
        return json_error_response('Object (id=%s) not found' % id)
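The success/error split above works because QuerySet.update() returns the number of rows matched, and a primary-key filter matches at most one row. A quick illustration with a made-up id:

# No row has id=-1, so update() returns 0 and the view would fall
# through to json_error_response.
updated = ExperimentExample.objects.filter(id=-1).update(good=True)
assert updated == 0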
Example #4
def photo_curate(request, template='photos/curate.html'):
    """ Curate photos: on POST, update the given photo flags; on GET,
    render a batch of photos that still need curation. """
    if request.method == 'POST':
        if request.POST['action'] == 'button':
            photo_id = request.POST['photo_id']
            attr = request.POST['attr']
            Photo.objects.filter(id=photo_id).update(
                **{attr: request.POST['val'].lower() == u'true'}
            )
            val = Photo.objects.filter(id=photo_id) \
                .values_list(attr, flat=True)[0]
            return json_response({
                'photo_id': photo_id,
                'attr': attr,
                'val': val
            })
        elif request.POST['action'] == 'done':
            items = json.loads(request.POST['items'])
            for item in items:
                print(item)
                Photo.objects.filter(id=item['photo_id']).update(
                    **item['updates']
                )
            return json_success_response()
        else:
            raise Http404
    else:
        entries = Photo.objects \
            .filter(scene_category_correct=True) \
            .filter(
                Q(inappropriate__isnull=True) |
                Q(nonperspective__isnull=True) |
                Q(stylized__isnull=True) |
                Q(rotated__isnull=True)) \
            .order_by('-num_vertices', 'scene_category_correct_score')

        count = entries.count()
        entries = list(entries[:400])
        entries.sort(key=lambda x: x.aspect_ratio)

        return render(request, template, {
            'nav': 'browse/photo',
            'count': count,
            'entries': entries,
        })
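The 'button' branch updates an arbitrary model field through keyword expansion, **{attr: ...}. Since attr comes straight from the POST body, a hardened variant would whitelist it first; a sketch, with the candidate field names taken from the GET branch's filters:

# Hypothetical guarded version of the dynamic update above.
CURATE_ATTRS = {'inappropriate', 'nonperspective', 'stylized', 'rotated'}


def set_photo_flag(photo_id, attr, val):
    if attr not in CURATE_ATTRS:
        raise ValueError('unexpected attribute: %s' % attr)
    Photo.objects.filter(id=photo_id).update(**{attr: val})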
Example #5
def rectified_normal_vote(request):
    """ Record an admin score for a rectified-normal label. """
    id = request.POST['id']
    score = request.POST['score']
    ShapeRectifiedNormalLabel.objects.filter(id=id).update(admin_score=score)
    return json_success_response()
Example #6
def admin_submission(request,
                     experiment_slug='all',
                     filter_key='all',
                     template="mturk/admin/submission.html",
                     extra_context=None):
    """ Admin view for browsing and moderating MTurk submissions. """

    if request.method == 'POST':
        # admin clicked a button in the UI
        try:
            action = request.POST['action']
            assignment = MtAssignment.objects.get(
                id=request.POST['assignment_id'])
            if action == 'approve':
                assignment.approve(feedback=request.POST['message'])
                return json_success_response()
            elif action == 'reject':
                assignment.reject(feedback=request.POST['message'])
                return json_success_response()
            elif action == 'auto-approve':
                assignment.experiment_worker().set_auto_approve(
                    message=request.POST['message'])
                return json_success_response()
            elif action == 'block':
                assignment.experiment_worker().block(
                    reason=request.POST['message'],
                    all_tasks=request.POST.get('all_tasks', False),
                    report_to_mturk=request.POST.get('report_to_mturk', False))
                return json_success_response()
            else:
                return json_error_response("Unknown action: '%s'" % action)
        except Exception as e:
            return json_error_response(str(e))

    else:
        extra_filters = {'submission_complete': True}
        if not settings.MTURK_SANDBOX:
            extra_filters['hit__sandbox'] = False
        if 'worker_id' in request.GET:
            extra_filters['worker__mturk_worker_id'] = request.GET['worker_id']
        if 'hit_id' in request.GET:
            extra_filters['hit_id'] = request.GET['hit_id']

        if filter_key not in SUBMISSION_FILTERS:
            raise Http404
        assignments = MtAssignment.objects.filter(**dict_union(
            extra_filters, SUBMISSION_FILTERS[filter_key]['filter']))

        if experiment_slug != 'all':
            assignments = assignments.filter(
                hit__hit_type__experiment__slug=experiment_slug)

        entries = assignments \
            .defer('post_data', 'post_meta') \
            .order_by('-added') \
            .select_related('worker', 'hit', 'hit__hit_type',
                            'hit__hit_type__experiment') \
            .prefetch_related('submitted_contents__content')

        filters = []
        for key in SUBMISSION_FILTERS_LIST:
            count = 'TODO'
            filters.append(
                dict_union(SUBMISSION_FILTERS[key], {
                    'key': key,
                    'count': count
                }))

        categories = admin_submission_categories()
        category = None
        for c in categories:
            if c['slug'] == experiment_slug:
                category = c
                break
        else:
            raise Http404

        return render(
            request, template,
            dict_union(
                {
                    'nav': 'mturk-admin',
                    'subnav': 'submission',
                    'categories': categories,
                    'category_slug': experiment_slug,
                    'category': category,
                    'filters': filters,
                    'filter_key': filter_key,
                    'entries': entries,
                    'entries_per_page': 1,
                    'span': 'span9',
                    'thumb_template': 'mturk/admin/submission_entry.html',
                }, extra_context))
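dict_union is another helper that never appears in the listing. From its call sites here (merging filter dicts and template contexts, sometimes with a None second argument such as extra_context), it presumably behaves like this sketch:

def dict_union(a, b=None):
    # Assumed behavior: shallow copy of a, updated with b when b is truthy.
    result = dict(a)
    if b:
        result.update(b)
    return result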
Example #7
def task_quality(request, dataset_id='all'):
    # replace this with a fetch from your database
    if request.method == 'POST':
        data = request.REQUEST
        if u'results' not in data:
            return json_error_response(u'No results')

        results = json.loads(data[u'results'])
        time_ms = json.loads(data[u'time_ms'])
        time_active_ms = json.loads(data[u'time_active_ms'])

        ids = results.keys()

        user, _ = UserProfile.objects.get_or_create(user=request.user)

        PersonSegmentationQuality.mturk_submit(
            user, PersonSegmentation.objects.filter(id__in=ids), results,
            time_ms, time_active_ms, data[u'version'])

        return json_success_response()
    else:
        segmentations_filter = {
            'qualities': None,
        }

        if dataset_id != 'all':
            dataset_id = int(dataset_id)

            segmentations_filter = dict_union(
                segmentations_filter,
                {'task__person__photo__dataset_id': dataset_id})

        segmentations = PersonSegmentation.objects.filter(
            **segmentations_filter)

        if segmentations:
            # pick random non-annotated pictures
            contents = sample(segmentations, min(50, segmentations.count()))

            context = {
                # the current task
                u'contents_json': json.dumps(
                    [c.get_entry_dict() for c in contents]),
                u'content_id_json': json.dumps(
                    [{'id': c.id} for c in contents]),
                u'contents': contents,

                # if 'true', ask the user for a feedback survey at the end
                # and promise payment to complete it.  Must be 'true' or
                # 'false'.
                u'ask_for_feedback': 'false',

                # feedback_bonus is the payment in dollars that we promise
                # users for completing feedback
                u'feedback_bonus': 0.0,

                # templates containing html for instructions and thumbnails
                u'instructions':
                    'segmentation/experiments/quality_segmentation_inst_content.html',
                u'content_thumb_template':
                    'segmentation/experiments/quality_segmentation_thumb.html',
            }

            external_task_extra_context('segment_quality', context)

            return render(
                request, u'segmentation/experiments/quality_segmentation.html',
                context)
        else:
            return html_error_response(request,
                                       'All segmentations are marked.')
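Both task views call sample(queryset, k). Assuming that is random.sample, it works because querysets support len() and indexing, but every index access can issue its own query; materializing the queryset first avoids that. A hedged equivalent:

from random import sample


def sample_contents(queryset, k):
    # Draw up to k distinct rows after a single database query.
    rows = list(queryset)
    return sample(rows, min(k, len(rows)))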
Example #8
def external_compatible(request, id):
    """ Increment view counter for a compatible view """
    increment_hit_counter_task.delay(id, 'compatible_count')
    return json_success_response()
Example #9
def external_task_POST(request, context):
    """ Handles POSTs for mturk tasks.  Returns a response. """

    # unpack some variables
    experiment, hit, assignment, worker, override, experiment_worker = [
        context[k] for k in [
            'experiment', 'hit', 'assignment', 'worker', 'override',
            'experiment_worker'
        ]
    ]

    # error checks
    if override is not None:
        return json_error_response("You cannot submit in admin preview mode.")
    if not worker or not assignment:
        return json_error_response(
            "There was an error obtaining your Assignment ID from Amazon.")

    # check that POST is allowed
    if hit.sandbox and not settings.MTURK_ACCEPT_SANDBOX_HITS:
        return json_error_response(
            "Not currently accepting sandbox HITs.  POST data: " +
            json.dumps(request.POST))

    # extract submit data
    results = json.loads(request.POST['results'])
    time_ms = json.loads(request.POST['time_ms']) \
        if 'time_ms' in request.POST else None
    time_active_ms = json.loads(request.POST['time_active_ms']) \
        if 'time_active_ms' in request.POST else None
    time_load_ms = json.loads(request.POST['time_load_ms']) \
        if 'time_load_ms' in request.POST else None
    complete = ('partial' not in request.POST
                or str(request.POST['partial']) != 'true')
    version = json.loads(request.POST['version'])
    action_log = request.POST.get('action_log', '')
    screen_width = request.POST.get('screen_width', None)
    screen_height = request.POST.get('screen_height', None)

    # fix any potential str/int issues
    if isinstance(time_ms, basestring) and time_ms.isdigit():
        time_ms = int(time_ms)
    if isinstance(time_active_ms, basestring) and time_active_ms.isdigit():
        time_active_ms = int(time_active_ms)
    if isinstance(time_load_ms, basestring) and time_load_ms.isdigit():
        time_load_ms = int(time_load_ms)

    # store assignment POST information
    post_dict = {}
    meta_dict = {}
    for k, v in request.META.iteritems():
        # some non-encodable things get put in here -- filter them out by
        # forcing the unicode encoding
        try:
            meta_dict[unicode(k)] = unicode(v)
        except Exception:
            pass
    for k, v in request.POST.iteritems():
        # some non-encodable things get put in here -- filter them out by
        # forcing the unicode encoding
        try:
            post_dict[unicode(k)] = unicode(v)
        except Exception:
            pass

    # store dictionaries, not nested dictionaries
    post_dict[u'results'] = recursive_dict_exclude(results, [u'screenshot'])
    post_dict[u'time_ms'] = time_ms
    post_dict[u'time_active_ms'] = time_active_ms
    post_dict[u'time_load_ms'] = time_load_ms

    assignment.post_data = json.dumps(post_dict)
    assignment.post_meta = json.dumps(meta_dict)
    if 'HTTP_USER_AGENT' in request.META:
        assignment.user_agent = request.META['HTTP_USER_AGENT']

    assignment_dirty = False
    experiment_worker_dirty = False

    # update assignment info
    if complete:
        assignment.time_ms = recursive_sum(time_ms)
        assignment.time_active_ms = recursive_sum(time_active_ms)
        assignment.time_load_ms = recursive_sum(time_load_ms)
        assignment.status = MtAssignment.str_to_status['Submitted']
        assignment.submit_time = datetime.datetime.now()
        assignment.action_log = action_log
        assignment.screen_width = screen_width
        assignment.screen_height = screen_height
        if 'feedback' in request.POST:
            assignment.feedback = request.POST['feedback']
            # must fill in at least 2 of the 3 fields to count
            if assignment.feedback and len(json.loads(
                    assignment.feedback)) >= 2:
                assignment.has_feedback = True
        assignment_dirty = True

    # mark test contents data as seen.  it can't be done async or else the next
    # task will re-serve the same test items.
    rejected_assignment = False
    if assignment.num_test_contents:
        experiment_worker = context['experiment_worker']
        test_content_wrappers = context['test_content_wrappers']
        test_contents = context['test_contents']

        # grade test contents
        responses, responses_correct = hit.hit_type.experiment_settings \
            .out_content_model().mturk_grade_test(
                test_content_wrappers, test_contents, results)

        # store in database
        for i, tcw in enumerate(test_content_wrappers):
            # If the user accepts multiple HITs at once, then they can be
            # served the same test objects.  In that case, only store their
            # first answer, since the second time they see it, they will know
            # it is a test item.
            if not tcw.responses.filter(
                    experiment_worker=experiment_worker).exists():
                tcw.responses.create(
                    experiment_worker=experiment_worker,
                    assignment=assignment,
                    response=unicode(responses[i]),
                    correct=responses_correct[i],
                )

        # update local correct counts
        assignment.num_test_correct = sum(responses_correct)
        assignment.num_test_incorrect = sum(not x for x in responses_correct)
        assignment_dirty = True

        # update global correct counts
        experiment_worker.num_test_correct = \
            experiment_worker.test_content_responses.filter(correct=True).count()
        experiment_worker.num_test_incorrect = \
            experiment_worker.test_content_responses.filter(correct=False).count()
        experiment_worker_dirty = True

        # always approve, but give a message if they do badly
        if assignment.num_test_incorrect >= 3 and assignment.num_test_correct == 0:
            perc = int(
                100 * assignment.num_test_correct /
                (assignment.num_test_correct + assignment.num_test_incorrect))
            message = make_reject_message(experiment, hit, perc)
            from mturk.tasks import approve_assignment_task
            approve_assignment_task.apply_async(
                kwargs={
                    'assignment_id': assignment.id,
                    'feedback': message,
                },
                countdown=60,
                retry=True,
                retry_policy={'max_retries': 100})
            rejected_assignment = True

    # block if accuracy ever creeps below 80% (with more than 5 errors)
    if experiment_worker.num_test_incorrect > 5:
        perc = int(100 * experiment_worker.num_test_correct /
                   (experiment_worker.num_test_correct +
                    experiment_worker.num_test_incorrect))

        if perc < 80:
            message = make_reject_message(experiment, hit, perc)
            experiment_worker.block(reason=message, method='T', save=False)
            experiment_worker_dirty = True

    # otherwise auto-approve
    elif (not rejected_assignment
          and (experiment_worker.auto_approve or settings.MTURK_AUTO_APPROVE)):
        from mturk.tasks import approve_assignment_task
        approve_assignment_task.apply_async(
            kwargs={
                'assignment_id': assignment.id,
                'feedback': experiment_worker.auto_approve_message,
            },
            countdown=60,
            retry=True,
            retry_policy={'max_retries': 100})

    if assignment_dirty:
        assignment.save()
    if experiment_worker_dirty:
        experiment_worker.save()

    # submit (rest of) data asynchronously
    mturk_submit_task.apply_async(
        # note: 'contents' not serialized in this list -- the task re-fetches
        # this from the database.
        kwargs={
            'user_id': worker.user_id,
            'mturk_hit_id': hit.id,
            'mturk_assignment_id': assignment.id,
            'experiment_id': experiment.id,
            'results': results,  # dict with content id as key
            'time_ms': time_ms,  # number or dict with content id as key
            'time_active_ms': time_active_ms,  # same format as time_ms
            'time_load_ms': time_load_ms,
            'complete': complete,
            'version': version,
        },
        retry=True,
        retry_policy={
            'max_retries': None,  # (retry forever)
            'interval_start': 300,
            'interval_step': 60,
            'interval_max': 600,
        })

    # success
    return json_success_response()
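external_task_POST leans on two more undisplayed helpers: recursive_sum, which collapses the "number or dict keyed by content id" timing fields into one total, and recursive_dict_exclude, which drops bulky keys (here u'screenshot') before the POST is archived. Plausible sketches consistent with those call sites:

def recursive_sum(x):
    # Assumed: sum a number, or the values of a (possibly nested)
    # dict/list of numbers; None counts as 0.
    if isinstance(x, dict):
        return sum(recursive_sum(v) for v in x.values())
    if isinstance(x, (list, tuple)):
        return sum(recursive_sum(v) for v in x)
    return x or 0


def recursive_dict_exclude(d, keys):
    # Assumed: copy d, dropping the given keys at every nesting level.
    if not isinstance(d, dict):
        return d
    return dict((k, recursive_dict_exclude(v, keys))
                for k, v in d.items() if k not in keys)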
Example #10
def task_segment(request, dataset_id='all', part=False):
    # replace this with a fetch from your database
    if request.method == 'POST':
        data = request.REQUEST
        if u'results' not in data:
            return json_error_response(u'No results')

        results = json.loads(data[u'results'])
        time_ms = json.loads(data[u'time_ms'])
        time_active_ms = json.loads(data[u'time_active_ms'])

        ids = results.keys()

        user = UserProfile.objects.get(user=request.user)

        PersonSegmentation.mturk_submit(
            user, PersonSegmentationTask.objects.filter(id__in=ids), results,
            time_ms, time_active_ms, data[u'version'])

        return json_success_response()
    else:
        response = external_task_browser_check(request)
        if response:
            return response

        task_filter = {'responses__isnull': True}

        if dataset_id != 'all':
            dataset_id = int(dataset_id)

            task_filter = dict_union(task_filter,
                                     {'person__photo__dataset_id': dataset_id})

        task_filter = dict_union(task_filter, {'part__isnull': not part})

        # select only tasks that have not yet received _any_ response;
        # such tasks are usually worked on by turkers first.
        tasks = PersonSegmentationTask.objects.filter(**task_filter)

        if part:
            instructions = u'segmentation/experiments/segment_part_person_inst_content.html'
            template = u'segmentation/experiments/segment_part_person.html'
        else:
            instructions = u'segmentation/experiments/segment_person_inst_content.html'
            template = u'segmentation/experiments/segment_person.html'

        if tasks:
            # pick a random non-annotated picture
            contents = sample(tasks, min(1, tasks.count()))

            context = {
                # the current task
                u'contents_json': json.dumps(
                    [c.get_entry_dict(True) for c in contents]),
                u'content_id_json': json.dumps(
                    [{'id': c.id} for c in contents]),
                u'contents': contents,

                # if 'true', ask the user for a feedback survey at the end
                # and promise payment to complete it.  Must be 'true' or
                # 'false'.
                u'ask_for_feedback': 'false',

                # feedback_bonus is the payment in dollars that we promise
                # users for completing feedback
                u'feedback_bonus': 0.02,

                # template containing html for instructions
                u'instructions': instructions,
            }

            return render(request, template, context)
        else:
            return html_error_response(request,
                                       'All images are already segmented.')