Example #1
def update_votes_cubam(show_progress=False):
    """ This function is automatically called by
    mturk.tasks.mturk_update_votes_cubam_task """

    from mturk.cubam import update_votes_cubam
    changed_objects = []
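
    # In the calls below, the positional arguments are, in order:
    # object_model, labels, object_attr, label_attr, object_label_attr
    # (the same parameters that Examples #8 and #9 pass by keyword).
    # score_threshold=0 presumably splits CUBAM's signed per-object
    # score into a binary label, and min_votes=5 requires at least five
    # responses before a label is aggregated.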

    changed_objects += update_votes_cubam(
        MaterialShape, ShapePlanarityLabel.objects.filter(
            invalid=False, shape__invalid=False),
        'shape_id', 'planar', 'planar', quality_method_attr='planar_method',
        score_threshold=0, min_votes=5,
        show_progress=show_progress,
        return_changed_objects=True,
        experiment_filter={'slug': 'label_planarity'}
    )

    changed_objects += update_votes_cubam(
        MaterialShape, MaterialShapeQuality.objects.filter(
            invalid=False, shape__invalid=False),
        'shape_id', 'correct', 'correct',
        score_threshold=0, min_votes=5,
        show_progress=show_progress,
        return_changed_objects=True,
        experiment_filter={'slug': 'quality_material'}
    )

    return changed_objects
Example #2
def update_votes_cubam(show_progress=False):
    """ This function is automatically called by
    mturk.tasks.mturk_update_votes_cubam_task """

    from mturk.cubam import update_votes_cubam
    changed_objects = []

    changed_objects += update_votes_cubam(
        MaterialShape,
        ShapePlanarityLabel.objects.filter(invalid=False,
                                           shape__invalid=False),
        'shape_id',
        'planar',
        'planar',
        quality_method_attr='planar_method',
        score_threshold=0,
        min_votes=5,
        show_progress=show_progress,
        return_changed_objects=True,
        experiment_filter={'slug': 'label_planarity'})

    changed_objects += update_votes_cubam(
        MaterialShape,
        MaterialShapeQuality.objects.filter(invalid=False,
                                            shape__invalid=False),
        'shape_id',
        'correct',
        'correct',
        score_threshold=0,
        min_votes=5,
        show_progress=show_progress,
        return_changed_objects=True,
        experiment_filter={'slug': 'quality_material'})

    return changed_objects
Example #3
def update_votes_cubam(show_progress=False):
    """ This function is automatically called by
    mturk.tasks.mturk_update_votes_cubam_task """

    from mturk.cubam import update_votes_cubam
    changed_objects = []
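
    # Note that label_attr and object_label_attr can differ: in the
    # first call below, each response's 'correct' field is aggregated
    # into the photo's 'scene_category_correct' field.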

    changed_objects += update_votes_cubam(
        Photo, PhotoSceneQualityLabel.objects.filter(invalid=False),
        'photo_id', 'correct', 'scene_category_correct',
        quality_method_attr='scene_category_correct_method',
        score_threshold=0, min_votes=5,
        show_progress=show_progress,
        return_changed_objects=True,
        experiment_filter={'slug': 'quality_scene'}
    )

    changed_objects += update_votes_cubam(
        Photo, PhotoWhitebalanceLabel.objects.filter(invalid=False),
        'photo_id', 'whitebalanced', 'whitebalanced',
        score_threshold=0, min_votes=5,
        show_progress=show_progress, return_changed_objects=True,
        experiment_filter={'slug': 'label_whitebalance'}
    )

    return changed_objects
Example #4
def update_votes_cubam(show_progress=False):
    """ This function is automatically called by
    mturk.tasks.mturk_update_votes_cubam_task """

    from mturk.cubam import update_votes_cubam
    changed_objects = []

    changed_objects += update_votes_cubam(
        Photo,
        PhotoSceneQualityLabel.objects.filter(invalid=False),
        'photo_id',
        'correct',
        'scene_category_correct',
        quality_method_attr='scene_category_correct_method',
        score_threshold=0,
        min_votes=5,
        show_progress=show_progress,
        return_changed_objects=True,
        experiment_filter={'slug': 'quality_scene'})

    changed_objects += update_votes_cubam(
        Photo,
        PhotoWhitebalanceLabel.objects.filter(invalid=False),
        'photo_id',
        'whitebalanced',
        'whitebalanced',
        score_threshold=0,
        min_votes=5,
        show_progress=show_progress,
        return_changed_objects=True,
        experiment_filter={'slug': 'label_whitebalance'})

    return changed_objects
Example #5
def update_votes_cubam(show_progress=False):
    """ This function is automatically called by
    mturk.tasks.mturk_update_votes_cubam_task """

    from mturk.cubam import update_votes_cubam

    changed_objects = []

    for bsdf_version in ('wd',):
        bsdf_ct = ContentType.objects.get(
            app_label="bsdfs", model="shapebsdflabel_%s" % bsdf_version)
        bsdf_model = bsdf_ct.model_class()
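
        # ShapeBsdfQuality appears to reference the versioned BSDF
        # label model through a generic relation, hence the
        # content_type filter and the 'object_id' attribute in the
        # calls below.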

        # gloss
        changed_objects += update_votes_cubam(
            bsdf_model, ShapeBsdfQuality.objects.filter(
                invalid=False, content_type=bsdf_ct,
                gloss_correct__isnull=False),
            'object_id', 'gloss_correct', 'gloss_correct',
            score_threshold=0, min_votes=5,
            show_progress=show_progress,
            return_changed_objects=True,
            experiment_filter={
                'slug': 'quality_bsdf_gloss',
                'variant': json.dumps({'bsdf_version': bsdf_version}),
            }
        )

        # color
        changed_objects += update_votes_cubam(
            bsdf_model, ShapeBsdfQuality.objects.filter(
                invalid=False, content_type=bsdf_ct,
                color_correct__isnull=False),
            'object_id', 'color_correct', 'color_correct',
            score_threshold=0, min_votes=5,
            show_progress=show_progress,
            return_changed_objects=True,
            experiment_filter={
                'slug': 'quality_bsdf_color',
                'variant': json.dumps({'bsdf_version': bsdf_version}),
            }
        )

    return changed_objects
Example #6
def update_votes_cubam(show_progress=False):
    """ This function is automatically called by
    mturk.tasks.mturk_update_votes_cubam_task """

    from mturk.cubam import update_votes_cubam

    changed_objects = []

    for bsdf_version in ("wd",):
        bsdf_ct = ContentType.objects.get(app_label="bsdfs", model="shapebsdflabel_%s" % bsdf_version)
        bsdf_model = bsdf_ct.model_class()

        # gloss
        changed_objects += update_votes_cubam(
            bsdf_model,
            ShapeBsdfQuality.objects.filter(invalid=False, content_type=bsdf_ct, gloss_correct__isnull=False),
            "object_id",
            "gloss_correct",
            "gloss_correct",
            score_threshold=0,
            min_votes=5,
            show_progress=show_progress,
            return_changed_objects=True,
            experiment_filter={"slug": "quality_bsdf_gloss", "variant": json.dumps({"bsdf_version": bsdf_version})},
        )

        # color
        changed_objects += update_votes_cubam(
            bsdf_model,
            ShapeBsdfQuality.objects.filter(invalid=False, content_type=bsdf_ct, color_correct__isnull=False),
            "object_id",
            "color_correct",
            "color_correct",
            score_threshold=0,
            min_votes=5,
            show_progress=show_progress,
            return_changed_objects=True,
            experiment_filter={"slug": "quality_bsdf_color", "variant": json.dumps({"bsdf_version": bsdf_version})},
        )

    return changed_objects
Example #7
def update_votes_cubam(show_progress=False):
    """ This function is automatically called by
    mturk.tasks.mturk_update_votes_cubam_task """

    from mturk.cubam import update_votes_cubam
    changed_objects = []

    changed_objects += update_votes_cubam(
        ShapeRectifiedNormalLabel,
        ShapeRectifiedNormalQuality.objects.filter(
            invalid=False, rectified_normal__invalid=False),
        'rectified_normal_id', 'correct', 'correct',
        score_threshold=0, min_votes=5,
        show_progress=show_progress,
        return_changed_objects=True,
        experiment_filter={'slug': 'quality_rectify'}
    )

    return changed_objects
Example #8
def update_votes_cubam(show_progress=False):
    """ This function is automatically called by
    mturk.tasks.mturk_update_votes_cubam_task """

    from mturk.models import Experiment
    from mturk.cubam import update_votes_cubam

    # responses that we will consider
    opacity_responses = IntrinsicPointOpacityResponse.objects \
        .filter(invalid=False) \
        .exclude(point__opaque_method='A') \
        .order_by()
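    # (.order_by() with no arguments clears any default ordering;
    # opaque_method='A' presumably marks labels that were set by hand,
    # so those points are excluded from re-aggregation)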

    update_votes_cubam(object_model=IntrinsicPoint,
                       labels=opacity_responses,
                       object_attr='point_id',
                       label_attr='opaque',
                       object_label_attr='opaque',
                       quality_method_attr='opaque_method',
                       score_threshold=0,
                       min_votes=4,
                       show_progress=show_progress,
                       return_changed_objects=False,
                       experiment_filter={'slug': 'intrinsic_opacity'})

    # Since we have a 3-way answer, we break it down into two questions:
    # 1) are the two points the same reflectance?
    # 2) if not, does the darker pixel have darker reflectance?
    #
    # With this breakdown, we hope to capture user biases.  A relatively common
    # failure case of the experiment is that users always click the
    # darker pixel.  Another common case is that they always indicate "E"
    # (same).  These biases should be well captured by this breakdown.
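    #
    # For example, when point 1 is the darker pixel in the image
    # (point1_image_darker=True), the two answers recombine as:
    #   reflectance_eq=True                         -> darker = "E"
    #   reflectance_eq=False, reflectance_dd=True   -> darker = "1"
    #   reflectance_eq=False, reflectance_dd=False  -> darker = "2"
    # (this is the mapping implemented near the end of this function)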

    # we have to manually mark it as dirty in between our two stages since
    # our dirty tracking system assumes only one run of CUBAM
    cubam_dirty = any(
        Experiment.objects.filter(slug='intrinsic_compare').values_list(
            'cubam_dirty', flat=True))

    # responses that we will consider
    darker_responses = IntrinsicPointComparisonResponse.objects \
        .filter(invalid=False, user__exclude_from_aggregation=False) \
        .order_by()

    # CUBAM for question 1
    update_votes_cubam(object_model=IntrinsicPointComparison,
                       labels=darker_responses,
                       object_attr='comparison_id',
                       label_attr='reflectance_eq',
                       object_label_attr='reflectance_eq',
                       quality_method_attr='darker_method',
                       score_threshold=0,
                       min_votes=4,
                       show_progress=show_progress,
                       return_changed_objects=False,
                       experiment_filter={'slug': 'intrinsic_compare'})

    # items that are updated
    comparisons = IntrinsicPointComparison.objects \
        .exclude(darker_method='A') \
        .order_by()

    # convert back to our 3-way representation (1, 2, E)
    comparisons.filter(reflectance_eq=True).update(
        darker="E",
        reflectance_dd=None,
        reflectance_dd_score=None,
        darker_score=F('reflectance_eq_score'))

    # the dirty-tracking doesn't handle two-stage updates like this one,
    # so we manually mark it as dirty
    if cubam_dirty:
        Experiment.objects \
            .filter(slug='intrinsic_compare') \
            .update(cubam_dirty=True)

    # CUBAM for question 2, only considering the entries that branched as False
    # from question 1
    dd_responses = darker_responses.filter(reflectance_eq=False,
                                           reflectance_dd__isnull=False)

    update_votes_cubam(object_model=IntrinsicPointComparison,
                       labels=dd_responses,
                       object_attr='comparison_id',
                       label_attr='reflectance_dd',
                       object_label_attr='reflectance_dd',
                       quality_method_attr='darker_method',
                       score_threshold=0,
                       min_votes=1,
                       show_progress=show_progress,
                       return_changed_objects=False,
                       experiment_filter={'slug': 'intrinsic_compare'})

    if cubam_dirty:

        if show_progress:
            print('Updating changed IntrinsicPointComparison instances...')

        # convert back to our 3-way representation (1, 2, E)
        comparisons.filter(reflectance_eq=False) \
            .filter(Q(point1_image_darker=True, reflectance_dd=True) |
                    Q(point1_image_darker=False, reflectance_dd=False)) \
            .update(darker="1")
        comparisons.filter(reflectance_eq=False) \
            .filter(Q(point1_image_darker=True, reflectance_dd=False) |
                    Q(point1_image_darker=False, reflectance_dd=True)) \
            .update(darker="2")

        comparisons.filter(reflectance_eq=False, reflectance_dd=True) \
            .update(darker_score=F('reflectance_dd_score'))
        comparisons.filter(reflectance_eq=False, reflectance_dd=False) \
            .update(darker_score=0 - F('reflectance_dd_score'))
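        # (0 - F(...) rather than -F(...): unary minus on F()
        # expressions is likely unsupported in the Django version this
        # code targets)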

        if show_progress:
            print('Updating all Photo instances...')

        # update photos (with so many objects, it is faster to list all the
        # ids than to figure out which ones changed)
        from photos.tasks import update_photos_num_intrinsic
        photo_ids = Photo.objects.all().order_by().values_list('id', flat=True)
        update_photos_num_intrinsic(list(photo_ids),
                                    show_progress=show_progress)

    return None
Example #9
def update_votes_cubam(show_progress=False):
    """ This function is automatically called by
    mturk.tasks.mturk_update_votes_cubam_task """

    from mturk.models import Experiment
    from mturk.cubam import update_votes_cubam

    # responses that we will consider
    opacity_responses = IntrinsicPointOpacityResponse.objects \
        .filter(invalid=False) \
        .exclude(point__opaque_method='A') \
        .order_by()

    update_votes_cubam(
        object_model=IntrinsicPoint,
        labels=opacity_responses,
        object_attr='point_id',
        label_attr='opaque',
        object_label_attr='opaque',
        quality_method_attr='opaque_method',
        score_threshold=0,
        min_votes=4,
        show_progress=show_progress,
        return_changed_objects=False,
        experiment_filter={'slug': 'intrinsic_opacity'}
    )

    # Since we have a 3-way answer, we break it down into two questions:
    # 1) are the two points the same reflectance?
    # 2) if not, does the darker pixel have darker reflectance?
    #
    # With this breakdown, we hope to capture user biases.  A relatively common
    # failure case of the experiment is that users always click the
    # darker pixel.  Another common case is that they always indicate "E"
    # (same).  These biases should be well captured by this breakdown.

    # we have to manually mark it as dirty in between our two stages since
    # our dirty tracking system assumes only one run of CUBAM
    cubam_dirty = any(Experiment.objects
                      .filter(slug='intrinsic_compare')
                      .values_list('cubam_dirty', flat=True))

    # responses that we will consider
    darker_responses = IntrinsicPointComparisonResponse.objects \
        .filter(invalid=False, user__exclude_from_aggregation=False) \
        .order_by()

    # CUBAM for question 1
    update_votes_cubam(
        object_model=IntrinsicPointComparison,
        labels=darker_responses,
        object_attr='comparison_id',
        label_attr='reflectance_eq',
        object_label_attr='reflectance_eq',
        quality_method_attr='darker_method',
        score_threshold=0,
        min_votes=4,
        show_progress=show_progress,
        return_changed_objects=False,
        experiment_filter={'slug': 'intrinsic_compare'}
    )

    # items that are updated
    comparisons = IntrinsicPointComparison.objects \
        .exclude(darker_method='A') \
        .order_by()

    # convert back to our 3-way representation (1, 2, E)
    comparisons.filter(reflectance_eq=True).update(
        darker="E", reflectance_dd=None, reflectance_dd_score=None,
        darker_score=F('reflectance_eq_score')
    )

    # the dirty-tracking doesn't handle two-stage updates like this one,
    # so we manually mark it as dirty
    if cubam_dirty:
        Experiment.objects \
            .filter(slug='intrinsic_compare') \
            .update(cubam_dirty=True)

    # CUBAM for question 2, only considering the entries that branched as False
    # from question 1
    dd_responses = darker_responses.filter(
        reflectance_eq=False, reflectance_dd__isnull=False)

    update_votes_cubam(
        object_model=IntrinsicPointComparison,
        labels=dd_responses,
        object_attr='comparison_id',
        label_attr='reflectance_dd',
        object_label_attr='reflectance_dd',
        quality_method_attr='darker_method',
        score_threshold=0,
        min_votes=1,
        show_progress=show_progress,
        return_changed_objects=False,
        experiment_filter={'slug': 'intrinsic_compare'}
    )

    if cubam_dirty:

        if show_progress:
            print('Updating changed IntrinsicPointComparison instances...')

        # convert back to our 3-way representation (1, 2, E)
        comparisons.filter(reflectance_eq=False) \
            .filter(Q(point1_image_darker=True, reflectance_dd=True) |
                    Q(point1_image_darker=False, reflectance_dd=False)) \
            .update(darker="1")
        comparisons.filter(reflectance_eq=False) \
            .filter(Q(point1_image_darker=True, reflectance_dd=False) |
                    Q(point1_image_darker=False, reflectance_dd=True)) \
            .update(darker="2")

        comparisons.filter(reflectance_eq=False, reflectance_dd=True) \
            .update(darker_score=F('reflectance_dd_score'))
        comparisons.filter(reflectance_eq=False, reflectance_dd=False) \
            .update(darker_score=0 - F('reflectance_dd_score'))

        if show_progress:
            print('Updating all Photo instances...')

        # update photos (with so many objects, it is faster to list all the
        # ids than to figure out which ones changed)
        from photos.tasks import update_photos_num_intrinsic
        photo_ids = Photo.objects.all().order_by().values_list('id', flat=True)
        update_photos_num_intrinsic(list(photo_ids), show_progress=show_progress)

    return None
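
All nine examples share the same docstring: update_votes_cubam is invoked from mturk.tasks.mturk_update_votes_cubam_task. That task itself is not shown on this page. The sketch below is a minimal, hypothetical illustration of how it could be wired up, assuming Celery is the task queue; only the task name comes from the docstrings, while the decorator, signature, and surrounding code are assumptions.

from celery import shared_task

@shared_task
def mturk_update_votes_cubam_task(show_progress=False):
    # update_votes_cubam is any of the implementations shown above; it
    # is assumed to be importable in the module that defines this task.
    update_votes_cubam(show_progress=show_progress)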