Example #1
File: tasks.py  Project: CV-IP/opensurfaces
def auto_rectify_shape(shape_id):
    """ Attempt to automatically rectify the shape based on vanishing points """

    if ShapeRectifiedNormalLabel.objects.filter(shape_id=shape_id, automatic=True).exists():
        print "shape already automatically rectified"
        return

    shape = MaterialShape.objects.get(id=shape_id)

    from normals.perspective import estimate_uvnb_from_vanishing_points
    uvnb, method, num_vanishing_lines = estimate_uvnb_from_vanishing_points(
        shape)
    if not uvnb:
        print "Could not estimate uvnb matrix"
        return

    admin_user = User.objects.get_or_create(
        username='******')[0].get_profile()

    print 'method: %s, uvnb: %s' % (method, uvnb)
    obj = ShapeRectifiedNormalLabel.objects.create(
        user=admin_user,
        shape=shape,
        uvnb=json.dumps(uvnb),
        automatic=True,
        method=method,
        num_vanishing_lines=num_vanishing_lines
    )

    from mturk.tasks import add_pending_objects_task
    add_pending_objects_task.delay([get_content_tuple(obj)])
Example #2
    def mturk_submit(user, hit_contents, results, time_ms,
                     time_active_ms, version,
                     mturk_assignment=None, **kwargs):

        if unicode(version) != u'1.0':
            raise ValueError(
                "Unknown version: '%s' (type: %s)" % (version, type(version)))

        new_objects = {}
        for point in hit_contents:
            key = unicode(point.id)

            new_obj, created = point.opacities.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=time_ms[key],
                time_active_ms=time_active_ms[key],
                opaque=results[key][u'opaque'],
                zoom=results[key][u'zoom'],
            )

            if created:
                new_objects[get_content_tuple(point)] = [new_obj]

        # atomically update the labeled IntrinsicPoint objects
        with transaction.atomic():
            qset = IntrinsicPoint.objects \
                .select_for_update() \
                .filter(id__in=[c.id for c in hit_contents])
            for point in qset:
                point.update_opacity()

        return new_objects
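
Note that select_for_update() only takes effect inside a transaction, which is why the final queryset above is wrapped in transaction.atomic(). A minimal generic sketch of the same row-locking pattern, where MyModel, ids, and recompute() are placeholders rather than project code:

from django.db import transaction

with transaction.atomic():
    # rows stay locked until the block exits, so two concurrent submissions
    # cannot recompute the aggregate from stale data
    for obj in MyModel.objects.select_for_update().filter(id__in=ids):
        obj.recompute()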
Example #3
    def mturk_submit(user,
                     hit_contents,
                     results,
                     time_ms,
                     time_active_ms,
                     version,
                     mturk_assignment=None,
                     **kwargs):

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s' (type: %s)" %
                             (version, type(version)))
        if not hit_contents:
            return {}

        # best we can do is average
        avg_time_ms = time_ms / len(hit_contents)
        avg_time_active_ms = time_active_ms / len(hit_contents)

        new_objects = {}
        for photo in hit_contents:
            correct = (str(results[unicode(photo.id)]).lower() == 'true')

            new_obj, created = photo.scene_qualities.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=avg_time_ms,
                time_active_ms=avg_time_active_ms,
                correct=correct)

            if created:
                new_objects[get_content_tuple(photo)] = [new_obj]

        return new_objects
Example #4
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}

        # best we can do is average
        avg_time_ms = time_ms / len(hit_contents)
        avg_time_active_ms = time_active_ms / len(hit_contents)

        new_objects = {}
        for shape in hit_contents:
            selected = (
                str(results[unicode(shape.id)]['selected']).lower() == 'true')
            canttell = (
                str(results[unicode(shape.id)]['canttell']).lower() == 'true')

            new_obj, created = shape.planarities.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=avg_time_ms,
                time_active_ms=avg_time_active_ms,
                planar=selected,
                canttell=canttell,
            )

            if created:
                new_objects[get_content_tuple(shape)] = [new_obj]

        return new_objects
Example #5
def auto_rectify_shape(shape_id):
    """ Attempt to automatically rectify the shape based on vanishing points """

    if ShapeRectifiedNormalLabel.objects.filter(shape_id=shape_id,
                                                automatic=True).exists():
        print "shape already automatically rectified"
        return

    shape = MaterialShape.objects.get(id=shape_id)

    from normals.perspective import estimate_uvnb_from_vanishing_points
    uvnb, method, num_vanishing_lines = estimate_uvnb_from_vanishing_points(
        shape)
    if not uvnb:
        print "Could not estimate uvnb matrix"
        return

    admin_user = User.objects.get_or_create(username='******')[0].get_profile()

    print 'method: %s, uvnb: %s' % (method, uvnb)
    obj = ShapeRectifiedNormalLabel.objects.create(
        user=admin_user,
        shape=shape,
        uvnb=json.dumps(uvnb),
        automatic=True,
        method=method,
        num_vanishing_lines=num_vanishing_lines)

    from mturk.tasks import add_pending_objects_task
    add_pending_objects_task.delay([get_content_tuple(obj)])
Example #6
File: add.py  Project: swayfreeda/openpose
def add_photo(path, must_have_fov=False, must_have_exif=False, **args):
    """ Add a photo to the database """

    if not os.path.exists(path):
        raise ValueError("File does not exist")

    if 'license' not in args:
        args['license'] = License.objects.get_or_create(
            name='All Rights Reserved')[0]

    # md5: check for duplicates
    md5 = md5sum(path)
    duplicate = True
    try:
        photo = Photo.objects.get(md5=md5)
    except Photo.DoesNotExist:
        duplicate = False
    except Photo.MultipleObjectsReturned:
        duplicates = Photo.objects.filter(md5=md5).order_by('id')
        for d in duplicates[1:]:
            d.delete()
    if duplicate:
        raise ValueError("Duplicate photo import: '%s'" % path)

    # parse exif
    if 'exif' not in args:
        print 'Obtaining EXIF...'
        exif = get_exif(path)
        if exif:
            args['exif'] = exif
        elif must_have_exif:
            raise ValueError("Photo has no EXIF: %s" % path)

    if 'fov' not in args:
        print 'Obtaining FOV...'
        fov = get_fov(args['exif'])
        if fov:
            args['fov'] = fov
        elif must_have_fov:
            raise ValueError("Could not obtain photo FOV: %s" % path)

    photo = None

    # use a transaction so that it is only committed to the database
    # after save() returns.  otherwise, there's a small window between
    # when the photo is added and when it has an image attached.
    with transaction.atomic():
        with open(path, 'rb') as f:
            print 'Uploading photo...'
            photo = Photo.objects.create(image_orig=ImageFile(f),
                                         md5=md5,
                                         **args)

    from mturk.tasks import add_pending_objects_task
    add_pending_objects_task.delay([get_content_tuple(photo)])

    return photo
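
Given the ValueError branches above (missing file, duplicate md5, missing EXIF/FOV), a bulk import built on this helper only needs to catch and report failures. A rough usage sketch; the directory path is hypothetical:

import glob

for path in sorted(glob.glob('/data/photos/*.jpg')):  # hypothetical location
    try:
        photo = add_photo(path, must_have_exif=True, must_have_fov=True)
        print 'Added photo %s' % photo.id
    except ValueError as e:
        # duplicates and photos missing EXIF/FOV are reported and skipped
        print 'Skipped %s: %s' % (path, e)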
Example #7
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     experiment, mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}

        new_objects = {}
        for shape in hit_contents:
            d = results[unicode(shape.id)]
            shape_time_ms = time_ms[unicode(shape.id)]
            shape_time_active_ms = time_active_ms[unicode(shape.id)]

            edit_dict = d[u'edit']
            edit_sum = sum(int(edit_dict[k]) for k in edit_dict)
            edit_nnz = sum(int(int(edit_dict[k]) > 0) for k in edit_dict)

            init_method = 'KR'
            envmap = EnvironmentMap.objects.get(
                id=json.loads(experiment.variant)['envmap_id'])

            doi = int(d[u'doi'])
            contrast = float(d[u'contrast'])
            metallic = (int(d[u'type']) == 1)
            color = d['color']

            give_up = d[u'give_up']
            give_up_msg = d[u'give_up_msg']

            bsdf, bsdf_created = shape.bsdfs_wd.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=shape_time_ms,
                time_active_ms=shape_time_active_ms,
                doi=doi,
                contrast=contrast,
                metallic=metallic,
                color=color,
                give_up=give_up,
                give_up_msg=give_up_msg,
                edit_dict=json.dumps(edit_dict),
                edit_sum=edit_sum,
                edit_nnz=edit_nnz,
                envmap=envmap,
                init_method=init_method,
            )

            if bsdf_created:
                new_objects[get_content_tuple(shape)] = [bsdf]

            if ((not bsdf.image_blob) and 'screenshot' in d
                    and d['screenshot'].startswith('data:image/')):
                save_obj_attr_base64_image(bsdf, 'image_blob', d['screenshot'])

        return new_objects
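
The screenshot value checked at the end is a browser-generated data URI, and save_obj_attr_base64_image presumably strips the 'data:image/...;base64,' header before decoding. A self-contained round-trip sketch of that split, with a stand-in payload instead of real image bytes:

import base64

payload = base64.standard_b64encode('not really a PNG')   # stand-in bytes
data_uri = 'data:image/png;base64,' + payload
header, b64_data = data_uri.split(',', 1)
assert header.startswith('data:image/')
raw_bytes = base64.standard_b64decode(b64_data)            # == 'not really a PNG'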
Example #8
def add_photo(path, must_have_fov=False, must_have_exif=False, **args):
    """ Add a photo to the database """

    if not os.path.exists(path):
        raise ValueError("File does not exist")

    if "license" not in args:
        args["license"] = License.objects.get_or_create(name="All Rights Reserved")[0]

    # md5: check for duplicates
    md5 = md5sum(path)
    duplicate = True
    try:
        photo = Photo.objects.get(md5=md5)
    except Photo.DoesNotExist:
        duplicate = False
    except Photo.MultipleObjectsReturned:
        duplicates = Photo.objects.filter(md5=md5).order_by("id")
        for d in duplicates[1:]:
            d.delete()
    if duplicate:
        raise ValueError("Duplicate photo import: '%s'" % path)

    # parse exif
    if "exif" not in args:
        print "Obtaining EXIF..."
        exif = get_exif(path)
        if exif:
            args["exif"] = exif
        elif must_have_exif:
            raise ValueError("Photo has no EXIF: %s" % path)

    if "fov" not in args:
        print "Obtaining FOV..."
        fov = get_fov(args["exif"])
        if fov:
            args["fov"] = fov
        elif must_have_fov:
            raise ValueError("Could not obtain photo FOV: %s" % path)

    photo = None

    # use a transaction so that it is only committed to the database
    # after save() returns.  otherwise, there's a small window between
    # when the photo is added and when it has an image attached.
    with transaction.atomic():
        with open(path, "rb") as f:
            print "Uploading photo..."
            photo = Photo.objects.create(image_orig=ImageFile(f), md5=md5, **args)

    from mturk.tasks import add_pending_objects_task

    add_pending_objects_task.delay([get_content_tuple(photo)])

    return photo
Example #9
    def mturk_submit(user,
                     hit_contents,
                     results,
                     time_ms,
                     time_active_ms,
                     version,
                     mturk_assignment=None,
                     **kwargs):

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s' (type: %s)" %
                             (version, type(version)))

        new_objects = {}
        for comparison in hit_contents:
            key = unicode(comparison.id)
            darker = results[key][u'darker']
            confidence = results[key][u'confidence']

            reflectance_dd = None
            if darker == "E":
                reflectance_eq = True
            else:
                reflectance_eq = False
                if comparison.point1_image_darker is not None:
                    if darker == "1":
                        reflectance_dd = comparison.point1_image_darker
                    elif darker == "2":
                        reflectance_dd = not comparison.point1_image_darker

            new_obj, created = comparison.responses.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=time_ms[key],
                time_active_ms=time_active_ms[key],
                darker=darker,
                confidence=confidence,
                reflectance_eq=reflectance_eq,
                reflectance_dd=reflectance_dd,
            )

            if created:
                new_objects[get_content_tuple(comparison)] = [new_obj]

        # clear any existing aggregation
        IntrinsicPointComparison.objects \
            .filter(id__in=[c.id for c in hit_contents]) \
            .update(darker=None, darker_score=None,
                    reflectance_dd=None, reflectance_dd_score=None,
                    reflectance_eq=None, reflectance_eq_score=None)

        return new_objects
Example #10
    def mturk_submit(user,
                     hit_contents,
                     results,
                     time_ms,
                     time_active_ms,
                     version,
                     experiment,
                     mturk_assignment=None,
                     **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}

        # best we can do is average
        avg_time_ms = time_ms / len(hit_contents)
        avg_time_active_ms = time_active_ms / len(hit_contents)

        new_objects = {}
        for bsdf in hit_contents:
            selected = (str(results[unicode(
                bsdf.id)]['selected']).lower() == 'true')
            canttell = (str(results[unicode(
                bsdf.id)]['canttell']).lower() == 'true')

            color_correct = None
            gloss_correct = None
            if 'color' in experiment.slug:
                color_correct = selected
            elif 'gloss' in experiment.slug:
                gloss_correct = selected

            content_tuple = get_content_tuple(bsdf)
            new_obj, created = ShapeBsdfQuality.objects.get_or_create(
                content_type=ContentType.objects.get_for_id(content_tuple[0]),
                object_id=content_tuple[1],
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=avg_time_ms,
                time_active_ms=avg_time_active_ms,
                color_correct=color_correct,
                gloss_correct=gloss_correct,
                canttell=canttell)

            if created:
                new_objects[content_tuple] = [new_obj]

        return new_objects
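
Here get_content_tuple(bsdf) evidently yields a (content type id, object id) pair that is fed back through Django's contenttypes framework. A short sketch of resolving such a tuple to the original instance, reusing the content_tuple variable from above:

from django.contrib.contenttypes.models import ContentType

ct_id, obj_id = content_tuple                       # (content type id, object id)
ct = ContentType.objects.get_for_id(ct_id)
instance = ct.get_object_for_this_type(id=obj_id)   # the labeled object itself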
Example #11
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     mturk_assignment=None, **kwargs):

        if unicode(version) != u'1.0':
            raise ValueError(
                "Unknown version: '%s' (type: %s)" % (version, type(version)))

        new_objects = {}
        for comparison in hit_contents:
            key = unicode(comparison.id)
            darker = results[key][u'darker']
            confidence = results[key][u'confidence']

            reflectance_dd = None
            if darker == "E":
                reflectance_eq = True
            else:
                reflectance_eq = False
                if comparison.point1_image_darker is not None:
                    if darker == "1":
                        reflectance_dd = comparison.point1_image_darker
                    elif darker == "2":
                        reflectance_dd = not comparison.point1_image_darker

            new_obj, created = comparison.responses.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=time_ms[key],
                time_active_ms=time_active_ms[key],
                darker=darker,
                confidence=confidence,
                reflectance_eq=reflectance_eq,
                reflectance_dd=reflectance_dd,
            )

            if created:
                new_objects[get_content_tuple(comparison)] = [new_obj]

        # clear any existing aggregation
        IntrinsicPointComparison.objects \
            .filter(id__in=[c.id for c in hit_contents]) \
            .update(darker=None, darker_score=None,
                    reflectance_dd=None, reflectance_dd_score=None,
                    reflectance_eq=None, reflectance_eq_score=None)

        return new_objects
Example #12
    def mturk_submit(user,
                     hit_contents,
                     results,
                     time_ms,
                     time_active_ms,
                     version,
                     mturk_assignment=None,
                     **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}

        new_objects = {}
        for shape in hit_contents:
            normal = results[unicode(shape.id)]
            shape_time_ms = time_ms[unicode(shape.id)]
            shape_time_active_ms = time_active_ms[unicode(shape.id)]

            new_obj, created = shape.rectified_normals.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=shape_time_ms,
                time_active_ms=shape_time_active_ms,
                uvnb=json.dumps(normal['uvnb']),
                method=normal['method'],
                focal_pixels=normal['focal_pixels'],
                canvas_width=normal['canvas_width'],
                canvas_height=normal['canvas_height'],
                pos_x=normal['pos_x'],
                pos_y=normal['pos_y'],
            )

            if created:
                new_objects[get_content_tuple(shape)] = [new_obj]

                # rectify synchronously (since this is a transaction and will
                # not be visible in another thread)
                from normals.tasks import auto_rectify_shape
                auto_rectify_shape(shape.id)

        return new_objects
Example #13
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     experiment, mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}

        # best we can do is average
        avg_time_ms = time_ms / len(hit_contents)
        avg_time_active_ms = time_active_ms / len(hit_contents)

        new_objects = {}
        for bsdf in hit_contents:
            selected = (str(results[unicode(bsdf.id)]['selected']).lower()
                        == 'true')
            canttell = (str(results[unicode(bsdf.id)]['canttell']).lower()
                        == 'true')

            color_correct = None
            gloss_correct = None
            if 'color' in experiment.slug:
                color_correct = selected
            elif 'gloss' in experiment.slug:
                gloss_correct = selected

            content_tuple = get_content_tuple(bsdf)
            new_obj, created = ShapeBsdfQuality.objects.get_or_create(
                content_type=ContentType.objects.get_for_id(content_tuple[0]),
                object_id=content_tuple[1],
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=avg_time_ms,
                time_active_ms=avg_time_active_ms,
                color_correct=color_correct,
                gloss_correct=gloss_correct,
                canttell=canttell
            )

            if created:
                new_objects[content_tuple] = [new_obj]

        return new_objects
Example #14
def add_pending_objects_task(list_or_model, show_progress=False):
    """ Adds/updates modified objects as inputs to experiments """

    if not list_or_model:
        return

    try:
        experiments = Experiment.objects.all() \
            .filter(new_hit_settings__auto_add_hits=True) \
            .prefetch_related('new_hit_settings')

        if isinstance(list_or_model, list):
            # convert list to a content_tuple list
            tuple_list = [
                (x if isinstance(x, tuple) else get_content_tuple(x))
                for x in list_or_model
            ]

            for exp in experiments:
                # find objects that match the experiment content type
                exp_ct = exp.new_hit_settings.content_type
                id_list = [obj_id for (ct_id, obj_id) in tuple_list
                           if ct_id == exp_ct.id]

                if id_list:
                    queryset = exp_ct.model_class().objects \
                        .filter(id__in=id_list)
                    add_pending_objects_impl(exp, queryset, show_progress)
        else:
            model = list_or_model
            ct = ContentType.objects.get_for_model(model)
            queryset = model.objects.all()
            for exp in experiments.filter(new_hit_settings__content_type=ct):
                add_pending_objects_impl(exp, queryset, show_progress)

    except Exception as exc:
        # Re-add to the queue so that we don't lose tasks
        print 'Exception (%s) -- will retry in 5 minutes' % exc
        traceback.print_exc()
        raise add_pending_objects_task.retry(exc=exc, countdown=60 * 5)
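
Since the list branch converts non-tuple entries with get_content_tuple, callers can pass either form. A sketch of both delay() styles, where photo and shape stand for saved model instances and the instance form assumes the configured Celery serializer can carry them:

# the usual style seen in these examples: explicit content tuples
add_pending_objects_task.delay([get_content_tuple(photo)])

# raw instances are also accepted by the task body, which converts them itself
add_pending_objects_task.delay([photo, shape])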
Example #15
    def mturk_submit(user,
                     hit_contents,
                     results,
                     time_ms,
                     time_active_ms,
                     version,
                     mturk_assignment=None,
                     **kwargs):

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s' (type: %s)" %
                             (version, type(version)))

        new_objects = {}
        for point in hit_contents:
            key = unicode(point.id)

            new_obj, created = point.opacities.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=time_ms[key],
                time_active_ms=time_active_ms[key],
                opaque=results[key][u'opaque'],
                zoom=results[key][u'zoom'],
            )

            if created:
                new_objects[get_content_tuple(point)] = [new_obj]

        # atomically update the labeled IntrinsicPoint objects
        with transaction.atomic():
            qset = IntrinsicPoint.objects \
                .select_for_update() \
                .filter(id__in=[c.id for c in hit_contents])
            for point in qset:
                point.update_opacity()

        return new_objects
Example #16
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}

        new_objects = {}
        for shape in hit_contents:
            normal = results[unicode(shape.id)]
            shape_time_ms = time_ms[unicode(shape.id)]
            shape_time_active_ms = time_active_ms[unicode(shape.id)]

            new_obj, created = shape.rectified_normals.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=shape_time_ms,
                time_active_ms=shape_time_active_ms,
                uvnb=json.dumps(normal['uvnb']),
                method=normal['method'],
                focal_pixels=normal['focal_pixels'],
                canvas_width=normal['canvas_width'],
                canvas_height=normal['canvas_height'],
                pos_x=normal['pos_x'],
                pos_y=normal['pos_y'],
            )

            if created:
                new_objects[get_content_tuple(shape)] = [new_obj]

                # rectify synchronously (since this is a transaction and will
                # not be visible in another thread)
                from normals.tasks import auto_rectify_shape
                auto_rectify_shape(shape.id)

        return new_objects
Example #17
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}
        if not user:
            raise ValueError("Null user")

        new_objects = {}
        for shape in hit_contents:
            name_string = results[unicode(shape.id)]
            shape_time_ms = time_ms[unicode(shape.id)]
            shape_time_active_ms = time_active_ms[unicode(shape.id)]

            # normalize case
            name_string = name_string.lower()
            name_string = name_string[0].upper() + name_string[1:]

            name = ShapeName.objects.get_or_create(
                name=name_string,
            )[0]

            new_obj, created = shape.names.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=shape_time_ms,
                time_active_ms=shape_time_active_ms,
                name=name)

            if created and name:
                shape.update_entropy(save=True)
                new_objects[get_content_tuple(shape)] = [new_obj]

        return new_objects
Example #18
def mturk_update_votes_cubam_task(show_progress=False):
    # use a lock directory to ensure only one thread is running
    LOCK_DIR = '.mturk_update_votes_cubam_task'
    try:
        os.mkdir(LOCK_DIR)
    except:
        print ("Already running!  If you are *sure* that " +
               "mturk_update_votes_cubam_task is not running, " +
               "delete the .mturk_update_votes_cubam_task directory")
        return

    try:
        from common.utils import import_modules
        modules = import_modules(settings.MTURK_MODULES)

        for mt1 in modules:
            if not hasattr(mt1, 'update_votes_cubam'):
                continue

            print '\nStarting: %s.update_votes_cubam()' % mt1.__name__
            changed_objects = mt1.update_votes_cubam(
                show_progress=show_progress)

            if changed_objects:
                # update pending contents
                add_pending_objects_task.delay(
                    [get_content_tuple(c) for c in changed_objects])

                # other updates
                for mt2 in modules:
                    if hasattr(mt2, 'update_changed_objects'):
                        mt2.update_changed_objects(changed_objects)

            print '\nDone: %s.update_votes_cubam()' % mt1.__name__
    finally:
        os.rmdir(LOCK_DIR)
Example #19
def triangulate_submitted_shapes_impl(photo, user, mturk_assignment,
                                      shape_model, submitted_shapes):

    if not submitted_shapes:
        return

    if not os.path.isfile(settings.TRIANGULATE_BIN):
        raise RuntimeError(
            "ERROR: '%s' (settings.TRIANGULATE_BIN) does not exist -- "
            "check that it is compiled" % settings.TRIANGULATE_BIN)

    input_lines = [
        ('%s ' % s.id) + ' '.join(filter(None, s.vertices.split(',')))
        for s in submitted_shapes
    ]
    input_txt = '\n'.join(input_lines) + '\nEND'

    process = None
    try:
        process = subprocess.Popen(args=settings.TRIANGULATE_BIN,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   shell=True)
        output_txt, errors_txt = process.communicate(input_txt)
    except:
        if process:
            process.kill()
            process.wait()
        raise

    if not output_txt:
        raise ValueError(
            "Error with triangulate.  Bin:%s\nInput:\n%s\n\nOutput:\n%s\n\nErrors:\n%s"
            % (settings.TRIANGULATE_BIN, input_txt, output_txt, errors_txt))

    if errors_txt:
        print errors_txt

    #print("Bin:%s\nInput:\n%s\n\nOutput:\n%s\n\nErrors:\n%s" % (
    #settings.TRIANGULATE_BIN, input_txt, output_txt, errors_txt))

    new_content_tuples = []
    output_lines = output_txt.split('\n')

    with transaction.atomic():
        for line in output_lines:
            line = line.strip()
            if not line:
                continue

            fields = line.split('|')
            if len(fields) != 4:
                raise ValueError("Invalid output: %s" % repr(output_txt))

            ids = [int(f) for f in filter(None, fields[0].split(' '))]

            if not ids:
                print 'Discarding shape not contained in input'
                continue

            verts, tris, segs = [
                ','.join(filter(None, f.split(' '))) for f in fields[1:4]
            ]

            # compute polygon area and discard small polygons
            area = complex_polygon_area(verts, tris)
            # 0.0002 is roughly a 32x32 patch for a 2400x2400 image
            if area < 0.0001:
                print 'Discarding: verts: "%s", tris: "%s", segs: "%s", area: %s' % (
                    verts, tris, segs, area)
                continue

            # convert area to pixels
            pixel_area = area * photo.image_orig.width * \
                photo.image_orig.height

            # extract segmentation times
            time_ms_list = []
            ss_list = []
            for ss in submitted_shapes:
                if int(ss.id) in ids:
                    ss_list.append(ss)
                    time_ms_list.append(ss.time_ms)

            if not ss_list or not time_ms_list:
                print 'Discarding shape not mapping to input shapes'
                continue

            # use the average time of the submitted shapes
            time_ms = sum(time_ms_list) / float(len(time_ms_list))

            # auto-grant high quality for users with qualifications
            quality_method = None
            correct = None
            if pixel_area >= 12000:
                from mturk.models import MtQualificationAssignment
                try:
                    correct = bool(
                        MtQualificationAssignment.objects.get(
                            worker=user, qualification__slug="mat_seg").value)
                    if correct:
                        quality_method = 'Q'
                except MtQualificationAssignment.DoesNotExist:
                    correct = False

            new_obj, created = shape_model.objects.get_or_create(
                photo=photo,
                user=user,
                mturk_assignment=mturk_assignment,
                vertices=verts,
                triangles=tris,
                segments=segs,
                area=area,
                pixel_area=pixel_area,
                time_ms=time_ms,
                defaults={
                    'added': ss_list[0].added,
                    'correct': correct,
                    'quality_method': quality_method,
                })

            if created:
                for ss in ss_list:
                    new_obj.submitted_shapes.add(ss)
                new_content_tuples.append(get_content_tuple(new_obj))

        # these are created outside of the mturk view response, so we need to
        # manually add them to the pending objects queue
        # (imported here to avoid circular imports)
        for (ct_id, obj_id) in new_content_tuples:
            mturk_assignment.submitted_contents.get_or_create(
                content_type=ContentType.objects.get_for_id(ct_id),
                object_id=obj_id,
            )

    # update photo shape count synchronously
    from photos.tasks import update_photos_num_shapes
    update_photos_num_shapes([photo.id])
    new_content_tuples.append(get_content_tuple(photo))

    # new pending objects
    from mturk.tasks import add_pending_objects_task
    add_pending_objects_task.delay(new_content_tuples)
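
For reference, each line of output_txt is parsed above as four '|'-separated fields: the source shape ids, then space-separated vertex, triangle, and segment lists. A standalone sketch of that parsing step; the values are invented, not real triangulator output:

line = '17 23|0.1 0.2 0.9 0.2 0.5 0.8|0 1 2|0 0 0'
fields = line.split('|')
ids = [int(f) for f in fields[0].split()]
verts, tris, segs = [','.join(f.split()) for f in fields[1:4]]
# ids == [17, 23]; verts == '0.1,0.2,0.9,0.2,0.5,0.8'; tris == '0,1,2'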
Example #20
    def mturk_submit(user,
                     hit_contents,
                     results,
                     time_ms,
                     time_active_ms,
                     version,
                     mturk_assignment=None,
                     **kwargs):

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}

        new_objects = {}
        for photo in hit_contents:
            points = results[unicode(photo.id)].strip()
            photo_time_ms = time_ms[unicode(photo.id)]
            photo_time_active_ms = time_active_ms[unicode(photo.id)]

            # null by default
            chroma_median = None

            # count points
            points_list = points.split(',')
            num_points = len(points_list)
            if num_points < 2:
                num_points = 0
            else:
                if num_points % 2 != 0:
                    raise ValueError("Odd number of coordinates (%d)" %
                                     num_points)
                num_points //= 2

            # compute median chromaticity
            if num_points > 0:
                pil = photo.open_image(width='300')
                chromaticities = []
                for idx in xrange(num_points):
                    x = float(points_list[idx * 2]) * pil.size[0]
                    y = float(points_list[idx * 2 + 1]) * pil.size[1]
                    rgb = pil.getpixel((x, y))
                    if rgb[0] >= 253 and rgb[1] >= 253 and rgb[2] >= 253:
                        continue  # oversaturated
                    lab = RGBColor(rgb[0], rgb[1], rgb[2]).convert_to('lab')
                    chroma = math.sqrt(lab.lab_a * lab.lab_a +
                                       lab.lab_b * lab.lab_b)
                    chromaticities.append(chroma)
                if chromaticities:
                    chromaticities.sort()
                    chroma_median = chromaticities[len(chromaticities) // 2]

            # add whitebalance label
            new_obj, created = photo.whitebalances.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=photo_time_ms,
                time_active_ms=photo_time_active_ms,
                points=points,
                num_points=num_points,
                chroma_median=chroma_median)

            # update photo filter status
            if created:
                new_objects[get_content_tuple(photo)] = [new_obj]

        return new_objects
Example #21
    def mturk_submit(user,
                     hit_contents,
                     results,
                     time_ms,
                     time_active_ms,
                     version,
                     mturk_assignment=None,
                     **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'2.0':
            raise ValueError("Unknown version: %s" % version)

        new_objects = {}
        for task in hit_contents:
            task_id = str(task.id)
            scribbles = results[task_id][u'scribbles']
            person_time_ms = time_ms[task_id]
            person_time_active_ms = time_active_ms[task_id]

            # check if the scribbles make sense
            for scribble in scribbles:
                for point in scribble[u'points']:
                    if len(point) != 2:
                        raise ValueError("Point with more than 2 coordinates")

            # check if the results contain a segmentation already, if so do not
            # recalculate the segmentation

            overlay_img = None
            if u'segmentation' in results[task_id]:
                try:
                    overlay_img_data = base64.standard_b64decode(
                        results[task_id][u'segmentation'])
                    overlay_img = Image.open(StringIO(overlay_img_data))
                    print("reusing segmentation data")
                except:
                    overlay_img = None

            if not overlay_img:
                # generate the segmentation image
                overlay_img = calc_person_overlay_img(task, scribbles)
                print("NOT reusing segmentation data")

            with transaction.atomic():
                with NamedTemporaryFile(prefix=u'segmentation_' +
                                        task.person.photo.name + u'_',
                                        suffix=u'.png') as f:
                    overlay_img.save(f, u"PNG")
                    f.seek(0)
                    segmentation = ImageFile(f)

                    new_obj, created = task.responses.get_or_create(
                        user=user,
                        segmentation=segmentation,
                        mturk_assignment=mturk_assignment,
                        time_ms=person_time_ms,
                        time_active_ms=person_time_active_ms,
                        # (repr gives more float digits)
                        scribbles=json.dumps(scribbles),
                        num_scribbles=len(scribbles),
                    )

                    if created:
                        new_objects[get_content_tuple(task)] = [new_obj]

        return new_objects
Example #22
def triangulate_submitted_shapes_impl(
        photo, user, mturk_assignment, shape_model, submitted_shapes):

    if not submitted_shapes:
        return

    if not os.path.isfile(settings.TRIANGULATE_BIN):
        raise RuntimeError("ERROR: '%s' (settings.TRIANGULATE_BIN) does not exist -- "
                           "check that it is compiled" % settings.TRIANGULATE_BIN)

    input_lines = [('%s ' % s.id) + ' '.join(
        filter(None, s.vertices.split(','))) for s in submitted_shapes]
    input_txt = '\n'.join(input_lines) + '\nEND'

    process = None
    try:
        process = subprocess.Popen(
            args=settings.TRIANGULATE_BIN,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=True)
        output_txt, errors_txt = process.communicate(input_txt)
    except:
        if process:
            process.kill()
            process.wait()
        raise

    if not output_txt:
        raise ValueError(
            "Error with triangulate.  Bin:%s\nInput:\n%s\n\nOutput:\n%s\n\nErrors:\n%s" % (
                settings.TRIANGULATE_BIN, input_txt, output_txt, errors_txt)
        )

    if errors_txt:
        print errors_txt

    #print("Bin:%s\nInput:\n%s\n\nOutput:\n%s\n\nErrors:\n%s" % (
        #settings.TRIANGULATE_BIN, input_txt, output_txt, errors_txt))

    new_content_tuples = []
    output_lines = output_txt.split('\n')

    with transaction.atomic():
        for line in output_lines:
            line = line.strip()
            if not line:
                continue

            fields = line.split('|')
            if len(fields) != 4:
                raise ValueError("Invalid output: %s" % repr(output_txt))

            ids = [int(f) for f in filter(None, fields[0].split(' '))]

            if not ids:
                print 'Discarding shape not contained in input'
                continue

            verts, tris, segs = [','.join(filter(None, f.split(' ')))
                                 for f in fields[1:4]]

            # compute polygon area and discard small polygons
            area = complex_polygon_area(verts, tris)
            # 0.0002 is roughly a 32x32 patch for a 2400x2400 image
            if area < 0.0001:
                print 'Discarding: verts: "%s", tris: "%s", segs: "%s", area: %s' % (
                    verts, tris, segs, area)
                continue

            # convert area to pixels
            pixel_area = area * photo.image_orig.width * \
                photo.image_orig.height

            # extract segmentation times
            time_ms_list = []
            ss_list = []
            for ss in submitted_shapes:
                if int(ss.id) in ids:
                    ss_list.append(ss)
                    time_ms_list.append(ss.time_ms)

            if not ss_list or not time_ms_list:
                print 'Discarding shape not mapping to input shapes'
                continue

            # use the average time of the submitted shapes
            time_ms = sum(time_ms_list) / float(len(time_ms_list))

            # auto-grant high quality for users with qualifications
            quality_method = None
            correct = None
            if pixel_area >= 12000:
                from mturk.models import MtQualificationAssignment
                try:
                    correct = bool(MtQualificationAssignment.objects.get(
                        worker=user, qualification__slug="mat_seg").value)
                    if correct:
                        quality_method = 'Q'
                except MtQualificationAssignment.DoesNotExist:
                    correct = False

            new_obj, created = shape_model.objects.get_or_create(
                photo=photo,
                user=user,
                mturk_assignment=mturk_assignment,
                vertices=verts,
                triangles=tris,
                segments=segs,
                area=area,
                pixel_area=pixel_area,
                time_ms=time_ms,
                defaults={
                    'added': ss_list[0].added,
                    'correct': correct,
                    'quality_method': quality_method,
                }
            )

            if created:
                for ss in ss_list:
                    new_obj.submitted_shapes.add(ss)
                new_content_tuples.append(get_content_tuple(new_obj))

        # these are created outside of the mturk view response, so we need to
        # manually add them to the pending objects queue
        # (imported here to avoid circular imports)
        for (ct_id, obj_id) in new_content_tuples:
            mturk_assignment.submitted_contents.get_or_create(
                content_type=ContentType.objects.get_for_id(ct_id),
                object_id=obj_id,
            )

    # update photo shape count synchronously
    from photos.tasks import update_photos_num_shapes
    update_photos_num_shapes([photo.id])
    new_content_tuples.append(get_content_tuple(photo))

    # new pending objects
    from mturk.tasks import add_pending_objects_task
    add_pending_objects_task.delay(new_content_tuples)
Example #23
def mturk_submit_impl(**kwargs):
    #slug = kwargs['experiment'].slug
    #print '%s time_ms: %s, time_active_ms: %s, time_load_ms: %s' % (
        #slug, kwargs['time_ms'], kwargs['time_active_ms'],
        #kwargs['time_load_ms'])
    #print '%s results: %s' % (slug, kwargs['results'])
    #if kwargs['mturk_assignment'].feedback:
        #print '%s feedback: %s' % (slug, kwargs['mturk_assignment'].feedback)

    # fetch objects if passed by ID
    if 'user_id' in kwargs:
        kwargs['user'] = UserProfile.objects.get(user_id=kwargs['user_id'])
    if 'mturk_hit_id' in kwargs:
        kwargs['mturk_hit'] = MtHit.objects.get(id=kwargs['mturk_hit_id'])
    if 'mturk_assignment_id' in kwargs:
        kwargs['mturk_assignment'] = MtAssignment.objects.get(id=kwargs['mturk_assignment_id'])
    if 'experiment_id' in kwargs:
        kwargs['experiment'] = Experiment.objects.get(id=kwargs['experiment_id'])

    # fetch experiment settings
    hit_type = kwargs['mturk_hit'].hit_type
    exp_settings = hit_type.experiment_settings
    if not exp_settings:
        # if the settings are somehow missing, update all records with the
        # newest experiment settings
        exp_settings = kwargs['experiment'].new_hit_settings
        MtHitType.objects.filter(id=hit_type.id) \
            .update(experiment_settings=exp_settings)

    # fetch hit contents
    if 'hit_contents' not in kwargs:
        kwargs['hit_contents'] = fetch_hit_contents(kwargs['mturk_hit'])
    hit_contents = kwargs['hit_contents']

    # new_objects_dict: {(content_type_id, content_id): [created items]}
    # (if [created items] is empty, the entry may be omitted)
    if hit_contents:
        new_objects_dict = exp_settings.out_content_model() \
            .mturk_submit(**kwargs)
    else:
        print "WARNING: no hit_contents in %s" % kwargs['mturk_hit'].id
        new_objects_dict = {}

    # sanity check
    if not all(isinstance(k, tuple) for k in new_objects_dict):
        raise ValueError(
            "Invalid new_objects_dict: %s" % repr(new_objects_dict))

    # flatten all items into one list
    new_objects_list = []
    for obj_list in new_objects_dict.values():
        new_objects_list += obj_list

    # attach objects to assignment
    for obj in new_objects_list:
        MtSubmittedContent.objects.get_or_create(
            assignment=kwargs['mturk_assignment'],
            object_id=obj.id,
            content_type=ContentType.objects.get_for_model(obj),
        )

    for content in hit_contents:
        # content_tuple: (content type id, object id)
        content_tuple = get_content_tuple(content)
        if content_tuple not in new_objects_dict:
            # print '%s: no new objects generated' % repr(content_tuple)
            continue

        delta_completed = len(new_objects_dict[content_tuple])
        delta_scheduled = exp_settings.out_count_ratio

        # update the fact that some outputs have been completed
        PendingContent.objects \
            .filter(
                experiment=kwargs['experiment'],
                content_type=ContentType.objects.get_for_id(content_tuple[0]),
                object_id=content_tuple[1],
            ).update(
                num_outputs_completed=F(
                    'num_outputs_completed') + delta_completed,
                num_outputs_scheduled=F(
                    'num_outputs_scheduled') - delta_scheduled,
            )

    # consider all affected objects for new experiments
    pending_objects = list(set(hit_contents + new_objects_list))
    add_pending_objects_task.delay(
        [get_content_tuple(c) for c in pending_objects])

    # mark experiment as dirty
    Experiment.objects.filter(id=kwargs['experiment'].id) \
        .update(cubam_dirty=True)

    # here, "complete" means that the user actually submitted (and is not a
    # "partial submission", i.e. a background auto-submit performed by the
    # experiment script)
    if not kwargs['complete']:
        return

    # sync with mturk 30 minutes from now (it can take a while to update the
    # status; 1 minute is not enough)
    sync_hit_task.apply_async(
        args=[kwargs['mturk_hit'].id],
        countdown=30 * 60)

    # mark as done
    MtAssignment.objects.filter(id=kwargs['mturk_assignment'].id) \
        .update(submission_complete=True)
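
The PendingContent update above uses F() expressions, so the counters are adjusted in a single SQL UPDATE rather than read-modify-write in Python, which avoids lost updates between concurrent submissions. A minimal generic sketch of the pattern; Counter, counter_id, and num_done are placeholders:

from django.db.models import F

# issues one UPDATE ... SET num_done = num_done + 1; no row is read into Python
Counter.objects.filter(pk=counter_id).update(num_done=F('num_done') + 1)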
Example #24
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms,
                     experiment, version, mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: %s" % version)

        photo = hit_contents[0]
        poly_list = results[str(photo.id)]
        time_ms_list = time_ms[str(photo.id)]
        time_active_ms_list = time_active_ms[str(photo.id)]

        if len(poly_list) != len(time_ms_list):
            raise ValueError("Result length mismatch (%s polygons, %s times)" % (
                len(poly_list), len(time_ms_list)))

        shape_model = MaterialShape
        slug = experiment.slug
        if slug == "segment_material":
            shape_type = 'M'
        elif slug == "segment_object":
            shape_type = 'O'
        else:
            raise ValueError("Unknown slug: %s" % slug)

        # store results in SubmittedShape objects
        new_objects_list = []
        for idx in xrange(len(poly_list)):
            poly_vertices = poly_list[idx]
            poly_time_ms = time_ms_list[idx]
            poly_time_active_ms = time_active_ms_list[idx]

            num_vertices = len(poly_vertices)
            if num_vertices % 2 != 0:
                raise ValueError("Odd number of vertices (%d)" % num_vertices)
            num_vertices //= 2

            new_obj, created = photo.submitted_shapes.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=poly_time_ms,
                time_active_ms=poly_time_active_ms,
                # (repr gives more float digits)
                vertices=','.join([repr(f) for f in poly_vertices]),
                num_vertices=num_vertices,
                shape_type=shape_type
            )

            if created:
                new_objects_list.append(new_obj)

        # triangulate polygons (creates instances of shape_model)
        if new_objects_list:
            from shapes.tasks import triangulate_submitted_shapes_task
            triangulate_submitted_shapes_task.delay(
                photo, user, mturk_assignment, shape_model, new_objects_list)

        if new_objects_list:
            return {get_content_tuple(photo): new_objects_list}
        else:
            return {}
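
Polygon vertices are stored as a flat comma-separated string of interleaved x,y coordinates, which is why num_vertices is halved above. A small decoding sketch with illustrative values:

vertex_string = '0.25,0.5,0.75,0.5,0.5,0.9'   # three (x, y) points, made up
coords = [float(v) for v in vertex_string.split(',') if v]
points = zip(coords[0::2], coords[1::2])      # [(0.25, 0.5), (0.75, 0.5), (0.5, 0.9)]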
Example #25
    def mturk_submit(user,
                     hit_contents,
                     results,
                     time_ms,
                     time_active_ms,
                     version,
                     experiment,
                     mturk_assignment=None,
                     **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """

        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}

        new_objects = {}
        for shape in hit_contents:
            d = results[unicode(shape.id)]
            shape_time_ms = time_ms[unicode(shape.id)]
            shape_time_active_ms = time_active_ms[unicode(shape.id)]

            edit_dict = d[u'edit']
            edit_sum = sum(int(edit_dict[k]) for k in edit_dict)
            edit_nnz = sum(int(int(edit_dict[k]) > 0) for k in edit_dict)

            init_method = 'KR'
            envmap = EnvironmentMap.objects.get(
                id=json.loads(experiment.variant)['envmap_id'])

            doi = int(d[u'doi'])
            contrast = float(d[u'contrast'])
            metallic = (int(d[u'type']) == 1)
            color = d['color']

            give_up = d[u'give_up']
            give_up_msg = d[u'give_up_msg']

            bsdf, bsdf_created = shape.bsdfs_wd.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=shape_time_ms,
                time_active_ms=shape_time_active_ms,
                doi=doi,
                contrast=contrast,
                metallic=metallic,
                color=color,
                give_up=give_up,
                give_up_msg=give_up_msg,
                edit_dict=json.dumps(edit_dict),
                edit_sum=edit_sum,
                edit_nnz=edit_nnz,
                envmap=envmap,
                init_method=init_method,
            )

            if bsdf_created:
                new_objects[get_content_tuple(shape)] = [bsdf]

            if ((not bsdf.image_blob) and 'screenshot' in d
                    and d['screenshot'].startswith('data:image/')):
                save_obj_attr_base64_image(bsdf, 'image_blob', d['screenshot'])

        return new_objects