Example #1
def algorithm_ranks(algorithm_ids,
                    values,
                    error_attr='mean_error',
                    show_progress=False):

    all_ranks = {id: [] for id in algorithm_ids}

    if show_progress:
        print 'organizing scores...'

    iterator = progress_bar(values) if show_progress else values
    photo_to_algs = {}
    for v in iterator:
        pid = v['photo_id']
        if pid in photo_to_algs:
            photo_to_algs[pid].append(v)
        else:
            photo_to_algs[pid] = [v]

    if show_progress:
        print 'ranking algorithms...'

    iterator = photo_to_algs.iteritems()
    if show_progress:
        iterator = progress_bar(iterator)
    for (photo_id, algs) in iterator:
        if len(algs) < len(algorithm_ids):
            continue

        ranks = rankdata([v[error_attr] for v in algs], method='average')
        for i, v in enumerate(algs):
            all_ranks[v['algorithm_id']].append(ranks[i])

    return all_ranks
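The ranking step above relies on scipy's ``rankdata`` with ``method='average'``,
which gives tied values the mean of the ranks they would otherwise occupy, so
accumulated ranks stay comparable across photos. A minimal sketch (the error
values are made up for illustration):

from scipy.stats import rankdata

# two algorithms tie on error 0.12 and share ranks 1 and 2
print rankdata([0.12, 0.30, 0.12, 0.45], method='average')
# -> [ 1.5  3.   1.5  4. ]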
Example #2
def water_zone(zone_gpio, t):
    if DEBUG:
        print("Debug mode, not turning on relay")
    else:
        GPIO.output(zone_gpio, ON)
    progress_bar(progress_bar_time=t)
    if not DEBUG:
        # mirror the guard above: in debug mode the relay was never turned on
        GPIO.output(zone_gpio, OFF)
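Note the two calling conventions for ``progress_bar`` across these examples:
wrapping an iterable (``progress_bar(values)``, sometimes with a show flag, as
in ``progress_bar(algorithm_slugs, show_progress)`` below) and a pure timer
(``progress_bar(progress_bar_time=t)`` above), which displays progress while
waiting ``t`` seconds. A rough sketch of such a dual-mode helper, assuming
tqdm-like behavior (the projects' actual implementation is not shown here):

import sys
import time

def progress_bar(iterable=None, show_progress=True, progress_bar_time=None):
    # hypothetical sketch only; the real helper may differ
    if progress_bar_time is not None:
        # timer mode: sleep for the duration, printing elapsed seconds
        for s in xrange(int(progress_bar_time)):
            sys.stdout.write('\r%d/%d s' % (s + 1, int(progress_bar_time)))
            sys.stdout.flush()
            time.sleep(1)
        sys.stdout.write('\n')
        return
    if not show_progress:
        return iterable

    def wrap(it):
        # iterable mode: yield items while printing a running count
        for i, x in enumerate(it):
            sys.stdout.write('\r%d' % (i + 1))
            sys.stdout.flush()
            yield x
    return wrap(iterable)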
Example #3
def algorithm_ranks(algorithm_ids, values, error_attr='mean_error',
                    show_progress=False):

    all_ranks = {id: [] for id in algorithm_ids}

    if show_progress:
        print 'organizing scores...'

    iterator = progress_bar(values) if show_progress else values
    photo_to_algs = {}
    for v in iterator:
        pid = v['photo_id']
        if pid in photo_to_algs:
            photo_to_algs[pid].append(v)
        else:
            photo_to_algs[pid] = [v]

    if show_progress:
        print 'ranking algorithms...'

    iterator = photo_to_algs.iteritems()
    if show_progress:
        iterator = progress_bar(iterator)
    for (photo_id, algs) in iterator:
        if len(algs) < len(algorithm_ids):
            continue

        ranks = rankdata([v[error_attr] for v in algs], method='average')
        for i, v in enumerate(algs):
            all_ranks[v['algorithm_id']].append(ranks[i])

    return all_ranks
Example #4
    def prepare_plot_thresholds(self, comparisons):
        print "Preparing %s comparisons" % len(comparisons)
        ratios = np.linspace(1.001, 1.5, 512)
        fractions = []
        for r in progress_bar(ratios):
            num = 0
            den = 0
            for c in comparisons:
                if (c.point1.synthetic_diff_intensity /
                        c.point2.synthetic_diff_intensity > r):

                    if c.darker == '2':
                        num += 1
                    den += 1

                elif (c.point2.synthetic_diff_intensity /
                      c.point1.synthetic_diff_intensity > r):

                    if c.darker == '1':
                        num += 1
                    den += 1

            if den:
                fractions.append(float(num) / float(den))
            else:
                fractions.append(0)

        return ratios, fractions, len(comparisons)
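The returned ``(ratios, fractions, n)`` triple is ready to plot directly. A
usage sketch, assuming ``plotter`` is an instance of the class defining this
method and matplotlib is available (as in the plotting examples further down):

import matplotlib.pyplot as plt

ratios, fractions, n = plotter.prepare_plot_thresholds(comparisons)
plt.plot(ratios, fractions)
plt.xlabel('synthetic intensity ratio threshold')
plt.ylabel('fraction of agreeing judgements')
plt.title('%d comparisons' % n)
plt.savefig('agreement-vs-threshold.png')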
Example #5
    def handle(self, *args, **options):
        admin_user = User.objects.get_or_create(
            username='******')[0].get_profile()

        with transaction.atomic():
            for root in progress_bar(glob.glob('%s/*' % args[0])):
                slug = os.path.basename(root)
                print slug

                if 'janivar50' in slug or 'kitchen' in slug:
                    scene_category = PhotoSceneCategory.objects.get(
                        name='kitchen')
                elif 'mike_indoor' in slug or 'sofas' in slug:
                    scene_category = PhotoSceneCategory.objects.get(
                        name='living room')
                else:
                    raise ValueError("Unknown scene category for slug: %s" % slug)

                light_stack = PhotoLightStack.objects.create(slug=slug)

                files = glob.glob('%s/*' % root)
                photos = [
                    add_photo(
                        path=f,
                        user=admin_user,
                        scene_category=scene_category,
                        light_stack=light_stack,
                    )
                    for f in files
                ]
Example #6
def update_photos_num_intrinsic(photo_ids, show_progress=False):
    from intrinsic.models import IntrinsicPoint, \
        IntrinsicPointComparison, IntrinsicImagesDecomposition
    iterator = progress_bar(photo_ids) if show_progress else photo_ids
    for photo_id in iterator:
        num_comparisons = IntrinsicPointComparison.objects \
            .filter(photo_id=photo_id) \
            .filter(Q(darker__isnull=False, darker_score__gt=0) |
                    Q(darker_method='A')) \
            .count()
        num_points = IntrinsicPoint.objects \
            .filter(photo_id=photo_id) \
            .count()
        errors = IntrinsicImagesDecomposition.objects \
            .filter(photo_id=photo_id,
                    algorithm__active=True,
                    mean_sum_error__isnull=False) \
            .values_list('mean_error')
        if errors:
            median_intrinsic_error = np.median(errors)
        else:
            median_intrinsic_error = None
        Photo.objects.filter(id=photo_id).update(
            num_intrinsic_comparisons=num_comparisons,
            num_intrinsic_points=num_points,
            median_intrinsic_error=median_intrinsic_error,
        )
Example #7
    def forwards(self, orm):
        # Adding field 'Photo.orig_width'
        db.add_column(
            u'photos_photo',
            'orig_width',
            self.gf('django.db.models.fields.IntegerField')(null=True),
            keep_default=False)

        # Adding field 'Photo.orig_height'
        db.add_column(
            u'photos_photo',
            'orig_height',
            self.gf('django.db.models.fields.IntegerField')(null=True),
            keep_default=False)

        storage = get_opensurfaces_storage()

        Photo = orm['photos.Photo']
        for (id, path) in progress_bar(
                Photo.objects.values_list('id', 'image_orig')):
            try:
                img = Image.open(storage.open(path))
                width, height = img.size
                Photo.objects.filter(id=id).update(orig_width=width,
                                                   orig_height=height)
            except Exception as e:
                print e
Example #8
def update_photos_num_intrinsic(photo_ids, show_progress=False):
    from intrinsic.models import IntrinsicPoint, \
        IntrinsicPointComparison, IntrinsicImagesDecomposition
    iterator = progress_bar(photo_ids) if show_progress else photo_ids
    for photo_id in iterator:
        num_comparisons = IntrinsicPointComparison.objects \
            .filter(photo_id=photo_id) \
            .filter(Q(darker__isnull=False, darker_score__gt=0) |
                    Q(darker_method='A')) \
            .count()
        num_points = IntrinsicPoint.objects \
            .filter(photo_id=photo_id) \
            .count()
        errors = IntrinsicImagesDecomposition.objects \
            .filter(photo_id=photo_id,
                    algorithm__active=True,
                    mean_sum_error__isnull=False) \
            .values_list('mean_error')
        if errors:
            median_intrinsic_error = np.median(errors)
        else:
            median_intrinsic_error = None
        Photo.objects.filter(id=photo_id).update(
            num_intrinsic_comparisons=num_comparisons,
            num_intrinsic_points=num_points,
            median_intrinsic_error=median_intrinsic_error,
        )
Example #9
    def prepare_plot_thresholds(self, comparisons):
        print "Preparing %s comparisons" % len(comparisons)
        ratios = np.linspace(1.001, 1.5, 512)
        fractions = []
        for r in progress_bar(ratios):
            num = 0
            den = 0
            for c in comparisons:
                if c.point1.synthetic_diff_intensity / c.point2.synthetic_diff_intensity > r:

                    if c.darker == "2":
                        num += 1
                    den += 1

                elif c.point2.synthetic_diff_intensity / c.point1.synthetic_diff_intensity > r:

                    if c.darker == "1":
                        num += 1
                    den += 1

            if den:
                fractions.append(float(num) / float(den))
            else:
                fractions.append(0)

        return ratios, fractions, len(comparisons)
Example #10
    def user_change(self):

        for min_separation in [Decimal('0.03'), Decimal('0.07')]:
            sum_confusion_matrix = np.zeros((3, 3))
            eq_weight = 0.0
            neq_weight = 0.0
            eq_count = 0.0
            neq_count = 0.0
            for light_stack in PhotoLightStack.objects.all():
                confusion_matrix = np.zeros((3, 3))
                confusion_matrix_count = np.zeros((3, 3))

                photos = light_stack.photos.all()
                print '%s photos' % len(photos)
                for photo1 in photos:
                    comparisons1 = photo1.intrinsic_comparisons \
                        .filter(darker__isnull=False,
                                point1__min_separation=min_separation) \
                        .select_related('point1', 'point2')
                    map1 = comparisons_to_map(comparisons1)
                    for photo2 in photos:
                        if photo1.id == photo2.id:
                            continue

                        comparisons2 = photo2.intrinsic_comparisons \
                            .filter(darker__isnull=False,
                                    point1__min_separation=min_separation) \
                            .select_related('point1', 'point2')

                        for c2 in progress_bar(comparisons2):
                            c1 = map1.get(comparison_key(c2), None)

                            if c1:
                                confusion_matrix[
                                    DARKER_TO_IDX[c1.darker],
                                    DARKER_TO_IDX[c2.darker]
                                ] += 0.5 * (c1.darker_score + c2.darker_score)

                                if c1.darker == c2.darker:
                                    eq_weight += c1.darker_score
                                    eq_weight += c2.darker_score
                                    eq_count += 2.0
                                else:
                                    neq_weight += c1.darker_score
                                    neq_weight += c2.darker_score
                                    neq_count += 2.0

                sum_confusion_matrix += confusion_matrix
                #print 'photo', min_separation, confusion_matrix // 2

            #print min_separation, sum_confusion_matrix // 2

            print min_separation, 'equal', eq_weight / eq_count
            print min_separation, 'ineq', neq_weight / neq_count

            print min_separation, 1 - (
                float(np.trace(sum_confusion_matrix)) /
                np.sum(sum_confusion_matrix)
            )
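``DARKER_TO_IDX`` is not defined in these examples. Given that ``darker``
takes the three values '1', '2', and 'E' (point 1 darker, point 2 darker,
equal; see the responses handled in the other examples) and the confusion
matrix is 3x3, a plausible definition would be:

# hypothetical mapping; the project's actual index order may differ
DARKER_TO_IDX = {'1': 0, '2': 1, 'E': 2}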
Example #11
    def user_change(self):

        for min_separation in [Decimal('0.03'), Decimal('0.07')]:
            sum_confusion_matrix = np.zeros((3, 3))
            eq_weight = 0.0
            neq_weight = 0.0
            eq_count = 0.0
            neq_count = 0.0
            for light_stack in PhotoLightStack.objects.all():
                confusion_matrix = np.zeros((3, 3))
                confusion_matrix_count = np.zeros((3, 3))

                photos = light_stack.photos.all()
                print '%s photos' % len(photos)
                for photo1 in photos:
                    comparisons1 = photo1.intrinsic_comparisons \
                        .filter(darker__isnull=False,
                                point1__min_separation=min_separation) \
                        .select_related('point1', 'point2')
                    map1 = comparisons_to_map(comparisons1)
                    for photo2 in photos:
                        if photo1.id == photo2.id:
                            continue

                        comparisons2 = photo2.intrinsic_comparisons \
                            .filter(darker__isnull=False,
                                    point1__min_separation=min_separation) \
                            .select_related('point1', 'point2')

                        for c2 in progress_bar(comparisons2):
                            c1 = map1.get(comparison_key(c2), None)

                            if c1:
                                confusion_matrix[
                                    DARKER_TO_IDX[c1.darker],
                                    DARKER_TO_IDX[c2.darker]] += 0.5 * (
                                        c1.darker_score + c2.darker_score)

                                if c1.darker == c2.darker:
                                    eq_weight += c1.darker_score
                                    eq_weight += c2.darker_score
                                    eq_count += 2.0
                                else:
                                    neq_weight += c1.darker_score
                                    neq_weight += c2.darker_score
                                    neq_count += 2.0

                sum_confusion_matrix += confusion_matrix
                #print 'photo', min_separation, confusion_matrix // 2

            #print min_separation, sum_confusion_matrix // 2

            print min_separation, 'equal', eq_weight / eq_count
            print min_separation, 'ineq', neq_weight / neq_count

            print min_separation, 1 - (float(np.trace(sum_confusion_matrix)) /
                                       np.sum(sum_confusion_matrix))
Example #12
    def get_user_ids(self):
        user_ids = IntrinsicPointComparisonResponse.objects \
            .filter(user__exclude_from_aggregation=False) \
            .order_by().distinct('user').values_list('user_id', flat=True)
        return [
            u for u in progress_bar(user_ids)
            if (UserProfile.objects.filter(user_id=u, blocked=False).exists()
                and ExperimentWorker.objects.filter(
                    worker_id=u, blocked=False,
                    num_test_correct__gte=20).exists())
        ]
Example #13
def update_synthetic_diff_intensity(show_progress=False):
    """ Updates these fields for synthetic images:
        :attr:`intrinsic.models.IntrinsicPoint.synthetic_diff_intensity`
        :attr:`intrinsic.models.IntrinsicPointComparison.synthetic_diff_intensity_ratio`
        :attr:`intrinsic.models.IntrinsicPointComparison.synthetic_diff_cv`
    """

    decomp_iterator = IntrinsicSyntheticDecomposition.objects.all()
    if show_progress:
        decomp_iterator = progress_bar(decomp_iterator)

    for decomp in decomp_iterator:
        diff_col, diff_ind, diff_dir, comb = decomp.open_multilayer_exr_layers(
            ['diff_col', 'diff_ind', 'diff_dir', 'combined'])
        diff = diff_col * (diff_ind + diff_dir)

        points = IntrinsicPoint.objects \
            .filter(photo_id=decomp.photo_id)

        for p in points:
            # diffuse color intensity
            p_diff_col = diff_col[int(p.y * diff_col.shape[0]), int(p.x * diff_col.shape[1]), :]
            p.synthetic_diff_intensity = np.mean(p_diff_col)

            # diffuse energy fraction
            p_diff = diff[int(p.y * diff.shape[0]), int(p.x * diff.shape[1]), :]
            p_comb = comb[int(p.y * comb.shape[0]), int(p.x * comb.shape[1]), :]
            p.synthetic_diff_fraction = np.mean(p_diff) / np.mean(p_comb)

            # coefficient of variation of the local 3x3 block
            px = int(p.x * diff_col.shape[1])
            py = int(p.y * diff_col.shape[0])
            p_diff_col_block = diff_col[
                max(py - 1, 0):min(py + 1, diff_col.shape[0]),
                max(px - 1, 0):min(px + 1, diff_col.shape[1]), :]
            mu_block = np.mean(p_diff_col_block)
            if mu_block > 0:
                p.synthetic_diff_cv = np.std(p_diff_col_block) / mu_block
            else:
                p.synthetic_diff_cv = None

            p.save()

        comparisons = IntrinsicPointComparison.objects \
            .filter(photo_id=decomp.photo_id) \
            .select_related('point1', 'point2')

        for c in comparisons:
            if c.point1.synthetic_diff_intensity >= 1e-3 and c.point2.synthetic_diff_intensity >= 1e-3:
                c.synthetic_diff_intensity_ratio = (
                    c.point1.synthetic_diff_intensity /
                    c.point2.synthetic_diff_intensity
                )
            else:
                c.synthetic_diff_intensity_ratio = None
            c.save()
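The points store normalized coordinates in [0, 1), so ``int(p.y * shape[0])``
maps them to pixel rows. A self-contained sketch of the coefficient-of-variation
computation on a 3x3 block (the array is random, purely for illustration;
slice ends are exclusive, hence the +2):

import numpy as np

diff_col = np.random.rand(480, 640, 3)   # stand-in for the diff_col layer
x, y = 0.25, 0.5                         # normalized point coordinates
px = int(x * diff_col.shape[1])
py = int(y * diff_col.shape[0])
block = diff_col[max(py - 1, 0):min(py + 2, diff_col.shape[0]),
                 max(px - 1, 0):min(px + 2, diff_col.shape[1]), :]
mu = np.mean(block)
cv = np.std(block) / mu if mu > 0 else None
print cv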
Example #14
    def get_user_ids(self):
        user_ids = IntrinsicPointComparisonResponse.objects \
            .filter(user__exclude_from_aggregation=False) \
            .order_by().distinct('user').values_list('user_id', flat=True)
        return [
            u for u in progress_bar(user_ids) if (
                UserProfile.objects.filter(user_id=u, blocked=False).exists()
                and ExperimentWorker.objects.filter(
                    worker_id=u, blocked=False,
                    num_test_correct__gte=20).exists()
            )
        ]
Example #15
    def handle(self, *args, **options):
        image_size = 512
        photo_ids = [int(id) for id in open('photo-ids.txt').readlines()]

        outdir = 'photos-outdir/'
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        for photo_id in progress_bar(photo_ids):
            export_photo_task.delay(
                photo_id, image_size=image_size, outdir=outdir)
Example #16
    def alg_change(self):
        algorithms = IntrinsicImagesAlgorithm.objects.filter(active=True) \
            .order_by('slug', '-id')
        algorithm_errors = {
            alg: [] for alg in algorithms
        }

        light_stacks = PhotoLightStack.objects.all()

        for alg in progress_bar(algorithms):
            use_alg = True
            for light_stack in light_stacks:
                photo_ids = light_stack.photos.values_list('id', flat=True)

                decompositions = IntrinsicImagesDecomposition.objects.filter(
                    algorithm=alg, photo_id__in=photo_ids)

                if len(decompositions) != len(photo_ids):
                    use_alg = False
                    break

                errors = []
                for d1 in decompositions:
                    r1 = open_image(d1.reflectance_image)
                    r1 = srgb_to_rgb(np.asarray(r1).astype(float) / 255.0)
                    r1 = np.mean(r1, axis=-1)

                    for d2 in decompositions:
                        if d1.photo_id == d2.photo_id:
                            continue
                        r2 = open_image(d2.reflectance_image)
                        r2 = srgb_to_rgb(np.asarray(r2).astype(float) / 255.0)
                        r2 = np.mean(r2, axis=-1)

                        errors.append(lmse(r1, r2))
                algorithm_errors[alg].append(np.mean(errors))

            if use_alg:
                print alg.slug, alg.id, \
                    np.mean(algorithm_errors[alg]), \
                    np.median(algorithm_errors[alg]), \
                    np.std(algorithm_errors[alg])

        errors = [
            (alg, np.mean(errors), np.median(errors), np.std(errors))
            for alg, errors in algorithm_errors.iteritems()
            if len(errors) == len(light_stacks)
        ]
        errors.sort(key=lambda x: x[1])

        for alg, e, m, s in errors:
            print alg.slug, alg.id, e, m, s
Example #17
    def handle(self, *args, **options):

        # dense sampling:
        kwargs = {}
        kwargs['min_separation'] = Decimal('0.03')
        kwargs['avoid_existing_points'] = False
        kwargs['chromaticity_thresh'] = None

        with transaction.atomic():
            for light_stack in progress_bar(PhotoLightStack.objects.all()):
                photos = light_stack.photos.all()

                photo_id = photos[0].id

                # sample the new photo
                sample_intrinsic_points_task(
                    photo_id=photo_id, min_edges=1, **kwargs)

                # add points and edges
                points = list(
                    IntrinsicPoint.objects.filter(
                        photo_id=photo_id,
                        min_separation=kwargs['min_separation']
                    )
                )
                edges = list(
                    IntrinsicPointComparison.objects.filter(
                        photo_id=photo_id,
                        point1__min_separation=kwargs['min_separation'],
                        point2__min_separation=kwargs['min_separation'],
                    )
                )

                for photo in photos[1:]:
                    point_map = {}
                    for old in points:
                        # sample new color
                        rgb = photo.get_pixel(old.x, old.y, width='300')
                        sRGB = '%02x%02x%02x' % rgb
                        # copy old position
                        point_map[old.id] = IntrinsicPoint.objects.create(
                            photo=photo, x=old.x, y=old.y, sRGB=sRGB,
                            min_separation=old.min_separation,
                        )
                    for old in edges:
                        IntrinsicPointComparison.objects.create(
                            photo=photo,
                            point1=point_map[old.point1.id],
                            point2=point_map[old.point2.id],
                        )
Example #18
    def alg_change(self):
        algorithms = IntrinsicImagesAlgorithm.objects.filter(active=True) \
            .order_by('slug', '-id')
        algorithm_errors = {alg: [] for alg in algorithms}

        light_stacks = PhotoLightStack.objects.all()

        for alg in progress_bar(algorithms):
            use_alg = True
            for light_stack in light_stacks:
                photo_ids = light_stack.photos.values_list('id', flat=True)

                decompositions = IntrinsicImagesDecomposition.objects.filter(
                    algorithm=alg, photo_id__in=photo_ids)

                if len(decompositions) != len(photo_ids):
                    use_alg = False
                    break

                errors = []
                for d1 in decompositions:
                    r1 = open_image(d1.reflectance_image)
                    r1 = srgb_to_rgb(np.asarray(r1).astype(float) / 255.0)
                    r1 = np.mean(r1, axis=-1)

                    for d2 in decompositions:
                        if d1.photo_id == d2.photo_id:
                            continue
                        r2 = open_image(d2.reflectance_image)
                        r2 = srgb_to_rgb(np.asarray(r2).astype(float) / 255.0)
                        r2 = np.mean(r2, axis=-1)

                        errors.append(lmse(r1, r2))
                algorithm_errors[alg].append(np.mean(errors))

            if use_alg:
                print alg.slug, alg.id, \
                    np.mean(algorithm_errors[alg]), \
                    np.median(algorithm_errors[alg]), \
                    np.std(algorithm_errors[alg])

        errors = [(alg, np.mean(errors), np.median(errors), np.std(errors))
                  for alg, errors in algorithm_errors.iteritems()
                  if len(errors) == len(light_stacks)]
        errors.sort(key=lambda x: x[1])

        for alg, e, m, s in errors:
            print alg.slug, alg.id, e, m, s
Example #19
def update_flickr_users(ids, show_progress=False):
    """ Scrape Flickr for information about Flickr User profiles.

    :param ids: list of database ids (not Flickr usernames)
    """

    values = FlickrUser.objects \
        .filter(id__in=ids) \
        .values_list('id', 'username')

    if show_progress:
        values = progress_bar(values)

    for (id, username) in values:
        html = download('https://www.flickr.com/people/%s/' % username)
        if not html:
            continue

        d = pq(html)

        profile = d('div.profile-section')
        given_name = profile('span.given-name').text().strip()
        family_name = profile('span.family-name').text().strip()
        website_name = profile('a.url').text().strip()
        website_url = profile('a.url').attr('href')
        if website_url:
            website_url = website_url.strip()
        else:
            website_url = ""

        person = d('div.person')
        display_name = person('span.character-name-holder').text().strip()
        sub_name = person('h2').text().strip()

        FlickrUser.objects.filter(id=id).update(
            display_name=display_name,
            sub_name=sub_name,
            given_name=given_name,
            family_name=family_name,
            website_name=website_name,
            website_url=website_url,
        )

        if show_progress:
            print '%s: display: "%s" (%s), name: "%s" "%s", web: "%s" (%s)' % (
                username, display_name, sub_name, given_name, family_name,
                website_name, website_url)
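The ``pq`` helper above is presumably pyquery (a jQuery-like API over lxml);
that is an assumption based on the selector syntax, but the scraping pattern
looks like this (the HTML snippet is illustrative):

from pyquery import PyQuery as pq

d = pq('<html><div class="profile-section">'
       '<span class="given-name"> Ada </span></div></html>')
print d('div.profile-section')('span.given-name').text().strip()  # Ada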
Example #20
def update_flickr_users(ids, show_progress=False):
    """ Scrape Flickr for information about Flickr User profiles.

    :param ids: list of database ids (not Flickr usernames)
    """

    values = FlickrUser.objects \
        .filter(id__in=ids) \
        .values_list('id', 'username')

    if show_progress:
        values = progress_bar(values)

    for (id, username) in values:
        html = download('https://www.flickr.com/people/%s/' % username)
        if not html:
            continue

        d = pq(html)

        profile = d('div.profile-section')
        given_name = profile('span.given-name').text().strip()
        family_name = profile('span.family-name').text().strip()
        website_name = profile('a.url').text().strip()
        website_url = profile('a.url').attr('href')
        if website_url:
            website_url = website_url.strip()
        else:
            website_url = ""

        person = d('div.person')
        display_name = person('span.character-name-holder').text().strip()
        sub_name = person('h2').text().strip()

        FlickrUser.objects.filter(id=id).update(
            display_name=display_name,
            sub_name=sub_name,
            given_name=given_name,
            family_name=family_name,
            website_name=website_name,
            website_url=website_url,
        )

        if show_progress:
            print '%s: display: "%s" (%s), name: "%s" "%s", web: "%s" (%s)' % (
                username, display_name, sub_name, given_name, family_name,
                website_name, website_url)
Example #21
    def handle(self, *args, **options):
        comparisons = []

        comparisons += IntrinsicPointComparison.objects.all() \
            .filter(point1_image_darker__isnull=True) \
            .values_list('id', 'point1__sRGB', 'point2__sRGB')

        comparisons += IntrinsicPointComparisonResponse.objects.all() \
            .filter(reflectance_eq=False, reflectance_dd__isnull=True) \
            .order_by().distinct('comparison') \
            .values_list('comparison__id', 'comparison__point1__sRGB', 'comparison__point2__sRGB')

        comparisons = list(set(comparisons))

        for (id, sRGB1, sRGB2) in progress_bar(comparisons):
            c1 = RGBColor()
            c1.set_from_rgb_hex(sRGB1)
            l1 = c1.convert_to('lab').lab_l

            c2 = RGBColor()
            c2.set_from_rgb_hex(sRGB2)
            l2 = c2.convert_to('lab').lab_l

            if l1 < l2:
                IntrinsicPointComparison.objects \
                    .filter(id=id).update(point1_image_darker=True)
                IntrinsicPointComparisonResponse.objects \
                    .filter(comparison_id=id, darker="1") \
                    .update(reflectance_eq=False, reflectance_dd=True)
                IntrinsicPointComparisonResponse.objects \
                    .filter(comparison_id=id, darker="2") \
                    .update(reflectance_eq=False, reflectance_dd=False)
            else:
                IntrinsicPointComparison.objects \
                    .filter(id=id).update(point1_image_darker=False)
                IntrinsicPointComparisonResponse.objects \
                    .filter(comparison_id=id, darker="1") \
                    .update(reflectance_eq=False, reflectance_dd=False)
                IntrinsicPointComparisonResponse.objects \
                    .filter(comparison_id=id, darker="2") \
                    .update(reflectance_eq=False, reflectance_dd=True)

            IntrinsicPointComparisonResponse.objects \
                .filter(comparison_id=id, darker="E") \
                .update(reflectance_eq=True, reflectance_dd=None)
Example #22
    def handle(self, *args, **options):
        basename = 'results_sweep_thresh'
        with open('%s.pkl' % basename) as f:
            thresh_to_attr_to_results = pickle.load(f)

        thresholds = sorted(thresh_to_attr_to_results.keys())

        algorithm_slugs = list(
            IntrinsicImagesAlgorithm.objects.filter(
                active=True).order_by().distinct('slug').values_list(
                    'slug', flat=True))

        for show_error in [True, False]:
            show_error_str = "yerr" if show_error else "nerr"
            for error_attr in progress_bar(
                    IntrinsicImagesDecomposition.ERROR_ATTRS):
                for kind in ['rank', 'error']:
                    for i, slug in enumerate(algorithm_slugs):
                        val_std = [
                            thresh_to_attr_to_results[t][error_attr][
                                'slug_to_%s' % kind][slug] for t in thresholds
                        ]

                        vals = [x[0] for x in val_std]

                        if show_error:
                            stds = [x[1] for x in val_std]
                            thresholds_jitter = [
                                t + i * 0.001 for t in thresholds
                            ]
                            plt.errorbar(thresholds_jitter, vals, yerr=stds)
                        else:
                            plt.plot(thresholds, vals)

                    plt.xlim([0.0, 1.0])
                    plt.ylim([0.0, (0.6 if kind == 'error' else 10.0)])
                    plt.legend(algorithm_slugs, prop={'size': 8})
                    plt.xlabel("delta")

                    plt.ylabel("%s%s" %
                               (error_attr, " rank" if kind == "rank" else ""))

                    plt.savefig('%s-%s-%s.png' %
                                (error_attr, kind, show_error_str))
                    plt.close()
Example #23
    def handle(self, *args, **options):
        algorithms = IntrinsicImagesAlgorithm.objects \
            .filter(iiw_best=True) \
            .order_by('iiw_mean_error')

        export = [
            OrderedDict([
                ('id', a.id),
                ('slug', a.slug),
                ('citation_html', a.citation.citation_html() if a.citation else None),
                ('iiw_mean_error', a.iiw_mean_error),
                ('iiw_mean_runtime', a.iiw_mean_runtime),
                ('parameters', json.loads(a.parameters)),
                ('intrinsic_images_decompositions', [
                    OrderedDict([
                        ('id', d.id),
                        ('photo_id', d.photo_id),
                        ('runtime', d.runtime),
                        ('mean_error', d.mean_error),
                        ('mean_sparse_error', d.mean_sparse_error),
                        ('mean_dense_error', d.mean_dense_error),
                        ('original_image', d.photo.image_orig.url),
                        ('reflectance_image', d.reflectance_image.url),
                        ('shading_image', d.shading_image.url),
                        ('attribution_name', d.photo.attribution_name
                         if d.photo.attribution_name
                         else (d.photo.flickr_user.display_name if d.photo.flickr_user else None)),
                        ('attribution_url', d.photo.attribution_url if d.photo.attribution_url else d.photo.get_flickr_url()),
                        ('license_name', d.photo.license.name if d.photo.license else None),
                        ('license_url', d.photo.license.url if d.photo.license else None),
                    ])
                    for d in a.intrinsic_images_decompositions
                    .filter(photo__in_iiw_dataset=True)
                    .order_by('photo__id')
                ]),
            ])
            for a in progress_bar(algorithms)
        ]

        print 'Writing json...'
        with open('intrinsic-decompositions-export.json', 'w') as f:
            json.dump(export, f, indent=2, sort_keys=False)
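Using ``OrderedDict`` together with ``sort_keys=False`` keeps the exported
JSON keys in insertion order; for example:

import json
from collections import OrderedDict

row = OrderedDict([('id', 1), ('slug', 'baseline'), ('iiw_mean_error', 0.25)])
print json.dumps(row)
# {"id": 1, "slug": "baseline", "iiw_mean_error": 0.25}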
Example #24
    def handle(self, *args, **options):
        photos = Photo.objects.filter(id__in=[95686, 97532, 116625, 85877, 69122, 104870])
        for p in photos:
            decomp = IntrinsicImagesDecomposition.objects.get(photo_id=p.id, algorithm_id=1141)
            img_i = p.open_image(width='orig')
            img_r = open_image(decomp.reflectance_image)
            img_s = open_image(decomp.shading_image)

            if not os.path.exists('example-intrinsic-segments/%s' % p.id):
                os.makedirs('example-intrinsic-segments/%s' % p.id)

            img_i.save('example-intrinsic-segments/%s/image.jpg' % p.id)
            img_r.save('example-intrinsic-segments/%s/reflectance.png' % p.id)
            img_s.save('example-intrinsic-segments/%s/shading.png' % p.id)

            for s in progress_bar(p.material_shapes.all()):
                mask_complex_polygon(img_i, s.vertices, s.triangles)[0].save(
                    'example-intrinsic-segments/%s/shape-%s-image.png' % (p.id, s.id))
                mask_complex_polygon(img_r, s.vertices, s.triangles)[0].save(
                    'example-intrinsic-segments/%s/shape-%s-reflectance.png' % (p.id, s.id))
                mask_complex_polygon(img_s, s.vertices, s.triangles)[0].save(
                    'example-intrinsic-segments/%s/shape-%s-shading.png' % (p.id, s.id))
Example #25
    def handle(self, *args, **options):
        basename = 'results_sweep_thresh'
        with open('%s.pkl' % basename) as f:
            thresh_to_attr_to_results = pickle.load(f)

        thresholds = sorted(thresh_to_attr_to_results.keys())

        algorithm_slugs = list(
            IntrinsicImagesAlgorithm.objects.filter(active=True)
            .order_by().distinct('slug').values_list('slug', flat=True)
        )

        for show_error in [True, False]:
            show_error_str = "yerr" if show_error else "nerr"
            for error_attr in progress_bar(IntrinsicImagesDecomposition.ERROR_ATTRS):
                for kind in ['rank', 'error']:
                    for i, slug in enumerate(algorithm_slugs):
                        val_std = [
                            thresh_to_attr_to_results[t][error_attr]['slug_to_%s' % kind][slug]
                            for t in thresholds
                        ]

                        vals = [x[0] for x in val_std]

                        if show_error:
                            stds = [x[1] for x in val_std]
                            thresholds_jitter = [t + i * 0.001 for t in thresholds]
                            plt.errorbar(thresholds_jitter, vals, yerr=stds)
                        else:
                            plt.plot(thresholds, vals)

                    plt.xlim([0.0, 1.0])
                    plt.ylim([0.0, (0.6 if kind == 'error' else 10.0)])
                    plt.legend(algorithm_slugs, prop={'size': 8})
                    plt.xlabel("delta")

                    plt.ylabel("%s%s" % (error_attr, " rank" if kind == "rank" else ""))

                    plt.savefig('%s-%s-%s.png' % (error_attr, kind, show_error_str))
                    plt.close()
Example #26
    def handle(self, *args, **options):
        if len(args) == 1:
            regex = args[0]
            experiments = Experiment.objects.filter(slug__regex=regex)
            if experiments.count() == Experiment.objects.all().count():
                print 'Expiring all experiments'
            else:
                print 'Expiring: %s' % experiments.values_list('slug', flat=True)
        else:
            print "Usage: ./manage.py mtexpire '<regex>'"
            return

        print 'MTURK HOST:', settings.MTURK_HOST
        hits = MtHit.objects.filter(
            expired=False, sandbox=settings.MTURK_SANDBOX,
        )
        if regex != '.*':
            hits = hits.filter(
                hit_type__experiment__slug__regex=regex
            )

        # Expire by highest paying first, just in case you accidentally added
        # HITs that pay $100 and want to expire it quickly.  We have to fetch
        # all the items anyway, so might as well do the sort in python.
        hits = sorted(hits.select_related('hit_type'),
                      key=lambda x: x.hit_type.reward * x.num_assignments_available,
                      reverse=True)

        count = 0
        try:
            for hit in progress_bar(hits):
                print ''
                try:
                    if hit.expire():
                        count += 1
                except Exception as exc:
                    print exc
        finally:
            print 'Expired %d hits' % count
Example #27
    def handle(self, *args, **options):
        if len(args) == 1:
            regex = args[0]
        else:
            print "Usage: ./manage.py mtapprove_loop '<regex>'"
            return

        # it takes a little while for MTurk to let you approve an assignment
        # after submission
        sleep_time = 5

        while True:
            assignments = MtAssignment.objects \
                .filter(hit__sandbox=settings.MTURK_SANDBOX, status='S')

            if regex != '.*':
                assignments = assignments.filter(
                    hit__hit_type__experiment__slug__regex=regex)

            c = 0
            for a in progress_bar(assignments):
                try:
                    a.approve(feedback="Thank you!")
                    c += 1
                except Exception as e:
                    if 'This operation can be called with a status of: null' in str(e):
                        print '    (MTurk not ready for approval yet)'
                    else:
                        print e

            if c > 0:
                sleep_time = max(sleep_time // 2, 5)
            else:
                sleep_time = min(sleep_time * 2, 60)

            print "approved %s assignments; sleep %s seconds..." % (c,
                                                                    sleep_time)
            time.sleep(sleep_time)
Example #28
    def forwards(self, orm):
        # Adding field 'Photo.orig_width'
        db.add_column(u'photos_photo', 'orig_width',
                      self.gf('django.db.models.fields.IntegerField')(null=True),
                      keep_default=False)

        # Adding field 'Photo.orig_height'
        db.add_column(u'photos_photo', 'orig_height',
                      self.gf('django.db.models.fields.IntegerField')(null=True),
                      keep_default=False)

        storage = get_opensurfaces_storage()

        Photo = orm['photos.Photo']
        for (id, path) in progress_bar(Photo.objects.values_list('id', 'image_orig')):
            try:
                img = Image.open(storage.open(path))
                width, height = img.size
                Photo.objects.filter(id=id).update(
                    orig_width=width, orig_height=height)
            except Exception as e:
                print e
Example #29
    def handle(self, *args, **options):

        if len(args) < 1:
            print 'Usage: ./manage.py intrinsic_download_photos <outdir>'
            return

        outdir = args[0]
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        photo_ids = IntrinsicPointComparison.objects \
            .filter(darker_score__gt=0, darker__isnull=False) \
            .order_by() \
            .distinct('photo') \
            .values_list('photo_id')

        qset = Photo.objects.filter(id__in=photo_ids)
        for p in progress_bar(qset):
            image = p.image_512
            filename = os.path.join(outdir, '%s.jpg' % p.id)
            with open(filename, 'wb') as f:
                image.seek(0)
                shutil.copyfileobj(image, f)
Example #30
    def handle(self, *args, **options):
        if len(args) == 1:
            regex = args[0]
        else:
            print "Usage: ./manage.py mtapprove_loop '<regex>'"
            return

        # it takes a little while for MTurk to let you approve an assignment
        # after submission
        sleep_time = 5

        while True:
            assignments = MtAssignment.objects \
                .filter(hit__sandbox=settings.MTURK_SANDBOX, status='S')

            if regex != '.*':
                assignments = assignments.filter(
                    hit__hit_type__experiment__slug__regex=regex)

            c = 0
            for a in progress_bar(assignments):
                try:
                    a.approve(feedback="Thank you!")
                    c += 1
                except Exception as e:
                    if 'This operation can be called with a status of: null' in str(e):
                        print '    (MTurk not ready for approval yet)'
                    else:
                        print e

            if c > 0:
                sleep_time = max(sleep_time // 2, 5)
            else:
                sleep_time = min(sleep_time * 2, 60)

            print "approved %s assignments; sleep %s seconds..." % (c, sleep_time)
            time.sleep(sleep_time)
Example #31
    def handle(self, *args, **options):
        rows = IntrinsicImagesDecomposition.objects.all() \
            .values_list('reflectance_image', 'shading_image')
        for names in progress_bar(rows):
            for n in names:
                upload_intrinsic_file.delay(n)
Example #32
def algorithm_cv_ranking(algorithm_slugs=None, error_attr='mean_error',
                         show_progress=False):
    """
    Compute algorithm errors and rankings using cross-validation.

    :param algorithm_slugs: algorithms (by ``slug``) to consider.  If ``None``,
        then all algorithms with ``active=True`` are used.

    :param error_attr: error metric to use (attribute of
        ``IntrinsicImagesDecomposition``).

    :return: a ``dict`` with the following entries:

        .. code-block:: py

            {
                'slug_to_rank': { slug: (rank_mean, rank_std) }
                'slug_to_error': { slug: (error_mean, error_std) }
            }

        where ``slug`` is an entry from ``algorithm_slugs``.
    """

    if not algorithm_slugs:
        algorithm_slugs = list(
            IntrinsicImagesAlgorithm.objects.filter(active=True)
            .order_by().distinct('slug').values_list('slug', flat=True)
        )

    if show_progress:
        print 'Evaluating %s algorithms: %s...' % (
            len(algorithm_slugs), algorithm_slugs)

    if show_progress:
        print 'Computing algorithm cross-validation errors...'

    # slug_to_photo_to_error: { slug: { photo: error } }
    slug_to_photo_to_error = {
        slug: algorithm_cv_errors(slug, error_attr)
        for slug in progress_bar(algorithm_slugs, show_progress)
    }

    # slug_to_errors: { slug: [error] }
    slug_to_errors = {
        slug: photo_to_error.values()
        for slug, photo_to_error in slug_to_photo_to_error.iteritems()
    }

    # slug_to_error: { slug: mean error, std error }
    slug_to_error = {
        slug: (np.mean(errors), np.std(errors))
        for slug, errors in slug_to_errors.iteritems()
    }

    if show_progress:
        print 'Computing ranks...'

    # photo_to_slug_error: { photo: [ (slug, error) ] }
    photo_to_slug_error = {}
    for slug, photo_ids in slug_to_photo_to_error.iteritems():
        for p, error in photo_ids.iteritems():
            if p in photo_to_slug_error:
                photo_to_slug_error[p].append((slug, error))
            else:
                photo_to_slug_error[p] = [(slug, error)]

    # slug_to_ranks: { slug: [ranks] }
    slug_to_ranks = {slug: [] for slug in algorithm_slugs}
    for photo, errors in photo_to_slug_error.iteritems():
        if len(errors) < len(algorithm_slugs):
            continue
        ranks = rankdata([e[1] for e in errors], method='average')
        for i, v in enumerate(errors):
            slug_to_ranks[v[0]].append(ranks[i])

    # slug_to_rank: { slug: mean rank, std rank }
    slug_to_rank = {
        slug: (np.mean(ranks), np.std(ranks))
        for slug, ranks in slug_to_ranks.iteritems()
    }

    if show_progress:
        print 'Computing ranks... done'

    return {
        'slug_to_rank': slug_to_rank,
        'slug_to_error': slug_to_error,
    }
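A sketch of consuming the returned structure, sorted by mean rank (the slugs
and numbers are whatever the database contains):

results = algorithm_cv_ranking(show_progress=True)
for slug, (rank_mean, rank_std) in sorted(
        results['slug_to_rank'].iteritems(), key=lambda kv: kv[1][0]):
    error_mean, error_std = results['slug_to_error'][slug]
    print '%s: rank %.2f +/- %.2f, error %.3f +/- %.3f' % (
        slug, rank_mean, rank_std, error_mean, error_std)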
Example #33
    def handle(self, *args, **options):
        admin_user = User.objects.get_or_create(
            username='******')[0].get_profile()

        for filename in progress_bar(args):
            if not os.path.exists(filename):
                raise ValueError("File does not exist: '%s'" % filename)

            blendswap_id = os.path.basename(filename).split('_')[0]
            license, scene_url, scene_artist = \
                License.get_for_blendswap_scene(blendswap_id)

            print 'file:', filename
            print 'license:', license
            print 'url:', scene_url
            print 'artist:', scene_artist

            tmpdir = tempfile.mkdtemp()
            try:
                print "Loading %s..." % filename
                md5 = md5sum(filename)
                if IntrinsicSyntheticDecomposition.objects.filter(
                        md5=md5).exists():
                    print "Already added: %s" % filename
                    continue

                multilayer = open_multilayer_exr(filename,
                                                 tonemap=True,
                                                 thumb_size=512,
                                                 show_progress=True)
                paths = {}
                for key, img in multilayer.iteritems():
                    path = os.path.join(tmpdir, '%s-%s.jpg' % (md5, key))
                    img.save(path)
                    paths[key] = path

                with transaction.atomic():
                    photo = add_photo(
                        path=paths["combined"],
                        user=admin_user,
                        license=license,
                        synthetic=True,
                        whitebalanced=True,
                        inappropriate=False,
                        nonperspective=False,
                        stylized=False,
                        rotated=False,
                    )

                    print "Uploading layers: %s..." % paths.keys()
                    IntrinsicSyntheticDecomposition.objects.create(
                        photo=photo,
                        multilayer_exr=ImageFile(open(filename, 'rb')),
                        scene_artist=scene_artist,
                        scene_url=scene_url,
                        md5=md5,
                        **{("%s_thumb" % key): ImageFile(open(path, 'rb'))
                           for key, path in paths.iteritems()
                           if key != "combined"})
            finally:
                shutil.rmtree(tmpdir)

        update_synthetic_diff_intensity()
Example #34
def algorithm_cv_ranking(algorithm_slugs=None,
                         error_attr='mean_error',
                         show_progress=False):
    """
    Compute algorithm errors and rankings using cross-validation.

    :param algorithm_slugs: algorithms (by ``slug``) to consider.  If ``None``,
        then all algorithms with ``active=True`` are used.

    :param error_attr: error metric to use (attribute of
        ``IntrinsicImagesDecomposition``).

    :return: a ``dict`` with the following entries:

        .. code-block:: py

            {
                'slug_to_rank': { slug: (rank_mean, rank_std) }
                'slug_to_error': { slug: (error_mean, error_std) }
            }

        where ``slug`` is an entry from ``algorithm_slugs``.
    """

    if not algorithm_slugs:
        algorithm_slugs = list(
            IntrinsicImagesAlgorithm.objects.filter(
                active=True).order_by().distinct('slug').values_list(
                    'slug', flat=True))

    if show_progress:
        print 'Evaluating %s algorithms: %s...' % (len(algorithm_slugs),
                                                   algorithm_slugs)

    if show_progress:
        print 'Computing algorithm cross-validation errors...'

    # slug_to_photo_to_error: { slug: { photo: error } }
    slug_to_photo_to_error = {
        slug: algorithm_cv_errors(slug, error_attr)
        for slug in progress_bar(algorithm_slugs, show_progress)
    }

    # slug_to_errors: { slug: [error] }
    slug_to_errors = {
        slug: photo_to_error.values()
        for slug, photo_to_error in slug_to_photo_to_error.iteritems()
    }

    # slug_to_error: { slug: mean error, std error }
    slug_to_error = {
        slug: (np.mean(errors), np.std(errors))
        for slug, errors in slug_to_errors.iteritems()
    }

    if show_progress:
        print 'Computing ranks...'

    # photo_to_slug_error: { photo: [ (slug, error) ] }
    photo_to_slug_error = {}
    for slug, photo_ids in slug_to_photo_to_error.iteritems():
        for p, error in photo_ids.iteritems():
            if p in photo_to_slug_error:
                photo_to_slug_error[p].append((slug, error))
            else:
                photo_to_slug_error[p] = [(slug, error)]

    # slug_to_ranks: { slug: [ranks] }
    slug_to_ranks = {slug: [] for slug in algorithm_slugs}
    for photo, errors in photo_to_slug_error.iteritems():
        if len(errors) < len(algorithm_slugs):
            continue
        ranks = rankdata([e[1] for e in errors], method='average')
        for i, v in enumerate(errors):
            slug_to_ranks[v[0]].append(ranks[i])

    # slug_to_rank: { slug: mean rank, std rank }
    slug_to_rank = {
        slug: (np.mean(ranks), np.std(ranks))
        for slug, ranks in slug_to_ranks.iteritems()
    }

    if show_progress:
        print 'Computing ranks... done'

    return {
        'slug_to_rank': slug_to_rank,
        'slug_to_error': slug_to_error,
    }
Example #35
def intrinsic_update_training_result(show_progress=False):
    """ Update the error on IntrinsicImagesAlgorithm objects """

    IntrinsicImagesAlgorithm.objects.all().update(iiw_best=False,
                                                  iiw_mean_error=None)

    slugs = IntrinsicImagesAlgorithm.objects.all() \
        .order_by('slug') \
        .distinct('slug') \
        .values_list('slug', flat=True)

    photos = Photo.objects.filter(in_iiw_dataset=True)
    num_photos = photos.count()
    if show_progress:
        photo_ids = list(photos.values_list('id', flat=True))

    for slug in slugs:
        if show_progress:
            print slug

        algorithms = IntrinsicImagesAlgorithm.objects \
            .filter(active=True, slug=slug) \
            .order_by('id')
        if show_progress:
            algorithms = progress_bar(algorithms)

        best_algorithm = None
        for algorithm in algorithms:
            iiw_decompositions = IntrinsicImagesDecomposition.objects.filter(
                algorithm=algorithm,
                photo__in_iiw_dataset=True,
                mean_error__isnull=False)

            num_decompositions = iiw_decompositions.count()
            if num_decompositions < num_photos:
                if show_progress:
                    decomp_photo_ids = set(
                        iiw_decompositions.values_list('photo_id', flat=True))
                    if num_photos - num_decompositions < 10:
                        missing_ids = []
                        for p in photo_ids:
                            if p not in decomp_photo_ids:
                                missing_ids.append(p)
                        print "Algorithm %s (id: %s): missing %s photos %s" % (
                            slug, algorithm.id,
                            num_photos - num_decompositions, missing_ids)
                    else:
                        print "Algorithm %s (id: %s): missing %s photos" % (
                            slug, algorithm.id,
                            num_photos - num_decompositions)
                continue

            iiw_mean_error = iiw_decompositions.aggregate(
                a=Avg('mean_error'))['a']
            iiw_mean_runtime = iiw_decompositions.aggregate(
                a=Avg('runtime'))['a']

            algorithm.iiw_mean_error = iiw_mean_error
            algorithm.iiw_mean_runtime = iiw_mean_runtime
            algorithm.save()

            if not best_algorithm or iiw_mean_error < best_algorithm.iiw_mean_error:
                best_algorithm = algorithm

        if best_algorithm:
            best_algorithm.iiw_best = True
            best_algorithm.save()
Example #36
    def evaluate_algorithms(self, user_ids, error_attr):
        algorithm_slugs = list(
            IntrinsicImagesAlgorithm.objects.filter(active=True).exclude(
                slug__endswith='_prototype').order_by().distinct(
                    'slug').values_list('slug', flat=True))

        # { slug: { photo_id : error } }
        print 'Evaluating algorithms...'
        slug_errors = {
            slug: self.algorithm_cv_errors(slug, error_attr)
            for slug in progress_bar(algorithm_slugs)
        }
        #algorithm_slugs.append('human')
        #slug_errors['human'] = self.human_errors(user_ids, error_attr)

        print 'Computing ranks (photo --> slug)...'

        # { photo: { slug: error } }
        photo_to_slugs = {}
        for slug, photo_ids in slug_errors.iteritems():
            for p, error in photo_ids.iteritems():
                if p in photo_to_slugs:
                    photo_to_slugs[p].append((slug, error))
                else:
                    photo_to_slugs[p] = [(slug, error)]

        print 'Computing ranks (slug --> ranks)...'

        # { slug: [ranks] }
        slug_ranks = {slug: [] for slug in algorithm_slugs}
        for photo, errors in photo_to_slugs.iteritems():
            if len(errors) < len(algorithm_slugs):
                continue
            ranks = rankdata([e[1] for e in errors], method='average')
            for i, v in enumerate(errors):
                slug_ranks[v[0]].append(ranks[i])

        print >> self.f, r"""
\begin{figure*}[tb]
\centering
\begin{subfigure}[b]{0.49\textwidth}
\begin{bchart}[max=65,plain,unit=\%,scale=0.8,width=1.2\textwidth]
""".strip()
        items = sorted(slug_ranks,
                       key=lambda x: np.mean(slug_errors[x].values()))
        for slug in items:
            scale = (50 if error_attr == 'mean_sum_error' else 100)
            print >> self.f, r'  \bcbar[text={%s}]{%.1f}' % (
                SLUG_TO_TEX[slug],
                np.mean(slug_errors[slug].values()) * scale,
            )
        print >> self.f, r"""
\end{bchart}
\caption{%%
Weighted human disagreement (WHD), as described in
Section~\ref{ssec:metric}.
}
\end{subfigure}
\begin{subfigure}[b]{0.49\textwidth}
\begin{bchart}[min=0,max=8,plain,scale=0.8,width=1.2\textwidth]
""".strip()
        for slug in items:
            print >> self.f, r'  \bcbar[text={%s}]{%.2f}' % (
                SLUG_TO_TEX[slug],
                np.mean(slug_ranks[slug]),
            )
        print >>self.f, r"""
\end{bchart}
\caption{Mean rank (1 to 8).}
\end{subfigure}
\vspace{-6pt}
\caption{%
Quantitative comparison of our algorithm against several recent algorithms.
The ``median individual human'' is calculated using responses that were
excluded from the aggregation, and from users who were not blocked by our
sentinels.  We use NUM_USERS users for this test and NUM_TOTAL total users for
aggregation.}
\label{fig:ranking}
\end{figure*}
""".strip().replace('NUM_USERS', str(len(user_ids))) \
           .replace('NUM_TOTAL', str(IntrinsicPointComparisonResponse.objects.order_by().distinct('user').count()))
        self.f.flush()
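The ``print >> self.f, ...`` statements above use Python 2's file-redirection
form of ``print``, which writes the value plus a newline to any file-like
object; a minimal standalone equivalent:

with open('ranking.tex', 'w') as f:
    print >> f, r'\begin{bchart}[max=65,plain]'
    print >> f, r'  \bcbar[text={ours}]{12.3}'
    print >> f, r'\end{bchart}'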
Example #37
    def handle(self, *args, **options):

        algs = IntrinsicImagesAlgorithm.objects.filter(
            slug='bell2014_densecrf',
            active=True,
        )

        result = []

        for a in progress_bar(algs):
            params = json.loads(a.parameters)

            if ('pairwise_weight' not in params or params['n_iters'] > 60
                    or params['n_iters'] < 1):

                print "Disabling", a.id, ' '
                a.active = False
                a.save()
                continue

            decomps = IntrinsicImagesDecomposition.objects \
                .filter(algorithm=a,
                        error_comparison_thresh=0.10,
                        photo__in_iiw_dataset=True)

            count = decomps.count()

            if count > 150:
                whd = decomps.aggregate(s=Avg('mean_error'))['s']
                runtime = a.intrinsic_images_decompositions.aggregate(
                    s=Avg('runtime'))['s']
                result.append((whd, runtime, count, a))
            else:
                print a.id, count, ' '

        default_params = {
            'n_iters': 10,
            'shading_blur_sigma': 0.1,
            'shading_blur_init_method': 'none',
            'kmeans_intensity_scale': 0.5,
            'kmeans_n_clusters': 30,
            'abs_reflectance_weight': 0,
            'abs_shading_weight': 1e3,
            'abs_shading_gray_point': 0.5,
            'shading_target_weight': 1e3,
            'chromaticity_weight': 10,
            'pairwise_weight': 1e4,
            'theta_p': 0.1,
            'theta_l': 0.1,
            'theta_c': 0.03,
            'split_clusters': True,
        }

        p = default_params.copy()
        p['n_iters'] = 25
        p['abs_shading_weight'] = 500.0
        p['chromaticity_weight'] = 0
        p['theta_c'] = 0.025
        p['shading_target_weight'] = 2e4
        p['pairwise_intensity_chromaticity'] = True
        p['shading_target_norm'] = 'L2'
        p['kmeans_n_clusters'] = 20
        best_params = p

        result.sort(key=lambda x: x[0], reverse=True)
        for whd, runtime, count, a in result:
            params = json.loads(a.parameters)

            diff = {}
            for k, v in params.iteritems():
                if k in default_params:
                    if v != default_params[k]:
                        diff[k] = '%s --> %s' % (default_params[k], v)
                else:
                    diff[k] = '[default] --> %s' % v

            print 'id: %s, count: %s, whd: %.1f%%, runtime: %s s, params: %s' % (
                a.id, count, whd * 100.0, runtime, diff)

        print '-' * 30
        print '\n' * 2

        algorithm_ids = []

        for whd, runtime, count, a in result:
            params = json.loads(a.parameters)

            c = 0
            for k, v in params.iteritems():
                if v != best_params.get(k):
                    c += 1
            if c > 2:
                continue

            diff = {}
            for k, v in params.iteritems():
                if k in best_params:
                    if v != best_params[k]:
                        diff[k] = '%s --> %s' % (best_params[k], v)
                else:
                    diff[k] = '[default] --> %s' % v

            print 'id: %s, count: %s, whd: %.1f%%, runtime: %s s, params: %s' % (
                a.id, count, whd * 100.0, runtime, diff)

            algorithm_ids.append(a.id)
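
The "old --> new" diff logic above is written out twice (once against default_params, once against best_params). A small helper capturing it, offered as a refactoring sketch rather than code from the project:

def param_diff(params, reference):
    # Report how params deviates from a reference dict, in the same
    # "old --> new" format printed above.
    diff = {}
    for k, v in params.iteritems():
        if k in reference:
            if v != reference[k]:
                diff[k] = '%s --> %s' % (reference[k], v)
        else:
            diff[k] = '[default] --> %s' % v
    return diff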
Example #38
0
    def handle(self, *args, **options):
        admin_user = User.objects.get_or_create(
            username='******')[0].get_profile()

        for filename in progress_bar(args):
            if not os.path.exists(filename):
                raise ValueError("File does not exist: '%s'" % filename)

            blendswap_id = os.path.basename(filename).split('_')[0]
            license, scene_url, scene_artist = \
                License.get_for_blendswap_scene(blendswap_id)

            print 'file:', filename
            print 'license:', license
            print 'url:', scene_url
            print 'artist:', scene_artist

            tmpdir = tempfile.mkdtemp()
            try:
                print "Loading %s..." % filename
                md5 = md5sum(filename)
                if IntrinsicSyntheticDecomposition.objects.filter(md5=md5).exists():
                    print "Already added: %s" % filename
                    continue

                multilayer = open_multilayer_exr(
                    filename, tonemap=True, thumb_size=512, show_progress=True)
                paths = {}
                for key, img in multilayer.iteritems():
                    path = os.path.join(tmpdir, '%s-%s.jpg' % (md5, key))
                    img.save(path)
                    paths[key] = path

                with transaction.atomic():
                    photo = add_photo(
                        path=paths["combined"],
                        user=admin_user,
                        license=license,
                        synthetic=True,
                        whitebalanced=True,
                        inappropriate=False,
                        nonperspective=False,
                        stylized=False,
                        rotated=False,
                    )

                    print "Uploading layers: %s..." % paths.keys()
                    IntrinsicSyntheticDecomposition.objects.create(
                        photo=photo,
                        multilayer_exr=ImageFile(open(filename, 'rb')),
                        scene_artist=scene_artist,
                        scene_url=scene_url,
                        md5=md5,
                        **{
                            ("%s_thumb" % key): ImageFile(open(path, 'rb'))
                            for key, path in paths.iteritems()
                            if key != "combined"
                        }
                    )
            finally:
                shutil.rmtree(tmpdir)

        update_synthetic_diff_intensity()
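
The md5sum helper used above is not shown in this example. A typical streaming implementation would look like the sketch below (an assumption, not the project's actual code):

import hashlib

def md5sum(filename, blocksize=65536):
    # Hash the file in blocks so large EXRs need not fit in memory.
    h = hashlib.md5()
    with open(filename, 'rb') as f:
        for block in iter(lambda: f.read(blocksize), b''):
            h.update(block)
    return h.hexdigest()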
Example #39
0
    def handle(self, *args, **options):

        algs = IntrinsicImagesAlgorithm.objects.filter(
            slug='bell2014_densecrf',
            active=True,
        )

        result = []

        for a in progress_bar(algs):
            params = json.loads(a.parameters)

            if ('pairwise_weight' not in params or
                    params['n_iters'] > 60 or params['n_iters'] < 1):

                print "Disabling", a.id, ' '
                a.active = False
                a.save()
                continue

            decomps = IntrinsicImagesDecomposition.objects \
                .filter(algorithm=a,
                        error_comparison_thresh=0.10,
                        photo__in_iiw_dataset=True)

            count = decomps.count()

            if count > 150:
                whd = decomps.aggregate(s=Avg('mean_error'))['s']
                runtime = a.intrinsic_images_decompositions.aggregate(s=Avg('runtime'))['s']
                result.append((whd, runtime, count, a))
            else:
                print a.id, count, ' '

        default_params = {
            'n_iters': 10,
            'shading_blur_sigma': 0.1,
            'shading_blur_init_method': 'none',
            'kmeans_intensity_scale': 0.5,
            'kmeans_n_clusters': 30,
            'abs_reflectance_weight': 0,
            'abs_shading_weight': 1e3,
            'abs_shading_gray_point': 0.5,
            'shading_target_weight': 1e3,
            'chromaticity_weight': 10,
            'pairwise_weight': 1e4,
            'theta_p': 0.1,
            'theta_l': 0.1,
            'theta_c': 0.03,
            'split_clusters': True,
        }

        p = default_params.copy()
        p['n_iters'] = 25
        p['abs_shading_weight'] = 500.0
        p['chromaticity_weight'] = 0
        p['theta_c'] = 0.025
        p['shading_target_weight'] = 2e4
        p['pairwise_intensity_chromaticity'] = True
        p['shading_target_norm'] = 'L2'
        p['kmeans_n_clusters'] = 20
        best_params = p

        result.sort(key=lambda x: x[0], reverse=True)
        for whd, runtime, count, a in result:
            params = json.loads(a.parameters)

            diff = {}
            for k, v in params.iteritems():
                if k in default_params:
                    if v != default_params[k]:
                        diff[k] = '%s --> %s' % (default_params[k], v)
                else:
                    diff[k] = '[default] --> %s' % v

            print 'id: %s, count: %s, whd: %.1f%%, runtime: %s s, params: %s' % (
                a.id, count, whd * 100.0, runtime, diff)

        print '-' * 30
        print '\n' * 2

        algorithm_ids = []

        for whd, runtime, count, a in result:
            params = json.loads(a.parameters)

            c = 0
            for k, v in params.iteritems():
                if v != best_params.get(k):
                    c += 1
            if c > 2:
                continue

            diff = {}
            for k, v in params.iteritems():
                if k in best_params:
                    if v != best_params[k]:
                        diff[k] = '%s --> %s' % (best_params[k], v)
                else:
                    diff[k] = '[default] --> %s' % v

            print 'id: %s, count: %s, whd: %.1f%%, runtime: %s s, params: %s' % (
                a.id, count, whd * 100.0, runtime, diff)

            algorithm_ids.append(a.id)
Example #40
0
    def human_errors(self, user_ids, error_attr):
        print 'Fetching user responses...'
        all_user_values = IntrinsicPointComparisonResponse.objects \
            .filter(user__exclude_from_aggregation=False,
                    user_id__in=user_ids,
                    comparison__darker__isnull=False,
                    comparison__darker_score__gt=0,
                    comparison__photo__in_iiw_dataset=True) \
            .values_list('comparison__photo_id', 'darker',
                         'comparison__darker', 'comparison__darker_score',
                         'user_id', 'comparison__point1__min_separation')
        user_to_values = group_by_value(all_user_values, 4)

        photo_to_errors = {}
        user_mean_errors = []
        all_errors = []
        for user_id in progress_bar(user_ids):
            mean_errors = []
            photo_ids = group_by_value(user_to_values[user_id], 0)
            for photo_id, values in photo_ids.iteritems():
                if not values:
                    continue

                if error_attr == 'mean_error':
                    error_sum = np.sum(x[3] for x in values if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_dense_error':
                    values1 = [x for x in values if x[5] < 0.05]
                    error_sum = np.sum(x[3] for x in values1 if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values1)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_sparse_error':
                    values1 = [x for x in values if x[5] > 0.05]
                    error_sum = np.sum(x[3] for x in values1 if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values1)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_eq_error':
                    values = [x for x in values if x[2] == 'E']
                    error_sum = np.sum(x[3] for x in values if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_neq_error':
                    values = [x for x in values if x[2] in ('1', '2')]
                    error_sum = np.sum(x[3] for x in values if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_sum_error':
                    values1 = [x for x in values if x[2] == 'E']
                    error_sum = np.sum(x[3] for x in values1 if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values1)
                    error1 = error_sum / error_total if error_total else None

                    values2 = [x for x in values if x[2] in ('1', '2')]
                    error_sum = np.sum(x[3] for x in values2 if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values2)
                    error2 = error_sum / error_total if error_total else None

                    if error1 is not None or error2 is not None:
                        error = (error1 if error1 else 0) + (error2 if error2 else 0)
                    else:
                        error = None
                else:
                    raise ValueError()

                if error is not None:
                    all_errors.append(error)
                    mean_errors.append(error)
                    if photo_id in photo_to_errors:
                        photo_to_errors[photo_id].append(error)
                    else:
                        photo_to_errors[photo_id] = [error]

            user_mean_errors.append(np.mean(mean_errors))

        #print >>self.f, 'ERROR METRIC: %s' % error_attr
        #print >>self.f, 'By User: User mean error: %s (%s) +/- %s, %s users' % (
            #np.mean(user_mean_errors),
            #np.median(user_mean_errors),
            #np.std(user_mean_errors),
            #len(user_mean_errors)
        #)
        #print >>self.f, 'Across all: User mean error: %s (%s) +/- %s, %s values' % (
            #np.mean(all_errors),
            #np.median(all_errors),
            #np.std(all_errors),
            #len(all_errors)
        #)
        #by_photo = [np.median(errors) for errors in photo_to_errors.values()]
        #print >>self.f, 'By photo: User mean error: %s (%s) +/- %s, %s values' % (
            #np.mean(by_photo),
            #np.median(by_photo),
            #np.std(by_photo),
            #len(by_photo)
        #)

        #for p in (5.0, 25.0, 50.0, 75.0, 95.0):
            #print >>self.f, 'Percentile %s: %s' % (np.percentile(user_mean_errors, p), p)
        self.f.flush()

        return {
            photo_id: np.median(errors)
            for photo_id, errors in photo_to_errors.iteritems()
        }
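
group_by_value is used above but not shown; its assumed behavior is to bucket rows by the value at a given index:

def group_by_value(rows, index):
    # Hypothetical helper: { rows[i][index]: [matching rows] }.
    groups = {}
    for row in rows:
        groups.setdefault(row[index], []).append(row)
    return groups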
Example #41
0
    def evaluate_algorithms(self, user_ids, error_attr):
        algorithm_slugs = list(
            IntrinsicImagesAlgorithm.objects
            .filter(active=True)
            .exclude(slug__endswith='_prototype')
            .order_by().distinct('slug')
            .values_list('slug', flat=True)
        )

        # { slug: { photo_id : error } }
        print 'Evaluating algorithms...'
        slug_errors = {
            slug: self.algorithm_cv_errors(slug, error_attr)
            for slug in progress_bar(algorithm_slugs)
        }
        #algorithm_slugs.append('human')
        #slug_errors['human'] = self.human_errors(user_ids, error_attr)

        print 'Computing ranks (photo --> slug)...'

        # { photo: { slug: error } }
        photo_to_slugs = {}
        for slug, photo_ids in slug_errors.iteritems():
            for p, error in photo_ids.iteritems():
                if p in photo_to_slugs:
                    photo_to_slugs[p].append((slug, error))
                else:
                    photo_to_slugs[p] = [(slug, error)]

        print 'Computing ranks (slug --> ranks)...'

        # { slug: [ranks] }
        slug_ranks = {slug: [] for slug in algorithm_slugs}
        for photo, errors in photo_to_slugs.iteritems():
            if len(errors) < len(algorithm_slugs):
                continue
            ranks = rankdata([e[1] for e in errors], method='average')
            for i, v in enumerate(errors):
                slug_ranks[v[0]].append(ranks[i])

        print >>self.f, r"""
\begin{figure*}[tb]
\centering
\begin{subfigure}[b]{0.49\textwidth}
\begin{bchart}[max=65,plain,unit=\%,scale=0.8,width=1.2\textwidth]
""".strip()
        items = sorted(slug_ranks, key=lambda x: np.mean(slug_errors[x].values()))
        for slug in items:
            scale = (50 if error_attr == 'mean_sum_error' else 100)
            print >>self.f, r'  \bcbar[text={%s}]{%.1f}' % (
                SLUG_TO_TEX[slug],
                np.mean(slug_errors[slug].values()) * scale,
            )
        print >>self.f, r"""
\end{bchart}
\caption{%%
Weighted human disagreement (WHD), as described in
Section~\ref{ssec:metric}.
}
\end{subfigure}
\begin{subfigure}[b]{0.49\textwidth}
\begin{bchart}[min=0,max=8,plain,scale=0.8,width=1.2\textwidth]
""".strip()
        for slug in items:
            print >>self.f, r'  \bcbar[text={%s}]{%.2f}' % (
                SLUG_TO_TEX[slug],
                np.mean(slug_ranks[slug]),
            )
        print >>self.f, r"""
\end{bchart}
\caption{Mean rank (1 to 8).}
\end{subfigure}
\vspace{-6pt}
\caption{%
Quantitative comparison of our algorithm against several recent algorithms.
The ``median individual human'' is calculated using responses that were
excluded from the aggregation, and from users who were not blocked by our
sentinels.  We use NUM_USERS users for this test and NUM_TOTAL total users for
aggregation.}
\label{fig:ranking}
\end{figure*}
""".strip().replace('NUM_USERS', str(len(user_ids))) \
           .replace('NUM_TOTAL', str(IntrinsicPointComparisonResponse.objects.order_by().distinct('user').count()))
        self.f.flush()
Example #42
0
    def handle(self, *args, **options):
        print >>self.stdout, 'MTurk info:'
        for key in dir(settings):
            if key.startswith('MTURK') or 'DEBUG' in key:
                print '  %s: %s' % (key, getattr(settings, key))

        print >>self.stdout, '\nDownloading list of hits...'
        connection = get_mturk_connection()

        # repeatedly try and download list
        while True:
            try:
                all_hits = list(connection.get_all_hits())
                break
            except MTurkRequestError as e:
                print e
                sleep(5)

        # LOCAL
        all_hit_ids = set(extract_mturk_attr(data, 'HITId') for data in all_hits)
        print >>self.stdout, '\nSyncing: local --> Amazon...'
        num_updated = MtHit.objects \
            .filter(sandbox=settings.MTURK_SANDBOX) \
            .exclude(hit_status='D') \
            .exclude(id__in=all_hit_ids) \
            .update(hit_status='D', expired=True)
        if num_updated:
            print 'No remote copy of %s hits -- marked them as disposed' % num_updated

        num_updated = MtAssignment.objects \
            .filter(hit__hit_status='D', status='S') \
            .update(status='A')
        if num_updated:
            print '%s assignments pending with disposed hits -- marked them as approved' % num_updated

        # REMOTE
        for sync_assignments in [False, True]:
            print >>self.stdout, '\nSyncing: Amazon --> local... (sync asst: %s)' % (
                sync_assignments)
            for data in progress_bar(all_hits):
                hit_id = extract_mturk_attr(data, 'HITId')

                try:
                    hit = MtHit.objects.get(id=hit_id)
                    for _ in xrange(5):
                        try:
                            hit.sync_status(
                                data, sync_assignments=sync_assignments)
                            break
                        except MTurkRequestError as e:
                            print e
                            sleep(5)
                except MtHit.DoesNotExist:
                    print 'No local copy of %s -- approving and deleting from Amazon (disabling)' % hit_id
                    try:
                        connection.disable_hit(hit_id)
                    except Exception as exc:
                        print exc

        print >>self.stdout, '\nFetching account balance...'
        print >>self.stdout, 'Account balance:', connection.get_account_balance()
        print >>self.stdout, '\nDone'
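
The try/except-and-sleep pattern around MTurk calls appears twice above. A small helper that captures it (a sketch; the project may handle retries differently):

def with_retries(fn, attempts=5, delay=5):
    # Retry fn() on MTurkRequestError, sleeping between attempts; the final
    # attempt lets the exception propagate.
    for _ in xrange(attempts - 1):
        try:
            return fn()
        except MTurkRequestError as e:
            print e
            sleep(delay)
    return fn()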
Example #43
0
    def handle(self, *args, **options):

        print 'Increment algorithm versions...'
        IntrinsicImagesAlgorithm.objects.all() \
            .update(task_version=F('task_version') + 1)

        print 'Fetching photos...'
        photo_ids = list(
            Photo.objects.filter(in_iiw_dataset=True)
            .order_by('id')
            .values_list('id', flat=True)
        )

        print 'Fetching algorithms...'
        algorithms = [
            IntrinsicImagesAlgorithm.objects.get_or_create(
                slug=a['slug'], parameters=a['parameters'],
                baseline=a['slug'].startswith('baseline_'))[0]
            for a in get_algorithm_variants()
        ]

        # HACK TO REMOVE
        algorithms += list(
            IntrinsicImagesAlgorithm.objects
            .filter(slug='bell2014_densecrf', active=True)
            .annotate(c=Count('intrinsic_images_decompositions'),
                      s=Avg('intrinsic_images_decompositions__mean_error'))
            .filter(c__gte=1000, s__lte=0.25)
        )

        print 'Filter out inactive or duplicate algorithms...'
        seen_algorithm_ids = set()
        distinct_algorithms = []
        for a in algorithms:
            if a.active and a.id not in seen_algorithm_ids:
                seen_algorithm_ids.add(a.id)
                distinct_algorithms.append(a)
        algorithms = distinct_algorithms

        for a in progress_bar(algorithms):
            if a.intrinsic_images_decompositions.exists():
                a._mean_error = a.intrinsic_images_decompositions \
                    .aggregate(s=Avg('mean_error'))['s']
            else:
                a._mean_error = 0.0
        algorithms.sort(key=lambda a: a._mean_error)

        c = [0, 0]
        print 'Starting jobs...'
        for algorithm in progress_bar(algorithms):
            completed_photo_ids = set(
                IntrinsicImagesDecomposition.objects
                .filter(algorithm_id=algorithm.id, mean_error__isnull=False, error_comparison_thresh=0.05)
                .values_list('photo_id', flat=True)
            )
            for photo_id in photo_ids:
                if photo_id in completed_photo_ids:
                    continue

                #if algorithm.slug.startswith('shen2011_') or algorithm.slug.startswith('zhao2012_'):
                if not algorithm.slug.startswith('bell2014_'):
                    intrinsic_decomposition_task_matlab.delay(
                        photo_id=photo_id,
                        algorithm_id=algorithm.id,
                        task_version=algorithm.task_version,
                    )
                    c[0] += 1
                else:
                    intrinsic_decomposition_task.delay(
                        photo_id=photo_id,
                        algorithm_id=algorithm.id,
                        task_version=algorithm.task_version,
                    )
                    c[1] += 1

        print "intrinsic_create_jobs: queued %s matlab, %s general tasks" % tuple(c)
Example #44
0
def compute_optimal_delta(show_progress=False):

    if show_progress:
        print "Loading ground truth..."

    preload = []

    decomp_iterator = IntrinsicSyntheticDecomposition.objects.all()
    if show_progress:
        decomp_iterator = progress_bar(decomp_iterator)

    for decomp in decomp_iterator:
        comparisons = IntrinsicPointComparison.objects \
            .filter(photo_id=decomp.photo_id) \
            .filter(point1__opaque=True, point2__opaque=True, darker__isnull=False) \
            .select_related('point1', 'point2')

        if not comparisons:
            continue

        diff_col, = decomp.open_multilayer_exr_layers(['diff_col', ])

        preload.append([
            list(comparisons),
            diff_col
        ])

    def objective(thresh):
        if thresh < 0:
            return 1.0

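        # Judgments "1"/"2" count as correct only when the darker point is at
        # least (1 + thresh) times darker; "E" counts as correct only when the
        # two intensities are within a factor of (1 + thresh).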
        neq_thresh = 1.0 / (1.0 + thresh)
        eq_thresh = 1.0 + thresh

        errors = []
        for comparisons, diff_col in preload:
            error_sum = 0.0
            weight_sum = 0.0
            for c in comparisons:
                l1, l2 = [
                    np.mean(diff_col[int(p.y * diff_col.shape[0]), int(p.x * diff_col.shape[1]), :])
                    for p in (c.point1, c.point2)
                ]
                if l1 < 1e-3 or l2 < 1e-3:
                    continue

                if c.darker == "1":  # l1 < l2
                    error_sum += 0 if l1 / max(l2, 1e-10) < neq_thresh else c.darker_score
                elif c.darker == "2":  # l2 < l1
                    error_sum += 0 if l2 / max(l1, 1e-10) < neq_thresh else c.darker_score
                elif c.darker == "E":  # l1 - l2
                    ratio = max(l1, l2) / max(min(l1, l2), 1e-10)
                    error_sum += 0 if ratio < eq_thresh else c.darker_score
                else:
                    raise ValueError("Unknown value of darker: %s" % c.darker)

                weight_sum += c.darker_score
            if weight_sum:
                errors.append(error_sum / weight_sum)

        mean_error = np.mean(errors)
        print thresh, mean_error
        return mean_error

    if show_progress:
        print "Optimizing threshold..."

    return float(brute(objective, ranges=[(0.0, 0.5)], disp=show_progress))
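
scipy.optimize.brute evaluates the objective on a coarse grid over the given ranges and then polishes the best grid point with a local minimizer. A toy illustration (the objective here is made up):

from scipy.optimize import brute

f = lambda x: (x[0] - 0.1) ** 2
print brute(f, ranges=[(0.0, 0.5)])  # a value near 0.1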
Example #45
0
def compute_optimal_delta(show_progress=False):

    if show_progress:
        print "Loading ground truth..."

    preload = []

    decomp_iterator = IntrinsicSyntheticDecomposition.objects.all()
    if show_progress:
        decomp_iterator = progress_bar(decomp_iterator)

    for decomp in decomp_iterator:
        comparisons = IntrinsicPointComparison.objects \
            .filter(photo_id=decomp.photo_id) \
            .filter(point1__opaque=True, point2__opaque=True, darker__isnull=False) \
            .select_related('point1', 'point2')

        if not comparisons:
            continue

        diff_col, = decomp.open_multilayer_exr_layers([
            'diff_col',
        ])

        preload.append([list(comparisons), diff_col])

    def objective(thresh):
        if thresh < 0:
            return 1.0

        neq_thresh = 1.0 / (1.0 + thresh)
        eq_thresh = 1.0 + thresh

        errors = []
        for comparisons, diff_col in preload:
            error_sum = 0.0
            weight_sum = 0.0
            for c in comparisons:
                l1, l2 = [
                    np.mean(diff_col[int(p.y * diff_col.shape[0]),
                                     int(p.x * diff_col.shape[1]), :])
                    for p in (c.point1, c.point2)
                ]
                if l1 < 1e-3 or l2 < 1e-3:
                    continue

                if c.darker == "1":  # l1 < l2
                    error_sum += 0 if l1 / max(
                        l2, 1e-10) < neq_thresh else c.darker_score
                elif c.darker == "2":  # l2 < l1
                    error_sum += 0 if l2 / max(
                        l1, 1e-10) < neq_thresh else c.darker_score
                elif c.darker == "E":  # l1 - l2
                    ratio = max(l1, l2) / max(min(l1, l2), 1e-10)
                    error_sum += 0 if ratio < eq_thresh else c.darker_score
                else:
                    raise ValueError("Unknown value of darker: %s" % c.darker)

                weight_sum += c.darker_score
            if weight_sum:
                errors.append(error_sum / weight_sum)

        mean_error = np.mean(errors)
        print thresh, mean_error
        return mean_error

    if show_progress:
        print "Optimizing threshold..."

    return float(brute(objective, ranges=[(0.0, 0.5)], disp=show_progress))
Example #46
0
    def human_errors(self, user_ids, error_attr):
        print 'Fetching user responses...'
        all_user_values = IntrinsicPointComparisonResponse.objects \
            .filter(user__exclude_from_aggregation=False,
                    user_id__in=user_ids,
                    comparison__darker__isnull=False,
                    comparison__darker_score__gt=0,
                    comparison__photo__in_iiw_dataset=True) \
            .values_list('comparison__photo_id', 'darker',
                         'comparison__darker', 'comparison__darker_score',
                         'user_id', 'comparison__point1__min_separation')
        user_to_values = group_by_value(all_user_values, 4)

        photo_to_errors = {}
        user_mean_errors = []
        all_errors = []
        for user_id in progress_bar(user_ids):
            mean_errors = []
            photo_ids = group_by_value(user_to_values[user_id], 0)
            for photo_id, values in photo_ids.iteritems():
                if not values:
                    continue

                if error_attr == 'mean_error':
                    error_sum = np.sum(x[3] for x in values if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_dense_error':
                    values1 = [x for x in values if x[5] < 0.05]
                    error_sum = np.sum(x[3] for x in values1 if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values1)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_sparse_error':
                    values1 = [x for x in values if x[5] > 0.05]
                    error_sum = np.sum(x[3] for x in values1 if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values1)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_eq_error':
                    values = [x for x in values if x[2] == 'E']
                    error_sum = np.sum(x[3] for x in values if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_neq_error':
                    values = [x for x in values if x[2] in ('1', '2')]
                    error_sum = np.sum(x[3] for x in values if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values)
                    error = error_sum / error_total if error_total else None
                elif error_attr == 'mean_sum_error':
                    values1 = [x for x in values if x[2] == 'E']
                    error_sum = np.sum(x[3] for x in values1 if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values1)
                    error1 = error_sum / error_total if error_total else None

                    values2 = [x for x in values if x[2] in ('1', '2')]
                    error_sum = np.sum(x[3] for x in values2 if x[1] != x[2])
                    error_total = np.sum(x[3] for x in values2)
                    error2 = error_sum / error_total if error_total else None

                    if error1 is not None or error2 is not None:
                        error = (error1 if error1 else 0) + (error2
                                                             if error2 else 0)
                    else:
                        error = None
                else:
                    raise ValueError()

                if error is not None:
                    all_errors.append(error)
                    mean_errors.append(error)
                    if photo_id in photo_to_errors:
                        photo_to_errors[photo_id].append(error)
                    else:
                        photo_to_errors[photo_id] = [error]

            user_mean_errors.append(np.mean(mean_errors))

        #print >>self.f, 'ERROR METRIC: %s' % error_attr
        #print >>self.f, 'By User: User mean error: %s (%s) +/- %s, %s users' % (
        #np.mean(user_mean_errors),
        #np.median(user_mean_errors),
        #np.std(user_mean_errors),
        #len(user_mean_errors)
        #)
        #print >>self.f, 'Across all: User mean error: %s (%s) +/- %s, %s values' % (
        #np.mean(all_errors),
        #np.median(all_errors),
        #np.std(all_errors),
        #len(all_errors)
        #)
        #by_photo = [np.median(errors) for errors in photo_to_errors.values()]
        #print >>self.f, 'By photo: User mean error: %s (%s) +/- %s, %s values' % (
        #np.mean(by_photo),
        #np.median(by_photo),
        #np.std(by_photo),
        #len(by_photo)
        #)

        #for p in (5.0, 25.0, 50.0, 75.0, 95.0):
        #print >>self.f, 'Percentile %s: %s' % (np.percentile(user_mean_errors, p), p)
        self.f.flush()

        return {
            photo_id: np.median(errors)
            for photo_id, errors in photo_to_errors.iteritems()
        }
Example #47
0
    def handle(self, *args, **options):
        algorithm_ids = [1141, 709, 1217, 426, 522, 633]

        photo_ids = IntrinsicImagesDecomposition.objects.filter(algorithm_id=1141) \
            .filter(mean_sum_error__isnull=False,
                    photo__stylized=False,
                    photo__rotated=False,
                    photo__synthetic=False,
                    photo__license__publishable=True,
                    photo__num_intrinsic_comparisons__gte=20,
                    #photo__aspect_ratio__lt=1,
                    ) \
            .order_by('-photo__num_intrinsic_comparisons') \
            .values_list('photo_id', flat=True)[:100]

        if not os.path.exists('visual-comparison'):
            os.makedirs('visual-comparison')

        with open('supplemental-comparisons.tex', 'w') as f:
            for photo_num, photo_id in enumerate(progress_bar(photo_ids)):
                Photo.objects.get(id=photo_id).open_image(width=512).save(
                    'visual-comparison/photo-%s.jpg' % photo_id)

                decomps = [
                    IntrinsicImagesDecomposition.objects.get(
                        algorithm_id=algorithm_id, photo_id=photo_id)
                    for algorithm_id in algorithm_ids
                ]

                for d in decomps:
                    open_image(d.reflectance_image).save('visual-comparison/decomp-%s-r.jpg' % d.id)
                    open_image(d.shading_image).save('visual-comparison/decomp-%s-s.jpg' % d.id)

                print >>f, """
                    \\begin{figure*}[tb]
                    \centering
                    \\begin{tabular}{@{}c@{\hskip 0.3em}c@{\hskip 0.3em}c@{\hskip 0.3em}c@{\hskip 0.3em}c@{\hskip 0.3em}c@{\hskip 0.3em}c@{\hskip 0.3em}c@{\hskip 0.3em}}
                        \\gfxw{0.135}{visual-comparison/photo-%s.jpg} &
                        \\rotatebox{90}{\small{Reflectance $\mathbf{R}$}} &
                    """.strip() % photo_id

                for i, d in enumerate(decomps):
                    print >>f, r"\gfxw{0.135}{visual-comparison/decomp-%s-r.jpg}".strip() % d.id
                    if i < len(decomps) - 1:
                        print >>f, "&"

                print >>f, r"\\ & \rotatebox{90}{\small{Shading $S$}} &"

                for i, d in enumerate(decomps):
                    print >>f, r"\gfxw{0.135}{visual-comparison/decomp-%s-s.jpg}".strip() % d.id
                    if i < len(decomps) - 1:
                        print >>f, "&"

                print >>f, r"\\ & &"

                for i, d in enumerate(decomps):
                    if i == 0:
                        print >>f, r'\MetricName{} = %.1f\%%' % (d.mean_error * 100.0)
                    else:
                        print >>f, r'%.1f\%%' % (d.mean_error * 100.0)
                    if i < len(decomps) - 1:
                        print >>f, "&"

                print >>f, """
                    \\\\
                    Image $\\mathbf{I}$ &
                    &
                    Our algorithm &
                    \\cite{zhao-PAMI2012} &
                    \\cite{garces2012} &
                    \\cite{shen-CVPR2011b} &
                    Retinex (gray) &
                    Retinex (color)
                    \\end{tabular}
                    \\vspace{-6pt}
                    \\caption{\\new{%%
                    Visual comparison of our algorithm against several recent open-source
                    algorithms.  Each algorithm uses the best parameters found from training
                    (i.e., minimizes mean \\MetricNameDelta{} across all photos).
                    OpenSurfaces Photo ID: %s.
                    }}
                    \\label{fig:visual-comparison-%s}
                    \\vspace{-6pt}
                    \\end{figure*}
                    """.strip() % (photo_id, photo_id)

                if (photo_num + 1) % 3 == 0:
                    print >>f, r"\clearpage"
Example #48
0
    def handle(self, *args, **options):
        print >> self.stdout, 'MTurk info:'
        for key in dir(settings):
            if key.startswith('MTURK') or 'DEBUG' in key:
                print '  %s: %s' % (key, getattr(settings, key))

        print >> self.stdout, '\nDownloading list of hits...'
        connection = get_mturk_connection()

        # repeatedly try and download list
        while True:
            try:
                all_hits = list(connection.get_all_hits())
                break
            except MTurkRequestError as e:
                print e
                sleep(5)

        # LOCAL
        all_hit_ids = set(
            extract_mturk_attr(data, 'HITId') for data in all_hits)
        print >> self.stdout, '\nSyncing: local --> Amazon...'
        num_updated = MtHit.objects \
            .filter(sandbox=settings.MTURK_SANDBOX) \
            .exclude(hit_status='D') \
            .exclude(id__in=all_hit_ids) \
            .update(hit_status='D', expired=True)
        if num_updated:
            print 'No remote copy of %s hits -- marked them as disposed' % num_updated

        num_updated = MtAssignment.objects \
            .filter(hit__hit_status='D', status='S') \
            .update(status='A')
        if num_updated:
            print '%s assignments pending with disposed hits -- marked them as approved' % num_updated

        # REMOTE
        for sync_assignments in [False, True]:
            print >> self.stdout, '\nSyncing: Amazon --> local... (sync asst: %s)' % (
                sync_assignments)
            for data in progress_bar(all_hits):
                hit_id = extract_mturk_attr(data, 'HITId')

                try:
                    hit = MtHit.objects.get(id=hit_id)
                    for _ in xrange(5):
                        try:
                            hit.sync_status(data,
                                            sync_assignments=sync_assignments)
                            break
                        except MTurkRequestError as e:
                            print e
                            sleep(5)
                except MtHit.DoesNotExist:
                    print 'No local copy of %s -- approving and deleting from Amazon (disabling)' % hit_id
                    try:
                        connection.disable_hit(hit_id)
                    except Exception as exc:
                        print exc

        print >> self.stdout, '\nFetching account balance...'
        print >> self.stdout, 'Account balance:', connection.get_account_balance()
        print >> self.stdout, '\nDone'
Example #49
0
def update_synthetic_diff_intensity(show_progress=False):
    """ Updates these fields for synthetic images:
        :attr:`intrinsic.models.IntrinsicPoint.synthetic_diff_intensity`
        :attr:`intrinsic.models.IntrinsicPointComparison.synthetic_diff_intensity_ratio`
        :attr:`intrinsic.models.IntrinsicPointComparison.synthetic_diff_cv`
    """

    decomp_iterator = IntrinsicSyntheticDecomposition.objects.all()
    if show_progress:
        decomp_iterator = progress_bar(decomp_iterator)

    for decomp in decomp_iterator:
        diff_col, diff_ind, diff_dir, comb = decomp.open_multilayer_exr_layers(
            ['diff_col', 'diff_ind', 'diff_dir', 'combined'])
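        # Blender Cycles convention: the diffuse component equals the diffuse
        # color pass times the sum of the direct and indirect diffuse passes.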
        diff = diff_col * (diff_ind + diff_dir)

        points = IntrinsicPoint.objects \
            .filter(photo_id=decomp.photo_id)

        for p in points:
            # diffuse color intensity
            p_diff_col = diff_col[int(p.y * diff_col.shape[0]),
                                  int(p.x * diff_col.shape[1]), :]
            p.synthetic_diff_intensity = np.mean(p_diff_col)

            # diffuse energy fraction
            p_diff = diff[int(p.y * diff.shape[0]),
                          int(p.x * diff.shape[1]), :]
            p_comb = comb[int(p.y * comb.shape[0]),
                          int(p.x * comb.shape[1]), :]
            p.synthetic_diff_fraction = np.mean(p_diff) / np.mean(p_comb)

            # coefficient of variation of the local 3x3 block
            px = int(p.x * diff_col.shape[1])
            py = int(p.y * diff_col.shape[0])
            # Slice upper bounds are exclusive, hence +2 to cover the full
            # 3x3 neighborhood around (py, px).
            p_diff_col_block = diff_col[
                max(py - 1, 0):min(py + 2, diff_col.shape[0]),
                max(px - 1, 0):min(px + 2, diff_col.shape[1]), :]
            mu_block = np.mean(p_diff_col_block)
            if mu_block > 0:
                p.synthetic_diff_cv = np.std(p_diff_col_block) / mu_block
            else:
                p.synthetic_diff_cv = None

            p.save()

        comparisons = IntrinsicPointComparison.objects \
            .filter(photo_id=decomp.photo_id) \
            .select_related('point1', 'point2')

        for c in comparisons:
            if c.point1.synthetic_diff_intensity >= 1e-3 and c.point2.synthetic_diff_intensity >= 1e-3:
                c.synthetic_diff_intensity_ratio = (
                    c.point1.synthetic_diff_intensity /
                    c.point2.synthetic_diff_intensity)
            else:
                c.synthetic_diff_intensity_ratio = None
            c.save()
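
The nearest-pixel lookup pattern img[int(p.y * h), int(p.x * w), :] recurs throughout these examples. A self-contained version, with clamping added at the x == 1.0 / y == 1.0 edge (an addition here, not in the original code):

import numpy as np

def sample_at(img, x, y):
    # Nearest-pixel lookup for normalized coordinates in [0, 1].
    row = min(int(y * img.shape[0]), img.shape[0] - 1)
    col = min(int(x * img.shape[1]), img.shape[1] - 1)
    return img[row, col, :]

img = np.random.rand(480, 640, 3)          # toy data
print np.mean(sample_at(img, 0.25, 0.5))   # mean intensity at that point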