Example No. 1
def save_substance_grid(substance_id, outdir, show_progress=False):
    try:
        os.makedirs(outdir)
    except OSError as e:
        print(e)  # directory may already exist

    substance = ShapeSubstance.objects.get(id=substance_id)
    print('substance: %s' % substance)
    qset = MaterialShape.objects.filter(substance_id=substance_id,
                                        photo__inappropriate=False)
    if not qset.exists():
        print('no shapes for %s' % substance)
        return

    from common.utils import create_image_grid_qset
    out = create_image_grid_qset(qset,
                                 'image_square_300',
                                 ncols=20,
                                 size=300,
                                 max_qset_size=10 * 20 * 16 // 9,  # integer division, as in the original Python 2
                                 downsample_ratio=2,
                                 show_progress=show_progress)

    outname = os.path.join(
        outdir,
        substance.name.replace(' - ', '-').replace(' ', '-').replace(
            '/', '-').replace("'", '') + '.png')
    with open(outname, 'wb') as outfile:
        save_image(out, outfile, format="PNG")
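For context, calling this helper typically looks like the following minimal sketch. It assumes a configured Django environment for the OpenSurfaces models used above; the save_all_substance_grids wrapper is hypothetical, not part of the project.

# Hypothetical driver (not in the source project): render one grid per
# substance, assuming Django is set up and ShapeSubstance is importable.
def save_all_substance_grids(outdir, show_progress=False):
    for substance in ShapeSubstance.objects.all():
        save_substance_grid(substance.id, outdir, show_progress=show_progress)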
Example No. 2
def save_substance_grid(substance_id, outdir, show_progress=False):
    try:
        os.makedirs(outdir)
    except OSError as e:
        print(e)  # directory may already exist

    substance = ShapeSubstance.objects.get(id=substance_id)
    print('substance: %s' % substance)
    qset = MaterialShape.objects.filter(
        substance_id=substance_id, photo__inappropriate=False)
    if not qset.exists():
        print('no shapes for %s' % substance)
        return

    from common.utils import create_image_grid_qset
    out = create_image_grid_qset(qset, 'image_square_300',
                                 ncols=20, size=300,
                                 max_qset_size=10 * 20 * 16 // 9,  # integer division, as in the original Python 2
                                 downsample_ratio=2,
                                 show_progress=show_progress)

    outname = os.path.join(outdir, substance.name
                           .replace(' - ', '-').replace(' ', '-')
                           .replace('/', '-').replace("'", '') + '.png')
    with open(outname, 'wb') as outfile:
        save_image(out, outfile, format="PNG")
Example No. 3
def save_texture_grid(outdir, max_qset_size=None, ncols=25, show_progress=False, category=None):
    try:
        os.makedirs(outdir)
    except OSError as e:
        print(e)  # directory may already exist

    qset = ShapeRectifiedNormalLabel.objects \
        .filter(shape__photo__license__publishable=True,
                shape__correct=True, shape__planar=True,
                shape__rectified_normal_id=F('id')) \
        .order_by('-shape__num_vertices')

    if category:
        qset = qset.filter(shape__substance__name=category)

    if not qset.exists():
        print('no textures found')
        return

    if max_qset_size:
        qset = qset[:max_qset_size]

    from common.utils import create_image_grid_qset
    out = create_image_grid_qset(qset, 'image_rectified_square_300',
                                 ncols=ncols, size=256,
                                 downsample_ratio=1,
                                 show_progress=show_progress)

    if category:
        outname = os.path.join(outdir, '%s.png' % category.lower())
    else:
        outname = os.path.join(outdir, 'textures.png')
    with open(outname, 'wb') as outfile:
        save_image(out, outfile, format="PNG")
Example No. 4
def save_texture_grid(outdir,
                      max_qset_size=None,
                      ncols=25,
                      show_progress=False,
                      category=None):
    try:
        os.makedirs(outdir)
    except OSError as e:
        print(e)  # directory may already exist

    qset = ShapeRectifiedNormalLabel.objects \
        .filter(shape__photo__license__publishable=True,
                shape__correct=True, shape__planar=True,
                shape__rectified_normal_id=F('id')) \
        .order_by('-shape__num_vertices')

    if category:
        qset = qset.filter(shape__substance__name=category)

    if not qset.exists():
        print('no textures found')
        return

    if max_qset_size:
        qset = qset[:max_qset_size]

    from common.utils import create_image_grid_qset
    out = create_image_grid_qset(qset,
                                 'image_rectified_square_300',
                                 ncols=ncols,
                                 size=256,
                                 downsample_ratio=1,
                                 show_progress=show_progress)

    if category:
        outname = os.path.join(outdir, '%s.png' % category.lower())
    else:
        outname = os.path.join(outdir, 'textures.png')
    with open(outname, 'wb') as outfile:
        save_image(out, outfile, format="PNG")
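Usage is analogous for texture grids. A minimal sketch, assuming the same Django setup; the category names below are purely illustrative (real names come from the ShapeSubstance table):

# Hypothetical usage (category names are assumptions, not from the source):
save_texture_grid('grids')                        # all publishable textures
for name in ['Wood', 'Fabric', 'Metal']:
    save_texture_grid('grids', category=name)     # one grid per category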
Example No. 5
def save_substance_reflectance_grid(
    outdir, initial_id=None, max_sequence_len=None, ncols=25, show_progress=False, substance=None
):

    try:
        os.makedirs(outdir)
    except OSError as e:
        print(e)  # directory may already exist

    print "Fetching all BSDFs..."

    # fetch all bsdfs
    qset = ShapeBsdfLabel_wd.objects.filter(
        color_correct=True,
        gloss_correct=True,
        shape__photo__inappropriate=False,
        # color_correct_score__gte=0.1,
        # gloss_correct_score__gte=0.1,
        shape__photo__license__publishable=True,
    )

    if substance:
        qset = qset.filter(shape__substance__name=substance)

    bsdfs = list(
        qset.extra(select={"correct_score": "color_correct_score + gloss_correct_score"}, order_by=("-correct_score",))
    )

    if not bsdfs:
        print("no BSDFs found")
        return

    # treat max_sequence_len=None as "no limit"
    if max_sequence_len:
        max_sequence_len = min(max_sequence_len, len(bsdfs))
    else:
        max_sequence_len = len(bsdfs)
    print("Constructing sequence (%s items)..." % max_sequence_len)

    # start with a more interesting shape
    bsdf = bsdfs[0]
    for b in bsdfs:
        if b.id == initial_id:
            bsdf = b
            break
    bsdf_sequence = [bsdf]

    shape_ids = set()
    shape_ids.add(bsdf.shape_id)

    try:
        while True:

            best_color_d = 1e10
            best_gloss_b = None

            best_gloss_d = 1e10
            best_color_b = None

            for b in bsdfs:
                if b.shape_id in shape_ids:
                    continue

                color_d = bsdf.color_distance(b)
                if color_d <= best_color_d:
                    best_color_d = color_d
                    best_color_b = b

                if color_d <= 2.3:
                    gloss_d = bsdf.gloss_distance(b)
                    if gloss_d < best_gloss_d:
                        best_gloss_d = gloss_d
                        best_gloss_b = b

            best_b = best_gloss_b if best_gloss_b else best_color_b
            if best_b:
                print(best_b.id)
                bsdf = best_b
                bsdf_sequence.append(best_b)
                shape_ids.add(best_b.shape_id)
                if max_sequence_len and len(bsdf_sequence) >= max_sequence_len:
                    break
            else:
                break

            if show_progress:
                print(len(bsdf_sequence))
    except KeyboardInterrupt:
        print "Search interrupted: using %s items" % len(bsdf_sequence)

    print "Fetching shapes..."
    image_pairs = [(b.image_blob, b.shape.image_square_300) for b in bsdf_sequence]

    print "Constructing image..."

    # save result
    from common.utils import create_image_pair_grid_list

    out = create_image_pair_grid_list(image_pairs, ncols=ncols, size=256, show_progress=True)

    print "Saving result..."
    if substance:
        outname = os.path.join(outdir, "%s.png" % substance.replace("/", "-").lower())
    else:
        outname = os.path.join(outdir, "blob-sequence.png")
    with open(outname, "wb") as outfile:
        save_image(out, outfile, format="PNG")
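The sequence construction above is a greedy nearest-neighbor chain: starting from a seed BSDF, it repeatedly appends the closest unused item, preferring gloss distance once color distance is already small. A minimal self-contained sketch of that idea, using plain Euclidean distance on toy 2-D points in place of the project's color_distance/gloss_distance methods:

# Sketch of the greedy chaining idea, on toy data (names are illustrative).
import math

def greedy_chain(items, distance, max_len=None):
    # repeatedly append the nearest unvisited item to the end of the chain
    sequence = [items[0]]
    remaining = set(range(1, len(items)))
    while remaining and (max_len is None or len(sequence) < max_len):
        current = sequence[-1]
        best = min(remaining, key=lambda i: distance(current, items[i]))
        sequence.append(items[best])
        remaining.remove(best)
    return sequence

points = [(0, 0), (5, 5), (1, 0), (0, 1), (5, 6)]
print(greedy_chain(points, math.dist))  # nearby points end up adjacent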
Example No. 6
def save_substance_reflectance_grid(
        outdir, initial_id=None,
        max_sequence_len=None, ncols=25,
        show_progress=False, substance=None):

    try:
        os.makedirs(outdir)
    except OSError as e:
        print(e)  # directory may already exist

    print('Fetching all BSDFs...')

    # fetch all bsdfs
    qset = ShapeBsdfLabel_wd.objects.filter(
        color_correct=True, gloss_correct=True, shape__photo__inappropriate=False,
        #color_correct_score__gte=0.1,
        #gloss_correct_score__gte=0.1,
        shape__photo__license__publishable=True)

    if substance:
        qset = qset.filter(shape__substance__name=substance)

    bsdfs = list(qset.extra(
        select={'correct_score': 'color_correct_score + gloss_correct_score'},
        order_by=('-correct_score',)
    ))

    if not bsdfs:
        print('no BSDFs found')
        return

    # treat max_sequence_len=None as 'no limit'
    if max_sequence_len:
        max_sequence_len = min(max_sequence_len, len(bsdfs))
    else:
        max_sequence_len = len(bsdfs)
    print('Constructing sequence (%s items)...' % max_sequence_len)

    # start with a more interesting shape
    bsdf = bsdfs[0]
    for b in bsdfs:
        if b.id == initial_id:
            bsdf = b
            break
    bsdf_sequence = [bsdf]

    shape_ids = set()
    shape_ids.add(bsdf.shape_id)

    try:
        while True:

            best_color_d = 1e10
            best_gloss_b = None

            best_gloss_d = 1e10
            best_color_b = None

            for b in bsdfs:
                if b.shape_id in shape_ids:
                    continue

                color_d = bsdf.color_distance(b)
                if color_d <= best_color_d:
                    best_color_d = color_d
                    best_color_b = b

                if color_d <= 2.3:
                    gloss_d = bsdf.gloss_distance(b)
                    if gloss_d < best_gloss_d:
                        best_gloss_d = gloss_d
                        best_gloss_b = b

            best_b = best_gloss_b if best_gloss_b else best_color_b
            if best_b:
                print(best_b.id)
                bsdf = best_b
                bsdf_sequence.append(best_b)
                shape_ids.add(best_b.shape_id)
                if max_sequence_len and len(bsdf_sequence) >= max_sequence_len:
                    break
            else:
                break

            if show_progress:
                print(len(bsdf_sequence))
    except KeyboardInterrupt:
        print('Search interrupted: using %s items' % len(bsdf_sequence))

    print('Fetching shapes...')
    image_pairs = [
        (b.image_blob, b.shape.image_square_300)
        for b in bsdf_sequence
    ]

    print('Constructing image...')

    # save result
    from common.utils import create_image_pair_grid_list
    out = create_image_pair_grid_list(
        image_pairs, ncols=ncols, size=256, show_progress=True)

    print('Saving result...')
    if substance:
        outname = os.path.join(
            outdir, '%s.png' % substance.replace('/', '-').lower())
    else:
        outname = os.path.join(outdir, 'blob-sequence.png')
    with open(outname, 'wb') as outfile:
        save_image(out, outfile, format="PNG")
Example No. 7
def detect_vanishing_points_impl(photo, image, save=True):

    # algorithm parameters
    max_em_iter = 0  # if 0, don't do EM
    min_cluster_size = 10
    min_line_len2 = 4.0
    residual_stdev = 0.75
    max_clusters = 8
    outlier_weight = 0.2
    weight_clamp = 0.1
    lambda_perp = 1.0
    verbose = False

    width, height = image.size
    print('size: %s x %s' % (width, height))

    vpdetection_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'opt',
                     'vpdetection', 'matlab'))

    tmpdir = tempfile.mkdtemp()
    try:

        # save image to local tmpdir
        localname = os.path.join(tmpdir, 'image.jpg')
        with open(localname, 'wb') as target:
            save_image(image, target, format='JPEG', options={'quality': 90})

        # detect line segments using LSD (Grompone, G., Jakubowicz, J., Morel,
        # J. and Randall, G. (2010). LSD: A Fast Line Segment Detector with a
        # False Detection Control. IEEE Transactions on Pattern Analysis and
        # Machine Intelligence, 32, 722.)
        linesname = os.path.join(tmpdir, 'lines.txt')
        matlab_command = ";".join([
            "try",
            "addpath('../lsd-1.5/')",
            "lines = lsd(double(rgb2gray(imread('%s'))))" % localname,
            "save('%s', 'lines', '-ascii', '-tabs')" % linesname,
            "catch",
            "end",
            "quit",
        ])
        print('matlab command: %s' % matlab_command)
        subprocess.check_call(args=[
            'matlab', '-nodesktop', '-nosplash', '-nodisplay', '-r',
            matlab_command
        ],
                              cwd=vpdetection_dir)

        # cluster lines using J-linkage (Toldo, R. and Fusiello, A. (2008).
        # Robust multiple structures estimation with J-Linkage. European
        # Conference on Computer Vision(ECCV), 2008.)
        # and (Tardif J.-P., Non-iterative Approach for Fast and Accurate
        # Vanishing Point Detection, 12th IEEE International Conference on
        # Computer Vision, Kyoto, Japan, September 27 - October 4, 2009.)
        clustername = os.path.join(tmpdir, 'clusters.txt')
        subprocess.check_call(args=['./vpdetection', linesname, clustername],
                              cwd=vpdetection_dir)

        # collect line clusters
        clusters_dict = {}
        all_lines = []
        with open(clustername, 'r') as cluster_file:
            cluster_rows = cluster_file.readlines()
        for row in cluster_rows:
            cols = row.split()
            idx = int(cols[4])
            line = [float(f) for f in cols[0:4]]

            # discard small lines
            x1, y1, x2, y2 = line
            len2 = (x1 - x2)**2 + (y2 - y1)**2
            if len2 < min_line_len2:
                continue

            if idx in clusters_dict:
                clusters_dict[idx].append(line)
                all_lines.append(line)
            else:
                clusters_dict[idx] = [line]

    finally:
        shutil.rmtree(tmpdir)

    # discard invalid clusters and sort by cluster length
    thresh = 3 if max_em_iter else min_cluster_size
    clusters = [c for c in clusters_dict.values() if len(c) >= thresh]
    clusters.sort(key=line_cluster_length, reverse=True)
    if max_em_iter and len(clusters) > max_clusters:
        clusters = clusters[:max_clusters]
    print "Using %s clusters and %s lines" % (len(clusters), len(all_lines))
    if not clusters:
        print "Not enough clusters"
        return

    # Solve for optimal vanishing point using V_GS in 5.2 section of
    # (http://www-etud.iro.umontreal.ca/~tardif/fichiers/Tardif_ICCV2009.pdf).
    # where "optimal" minimizes algebraic error.
    vectors = []
    for lines in clusters:
        # Minimize 'algebraic' error to get an initial solution
        A = np.zeros((len(lines), 3))
        for i in range(len(lines)):
            x1, y1, x2, y2 = lines[i]
            A[i, :] = [y1 - y2, x2 - x1, x1 * y2 - y1 * x2]
        __, __, VT = np.linalg.svd(A, full_matrices=False, compute_uv=True)
        if VT.shape != (3, 3):
            raise ValueError("Invalid SVD shape %s" % (VT.shape,))
        x, y, w = VT[2, :]
        p = [x / w, y / w]
        v = photo.vanishing_point_to_vector((p[0] / width, p[1] / height))
        vectors.append(v)

    # EM
    if max_em_iter:

        # complete orthonormal system
        if len(vectors) >= 2:
            vectors.append(normalized_cross(vectors[0], vectors[1]))

        ### EM refinement ###

        x0 = None
        x_opt = None
        exp_coeff = 0.5 / (residual_stdev**2)

        num_weights_nnz = 0
        num_weights = 0

        for em_iter in range(max_em_iter):

            ### E STEP ###

            # convert back to vanishing points
            points = vectors_to_points(photo, image, vectors)

            # last column is the outlier cluster
            weights = np.zeros((len(all_lines), len(vectors) + 1))

            # estimate weights (assume uniform prior)
            for i_p, p in enumerate(points):
                weights[:, i_p] = [line_residual(l, p) for l in all_lines]
            weights = np.exp(-exp_coeff * np.square(weights))

            # outlier weight
            weights[:, len(points)] = outlier_weight

            # normalize each row (each line segment) to have unit sum
            weights_row_sum = weights.sum(axis=1)
            weights /= weights_row_sum[:, np.newaxis]

            # add sparsity
            weights[weights < weight_clamp] = 0
            num_weights += weights.size
            num_weights_nnz += np.count_nonzero(weights)

            # check convergence
            if (em_iter >= 10 and len(x0) == len(x_opt) and
                    np.linalg.norm(np.array(x0) - np.array(x_opt)) <= 1e-5):
                break

            # sort by weight
            if len(vectors) > 1:
                vectors_weights = [(v, weights[:, i_v].sum())
                                   for i_v, v in enumerate(vectors)]
                vectors_weights.sort(key=lambda x: x[1], reverse=True)
                vectors = [x[0] for x in vectors_weights]

            ### M STEP ###

            # objective function to minimize
            def objective_function(x, *args):
                cur_vectors = unpack_x(x)
                cur_points = vectors_to_points(photo, image, cur_vectors)

                # line-segment errors
                residuals = [
                    weights[i_l, i_p] * line_residual(all_lines[i_l], p)
                    for i_p, p in enumerate(cur_points)
                    for i_l in np.flatnonzero(weights[:, i_p])
                ]

                # penalize deviations from 45 or 90 degree angles
                if lambda_perp:
                    residuals += [
                        lambda_perp * math.sin(4 * math.acos(abs_dot(v, w)))
                        for i_v, v in enumerate(cur_vectors)
                        for w in cur_vectors[:i_v]
                    ]

                return residuals

            # slowly vary parameters
            t = min(1.0, em_iter / 20.0)

            # vary tol from 1e-2 to 1e-6
            tol = math.exp(math.log(1e-2) * (1 - t) + math.log(1e-6) * t)

            from scipy.optimize import leastsq
            x0 = pack_x(vectors)
            x_opt, __ = leastsq(objective_function, x0, ftol=tol, xtol=tol)
            vectors = unpack_x(x_opt)

            ### BETWEEN ITERATIONS ###

            if verbose:
                print('EM: %s iters, %s clusters, weight sparsity: %s%%' % (
                    em_iter, len(vectors),
                    100.0 * num_weights_nnz / num_weights))
                print('residual: %s' % sum(y**2
                                           for y in objective_function(x_opt)))

            # complete orthonormal system if missing
            if len(vectors) == 2:
                vectors.append(normalized_cross(vectors[0], vectors[1]))

            # merge similar clusters
            cluster_merge_dot = math.cos(math.radians(t * 20.0))
            vectors_merged = []
            for v in vectors:
                if (not vectors_merged or all(
                        abs_dot(v, w) < cluster_merge_dot
                        for w in vectors_merged)):
                    vectors_merged.append(v)
            if verbose and len(vectors) != len(vectors_merged):
                print('Merging %s --> %s vectors' % (len(vectors),
                                                     len(vectors_merged)))
            vectors = vectors_merged

        residual = sum(r**2 for r in objective_function(x_opt))
        print('EM: %s iters, residual: %s, %s clusters, weight sparsity: %s%%' % (
            em_iter, residual, len(vectors),
            100.0 * num_weights_nnz / num_weights))

        # final points
        points = vectors_to_points(photo, image, vectors)

        # sanity checks
        assert len(vectors) == len(points)

        # re-assign clusters
        clusters_points = [([], p) for p in points]
        line_map_cluster = np.argmax(weights, axis=1)
        for i_l, l in enumerate(all_lines):
            i_c = line_map_cluster[i_l]
            if i_c < len(points):
                clusters_points[i_c][0].append(l)

        # throw away small clusters
        clusters_points = [cp for cp in clusters_points
                           if len(cp[0]) >= min_cluster_size]

        # reverse sort by cluster length
        clusters_points.sort(key=lambda x: line_cluster_length(x[0]),
                             reverse=True)

        # split into two parallel arrays
        clusters = [cp[0] for cp in clusters_points]
        points = [cp[1] for cp in clusters_points]

    else:  # no EM

        for i_v, lines in enumerate(clusters):

            def objective_function(x, *args):
                p = vectors_to_points(photo, image, unpack_x(x))[0]
                return [line_residual(l, p) for l in lines]

            from scipy.optimize import leastsq
            x0 = pack_x([vectors[i_v]])
            x_opt, __ = leastsq(objective_function, x0)
            vectors[i_v] = unpack_x(x_opt)[0]

        # delete similar vectors
        cluster_merge_dot = math.cos(math.radians(20.0))
        vectors_merged = []
        clusters_merged = []
        for i_v, v in enumerate(vectors):
            if (not vectors_merged or all(
                    abs_dot(v, w) < cluster_merge_dot
                    for w in vectors_merged)):
                vectors_merged.append(v)
                clusters_merged.append(clusters[i_v])
        vectors = vectors_merged
        clusters = clusters_merged

        # clamp number of vectors
        if len(clusters) > max_clusters:
            vectors = vectors[:max_clusters]
            clusters = clusters[:max_clusters]

        points = vectors_to_points(photo, image, vectors)

    # normalize to [0, 0], [1, 1]
    clusters_normalized = [[[
        l[0] / width, l[1] / height, l[2] / width, l[3] / height
    ] for l in lines] for lines in clusters]

    points_normalized = [(x / width, y / height) for (x, y) in points]

    # save result
    photo.vanishing_lines = json.dumps(clusters_normalized)
    photo.vanishing_points = json.dumps(points_normalized)
    photo.vanishing_length = sum(
        line_cluster_length(c) for c in clusters_normalized)
    if save:
        photo.save()
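The initial estimate in the per-cluster loop above is the standard algebraic solve: each segment (x1, y1, x2, y2) defines the homogeneous line l = (y1 - y2, x2 - x1, x1*y2 - y1*x2), and the vanishing point is the right singular vector of the stacked line matrix with the smallest singular value. A self-contained sketch on synthetic segments whose supporting lines all meet at (10, 10):

# Sketch of the algebraic vanishing-point solve (synthetic data only).
import numpy as np

segments = [(0, 0, 1, 1), (0, 1, 1, 1.9), (0, -1, 1, 0.1)]
A = np.array([[y1 - y2, x2 - x1, x1 * y2 - y1 * x2]
              for (x1, y1, x2, y2) in segments], dtype=float)
_, _, VT = np.linalg.svd(A)
x, y, w = VT[2, :]      # null vector of A, i.e. the common intersection
print(x / w, y / w)     # -> approximately (10.0, 10.0)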
Example No. 8
def detect_vanishing_points_impl(photo, image, save=True):

    # algorithm parameters
    max_em_iter = 0  # if 0, don't do EM
    min_cluster_size = 10
    min_line_len2 = 4.0
    residual_stdev = 0.75
    max_clusters = 8
    outlier_weight = 0.2
    weight_clamp = 0.1
    lambda_perp = 1.0
    verbose = False

    width, height = image.size
    print('size: %s x %s' % (width, height))

    vpdetection_dir = os.path.abspath(os.path.join(
        os.path.dirname(__file__), os.pardir, os.pardir, 'opt', 'vpdetection', 'matlab'
    ))

    tmpdir = tempfile.mkdtemp()
    try:

        # save image to local tmpdir
        localname = os.path.join(tmpdir, 'image.jpg')
        with open(localname, 'wb') as target:
            save_image(image, target, format='JPEG', options={'quality': 90})

        # detect line segments using LSD (Grompone, G., Jakubowicz, J., Morel,
        # J. and Randall, G. (2010). LSD: A Fast Line Segment Detector with a
        # False Detection Control. IEEE Transactions on Pattern Analysis and
        # Machine Intelligence, 32, 722.)
        linesname = os.path.join(tmpdir, 'lines.txt')
        matlab_command = ";".join([
            "try",
            "addpath('../lsd-1.5/')",
            "lines = lsd(double(rgb2gray(imread('%s'))))" % localname,
            "save('%s', 'lines', '-ascii', '-tabs')" % linesname,
            "catch",
            "end",
            "quit",
        ])
        print('matlab command: %s' % matlab_command)
        subprocess.check_call(args=[
            'matlab', '-nodesktop', '-nosplash', '-nodisplay',
            '-r', matlab_command
        ], cwd=vpdetection_dir)

        # cluster lines using J-linkage (Toldo, R. and Fusiello, A. (2008).
        # Robust multiple structures estimation with J-Linkage. European
        # Conference on Computer Vision(ECCV), 2008.)
        # and (Tardif J.-P., Non-iterative Approach for Fast and Accurate
        # Vanishing Point Detection, 12th IEEE International Conference on
        # Computer Vision, Kyoto, Japan, September 27 - October 4, 2009.)
        clustername = os.path.join(tmpdir, 'clusters.txt')
        subprocess.check_call(
            args=['./vpdetection', linesname, clustername],
            cwd=vpdetection_dir)

        # collect line clusters
        clusters_dict = {}
        all_lines = []
        with open(clustername, 'r') as cluster_file:
            cluster_rows = cluster_file.readlines()
        for row in cluster_rows:
            cols = row.split()
            idx = int(cols[4])
            line = [float(f) for f in cols[0:4]]

            # discard small lines
            x1, y1, x2, y2 = line
            len2 = (x1 - x2) ** 2 + (y2 - y1) ** 2
            if len2 < min_line_len2:
                continue

            if idx in clusters_dict:
                clusters_dict[idx].append(line)
                all_lines.append(line)
            else:
                clusters_dict[idx] = [line]

    finally:
        shutil.rmtree(tmpdir)

    # discard invalid clusters and sort by cluster length
    thresh = 3 if max_em_iter else min_cluster_size
    clusters = [c for c in clusters_dict.values() if len(c) >= thresh]
    clusters.sort(key=line_cluster_length, reverse=True)
    if max_em_iter and len(clusters) > max_clusters:
        clusters = clusters[:max_clusters]
    print "Using %s clusters and %s lines" % (len(clusters), len(all_lines))
    if not clusters:
        print "Not enough clusters"
        return

    # Solve for optimal vanishing point using V_GS in 5.2 section of
    # (http://www-etud.iro.umontreal.ca/~tardif/fichiers/Tardif_ICCV2009.pdf).
    # where "optimal" minimizes algebraic error.
    vectors = []
    for lines in clusters:
        # Minimize 'algebraic' error to get an initial solution
        A = np.zeros((len(lines), 3))
        for i in range(len(lines)):
            x1, y1, x2, y2 = lines[i]
            A[i, :] = [y1 - y2, x2 - x1, x1 * y2 - y1 * x2]
        __, __, VT = np.linalg.svd(A, full_matrices=False, compute_uv=True)
        if VT.shape != (3, 3):
            raise ValueError("Invalid SVD shape %s" % (VT.shape,))
        x, y, w = VT[2, :]
        p = [x / w, y / w]
        v = photo.vanishing_point_to_vector(
            (p[0] / width, p[1] / height)
        )
        vectors.append(v)

    # EM
    if max_em_iter:

        # complete orthonormal system
        if len(vectors) >= 2:
            vectors.append(normalized_cross(vectors[0], vectors[1]))

        ### EM refinement ###

        x0 = None
        x_opt = None
        exp_coeff = 0.5 / (residual_stdev ** 2)

        num_weights_nnz = 0
        num_weights = 0

        for em_iter in range(max_em_iter):

            ### E STEP ###

            # convert back to vanishing points
            points = vectors_to_points(photo, image, vectors)

            # last column is the outlier cluster
            weights = np.zeros((len(all_lines), len(vectors) + 1))

            # estimate weights (assume uniform prior)
            for i_p, p in enumerate(points):
                weights[:, i_p] = [line_residual(l, p) for l in all_lines]
            weights = np.exp(-exp_coeff * np.square(weights))

            # outlier weight
            weights[:, len(points)] = outlier_weight

            # normalize each row (each line segment) to have unit sum
            weights_row_sum = weights.sum(axis=1)
            weights /= weights_row_sum[:, np.newaxis]

            # add sparsity
            weights[weights < weight_clamp] = 0
            num_weights += weights.size
            num_weights_nnz += np.count_nonzero(weights)

            # check convergence
            if (em_iter >= 10 and len(x0) == len(x_opt) and
                    np.linalg.norm(np.array(x0) - np.array(x_opt)) <= 1e-5):
                break

            # sort by weight
            if len(vectors) > 1:
                vectors_weights = [
                    (v, weights[:, i_v].sum()) for i_v, v in enumerate(vectors)
                ]
                vectors_weights.sort(key=lambda x: x[1], reverse=True)
                vectors = [x[0] for x in vectors_weights]

            ### M STEP ###

            # objective function to minimize
            def objective_function(x, *args):
                cur_vectors = unpack_x(x)
                cur_points = vectors_to_points(photo, image, cur_vectors)

                # line-segment errors
                residuals = [
                    weights[i_l, i_p] * line_residual(all_lines[i_l], p)
                    for i_p, p in enumerate(cur_points)
                    for i_l in np.flatnonzero(weights[:, i_p])
                ]

                # penalize deviations from 45 or 90 degree angles
                if lambda_perp:
                    residuals += [
                        lambda_perp * math.sin(4 * math.acos(abs_dot(v, w)))
                        for i_v, v in enumerate(cur_vectors)
                        for w in cur_vectors[:i_v]
                    ]

                return residuals

            # slowly vary parameters
            t = min(1.0, em_iter / 20.0)

            # vary tol from 1e-2 to 1e-6
            tol = math.exp(math.log(1e-2) * (1 - t) + math.log(1e-6) * t)

            from scipy.optimize import leastsq
            x0 = pack_x(vectors)
            x_opt, __ = leastsq(objective_function, x0, ftol=tol, xtol=tol)
            vectors = unpack_x(x_opt)

            ### BETWEEN ITERATIONS ###

            if verbose:
                print('EM: %s iters, %s clusters, weight sparsity: %s%%' % (
                    em_iter, len(vectors), 100.0 * num_weights_nnz / num_weights))
                print('residual: %s' % sum(y ** 2 for y in objective_function(x_opt)))

            # complete orthonormal system if missing
            if len(vectors) == 2:
                vectors.append(normalized_cross(vectors[0], vectors[1]))

            # merge similar clusters
            cluster_merge_dot = math.cos(math.radians(t * 20.0))
            vectors_merged = []
            for v in vectors:
                if (not vectors_merged or
                        all(abs_dot(v, w) < cluster_merge_dot for w in vectors_merged)):
                    vectors_merged.append(v)
            if verbose and len(vectors) != len(vectors_merged):
                print('Merging %s --> %s vectors' % (len(vectors), len(vectors_merged)))
            vectors = vectors_merged

        residual = sum(r ** 2 for r in objective_function(x_opt))
        print('EM: %s iters, residual: %s, %s clusters, weight sparsity: %s%%' % (
            em_iter, residual, len(vectors), 100.0 * num_weights_nnz / num_weights))

        # final points
        points = vectors_to_points(photo, image, vectors)

        # sanity checks
        assert len(vectors) == len(points)

        # re-assign clusters
        clusters_points = [([], p) for p in points]
        line_map_cluster = np.argmax(weights, axis=1)
        for i_l, l in enumerate(all_lines):
            i_c = line_map_cluster[i_l]
            if i_c < len(points):
                clusters_points[i_c][0].append(l)

        # throw away small clusters
        clusters_points = [cp for cp in clusters_points
                           if len(cp[0]) >= min_cluster_size]

        # reverse sort by cluster length
        clusters_points.sort(
            key=lambda x: line_cluster_length(x[0]), reverse=True)

        # split into two parallel arrays
        clusters = [cp[0] for cp in clusters_points]
        points = [cp[1] for cp in clusters_points]

    else:  # no EM

        for i_v, lines in enumerate(clusters):
            def objective_function(x, *args):
                p = vectors_to_points(photo, image, unpack_x(x))[0]
                return [line_residual(l, p) for l in lines]
            from scipy.optimize import leastsq
            x0 = pack_x([vectors[i_v]])
            x_opt, __ = leastsq(objective_function, x0)
            vectors[i_v] = unpack_x(x_opt)[0]

        # delete similar vectors
        cluster_merge_dot = math.cos(math.radians(20.0))
        vectors_merged = []
        clusters_merged = []
        for i_v, v in enumerate(vectors):
            if (not vectors_merged or
                    all(abs_dot(v, w) < cluster_merge_dot for w in vectors_merged)):
                vectors_merged.append(v)
                clusters_merged.append(clusters[i_v])
        vectors = vectors_merged
        clusters = clusters_merged

        # clamp number of vectors
        if len(clusters) > max_clusters:
            vectors = vectors[:max_clusters]
            clusters = clusters[:max_clusters]

        points = vectors_to_points(photo, image, vectors)

    # normalize to [0, 0], [1, 1]
    clusters_normalized = [[
        [l[0] / width, l[1] / height, l[2] / width, l[3] / height]
        for l in lines
    ] for lines in clusters]

    points_normalized = [
        (x / width, y / height) for (x, y) in points
    ]

    # save result
    photo.vanishing_lines = json.dumps(clusters_normalized)
    photo.vanishing_points = json.dumps(points_normalized)
    photo.vanishing_length = sum(line_cluster_length(c)
                                 for c in clusters_normalized)
    if save:
        photo.save()
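The E step above reduces to a few lines of numpy: turn line-to-point residuals into Gaussian weights, append a constant outlier column, normalize each row to unit sum, and clamp small entries for sparsity. A sketch with made-up residuals, reusing the constants from the code (residual_stdev=0.75, outlier_weight=0.2, weight_clamp=0.1):

# Sketch of the E-step weighting scheme; the residuals are invented.
import numpy as np

residuals = np.array([[0.1, 2.0],      # rows: line segments
                      [1.5, 0.2],      # cols: candidate vanishing points
                      [3.0, 3.0]])
exp_coeff = 0.5 / (0.75 ** 2)
weights = np.exp(-exp_coeff * np.square(residuals))
weights = np.hstack([weights, np.full((len(weights), 1), 0.2)])  # outlier col
weights /= weights.sum(axis=1, keepdims=True)    # each segment sums to 1
weights[weights < 0.1] = 0                       # clamp for sparsity
print(weights)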