Example #1
def estimate_rotation(img):
    assert(img.dtype == 'bool')

    # remove filled blocks to speed up line detection
    elem = morphology.square(2)
    aux = morphology.binary_dilation(img, elem) - morphology.binary_erosion(img, elem)

    # Line detection using the probabilistic Hough transform
    thres = 50
    minlen = 0.1 * min(aux.shape)
    maxgap = 0.01 * minlen
    lines = transform.probabilistic_hough(aux, threshold=thres, line_length=minlen, line_gap=maxgap)

    # make sure the first point of each line is the one closest to the origin
    for i, lin in enumerate(lines):
        (x0, y0), (x1, y1) = lin
        if x1*x1 + y1*y1 < x0*x0 + y0*y0:
            lines[i] = ((x1, y1), (x0, y0))

    # dominant orientation
    angle_half_range = np.math.pi / 4
    nbins = int(2 * angle_half_range * (180./np.math.pi) / 0.2)

    orient = []
    for lin in lines:
        (x0,y0), (x1,y1) = lin
        orient.append(np.math.atan2(y1-y0, x1-x0))

    (h, binval) = np.histogram(orient, range=(-angle_half_range, angle_half_range), bins=nbins)
    alpha = binval[h.argmax()] * (180./ np.math.pi)
    return alpha + 0.5 * (binval[1] - binval[0]) * (180./ np.math.pi)
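estimate_rotation returns the dominant line orientation in degrees, so it can drive a simple deskew step. A minimal usage sketch, assuming the function above is in scope; the file name and the 0.5 binarisation threshold are hypothetical:

# Minimal usage sketch (assumes estimate_rotation as defined above).
from skimage import io, transform

page = io.imread("page.png", as_grey=True)   # hypothetical input file
binary = page < 0.5                          # crude binarisation; threshold is an assumption
alpha = estimate_rotation(binary)            # estimated skew, in degrees
deskewed = transform.rotate(page, -alpha)    # rotate back by the estimate
io.imsave("page_deskewed.png", deskewed)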
Example #2
 def image_filter(self, image, **kwargs):
     canny_keys = ('sigma', 'low_threshold', 'high_threshold')
     canny_kwargs = dict([(k, kwargs.pop(k)) for k in canny_keys])
     hough_kwargs = kwargs
     edges = canny(image, **canny_kwargs)
     lines = probabilistic_hough(edges, **hough_kwargs)
     self._lines = lines
     return edges
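The method above splits one kwargs dict between canny and probabilistic_hough. A standalone sketch of the same pattern using the old skimage.filter / skimage.transform names assumed throughout these examples (current scikit-image calls them skimage.feature.canny and skimage.transform.probabilistic_hough_line); the parameter values are illustrative only:

# Standalone sketch of the kwarg-splitting pattern (illustrative values only).
from skimage.filter import canny
from skimage.transform import probabilistic_hough

def edges_and_lines(image, **kwargs):
    canny_keys = ('sigma', 'low_threshold', 'high_threshold')
    canny_kwargs = dict((k, kwargs.pop(k)) for k in canny_keys if k in kwargs)
    edges = canny(image, **canny_kwargs)
    lines = probabilistic_hough(edges, **kwargs)  # leftover kwargs go to Hough
    return edges, lines

# edges, lines = edges_and_lines(img, sigma=2, threshold=10,
#                                line_length=5, line_gap=3)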
Example #3
 def _get_hough_segments(self):
     segs = probabilistic_hough(
         self.canny_image,
         threshold=self.hough_threshold,
         line_length=self.hough_line_length,
         line_gap=self.hough_line_gap,
     )
     return segs
Example #4
def find_lines(sourcefile=SOURCEFILE, plot=PLOT, shrink=2, threshold=128,
               scale_first=True,
               **transformparams):
  """
  This function borrows from
  http://scikits-image.org/docs/0.3/auto_examples/plot_hough_transform.html
  """
  # Line finding, using the Probabilistic Hough Transform
  params = {}
  params.update(TRANSFORMPARAMS)
  params.update(transformparams)
  print params
  image_orig = imread(sourcefile)

  # scale up
  if scale_first:
    image_orig = scale_up(image_orig)
  # thin lines
  if shrink > 0:
    # note that this returns a one-bit image
    shrunk_image = shrink_lines(image_orig,threshold=threshold,
                                iterations=shrink)
  else:
    # in this case shrunk_image may still be grayscale
    shrunk_image = image_orig
  # switch the y axis
  image = shrunk_image[::-1,:]
  #edge detection
  edges = canny(image, 2, 1, 25)
  #actual transform
  lines = probabilistic_hough(edges, **params)

  if PLOT:
    plt.figure(figsize=(12, 4))

    plt.subplot(131)
    plt.imshow(image, cmap=plt.cm.gray)
    plt.title('Input image')

    plt.subplot(132)
    plt.imshow(edges, cmap=plt.cm.gray)
    plt.title('Canny edges')

    plt.subplot(133)
    plt.imshow(edges * 0)

    for line in lines:
        p0, p1 = line
        plt.plot((p0[0], p1[0]), (p0[1], p1[1]))

    plt.title('Lines found with PHT')
    plt.axis('image')
    plt.show()
  if scale_first:
    lines = np.array(lines)/2
  return lines
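find_lines also depends on module-level names (SOURCEFILE, PLOT, TRANSFORMPARAMS) and two helpers, scale_up and shrink_lines, defined elsewhere in the project. Plausible stand-ins for the two helpers, offered as assumptions rather than the original definitions:

# Assumed stand-ins for helpers used by find_lines (not the originals).
import numpy as np
from scipy import ndimage

def scale_up(image, factor=2):
    # nearest-neighbour upscaling by pixel replication (assumes a 2-D grayscale image)
    return np.kron(image, np.ones((factor, factor), dtype=image.dtype))

def shrink_lines(image, threshold=128, iterations=1):
    # binarize, then erode to thin thick strokes; returns a one-bit image
    return ndimage.binary_erosion(image > threshold, iterations=iterations)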
Example #5
def prob_hough_detect(diffs, **ph_kwargs):
    """Use the probabilistic hough transform to detect regions in the data
    that we will flag as being part of the EIT wave front."""
    detection=[]
    for img in diffs:
        invTransform = sunpy.make_map(np.zeros(img.shape), img._original_header)
        lines = probabilistic_hough(img, **ph_kwargs)
        if lines is not None:
            for line in lines:
                pos1=line[0]
                pos2=line[1]
                fillLine(pos1,pos2,invTransform)
        detection.append(invTransform)
    return detection
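fillLine is not shown with this snippet; a plausible stand-in (an assumption, not the original helper) that marks the pixels of a segment in anything indexable like a 2-D array:

# Assumed stand-in for fillLine (not the original helper): mark the pixels
# along the segment from pos1 to pos2 in a 2-D-indexable canvas.
from skimage import draw

def fillLine(pos1, pos2, canvas):
    (x0, y0), (x1, y1) = pos1, pos2
    rr, cc = draw.line(int(y0), int(x0), int(y1), int(x1))  # (row, col) order
    canvas[rr, cc] = 1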
Example #6
def main():
    image = io.imread("../Resources/perspective-quadrilateral-src-img.jpg", as_grey=True)
    edges = canny(image, sigma=3)
    lines = probabilistic_hough(edges, line_length=75, threshold=50)

    plt.imshow(edges)

    cols = len(image[0])
    for line in lines:
        eq = derive_sie(line[0], line[1])
        p1, p2 = expand_line(eq, len(image[0]), len(image))
        print line
        print p1, p2
        print ""
        plt.plot((p1[0], p2[0]), (p1[1], p2[1]), color='green')

    plt.axis("off")
    plt.show()
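derive_sie and expand_line are project helpers not shown here. Plausible stand-ins, offered as assumptions: derive_sie returns the slope and intercept of a segment, and expand_line stretches that line across the image:

# Assumed stand-ins for derive_sie / expand_line (not the originals).
def derive_sie(p0, p1):
    # slope-intercept form; vertical lines are flagged with an infinite slope
    (x0, y0), (x1, y1) = p0, p1
    if x0 == x1:
        return (float('inf'), x0)          # store the x position in place of b
    m = float(y1 - y0) / (x1 - x0)
    return (m, y0 - m * x0)

def expand_line(eq, width, height):
    # endpoints of the line clipped to the image borders
    m, b = eq
    if m == float('inf'):
        return (b, 0), (b, height - 1)
    return (0, b), (width - 1, m * (width - 1) + b)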
Example #7
def test_probabilistic_hough():
    # Generate a test image
    img = np.zeros((100, 100), dtype=int)
    for i in range(25, 75):
        img[100 - i, i] = 100
        img[i, i] = 100
    # decrease default theta sampling because similar orientations may confuse
    # as mentioned in article of Galambos et al
    theta=np.linspace(0, np.pi, 45)
    lines = probabilistic_hough(img, theta=theta, threshold=10, line_length=10, line_gap=1)
    # sort the lines according to the x-axis
    sorted_lines = []
    for line in lines:
        line = list(line)
        line.sort(key=lambda x: x[0])
        sorted_lines.append(line)
    assert([(25, 75), (74, 26)] in sorted_lines)
    assert([(25, 25), (74, 74)] in sorted_lines)
Example #8
def text_sections(im, output_height):
    im = im.convert('L')
    im_array = image_to_array(im)
    im_arary = im_array / 255

    r = range(-50,50)
    r_theta = [np.pi/2 + x*0.001 for x in r]
    theta = np.array(r_theta)

    edges = canny(im_array, 2, 1, 25)

    lines = probabilistic_hough(edges, threshold=5, line_length=20,
                                line_gap=20, theta=theta)

    lines_sorted = sorted(lines, cmp=cmp_lines)
    rs = regions(lines_sorted, 2)
    text_areas = [bounding_rectangle(ls, 4) for ls in rs]
    for (x0, y0), (x1, y1) in text_areas:
        width, height = x1 - x0, y1 - y0
        output_width = width * output_height // height
        yield(im.transform((output_width, output_height), Image.EXTENT,
                           (x0, y0, x1, y1)))
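A hypothetical usage of text_sections; the other helpers it calls (image_to_array, cmp_lines, regions, bounding_rectangle) belong to the original project and are not shown here:

# Hypothetical usage sketch (the input file name is an assumption).
from PIL import Image

im = Image.open("scan.png")
for i, strip in enumerate(text_sections(im, output_height=32)):
    strip.save("text_strip_%02d.png" % i)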
Example #9
def test_probabilistic_hough():
    # Generate a test image
    img = np.zeros((100, 100), dtype=int)
    for i in range(25, 75):
        img[100 - i, i] = 100
        img[i, i] = 100
    # decrease default theta sampling because similar orientations may confuse
    # as mentioned in article of Galambos et al
    theta = np.linspace(0, np.pi, 45)
    lines = probabilistic_hough(img,
                                theta=theta,
                                threshold=10,
                                line_length=10,
                                line_gap=1)
    # sort the lines according to the x-axis
    sorted_lines = []
    for line in lines:
        line = list(line)
        line.sort(key=lambda x: x[0])
        sorted_lines.append(line)
    assert ([(25, 75), (74, 26)] in sorted_lines)
    assert ([(25, 25), (74, 74)] in sorted_lines)
Example #10
 def _get_hough_segments(self):
     segs = probabilistic_hough(
         self.canny_image, threshold=self.hough_threshold,
         line_length=self.hough_line_length, line_gap=self.hough_line_gap,
     )
     return segs
Example #11
def align(img, min_line_length=100):
    sob = sobel(img)
    #print "!! sob"
    bw = sob > threshold_otsu(sob)
    #print "!! otsu"
    lines = probabilistic_hough(bw, line_length=min_line_length)
    #print "!! hough"
    sorted_lines = sorted(lines,
                          key=lambda l: distance(line_to_vector(l)),
                          reverse=True)[:10]

    rotations = {}
    for l1 in sorted_lines:
        v1 = line_to_vector(l1)
        for l2 in sorted_lines:
            if l1 == l2:
                continue
            v2 = line_to_vector(l2)
            theta = numpy.arccos(
                numpy.dot(v1, v2) / (distance(v1) * distance(v2)))
            if abs(numpy.degrees(theta) - 90) <= 1:
                # found an alignment!
                angle = int(
                    round(
                        numpy.degrees(
                            numpy.arccos(numpy.dot(v1,
                                                   (0, 1)) / distance(v1)))))
                if angle > 90:
                    angle = -(angle % 90)
                if angle > 45:
                    angle = 90 - angle
                elif angle < -45:
                    angle = -90 - angle
                if angle not in rotations:
                    rotations[angle] = 0
                rotations[angle] += 1

    if not rotations:
        # couldn't find boundaries, assume aligned
        return img

    angle = max(rotations.items(), key=lambda item: item[1])[0]
    img2 = misc.imrotate(img, angle)

    sob = sobel(img2)
    bw = sob > threshold_otsu(sob)
    lines = probabilistic_hough(bw, line_length=min_line_length)
    sorted_lines = sorted(lines,
                          key=lambda l: distance(line_to_vector(l)),
                          reverse=True)[:4]

    min_y = bw.shape[0]
    max_y = 0
    min_x = bw.shape[1]
    max_x = 0

    for l in sorted_lines:
        (x1, y1), (x2, y2) = l
        if x1 < min_x:
            min_x = x1
        if x1 > max_x:
            max_x = x1
        if x2 < min_x:
            min_x = x2
        if x2 > max_x:
            max_x = x2
        if y1 < min_y:
            min_y = y1
        if y1 > max_y:
            max_y = y1
        if y2 < min_y:
            min_y = y2
        if y2 > max_y:
            max_y = y2

    img3 = img2[min_y + 1:max_y, min_x + 1:max_x]
    #return misc.imresize(img3, (512, 512))
    return img3
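line_to_vector and distance are not shown with this snippet; plausible stand-ins, offered as assumptions rather than the original helpers:

# Assumed stand-ins for the helpers used by align (not the originals).
import math

def line_to_vector(line):
    (x1, y1), (x2, y2) = line
    return (x2 - x1, y2 - y1)

def distance(v):
    # Euclidean length of a 2-D vector
    return math.sqrt(v[0] ** 2 + v[1] ** 2)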
Example #12
plt.subplot(122)
plt.imshow(np.log(1 + h),
           extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]),
                   d[-1], d[0]],
           cmap=plt.cm.gray, aspect=1/1.5)
plt.title('Hough transform')
plt.xlabel('Angles (degrees)')
plt.ylabel('Distance (pixels)')


# Line finding, using the Probabilistic Hough Transform

image = data.camera()
edges = canny(image, 2, 1, 25)
lines = probabilistic_hough(edges, threshold=10, line_length=5, line_gap=3)

plt.figure(figsize=(8, 3))

plt.subplot(131)
plt.imshow(image, cmap=plt.cm.gray)
plt.title('Input image')

plt.subplot(132)
plt.imshow(edges, cmap=plt.cm.gray)
plt.title('Canny edges')

plt.subplot(133)
plt.imshow(edges * 0)

for line in lines:
Example #13
                    top=0.9,
                    bottom=0.02,
                    left=0.02,
                    right=0.98)

plt.show()

import numpy as np
from skimage.transform import hough, probabilistic_hough
from skimage.filter import canny

image = oilCropNorth

# Line finding, using the Probabilistic Hough Transform
edges = canny(image, sigma=1)
lines = probabilistic_hough(edges, threshold=10, line_length=5, line_gap=3)

plt.subplot(131)
plt.imshow(image, cmap=plt.cm.gray)
plt.title('Input image')
plt.subplot(132)
plt.imshow(edges, cmap=plt.cm.gray)
plt.title('Canny edges')

plt.subplot(133)
plt.imshow(edges * 0)

for line in lines:
    p0, p1 = line
    plt.plot((p0[0], p1[0]), (p0[1], p1[1]))
Example #14
File: all3.py Project: KWMalik/tau
 #misc.imsave("orig.png", out)
 #misc.imsave("rot.png", out2)
 
 from skimage.filter import threshold_otsu, canny, sobel, prewitt
 from skimage.transform import probabilistic_hough
 from scipy.cluster.vq import kmeans
 import math
 mono = mean(out2, 2)
 #bw = mono > threshold_otsu(mono)
 mono = sobel(mono)
 bw = mono > threshold_otsu(mono)
 #mono = misc.imfilter(misc.imfilter(mono, "edge_enhance_more"), "edge_enhance_more")
 #mono = misc.imfilter(mono, "smooth")
 #mono = canny(mono)
 misc.imsave("otsu.png", bw)
 lines = probabilistic_hough(bw, line_length = 400, line_gap = 300)
 def norm(line):
     (x0, y0), (x1,y1) = line
     return math.sqrt((x0-x1)**2 + (y0-y1)**2)
 lines = sorted(lines, key = norm, reverse = True)
 angles = [math.atan2(y1-y0, x1-x0) * (180 / math.pi) for (x0, y0), (x1,y1) in lines]
 perpendicular = [a for a in angles for b in angles if 85 <= abs(a - b) <= 95]
 
 #print angles
 #print perpendicular
 
 clusters, _ = kmeans(numpy.array(perpendicular), 4)
 print clusters
 
 
 #misc.imsave("noise.png", add_noise(out, 1, -128, 128))
Example #15
    diffs.append(diffmap_plus)
    diffs2.append(diffmap_minus)

    # extract the image
    img = diffmap_plus
    img2 = diffmap_minus

    # Perform the hough transform on the positive and negative difference maps separately
    #transform = probabilistic_hough(img)
    #transform2 = probabilistic_hough(img2)

    lines = probabilistic_hough(img, threshold=10, line_length=5, line_gap=3)
    lines2 = probabilistic_hough(img2, threshold=10, line_length=5, line_gap=3)

    invTransform = sunpy.make_map(np.zeros(imgShape),input_maps[i+1]._original_header)
    if lines is not None and lines2 is not None:
        for line in lines:
            pos1=line[0]
            pos2=line[1]
            fillLine(pos1,pos2,invTransform)
        detection.append(invTransform)
        for line in lines2:
Example #16
def align(img, min_line_length = 100):
    sob = sobel(img)
    #print "!! sob"
    bw = sob > threshold_otsu(sob)
    #print "!! otsu"
    lines = probabilistic_hough(bw, line_length = min_line_length)
    #print "!! hough"
    sorted_lines = sorted(lines, key = lambda l: distance(line_to_vector(l)), 
        reverse = True)[:10]
    
    rotations = {}
    for l1 in sorted_lines:
        v1 = line_to_vector(l1)
        for l2 in sorted_lines:
            if l1 == l2:
                continue
            v2 = line_to_vector(l2)
            theta = numpy.arccos(numpy.dot(v1, v2) / (distance(v1) * distance(v2)))
            if abs(numpy.degrees(theta) - 90) <= 1:
                # found an alignment!
                angle = int(round(numpy.degrees(numpy.arccos(numpy.dot(v1, (0, 1)) / distance(v1)))))
                if angle > 90:
                    angle = -(angle % 90)
                if angle > 45:
                    angle = 90 - angle
                elif angle < -45:
                    angle = -90 - angle
                if angle not in rotations:
                    rotations[angle] = 0
                rotations[angle] += 1
    
    if not rotations:
        # couldn't find boundaries, assume aligned 
        return img
    
    angle = max(rotations.items(), key = lambda item: item[1])[0]
    img2 = misc.imrotate(img, angle)
    
    sob = sobel(img2)
    bw = sob > threshold_otsu(sob)
    lines = probabilistic_hough(bw, line_length = min_line_length)
    sorted_lines = sorted(lines, key = lambda l: distance(line_to_vector(l)), 
        reverse = True)[:4]
    
    min_y = bw.shape[0]
    max_y = 0
    min_x = bw.shape[1]
    max_x = 0
    
    for l in sorted_lines:
        (x1, y1), (x2, y2) = l
        if x1 < min_x:
            min_x = x1
        if x1 > max_x:
            max_x = x1
        if x2 < min_x:
            min_x = x2
        if x2 > max_x:
            max_x = x2
        if y1 < min_y:
            min_y = y1
        if y1 > max_y:
            max_y = y1
        if y2 < min_y:
            min_y = y2
        if y2 > max_y:
            max_y = y2
    
    img3 = img2[min_y+1:max_y, min_x+1:max_x]
    #return misc.imresize(img3, (512, 512))
    return img3
Example #17
spec_data = w.get_spectrogram(5)
munged_data = ndimage.gaussian_filter(spec_data, sigma=1.0)
black_white = munged_data > 2.0 * munged_data.mean()
data = black_white

# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(data, return_distance=True)
skel1 = skeletonize(data)

# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel
dist_on_skel1 = distance * skel1

h, theta, d = hough(skel1)
lines = probabilistic_hough(skel1, threshold=6, line_length=5, line_gap=3)
lines = sorted(lines)

plt.figure(figsize=(8, 4))
plt.subplot(141)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.subplot(142)
plt.imshow(dist_on_skel, cmap=plt.cm.spectral, interpolation='nearest')
plt.contour(data, [0.5], colors='w')
plt.axis('off')
plt.subplot(143)
plt.imshow(dist_on_skel1, cmap=plt.cm.spectral, interpolation='nearest')
plt.contour(data, [0.5], colors='w')
plt.axis('off')
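The snippet computes lines from the skeleton but the code that draws them is cut off. A sketch of a fourth panel in the style of the other examples here, offered as an assumption about how the original continued:

# Sketch only: draw the probabilistic Hough segments in a fourth panel.
plt.subplot(144)
plt.imshow(skel1, cmap=plt.cm.gray, interpolation='nearest')
for p0, p1 in lines:
    plt.plot((p0[0], p1[0]), (p0[1], p1[1]), 'r')
plt.axis('off')
plt.show()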
Example #18
def detect_lines(img, simplify=False):
    minsize = min(img.shape)
    maxsize = max(img.shape)

    # Line detection using the probabilistic Hough transform
    minlen = 0.1 * minsize
    maxgap = 0.1 * minlen
    angles = np.array([0, np.math.pi/2]) # assumes a rectified image
    lines = transform.probabilistic_hough(img, theta=angles, threshold=10, line_length=minlen, line_gap=maxgap)

    # separate vertical and horizontal lines
    vlines = []
    hlines = []
    for lin in lines:
        p0, p1 = lin
        (x0, y0) = p0
        (x1, y1) = p1
        linlen = np.math.sqrt(sqdist(p0, p1))
        if linlen > minsize:
            continue
        xc, yc = 0.5*(x0+x1), 0.5*(y0+y1)
        lin_info = [x0, y0, x1, y1, xc, yc, linlen]
        if x0 == x1:
            if y1 > y0:
                lin_info[1], lin_info[3] = lin_info[3], lin_info[1]
            vlines.append(lin_info)
        else:
            if x1 > x0:
                lin_info[0], lin_info[2] = lin_info[2], lin_info[0]
            hlines.append(lin_info)

    # filter out duplicate lines
    if simplify:
        dthr = 0.01 * minsize
        dthr = dthr * dthr

        for lines in (vlines, hlines):
            i = 0
            while i < len(lines):
                l1 = lines[i]

                acc = np.array(l1)
                nacc = 1.0

                j = i+1
                while j < len(lines):
                    l2 = lines[j]
                    d1 = sqdist(l1[0:2], l2[0:2])
                    d2 = sqdist(l1[2:4], l2[2:4])
                    # both endpoints are very close to each other
                    if d1 < dthr and d2 < dthr:
                        acc = acc + np.array(l2)
                        nacc = nacc + 1.0
                        lines.pop(j)
                    else:
                        j = j+1

                lines[i] = (acc/nacc).tolist()
                lines[i][4] = 0.5 * (lines[i][0]+lines[i][2])
                lines[i][5] = 0.5 * (lines[i][1]+lines[i][3])
                lines[i][6] =  np.math.sqrt(sqdist((lines[i][0],lines[i][1]), (lines[i][2],lines[i][3])))
                i = i+1

    # # sort by decreasing length
    # vlines = sorted(vlines, key=lambda a_entry: a_entry[6])
    # vlines = vlines[::-1]
    # hlines = sorted(hlines, key=lambda a_entry: a_entry[6])
    # hlines = hlines[::-1]

    return hlines, vlines
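sqdist, used throughout detect_lines, is not shown; a plausible stand-in (an assumption):

# Assumed stand-in for sqdist (not the original): squared Euclidean distance.
def sqdist(p, q):
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    return dx * dx + dy * dy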
Example #19
def mono_cracks(features):
    struct = ndimage.generate_binary_structure(2, 1)
    im = features['im_norm']
    im_no_rows = features['im_no_fingers']
    h, w = im.shape

    # apply a filter that enhances thin dark lines
    smoothed = cv2.GaussianBlur(im_no_rows,
                                ksize=(0, 0),
                                sigmaX=1.0,
                                borderType=cv2.BORDER_REPLICATE)
    dark_lines = np.zeros(smoothed.shape, np.float32)
    pixel_ops.CrackEnhance2(smoothed, dark_lines)

    if False:
        view = ImageViewer(im_no_rows)
        ImageViewer(smoothed)
        ImageViewer(dark_lines)
        view.show()
        sys.exit()

    # ignore background
    r = features['wafer_radius']
    pixel_ops.ApplyThresholdGT_F32(features['im_center_dist_im'], dark_lines,
                                   r, 0)

    # HOUGH PARAMS
    LINE_THRESH = 0.07
    MIN_LINE_LEN = 0.017  # 0.025
    LINE_GAP = 4
    ANGLE_TOL = 15
    dark_lines_binary = (dark_lines > LINE_THRESH).astype(np.uint8)
    line_length = int(round(im.shape[0] * MIN_LINE_LEN))
    lines = probabilistic_hough(
        dark_lines_binary,
        threshold=20,
        line_length=line_length,
        line_gap=LINE_GAP,
        theta=np.deg2rad(np.r_[np.arange(45 - ANGLE_TOL, 45 + ANGLE_TOL + 1),
                               np.arange(135 - ANGLE_TOL, 135 + ANGLE_TOL +
                                         1)]))

    line_im = np.zeros_like(dark_lines)
    edge_dist = int(round(im.shape[0] * 0.035))
    coms = []
    middle_y, middle_x = features['wafer_middle_y'], features['wafer_middle_x']
    for line in lines:
        r0, c0, r1, c1 = line[0][1], line[0][0], line[1][1], line[1][0]
        rs, cs = draw.line(r0, c0, r1, c1)
        line_im[rs, cs] = 1
        coms.append(cs.mean())

        # connect to edge
        # first end
        rs_end1, cs_end1 = rs[:edge_dist], cs[:edge_dist]
        rs_ex1 = rs_end1 + (rs_end1[0] - rs_end1[-1])
        cs_ex1 = cs_end1 + (cs_end1[0] - cs_end1[-1])
        center_dists = np.sqrt((cs_ex1 - middle_x)**2 + (rs_ex1 - middle_y)**2)
        mask = ((rs_ex1 >= 0) & (rs_ex1 < h) & (cs_ex1 >= 0) & (cs_ex1 < w) &
                (center_dists < r))
        # make sure some pixels were masked out (i.e. fall outside the cell) and the extension is dark or short
        # print im_no_rows[rs_ex1[mask], cs_ex1[mask]].mean(), mask.sum()
        if mask.sum() < len(rs_ex1) and (
                im_no_rows[rs_ex1[mask], cs_ex1[mask]].mean() < 0.45
                or mask.sum() < 9):
            line_im[rs_ex1[mask], cs_ex1[mask]] = 2

        # second end
        rs_end2, cs_end2 = rs[-edge_dist:], cs[-edge_dist:]
        rs_ex2 = rs_end2 + (rs_end2[-1] - rs_end2[0])
        cs_ex2 = cs_end2 + (cs_end2[-1] - cs_end2[0])
        center_dists = np.sqrt((cs_ex2 - middle_x)**2 + (rs_ex2 - middle_y)**2)
        mask = ((rs_ex2 >= 0) & (rs_ex2 < h) & (cs_ex2 >= 0) & (cs_ex2 < w) &
                (center_dists < r))
        # make sure some pixels were masked out (i.e. fall outside the cell) and the extension is dark or short
        if mask.sum() < len(cs_ex2) and (
                im_no_rows[rs_ex2[mask], cs_ex2[mask]].mean() < 0.45
                or mask.sum() < 9):
            line_im[rs_ex2[mask], cs_ex2[mask]] = 2

    # join cracks that straddle BBs
    bb_cols = np.r_[0, features['_busbar_cols'], w - 1]
    for i1, i2 in itertools.combinations(range(len(lines)), 2):
        c1 = min(coms[i1], coms[i2])
        c2 = max(coms[i1], coms[i2])
        # make sure on different sides of BB (compare midpoints)
        straddle = False
        for bb in range(len(bb_cols) - 2):
            if bb_cols[bb] < c1 < bb_cols[bb + 1] < c2 < bb_cols[bb + 2]:
                straddle = True
                break
        if not straddle:
            continue

        # make sure similar orientation & offset
        def orientation(r0, c0, r1, c1):
            orien = math.degrees(math.atan2(r1 - r0, c1 - c0))
            if orien < 0: orien += 180
            if orien > 180: orien -= 180
            return orien

        or1 = orientation(lines[i1][0][1], lines[i1][0][0], lines[i1][1][1],
                          lines[i1][1][0])
        or2 = orientation(lines[i2][0][1], lines[i2][0][0], lines[i2][1][1],
                          lines[i2][1][0])
        or_diff = abs(or2 - or1)
        if or_diff > 5:
            continue

        # find line between closest points
        if coms[i1] < coms[i2]:
            line1, line2 = lines[i1], lines[i2]
        else:
            line1, line2 = lines[i2], lines[i1]
        joining_line = draw.line(line1[1][1], line1[1][0], line2[0][1],
                                 line2[0][0])
        if len(joining_line[0]) > 0.05 * w:
            continue
        line_im[joining_line] = 3

    if False:
        view = ImageViewer(im_no_rows)
        ImageViewer(dark_lines)
        ImageViewer(dark_lines_binary)
        ImageViewer(line_im)
        view.show()
        sys.exit()

    # clean up lines
    line_im = ndimage.binary_closing(line_im, struct,
                                     iterations=2).astype(np.uint8)
    ys, xs = np.where(line_im)
    pixel_ops.FastThin(line_im,
                       ys.copy().astype(np.int32),
                       xs.copy().astype(np.int32), ip.thinning_lut)
    line_im = ndimage.binary_dilation(line_im, struct)

    # filter by "strength", which is a combination of darkness and length
    ccs, num_ccs = ip.connected_components(line_im)
    pixel_ops.ApplyThresholdGT_F32(dark_lines, dark_lines, 0.3, 0.3)
    if False:
        strength = ndimage.sum(dark_lines,
                               labels=ccs,
                               index=np.arange(num_ccs + 1))
    else:
        # median will be more robust than mean (dark spots can lead to false positives)
        median_vals = ndimage.median(dark_lines,
                                     labels=ccs,
                                     index=np.arange(num_ccs + 1))
        lengths = np.zeros(num_ccs + 1, np.int32)
        pixel_ops.CCSizes(ccs, lengths)
        strength = median_vals * lengths
    strength[0] = 0
    strongest_candidates = np.argsort(strength)[::-1]
    strongest_candidates = strongest_candidates[
        strength[strongest_candidates] > parameters.CELL_CRACK_STRENGTH]

    if False:
        # print strongest_candidates
        strength_map = np.take(strength, ccs)
        candidates = (strength_map > parameters.CELL_CRACK_STRENGTH).astype(
            np.uint8)
        view = ImageViewer(strength_map)
        ImageViewer(ip.overlay_mask(im, candidates, 'r'))
        view.show()
        sys.exit()

    # filter each candidate using other features
    mask_cracks = np.zeros_like(im, np.uint8)
    locs = ndimage.find_objects(ccs)
    crack_count = 0
    for cc_label in strongest_candidates:
        e = cc_label - 1
        y1, y2 = max(0, locs[e][0].start - 3), min(h, locs[e][0].stop + 3)
        x1, x2 = max(0, locs[e][1].start - 3), min(w, locs[e][1].stop + 3)
        crack = (ccs[y1:y2, x1:x2] == cc_label)
        im_win = im[y1:y2, x1:x2]
        ys, xs = np.where(crack)
        ys = ys + y1
        xs = xs + x1
        com_y = ys.mean()
        com_x = xs.mean()

        if False:
            view = ImageViewer(crack)
            ImageViewer(im_win)
            view.show()

        # remove cracks along corner edge by checking if center of mass
        #  is same distance from middle as cell radius, and parallel to edge
        center_dists = np.sqrt((ys - (h / 2.0))**2 + (xs - (w / 2.0))**2)
        center_dist = center_dists.mean()
        dist_range = center_dists.max() - center_dists.min()
        r = features['wafer_radius'] - features['cell_edge_tb']
        center_ratio = (min(center_dist, r) / max(center_dist, r))
        if center_ratio > 0.98 and dist_range < 10:
            continue

        # keep cracks that have passed the tests & compute properties
        crack_count += 1
        mask_cracks[y1:y2, x1:x2][crack] = cz_wafer.DEFECT_CRACK

        if ('input_param_verbose' not in features
                or features['input_param_verbose'] or crack_count < 6):
            cz_wafer.crack_properties(ys, xs, crack_count, features,
                                      mask_cracks)

        if crack_count >= parameters.MAX_NUM_CRACKS:
            break

    if False:
        # view = ImageViewer(ccs)
        print "Crack pixels: ", mask_cracks.sum()
        view = ImageViewer(ccs)
        ImageViewer(ip.overlay_mask(im, mask_cracks, 'r'))
        view.show()
        sys.exit()

    features['mk_cracks_u8'] = mask_cracks
    features['defect_count'] = crack_count
    if crack_count > 0:
        features['defect_present'] = 1
    else:
        features['defect_present'] = 0

    # thin before finding length
    crack_skel = mask_cracks.copy()
    ys, xs = np.where(mask_cracks)
    pixel_ops.FastThin(crack_skel,
                       ys.copy().astype(np.int32),
                       xs.copy().astype(np.int32), ip.thinning_lut)
    features['defect_length'] = crack_skel.sum()
    features['_crack_skel'] = crack_skel
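For reference, the theta argument built in the probabilistic_hough call above restricts the search to near-diagonal segments. A small self-contained illustration of what that angle set contains:

# Illustration of the restricted angle set built above (ANGLE_TOL = 15):
# 1-degree steps covering 30..60 and 120..150 degrees, converted to radians.
import numpy as np
ANGLE_TOL = 15
theta = np.deg2rad(np.r_[np.arange(45 - ANGLE_TOL, 45 + ANGLE_TOL + 1),
                         np.arange(135 - ANGLE_TOL, 135 + ANGLE_TOL + 1)])
print(theta.shape)   # (62,) -> 31 angles around 45 deg + 31 around 135 deg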