def create_cell_mask(norm, features):
    # (1=background, 2=busbar, 4=finger, 8=cell edge)
    cell_mask = features['mask_busbar_filled'].astype(np.uint8) * 2
    cell_mask[features['_finger_row_nums'], :] += 4
    if 'mask_grid_cols' in features:
        cell_mask[:, features['mask_grid_cols']] |= 4

    r = features['im_center_dist_im']
    h, w = norm.shape
    left = features['cell_edge_left']
    right = features['cell_edge_right']
    top = features['cell_edge_tb']
    bottom = h - features['cell_edge_tb']
    cell_mask[:top, :] = 8
    cell_mask[bottom:, :] = 8
    cell_mask[:, :left] = 8
    cell_mask[:, right:] = 8
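    # average of the four border margins (rounded up); used just below to mark a
    # ring inside the wafer radius as cell edge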
    edge_width = int((left + (w - right) + top + (h - bottom)) / 4.0) + 1
    features['cell_edge_width_avg'] = edge_width
    pixel_ops.ApplyThresholdGT_F32_U8(r, cell_mask,
                                      features['wafer_radius'] - edge_width, 8)
    pixel_ops.ApplyThresholdGT_F32_U8(r, cell_mask, features['wafer_radius'],
                                      1)
    features['bl_cropped_u8'] = cell_mask
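

# The flag values above (1=background, 2=busbar, 4=finger, 8=cell edge) combine
# as a bitmask. A minimal, self-contained sketch of the convention on synthetic
# data (the array and the _demo name are illustrative, not project code):
def _demo_cell_mask_flags():
    import numpy as np
    mask = np.zeros((6, 6), np.uint8)
    mask[2, :] |= 2               # a busbar row
    mask[:, 3] |= 4               # a finger column; pixel (2, 3) becomes 2 | 4 == 6
    mask[:, 0] = 8                # cell edge overwrites other flags, as above
    is_finger = (mask & 4) > 0    # decode one flag regardless of the others
    return mask, is_finger
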
def feature_extraction(im, features):
    t_start = timeit.default_timer()

    # crop
    crop_props = crop(im)
    features['corners'] = crop_props['corners']
    #print crop_props.keys()
    #features['crop_top'] = crop_props['crop_top']
    # features['corner_tl_x'] = crop_props['corners'][0][1]
    # features['corner_tl_y'] = crop_props['corners'][0][0]
    # features['corner_tr_x'] = crop_props['corners'][1][1]
    # features['corner_tr_y'] = crop_props['corners'][1][0]
    # features['corner_br_x'] = crop_props['corners'][2][1]
    # features['corner_br_y'] = crop_props['corners'][2][0]
    # features['corner_bl_x'] = crop_props['corners'][3][1]
    # features['corner_bl_y'] = crop_props['corners'][3][0]
    features['wafer_radius'] = crop_props['radius']
    features['_wafer_middle_orig'] = crop_props['center']
    features['crop_rotation'] = crop_props['estimated_rotation']
    cropped = cropping.correct_rotation(im, crop_props, pad=False, border_erode=parameters.BORDER_ERODE_CZ,
                                        fix_chamfer=False)
    if not cropped.flags['C_CONTIGUOUS']:
        cropped = np.ascontiguousarray(cropped)

    if False:
        view = ImageViewer(im)
        ImageViewer(cropped)
        view.show()

    # histogram features
    h, w = cropped.shape
    ip.histogram_percentiles(cropped, features, h // 2, w // 2, features['wafer_radius'])

    # normalise image
    min_val = features['hist_percentile_01'] / float(features['hist_percentile_99'])
    norm_upper = features['hist_percentile_99']
    norm_lower = min(0.2, min_val)
    normed = ((cropped / norm_upper) - norm_lower) / (1 - norm_lower)

    # calculate distance from wafer rotation middle
    r, theta = np.empty_like(normed, np.float32), np.empty_like(normed, np.float32)
    pixel_ops.CenterDistance(r, theta, h // 2, w // 2)
    features['im_center_dist_im'] = r

    # create mask: 1=background
    wafer_mask = np.zeros_like(cropped, np.uint8)
    pixel_ops.ApplyThresholdGT_F32_U8(features['im_center_dist_im'], wafer_mask, features['wafer_radius'], 1)
    features['bl_cropped_u8'] = wafer_mask

    features['im_cropped_u8'] = (np.clip(normed, 0.0, 1.0) * 255).astype(np.uint8)
    if cropped.dtype.type is np.uint16:
        features['im_cropped_u16'] = cropped
    else:
        features['im_cropped_u16'] = cropped.astype(np.uint16)

    # compute runtime
    t_stop = timeit.default_timer()
    features['runtime'] = t_stop - t_start

    return crop_props
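

# ip.histogram_percentiles above is a project helper that fills
# features['hist_percentile_01'] / ['hist_percentile_99']. A rough, self-contained
# sketch of the same normalisation using plain NumPy percentiles (it ignores the
# wafer-radius restriction; the _demo name is hypothetical):
def _demo_percentile_normalise(cropped):
    import numpy as np
    p01, p99 = np.percentile(cropped, [1, 99])
    norm_lower = min(0.2, p01 / float(p99))
    normed = ((cropped / p99) - norm_lower) / (1.0 - norm_lower)
    return np.clip(normed, 0.0, 1.0)
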
def robust_dislocations(smooth, impure, features):
    c = parameters.ROBUST_CROP
    smooth = np.ascontiguousarray(smooth[c:-c, c:-c])
    impure = np.ascontiguousarray(impure[c:-c, c:-c])

    struct = ndimage.generate_binary_structure(2, 1)

    # robust dislocation mask
    dog1 = (cv2.dilate(
        smooth,
        cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE,
            (parameters.DOG_STRUCT_SIZE, parameters.DOG_STRUCT_SIZE))) -
            smooth)
    dog2 = (ip.fast_smooth(smooth, sigma=parameters.DOG_SIGMA2) -
            cv2.GaussianBlur(smooth, (0, 0),
                             parameters.DOG_SIGMA1,
                             borderType=cv2.BORDER_REPLICATE))
    dog = dog1 + dog2

    IMP_THRESH = 0.4
    pixel_ops.ApplyThresholdLT_F32(impure, dog, IMP_THRESH, 0)

    if False:
        view = ImageViewer(dog1)
        ImageViewer(dog2)
        ImageViewer(dog1 + dog2)
        ImageViewer(dog)
        view.show()

    defect_mask = np.zeros_like(dog, np.uint8)
    DOG_THRESH = parameters.BLOCK_DISLOCATION_THRESH
    pixel_ops.ApplyThresholdGT_F32_U8(dog, defect_mask, DOG_THRESH, 1)
    num_pure_pixels = pixel_ops.CountThresholdGT_F32(impure, IMP_THRESH)
    defect_robust = (pixel_ops.CountEqual_U8(defect_mask,
                                             1)) / float(num_pure_pixels)

    # compute surface area
    eroded = defect_mask - cv2.erode(defect_mask, struct.astype(np.uint8))
    defect_pixels = float(pixel_ops.CountEqual_U8(defect_mask, 1))
    if defect_pixels > 0:
        defect_surface = pixel_ops.CountEqual_U8(eroded, 1) / defect_pixels
    else:
        defect_surface = 0

    if False:
        print(defect_robust, defect_surface)
        view = ImageViewer(smooth)
        ImageViewer(defect_mask)
        ImageViewer(eroded)
        view.show()
        sys.exit()

    features['defect_robust_area_fraction'] = defect_robust
    features['defect_surface'] = defect_surface
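

# robust_dislocations relies on the compiled pixel_ops counters and ip.fast_smooth.
# A minimal NumPy/OpenCV sketch of the core idea: a difference-of-Gaussians
# response, zeroed in impure regions and thresholded, reported as a fraction of
# the pure area. The sigmas and thresholds are illustrative, not the project
# defaults, and the _demo name is hypothetical:
def _demo_dog_defect_fraction(smooth, impure, sigma1=1.0, sigma2=4.0,
                              imp_thresh=0.4, dog_thresh=0.1):
    import cv2
    import numpy as np
    dog = (cv2.GaussianBlur(smooth, (0, 0), sigma2) -
           cv2.GaussianBlur(smooth, (0, 0), sigma1))
    dog[impure < imp_thresh] = 0                        # ignore impure regions
    defect_mask = (dog > dog_thresh).astype(np.uint8)
    num_pure = max(1, int((impure > imp_thresh).sum()))
    return defect_mask, defect_mask.sum() / float(num_pure)
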
def find_slug(im, features):
    h, w = im.shape
    h2, w2 = h // 2, w // 2

    # highlight edges in each quadrant
    edgesH = cv2.Sobel(im, cv2.CV_32F, 0, 1)
    edgesV = cv2.Sobel(im, cv2.CV_32F, 1, 0)
    corner_edges = np.zeros_like(im)
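    # quadrant-dependent signs: assuming a bright slug on a darker background,
    # these combinations make the slug boundary respond positively in every quadrant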
    corner_edges[:h2, :w2] = edgesH[:h2, :w2] + edgesV[:h2, :w2]
    corner_edges[:h2, -w2:] = edgesH[:h2, -w2:] - edgesV[:h2, -w2:]
    corner_edges[-h2:, -w2:] = -1 * edgesH[-h2:, -w2:] - edgesV[-h2:, -w2:]
    corner_edges[-h2:, :w2] = -1 * edgesH[-h2:, :w2] + edgesV[-h2:, :w2]

    # find candidate boundary points (strongest edge response per row, in each half)
    left = corner_edges[:, :w2]
    ys = np.arange(left.shape[0])
    xs = np.argmax(left, axis=1)
    mask = corner_edges[ys, xs] > 0.4
    ys = ys[mask]
    xs = xs[mask]
    right = corner_edges[:, w2:]
    ys2 = np.arange(right.shape[0])
    xs2 = w2 + np.argmax(right, axis=1)
    mask = corner_edges[ys2, xs2] > 0.4
    ys2 = ys2[mask]
    xs2 = xs2[mask]
    ys = np.r_[ys, ys2]
    xs = np.r_[xs, xs2]

    if False:
        ImageViewer(corner_edges)
        plt.figure()
        plt.imshow(im, cmap="gray")
        plt.plot(xs, ys, "o")
        plt.show()
        sys.exit()

    t1 = default_timer()

    # use a Hough transform to vote on the most likely center/radius
    # - assume the true center is within MAX_OFFSET (200) pixels of the image middle

    # phase 1: rough fit
    MAX_OFFSET = 200
    step = 3
    acc_ys = np.arange(h2 - MAX_OFFSET, h2 + MAX_OFFSET + 1, step)
    acc_xs = np.arange(w2 - MAX_OFFSET, w2 + MAX_OFFSET + 1, step)
    diag = math.sqrt(h2 ** 2 + w2 ** 2)
    min_r = int(0.5 * diag)
    max_r = int(diag)
    acc = np.zeros((acc_ys.shape[0], acc_xs.shape[0], max_r - min_r), np.int32)
    pixel_ops.CircleHoughAcc2(ys, xs, acc_ys, acc_xs, acc, min_r, max_r)
    acc = ndimage.gaussian_filter(acc.astype(np.float32), sigma=(1, 1, 0))
    i, j, r = ndimage.maximum_position(acc)
    middle_y, middle_x, radius = acc_ys[i], acc_xs[j], r + min_r

    if True:
        # phase 2: fine tune
        acc_ys = np.arange(middle_y - (2 * step), middle_y + (2 * step) + 1)
        acc_xs = np.arange(middle_x - (2 * step), middle_x + (2 * step) + 1)
        min_r = int(radius - 10)
        max_r = int(radius + 10)
        acc = np.zeros((acc_ys.shape[0], acc_xs.shape[0], max_r - min_r), np.int32)
        pixel_ops.CircleHoughAcc2(ys, xs, acc_ys, acc_xs, acc, min_r, max_r)
        acc = ndimage.gaussian_filter(acc.astype(np.float32), sigma=(1, 1, 0))
        i, j, r = ndimage.maximum_position(acc)

        middle_y, middle_x, radius = acc_ys[i], acc_xs[j], r + min_r

    features['center_y'] = middle_y
    features['center_x'] = middle_x
    features['radius'] = radius
    features['crop_rotation'] = 0
    features['crop_left'] = 0
    features['crop_right'] = im.shape[1] - 1
    features['crop_top'] = 0
    features['crop_bottom'] = im.shape[0] - 1

    mask = np.zeros_like(im, np.uint8)
    r, theta = np.empty_like(im, np.float32), np.empty_like(im, np.float32)
    pixel_ops.CenterDistance(r, theta, middle_y, middle_x)
    pixel_ops.ApplyThresholdGT_F32_U8(r, mask, radius, 1)

    features['bl_uncropped_u8'] = mask
    features['bl_cropped_u8'] = mask

    if False:
        print(default_timer() - t1)
        rgb = create_overlay(im, features)
        view = ImageViewer(rgb)
        # ImageViewer(mask)
        view.show()
        sys.exit()
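

# pixel_ops.CircleHoughAcc2 appears to be a compiled accumulator. A slow but
# self-contained NumPy stand-in for the voting step that the two-phase search
# above depends on (the _demo name and brute-force loops are illustrative only):
def _demo_circle_hough(ys, xs, acc_ys, acc_xs, min_r, max_r):
    import numpy as np
    acc = np.zeros((len(acc_ys), len(acc_xs), max_r - min_r), np.int32)
    for i, cy in enumerate(acc_ys):
        for j, cx in enumerate(acc_xs):
            dist = np.sqrt((ys - cy) ** 2 + (xs - cx) ** 2)
            ri = np.round(dist).astype(np.int64) - min_r
            valid = (ri >= 0) & (ri < max_r - min_r)
            np.add.at(acc[i, j], ri[valid], 1)
    return acc  # the argmax over (y, x, r) gives the most-voted circle
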
def feature_extraction(im, features, crop=True, skip_features=False):
    h, w = im.shape

    if im.dtype != np.float32:
        im = im.astype(np.float32)

    if crop:
        # cropping
        rotation_corrected = block_rotate(im, features)
        cropped_u16 = block_crop(rotation_corrected, features)
        bounds = features['_crop_bounds']
    else:
        rotation_corrected = im
        cropped_u16 = im
        bounds = (0, w - 1, 0, h - 1)
        features['_crop_bounds'] = bounds
        features['crop_rotation'] = 0

    # get original coordinates of the block corners
    find_corners(im, features)
    features['_rotation_corrected'] = rotation_corrected

    if False:
        view = ImageViewer(im)
        ImageViewer(cropped_u16)
        view.show()
        sys.exit()

    # normalisation
    vals = cropped_u16[::2, ::2].flat
    vals = np.sort(vals)
    min_val = vals[int(0.01 * vals.shape[0])]
    max_val = vals[int(0.99 * vals.shape[0])]
    features['norm_range'] = max_val - min_val
    features['norm_lower'] = min_val
    im_normed = (cropped_u16 - min_val) / (max_val - min_val)
    pixel_ops.ApplyThresholdLT_F32(im_normed, im_normed, 0.0, 0.0)

    cropped = im_normed
    cropped_u8 = im_normed.copy()
    pixel_ops.ApplyThresholdGT_F32(cropped_u8, cropped_u8, 1.0, 1.0)
    features['im_cropped_u8'] = (cropped_u8 * 255).astype(np.uint8)
    features['im_cropped_u16'] = cropped_u16.astype(np.uint16)

    if skip_features or ('input_param_skip_features' in features
                         and int(features['input_param_skip_features']) == 1):
        return

    if False:
        view = ImageViewer(im)
        ImageViewer(cropped, vmin=0, vmax=1)
        view.show()
        sys.exit()

    # compute some row/column percentiles (indices carry a 0.25 factor to account
    # for the 4x subsampling)
    col_sorted = np.sort(cropped[::4, :], axis=0)
    features['_col_90'] = np.ascontiguousarray(
        col_sorted[int(round(0.9 * 0.25 * cropped.shape[0])), :])
    features['_col_60'] = np.ascontiguousarray(
        col_sorted[int(round(0.6 * 0.25 * cropped.shape[0])), :])
    row_sorted = np.sort(cropped[:, ::4], axis=1)
    features['_row_90'] = np.ascontiguousarray(
        row_sorted[:, int(round(0.9 * 0.25 * cropped.shape[1]))])

    # background
    background = block_background(cropped, features)

    # foreground
    foreground = block_foreground(cropped, features)

    # normalise background
    background /= background.max()

    # calculate metrics
    robust_dislocations(cropped, background, features)

    # dislocation area
    DIS_THRESH = 0.3
    dislocation_area = (
        pixel_ops.CountThresholdGT_F32(foreground, DIS_THRESH) /
        float(foreground.shape[0] * foreground.shape[1]))
    impure_area = 1 - (pixel_ops.CountThresholdGT_F32(background, 0.5) /
                       float(foreground.shape[0] * foreground.shape[1]))

    # edge width
    l4 = background.shape[1] // 4
    profile = background[:, l4:-l4].mean(axis=1)
    fg = np.where(profile > parameters.BRICK_EDGE_THRESH)[0]
    if len(fg) > 0:
        left_width, right = fg[[0, -1]]
        right_width = len(profile) - right - 1
        edge_width = max(left_width, right_width)
        if edge_width < 0.05 * len(profile):
            edge_width = 0
    else:
        edge_width = 100
    features['edge_width'] = edge_width

    if False:
        print(edge_width)
        ImageViewer(cropped)
        plt.figure()
        plt.plot(profile)
        plt.show()

    if False:
        dislocations = np.zeros_like(foreground, dtype=np.uint8)
        pixel_ops.ApplyThresholdGT_F32_U8(foreground, dislocations, DIS_THRESH,
                                          1)

        print(features['defect_robust_area_fraction'], impure_area)

        view = ImageViewer(im)
        ImageViewer(dislocations)
        ImageViewer(foreground)
        ImageViewer(background, vmin=0, vmax=1)
        view.show()
        # sys.exit()

    imp_cutoff = 0.55
    pixel_ops.ApplyThresholdGT_F32(background, background, imp_cutoff,
                                   imp_cutoff)
    background /= imp_cutoff
    background = np.log10(2 - background)

    dis_cutoff = 0.1
    foreground -= dis_cutoff
    foreground = np.clip(foreground, 0, 1)
    foreground *= 0.5

    features['ov_impure_u8'] = (background * 255).astype(np.uint8)
    features['ov_defects_u8'] = (foreground * 255).astype(np.uint8)
    features['_bounds'] = bounds
    pixel_ops.ClipImage(im_normed, 0, 1)
    features['dislocation_area_fraction'] = dislocation_area
    features['impure_area_fraction'] = impure_area

    return features
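

# The area fractions above come from the compiled pixel_ops.CountThresholdGT_F32
# counter. In plain NumPy the same two quantities read as follows (a sketch; the
# _demo name is hypothetical and the thresholds mirror the constants used above):
def _demo_area_fractions(foreground, background, dis_thresh=0.3, imp_thresh=0.5):
    n = float(foreground.size)
    dislocation_area = (foreground > dis_thresh).sum() / n
    impure_area = 1.0 - (background > imp_thresh).sum() / n
    return dislocation_area, impure_area
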
def feature_combination(features):
    if "ov_bright_area_u8" in features:
        # dilate to find defects near edges
        bright = (features['ov_bright_area_u8'] / 255.).astype(np.float32)
        kernel = np.ones((15, 15), np.uint8)
        bright = cv2.dilate(bright, kernel=kernel)
    else:
        bright = None
    if "ov_splotches_u8" in features:
        firing = (features['ov_splotches_u8'] / 255.).astype(np.float32)
    else:
        firing = None
    if "ov_dark_areas_u8" in features:
        dark = (features['ov_dark_areas_u8'] / 255.).astype(np.float32)
    else:
        dark = None

    # bright areas causing dark areas
    if bright is not None and dark is not None:
        weight = 5.0
        cols = bright.mean(axis=0)
        rows = bright.mean(axis=1)

        dark -= np.r_[cols] * weight
        dark -= np.c_[rows] * weight
        pixel_ops.ApplyThresholdLT_F32(dark, dark, 0.0, 0.0)

        if False:
            plt.figure()
            plt.plot(cols)
            plt.plot(rows)
            view = ImageViewer(bright)
            ImageViewer(features['ov_dark_areas_u8'] / 255.)
            ImageViewer(dark)
            view.show()

        features['ov_dark_areas_u8'] = (dark * 255).astype(np.uint8)

    # dark middles

    # remove dark spots & broken fingers in high firing areas
    if firing is not None:
        FIRING_THRESH = 0.1
        # if "mk_cracks_u8" in features:
        #    pixel_ops.ApplyThresholdGT_F32_U8(firing, features["mk_cracks_u8"], FIRING_THRESH, 0)
        if "mk_dark_spots_outline_u8" in features:
            pixel_ops.ApplyThresholdGT_F32_U8(firing, features["mk_dark_spots_outline_u8"], FIRING_THRESH, 0)

        if "mk_finger_break_u8" in features:
            # TODO: handle finger breaks differently: look at sum along finger, and remove all
            pixel_ops.ApplyThresholdGT_F32_U8(firing, features["mk_finger_break_u8"], FIRING_THRESH, 0)

        if False:
            view = ImageViewer(firing)
            view.show()

    # remove cracks, dark spots & broken fingers in bright areas
    if bright is not None:
        # expand a bit to get artifacts on corner

        BRIGHT_THRESH = 0.1
        # if "mk_cracks_u8" in features:
        #    pixel_ops.ApplyThresholdGT_F32_U8(bright, features["mk_cracks_u8"], BRIGHT_THRESH, 0)
        if "mk_finger_break_u8" in features:
            pixel_ops.ApplyThresholdGT_F32_U8(bright, features["mk_finger_break_u8"], BRIGHT_THRESH, 0)
        if "mk_dark_spots_outline_u8" in features:
            pixel_ops.ApplyThresholdGT_F32_U8(bright, features["mk_dark_spots_outline_u8"], BRIGHT_THRESH, 0)

        if False:
            view = ImageViewer(bright)
            view.show()

    # bright areas causing middle dark

    # overlapping cracks and dark spots
    if "mk_dark_spots_outline_u8" in features and "mk_cracks_u8" in features and \
                    features['mk_dark_spots_outline_u8'].max() > 0 and \
                    features['mk_cracks_u8'].max() > 0:
        ccs, num_ccs = ip.connected_components(features["mk_dark_spots_outline_u8"])
        remove_list = set(ccs[(features['mk_dark_spots_outline_u8'] == 1) & (features['mk_cracks_u8'] == 1)])
        lut = np.ones(num_ccs + 1, np.int32)
        lut[0] = 0
        lut[list(remove_list)] = 0
        removed = np.take(lut, ccs)

        if False:
            view = ImageViewer(features["mk_cracks_u8"])
            ImageViewer(ccs)
            ImageViewer(removed)
            view.show()

        features['mk_dark_spots_outline_u8'] = removed.astype(np.uint8)
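

# ip.connected_components above is a project helper; scipy.ndimage.label provides
# an equivalent labelling. A self-contained sketch of the LUT-based removal used
# in the crack / dark-spot overlap step (the _demo name is hypothetical):
def _demo_remove_overlapping_components(dark_spots_u8, cracks_u8):
    import numpy as np
    from scipy import ndimage
    ccs, num_ccs = ndimage.label(dark_spots_u8)
    overlapping = np.unique(ccs[(dark_spots_u8 == 1) & (cracks_u8 == 1)])
    lut = np.ones(num_ccs + 1, np.int32)
    lut[0] = 0                  # background stays zero
    lut[overlapping] = 0        # drop every dark-spot component touching a crack
    return np.take(lut, ccs).astype(np.uint8)
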
def feature_extraction(im, features, already_cropped=False):
    t_start = timeit.default_timer()

    if 'input_param_num_stripes' in features:
        num_stripes = features['input_param_num_stripes']
    else:
        num_stripes = 6

    features['num_rows'] = 1
    features['num_cols'] = num_stripes

    if 'input_param_multi' in features:
        multi = int(features['input_param_multi']) == 1
    else:
        multi = False

    # rotation & cropping
    rotated = cropping.correct_cell_rotation(im,
                                             features,
                                             already_cropped=already_cropped)
    cropped = cropping.crop_cell(rotated,
                                 im,
                                 features,
                                 width=None,
                                 already_cropped=already_cropped)

    # stripe corners
    corner_tr_x = features['corner_tr_x']
    corner_tr_y = features['corner_tr_y']
    corner_tl_x = features['corner_tl_x']
    corner_tl_y = features['corner_tl_y']
    corner_br_x = features['corner_br_x']
    corner_br_y = features['corner_br_y']
    corner_bl_x = features['corner_bl_x']
    corner_bl_y = features['corner_bl_y']
    if features['cell_rotated']:
        x_diff_l = corner_bl_x - corner_tl_x
        y_diff_l = corner_bl_y - corner_tl_y
        x_diff_r = corner_br_x - corner_tr_x
        y_diff_r = corner_br_y - corner_tr_y
        for i in range(num_stripes):
            p_start = i / float(num_stripes)
            p_stop = (i + 1) / float(num_stripes)
            features["%02d_corner_tl_y" % (i + 1)] = int(
                round(corner_tl_y + (p_start * y_diff_l)))
            features["%02d_corner_tl_x" % (i + 1)] = int(
                round(corner_tl_x + (p_start * x_diff_l)))
            features["%02d_corner_bl_y" % (i + 1)] = int(
                round(corner_tl_y + (p_stop * y_diff_l)))
            features["%02d_corner_bl_x" % (i + 1)] = int(
                round(corner_tl_x + (p_stop * x_diff_l)))

            features["%02d_corner_tr_y" % (i + 1)] = int(
                round(corner_tr_y + (p_start * y_diff_r)))
            features["%02d_corner_tr_x" % (i + 1)] = int(
                round(corner_tr_x + (p_start * x_diff_r)))
            features["%02d_corner_br_y" % (i + 1)] = int(
                round(corner_tr_y + (p_stop * y_diff_r)))
            features["%02d_corner_br_x" % (i + 1)] = int(
                round(corner_tr_x + (p_stop * x_diff_r)))
    else:
        x_diff_t = corner_tr_x - corner_tl_x
        y_diff_t = corner_tr_y - corner_tl_y
        x_diff_b = corner_br_x - corner_bl_x
        y_diff_b = corner_br_y - corner_bl_y
        for i in range(num_stripes):
            p_start = i / float(num_stripes)
            p_stop = (i + 1) / float(num_stripes)
            features["%02d_corner_tl_y" % (i + 1)] = int(
                round(corner_tl_y + (p_start * y_diff_t)))
            features["%02d_corner_tl_x" % (i + 1)] = int(
                round(corner_tl_x + (p_start * x_diff_t)))
            features["%02d_corner_tr_y" % (i + 1)] = int(
                round(corner_tl_y + (p_stop * y_diff_t)))
            features["%02d_corner_tr_x" % (i + 1)] = int(
                round(corner_tl_x + (p_stop * x_diff_t)))

            features["%02d_corner_bl_y" % (i + 1)] = int(
                round(corner_bl_y + (p_start * y_diff_b)))
            features["%02d_corner_bl_x" % (i + 1)] = int(
                round(corner_bl_x + (p_start * x_diff_b)))
            features["%02d_corner_br_y" % (i + 1)] = int(
                round(corner_bl_y + (p_stop * y_diff_b)))
            features["%02d_corner_br_x" % (i + 1)] = int(
                round(corner_bl_x + (p_stop * x_diff_b)))

    features['im_cropped_u16'] = cropped.astype(np.uint16)
    h, w = cropped.shape

    corner_mask = np.ones_like(cropped, np.uint8)
    r, theta = np.empty_like(cropped,
                             np.float32), np.empty_like(cropped, np.float32)
    pixel_ops.CenterDistance(r, theta, features['wafer_middle_y'],
                             features['wafer_middle_x'])
    pixel_ops.ApplyThresholdGT_F32_U8(r, corner_mask, features['wafer_radius'],
                                      0)

    if False:
        print(features['cell_rotated'])
        plt.figure()
        plt.plot(cropped.mean(axis=0))
        view = ImageViewer(im)
        ImageViewer(rotated)
        ImageViewer(cropped)
        ImageViewer(corner_mask)
        view.show()

    ip.histogram_percentiles(cropped,
                             features,
                             center_y=h // 2,
                             center_x=w // 2,
                             radius=features['wafer_radius'])
    cell.normalise(cropped, features)

    # find cell structure
    f = features.copy()
    cell.cell_structure(cropped, f)
    features['bl_cropped_u8'] = f['bl_cropped_u8']
    ip.histogram_percentiles(cropped,
                             f,
                             center_y=h // 2,
                             center_x=w // 2,
                             radius=features['wafer_radius'])
    cell.normalise(cropped, f)
    cell.remove_cell_template(f['im_norm'], f)

    if 'input_param_skip_features' not in features or int(
            features['input_param_skip_features']) != 1:
        if multi:
            # efficiency analysis
            multi_cell.bright_areas(f)
            multi_cell.efficiency_analysis(f)

            # save results
            features['impure_area_fraction'] = f['impure_area_fraction']
            features['dislocation_area_fraction'] = f[
                'dislocation_area_fraction']

            im_dislocations = f['_foreground']
            dislocation_thresh = f['_dislocation_thresh']
            im_impure = f['_impure']
            impure_thresh = f['_impure_thresh']
        else:
            # cracks
            cell.mono_cracks(f)
            features['mk_cracks_u8'] = f['mk_cracks_u8']
            features['defect_count'] = f['defect_count']
            features['defect_present'] = f['defect_present']
            features['defect_length'] = f['defect_length']
            crack_skel = f['_crack_skel']

        # extract stats from each stripe
        stripe_width = w // num_stripes
        for s in range(num_stripes):
            s1 = int(round(s * stripe_width))
            s2 = int(round(min(w, (s + 1) * stripe_width)))
            stripe = cropped[:, s1:s2]
            mask = corner_mask[:, s1:s2]

            vals = stripe[mask == 1].flat

            features["%02d_hist_harmonic_mean" %
                     (s + 1)] = 1.0 / (1.0 / np.maximum(0.01, vals)).mean()
            features["%02d_hist_median" % (s + 1)] = np.median(vals)
            features["%02d_hist_mean" % (s + 1)] = np.mean(vals)
            features["%02d_hist_percentile_01" %
                     (s + 1)] = stats.scoreatpercentile(vals, 1)
            features["%02d_hist_percentile_99" %
                     (s + 1)] = stats.scoreatpercentile(vals, 99)
            features["%02d_hist_std" % (s + 1)] = np.std(vals)
            features["%02d_hist_cov" %
                     (s + 1)] = features["%02d_hist_std" % (s + 1)] / max(
                         1, features["%02d_hist_mean" % (s + 1)])

            if 'input_param_no_stripe_images' not in features or int(
                    features['input_param_no_stripe_images']) != 1:
                features['im_%02d_u16' % (s + 1)] = stripe.astype(np.uint16)
                features['bl_%02d_cropped_u8' %
                         (s + 1)] = features['bl_cropped_u8'][:, s1:s2]

            if False:
                view = ImageViewer(stripe)
                ImageViewer(mask)
                view.show()

            if multi:
                impure_stripe = im_impure[:, s1:s2]
                dis_stripe = im_dislocations[:, s1:s2]
                features["%02d_dislocation" %
                         (s + 1)] = (dis_stripe > dislocation_thresh).mean()
                features["%02d_impure" %
                         (s + 1)] = (impure_stripe < impure_thresh).mean()
            else:
                crack_stripe = features['mk_cracks_u8'][:, s1:s2]

                if 'input_param_no_stripe_images' not in features or int(
                        features['input_param_no_stripe_images']) != 1:
                    features['mk_%02d_cracks_u8' %
                             (s + 1)] = np.ascontiguousarray(crack_stripe,
                                                             dtype=np.uint8)

                skel_stripe = crack_skel[:, s1:s2]
                features["%02d_defect_length" % (s + 1)] = skel_stripe.sum()
                if features["%02d_defect_length" % (s + 1)] > 0:
                    _, num_ccs = ip.connected_components(crack_stripe)
                    features["%02d_defect_count" % (s + 1)] = num_ccs
                    features["%02d_defect_present" %
                             (s + 1)] = 1 if num_ccs > 0 else 0
                else:
                    features["%02d_defect_count" % (s + 1)] = 0
                    features["%02d_defect_present" % (s + 1)] = 0

    # undo rotation
    if parameters.ORIGINAL_ORIENTATION and features['cell_rotated']:
        features['num_rows'], features['num_cols'] = features[
            'num_cols'], features['num_rows']
        for feature in features.keys():
            if ((feature.startswith('im_') or feature.startswith('ov_')
                 or feature.startswith('bl_') or feature.startswith('mk_'))
                    and features[feature].ndim == 2):
                features[feature] = features[feature].T[:, ::-1]

    # compute runtime
    t_stop = timeit.default_timer()
    features['runtime'] = t_stop - t_start
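

# The per-stripe statistics above use scipy.stats.scoreatpercentile on a masked
# flat view. A condensed, self-contained sketch of the same per-stripe loop
# (the _demo name is hypothetical; stripe boundaries follow the integer
# arithmetic used above):
def _demo_stripe_stats(cropped, corner_mask, num_stripes=6):
    import numpy as np
    h, w = cropped.shape
    stripe_width = w // num_stripes
    out = {}
    for s in range(num_stripes):
        s1, s2 = s * stripe_width, min(w, (s + 1) * stripe_width)
        vals = cropped[:, s1:s2][corner_mask[:, s1:s2] == 1]
        out["%02d_hist_mean" % (s + 1)] = float(np.mean(vals))
        out["%02d_hist_median" % (s + 1)] = float(np.median(vals))
        out["%02d_hist_harmonic_mean" % (s + 1)] = \
            float(1.0 / (1.0 / np.maximum(0.01, vals)).mean())
    return out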