Code example #1
def main():
    features = {}
    fn = r"C:\Users\Neil\Desktop\R3 crack\raw PL images\cracked wafer PL image.tif"
    im = ip.open_image(fn).astype(np.float32)

    if im.shape[0] > 700:
        print '    WARNING: Image resized'
        im_max = im.max()
        im = ndimage.zoom(im, 0.5)
        if im.max() > im_max:
            im[im > im_max] = im_max

    if False:
        view = ImageViewer(im)
        view.show()

    features['_alg_mode'] = 'mono wafer'
    crop_props = cropping.crop_wafer_cz(im, create_mask=True, skip_crop=False)
    features['corners'] = crop_props['corners']
    cropped = cropping.correct_rotation(
        im,
        crop_props,
        pad=False,
        border_erode=parameters.BORDER_ERODE_CZ,
        fix_chamfer=False)
    mono_wafer.feature_extraction(cropped, crop_props, features=features)

    ip.print_metrics(features)
    rgb = mono_wafer.create_overlay(features)
    view = ImageViewer(rgb)
    view.show()
Code example #2
def create_overlay(features):
    normed = features['im_cropped_u8']
    background = features['ov_impure_u8']
    foreground = features['ov_defects_u8']

    orig = normed.astype(np.int32)

    if False:
        view = ImageViewer(orig)
        ImageViewer(background)
        ImageViewer(foreground)
        view.show()

    rgb = np.empty((background.shape[0], background.shape[1], 3), np.uint8)

    # foreground
    b = orig + foreground
    g = orig - foreground
    r = orig - foreground

    # background
    b -= background
    g -= background
    r += background

    r = np.clip(r, 0, 255)
    g = np.clip(g, 0, 255)
    b = np.clip(b, 0, 255)

    rgb[:, :, 0] = r.astype(np.uint8)
    rgb[:, :, 1] = g.astype(np.uint8)
    rgb[:, :, 2] = b.astype(np.uint8)

    return rgb
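The colour convention in create_overlay can be reproduced with plain NumPy: the defect overlay is added to blue and subtracted from red and green (blue tint), while the impurity overlay is subtracted from blue and green and added to red (red tint), with everything clipped back to the uint8 range. A minimal, self-contained sketch of that channel arithmetic on synthetic arrays (overlay_rgb and the test data are illustrative, not part of the code above):

import numpy as np

def overlay_rgb(base_u8, defects_u8, impure_u8):
    # defects tint blue, impurities tint red; clip back to the 8-bit range
    orig = base_u8.astype(np.int32)
    r = orig - defects_u8 + impure_u8
    g = orig - defects_u8 - impure_u8
    b = orig + defects_u8 - impure_u8
    return np.dstack([np.clip(c, 0, 255).astype(np.uint8) for c in (r, g, b)])

base = np.full((64, 64), 128, np.uint8)
defects = np.zeros_like(base)
defects[20:30, 20:30] = 80
impure = np.zeros_like(base)
impure[:, :10] = 60
print(overlay_rgb(base, defects, impure).shape)  # (64, 64, 3)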
Code example #3
def feature_extraction(im, features):
    t_start = timeit.default_timer()

    # crop
    crop_props = crop(im)
    features['corners'] = crop_props['corners']
    #print crop_props.keys()
    #features['crop_top'] = crop_props['crop_top']
    # features['corner_tl_x'] = crop_props['corners'][0][1]
    # features['corner_tl_y'] = crop_props['corners'][0][0]
    # features['corner_tr_x'] = crop_props['corners'][1][1]
    # features['corner_tr_y'] = crop_props['corners'][1][0]
    # features['corner_br_x'] = crop_props['corners'][2][1]
    # features['corner_br_y'] = crop_props['corners'][2][0]
    # features['corner_bl_x'] = crop_props['corners'][3][1]
    # features['corner_bl_y'] = crop_props['corners'][3][0]
    features['wafer_radius'] = crop_props['radius']
    features['_wafer_middle_orig'] = crop_props['center']
    features['crop_rotation'] = crop_props['estimated_rotation']
    cropped = cropping.correct_rotation(im, crop_props, pad=False, border_erode=parameters.BORDER_ERODE_CZ,
                                        fix_chamfer=False)
    if not cropped.flags['C_CONTIGUOUS']:
        cropped = np.ascontiguousarray(cropped)

    if False:
        view = ImageViewer(im)
        ImageViewer(cropped)
        view.show()

    # histogram features
    h, w = cropped.shape
    ip.histogram_percentiles(cropped, features, h // 2, w // 2, features['wafer_radius'])

    # normalise image
    min_val = features['hist_percentile_01'] / float(features['hist_percentile_99'])
    norm_upper = features['hist_percentile_99']
    norm_lower = min(0.2, min_val)
    normed = ((cropped / norm_upper) - norm_lower) / (1 - norm_lower)

    # calculate distance from wafer rotation middle
    r, theta = np.empty_like(normed, np.float32), np.empty_like(normed, np.float32)
    pixel_ops.CenterDistance(r, theta, h // 2, w // 2)
    features['im_center_dist_im'] = r

    # create mask: 1=background
    wafer_mask = np.zeros_like(cropped, np.uint8)
    pixel_ops.ApplyThresholdGT_F32_U8(features['im_center_dist_im'], wafer_mask, features['wafer_radius'], 1)
    features['bl_cropped_u8'] = wafer_mask

    features['im_cropped_u8'] = (np.clip(normed, 0.0, 1.0) * 255).astype(np.uint8)
    if cropped.dtype.type is np.uint16:
        features['im_cropped_u16'] = cropped
    else:
        features['im_cropped_u16'] = cropped.astype(np.uint16)

    # compute runtime
    t_stop = timeit.default_timer()
    features['runtime'] = t_stop - t_start

    return crop_props
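The normalisation step above maps the cropped image into [0, 1] using its 1st and 99th intensity percentiles, with the subtracted lower bound capped at 0.2, before converting to uint8. A stand-alone sketch of that mapping, with np.percentile standing in for the project-internal ip.histogram_percentiles (normalise_to_u8 and the test data are made up):

import numpy as np

def normalise_to_u8(im):
    # percentile-based normalisation mirroring the logic above (sketch)
    p01, p99 = np.percentile(im, [1, 99])
    norm_lower = min(0.2, p01 / float(p99))
    normed = ((im / p99) - norm_lower) / (1.0 - norm_lower)
    return (np.clip(normed, 0.0, 1.0) * 255).astype(np.uint8)

im = np.random.gamma(2.0, 500.0, size=(100, 100)).astype(np.float32)
out = normalise_to_u8(im)
print("%s %d" % (out.dtype, out.max()))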
Code example #4
def run_single(fn, mode, display=True, downsize=True):
    features = {}
    im = ip.open_image(fn).astype(np.float32)

    if downsize and im.shape[0] > 750:
        print '    WARNING: Image resized'
        im_max = im.max()
        im = ndimage.zoom(im, 0.5)
        if im.max() > im_max:
            im[im > im_max] = im_max

    if False:
        view = ImageViewer(im)
        view.show()

    features['_fn'] = os.path.splitext(os.path.split(fn)[1])[0]

    if mode == "multi":
        features['_alg_mode'] = 'multi wafer'
        multi_cell.feature_extraction(im, features=features)
    elif mode == "mono":
        features['_alg_mode'] = 'mono wafer'
        mono_cell.feature_extraction(im, features=features)

    f = ip.print_metrics(features)
    if display:
        rgb = multi_cell.create_overlay(features)
        view = ImageViewer(im)
        ImageViewer(rgb)
        view.show()

    return f
Code example #5
def dark_spots(features):
    im = features['im_no_fingers']
    h, w = im.shape

    im_mini = im[::6, ::6]
    im_mini_med = cv2.medianBlur(im_mini, ksize=5)
    im_mini_smooth = cv2.GaussianBlur(im_mini_med, ksize=(0, 0), sigmaX=1)
    background = cv2.resize(im_mini_smooth, (w, h))
    dark_areas = background - im
    pixel_ops.ApplyThresholdLT_F32(dark_areas, dark_areas, 0.0, 0.0)

    foreground_mask = ((features['bl_cropped_u8'] == 0) |
                       (features['bl_cropped_u8'] == 4))
    structure = ndimage.generate_binary_structure(2, 1)
    foreground_mask = ndimage.binary_erosion(foreground_mask,
                                             structure=structure,
                                             iterations=3)
    dark_areas[~foreground_mask] = 0

    DARK_SPOT_SENSITIVITY = 0.08
    dark_spots = (dark_areas > DARK_SPOT_SENSITIVITY).astype(np.uint8)
    min_size = int(h * w * 0.0001)
    ip.remove_small_ccs(dark_spots, min_size)

    dark_spots_outline = ndimage.binary_dilation(
        dark_spots, structure=structure, iterations=2) - dark_spots
    features['mk_dark_spots_filled_u8'] = dark_spots
    features['mk_dark_spots_outline_u8'] = dark_spots_outline

    if False:
        view = ImageViewer(im)
        ImageViewer(background)
        ImageViewer(dark_spots)
        ImageViewer(ip.overlay_mask(im, dark_spots_outline))
        view.show()
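dark_spots estimates a smooth background by downsampling, median filtering, Gaussian blurring and resizing back, then flags pixels that fall well below that background. A self-contained sketch of the same idea using only OpenCV and NumPy (the 0.08 sensitivity is taken from the code above; dark_spot_mask and the test image are made up, and the foreground masking and small-component filtering are omitted):

import numpy as np
import cv2

def dark_spot_mask(im, sensitivity=0.08):
    # estimate a smooth background, then flag pixels well below it
    h, w = im.shape
    im_mini = np.ascontiguousarray(im[::6, ::6])
    im_mini = cv2.medianBlur(im_mini, ksize=5)
    im_mini = cv2.GaussianBlur(im_mini, ksize=(0, 0), sigmaX=1)
    background = cv2.resize(im_mini, (w, h))
    dark_areas = np.maximum(background - im, 0.0)
    return (dark_areas > sensitivity).astype(np.uint8)

im = np.full((120, 120), 0.8, np.float32)
im[50:56, 50:56] = 0.5  # one small dark spot
print(dark_spot_mask(im).sum())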
Code example #6
def module_rotate(im, rotation=None):
    if rotation is None:
        # downsize
        max_side = max(im.shape)
        down_factor = max(1, max_side // 2500)  # avoid a zero step for images smaller than 2500 px
        im_down = im[::down_factor, ::down_factor].astype(np.float32)

        # correct rotation
        features = {}
        find_module_rotation(im_down, features)
        rotation = features['rotation']

    if abs(rotation) > 0.01:
        h, w = im.shape
        dsize = (w, h)
        rot_mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, 1.0)
        im_rotated = np.empty((dsize[1], dsize[0]), np.float32)
        cv2.warpAffine(im,
                       rot_mat,
                       dsize,
                       flags=cv2.INTER_LINEAR,
                       borderMode=cv2.BORDER_REPLICATE,
                       dst=im_rotated)

        if False:
            view = ImageViewer(im[::2, ::2])
            ImageViewer(im_rotated[::2, ::2])
            view.show()
    else:
        im_rotated = im.copy()

    return rotation, im_rotated
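The warp inside module_rotate is a standard OpenCV rotation about the image centre with replicated borders, applied only when the estimated angle exceeds 0.01 degrees. A minimal sketch of just that call (rotate_about_centre, the random image and the 0.25 degree angle are arbitrary illustrations):

import numpy as np
import cv2

def rotate_about_centre(im, angle_deg):
    # rotate a float32 image about its centre, replicating edge pixels
    h, w = im.shape
    rot_mat = cv2.getRotationMatrix2D((w // 2, h // 2), angle_deg, 1.0)
    return cv2.warpAffine(im, rot_mat, (w, h),
                          flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_REPLICATE)

im = np.random.rand(200, 300).astype(np.float32)
print(rotate_about_centre(im, 0.25).shape)  # (200, 300)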
Code example #7
def compute_hash_pattern_correction(folder):
    fns = glob.glob(os.path.join(folder, "*.tif*"))

    if len(fns) == 0:
        print "No tif files found in: %s" % (folder)
        sys.exit()

    if True:
        ims = [ip.open_image(fn).astype(np.float32) for fn in fns]
        im_mean = ims[0].copy()
        for im in ims[1:]:
            im_mean += im
        im_mean /= len(ims)
        background = cv2.GaussianBlur(im_mean, (0, 0),
                                      8,
                                      borderType=cv2.BORDER_REPLICATE)
        pattern = im_mean - background
        pattern -= pattern.mean()
    else:
        background = ip.open_image(
            r"C:\Users\Neil\BT\Data\R2 FFT\FF Wafer Images\precomputed\std - ff.tif"
        ).astype(np.float32) / 4.0
        im_mean = ip.open_image(
            r"C:\Users\Neil\BT\Data\R2 FFT\FF Wafer Images\precomputed\SUM_Stack.tif"
        ).astype(np.float32) / 4.0
        pattern = im_mean - background
        pattern -= pattern.mean()

    if False:
        view = ImageViewer(im_mean)
        ImageViewer(background)
        ImageViewer(pattern)
        view.show()
        sys.exit()

    # find a mask of the peaks
    fft = fftshift(cv2.dft(pattern, flags=cv2.DFT_COMPLEX_OUTPUT))
    fft_mag = cv2.magnitude(fft[:, :, 0], fft[:, :, 1])
    fft_smooth = cv2.GaussianBlur(cv2.medianBlur(fft_mag, ksize=5),
                                  ksize=(0, 0),
                                  sigmaX=5)
    fft_log = cv2.log(fft_smooth)
    THRESH = 13.75
    mask = fft_log > THRESH

    # ignore middle (low frequency stuff)
    RADIUS = 35

    h, w = pattern.shape
    ys, xs = draw.circle(h // 2, w // 2, RADIUS)
    mask[ys, xs] = 0

    np.save("hash_fft_mask.npy", mask)
    print "FFT mask saved to 'hash_fft_mask.npy'"

    if False:
        view = ImageViewer(fft_log)
        view = ImageViewer(mask)
        view.show()
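compute_hash_pattern_correction isolates the periodic hash pattern by finding bright peaks in the shifted log FFT magnitude and zeroing a disc around the centre so low frequencies are ignored. A self-contained sketch of that peak-mask idea using numpy.fft instead of cv2.dft (the 13.75 threshold and 35-pixel radius above are dataset-specific; the values, the synthetic sinusoid and fft_peak_mask are arbitrary here, and the median/Gaussian smoothing of the spectrum is omitted):

import numpy as np

def fft_peak_mask(pattern, log_thresh, centre_radius):
    # boolean mask of strong off-centre frequency peaks
    fft_mag = np.abs(np.fft.fftshift(np.fft.fft2(pattern)))
    mask = np.log(fft_mag + 1e-6) > log_thresh
    h, w = pattern.shape
    ys, xs = np.ogrid[:h, :w]
    centre = (ys - h // 2) ** 2 + (xs - w // 2) ** 2 <= centre_radius ** 2
    mask[centre] = False  # ignore low-frequency content
    return mask

_, xs = np.mgrid[:128, :128]
pattern = np.sin(2 * np.pi * xs / 8.0).astype(np.float32)
print(fft_peak_mask(pattern, log_thresh=5.0, centre_radius=10).sum())  # the sinusoid's two peaks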
Code example #8
def robust_dislocations(smooth, impure, features):
    c = parameters.ROBUST_CROP
    smooth = np.ascontiguousarray(smooth[c:-c, c:-c])
    impure = np.ascontiguousarray(impure[c:-c, c:-c])

    struct = ndimage.generate_binary_structure(2, 1)

    # robust dislocation mask
    dog1 = (cv2.dilate(
        smooth,
        cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE,
            (parameters.DOG_STRUCT_SIZE, parameters.DOG_STRUCT_SIZE))) -
            smooth)
    dog2 = (ip.fast_smooth(smooth, sigma=parameters.DOG_SIGMA2) -
            cv2.GaussianBlur(smooth, (0, 0),
                             parameters.DOG_SIGMA1,
                             borderType=cv2.BORDER_REPLICATE))
    dog = dog1 + dog2

    IMP_THRESH = 0.4
    pixel_ops.ApplyThresholdLT_F32(impure, dog, IMP_THRESH, 0)

    if False:
        view = ImageViewer(dog1)
        ImageViewer(dog2)
        ImageViewer(dog1 + dog2)
        ImageViewer(dog)
        view.show()

    defect_mask = np.zeros_like(dog, np.uint8)
    DOG_THRESH = parameters.BLOCK_DISLOCATION_THRESH
    pixel_ops.ApplyThresholdGT_F32_U8(dog, defect_mask, DOG_THRESH, 1)
    num_pure_pixels = pixel_ops.CountThresholdGT_F32(impure, IMP_THRESH)
    defect_robust = (pixel_ops.CountEqual_U8(defect_mask,
                                             1)) / float(num_pure_pixels)

    # compute surface area
    eroded = defect_mask - cv2.erode(defect_mask, struct.astype(np.uint8))
    defect_pixels = float(pixel_ops.CountEqual_U8(defect_mask, 1))
    if defect_pixels > 0:
        defect_surface = pixel_ops.CountEqual_U8(eroded, 1) / defect_pixels
    else:
        defect_surface = 0

    if False:
        print defect_robust, defect_surface
        view = ImageViewer(smooth)
        ImageViewer(defect_mask)
        ImageViewer(eroded)
        view.show()
        sys.exit()

    features['defect_robust_area_fraction'] = defect_robust
    features['defect_surface'] = defect_surface
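The dislocation response in robust_dislocations is a sum of two band-pass terms: a dilation-minus-original term and a difference of two Gaussian smoothings at different sigmas. A minimal difference-of-Gaussians sketch with OpenCV; the sigma values here (and which of the two is larger) are arbitrary and are not taken from the parameters module:

import numpy as np
import cv2

def difference_of_gaussians(im, sigma_small=1.0, sigma_large=4.0):
    # band-pass response: heavily smoothed image minus lightly smoothed image
    small = cv2.GaussianBlur(im, (0, 0), sigma_small, borderType=cv2.BORDER_REPLICATE)
    large = cv2.GaussianBlur(im, (0, 0), sigma_large, borderType=cv2.BORDER_REPLICATE)
    return large - small

im = np.zeros((100, 100), np.float32)
im[40:60, 40:60] = 1.0
dog = difference_of_gaussians(im)
print(dog.min() < 0 < dog.max())  # edges produce responses of both signs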
Code example #9
def cell_edge_width(im, features):
    h, w = im.shape
    h2 = h // 2

    # look for frequency content at the frequency of the finger period
    if False:
        mid = im[h2 - 50:h2 + 50, :]
        period = int(round(features['finger_period']))
        period_avg = np.empty((period, im.shape[1]), np.float32)
        for offset in range(period):
            period_avg[offset, :] = mid[offset::period, :].mean(axis=0)
        col_var = period_avg.max(axis=0) - period_avg.min(axis=0)
    else:
        im_peaks = im[features['_peak_row_nums'], :]
        im_fingers = im[features['_finger_row_nums'][:-1], :]
        diff = (im_peaks - im_fingers)
        # col_var = diff.mean(axis=0)
        col_var = np.median(diff, axis=0)

        if False:
            view = ImageViewer(im_fingers)
            ImageViewer(im_peaks)
            ImageViewer(diff)
            view.show()

    col_var -= col_var.min()
    col_var /= col_var.max()
    interior = np.where(col_var > parameters.CELL_EDGE_STD_THRESH)[0]
    left, right = interior[[0, -1]]

    features['_col_var'] = col_var
    if features['_alg_mode'] == 'multi cell':
        # since one side might be impure (= low intensity & low variation) select the
        #  smaller of the two estimates
        edge_width = max(1, min(left, w - right))
        left, right = edge_width, w - edge_width
        features['cell_edge_left'] = left
        features['cell_edge_right'] = right
        features['cell_edge_tb'] = edge_width
    else:
        features['cell_edge_left'] = max(left, 1)
        features['cell_edge_right'] = min(w - 1, right)
        features['cell_edge_tb'] = ((w - right) + left) // 2

    if False:
        print left, (w - right)
        # print features['cell_edge_width']
        plt.figure()
        plt.plot(col_var)
        plt.vlines([left, right], 0, col_var.max())
        view = ImageViewer(im)
        view.show()
        sys.exit()
Code example #10
def filter_v(filtered, features):
    h, w = filtered.shape

    # Vertical filtering: compare pixel to corresponding locations from grid
    #  rows above and below
    period = int(round(features['finger_period']))
    row_locs = features['_finger_row_nums']

    # can't filter at the top and bottom. the cutoff depends on top/bottom
    #  finger and distance to image edge
    middle_start = max(row_locs[0] + period, 2 * period)
    middle_stop = min(row_locs[-1] - period, h - (2 * period))
    rows_base = np.arange(0, h, period)
    filtered_v = filtered.copy()
    for s in range(period):
        rows = rows_base + s
        rows = rows[rows < h]
        if False:
            rows_im = np.ascontiguousarray(filtered[rows, :])
        else:
            rows_im = np.empty((len(rows), w), np.float32)
            pixel_ops.CopyRows(filtered, rows, rows_im)
        rows_filtered = np.zeros_like(rows_im)
        pixel_ops.FilterV(rows_im, rows_filtered)

        if False:
            print s
            print rows
            # view = ImageViewer(filtered)
            view = ImageViewer(rows_im)
            ImageViewer(rows_filtered)
            view.show()
            # sys.exit()

        # skip anything too close to top or bottom
        row_mask = ((rows > middle_start) & (rows < middle_stop - 1))
        # TODO: speed up the following
        filtered_v[rows[row_mask], :] = rows_filtered[row_mask]

    # can't filter areas near corners for the same reason
    if 'param_multi_wafer' not in features or not features['param_multi_wafer']:
        mask = features[
            'im_center_dist'] > features['cell_radius'] - middle_start
        filtered_v[mask] = filtered[mask]

    if False:
        view = ImageViewer(filtered)
        view = ImageViewer(filtered_v)
        view.show()
        sys.exit()

    return filtered_v
Code example #11
def run_block():
    fn = r"C:\Users\Neil\BT\Data\blocks\misc\brick JW - Test PL Image %28PL Image%29.tif"
    #fn = r"C:\Users\Neil\BT\Data\blocks\B4\691 - PL Image B4 N2 4V (PL Image - Composite).tif"
    #fn = r"C:\Users\Neil\BT\Data\blocks\P3045564-20 ten times\.tif"
    #fn = r"C:\Users\Neil\BT\Data\blocks\P3045564-20 ten times\427 - P3045564-20-1 (PL Image).tif"
    im_pl = ip.open_image(fn).astype(np.float32)
    features = {}
    features_block.feature_extraction(im_pl, features)
    rgb = features_block.create_overlay(features)
    ip.print_metrics(features)
    view = ImageViewer(im_pl)
    ImageViewer(rgb)
    view.show()
Code example #12
def create_overlay(features):
    normed = features['im_cropped_u8']

    orig = normed.astype(np.int32)

    if False:
        view = ImageViewer(normed)
        view.show()

    b = orig
    g = orig
    r = orig

    if features['_cell_type'] == 'mono':
        pass
    elif features['_cell_type'] == 'multi':
        foreground = features['ov_dislocations_u8']
        b = orig + foreground
        g = orig - foreground
        r = orig - foreground

        impure = features['ov_impure2_u8']
        b -= impure
        g -= impure
        r += impure
    else:
        assert False

    r = np.clip(r, 0, 255)
    g = np.clip(g, 0, 255)
    b = np.clip(b, 0, 255)
    rgb = np.empty((normed.shape[0], normed.shape[1], 3), np.uint8)
    rgb[:, :, 0] = r.astype(np.uint8)
    rgb[:, :, 1] = g.astype(np.uint8)
    rgb[:, :, 2] = b.astype(np.uint8)

    # cracks
    if "mk_cracks_u8" in features:
        rgb = ip.overlay_mask(rgb, features['mk_cracks_u8'], 'r')

    if features['_cell_type'] == 'mono':
        # dark spots
        rgb = ip.overlay_mask(rgb, features['mk_dark_spots_outline_u8'], 'b')
        # dark areas
        if 'ov_dark_areas_u8' in features:
            dark = features["ov_dark_areas_u8"]
            rgb[:, :, 0] += dark
            rgb[:, :, 1] -= dark
            rgb[:, :, 2] += dark

    return rgb
Code example #13
def bright_lines(features):
    im = features['im_no_fingers']
    h, w = im.shape
    if 'finger_period_row' in features:
        rh = int(round(features['finger_period_row']))
        cw = int(round(features['finger_period_col']))
    else:
        rh = int(round(features['finger_period']))
        cw = int(round(features['finger_period']))

    f_v = im - np.maximum(np.roll(im, shift=2 * cw, axis=1),
                          np.roll(im, shift=-2 * cw, axis=1))
    pixel_ops.ApplyThresholdLT_F32(f_v, f_v, 0.0, 0.0)

    # filter
    mask = (f_v > 0.02).astype(np.uint8)
    min_size = 0.0005 * h * w
    ip.remove_small_ccs(mask, min_size)
    f_v[mask == 0] = 0
    # features['_f_v'] = f_v.copy()

    f_h = im - np.maximum(np.roll(im, shift=2 * rh, axis=0),
                          np.roll(im, shift=-2 * rh, axis=0))
    pixel_ops.ApplyThresholdLT_F32(f_h, f_h, 0.0, 0.0)

    # filter
    mask = (f_h > 0.02).astype(np.uint8)
    min_size = 0.0005 * h * w
    ip.remove_small_ccs(mask, min_size)
    f_h[mask == 0] = 0
    # features['_f_h'] = f_h.copy()

    # normalize
    f_h /= 0.3
    f_v /= 0.3

    pixel_ops.ClipImage(f_h, 0.0, 1.0)
    pixel_ops.ClipImage(f_v, 0.0, 1.0)
    features['ov_lines_horizontal_u8'] = (f_h * 255).astype(np.uint8)
    features['ov_lines_vertical_u8'] = (f_v * 255).astype(np.uint8)

    features['bright_lines_horizontal'] = f_h.mean() * 100
    features['bright_lines_vertical'] = f_v.mean() * 100

    if False:
        view = ImageViewer(im)
        ImageViewer(f_v)
        ImageViewer(f_h)
        view.show()
        sys.exit()
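bright_lines marks a pixel as part of a bright vertical line when it is brighter than both the pixel two finger periods to its left and two periods to its right (the np.roll / np.maximum comparison), and does the same along rows for horizontal lines. A sketch of just that roll-based response on a synthetic image (the connected-component filtering via ip.remove_small_ccs is project-internal and omitted; bright_line_response is an illustrative name):

import numpy as np

def bright_line_response(im, shift):
    # excess brightness over the brighter of the two laterally shifted copies
    neighbours = np.maximum(np.roll(im, shift, axis=1), np.roll(im, -shift, axis=1))
    return np.maximum(im - neighbours, 0.0)

im = np.full((40, 40), 0.5, np.float32)
im[:, 20] = 0.8  # one bright vertical line
resp = bright_line_response(im, shift=6)
print("%0.2f" % resp[:, 20].mean())  # ~0.30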
Code example #14
def analyse_module(features):
    im = np.ascontiguousarray(features["_im_ratio_cropped"])
    h, w = im.shape
    # mask out rows and columns
    border = 20
    border_cols = features['_divisions_cols'] - features['_divisions_cols'][0]
    for c in border_cols:
        im[:, max(c - border, 0):min(c + border + 1, w)] = 0
    border_rows = features['_divisions_rows'] - features['_divisions_rows'][0]
    for r in border_rows:
        im[max(r - border, 0):min(r + border + 1, h), :] = 0

    # scale so the max is around 2**15
    scale = ((2**15) / im.max())
    im *= scale

    f = {}
    hist = ip.histogram_percentiles(im, f, skip_zero=True)
    hist = hist[:f['hist_percentile_99.9']]
    hist_norm = hist / hist.max()
    lower = np.where(hist_norm > 0.02)[0][0]
    upper = 2 * f['hist_peak'] - lower
    high_vals = (hist[upper:].sum() / float(hist.sum()))
    features['module_bright_area_fraction'] = high_vals

    if False:
        print "%s: %0.01f%%" % (features['fn'], high_vals)
        ip.print_metrics(f)
        plt.figure()
        plt.xlabel("PL/EL ratio")
        plt.ylabel("Count")
        plt.title("Above threshold: %0.02f%%" % high_vals)
        xs = np.arange(len(hist)) / float(scale)
        plt.plot(xs, hist)
        plt.vlines([upper / float(scale)], ymin=0, ymax=hist.max())
        if False:
            plt.savefig(
                os.path.join(r"C:\Users\Neil\Desktop\M1\hist",
                             features['fn'] + '_1.png'))
            im = features["_im_ratio_cropped"]
            im[im > f['hist_percentile_99']] = f['hist_percentile_99']
            ip.save_image(
                os.path.join(r"C:\Users\Neil\Desktop\M1\hist",
                             features['fn'] + '_0.png'), im)
        else:
            plt.show()
            view = ImageViewer(im[::3, ::3])
            view.show()
        sys.exit()
Code example #15
def find_module_rotation(im, features):
    if False:
        view = ImageViewer(im)
        view.show()
        sys.exit()

    rotated = np.empty_like(im)
    h, w = im.shape
    max_r = 0.5
    crop = int(round(math.tan(math.radians(max_r)) * (im.shape[0] / 2.0)))
    rotations = np.linspace(-max_r, max_r, num=11)
    scores = []
    for rotation in rotations:
        rot_mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, 1.0)
        cv2.warpAffine(im,
                       rot_mat, (w, h),
                       flags=cv2.INTER_LINEAR,
                       borderMode=cv2.BORDER_CONSTANT,
                       dst=rotated)

        cropped = rotated[crop:-crop, crop:-crop]
        # cols = cropped.mean(axis=0)
        # dog1 = ndimage.gaussian_filter1d(cols, sigma=10) - cols
        rows = cropped.mean(axis=1)
        dog2 = ndimage.gaussian_filter1d(rows, sigma=10) - rows

        # scores.append(dog1.std() + dog2.std())
        scores.append(dog2.std())

        if False:
            print dog2.std()
            # plt.figure()
            # plt.plot(cols)
            # plt.plot(dog1)
            plt.figure()
            plt.plot(rows)
            plt.plot(dog2)
            plt.show()

    if False:
        plt.figure()
        plt.plot(rotations, scores)
        plt.show()

    features['rotation'] = rotations[np.argmax(scores)]
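find_module_rotation scores each candidate angle by how sharp the horizontal structure is after rotation: the standard deviation of a difference-of-Gaussians of the row means is largest when the cell rows are exactly horizontal, and the angle with the highest score wins. A sketch of that scoring on a synthetic striped image (ndimage.rotate is used here for brevity instead of cv2.warpAffine; rotation_score and the stripe pattern are illustrative):

import numpy as np
from scipy import ndimage

def rotation_score(im):
    # sharpness of horizontal structure: std of a DoG of the row means
    rows = im.mean(axis=1)
    dog = ndimage.gaussian_filter1d(rows, sigma=10) - rows
    return dog.std()

im = np.zeros((300, 300), np.float32)
im[::30, :] = 1.0  # perfectly horizontal lines
tilted = ndimage.rotate(im, 1.0, reshape=False)  # a 1 degree tilt smears the row means
print(rotation_score(im) > rotation_score(tilted))  # True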
Code example #16
def run_module():
    if False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\REC-144\REC-144_G00_LR0086_P35_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\REC-144\REC-144_G00_LR0086_CC7.80_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\REC-143\REC-143_G00_LR0086_P35_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\REC-143\REC-143_G00_LR0086_CC7.50_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_CC13.00_2x2_EL.tif"
    elif True:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\STP-410\STP-410_G00_LR0052_P53_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\STP-410\STP-410_G00_LR0045_CC5.50_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0245_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0160_CV43.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\APO-217\APO-217_G00_LR0089_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\APO-217\APO-217_G00_LR0089_CC13.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-098\CNY-098_G00_LR0090_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-098\CNY-098_G00_LR0090_CC10.80_2x2_EL.tif"
    elif False:
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-101\CNY-101_G00_LR0090_CC10.80_2x2_EL.tif"
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-101\CNY-101_G00_LR0090_P93_2x2_OCPL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-139\CNY-139_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-139\CNY-139_G00_LR0106_CC13.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_CC13.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-449\CNY-449_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-449\CNY-449_G00_LR0106_CC13.00_2x2_EL.tif"

    im_pl = ip.open_image(fn_pl).astype(np.float32)
    im_el = ip.open_image(fn_el).astype(np.float32)
    features = {'fn': os.path.splitext(os.path.split(fn_pl)[1])[0]}
    features_module.feature_extraction(im_pl, im_el, features)
    ip.print_metrics(features)
    ratio = features['im_pl_el']
    view = ImageViewer(ratio[::4, ::4])
    view.show()
Code example #17
def register(im1, im2):
    if False:

        def Dist(params):
            tx, ty = params
            sy = 1
            M = np.float32([[1, 0, tx], [0, sy, ty]])
            im2_reg = cv2.warpAffine(im2, M, (im2.shape[1], im2.shape[0]))
            return np.power(im2_reg - im1, 2).mean()

        params_op = optimize.fmin_powell(Dist, (0, 0), ftol=1.0, disp=False)
        tx, ty = params_op
        M = np.float32([[1, 0, tx], [0, 1, ty]])
        im2_reg = cv2.warpAffine(im2, M, (im2.shape[1], im2.shape[0]))
    else:
        h, w = im1.shape

        def Dist(params):
            tx, ty, r = params
            M = cv2.getRotationMatrix2D((w // 2, h // 2), r, 1.0)
            M[0, 2] += tx
            M[1, 2] += ty

            im2_reg = cv2.warpAffine(im2, M, (im2.shape[1], im2.shape[0]))
            # return np.power(im2_reg-im1, 2).mean()
            return np.abs(im2_reg - im1).mean()

        params_op = optimize.fmin_powell(Dist, (0, 0, 0), ftol=1.0, disp=False)
        tx, ty, r = params_op
        M = cv2.getRotationMatrix2D((w // 2, h // 2), r, 1.0)
        M[0, 2] += tx
        M[1, 2] += ty
        im2_reg = cv2.warpAffine(im2, M, (im2.shape[1], im2.shape[0]))

    if False:
        print np.power(im2_reg - im1, 2).mean()
        view = ImageViewer(im1)
        view = ImageViewer(im2_reg)
        view.show()
        sys.exit()

    return im2_reg, np.power(im2_reg - im1, 2).mean()
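register aligns im2 to im1 by directly minimising the mean absolute difference under a translation-plus-rotation warp with Powell's method. A self-contained sketch of the same approach on synthetic images (scipy.optimize.fmin_powell, ftol=1.0 and cv2.warpAffine as above; register_translation_rotation, the square test image and the (3, -2) shift are made up):

import numpy as np
import cv2
from scipy import optimize

def register_translation_rotation(im1, im2):
    # search for (tx, ty, r) that minimises the mean absolute difference
    h, w = im1.shape

    def cost(params):
        tx, ty, r = params
        M = cv2.getRotationMatrix2D((w // 2, h // 2), r, 1.0)
        M[0, 2] += tx
        M[1, 2] += ty
        warped = cv2.warpAffine(im2, M, (w, h))
        return np.abs(warped - im1).mean()

    return optimize.fmin_powell(cost, (0.0, 0.0, 0.0), ftol=1.0, disp=False)

im1 = np.zeros((100, 100), np.float32)
im1[40:60, 40:60] = 1.0
shift = np.float32([[1, 0, 3], [0, 1, -2]])  # make im2 a shifted copy of im1
im2 = cv2.warpAffine(im1, shift, (100, 100))
print(register_translation_rotation(im1, im2))  # roughly (-3, 2, 0)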
Code example #18
def filter_h(filtered_v, features):
    # set busbars to a high value so that adjoining defects are detected
    filtered_filled = filtered_v.copy()
    if False:
        filtered_filled[features['mask_busbar_filled']] = 255
        m = np.ones((1, 31), np.uint8)
        filtered_h = filter.rank.median(filtered_filled, m)
    else:
        filtered_filled[features['mask_busbar_filled']] = 1.0
        filtered_h = filtered_filled.copy()
        pixel_ops.FilterH(filtered_filled, filtered_h, parameters.F_LEN_H)

    if False:
        view = ImageViewer(filtered_v)
        ImageViewer(filtered_filled)
        ImageViewer(filtered_h)
        view.show()
        sys.exit()

    return filtered_h
Code example #19
def run_stripe():
    if True:
        mode = "mono"
        # crack
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.120013_Baccini 1 in 1 test_ID2_raw.tif"
        # corner
        #fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.113711_Baccini 1 in 1_ID5_raw.tif"
    else:
        mode = "multi"
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.121040_Baccini 1 in 1 test_ID8_raw.tif"

    im_pl = ip.open_image(fn).astype(np.float32)
    features = {"mode": mode}
    features_stripes.feature_extraction(im_pl, features)
    rgb = features_stripes.create_overlay(features)
    ip.print_metrics(features)
    print ip.list_images(features)
    view = ImageViewer(im_pl)
    ImageViewer(features['bl_cropped_u8'])
    ImageViewer(rgb)
    view.show()
Code example #20
def run_single(fn, display=True, downsize=True):
    features = {}
    im = ip.open_image(fn).astype(np.float32)

    if downsize and im.shape[0] > 750:
        print '    WARNING: Image resized'
        im_max = im.max()
        im = ndimage.zoom(im, 0.5)
        if im.max() > im_max:
            im[im > im_max] = im_max

    if False:
        view = ImageViewer(im)
        view.show()

    parameters.SLOPE_MULTI_WAFER = True
    parameters.BORDER_ERODE = 3
    parameters.MIN_IMPURE_AREA = 0.01

    features['_alg_mode'] = 'multi wafer'
    features['_fn'] = os.path.splitext(os.path.split(fn)[1])[0]
    crop_props = cropping.crop_wafer(im, create_mask=True)
    features['corners'] = crop_props['corners']
    cropped = cropping.correct_rotation(im,
                                        crop_props,
                                        pad=False,
                                        border_erode=parameters.BORDER_ERODE)
    multi_wafer.feature_extraction(cropped, crop_props, features=features)
    multi_wafer.combined_features(features)
    rgb = multi_wafer.create_overlay(features)

    f = ip.print_metrics(features, display=display)
    if display:
        print "Wafer type: %s" % multi_wafer.WaferType.types[
            features['wafer_type']]
        view = ImageViewer(rgb)
        ImageViewer(im)
        view.show()

    return f, features['im_cropped_u8'], rgb
Code example #21
def run_cropping(files, mode=None, display=True):
    for e, fn in enumerate(files):
        print "%s (%d/%d)" % (fn, e, len(files))
        features = {}
        im = ip.open_image(fn).astype(np.float32)
        if mode == "cell":
            rotated = cropping.correct_cell_rotation(im,
                                                     features,
                                                     already_cropped=False)
            cropped = cropping.crop_cell(rotated,
                                         im,
                                         features,
                                         width=None,
                                         already_cropped=False)
        elif mode == "mono wafer":
            features['_alg_mode'] = 'mono wafer'
            crop_props = cropping.crop_wafer_cz(im,
                                                create_mask=True,
                                                skip_crop=False)
            features.update(crop_props)

            cropped = cropping.correct_rotation(
                im,
                crop_props,
                pad=False,
                border_erode=parameters.BORDER_ERODE_CZ,
                fix_chamfer=False)

        if False:
            # save crop results
            pil_im = cropping.draw_crop_box(im, features, mode="pil")
            fn_root = os.path.splitext(os.path.split(fn)[1])[0]
            fn_out = os.path.join(r"C:\Users\Neil\Desktop\results\crop",
                                  fn_root + ".png")
            pil_im.save(fn_out)
        else:
            rgb = cropping.draw_crop_box(im, features, mode="rgb")
            pprint(features)
            view = ImageViewer(rgb)
            view.show()
Code example #22
def run_plir():
    fn = r"C:\Users\Neil\BT\Data\2017-09-06 TransferFunctions.TXT"
    vals = features_block.load_transfer(fn)
    spline_plir, spline_nf, spline_sp, spline_lp = features_block.interpolate_transfer(
        vals, debug=False)

    if False:
        fn_sp = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west short pass.tif"
        fn_lp = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west long pass.tif"
        fn_nf = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west no filter.tif"
    elif False:
        fn_sp = r"C:\Users\Neil\BT\Data\blocks\PLIR\marker\S0069_20170807.033044_ID4624_plg.meas.block.b3BL.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\BT\Data\blocks\PLIR\marker\S0069_20170807.033044_ID4624_plg.meas.block.b3BL.north.lp.img.tif"
        fn_nf = r"C:\Users\Neil\BT\Data\blocks\PLIR\marker\S0069_20170807.033044_ID4624_plg.meas.block.b3BL.north.std.img.tif"
    else:
        fn_sp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.lp.img.tif"
        fn_nf = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3pl.img.tif"

    im_sp = ip.open_image(fn_sp, cast_long=False).astype(np.float32)
    im_lp = ip.open_image(fn_lp, cast_long=False).astype(np.float32)
    im_pl = ip.open_image(fn_nf, cast_long=False).astype(np.float32)

    if False:
        im_sp = ndimage.zoom(im_sp, zoom=0.5)
        im_lp = ndimage.zoom(im_lp, zoom=0.5)

    features = {}
    features_block.plir(im_sp,
                        im_lp,
                        im_pl,
                        features,
                        spline_plir=spline_plir,
                        spline_plc=spline_nf)
    ip.print_metrics(features)
    log = np.log(features['im_tau_bulk_f32'])
    view = ImageViewer(features['im_tau_bulk_f32'])
    #ImageViewer(log)
    view.show()
Code example #23
def compute_batch_correction(fn_samples, debug=False):
    df = pd.read_csv(fn_samples)
    if not debug and len(df) != RECOMPUTE_RATE:
        print "Incomplete file"
        return None

    # find the foreground top and bottom
    top = int(round(np.median(df.ix[:, 'top'])))
    bottom = int(round(np.median(df.ix[:, 'bottom'])))

    # average and smooth FF
    ff_data = df.ix[:, 'row_0':].values.astype(np.float32)
    ff_norm = ff_data / np.c_[np.median(ff_data, axis=1)]

    if False:
        # import matplotlib.pylab as plt
        # plt.figure()
        # plt.imshow(ff_norm, interpolation="nearest")
        # plt.show()
        view = ImageViewer(ff_data)
        view = ImageViewer(ff_norm)
        view.show()

    # ff_avg = np.median(ff_norm, axis=0)
    ff_avg = np.mean(ff_norm, axis=0)
    ff_avg[:top] = ff_avg[top]
    ff_avg[bottom:] = ff_avg[bottom]
    sigma = 0.005 * len(ff_avg)
    ff_smooth = ndimage.gaussian_filter1d(ff_avg, sigma=sigma)
    correction = 1.0 / ff_smooth

    if False:
        import matplotlib.pylab as plt
        plt.figure()
        plt.plot(ff_smooth)
        plt.plot(ff_smooth * correction)
        plt.show()

    return correction
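compute_batch_correction boils down to: normalise each sampled row profile by its median, average the profiles, hold the ends flat outside the detected foreground, smooth with a Gaussian whose sigma is 0.5% of the profile length, and invert to get a multiplicative correction. A sketch of that pipeline on synthetic profiles (the pandas bookkeeping and the RECOMPUTE_RATE check are omitted; profile_correction and the sine-shaped profiles are illustrative):

import numpy as np
from scipy import ndimage

def profile_correction(profiles, top, bottom):
    # 1-D multiplicative flat-field correction from stacked row profiles
    norm = profiles / np.median(profiles, axis=1)[:, None]
    avg = norm.mean(axis=0)
    avg[:top] = avg[top]      # hold the ends flat
    avg[bottom:] = avg[bottom]
    smooth = ndimage.gaussian_filter1d(avg, sigma=0.005 * len(avg))
    return 1.0 / smooth

profiles = 1.0 + 0.2 * np.sin(np.linspace(0, np.pi, 500))[None, :] * np.ones((8, 1))
corr = profile_correction(profiles, top=20, bottom=480)
print("std before/after: %0.3f / %0.3f" % (profiles[0].std(), (profiles[0] * corr).std()))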
Code example #24
def remove_cell_template(norm, features):
    if features['_fingers_grid']:
        # finger grid
        rh = int(round(features['finger_period_row']))
        cw = int(round(features['finger_period_col']))
        no_fingers = ndimage.uniform_filter(norm, size=(rh, cw))

        features['im_no_fingers'] = no_fingers

        # remove busbars
        no_bbs = no_fingers.copy()
        pixel_ops.InterpolateBBs(no_bbs,
                                 np.array(features['_busbar_cols'], np.int32),
                                 features['busbar_width'] + 6)
        features['im_no_figners_bbs'] = no_bbs

        if False:
            view = ImageViewer(norm)
            ImageViewer(no_fingers)
            ImageViewer(no_bbs)
            view.show()
    else:
        # remove fingers
        f_len = features['finger_period']
        f = np.ones((int(round(f_len)), 1), np.float32) / f_len
        no_lines = ndimage.correlate(norm, f)

        # sharpen
        F_LEN2 = int(round(1.5 * f_len))
        f2 = np.ones((1, F_LEN2), np.float32) / F_LEN2
        filtered = ndimage.correlate(norm, f2)
        filtered[filtered < 0.1] = 1.0
        if False:
            view = ImageViewer(norm)
            ImageViewer(no_lines)
            ImageViewer(filtered)
            view.show()

        edges = norm / filtered
        no_fingers = edges * no_lines
        features['im_no_fingers'] = no_fingers

        if '_busbar_cols' in features:
            # remove busbars
            no_bbs = no_fingers.copy()
            pixel_ops.InterpolateBBs(
                no_bbs, np.array(features['_busbar_cols'], np.int32),
                features['busbar_width'] + 6)
        else:
            no_bbs = no_fingers

        features['im_no_figners_bbs'] = no_bbs

        if False:
            view = ImageViewer(norm)
            ImageViewer(no_fingers)
            ImageViewer(no_bbs)
            view.show()
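In the non-grid branch of remove_cell_template, fingers are removed by correlating with a vertical box filter whose length equals the finger period, so each output pixel averages exactly one full period of the line pattern and the periodic component cancels. A small demonstration of that cancellation on a synthetic image (the period of 8 and the ramp are made up; ndimage.correlate as above):

import numpy as np
from scipy import ndimage

period = 8
rows = np.arange(200)
# synthetic cell: a smooth vertical ramp with a darkening every `period` rows
im = (np.linspace(0.5, 1.0, 200)[:, None]
      - 0.1 * (rows[:, None] % period == 0)) * np.ones((1, 100), np.float32)

f = np.ones((period, 1), np.float32) / period  # box filter spanning one full period
no_fingers = ndimage.correlate(im, f)

finger_rows = np.arange(2 * period, 180, period)  # stay away from the image borders
before = im[finger_rows, 50].mean() - im[finger_rows + 1, 50].mean()
after = no_fingers[finger_rows, 50].mean() - no_fingers[finger_rows + 1, 50].mean()
print("finger contrast before/after: %0.3f / %0.3f" % (before, after))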
Code example #25
def crop(im):
    im_orig = im
    im = cv2.medianBlur(im, ksize=5)

    # these images have bright spots, so reduce the range by:
    # - clipping: bottom 5% and top 20% (variations in these ranges carry no useful info)
    # - taking the square root
    vals = np.sort(im.flat)
    p05 = vals[int(0.05 * vals.shape[0])]
    p80 = vals[int(0.8 * vals.shape[0])]

    im[im > p80] = p80
    im[im < p05] = p05
    im -= p05
    im /= (p80 - p05)
    im = np.sqrt(im)

    if False:
        view = ImageViewer(im_orig)
        ImageViewer(im)
        view.show()

    return cropping.crop_wafer_cz(im, check_foreground=False, outermost_peak=False, create_mask=True)
Code example #26
def run_single(fn, display=True, downsize=True):

    if False:
        mode = "mono"
    else:
        mode = "multi"

    features = {"_cell_type": mode}
    im = ip.open_image(fn).astype(np.float32)

    if False:
        view = ImageViewer(im)
        view.show()

    skip_crop = True
    features_stripes.feature_extraction(im, features, skip_crop)
    f = ip.print_metrics(features)
    if display:
        view = ImageViewer(im)
        rgb = features_stripes.create_overlay(features)
        ImageViewer(rgb)
        view.show()

    return f
Code example #27
def run_plir2():
    fn = r"C:\Users\Neil\BT\Data\2017-09-06 TransferFunctions.TXT"
    vals = features_block.load_transfer(fn)
    spline_plir, spline_nf, spline_sp, spline_lp = features_block.interpolate_transfer(
        vals, debug=False)

    if False:
        fn_sp = r"C:\Users\Neil\BT\Data\blocks\PLIR\2017-11-01\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\BT\Data\blocks\PLIR\2017-11-01\plg.meas.block.b3bl.north.lp.img.tif"
    elif False:
        fn_sp = r"C:\Users\Neil\Desktop\1172\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\Desktop\1172\plg.meas.block.b3bl.north.lp.img.tif"
    else:
        fn_sp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.lp.img.tif"

    im_sp = ip.open_image(fn_sp).astype(np.float32)
    im_lp = ip.open_image(fn_lp).astype(np.float32)

    if False:
        im_sp = ndimage.zoom(im_sp, zoom=0.5)
        im_lp = ndimage.zoom(im_lp, zoom=0.5)

    features = {}
    features_block.plir2(im_sp,
                         im_lp,
                         features,
                         spline_plir=spline_plir,
                         spline_sp=spline_sp)
    ip.print_metrics(features)
    log = np.log(features['im_tau_bulk_f32'])
    view = ImageViewer(features['im_tau_bulk_f32'])
    ImageViewer(log)
    plt.figure()
    plt.plot(features['im_tau_bulk_f32'].mean(axis=0))
    view.show()
Code example #28
def dark_spot_props(win_orig, win_flat, mask_pixels, ys, xs, y, x, h, w):
    struct = ndimage.generate_binary_structure(2, 1)
    mask_crack2 = ndimage.binary_dilation(mask_pixels, struct, iterations=1)
    mask_crack3 = ndimage.binary_dilation(mask_crack2, struct, iterations=1)
    defect_outline = mask_crack3 - mask_crack2

    if False:
        view = ImageViewer(win_flat)
        ImageViewer(mask_pixels)
        ImageViewer(defect_outline)
        view.show()

    # compute some features of the defect that will be used for classification
    defect_features = {}
    defect_features['strength_median'] = (np.median(win_orig[defect_outline]) -
                                          np.median(win_orig[mask_pixels]))
    defect_features['strength_mean'] = (win_orig[defect_outline].mean() -
                                        win_orig[mask_pixels].mean())
    defect_features['strength_median_flat'] = (np.median(win_flat[defect_outline]) -
                                               np.median(win_flat[mask_pixels]))
    defect_features['strength_mean_flat'] = (win_flat[defect_outline].mean() -
                                             win_flat[mask_pixels].mean())
    defect_features['strength_flat_max'] = win_flat.min() * -1
    defect_features['num_pixels'] = mask_pixels.sum()
    defect_features['edge_dist'] = min(x, y, w - 1 - x, h - 1 - y)
    extent = ys.max() - ys.min(), xs.max() - xs.min()
    defect_features['aspect_ratio'] = max(extent) / float(max(1, min(extent)))
    defect_features['fill_ratio'] = (defect_features['num_pixels'] /
                                     float(mask_pixels.shape[0] * mask_pixels.shape[1]))
    defect_features['location_y'] = y
    defect_features['location_x'] = x

    return defect_features
Code example #29
def dark_spots(features):
    im = features['im_no_fingers']

    # shrink to standard size
    h, w = 300, 300
    im_small = cv2.resize(im, (h, w))

    dark_areas = np.zeros_like(im_small)
    pixel_ops.DarkSpots(im_small, dark_areas, 8)

    candidates = (dark_areas > parameters.DARK_SPOT_MIN_STRENGTH).astype(np.uint8)
    ip.remove_small_ccs(candidates, parameters.DARK_SPOT_MIN_SIZE)

    candidates = cv2.resize(candidates, (im.shape[1], im.shape[0]))
    candidates[features['mask_busbar_filled']] = 0

    dark_spots_outline = ndimage.binary_dilation(candidates, iterations=3).astype(np.uint8) - \
                         ndimage.binary_dilation(candidates, iterations=1).astype(np.uint8)
    features['mk_dark_spots_outline_u8'] = dark_spots_outline
    features['mk_dark_spots_filled_u8'] = candidates
    features['dark_spots_area_fraction'] = candidates.mean()
    dark_areas_no_noise = dark_areas - parameters.DARK_SPOT_MIN_STRENGTH
    pixel_ops.ApplyThresholdLT_F32(dark_areas_no_noise, dark_areas_no_noise, 0.0, 0.0)
    features['dark_spots_strength'] = dark_areas_no_noise.mean() * 10000
    features['dark_spots_count'] = ip.connected_components(candidates)[1]

    if False:
        print features['dark_spots_area_fraction']
        print features['dark_spots_strength']
        print features['dark_spots_count']
        rgb = ip.overlay_mask(im, dark_spots_outline)
        view = ImageViewer(rgb)
        ImageViewer(dark_areas)
        ImageViewer(dark_areas_no_noise)
        ImageViewer(candidates)
        view.show()
Code example #30
def request(mode,
            display=False,
            send_path=False,
            return_path=False,
            skip_features=False,
            return_cropped=True,
            return_uncropped=False,
            return_outline=False):
    ###########
    # REQUEST #
    ###########
    param_names_float = [
        "verbose", "already_cropped", "skip_features", "return_cropped",
        "return_uncropped", "return_outline", "ORIGINAL_ORIENTATION"
    ]
    param_vals_float = [
        0, 0,
        int(skip_features),
        int(return_cropped),
        int(return_uncropped),
        int(return_outline), 1
    ]
    params_dict = dict(zip(param_names_float, param_vals_float))
    param_names_str = []
    param_vals_str = []
    if return_path:
        param_names_str.append("im_output_path")
        param_vals_str.append(r"C:\Users\Neil\Desktop\im_out")
    images = None

    # assemble image data
    print "Mode = %d" % mode
    if mode == 0:
        msg = struct.pack('=B', mode)
        # send to server
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((HOST, PORT))
        send_data(sock, msg)
        response = get_data(sock, 1)
        success = struct.unpack('B', response)[0]
        print "Success: %s" % str(success == 0)
        return [], []
    if mode == 10:
        fn = r"C:\Users\Neil\BT\Data\R2 FFT\multi\raw 10 sec.tif"
    elif mode == 40:
        if int(params_dict['already_cropped']) == 0:
            fn = r"C:\Users\Neil\BT\Data\blocks\B4\693 - PL Image B4 W2 4V (PL Image - Composite).tif"
        else:
            fn = r"C:\Users\Neil\BT\Data\blocks\2015-08\tifs\120815_ISE_E_nf_14A_22C_PL_600000-dark&FFcor_cropped.tif"
    elif mode in [70, 71]:
        if mode == 70:
            fn = r"C:\Users\Neil\BT\Data\slugs\zhonghuan\tifs\219609 - 160-1-6 (Uncalibrated PL Image).tif"
        elif mode == 71:
            fn = r"C:\Users\Neil\BT\Data\slugs\pseudo round\2861 - THICK SAMPLE TEST-2 %28Uncalibrated PL Image%29.tif"
        param_names_float += ['rds_percent', 'slug_radius']
        param_vals_float += [50, 0]
    elif mode == 80:
        # PERC mono cell
        # fn = r"C:\Users\Neil\BT\Data\C3\perc\mono\BAC_1024_100\20150910_122155.612_BAC_1024_100_201.tif"
        # fn = r"C:\Users\Neil\BT\Data\cropping_test_set\cells\tifs\plg.meas.cell.plqrs.a.img.tif"
        fn = r"C:\Users\Neil\BT\Data\C3\perc\mono\BAC_1024_100\20150910_122155.612_BAC_1024_100_201.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 81:
        # PERC multi cell
        fn = r"C:\Users\Neil\BT\Data\C3\perc\multi\Point\1329 - REC test E1 PL Image (PL Open-circuit Image).tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 82:
        # mono cell
        fn = r"C:\Users\Neil\BT\Data\C3\mono\INES_c-Si_100_1024\20150908_175300.680_INES_c-Si_100_1024_46.tif"
        if True:
            param_names_float.append("no_post_processing")
            param_vals_float.append(1)
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 83:
        # multi cell
        fn = r"C:\Users\Neil\BT\Data\C3\multi\misc\20170302T110107.328_Batch 3_ID467.tif"
        # fn = r"C:\Users\Neil\BT\Data\C3\multi\Astronergy\20170831T153538.783_zt-DJ--5_ID-8.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 84:
        # mono wafer
        # fn = r"C:\Users\Neil\BT\Data\CIC\cracks\tifs\S0067_20140821.131519_VI_PL21F_ID10063_GRADEB1_BIN2_raw_image.tif"
        # fn = r"C:\Users\Neil\BT\Data\mono wafer\2015-10-26\S0041_20151026.161500_longi DCA 1-2_ID2_GRADEA2_BIN4_raw.tif"
        fn = r"C:\Users\Neil\Desktop\outlines\mode84.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 85:
        # multi wafer
        fn = r"C:\Users\Neil\BT\Data\overlay test set\unnormalised\tifs\S0050_20120516.193034__ID10586 - Cor.tiff"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 86:
        # X3
        fn = r"C:\Users\Neil\BT\Data\X3\mono PERC\20161024_103301.320_a_00058101.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
        param_names_float += [
            "num_stripes", "multi", "no_stripe_images", "ORIGINAL_ORIENTATION"
        ]
        param_vals_float += [5, 0, 1, 1]
    elif mode == 87:
        # mono stripe
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.120710_Baccini 1 in 1 test_ID6_raw.tif"
    elif mode == 88:
        # multi stripe
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.120917_Baccini 1 in 1 test_ID7_raw.tif"
    elif mode == 89:
        # QC-C3
        #fn = r"C:\Users\Neil\BT\Data\half processed\1390 - Tet P4604 PLOC 0.2s 1Sun (Uncalibrated PL Image).tif"
        fn = r"C:\Users\Neil\Desktop\outlines\mode89.tif"
    elif mode in [90, 901]:
        # plir
        if True:
            fn1 = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west short pass.tif"
            fn2 = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west long pass.tif"
            fn3 = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west no filter.tif"
        else:
            fn1 = r"C:\Users\Neil\Desktop\B35 files for B3\Face 1\plg.meas.block.b3bl.north.shortpass.img.tif"
            fn2 = r"C:\Users\Neil\Desktop\B35 files for B3\Face 1\plg.meas.block.b3bl.north.raw.img.tif"
            fn3 = r"C:\Users\Neil\Desktop\B35 files for B3\Face 1\plg.meas.block.b3bl.north.longpass.img.tif"
        im_sp = ip.open_image(fn1, cast_long=False).astype(np.uint16)
        im_lp = ip.open_image(fn2, cast_long=False).astype(np.uint16)
        im_pl = ip.open_image(fn3, cast_long=False).astype(np.uint16)
        if True:
            images = {'im_sp': im_sp, 'im_lp': im_lp, 'im_pl': im_pl}
        else:
            images = {'im_sp': im_sp, 'im_lp': im_lp}
        fn_xfer = r"C:\Users\Neil\BT\Data\2017-09-06 TransferFunctions.TXT"
        vals = block.load_transfer(fn_xfer)
        images['im_xfer'] = vals

        if mode == 901:
            del images['im_pl']
            mode = 90
    elif mode == 92:
        # brick markers
        fn = r"C:\Users\Neil\Desktop\20160826\1267 - Ref-C-25chiller-2 (North - Shortpass Image).tif"
    elif mode == 95:
        # resolution
        fn = r"C:\Users\Neil\BT\Data\2017-09-06 new calibration target.tif"
    elif mode == 100:
        if True:
            fn_pl = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0245_P93_2x2_OCPL.tif"
            fn_el = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0160_CV43.00_2x2_EL.tif"
        else:
            fn_pl = r"C:\Users\Neil\Desktop\Processed\CNY-098\CNY-098_G00_LR0090_P93_2x2_OCPL.tif"
            fn_el = r"C:\Users\Neil\Desktop\Processed\CNY-098\CNY-098_G00_LR0090_CC10.80_2x2_EL.tif"
        im_pl = ip.open_image(fn_pl).astype(np.uint16)
        im_el = ip.open_image(fn_el).astype(np.uint16)
        images = {'im_pl': im_pl}  # , 'im_el': im_el}
        param_names_float += ["ORIGINAL_ORIENTATION"]
        param_vals_float += [0]
    elif mode == 255:
        msg = struct.pack('B', 255)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((HOST, PORT))
        send_data(sock, msg)
        return [], []
    else:
        print "Unknown mode"
        sys.exit()

    if images is None:
        # open im_pl
        im = ip.open_image(fn).astype(np.uint16)
        if False:
            im = im.T
        images = {'im_pl': im}

    if False and images['im_pl'].shape[0] > 800:
        print 'WARNING: Image resized'
        images['im_pl'] = ndimage.zoom(images['im_pl'], 0.25)

    if False:
        view = ImageViewer(images['im_pl'])
        view.show()

    # gather images
    image_names = ','.join(images.keys())
    msg = struct.pack('=BI', mode, len(image_names))
    msg += image_names
    for image_name, im in images.iteritems():
        assert image_name[:2] in ['bl', 'mk', 'im', 'ov']
        if image_name == 'im_xfer':
            bit_depth = 32
        else:
            bit_depth = 16
        binning = 1
        if send_path:
            # pass by path
            msg += struct.pack('=HHBBB', 0, 0, bit_depth, binning, len(fn))
            msg += fn
        else:
            # pass data
            msg += struct.pack('=HHBB', im.shape[1], im.shape[0], bit_depth,
                               binning)
            msg += im.ravel().tostring()

    if False:
        param_names_float = []
        param_vals_float = []
        param_names_str = []
        param_vals_str = []

    # numerical parameter list
    param_names = ','.join(param_names_float)
    msg += struct.pack('=I', len(param_names))
    msg += param_names
    msg += np.array(param_vals_float, np.float32).tostring()

    # string input parameters
    param_names = ','.join(param_names_str)
    msg += struct.pack('=I', len(param_names))
    msg += param_names
    param_vals = ','.join(param_vals_str)
    msg += struct.pack('=I', len(param_vals))
    msg += param_vals

    t1 = timeit.default_timer()

    # send to server
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((HOST, PORT))
    send_data(sock, msg)

    ############
    # RESPONSE #
    ############

    features = {}

    # get response code
    response = get_data(sock, 1)
    success = struct.unpack('B', response)[0]
    if success != 0:
        print("Error occurred: %d" % success)
        sys.exit()

    # get images & masks
    data = get_data(sock, 4)
    image_names_length = struct.unpack('=I', data)[0]
    if image_names_length > 0:
        image_names = get_data(sock, image_names_length).split(",")
        for im_name in image_names:
            if im_name[:3] not in ['bl_', 'mk_', 'im_', 'ov_']:
                print "ERROR: Invalid image name: %s" % im_name
                sys.exit()

            data = get_data(sock, 6)
            im_w, im_h, bit_depth, binning = struct.unpack('=hhBB', data)

            if im_w == 0 or im_h == 0:
                # read from disk
                fn_len = struct.unpack('=B', get_data(sock, 1))[0]
                fn = str(get_data(sock, fn_len))
                features[im_name] = ip.open_image(fn)
            else:
                if bit_depth == 8:
                    data = get_data(sock, 4)
                    encoding_length = struct.unpack('I', data)[0]
                    png_data = get_data(sock, encoding_length)
                    features[im_name] = ip.decode_png(png_data)
                    num_pixels = features[im_name].shape[0] * features[
                        im_name].shape[1]
                    print "%s compression: %0.1f%%" % (
                        im_name, (100 * encoding_length) / float(num_pixels))
                elif bit_depth == 16:
                    pixel_data = get_data(sock, im_w * im_h * 2)
                    features[im_name] = np.frombuffer(pixel_data,
                                                      np.uint16).reshape(
                                                          im_h, im_w)
                elif bit_depth == 32:
                    pixel_data = get_data(sock, im_w * im_h * 4)
                    features[im_name] = np.frombuffer(pixel_data,
                                                      np.float32).reshape(
                                                          im_h, im_w)
                else:
                    print '****', im_name
    else:
        image_names = []

    # get numerical metric
    response = get_data(sock, 4)
    string_size = struct.unpack('I', response)[0]
    if string_size > 0:
        feature_names = get_data(sock, string_size)
        feature_names = feature_names.split(',')
        num_features = len(feature_names)
        bytes_expected = num_features * 4
        feature_data = get_data(sock, bytes_expected)
        feature_data = list(np.frombuffer(feature_data, np.float32))
    else:
        feature_names = []
        feature_data = []

    # get string metrics
    string_size = struct.unpack('I', get_data(sock, 4))[0]
    if string_size > 0:
        feature_names += get_data(sock, string_size).split(',')
    string_size = struct.unpack('I', get_data(sock, 4))[0]
    if string_size > 0:
        feature_data += get_data(sock, string_size).split(',')

    metric_vals = zip(feature_names, feature_data)

    ###################
    # DISPLAY RESULTS #
    ###################
    metrics = {}
    for i in range(len(feature_names)):
        features[feature_names[i]] = feature_data[i]
        metrics[feature_names[i]] = feature_data[i]

    print "Returned images:"
    for image_name in image_names:
        print "  %s" % image_name
    print "Metrics:"
    pprint(metrics)

    t2 = timeit.default_timer()
    print('Total time: %0.03f seconds' % (t2 - t1))

    rgb = None
    view = None
    if "im_cropped_u8" in features:
        if mode == 80:
            rgb = perc.create_overlay(features)
        elif mode == 81:
            rgb = perc.create_overlay_multi(features)
        elif mode == 82:
            rgb = cz_cell.create_overlay(features)
        elif mode == 83:
            rgb = multi_cell.create_overlay(features)
        elif mode == 84:
            rgb = cz_wafer.create_overlay(features)
        elif mode == 85:
            if 'skip_features' not in params_dict or params_dict[
                    'skip_features'] != 1:
                rgb = multi_wafer.create_overlay(features)
        elif mode == 86:
            rgb = x3.create_overlay(features)

    if False:
        # save cropped version for testing
        fn_cropped = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                                  os.path.split(fn)[1])
        ip.save_image(fn_cropped, features['im_cropped_u16'])

    if display and mode != 100:
        print 'Images:'
        if 'im_pl' in images:
            print '  1: Input PL image'
            im = images['im_pl']
            view = ImageViewer(im)
        e = 2
        for feature in features.keys():
            if (feature.startswith('im_') or feature.startswith('mk_')
                    or feature.startswith('ov_') or feature.startswith('bl_')):
                print '  %d: %s' % (e, feature)
                ImageViewer(features[feature])
                e += 1
        if rgb is not None:
            print '  %d: Colour overlay' % e
            e += 1
            ImageViewer(rgb)
        if view is not None:
            view.show()

    return image_names, metric_vals
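The request/response exchange above is a hand-rolled binary protocol: struct-packed headers ('=' format, standard sizes) carrying length-prefixed name strings and (width, height, bit depth, binning) fields, followed by raw pixel buffers. A simplified round-trip sketch of that framing for a single image, with no sockets or parameter lists (pack_image and unpack_image are illustrative helpers, not part of the protocol above):

import struct
import numpy as np

def pack_image(name, im):
    # length-prefixed name + (w, h, bit_depth, binning) header + raw uint16 pixels
    buf = struct.pack('=I', len(name)) + name.encode('ascii')
    buf += struct.pack('=HHBB', im.shape[1], im.shape[0], 16, 1)
    return buf + im.astype(np.uint16).tobytes()

def unpack_image(buf):
    name_len = struct.unpack('=I', buf[:4])[0]
    name = buf[4:4 + name_len].decode('ascii')
    w, h, bit_depth, binning = struct.unpack('=HHBB', buf[4 + name_len:10 + name_len])
    pixels = np.frombuffer(buf[10 + name_len:], np.uint16).reshape(h, w)
    return name, pixels

im = np.arange(12, dtype=np.uint16).reshape(3, 4)
name, out = unpack_image(pack_image('im_pl', im))
print("%s %s match=%s" % (name, out.shape, (out == im).all()))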