Example #1
def module_rotate(im, rotation=None):
    if rotation is None:
        # downsize
        max_side = max(im.shape)
        down_factor = max(1, max_side // 2500)
        im_down = im[::down_factor, ::down_factor].astype(np.float32)

        # correct rotation
        features = {}
        find_module_rotation(im_down, features)
        rotation = features['rotation']

    if abs(rotation) > 0.01:
        h, w = im.shape
        dsize = (w, h)
        rot_mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, 1.0)
        im_rotated = np.empty((dsize[1], dsize[0]), np.float32)
        cv2.warpAffine(im,
                       rot_mat,
                       dsize,
                       flags=cv2.INTER_LINEAR,
                       borderMode=cv2.BORDER_REPLICATE,
                       dst=im_rotated)

        if False:
            view = ImageViewer(im[::2, ::2])
            ImageViewer(im_rotated[::2, ::2])
            view.show()
    else:
        im_rotated = im.copy()

    return rotation, im_rotated
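# A minimal usage sketch (hypothetical file name; assumes `ip` and numpy are imported as
# in the rest of this file). With rotation=None the angle is estimated from a downsized
# copy of the image; a known angle in degrees can also be passed in directly.
def demo_module_rotate(fn):
    im = ip.open_image(fn).astype(np.float32)
    rotation, im_straight = module_rotate(im)  # estimate the angle and correct it
    return rotation, im_straight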
def feature_extraction(im, features):
    t_start = timeit.default_timer()

    # crop
    crop_props = crop(im)
    features['corners'] = crop_props['corners']
    #print crop_props.keys()
    #features['crop_top'] = crop_props['crop_top']
    # features['corner_tl_x'] = crop_props['corners'][0][1]
    # features['corner_tl_y'] = crop_props['corners'][0][0]
    # features['corner_tr_x'] = crop_props['corners'][1][1]
    # features['corner_tr_y'] = crop_props['corners'][1][0]
    # features['corner_br_x'] = crop_props['corners'][2][1]
    # features['corner_br_y'] = crop_props['corners'][2][0]
    # features['corner_bl_x'] = crop_props['corners'][3][1]
    # features['corner_bl_y'] = crop_props['corners'][3][0]
    features['wafer_radius'] = crop_props['radius']
    features['_wafer_middle_orig'] = crop_props['center']
    features['crop_rotation'] = crop_props['estimated_rotation']
    cropped = cropping.correct_rotation(im, crop_props, pad=False, border_erode=parameters.BORDER_ERODE_CZ,
                                        fix_chamfer=False)
    if not cropped.flags['C_CONTIGUOUS']:
        cropped = np.ascontiguousarray(cropped)

    if False:
        view = ImageViewer(im)
        ImageViewer(cropped)
        view.show()

    # histogram features
    h, w = cropped.shape
    ip.histogram_percentiles(cropped, features, h // 2, w // 2, features['wafer_radius'])

    # normalise image
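    # the mapping below sends hist_percentile_99 to 1.0 and norm_lower * hist_percentile_99
    # to 0.0, with norm_lower capped at 0.2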
    min_val = features['hist_percentile_01'] / float(features['hist_percentile_99'])
    norm_upper = features['hist_percentile_99']
    norm_lower = min(0.2, min_val)
    normed = ((cropped / norm_upper) - norm_lower) / (1 - norm_lower)

    # calculate distance from wafer rotation middle
    r, theta = np.empty_like(normed, np.float32), np.empty_like(normed, np.float32)
    pixel_ops.CenterDistance(r, theta, h // 2, w // 2)
    features['im_center_dist_im'] = r

    # create mask: 1=background
    wafer_mask = np.zeros_like(cropped, np.uint8)
    pixel_ops.ApplyThresholdGT_F32_U8(features['im_center_dist_im'], wafer_mask, features['wafer_radius'], 1)
    features['bl_cropped_u8'] = wafer_mask

    features['im_cropped_u8'] = (np.clip(normed, 0.0, 1.0) * 255).astype(np.uint8)
    if cropped.dtype.type is np.uint16:
        features['im_cropped_u16'] = cropped
    else:
        features['im_cropped_u16'] = cropped.astype(np.uint16)

    # compute runtime
    t_stop = timeit.default_timer()
    features['runtime'] = t_stop - t_start

    return crop_props
def run_single(fn, mode, display=True, downsize=True):
    features = {}
    im = ip.open_image(fn).astype(np.float32)

    if downsize and im.shape[0] > 750:
        print '    WARNING: Image resized'
        im_max = im.max()
        im = ndimage.zoom(im, 0.5)
        if im.max() > im_max:
            im[im > im_max] = im_max

    if False:
        view = ImageViewer(im)
        view.show()

    features['_fn'] = os.path.splitext(os.path.split(fn)[1])[0]

    if mode == "multi":
        features['_alg_mode'] = 'multi wafer'
        multi_cell.feature_extraction(im, features=features)
    elif mode == "mono":
        features['_alg_mode'] = 'mono wafer'
        mono_cell.feature_extraction(im, features=features)

    f = ip.print_metrics(features)
    if display:
        rgb = multi_cell.create_overlay(features)
        view = ImageViewer(im)
        ImageViewer(rgb)
        view.show()

    return f
def cell_edge_width(im, features):
    h, w = im.shape
    h2 = h // 2

    # look for frequency content at the frequency of the finger period
    if False:
        mid = im[h2 - 50:h2 + 50, :]
        period = int(round(features['finger_period']))
        period_avg = np.empty((period, im.shape[1]), np.float32)
        for offset in range(period):
            period_avg[offset, :] = mid[offset::period, :].mean(axis=0)
        col_var = period_avg.max(axis=0) - period_avg.min(axis=0)
    else:
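        # columns inside the cell show a large brightness difference between finger rows
        # and the rows between them; columns past the cell edge do not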
        im_peaks = im[features['_peak_row_nums'], :]
        im_fingers = im[features['_finger_row_nums'][:-1], :]
        diff = (im_peaks - im_fingers)
        # col_var = diff.mean(axis=0)
        col_var = np.median(diff, axis=0)

        if False:
            view = ImageViewer(im_fingers)
            ImageViewer(im_peaks)
            ImageViewer(diff)
            view.show()

    col_var -= col_var.min()
    col_var /= col_var.max()
    interior = np.where(col_var > parameters.CELL_EDGE_STD_THRESH)[0]
    left, right = interior[[0, -1]]

    features['_col_var'] = col_var
    if features['_alg_mode'] == 'multi cell':
        # since one side might be impure (= low intensity & low variation) select the
        #  smaller of the two estimates
        edge_width = max(1, min(left, w - right))
        left, right = edge_width, w - edge_width
        features['cell_edge_left'] = left
        features['cell_edge_right'] = right
        features['cell_edge_tb'] = edge_width
    else:
        features['cell_edge_left'] = max(left, 1)
        features['cell_edge_right'] = min(w - 1, right)
        features['cell_edge_tb'] = ((w - right) + left) // 2

    if False:
        print left, (w - right)
        # print features['cell_edge_width']
        plt.figure()
        plt.plot(col_var)
        plt.vlines([left, right], 0, col_var.max())
        view = ImageViewer(im)
        view.show()
        sys.exit()
Example #5
def run_block():
    fn = r"C:\Users\Neil\BT\Data\blocks\misc\brick JW - Test PL Image %28PL Image%29.tif"
    #fn = r"C:\Users\Neil\BT\Data\blocks\B4\691 - PL Image B4 N2 4V (PL Image - Composite).tif"
    #fn = r"C:\Users\Neil\BT\Data\blocks\P3045564-20 ten times\.tif"
    #fn = r"C:\Users\Neil\BT\Data\blocks\P3045564-20 ten times\427 - P3045564-20-1 (PL Image).tif"
    im_pl = ip.open_image(fn).astype(np.float32)
    features = {}
    features_block.feature_extraction(im_pl, features)
    rgb = features_block.create_overlay(features)
    ip.print_metrics(features)
    view = ImageViewer(im_pl)
    ImageViewer(rgb)
    view.show()
def create_overlay(features):
    normed = features['im_cropped_u8']

    orig = normed.astype(np.int32)

    if False:
        view = ImageViewer(normed)
        view.show()

    b = orig
    g = orig
    r = orig

    if features['_cell_type'] == 'mono':
        pass
    elif features['_cell_type'] == 'multi':
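        # tint dislocations blue and impure regions red on top of the grayscale image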
        foreground = features['ov_dislocations_u8']
        b = orig + foreground
        g = orig - foreground
        r = orig - foreground

        impure = features['ov_impure2_u8']
        b -= impure
        g -= impure
        r += impure
    else:
        assert False

    r = np.clip(r, 0, 255)
    g = np.clip(g, 0, 255)
    b = np.clip(b, 0, 255)
    rgb = np.empty((normed.shape[0], normed.shape[1], 3), np.uint8)
    rgb[:, :, 0] = r.astype(np.uint8)
    rgb[:, :, 1] = g.astype(np.uint8)
    rgb[:, :, 2] = b.astype(np.uint8)

    # cracks
    if "mk_cracks_u8" in features:
        rgb = ip.overlay_mask(rgb, features['mk_cracks_u8'], 'r')

    if features['_cell_type'] == 'mono':
        # dark spots
        rgb = ip.overlay_mask(rgb, features['mk_dark_spots_outline_u8'], 'b')
        # dark areas
        if 'ov_dark_areas_u8' in features:
            dark = features["ov_dark_areas_u8"]
            # add in int32 to avoid uint8 wrap-around when tinting
            tinted = rgb.astype(np.int32)
            tinted[:, :, 0] += dark
            tinted[:, :, 1] -= dark
            tinted[:, :, 2] += dark
            rgb = np.clip(tinted, 0, 255).astype(np.uint8)

    return rgb
def main():
    features = {}
    fn = r"C:\Users\Neil\Desktop\R3 crack\raw PL images\cracked wafer PL image.tif"
    im = ip.open_image(fn).astype(np.float32)

    if im.shape[0] > 700:
        print '    WARNING: Image resized'
        im_max = im.max()
        im = ndimage.zoom(im, 0.5)
        if im.max() > im_max:
            im[im > im_max] = im_max

    if False:
        view = ImageViewer(im)
        view.show()

    features['_alg_mode'] = 'mono wafer'
    crop_props = cropping.crop_wafer_cz(im, create_mask=True, skip_crop=False)
    features['corners'] = crop_props['corners']
    cropped = cropping.correct_rotation(
        im,
        crop_props,
        pad=False,
        border_erode=parameters.BORDER_ERODE_CZ,
        fix_chamfer=False)
    mono_wafer.feature_extraction(cropped, crop_props, features=features)

    ip.print_metrics(features)
    rgb = mono_wafer.create_overlay(features)
    view = ImageViewer(rgb)
    view.show()
Example #8
def analyse_module(features):
    im = np.ascontiguousarray(features["_im_ratio_cropped"])
    h, w = im.shape
    # mask out rows and columns
    border = 20
    border_cols = features['_divisions_cols'] - features['_divisions_cols'][0]
    for c in border_cols:
        im[:, max(c - border, 0):min(c + border + 1, w)] = 0
    border_rows = features['_divisions_rows'] - features['_divisions_rows'][0]
    for r in border_rows:
        im[max(r - border, 0):min(r + border + 1, h), :] = 0

    # scale so the maximum is around 2**15
    scale = ((2**15) / im.max())
    im *= scale

    f = {}
    hist = ip.histogram_percentiles(im, f, skip_zero=True)
    hist = hist[:int(round(f['hist_percentile_99.9']))]
    hist_norm = hist / hist.max()
    lower = np.where(hist_norm > 0.02)[0][0]
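    # mirror the lower cutoff about the histogram peak; pixels above `upper` are
    # treated as abnormally bright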
    upper = 2 * f['hist_peak'] - lower
    high_vals = (hist[upper:].sum() / float(hist.sum()))
    features['module_bright_area_fraction'] = high_vals

    if False:
        print "%s: %0.01f%%" % (features['fn'], high_vals)
        ip.print_metrics(f)
        plt.figure()
        plt.xlabel("PL/EL ratio")
        plt.ylabel("Count")
        plt.title("Above threshold: %0.02f%%" % high_vals)
        xs = np.arange(len(hist)) / float(scale)
        plt.plot(xs, hist)
        plt.vlines([upper / float(scale)], ymin=0, ymax=hist.max())
        if False:
            plt.savefig(
                os.path.join(r"C:\Users\Neil\Desktop\M1\hist",
                             features['fn'] + '_1.png'))
            im = features["_im_ratio_cropped"]
            im[im > f['hist_percentile_99']] = f['hist_percentile_99']
            ip.save_image(
                os.path.join(r"C:\Users\Neil\Desktop\M1\hist",
                             features['fn'] + '_0.png'), im)
        else:
            plt.show()
            view = ImageViewer(im[::3, ::3])
            view.show()
        sys.exit()
def dark_spots(features):
    im = features['im_no_fingers']
    h, w = im.shape

    im_mini = im[::6, ::6]
    im_mini_med = cv2.medianBlur(im_mini, ksize=5)
    im_mini_smooth = cv2.GaussianBlur(im_mini_med, ksize=(0, 0), sigmaX=1)
    background = cv2.resize(im_mini_smooth, (w, h))
    dark_areas = background - im
    pixel_ops.ApplyThresholdLT_F32(dark_areas, dark_areas, 0.0, 0.0)

    foreground_mask = ((features['bl_cropped_u8'] == 0) |
                       (features['bl_cropped_u8'] == 4))
    structure = ndimage.generate_binary_structure(2, 1)
    foreground_mask = ndimage.binary_erosion(foreground_mask,
                                             structure=structure,
                                             iterations=3)
    dark_areas[~foreground_mask] = 0

    DARK_SPOT_SENSITIVITY = 0.08
    dark_spots = (dark_areas > DARK_SPOT_SENSITIVITY).astype(np.uint8)
    min_size = int(h * w * 0.0001)
    ip.remove_small_ccs(dark_spots, min_size)

    dark_spots_outline = ndimage.binary_dilation(
        dark_spots, structure=structure, iterations=2) - dark_spots
    features['mk_dark_spots_filled_u8'] = dark_spots
    features['mk_dark_spots_outline_u8'] = dark_spots_outline

    if False:
        view = ImageViewer(im)
        ImageViewer(background)
        ImageViewer(dark_spots)
        ImageViewer(ip.overlay_mask(im, dark_spots_outline))
        view.show()
Example #10
def find_module_rotation(im, features):
    if False:
        view = ImageViewer(im)
        view.show()
        sys.exit()

    rotated = np.empty_like(im)
    h, w = im.shape
    max_r = 0.5
    crop = int(round(math.tan(math.radians(max_r)) * (im.shape[0] / 2.0)))
    rotations = np.linspace(-max_r, max_r, num=11)
    scores = []
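    # brute-force search: when the cell boundaries are horizontal the row-mean profile has
    # sharp dips, so its difference from a smoothed version has maximum spread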
    for rotation in rotations:
        rot_mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, 1.0)
        cv2.warpAffine(im,
                       rot_mat, (w, h),
                       flags=cv2.INTER_LINEAR,
                       borderMode=cv2.BORDER_CONSTANT,
                       dst=rotated)

        cropped = rotated[crop:-crop, crop:-crop]
        # cols = cropped.mean(axis=0)
        # dog1 = ndimage.gaussian_filter1d(cols, sigma=10) - cols
        rows = cropped.mean(axis=1)
        dog2 = ndimage.gaussian_filter1d(rows, sigma=10) - rows

        # scores.append(dog1.std() + dog2.std())
        scores.append(dog2.std())

        if False:
            print dog2.std()
            # plt.figure()
            # plt.plot(cols)
            # plt.plot(dog1)
            plt.figure()
            plt.plot(rows)
            plt.plot(dog2)
            plt.show()

    if False:
        plt.figure()
        plt.plot(rotations, scores)
        plt.show()

    features['rotation'] = rotations[np.argmax(scores)]
def run_module():
    if False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\REC-144\REC-144_G00_LR0086_P35_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\REC-144\REC-144_G00_LR0086_CC7.80_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\REC-143\REC-143_G00_LR0086_P35_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\REC-143\REC-143_G00_LR0086_CC7.50_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_CC13.00_2x2_EL.tif"
    elif True:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\STP-410\STP-410_G00_LR0052_P53_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\STP-410\STP-410_G00_LR0045_CC5.50_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0245_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0160_CV43.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\APO-217\APO-217_G00_LR0089_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\APO-217\APO-217_G00_LR0089_CC13.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-098\CNY-098_G00_LR0090_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-098\CNY-098_G00_LR0090_CC10.80_2x2_EL.tif"
    elif False:
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-101\CNY-101_G00_LR0090_CC10.80_2x2_EL.tif"
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-101\CNY-101_G00_LR0090_P93_2x2_OCPL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-139\CNY-139_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-139\CNY-139_G00_LR0106_CC13.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_CC13.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-449\CNY-449_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-449\CNY-449_G00_LR0106_CC13.00_2x2_EL.tif"

    im_pl = ip.open_image(fn_pl).astype(np.float32)
    im_el = ip.open_image(fn_el).astype(np.float32)
    features = {'fn': os.path.splitext(os.path.split(fn_pl)[1])[0]}
    features_module.feature_extraction(im_pl, im_el, features)
    ip.print_metrics(features)
    ratio = features['im_pl_el']
    view = ImageViewer(ratio[::4, ::4])
    view.show()
Example #12
def create_overlay(features):
    normed = features['im_cropped_u8']
    background = features['ov_impure_u8']
    foreground = features['ov_defects_u8']

    orig = normed.astype(np.int32)

    if False:
        view = ImageViewer(orig)
        ImageViewer(background)
        ImageViewer(foreground)
        view.show()

    rgb = np.empty((background.shape[0], background.shape[1], 3), np.uint8)

    # foreground
    b = orig + foreground
    g = orig - foreground
    r = orig - foreground

    # background
    b -= background
    g -= background
    r += background

    r = np.clip(r, 0, 255)
    g = np.clip(g, 0, 255)
    b = np.clip(b, 0, 255)

    rgb[:, :, 0] = r.astype(np.uint8)
    rgb[:, :, 1] = g.astype(np.uint8)
    rgb[:, :, 2] = b.astype(np.uint8)

    return rgb
def run_cropping(files, mode=None, display=True):
    for e, fn in enumerate(files):
        print "%s (%d/%d)" % (fn, e, len(files))
        features = {}
        im = ip.open_image(fn).astype(np.float32)
        if mode == "cell":
            rotated = cropping.correct_cell_rotation(im,
                                                     features,
                                                     already_cropped=False)
            cropped = cropping.crop_cell(rotated,
                                         im,
                                         features,
                                         width=None,
                                         already_cropped=False)
        elif mode == "mono wafer":
            features['_alg_mode'] = 'mono wafer'
            crop_props = cropping.crop_wafer_cz(im,
                                                create_mask=True,
                                                skip_crop=False)
            features.update(crop_props)

            cropped = cropping.correct_rotation(
                im,
                crop_props,
                pad=False,
                border_erode=parameters.BORDER_ERODE_CZ,
                fix_chamfer=False)

        if False:
            # save crop results
            pil_im = cropping.draw_crop_box(im, features, mode="pil")
            fn_root = os.path.splitext(os.path.split(fn)[1])[0]
            fn_out = os.path.join(r"C:\Users\Neil\Desktop\results\crop",
                                  fn_root + ".png")
            pil_im.save(fn_out)
        else:
            rgb = cropping.draw_crop_box(im, features, mode="rgb")
            pprint(features)
            view = ImageViewer(rgb)
            view.show()
Example #14
def run_plir():
    fn = r"C:\Users\Neil\BT\Data\2017-09-06 TransferFunctions.TXT"
    vals = features_block.load_transfer(fn)
    spline_plir, spline_nf, spline_sp, spline_lp = features_block.interpolate_transfer(
        vals, debug=False)

    if False:
        fn_sp = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west short pass.tif"
        fn_lp = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west long pass.tif"
        fn_nf = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west no filter.tif"
    elif False:
        fn_sp = r"C:\Users\Neil\BT\Data\blocks\PLIR\marker\S0069_20170807.033044_ID4624_plg.meas.block.b3BL.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\BT\Data\blocks\PLIR\marker\S0069_20170807.033044_ID4624_plg.meas.block.b3BL.north.lp.img.tif"
        fn_nf = r"C:\Users\Neil\BT\Data\blocks\PLIR\marker\S0069_20170807.033044_ID4624_plg.meas.block.b3BL.north.std.img.tif"
    else:
        fn_sp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.lp.img.tif"
        fn_nf = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3pl.img.tif"

    im_sp = ip.open_image(fn_sp, cast_long=False).astype(np.float32)
    im_lp = ip.open_image(fn_lp, cast_long=False).astype(np.float32)
    im_pl = ip.open_image(fn_nf, cast_long=False).astype(np.float32)

    if False:
        im_sp = ndimage.zoom(im_sp, zoom=0.5)
        im_lp = ndimage.zoom(im_lp, zoom=0.5)

    features = {}
    features_block.plir(im_sp,
                        im_lp,
                        im_pl,
                        features,
                        spline_plir=spline_plir,
                        spline_plc=spline_nf)
    ip.print_metrics(features)
    log = np.log(features['im_tau_bulk_f32'])
    view = ImageViewer(features['im_tau_bulk_f32'])
    #ImageViewer(log)
    view.show()
Example #15
def compute_batch_correction(fn_samples, debug=False):
    df = pd.read_csv(fn_samples)
    if not debug and len(df) != RECOMPUTE_RATE:
        print "Incomplete file"
        return None

    # find the foreground top and bottom
    top = int(round(np.median(df.ix[:, 'top'])))
    bottom = int(round(np.median(df.ix[:, 'bottom'])))

    # average and smooth FF
    ff_data = df.ix[:, 'row_0':].values.astype(np.float32)
    ff_norm = ff_data / np.c_[np.median(ff_data, axis=1)]

    if False:
        # import matplotlib.pylab as plt
        # plt.figure()
        # plt.imshow(ff_norm, interpolation="nearest")
        # plt.show()
        view = ImageViewer(ff_data)
        view = ImageViewer(ff_norm)
        view.show()

    # ff_avg = np.median(ff_norm, axis=0)
    ff_avg = np.mean(ff_norm, axis=0)
    ff_avg[:top] = ff_avg[top]
    ff_avg[bottom:] = ff_avg[bottom]
    sigma = 0.005 * len(ff_avg)
    ff_smooth = ndimage.gaussian_filter1d(ff_avg, sigma=sigma)
    correction = 1.0 / ff_smooth

    if False:
        import matplotlib.pylab as plt
        plt.figure()
        plt.plot(ff_smooth)
        plt.plot(ff_smooth * correction)
        plt.show()

    return correction
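# A minimal usage sketch (illustrative only; `apply_batch_correction` is a hypothetical
# helper, assuming the correction profile has one entry per image row).
def apply_batch_correction(im, correction):
    # scale each image row by its flat-field factor (broadcast across columns)
    return im * correction[:, np.newaxis]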
def crop(im):
    im_orig = im
    im = cv2.medianBlur(im, ksize=5)

    # these images have bright spots, so reduce range by:
    # - clipping: bottom 5% and top 20% (variations in this range have no useful info)
    # - square root
    vals = np.sort(im.flat)
    p05 = vals[int(0.05 * vals.shape[0])]
    p80 = vals[int(0.8 * vals.shape[0])]

    im[im > p80] = p80
    im[im < p05] = p05
    im -= p05
    im /= (p80 - p05)
    im = np.sqrt(im)

    if False:
        view = ImageViewer(im_orig)
        ImageViewer(im)
        view.show()

    return cropping.crop_wafer_cz(im, check_foreground=False, outermost_peak=False, create_mask=True)
Example #17
def run_plir2():
    fn = r"C:\Users\Neil\BT\Data\2017-09-06 TransferFunctions.TXT"
    vals = features_block.load_transfer(fn)
    spline_plir, spline_nf, spline_sp, spline_lp = features_block.interpolate_transfer(
        vals, debug=False)

    if False:
        fn_sp = r"C:\Users\Neil\BT\Data\blocks\PLIR\2017-11-01\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\BT\Data\blocks\PLIR\2017-11-01\plg.meas.block.b3bl.north.lp.img.tif"
    elif False:
        fn_sp = r"C:\Users\Neil\Desktop\1172\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\Desktop\1172\plg.meas.block.b3bl.north.lp.img.tif"
    else:
        fn_sp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.lp.img.tif"

    im_sp = ip.open_image(fn_sp).astype(np.float32)
    im_lp = ip.open_image(fn_lp).astype(np.float32)

    if False:
        im_sp = ndimage.zoom(im_sp, zoom=0.5)
        im_lp = ndimage.zoom(im_lp, zoom=0.5)

    features = {}
    features_block.plir2(im_sp,
                         im_lp,
                         features,
                         spline_plir=spline_plir,
                         spline_sp=spline_sp)
    ip.print_metrics(features)
    log = np.log(features['im_tau_bulk_f32'])
    view = ImageViewer(features['im_tau_bulk_f32'])
    ImageViewer(log)
    plt.figure()
    plt.plot(features['im_tau_bulk_f32'].mean(axis=0))
    view.show()
Example #18
def run_single(fn, display=True, downsize=True):
    features = {}
    im = ip.open_image(fn).astype(np.float32)

    if downsize and im.shape[0] > 750:
        print '    WARNING: Image resized'
        im_max = im.max()
        im = ndimage.zoom(im, 0.5)
        if im.max() > im_max:
            im[im > im_max] = im_max

    if False:
        view = ImageViewer(im)
        view.show()

    parameters.SLOPE_MULTI_WAFER = True
    parameters.BORDER_ERODE = 3
    parameters.MIN_IMPURE_AREA = 0.01

    features['_alg_mode'] = 'multi wafer'
    features['_fn'] = os.path.splitext(os.path.split(fn)[1])[0]
    crop_props = cropping.crop_wafer(im, create_mask=True)
    features['corners'] = crop_props['corners']
    cropped = cropping.correct_rotation(im,
                                        crop_props,
                                        pad=False,
                                        border_erode=parameters.BORDER_ERODE)
    multi_wafer.feature_extraction(cropped, crop_props, features=features)
    multi_wafer.combined_features(features)
    rgb = multi_wafer.create_overlay(features)

    f = ip.print_metrics(features, display=display)
    if display:
        print "Wafer type: %s" % multi_wafer.WaferType.types[
            features['wafer_type']]
        view = ImageViewer(rgb)
        ImageViewer(im)
        view.show()

    return f, features['im_cropped_u8'], rgb
def run_single(fn, display=True, downsize=True):

    if False:
        mode = "mono"
    else:
        mode = "multi"

    features = {"_cell_type": mode}
    im = ip.open_image(fn).astype(np.float32)

    if False:
        view = ImageViewer(im)
        view.show()

    skip_crop = True
    features_stripes.feature_extraction(im, features, skip_crop)
    f = ip.print_metrics(features)
    if display:
        view = ImageViewer(im)
        rgb = features_stripes.create_overlay(features)
        ImageViewer(rgb)
        view.show()

    return f
def bright_lines(features):
    im = features['im_no_fingers']
    h, w = im.shape
    if 'finger_period_row' in features:
        rh = int(round(features['finger_period_row']))
        cw = int(round(features['finger_period_col']))
    else:
        rh = int(round(features['finger_period']))
        cw = int(round(features['finger_period']))

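    # a pixel on a bright vertical line is brighter than the pixels two finger periods
    # to its left and right; negative responses are clipped to zero below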
    f_v = im - np.maximum(np.roll(im, shift=2 * cw, axis=1),
                          np.roll(im, shift=-2 * cw, axis=1))
    pixel_ops.ApplyThresholdLT_F32(f_v, f_v, 0.0, 0.0)

    # filter
    mask = (f_v > 0.02).astype(np.uint8)
    min_size = 0.0005 * h * w
    ip.remove_small_ccs(mask, min_size)
    f_v[mask == 0] = 0
    # features['_f_v'] = f_v.copy()

    f_h = im - np.maximum(np.roll(im, shift=2 * rh, axis=0),
                          np.roll(im, shift=-2 * rh, axis=0))
    pixel_ops.ApplyThresholdLT_F32(f_h, f_h, 0.0, 0.0)

    # filter
    mask = (f_h > 0.02).astype(np.uint8)
    min_size = 0.0005 * h * w
    ip.remove_small_ccs(mask, min_size)
    f_h[mask == 0] = 0
    # features['_f_h'] = f_h.copy()

    # normalize
    f_h /= 0.3
    f_v /= 0.3

    pixel_ops.ClipImage(f_h, 0.0, 1.0)
    pixel_ops.ClipImage(f_v, 0.0, 1.0)
    features['ov_lines_horizontal_u8'] = (f_h * 255).astype(np.uint8)
    features['ov_lines_vertical_u8'] = (f_v * 255).astype(np.uint8)

    features['bright_lines_horizontal'] = f_h.mean() * 100
    features['bright_lines_vertical'] = f_v.mean() * 100

    if False:
        view = ImageViewer(im)
        ImageViewer(f_v)
        ImageViewer(f_h)
        view.show()
        sys.exit()
def register(im1, im2):
    if False:

        def Dist(params):
            tx, ty = params
            sy = 1
            M = np.float32([[1, 0, tx], [0, sy, ty]])
            im2_reg = cv2.warpAffine(im2, M, (im2.shape[1], im2.shape[0]))
            return np.power(im2_reg - im1, 2).mean()

        params_op = optimize.fmin_powell(Dist, (0, 0), ftol=1.0, disp=False)
        tx, ty = params_op
        M = np.float32([[1, 0, tx], [0, 1, ty]])
        im2_reg = cv2.warpAffine(im2, M, (im2.shape[1], im2.shape[0]))
    else:
        h, w = im1.shape

        def Dist(params):
            tx, ty, r = params
            M = cv2.getRotationMatrix2D((w // 2, h // 2), r, 1.0)
            M[0, 2] += tx
            M[1, 2] += ty

            im2_reg = cv2.warpAffine(im2, M, (im2.shape[1], im2.shape[0]))
            # return np.power(im2_reg-im1, 2).mean()
            return np.abs(im2_reg - im1).mean()

        params_op = optimize.fmin_powell(Dist, (0, 0, 0), ftol=1.0, disp=False)
        tx, ty, r = params_op
        M = cv2.getRotationMatrix2D((w // 2, h // 2), r, 1.0)
        M[0, 2] += tx
        M[1, 2] += ty
        im2_reg = cv2.warpAffine(im2, M, (im2.shape[1], im2.shape[0]))

    if False:
        print np.power(im2_reg - im1, 2).mean()
        view = ImageViewer(im1)
        view = ImageViewer(im2_reg)
        view.show()
        sys.exit()

    return im2_reg, np.power(im2_reg - im1, 2).mean()
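# A minimal usage sketch (hypothetical file names; assumes `ip` is the image-IO module used
# throughout this file). register() returns im2 aligned to im1 plus the mean squared error
# of the aligned pair.
def demo_register(fn1, fn2):
    im1 = ip.open_image(fn1).astype(np.float32)
    im2 = ip.open_image(fn2).astype(np.float32)
    im2_reg, mse = register(im1, im2)
    return im2_reg, mse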
def filter_h(filtered_v, features):
    #  - set busbars to high value so that adjoining defects are detected
    filtered_filled = filtered_v.copy()
    if False:
        filtered_filled[features['mask_busbar_filled']] = 255
        m = np.ones((1, 31), np.uint8)
        filtered_h = filter.rank.median(filtered_filled, m)
    else:
        filtered_filled[features['mask_busbar_filled']] = 1.0
        filtered_h = filtered_filled.copy()
        pixel_ops.FilterH(filtered_filled, filtered_h, parameters.F_LEN_H)

    if False:
        view = ImageViewer(filtered_v)
        ImageViewer(filtered_filled)
        ImageViewer(filtered_h)
        view.show()
        sys.exit()

    return filtered_h
def dark_spots(features):
    im = features['im_no_fingers']

    # shrink to standard size
    h, w = 300, 300
    im_small = cv2.resize(im, (h, w))

    dark_areas = np.zeros_like(im_small)
    pixel_ops.DarkSpots(im_small, dark_areas, 8)

    candidates = (dark_areas > parameters.DARK_SPOT_MIN_STRENGTH).astype(np.uint8)
    ip.remove_small_ccs(candidates, parameters.DARK_SPOT_MIN_SIZE)

    candidates = cv2.resize(candidates, (im.shape[1], im.shape[0]))
    candidates[features['mask_busbar_filled']] = 0

    dark_spots_outline = ndimage.binary_dilation(candidates, iterations=3).astype(np.uint8) - \
                         ndimage.binary_dilation(candidates, iterations=1).astype(np.uint8)
    features['mk_dark_spots_outline_u8'] = dark_spots_outline
    features['mk_dark_spots_filled_u8'] = candidates
    features['dark_spots_area_fraction'] = candidates.mean()
    dark_areas_no_noise = dark_areas - parameters.DARK_SPOT_MIN_STRENGTH
    pixel_ops.ApplyThresholdLT_F32(dark_areas_no_noise, dark_areas_no_noise, 0.0, 0.0)
    features['dark_spots_strength'] = dark_areas_no_noise.mean() * 10000
    features['dark_spots_count'] = ip.connected_components(candidates)[1]

    if False:
        print features['dark_spots_area_fraction']
        print features['dark_spots_strength']
        print features['dark_spots_count']
        rgb = ip.overlay_mask(im, dark_spots_outline)
        view = ImageViewer(rgb)
        ImageViewer(dark_areas)
        ImageViewer(dark_areas_no_noise)
        ImageViewer(candidates)
        view.show()
def run_stripe():
    if True:
        mode = "mono"
        # crack
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.120013_Baccini 1 in 1 test_ID2_raw.tif"
        # corner
        #fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.113711_Baccini 1 in 1_ID5_raw.tif"
    else:
        mode = "multi"
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.121040_Baccini 1 in 1 test_ID8_raw.tif"

    im_pl = ip.open_image(fn).astype(np.float32)
    features = {"mode": mode}
    features_stripes.feature_extraction(im_pl, features)
    rgb = features_stripes.create_overlay(features)
    ip.print_metrics(features)
    print ip.list_images(features)
    view = ImageViewer(im_pl)
    ImageViewer(features['bl_cropped_u8'])
    ImageViewer(rgb)
    view.show()
def dark_spot_props(win_orig, win_flat, mask_pixels, ys, xs, y, x, h, w):
    struct = ndimage.generate_binary_structure(2, 1)
    mask_crack2 = ndimage.binary_dilation(mask_pixels, struct, iterations=1)
    mask_crack3 = ndimage.binary_dilation(mask_crack2, struct, iterations=1)
    defect_outline = mask_crack3 & ~mask_crack2

    if False:
        view = ImageViewer(win_flat)
        ImageViewer(mask_pixels)
        ImageViewer(defect_outline)
        view.show()

    # compute some features of the defect that will be used for classification
    defect_features = {}
    defect_features['strength_median'] = np.median(
        win_orig[defect_outline]) - np.median(win_orig[mask_pixels])
    defect_features['strength_mean'] = win_orig[defect_outline].mean(
    ) - win_orig[mask_pixels].mean()
    defect_features['strength_median_flat'] = np.median(
        win_flat[defect_outline]) - np.median(win_flat[mask_pixels])
    defect_features['strength_mean_flat'] = win_flat[defect_outline].mean(
    ) - win_flat[mask_pixels].mean()
    defect_features['strength_flat_max'] = win_flat.min() * -1
    defect_features['num_pixels'] = mask_pixels.sum()
    defect_features['edge_dist'] = min(x, y, w - 1 - x, h - 1 - y)
    defect_features['aspect_ratio'] = (
        max(ys.max() - ys.min(),
            xs.max() - xs.min()) /
        float(max(1, min(ys.max() - ys.min(),
                         xs.max() - xs.min()))))
    defect_features['fill_ratio'] = defect_features['num_pixels'] / float(
        mask_pixels.shape[0] * mask_pixels.shape[1])
    defect_features['location_y'] = y
    defect_features['location_x'] = x

    return defect_features
Example #26
def request(mode,
            display=False,
            send_path=False,
            return_path=False,
            skip_features=False,
            return_cropped=True,
            return_uncropped=False,
            return_outline=False):
    ###########
    # REQUEST #
    ###########
    param_names_float = [
        "verbose", "already_cropped", "skip_features", "return_cropped",
        "return_uncropped", "return_outline", "ORIGINAL_ORIENTATION"
    ]
    param_vals_float = [
        0, 0,
        int(skip_features),
        int(return_cropped),
        int(return_uncropped),
        int(return_outline), 1
    ]
    params_dict = dict(zip(param_names_float, param_vals_float))
    param_names_str = []
    param_vals_str = []
    if return_path:
        param_names_str.append("im_output_path")
        param_vals_str.append(r"C:\Users\Neil\Desktop\im_out")
    images = None

    # assemble image data
    print "Mode = %d" % mode
    if mode == 0:
        msg = struct.pack('=B', mode)
        # send to server
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((HOST, PORT))
        send_data(sock, msg)
        response = get_data(sock, 1)
        success = struct.unpack('B', response)[0]
        print "Success: %s" % str(success == 0)
        return [], []
    if mode == 10:
        fn = r"C:\Users\Neil\BT\Data\R2 FFT\multi\raw 10 sec.tif"
    elif mode == 40:
        if int(params_dict['already_cropped']) == 0:
            fn = r"C:\Users\Neil\BT\Data\blocks\B4\693 - PL Image B4 W2 4V (PL Image - Composite).tif"
        else:
            fn = r"C:\Users\Neil\BT\Data\blocks\2015-08\tifs\120815_ISE_E_nf_14A_22C_PL_600000-dark&FFcor_cropped.tif"
    elif mode in [70, 71]:
        if mode == 70:
            fn = r"C:\Users\Neil\BT\Data\slugs\zhonghuan\tifs\219609 - 160-1-6 (Uncalibrated PL Image).tif"
        elif mode == 71:
            fn = r"C:\Users\Neil\BT\Data\slugs\pseudo round\2861 - THICK SAMPLE TEST-2 %28Uncalibrated PL Image%29.tif"
        param_names_float += ['rds_percent', 'slug_radius']
        param_vals_float += [50, 0]
    elif mode == 80:
        # PERC mono cell
        # fn = r"C:\Users\Neil\BT\Data\C3\perc\mono\BAC_1024_100\20150910_122155.612_BAC_1024_100_201.tif"
        # fn = r"C:\Users\Neil\BT\Data\cropping_test_set\cells\tifs\plg.meas.cell.plqrs.a.img.tif"
        fn = r"C:\Users\Neil\BT\Data\C3\perc\mono\BAC_1024_100\20150910_122155.612_BAC_1024_100_201.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 81:
        # PERC multi cell
        fn = r"C:\Users\Neil\BT\Data\C3\perc\multi\Point\1329 - REC test E1 PL Image (PL Open-circuit Image).tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 82:
        # mono cell
        fn = r"C:\Users\Neil\BT\Data\C3\mono\INES_c-Si_100_1024\20150908_175300.680_INES_c-Si_100_1024_46.tif"
        if True:
            param_names_float.append("no_post_processing")
            param_vals_float.append(1)
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 83:
        # multi cell
        fn = r"C:\Users\Neil\BT\Data\C3\multi\misc\20170302T110107.328_Batch 3_ID467.tif"
        # fn = r"C:\Users\Neil\BT\Data\C3\multi\Astronergy\20170831T153538.783_zt-DJ--5_ID-8.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 84:
        # mono wafer
        # fn = r"C:\Users\Neil\BT\Data\CIC\cracks\tifs\S0067_20140821.131519_VI_PL21F_ID10063_GRADEB1_BIN2_raw_image.tif"
        # fn = r"C:\Users\Neil\BT\Data\mono wafer\2015-10-26\S0041_20151026.161500_longi DCA 1-2_ID2_GRADEA2_BIN4_raw.tif"
        fn = r"C:\Users\Neil\Desktop\outlines\mode84.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 85:
        # multi wafer
        fn = r"C:\Users\Neil\BT\Data\overlay test set\unnormalised\tifs\S0050_20120516.193034__ID10586 - Cor.tiff"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 86:
        # X3
        fn = r"C:\Users\Neil\BT\Data\X3\mono PERC\20161024_103301.320_a_00058101.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
        param_names_float += [
            "num_stripes", "multi", "no_stripe_images", "ORIGINAL_ORIENTATION"
        ]
        param_vals_float += [5, 0, 1, 1]
    elif mode == 87:
        # mono stripe
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.120710_Baccini 1 in 1 test_ID6_raw.tif"
    elif mode == 88:
        # multi stripe
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.120917_Baccini 1 in 1 test_ID7_raw.tif"
    elif mode == 89:
        # QC-C3
        #fn = r"C:\Users\Neil\BT\Data\half processed\1390 - Tet P4604 PLOC 0.2s 1Sun (Uncalibrated PL Image).tif"
        fn = r"C:\Users\Neil\Desktop\outlines\mode89.tif"
    elif mode in [90, 901]:
        # plir
        if True:
            fn1 = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west short pass.tif"
            fn2 = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west long pass.tif"
            fn3 = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west no filter.tif"
        else:
            fn1 = r"C:\Users\Neil\Desktop\B35 files for B3\Face 1\plg.meas.block.b3bl.north.shortpass.img.tif"
            fn2 = r"C:\Users\Neil\Desktop\B35 files for B3\Face 1\plg.meas.block.b3bl.north.raw.img.tif"
            fn3 = r"C:\Users\Neil\Desktop\B35 files for B3\Face 1\plg.meas.block.b3bl.north.longpass.img.tif"
        im_sp = ip.open_image(fn1, cast_long=False).astype(np.uint16)
        im_lp = ip.open_image(fn2, cast_long=False).astype(np.uint16)
        im_pl = ip.open_image(fn3, cast_long=False).astype(np.uint16)
        if True:
            images = {'im_sp': im_sp, 'im_lp': im_lp, 'im_pl': im_pl}
        else:
            images = {'im_sp': im_sp, 'im_lp': im_lp}
        fn_xfer = r"C:\Users\Neil\BT\Data\2017-09-06 TransferFunctions.TXT"
        vals = block.load_transfer(fn_xfer)
        images['im_xfer'] = vals

        if mode == 901:
            del images['im_pl']
            mode = 90
    elif mode == 92:
        # brick markers
        fn = r"C:\Users\Neil\Desktop\20160826\1267 - Ref-C-25chiller-2 (North - Shortpass Image).tif"
    elif mode == 95:
        # resolution
        fn = r"C:\Users\Neil\BT\Data\2017-09-06 new calibration target.tif"
    elif mode == 100:
        if True:
            fn_pl = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0245_P93_2x2_OCPL.tif"
            fn_el = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0160_CV43.00_2x2_EL.tif"
        else:
            fn_pl = r"C:\Users\Neil\Desktop\Processed\CNY-098\CNY-098_G00_LR0090_P93_2x2_OCPL.tif"
            fn_el = r"C:\Users\Neil\Desktop\Processed\CNY-098\CNY-098_G00_LR0090_CC10.80_2x2_EL.tif"
        im_pl = ip.open_image(fn_pl).astype(np.uint16)
        im_el = ip.open_image(fn_el).astype(np.uint16)
        images = {'im_pl': im_pl}  # , 'im_el': im_el}
        param_names_float += ["ORIGINAL_ORIENTATION"]
        param_vals_float += [0]
    elif mode == 255:
        msg = struct.pack('B', 255)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((HOST, PORT))
        send_data(sock, msg)
        return [], []
    else:
        print "Unknown mode"
        sys.exit()

    if images is None:
        # open im_pl
        im = ip.open_image(fn).astype(np.uint16)
        if False:
            im = im.T
        images = {'im_pl': im}

    if False and images['im_pl'].shape[0] > 800:
        print 'WARNING: Image resized'
        images['im_pl'] = ndimage.zoom(images['im_pl'], 0.25)

    if False:
        view = ImageViewer(images['im_pl'])
        view.show()

    # gather images
    image_names = ','.join(images.keys())
    msg = struct.pack('=BI', mode, len(image_names))
    msg += image_names
    for image_name, im in images.iteritems():
        assert image_name[:2] in ['bl', 'mk', 'im', 'ov']
        if image_name == 'im_xfer':
            bit_depth = 32
        else:
            bit_depth = 16
        binning = 1
        if send_path:
            # pass by path
            msg += struct.pack('=HHBBB', 0, 0, bit_depth, binning, len(fn))
            msg += fn
        else:
            # pass data
            msg += struct.pack('=HHBB', im.shape[1], im.shape[0], bit_depth,
                               binning)
            msg += im.ravel().tostring()

    if False:
        param_names_float = []
        param_vals_float = []
        param_names_str = []
        param_vals_str = []

    # numerical parameter list
    param_names = ','.join(param_names_float)
    msg += struct.pack('=I', len(param_names))
    msg += param_names
    msg += np.array(param_vals_float, np.float32).tostring()

    # string input parameters
    param_names = ','.join(param_names_str)
    msg += struct.pack('=I', len(param_names))
    msg += param_names
    param_vals = ','.join(param_vals_str)
    msg += struct.pack('=I', len(param_vals))
    msg += param_vals

    t1 = timeit.default_timer()

    # send to server
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((HOST, PORT))
    send_data(sock, msg)

    ############
    # RESPONSE #
    ############

    features = {}

    # get response code
    response = get_data(sock, 1)
    success = struct.unpack('B', response)[0]
    if success != 0:
        print("Error occurred: %d" % success)
        sys.exit()

    # get images & masks
    data = get_data(sock, 4)
    image_names_length = struct.unpack('=I', data)[0]
    if image_names_length > 0:
        image_names = get_data(sock, image_names_length).split(",")
        for im_name in image_names:
            if im_name[:3] not in ['bl_', 'mk_', 'im_', 'ov_']:
                print "ERROR: Invalid image name: %s" % im_name
                sys.exit()

            data = get_data(sock, 6)
            im_w, im_h, bit_depth, binning = struct.unpack('=hhBB', data)

            if im_w == 0 or im_h == 0:
                # read from disk
                fn_len = struct.unpack('=B', get_data(sock, 1))[0]
                fn = str(get_data(sock, fn_len))
                features[im_name] = ip.open_image(fn)
            else:
                if bit_depth == 8:
                    data = get_data(sock, 4)
                    encoding_length = struct.unpack('I', data)[0]
                    png_data = get_data(sock, encoding_length)
                    features[im_name] = ip.decode_png(png_data)
                    num_pixels = features[im_name].shape[0] * features[
                        im_name].shape[1]
                    print "%s compression: %0.1f%%" % (
                        im_name, (100 * encoding_length) / float(num_pixels))
                elif bit_depth == 16:
                    pixel_data = get_data(sock, im_w * im_h * 2)
                    features[im_name] = np.frombuffer(pixel_data,
                                                      np.uint16).reshape(
                                                          im_h, im_w)
                elif bit_depth == 32:
                    pixel_data = get_data(sock, im_w * im_h * 4)
                    features[im_name] = np.frombuffer(pixel_data,
                                                      np.float32).reshape(
                                                          im_h, im_w)
                else:
                    print '****', im_name
    else:
        image_names = []

    # get numerical metric
    response = get_data(sock, 4)
    string_size = struct.unpack('I', response)[0]
    if string_size > 0:
        feature_names = get_data(sock, string_size)
        feature_names = feature_names.split(',')
        num_features = len(feature_names)
        bytes_expected = num_features * 4
        feature_data = get_data(sock, bytes_expected)
        feature_data = list(np.frombuffer(feature_data, np.float32))
    else:
        feature_names = []
        feature_data = []

    # get string metrics
    string_size = struct.unpack('I', get_data(sock, 4))[0]
    if string_size > 0:
        feature_names += get_data(sock, string_size).split(',')
    string_size = struct.unpack('I', get_data(sock, 4))[0]
    if string_size > 0:
        feature_data += get_data(sock, string_size).split(',')

    metric_vals = zip(feature_names, feature_data)

    ###################
    # DISPLAY RESULTS #
    ###################
    metrics = {}
    for i in range(len(feature_names)):
        features[feature_names[i]] = feature_data[i]
        metrics[feature_names[i]] = feature_data[i]

    print "Returned images:"
    for image_name in image_names:
        print "  %s" % image_name
    print "Metrics:"
    pprint(metrics)

    t2 = timeit.default_timer()
    print('Total time: %0.03f seconds' % (t2 - t1))

    rgb = None
    view = None
    if "im_cropped_u8" in features:
        if mode == 80:
            rgb = perc.create_overlay(features)
        elif mode == 81:
            rgb = perc.create_overlay_multi(features)
        elif mode == 82:
            rgb = cz_cell.create_overlay(features)
        elif mode == 83:
            rgb = multi_cell.create_overlay(features)
        elif mode == 84:
            rgb = cz_wafer.create_overlay(features)
        elif mode == 85:
            if 'skip_features' not in params_dict or params_dict[
                    'skip_features'] != 1:
                rgb = multi_wafer.create_overlay(features)
        elif mode == 86:
            rgb = x3.create_overlay(features)

    if False:
        # save cropped version for testing
        fn_cropped = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                                  os.path.split(fn)[1])
        ip.save_image(fn_cropped, features['im_cropped_u16'])

    if display and mode != 100:
        print 'Images:'
        if 'im_pl' in images:
            print '  1: Input PL image'
            im = images['im_pl']
            view = ImageViewer(im)
        e = 2
        for feature in features.keys():
            if (feature.startswith('im_') or feature.startswith('mk_')
                    or feature.startswith('ov_') or feature.startswith('bl_')):
                print '  %d: %s' % (e, feature)
                ImageViewer(features[feature])
                e += 1
        if rgb is not None:
            print '  %d: Colour overlay' % e
            e += 1
            ImageViewer(rgb)
        if view is not None:
            view.show()

    return image_names, metric_vals
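# A rough sketch (illustrative only, not the real server code) of how the numeric parameter
# block assembled above is laid out on the wire: a uint32 length, the comma-separated
# parameter names, then one float32 value per name in the same order.
def unpack_float_params(buf, offset=0):
    (name_len,) = struct.unpack_from('=I', buf, offset)
    offset += 4
    names = buf[offset:offset + name_len].split(',')
    offset += name_len
    vals = np.frombuffer(buf, dtype=np.float32, count=len(names), offset=offset)
    return dict(zip(names, vals)), offset + 4 * len(names)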
def ring_strength(im, features):
    DEBUG = False

    # remove a lot of the defects by taking the max of a few positions at equal distance
    h, w = im.shape
    if 'im_center_dist_rot' in features:
        # being called by wafers alg
        dist = features['im_center_dist_rot']
        theta = features['im_center_theta_rot']
        center_x = int(round(features['wafer_middle_x']))
        center_y = int(round(features['wafer_middle_y']))
        radius = int(features['wafer_radius'] - 10)
    else:
        dist, theta = np.empty_like(im, np.float32), np.empty_like(im, np.float32)
        pixel_ops.CenterDistance(dist, theta, features['center_y'], features['center_x'])
        center_x = int(round(features['center_x']))
        center_y = int(round(features['center_y']))
        radius = int(features['radius'] - 10)

    corner_filled, corner_avg = fill_corners(im, features, 10, dist)

    if False:
        view = ImageViewer(im)
        ImageViewer(corner_filled)
        view.show()
        sys.exit()

    maxes = corner_filled.copy()
    rotated = np.empty_like(im)
    for r in [-4.0, -2.0, 2.0, 4.0]:
        rot_mat = cv2.getRotationMatrix2D((center_x, center_y), r, 1.0)
        cv2.warpAffine(corner_filled, rot_mat, (w, h), flags=cv2.INTER_LINEAR,
                       borderMode=cv2.BORDER_CONSTANT, dst=rotated, borderValue=0)
        maxes = np.maximum(maxes, rotated)

    if False:
        view = ImageViewer(im)
        ImageViewer(maxes)
        view.show()
        sys.exit()

    # A spiral smooth
    # - get coordinates that start at the middle and rotate outwards
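    # smoothing along this spiral ordering averages pixels of (nearly) equal radius, so
    # concentric rings are preserved while localised defects are smoothed away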
    dist = np.round(dist).astype(np.int32)
    dist_flat = dist.flat
    theta_flat = theta.flat

    # first sort by distance from center
    args = np.argsort(dist_flat)
    dist_flat = dist_flat[args]
    theta_flat = theta_flat[args]

    # for pixels at an equal distance, sort by theta
    boundaries = np.where((dist_flat - np.roll(dist_flat, 1)) > 0)[0]
    for i in range(len(boundaries) - 1):
        start = boundaries[i]
        stop = boundaries[i + 1]
        args_t = np.argsort(theta_flat[start:stop])

        args[start:stop] = args[start:stop][args_t]

    # apply smoothing to flattened, ordered image
    im1D = maxes.flatten()
    im1D = im1D[args]
    if False:
        im_smooth = ndimage.gaussian_filter1d(im1D, sigma=30)
    else:
        # faster: smooth downsized
        im_smooth = ndimage.gaussian_filter1d(im1D[::3], sigma=10)
        zoom = len(im1D) / float(len(im_smooth))
        im_smooth = ndimage.zoom(im_smooth, zoom=zoom, order=0)
        assert len(im_smooth) == len(im1D)

    im_rings = im_smooth[np.argsort(args)].reshape((h, w))
    if False:
        im_rings, _ = cz_wafer.fill_corners_edges(im_rings, features, 4, corner_fill=corner_avg)

    if False:
        view = ImageViewer(im)
        ImageViewer(im_rings)
        view.show()
        sys.exit()

    if DEBUG:
        plt.figure()

    rotations = range(0, 361, 10)
    dip_profiles = np.zeros((len(rotations), radius), np.float32)
    circle_strengths = []
    for e, r in enumerate(rotations):
        ys, xs = draw.line(center_y, center_x,
                           center_y + int(radius * math.cos(math.radians(r))),
                           center_x + int(radius * math.sin(math.radians(r))))
        mask = ((ys >= 0) & (xs >= 0) & (ys < h) & (xs < w))
        ys, xs = ys[mask], xs[mask]
        if DEBUG:
            im[ys, xs] = 0

        profile = im_rings[ys, xs]
        sample_r = dist[ys[-1], xs[-1]]

        # resample to standard length
        rs = np.linspace(0, sample_r, num=len(profile), endpoint=True)
        f = interpolate.interp1d(rs, profile)
        profile = f(np.arange(sample_r))

        if parameters.RING_SIGMA1 > 0:
            profile = ndimage.gaussian_filter1d(profile, sigma=parameters.RING_SIGMA1)
        if parameters.RING_SIGMA2 > 0:
            profile_upper = ndimage.gaussian_filter1d(profile, sigma=parameters.RING_SIGMA2)
        else:
            profile_upper = profile

        # interpolate peaks
        peaks = np.where((profile_upper > np.roll(profile_upper, 1)) &
                         (profile_upper > np.roll(profile_upper, -1)))[0]
        if len(peaks) < 2:
            dip_profiles[e, :len(profile)] = 0
        else:
            f = interpolate.interp1d(peaks, profile_upper[peaks])
            xs = np.arange(peaks[0], peaks[-1])
            f_upper = profile.copy()
            f_upper[xs] = f(xs)

            # find dips
            dip_shape = f_upper - profile

            # ignore the middle (small artifacts near the centre would have a
            #  disproportionately large effect)
            dip_shape[:100] = 0
            dip_profiles[e, :len(profile)] = dip_shape

            # a second strategy for telling difference between slugs with 1 small dark
            #  and lots/large rings
            zeros = np.where(dip_shape == 0)[0]
            gaps = np.where(zeros[1:] - zeros[:-1] > 1)[0]
            big_dips = []
            for g in gaps:
                start, stop = zeros[g], zeros[g + 1]
                dip_strength = 1000.0 * dip_shape[start:stop].sum() / float(len(profile))
                if dip_strength > 0.5:
                    big_dips.append(dip_strength)
            circle_strengths.append(np.array(big_dips).sum())

        if DEBUG:
            plt.plot(profile)
            plt.plot(dip_profiles[e, :])
            plt.plot(f_upper, '--')

    path_xs = np.zeros(dip_profiles.shape[0], np.int32)
    path_strength = np.zeros_like(dip_profiles, np.float32)
    pixel_ops.strongest_path(dip_profiles, path_strength, path_xs, 15)
    path_vals = dip_profiles[np.arange(dip_profiles.shape[0]), path_xs]

    if False:
        dip_profiles[np.arange(dip_profiles.shape[0]), path_xs] = dip_profiles.max() * 1.1
        view = ImageViewer(dip_profiles)
        ImageViewer(path_strength)
        plt.figure()
        plt.plot(path_strength[-1, :])
        plt.figure()
        plt.plot(path_vals)
        view.show()
        sys.exit()

    # a path might have a few peaks due to non-ring artifacts.
    # - ignore some of the highest areas
    path2 = path_vals.copy()
    for i in range(parameters.NUM_PEAKS):
        m = np.argmax(path2)
        path2[max(0, m - 2):min(path2.shape[0], m + 3)] = 0
    path2[[0, -1]] = 0
    # plt.figure()
    # plt.plot(path_vals)
    # plt.plot(path2)
    # plt.show()

    features['circle_strength'] = 100 * path2.max()
    features['circle_strength_2'] = np.median(circle_strengths) * 10
    # print features['circle_strength_2']

    if DEBUG:
        # plt.plot(dip_profiles.sum(axis=0))
        print features['circle_strength']
        ImageViewer(im)
        ImageViewer(im_rings)
        dip_profiles[np.arange(dip_profiles.shape[0]), path_xs] = dip_profiles.max() * 1.1
        ImageViewer(dip_profiles)
        plt.figure()
        plt.plot(path_vals)
        plt.plot(path2)
        plt.show()

    return im_rings
def find_slug(im, features):
    h, w = im.shape
    h2, w2 = h // 2, w // 2

    # highlight edges in each quadrant
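    # the per-quadrant sign choices make the dark-to-bright transition at the slug boundary
    # a positive edge response in every quadrant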
    edgesH = cv2.Sobel(im, cv2.CV_32F, 0, 1)
    edgesV = cv2.Sobel(im, cv2.CV_32F, 1, 0)
    corner_edges = np.zeros_like(im)
    corner_edges[:h2, :w2] = edgesH[:h2, :w2] + edgesV[:h2, :w2]
    corner_edges[:h2, -w2:] = edgesH[:h2, -w2:] - edgesV[:h2, -w2:]
    corner_edges[-h2:, -w2:] = -1 * edgesH[-h2:, -w2:] - edgesV[-h2:, -w2:]
    corner_edges[-h2:, :w2] = -1 * edgesH[-h2:, :w2] + edgesV[-h2:, :w2]
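    # the Sobel components are sign-flipped per quadrant so that, assuming the
    # slug is brighter than the background, the slug boundary (where the
    # gradient points inward toward the centre) gives a positive response in
    # every quadrant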

    # find candidate boundary points (strongest edge per row in the left and right halves)
    left = corner_edges[:, :w2]
    ys = np.arange(left.shape[0])
    xs = np.argmax(left, axis=1)
    mask = corner_edges[ys, xs] > 0.4
    ys = ys[mask]
    xs = xs[mask]
    right = corner_edges[:, w2:]
    ys2 = np.arange(right.shape[0])
    xs2 = w2 + np.argmax(right, axis=1)
    mask = corner_edges[ys2, xs2] > 0.4
    ys2 = ys2[mask]
    xs2 = xs2[mask]
    ys = np.r_[ys, ys2]
    xs = np.r_[xs, xs2]

    if False:
        ImageViewer(corner_edges)
        plt.figure()
        plt.imshow(im, cmap="gray")
        plt.plot(xs, ys, "o")
        plt.show()
        sys.exit()

    t1 = default_timer()

    # use a Hough transform to vote on the most likely center/radius
    # - assume the true center is within MAX_OFFSET pixels of the image middle

    # phase 1: rough fit
    MAX_OFFSET = 200
    step = 3
    acc_ys = np.arange(h2 - MAX_OFFSET, h2 + MAX_OFFSET + 1, step)
    acc_xs = np.arange(w2 - MAX_OFFSET, w2 + MAX_OFFSET + 1, step)
    diag = math.sqrt(h2 ** 2 + w2 ** 2)
    min_r = int(0.5 * diag)
    max_r = int(diag)
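    # 3D Hough accumulator over candidate centres (acc_ys x acc_xs) and radii
    # (min_r..max_r); pixel_ops.CircleHoughAcc2 (external helper) presumably
    # adds a vote for every (cy, cx, r) consistent with each edge point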
    acc = np.zeros((acc_ys.shape[0], acc_xs.shape[0], max_r - min_r), np.int32)
    pixel_ops.CircleHoughAcc2(ys, xs, acc_ys, acc_xs, acc, min_r, max_r)
    acc = ndimage.gaussian_filter(acc.astype(np.float32), sigma=(1, 1, 0))
    i, j, r = ndimage.maximum_position(acc)
    middle_y, middle_x, radius = acc_ys[i], acc_xs[j], r + min_r

    if True:
        # phase 2: fine tune
        acc_ys = np.arange(middle_y - (2 * step), middle_y + (2 * step) + 1)
        acc_xs = np.arange(middle_x - (2 * step), middle_x + (2 * step) + 1)
        min_r = int(radius - 10)
        max_r = int(radius + 10)
        acc = np.zeros((acc_ys.shape[0], acc_xs.shape[0], max_r - min_r), np.int32)
        pixel_ops.CircleHoughAcc2(ys, xs, acc_ys, acc_xs, acc, min_r, max_r)
        acc = ndimage.gaussian_filter(acc.astype(np.float32), sigma=(1, 1, 0))
        i, j, r = ndimage.maximum_position(acc)

        middle_y, middle_x, radius = acc_ys[i], acc_xs[j], r + min_r

    features['center_y'] = middle_y
    features['center_x'] = middle_x
    features['radius'] = radius
    features['crop_rotation'] = 0
    features['crop_left'] = 0
    features['crop_right'] = im.shape[1] - 1
    features['crop_top'] = 0
    features['crop_bottom'] = im.shape[0] - 1

    mask = np.zeros_like(im, np.uint8)
    r, theta = np.empty_like(im, np.float32), np.empty_like(im, np.float32)
    pixel_ops.CenterDistance(r, theta, middle_y, middle_x)
    pixel_ops.ApplyThresholdGT_F32_U8(r, mask, radius, 1)
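    # r holds each pixel's distance from the fitted centre; pixels further
    # than the fitted radius are flagged as background (mask = 1)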

    features['bl_uncropped_u8'] = mask
    features['bl_cropped_u8'] = mask

    if False:
        print default_timer() - t1
        rgb = create_overlay(im, features)
        view = ImageViewer(rgb)
        # ImageViewer(mask)
        view.show()
        sys.exit()
def fill_corners(im, features, edge, dist):
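    """Return (corner_filled, corner_avg): a copy of `im` with the regions
    beyond (radius - edge) from the slug centre, per `dist`, replaced by the
    mean boundary intensity of the corresponding quadrant, plus the average of
    those four means. `dist` is assumed to be the per-pixel distance from the
    slug centre."""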
    h, w = im.shape
    if 'radius' in features:
        radius = int(round(features['radius']))
        y2 = int(round(features['center_y']))
        x2 = int(round(features['center_x']))
    elif 'wafer_radius' in features:
        radius = int(round(features['wafer_radius']))
        y2 = int(round(features['wafer_middle_y']))
        x2 = int(round(features['wafer_middle_x']))
    else:
        print "ERROR: No radius found"
        assert False

    h2 = h // 2
    w2 = w // 2

    # pixels to sample intensities along corners
    ys, xs = draw.circle_perimeter(y2, x2, radius - edge)
    mask = ((ys >= 0) & (ys < h) & (xs >= 0) & (xs < w))
    ys = ys[mask]
    xs = xs[mask]
    corner_filled = im.copy()
    corner_avg = 0

    if False:
        im[ys, xs] = im.max() * 1.1
        view = ImageViewer(im)
        view.show()
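    # for each quadrant: average the sampled perimeter intensities that fall
    # in that quadrant, then paint every pixel further than (radius - edge)
    # from the centre with that average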

    # top left
    mask = ((ys < h2) & (xs < w2))
    if mask.sum() > 0:
        corner_val = im[ys[mask], xs[mask]].mean()
        corner_avg += corner_val
        corner_filled[:h2, :w2][dist[:h2, :w2] > radius - edge] = corner_val

    # top right
    mask = ((ys < h2) & (xs > w2))
    if mask.sum() > 0:
        corner_val = im[ys[mask], xs[mask]].mean()
        corner_avg += corner_val
        corner_filled[:h2, w2:][dist[:h2, w2:] > radius - edge] = corner_val

    # bottom left
    mask = ((ys > h2) & (xs < w2))
    if mask.sum() > 0:
        corner_val = im[ys[mask], xs[mask]].mean()
        corner_avg += corner_val
        corner_filled[h2:, :w2][dist[h2:, :w2] > radius - edge] = corner_val

    # bottom right
    mask = ((ys > h2) & (xs > w2))
    if mask.sum() > 0:
        corner_val = im[ys[mask], xs[mask]].mean()
        corner_avg += corner_val
        corner_filled[h2:, w2:][dist[h2:, w2:] > radius - edge] = corner_val

    corner_avg /= 4.0

    # edges
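    # - replicate the first/last interior column and row outward so the border
    #   strip of width `edge` contains no raw edge pixels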
    corner_filled[:, :edge] = np.c_[corner_filled[:, edge]]
    corner_filled[:, -edge:] = np.c_[corner_filled[:, -edge]]
    corner_filled[:edge, :] = np.r_[corner_filled[edge, :]]
    corner_filled[-edge:, :] = np.r_[corner_filled[-edge, :]]

    if False:
        r_mask = np.zeros_like(im, np.uint8)
        r_mask[ys, xs] = 1
        rgb = ip.overlay_mask(im, r_mask)
        view = ImageViewer(rgb)
        ImageViewer(corner_filled)
        view.show()
        sys.exit()

    return corner_filled, corner_avg
def feature_extraction(im, features, skip_features=False):
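    """Slug feature extraction: denoise and normalise the image, decide whether
    the sample is round or (pseudo-)square, locate its centre/radius, then
    (unless skipped) compute histogram, radial-profile and ring metrics."""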
    # median filter to remove noise
    im = cv2.medianBlur(im, 3)
    h, w = im.shape

    # normalize
    hist_features = {}
    ip.histogram_percentiles(im, hist_features)
    norm = im / hist_features['hist_percentile_99.9']
    pixel_ops.ClipImage(norm, 0, 1)

    # automatically determine if square or round
    is_round = True
    crop_props = None
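    # heuristic: attempt the square-wafer crop; if it succeeds with a small
    # estimated rotation and >= 97% of the in-mask pixels are bright, treat
    # the sample as a (pseudo-)square wafer, otherwise as a round slug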
    try:
        # try cropping using wafer alg
        # im = np.ascontiguousarray(im[::-1, :])
        crop_props = cropping.crop_wafer_cz(im, create_mask=True, output_error=False)

        # if round, rotation will likely be high
        if abs(crop_props['estimated_rotation']) < 5:
            # make sure most of foreground mask is actually foreground
            f = {}
            ip.histogram_percentiles(im, f)
            norm = im / f['hist_percentile_99.9']
            coverage = (norm[crop_props['mask'] == 0] > 0.5).mean()
            if coverage > 0.97:
                is_round = False
            if False:
                print coverage
                view = ImageViewer(norm)
                ImageViewer(crop_props['mask'])
                view.show()
    except Exception:
        # the square-wafer crop failed; fall back to treating the sample as round
        pass

    if False:
        print "Is round:", is_round
        view = ImageViewer(im)
        view.show()

    if is_round:
        # find center and radius
        find_slug(norm, features)
    else:
        # pre-crop
        cropped = cropping.correct_rotation(im, crop_props, pad=False, border_erode=parameters.BORDER_ERODE_CZ,
                                            fix_chamfer=False)
        features['bl_uncropped_u8'] = crop_props['mask']
        features['bl_cropped_u8'] = crop_props['mask']
        features['center_y'] = crop_props['center'][0]
        features['center_x'] = crop_props['center'][1]
        features['radius'] = crop_props['radius']
        features['corners'] = crop_props['corners']
        features['center'] = crop_props['center']
        features['crop_rotation'] = 0

        if False:
            view = ImageViewer(im)
            ImageViewer(cropped)
            ImageViewer(crop_props['mask'])
            view.show()

        im = np.ascontiguousarray(cropped, dtype=im.dtype)
        norm = im / hist_features['hist_percentile_99.9']

    # set corners (note: kept for consistency; in the current implementation there is no cropping)
    features['corner_tl_x'] = 0
    features['corner_tl_y'] = 0
    features['corner_tr_x'] = w - 1
    features['corner_tr_y'] = 0
    features['corner_br_x'] = w - 1
    features['corner_br_y'] = h - 1
    features['corner_bl_x'] = 0
    features['corner_bl_y'] = h - 1

    if False:
        view = ImageViewer(norm)
        ImageViewer(features['bl_uncropped_u8'])
        view.show()

    if skip_features or ('input_param_skip_features' in features and int(features['input_param_skip_features']) == 1):
        return

    # PL metrics
    hist = ip.histogram_percentiles(im, features, features['center_y'], features['center_x'],
                                    features['radius'])
    if False:
        # features['radius'] = features['radius']
        rgb = create_overlay(im, features)
        # ImageViewer(im)
        ImageViewer(rgb)
        plt.figure()
        plt.plot(hist)
        plt.show()

    # rds
    rds(norm, features)

    # dark/bright corners
    radial_profile(norm, features)

    # rings
    ring_strength(norm, features)