Example #1
def compute_hash_pattern_correction(folder):
    fns = glob.glob(os.path.join(folder, "*.tif*"))

    if len(fns) == 0:
        print "No tif files found in: %s" % (folder)
        sys.exit()

    if True:
        ims = [ip.open_image(fn).astype(np.float32) for fn in fns]
        im_mean = ims[0].copy()
        for im in ims[1:]:
            im_mean += im
        im_mean /= len(ims)
        background = cv2.GaussianBlur(im_mean, (0, 0),
                                      8,
                                      borderType=cv2.BORDER_REPLICATE)
        pattern = im_mean - background
        pattern -= pattern.mean()
    else:
        background = ip.open_image(
            r"C:\Users\Neil\BT\Data\R2 FFT\FF Wafer Images\precomputed\std - ff.tif"
        ).astype(np.float32) / 4.0
        im_mean = ip.open_image(
            r"C:\Users\Neil\BT\Data\R2 FFT\FF Wafer Images\precomputed\SUM_Stack.tif"
        ).astype(np.float32) / 4.0
        pattern = im_mean - background
        pattern -= pattern.mean()

    if False:
        view = ImageViewer(im_mean)
        ImageViewer(background)
        ImageViewer(pattern)
        view.show()
        sys.exit()

    # find a mask of the peaks
    fft = fftshift(cv2.dft(pattern, flags=cv2.DFT_COMPLEX_OUTPUT))
    fft_mag = cv2.magnitude(fft[:, :, 0], fft[:, :, 1])
    fft_smooth = cv2.GaussianBlur(cv2.medianBlur(fft_mag, ksize=5),
                                  ksize=(0, 0),
                                  sigmaX=5)
    fft_log = cv2.log(fft_smooth)
    THRESH = 13.75
    mask = fft_log > THRESH

    # ignore the middle of the spectrum (low-frequency content)
    RADIUS = 35

    h, w = pattern.shape
    ys, xs = draw.circle(h // 2, w // 2, RADIUS)
    mask[ys, xs] = 0

    np.save("hash_fft_mask.npy", mask)
    print "FFT mask saved to 'hash_fft_mask.npy'"

    if False:
        view = ImageViewer(fft_log)
        view = ImageViewer(mask)
        view.show()
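
An illustrative sketch (not part of the original example): one way the mask saved above could be applied to a new image, by zeroing the flagged FFT bins and inverting the transform. apply_hash_fft_mask is a hypothetical helper; the production routine FF.correct_hash_pattern used elsewhere in these examples may work differently.

def apply_hash_fft_mask(im, mask_fn="hash_fft_mask.npy"):
    # hypothetical helper: suppress the hash pattern by zeroing masked FFT bins
    mask = np.load(mask_fn)
    fft = cv2.dft(im.astype(np.float32), flags=cv2.DFT_COMPLEX_OUTPUT)
    # the mask was saved in fftshift-ed coordinates, so shift it back to match
    fft[ifftshift(mask)] = 0
    return cv2.idft(fft, flags=cv2.DFT_REAL_OUTPUT | cv2.DFT_SCALE)
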
def main():
    features = {}
    fn = r"C:\Users\Neil\Desktop\R3 crack\raw PL images\cracked wafer PL image.tif"
    im = ip.open_image(fn).astype(np.float32)

    if im.shape[0] > 700:
        print '    WARNING: Image resized'
        im_max = im.max()
        im = ndimage.zoom(im, 0.5)
        if im.max() > im_max:
            im[im > im_max] = im_max

    if False:
        view = ImageViewer(im)
        view.show()

    features['_alg_mode'] = 'mono wafer'
    crop_props = cropping.crop_wafer_cz(im, create_mask=True, skip_crop=False)
    features['corners'] = crop_props['corners']
    cropped = cropping.correct_rotation(
        im,
        crop_props,
        pad=False,
        border_erode=parameters.BORDER_ERODE_CZ,
        fix_chamfer=False)
    mono_wafer.feature_extraction(cropped, crop_props, features=features)

    ip.print_metrics(features)
    rgb = mono_wafer.create_overlay(features)
    view = ImageViewer(rgb)
    view.show()
def run_single(fn, mode, display=True, downsize=True):
    features = {}
    im = ip.open_image(fn).astype(np.float32)

    if downsize and im.shape[0] > 750:
        print '    WARNING: Image resized'
        im_max = im.max()
        im = ndimage.zoom(im, 0.5)
        if im.max() > im_max:
            im[im > im_max] = im_max

    if False:
        view = ImageViewer(im)
        view.show()

    features['_fn'] = os.path.splitext(os.path.split(fn)[1])[0]

    if mode == "multi":
        features['_alg_mode'] = 'multi wafer'
        multi_cell.feature_extraction(im, features=features)
    elif mode == "mono":
        features['_alg_mode'] = 'mono wafer'
        mono_cell.feature_extraction(im, features=features)

    f = ip.print_metrics(features)
    if display:
        rgb = multi_cell.create_overlay(features)
        view = ImageViewer(im)
        ImageViewer(rgb)
        view.show()

    return f
def run_module():
    if False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\REC-144\REC-144_G00_LR0086_P35_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\REC-144\REC-144_G00_LR0086_CC7.80_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\REC-143\REC-143_G00_LR0086_P35_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\REC-143\REC-143_G00_LR0086_CC7.50_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_CC13.00_2x2_EL.tif"
    elif True:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\STP-410\STP-410_G00_LR0052_P53_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\STP-410\STP-410_G00_LR0045_CC5.50_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0245_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0160_CV43.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\APO-217\APO-217_G00_LR0089_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\APO-217\APO-217_G00_LR0089_CC13.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-098\CNY-098_G00_LR0090_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-098\CNY-098_G00_LR0090_CC10.80_2x2_EL.tif"
    elif False:
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-101\CNY-101_G00_LR0090_CC10.80_2x2_EL.tif"
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-101\CNY-101_G00_LR0090_P93_2x2_OCPL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-139\CNY-139_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-139\CNY-139_G00_LR0106_CC13.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-232\CNY-232_G00_LR0106_CC13.00_2x2_EL.tif"
    elif False:
        fn_pl = r"C:\Users\Neil\BT\Data\modules\CNY-449\CNY-449_G00_LR0106_P93_2x2_OCPL.tif"
        fn_el = r"C:\Users\Neil\BT\Data\modules\CNY-449\CNY-449_G00_LR0106_CC13.00_2x2_EL.tif"

    im_pl = ip.open_image(fn_pl).astype(np.float32)
    im_el = ip.open_image(fn_el).astype(np.float32)
    features = {'fn': os.path.splitext(os.path.split(fn_pl)[1])[0]}
    features_module.feature_extraction(im_pl, im_el, features)
    ip.print_metrics(features)
    ratio = features['im_pl_el']
    view = ImageViewer(ratio[::4, ::4])
    view.show()
Example #5
def run_block():
    fn = r"C:\Users\Neil\BT\Data\blocks\misc\brick JW - Test PL Image %28PL Image%29.tif"
    #fn = r"C:\Users\Neil\BT\Data\blocks\B4\691 - PL Image B4 N2 4V (PL Image - Composite).tif"
    #fn = r"C:\Users\Neil\BT\Data\blocks\P3045564-20 ten times\.tif"
    #fn = r"C:\Users\Neil\BT\Data\blocks\P3045564-20 ten times\427 - P3045564-20-1 (PL Image).tif"
    im_pl = ip.open_image(fn).astype(np.float32)
    features = {}
    features_block.feature_extraction(im_pl, features)
    rgb = features_block.create_overlay(features)
    ip.print_metrics(features)
    view = ImageViewer(im_pl)
    ImageViewer(rgb)
    view.show()
Example #6
def run_plir():
    fn = r"C:\Users\Neil\BT\Data\2017-09-06 TransferFunctions.TXT"
    vals = features_block.load_transfer(fn)
    spline_plir, spline_nf, spline_sp, spline_lp = features_block.interpolate_transfer(
        vals, debug=False)

    if False:
        fn_sp = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west short pass.tif"
        fn_lp = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west long pass.tif"
        fn_nf = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west no filter.tif"
    elif False:
        fn_sp = r"C:\Users\Neil\BT\Data\blocks\PLIR\marker\S0069_20170807.033044_ID4624_plg.meas.block.b3BL.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\BT\Data\blocks\PLIR\marker\S0069_20170807.033044_ID4624_plg.meas.block.b3BL.north.lp.img.tif"
        fn_nf = r"C:\Users\Neil\BT\Data\blocks\PLIR\marker\S0069_20170807.033044_ID4624_plg.meas.block.b3BL.north.std.img.tif"
    else:
        fn_sp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.lp.img.tif"
        fn_nf = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3pl.img.tif"

    im_sp = ip.open_image(fn_sp, cast_long=False).astype(np.float32)
    im_lp = ip.open_image(fn_lp, cast_long=False).astype(np.float32)
    im_pl = ip.open_image(fn_nf, cast_long=False).astype(np.float32)

    if False:
        im_sp = ndimage.zoom(im_sp, zoom=0.5)
        im_lp = ndimage.zoom(im_lp, zoom=0.5)

    features = {}
    features_block.plir(im_sp,
                        im_lp,
                        im_pl,
                        features,
                        spline_plir=spline_plir,
                        spline_plc=spline_nf)
    ip.print_metrics(features)
    log = np.log(features['im_tau_bulk_f32'])
    view = ImageViewer(features['im_tau_bulk_f32'])
    #ImageViewer(log)
    view.show()
Example #7
def run_plir2():
    fn = r"C:\Users\Neil\BT\Data\2017-09-06 TransferFunctions.TXT"
    vals = features_block.load_transfer(fn)
    spline_plir, spline_nf, spline_sp, spline_lp = features_block.interpolate_transfer(
        vals, debug=False)

    if False:
        fn_sp = r"C:\Users\Neil\BT\Data\blocks\PLIR\2017-11-01\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\BT\Data\blocks\PLIR\2017-11-01\plg.meas.block.b3bl.north.lp.img.tif"
    elif False:
        fn_sp = r"C:\Users\Neil\Desktop\1172\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\Desktop\1172\plg.meas.block.b3bl.north.lp.img.tif"
    else:
        fn_sp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.sp.img.tif"
        fn_lp = r"C:\Users\Neil\Desktop\Rietech.2.1172\tifs\plg.meas.block.b3bl.north.lp.img.tif"

    im_sp = ip.open_image(fn_sp).astype(np.float32)
    im_lp = ip.open_image(fn_lp).astype(np.float32)

    if False:
        im_sp = ndimage.zoom(im_sp, zoom=0.5)
        im_lp = ndimage.zoom(im_lp, zoom=0.5)

    features = {}
    features_block.plir2(im_sp,
                         im_lp,
                         features,
                         spline_plir=spline_plir,
                         spline_sp=spline_sp)
    ip.print_metrics(features)
    log = np.log(features['im_tau_bulk_f32'])
    view = ImageViewer(features['im_tau_bulk_f32'])
    ImageViewer(log)
    plt.figure()
    plt.plot(features['im_tau_bulk_f32'].mean(axis=0))
    view.show()
def main():
    folder = r"C:\Users\Neil\BT\Data\half processed"
    files = glob.glob(os.path.join(folder, "*.tif"))
    for e, fn in enumerate(files):
        #if e != 34:
        #    continue
        print "%s (%d/%d)" % (fn, e, len(files))
        features = {}
        im = ip.open_image(fn).astype(np.float32)
        crop_props = feature_extraction(im, features)

        if True:
            # save crop results
            pil_im = cropping.draw_crop_box(im, crop_props, pil_im=True)
            fn_root = os.path.splitext(os.path.split(fn)[1])[0]
            fn_out = os.path.join(r"C:\Users\Neil\Desktop\results\crop", fn_root + ".png")
            pil_im.save(fn_out)
def run_stripe():
    if True:
        mode = "mono"
        # crack
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.120013_Baccini 1 in 1 test_ID2_raw.tif"
        # corner
        #fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.113711_Baccini 1 in 1_ID5_raw.tif"
    else:
        mode = "multi"
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.121040_Baccini 1 in 1 test_ID8_raw.tif"

    im_pl = ip.open_image(fn).astype(np.float32)
    features = {"mode": mode}
    features_stripes.feature_extraction(im_pl, features)
    rgb = features_stripes.create_overlay(features)
    ip.print_metrics(features)
    print ip.list_images(features)
    view = ImageViewer(im_pl)
    ImageViewer(features['bl_cropped_u8'])
    ImageViewer(rgb)
    view.show()
Example #10
def run_single(fn, display=True, downsize=True):
    features = {}
    im = ip.open_image(fn).astype(np.float32)

    if downsize and im.shape[0] > 750:
        print '    WARNING: Image resized'
        im_max = im.max()
        im = ndimage.zoom(im, 0.5)
        if im.max() > im_max:
            im[im > im_max] = im_max

    if False:
        view = ImageViewer(im)
        view.show()

    parameters.SLOPE_MULTI_WAFER = True
    parameters.BORDER_ERODE = 3
    parameters.MIN_IMPURE_AREA = 0.01

    features['_alg_mode'] = 'multi wafer'
    features['_fn'] = os.path.splitext(os.path.split(fn)[1])[0]
    crop_props = cropping.crop_wafer(im, create_mask=True)
    features['corners'] = crop_props['corners']
    cropped = cropping.correct_rotation(im,
                                        crop_props,
                                        pad=False,
                                        border_erode=parameters.BORDER_ERODE)
    multi_wafer.feature_extraction(cropped, crop_props, features=features)
    multi_wafer.combined_features(features)
    rgb = multi_wafer.create_overlay(features)

    f = ip.print_metrics(features, display=display)
    if display:
        print "Wafer type: %s" % multi_wafer.WaferType.types[
            features['wafer_type']]
        view = ImageViewer(rgb)
        ImageViewer(im)
        view.show()

    return f, features['im_cropped_u8'], rgb
def run_cropping(files, mode=None, display=True):
    for e, fn in enumerate(files):
        print "%s (%d/%d)" % (fn, e, len(files))
        features = {}
        im = ip.open_image(fn).astype(np.float32)
        if mode == "cell":
            rotated = cropping.correct_cell_rotation(im,
                                                     features,
                                                     already_cropped=False)
            cropped = cropping.crop_cell(rotated,
                                         im,
                                         features,
                                         width=None,
                                         already_cropped=False)
        elif mode == "mono wafer":
            features['_alg_mode'] = 'mono wafer'
            crop_props = cropping.crop_wafer_cz(im,
                                                create_mask=True,
                                                skip_crop=False)
            features.update(crop_props)

            cropped = cropping.correct_rotation(
                im,
                crop_props,
                pad=False,
                border_erode=parameters.BORDER_ERODE_CZ,
                fix_chamfer=False)

        if False:
            # save crop results
            pil_im = cropping.draw_crop_box(im, features, mode="pil")
            fn_root = os.path.splitext(os.path.split(fn)[1])[0]
            fn_out = os.path.join(r"C:\Users\Neil\Desktop\results\crop",
                                  fn_root + ".png")
            pil_im.save(fn_out)
        else:
            rgb = cropping.draw_crop_box(im, features, mode="rgb")
            pprint(features)
            view = ImageViewer(rgb)
            view.show()
def run_single(fn, display=True, downsize=True):

    if False:
        mode = "mono"
    else:
        mode = "multi"

    features = {"_cell_type": mode}
    im = ip.open_image(fn).astype(np.float32)

    if False:
        view = ImageViewer(im)
        view.show()

    skip_crop = True
    features_stripes.feature_extraction(im, features, skip_crop)
    f = ip.print_metrics(features)
    if display:
        view = ImageViewer(im)
        rgb = features_stripes.create_overlay(features)
        ImageViewer(rgb)
        view.show()

    return f
Example #13
def request(mode,
            display=False,
            send_path=False,
            return_path=False,
            skip_features=False,
            return_cropped=True,
            return_uncropped=False,
            return_outline=False):
    ###########
    # REQUEST #
    ###########
    param_names_float = [
        "verbose", "already_cropped", "skip_features", "return_cropped",
        "return_uncropped", "return_outline", "ORIGINAL_ORIENTATION"
    ]
    param_vals_float = [
        0, 0,
        int(skip_features),
        int(return_cropped),
        int(return_uncropped),
        int(return_outline), 1
    ]
    params_dict = dict(zip(param_names_float, param_vals_float))
    param_names_str = []
    param_vals_str = []
    if return_path:
        param_names_str.append("im_output_path")
        param_vals_str.append("C:\Users\Neil\Desktop\im_out")
    images = None

    # assemble image data
    print "Mode = %d" % mode
    if mode == 0:
        msg = struct.pack('=B', mode)
        # send to server
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((HOST, PORT))
        send_data(sock, msg)
        response = get_data(sock, 1)
        success = struct.unpack('B', response)[0]
        print "Success: %s" % str(success == 0)
        return [], []
    if mode == 10:
        fn = r"C:\Users\Neil\BT\Data\R2 FFT\multi\raw 10 sec.tif"
    elif mode == 40:
        if int(params_dict['already_cropped']) == 0:
            fn = r"C:\Users\Neil\BT\Data\blocks\B4\693 - PL Image B4 W2 4V (PL Image - Composite).tif"
        else:
            fn = r"C:\Users\Neil\BT\Data\blocks\2015-08\tifs\120815_ISE_E_nf_14A_22C_PL_600000-dark&FFcor_cropped.tif"
    elif mode in [70, 71]:
        if mode == 70:
            fn = r"C:\Users\Neil\BT\Data\slugs\zhonghuan\tifs\219609 - 160-1-6 (Uncalibrated PL Image).tif"
        elif mode == 71:
            fn = r"C:\Users\Neil\BT\Data\slugs\pseudo round\2861 - THICK SAMPLE TEST-2 %28Uncalibrated PL Image%29.tif"
        param_names_float += ['rds_percent', 'slug_radius']
        param_vals_float += [50, 0]
    elif mode == 80:
        # PERC mono cell
        # fn = r"C:\Users\Neil\BT\Data\C3\perc\mono\BAC_1024_100\20150910_122155.612_BAC_1024_100_201.tif"
        # fn = r"C:\Users\Neil\BT\Data\cropping_test_set\cells\tifs\plg.meas.cell.plqrs.a.img.tif"
        fn = r"C:\Users\Neil\BT\Data\C3\perc\mono\BAC_1024_100\20150910_122155.612_BAC_1024_100_201.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 81:
        # PERC multi cell
        fn = r"C:\Users\Neil\BT\Data\C3\perc\multi\Point\1329 - REC test E1 PL Image (PL Open-circuit Image).tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 82:
        # mono cell
        fn = r"C:\Users\Neil\BT\Data\C3\mono\INES_c-Si_100_1024\20150908_175300.680_INES_c-Si_100_1024_46.tif"
        if True:
            param_names_float.append("no_post_processing")
            param_vals_float.append(1)
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 83:
        # multi cell
        fn = r"C:\Users\Neil\BT\Data\C3\multi\misc\20170302T110107.328_Batch 3_ID467.tif"
        # fn = r"C:\Users\Neil\BT\Data\C3\multi\Astronergy\20170831T153538.783_zt-DJ--5_ID-8.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 84:
        # mono wafer
        # fn = r"C:\Users\Neil\BT\Data\CIC\cracks\tifs\S0067_20140821.131519_VI_PL21F_ID10063_GRADEB1_BIN2_raw_image.tif"
        # fn = r"C:\Users\Neil\BT\Data\mono wafer\2015-10-26\S0041_20151026.161500_longi DCA 1-2_ID2_GRADEA2_BIN4_raw.tif"
        fn = r"C:\Users\Neil\Desktop\outlines\mode84.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 85:
        # multi wafer
        fn = r"C:\Users\Neil\BT\Data\overlay test set\unnormalised\tifs\S0050_20120516.193034__ID10586 - Cor.tiff"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
    elif mode == 86:
        # X3
        fn = r"C:\Users\Neil\BT\Data\X3\mono PERC\20161024_103301.320_a_00058101.tif"
        if int(params_dict['already_cropped']) == 1:
            fn = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                              os.path.split(fn)[1])
        param_names_float += [
            "num_stripes", "multi", "no_stripe_images", "ORIGINAL_ORIENTATION"
        ]
        param_vals_float += [5, 0, 1, 1]
    elif mode == 87:
        # mono stripe
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.120710_Baccini 1 in 1 test_ID6_raw.tif"
    elif mode == 88:
        # multi stripe
        fn = r"C:\Users\Neil\BT\Data\stripe\2017-09-07 Baccini 1 in 1\S0041_20170907.120917_Baccini 1 in 1 test_ID7_raw.tif"
    elif mode == 89:
        # QC-C3
        #fn = r"C:\Users\Neil\BT\Data\half processed\1390 - Tet P4604 PLOC 0.2s 1Sun (Uncalibrated PL Image).tif"
        fn = r"C:\Users\Neil\Desktop\outlines\mode89.tif"
    elif mode in [90, 901]:
        # plir
        if True:
            fn1 = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west short pass.tif"
            fn2 = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west long pass.tif"
            fn3 = r"C:\Users\Neil\BT\Data\blocks\PLIR\Trina\2016-05-12\5.4V W (Uncalibrated PL Image) west no filter.tif"
        else:
            fn1 = r"C:\Users\Neil\Desktop\B35 files for B3\Face 1\plg.meas.block.b3bl.north.shortpass.img.tif"
            fn2 = r"C:\Users\Neil\Desktop\B35 files for B3\Face 1\plg.meas.block.b3bl.north.raw.img.tif"
            fn3 = r"C:\Users\Neil\Desktop\B35 files for B3\Face 1\plg.meas.block.b3bl.north.longpass.img.tif"
        im_sp = ip.open_image(fn1, cast_long=False).astype(np.uint16)
        im_lp = ip.open_image(fn2, cast_long=False).astype(np.uint16)
        im_pl = ip.open_image(fn3, cast_long=False).astype(np.uint16)
        if True:
            images = {'im_sp': im_sp, 'im_lp': im_lp, 'im_pl': im_pl}
        else:
            images = {'im_sp': im_sp, 'im_lp': im_lp}
        fn_xfer = r"C:\Users\Neil\BT\Data\2017-09-06 TransferFunctions.TXT"
        vals = block.load_transfer(fn_xfer)
        images['im_xfer'] = vals

        if mode == 901:
            del images['im_pl']
            mode = 90
    elif mode == 92:
        # brick markers
        fn = r"C:\Users\Neil\Desktop\20160826\1267 - Ref-C-25chiller-2 (North - Shortpass Image).tif"
    elif mode == 95:
        # resolution
        fn = r"C:\Users\Neil\BT\Data\2017-09-06 new calibration target.tif"
    elif mode == 100:
        if True:
            fn_pl = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0245_P93_2x2_OCPL.tif"
            fn_el = r"C:\Users\Neil\BT\Data\modules\WIN-555\WIN-555_LR0160_CV43.00_2x2_EL.tif"
        else:
            fn_pl = r"C:\Users\Neil\Desktop\Processed\CNY-098\CNY-098_G00_LR0090_P93_2x2_OCPL.tif"
            fn_el = r"C:\Users\Neil\Desktop\Processed\CNY-098\CNY-098_G00_LR0090_CC10.80_2x2_EL.tif"
        im_pl = ip.open_image(fn_pl).astype(np.uint16)
        im_el = ip.open_image(fn_el).astype(np.uint16)
        images = {'im_pl': im_pl}  # , 'im_el': im_el}
        param_names_float += ["ORIGINAL_ORIENTATION"]
        param_vals_float += [0]
    elif mode == 255:
        msg = struct.pack('B', 255)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((HOST, PORT))
        send_data(sock, msg)
        return [], []
    else:
        print "Unknown mode"
        sys.exit()

    if images is None:
        # open im_pl
        im = ip.open_image(fn).astype(np.uint16)
        if False:
            im = im.T
        images = {'im_pl': im}

    if False and images['im_pl'].shape[0] > 800:
        print 'WARNING: Image resized'
        images['im_pl'] = ndimage.zoom(images['im_pl'], 0.25)

    if False:
        view = ImageViewer(images['im_pl'])
        view.show()

    # pack the images into the request message
    image_names = ','.join(images.keys())
    msg = struct.pack('=BI', mode, len(image_names))
    msg += image_names
    for image_name, im in images.iteritems():
        assert image_name[:2] in ['bl', 'mk', 'im', 'ov']
        if image_name == 'im_xfer':
            bit_depth = 32
        else:
            bit_depth = 16
        binning = 1
        if send_path:
            # pass by path
            msg += struct.pack('=HHBBB', 0, 0, bit_depth, binning, len(fn))
            msg += fn
        else:
            # pass data
            msg += struct.pack('=HHBB', im.shape[1], im.shape[0], bit_depth,
                               binning)
            msg += im.ravel().tostring()

    if False:
        param_names_float = []
        param_vals_float = []
        param_names_str = []
        param_vals_str = []

    # numerical parameter list
    param_names = ','.join(param_names_float)
    msg += struct.pack('=I', len(param_names))
    msg += param_names
    msg += np.array(param_vals_float, np.float32).tostring()

    # string input parameters
    param_names = ','.join(param_names_str)
    msg += struct.pack('=I', len(param_names))
    msg += param_names
    param_vals = ','.join(param_vals_str)
    msg += struct.pack('=I', len(param_vals))
    msg += param_vals

    t1 = timeit.default_timer()

    # send to server
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((HOST, PORT))
    send_data(sock, msg)

    ############
    # RESPONSE #
    ############

    features = {}

    # get response code
    response = get_data(sock, 1)
    success = struct.unpack('B', response)[0]
    if success != 0:
        print("Error occurred: %d" % success)
        sys.exit()

    # get images & masks
    data = get_data(sock, 4)
    image_names_length = struct.unpack('=I', data)[0]
    if image_names_length > 0:
        image_names = get_data(sock, image_names_length).split(",")
        for im_name in image_names:
            if im_name[:3] not in ['bl_', 'mk_', 'im_', 'ov_']:
                print "ERROR: Invalid image name: %s" % im_name
                sys.exit()

            data = get_data(sock, 6)
            im_w, im_h, bit_depth, binning = struct.unpack('=hhBB', data)

            if im_w == 0 or im_h == 0:
                # read from disk
                fn_len = struct.unpack('=B', get_data(sock, 1))[0]
                fn = str(get_data(sock, fn_len))
                features[im_name] = ip.open_image(fn)
            else:
                if bit_depth == 8:
                    data = get_data(sock, 4)
                    encoding_length = struct.unpack('I', data)[0]
                    png_data = get_data(sock, encoding_length)
                    features[im_name] = ip.decode_png(png_data)
                    num_pixels = (features[im_name].shape[0] *
                                  features[im_name].shape[1])
                    print "%s compression: %0.1f%%" % (
                        im_name, (100 * encoding_length) / float(num_pixels))
                elif bit_depth == 16:
                    pixel_data = get_data(sock, im_w * im_h * 2)
                    features[im_name] = np.frombuffer(pixel_data,
                                                      np.uint16).reshape(
                                                          im_h, im_w)
                elif bit_depth == 32:
                    pixel_data = get_data(sock, im_w * im_h * 4)
                    features[im_name] = np.frombuffer(pixel_data,
                                                      np.float32).reshape(
                                                          im_h, im_w)
                else:
                    print '****', im_name
    else:
        image_names = []

    # get numerical metrics
    response = get_data(sock, 4)
    string_size = struct.unpack('I', response)[0]
    if string_size > 0:
        feature_names = get_data(sock, string_size)
        feature_names = feature_names.split(',')
        num_features = len(feature_names)
        bytes_expected = num_features * 4
        feature_data = get_data(sock, bytes_expected)
        feature_data = list(np.frombuffer(feature_data, np.float32))
    else:
        feature_names = []
        feature_data = []

    # get string metrics
    string_size = struct.unpack('I', get_data(sock, 4))[0]
    if string_size > 0:
        feature_names += get_data(sock, string_size).split(',')
    string_size = struct.unpack('I', get_data(sock, 4))[0]
    if string_size > 0:
        feature_data += get_data(sock, string_size).split(',')

    metric_vals = zip(feature_names, feature_data)

    ###################
    # DISPLAY RESULTS #
    ###################
    metrics = {}
    for i in range(len(feature_names)):
        features[feature_names[i]] = feature_data[i]
        metrics[feature_names[i]] = feature_data[i]

    print "Returned images:"
    for image_name in image_names:
        print "  %s" % image_name
    print "Metrics:"
    pprint(metrics)

    t2 = timeit.default_timer()
    print('Total time: %0.03f seconds' % (t2 - t1))

    rgb = None
    view = None
    if "im_cropped_u8" in features:
        if mode == 80:
            rgb = perc.create_overlay(features)
        elif mode == 81:
            rgb = perc.create_overlay_multi(features)
        elif mode == 82:
            rgb = cz_cell.create_overlay(features)
        elif mode == 83:
            rgb = multi_cell.create_overlay(features)
        elif mode == 84:
            rgb = cz_wafer.create_overlay(features)
        elif mode == 85:
            if ('skip_features' not in params_dict
                    or params_dict['skip_features'] != 1):
                rgb = multi_wafer.create_overlay(features)
        elif mode == 86:
            rgb = x3.create_overlay(features)

    if False:
        # save cropped version for testing
        fn_cropped = os.path.join(r"C:\Users\Neil\BT\Data\cropped",
                                  os.path.split(fn)[1])
        ip.save_image(fn_cropped, features['im_cropped_u16'])

    if display and mode != 100:
        print 'Images:'
        if 'im_pl' in images:
            print '  1: Input PL image'
            im = images['im_pl']
            view = ImageViewer(im)
        e = 2
        for feature in features.keys():
            if (feature.startswith('im_') or feature.startswith('mk_')
                    or feature.startswith('ov_') or feature.startswith('bl_')):
                print '  %d: %s' % (e, feature)
                ImageViewer(features[feature])
                e += 1
        if rgb is not None:
            print '  %d: Colour overlay' % e
            e += 1
            ImageViewer(rgb)
        if view is not None:
            view.show()

    return image_names, metric_vals
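
The client above assumes send_data and get_data helpers that are not shown in this example. A minimal sketch, assuming simple length-exact framing over a plain TCP socket (Python 2 byte strings):

def send_data(sock, msg):
    # push the whole message; sendall blocks until every byte has been written
    sock.sendall(msg)

def get_data(sock, num_bytes):
    # read exactly num_bytes from the socket, looping over partial reads
    chunks = []
    received = 0
    while received < num_bytes:
        chunk = sock.recv(num_bytes - received)
        if not chunk:
            raise RuntimeError("socket closed before %d bytes were received" % num_bytes)
        chunks.append(chunk)
        received += len(chunk)
    return ''.join(chunks)
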
Example #14
    def handle(self):
        reload(parameters)

        # self.request is the TCP socket connected to the client
        # get the image dimensions, which are contained in the first two
        # unsigned shorts (two bytes each)
        start_time = str(datetime.datetime.now())
        mode = struct.unpack('B', self.get_data(1))[0]
        print('Request received at %s (mode=%d)' % (start_time, mode))

        if mode == 255:
            print('  Mode: Exit')
            self.server.shutdown()
            return

        if mode == 0:
            msg = struct.pack('=B', 0)
            self.send_data(msg)
            return

        # get input images
        image_desc_length = struct.unpack('=I', self.get_data(4))[0]
        if image_desc_length == 0:
            print "ERROR: No images passed as input"
            return
        image_names_in = self.get_data(image_desc_length).split(',')
        images = {}
        for im_name in image_names_in:
            data = self.get_data(6)
            width, height, bit_depth, binning = struct.unpack('=HHBB', data)
            num_pixels = width * height
            if num_pixels == 0:
                # read from disk
                fn_len = struct.unpack('=B', self.get_data(1))[0]
                fn = str(self.get_data(fn_len))
                images[im_name] = ip.open_image(fn)
            else:
                if bit_depth == 8:
                    pixel_data = self.get_data(num_pixels)
                    im_data = np.frombuffer(pixel_data, np.uint8)
                elif bit_depth == 16:
                    pixel_data = self.get_data(num_pixels * 2)
                    im_data = np.frombuffer(pixel_data, np.uint16)
                elif bit_depth == 32:
                    pixel_data = self.get_data(num_pixels * 4)
                    im_data = np.frombuffer(pixel_data, np.float32)
                images[im_name] = im_data.reshape(height,
                                                  width).astype(np.float32)

        # get numerical parameters
        data = self.get_data(4)
        param_desc_length = struct.unpack('=I', data)[0]
        if param_desc_length > 0:
            param_names = self.get_data(param_desc_length).split(",")
            num_params = len(param_names)
            param_data = self.get_data(num_params * 4)
            params_array = list(np.frombuffer(param_data, np.float32))
        else:
            param_names = []
            params_array = []

        # get string parameters
        data = self.get_data(4)
        param_desc_length = struct.unpack('=I', data)[0]
        if param_desc_length > 0:
            param_names += self.get_data(param_desc_length).split(",")
            param_vals_length = struct.unpack('=I', self.get_data(4))[0]
            params_array += self.get_data(param_vals_length).split(",")

        # override defaults in parameters.py
        for pn, pv in zip(param_names, params_array):
            if pn.upper() in dir(parameters):
                setattr(parameters, pn.upper(), pv)

        # store input parameters in the features dict
        param_names = ['input_param_' + pn for pn in param_names]
        features = dict(zip(param_names, params_array))
        if 'input_param_already_cropped' in features and int(
                features['input_param_already_cropped']) == 1:
            already_cropped = True
        else:
            already_cropped = False

        if 'input_param_return_uncropped' in features and int(
                features['input_param_return_uncropped']) == 1:
            return_uncropped = True
        else:
            return_uncropped = False

        if 'input_param_return_cropped' in features and int(
                features['input_param_return_cropped']) == 0:
            return_cropped = False
        else:
            return_cropped = True

        if 'input_param_return_outline' in features and int(
                features['input_param_return_outline']) == 1:
            return_outline = True
        else:
            return_outline = False

        # call image processing algorithm
        try:
            if mode == 10:
                print('  Mode: Hash Pattern correction')
                im_raw = images['im_pl'].astype(np.float32)
                im_corrected = FF.correct_hash_pattern(im_raw)
                features['im_corrected_u16'] = im_corrected.astype(np.uint16)
            elif mode == 40:
                print('  Mode: Block processing')
                im = images['im_pl'].astype(np.float32)
                block.feature_extraction(im,
                                         features,
                                         crop=not already_cropped)
                features['crop_left'] = features['_crop_bounds'][0]
                features['crop_right'] = features['_crop_bounds'][1]
                features['crop_top'] = features['_crop_bounds'][2]
                features['crop_bottom'] = features['_crop_bounds'][3]
                features['bl_cropped_u8'] = np.zeros_like(
                    features['im_cropped_u8'], np.uint8)

                if return_uncropped or return_outline:
                    left, right, top, bottom = features['_crop_bounds']
                    mask = np.ones_like(images['im_pl'], np.uint8)
                    mask[top:bottom, left:right] = 0
                    if abs(features['crop_rotation']) > 0.01:
                        h, w = mask.shape
                        rot_mat = cv2.getRotationMatrix2D(
                            (w // 2, h // 2), features['crop_rotation'] * -1,
                            1.0)
                        mask = cv2.warpAffine(mask,
                                              rot_mat, (w, h),
                                              flags=cv2.INTER_LINEAR,
                                              borderMode=cv2.BORDER_REPLICATE
                                              )  # .astype(np.uint8)
                    if return_uncropped:
                        features['bl_uncropped_u8'] = mask
            elif mode in [70, 71]:
                print('  Mode: Slugs')
                im = images['im_pl'].astype(np.float32)
                if 'input_param_rds_percent' not in features:
                    features['param_rds_percent'] = 50
                else:
                    features['param_rds_percent'] = int(
                        features['input_param_rds_percent'])
                if 'input_param_slug_radius' not in features:
                    features['param_radius_prior'] = 0
                else:
                    features['param_radius_prior'] = int(
                        features['input_param_slug_radius'])
                slugs.feature_extraction(im, features)
                update_corner_features(features, features)
                features['im_cropped_u8'] = (ip.scale_image(images['im_pl']) *
                                             255).astype(np.uint8)
                features['im_cropped_u16'] = images['im_pl'].astype(np.uint16)
                mask = features['bl_uncropped_u8']
                if not return_uncropped:
                    del features['bl_uncropped_u8']
            elif mode in [84, 85, 89]:
                if mode == 84:
                    print('  Mode: Mono wafer')
                    im = images['im_pl'].astype(np.float32)
                    features['_alg_mode'] = 'mono wafer'
                    crop_props = cropping.crop_wafer_cz(
                        im, create_mask=True, skip_crop=already_cropped)
                    features['corners'] = crop_props['corners']
                    features['_wafer_middle_orig'] = crop_props['center']
                    cropped = cropping.correct_rotation(
                        im,
                        crop_props,
                        pad=False,
                        border_erode=parameters.BORDER_ERODE_CZ,
                        fix_chamfer=False)
                    cz_wafer.feature_extraction(cropped,
                                                crop_props,
                                                features=features)
                    update_corner_features(features, crop_props)
                elif mode == 85:
                    print('  Mode: Multi wafer')
                    im = images['im_pl'].astype(np.float32)
                    features['_alg_mode'] = 'multi wafer'
                    if not already_cropped:
                        crop_props = cropping.crop_wafer(im, create_mask=True)
                        features['corners'] = crop_props['corners']
                        cropped = cropping.correct_rotation(
                            im,
                            crop_props,
                            pad=False,
                            border_erode=parameters.BORDER_ERODE)
                    else:
                        crop_props = {}
                        crop_props['estimated_width'] = im.shape[0]
                        crop_props['center'] = (im.shape[0] / 2,
                                                im.shape[1] / 2)
                        crop_props['corners'] = [
                            [0, 0],
                            [0, im.shape[1]],
                            [im.shape[0], im.shape[1]],
                            [im.shape[0], 0],
                        ]
                        crop_props['corners_floats'] = crop_props['corners']
                        crop_props['estimated_rotation'] = 0
                        crop_props['mask'] = np.ones_like(im, np.uint8)
                        cropped = im
                    multi_wafer.feature_extraction(cropped,
                                                   crop_props,
                                                   features=features)
                    multi_wafer.combined_features(features)
                    update_corner_features(features, crop_props)
                elif mode == 89:
                    print('  Mode: QC-C3')
                    features['_alg_mode'] = 'qc'
                    im = images['im_pl'].astype(np.float32)
                    crop_props = qc.feature_extraction(im, features)

                if return_uncropped:
                    features['bl_uncropped_u8'] = crop_props['mask']
            elif mode in [80, 81, 82, 83, 86, 87, 88]:
                if mode == 80:
                    print('  Mode: PERC mono')
                    im = images['im_pl'].astype(np.float32)
                    features['_alg_mode'] = 'perc mono'
                    perc.feature_extraction(im,
                                            features,
                                            already_cropped=already_cropped)
                elif mode == 81:
                    print('  Mode: PERC multi')
                    im = images['im_pl'].astype(np.float32)
                    features['_alg_mode'] = 'perc multi'
                    perc.feature_extraction_multi(
                        im, features, already_cropped=already_cropped)
                elif mode == 82:
                    print('  Mode: Mono cells')
                    im = images['im_pl'].astype(np.float32)
                    features['_alg_mode'] = 'mono cell'
                    cz_cell.feature_extraction(im,
                                               features,
                                               skip_crop=already_cropped)
                elif mode == 83:
                    print('  Mode: Multi cells')
                    im = images['im_pl'].astype(np.float32)
                    features['_alg_mode'] = 'multi cell'
                    multi_cell.feature_extraction(
                        im, features, already_cropped=already_cropped)
                elif mode == 86:
                    print('  Mode: X3')
                    features['_alg_mode'] = 'x3'
                    im = images['im_pl'].astype(np.float32)
                    x3.feature_extraction(im,
                                          features,
                                          already_cropped=already_cropped)
                elif mode == 87:
                    print('  Mode: Stripe (mono)')
                    features['_alg_mode'] = 'stripe'
                    features['_cell_type'] = 'mono'
                    im = images['im_pl'].astype(np.float32)
                    stripe.feature_extraction(im,
                                              features,
                                              skip_crop=already_cropped)
                elif mode == 88:
                    print('  Mode: Stripe (multi)')
                    features['_alg_mode'] = 'stripe'
                    features['_cell_type'] = 'multi'
                    im = images['im_pl'].astype(np.float32)
                    stripe.feature_extraction(im,
                                              features,
                                              skip_crop=already_cropped)
                update_corner_features(features, features)

                if return_uncropped:
                    mask = features['bl_cropped_u8']
                    im_h, im_w = im.shape
                    if 'cell_rotated' in features and features['cell_rotated']:
                        if parameters.ORIGINAL_ORIENTATION:
                            mask = mask[:, ::-1].T
                        im_h = im.shape[1]
                        im_w = im.shape[0]

                    # undo rotation and cropping
                    mask = np.pad(mask, ((features['crop_top'],
                                          im_h - features['crop_bottom']),
                                         (features['crop_left'],
                                          im_w - features['crop_right'])),
                                  mode='constant',
                                  constant_values=((1, 1), (1, 1)))

                    # create a rotated version of the full image
                    mask_rotated = np.empty(im.shape, np.float32)
                    h, w = mask.shape
                    if ('cell_rotated' not in features
                            or not features['cell_rotated']):
                        rot_mat = cv2.getRotationMatrix2D(
                            (w // 2, h // 2), -features['crop_rotation'], 1.0)
                    else:
                        rot_mat = cv2.getRotationMatrix2D(
                            (h // 2, h // 2), -features['crop_rotation'], 1.0)
                    cv2.warpAffine(mask.astype(np.float32),
                                   rot_mat, (im.shape[1], im.shape[0]),
                                   flags=cv2.INTER_NEAREST,
                                   borderMode=cv2.BORDER_CONSTANT,
                                   dst=mask_rotated,
                                   borderValue=1)
                    #print mask.shape, im.shape
                    assert mask_rotated.shape == im.shape
                    features['bl_uncropped_u8'] = np.round(
                        mask_rotated).astype(np.uint8)
            elif mode == 90:
                print('  Mode: plir')
                im_sp = images['im_sp'].astype(np.float32)
                im_lp = images['im_lp'].astype(np.float32)
                if 'im_xfer' not in images:
                    print "ERROR: Transfer functions not found"
                    self.send_data(struct.pack('=B', 6))
                    return

                spline_plir, spline_nf, spline_sp, spline_lp = block.interpolate_transfer(
                    images['im_xfer'])

                if 'im_pl' in images:
                    im_pl = images['im_pl'].astype(np.float32)
                    plc_found = block.plir(im_sp, im_lp, im_pl, features,
                                           spline_plir, spline_nf)
                else:
                    plc_found = block.plir2(im_sp, im_lp, features,
                                            spline_plir, spline_sp)
                if not plc_found:
                    self.send_data(struct.pack('=B', 5))
                    return

                if return_uncropped or return_outline:
                    left, right, top, bottom = features['_crop_bounds']
                    if 'im_pl' in images:
                        left *= 2
                        right *= 2
                        top *= 2
                        bottom *= 2
                        mask = np.ones_like(images['im_pl'], np.uint8)
                    else:
                        mask = np.ones_like(images['im_sp'], np.uint8)

                    mask[top:bottom, left:right] = 0
                    if abs(features['crop_rotation']) > 0.01:
                        h, w = mask.shape
                        rot_mat = cv2.getRotationMatrix2D(
                            (w // 2, h // 2), features['crop_rotation'] * -1,
                            1.0)
                        mask = cv2.warpAffine(mask,
                                              rot_mat, (w, h),
                                              flags=cv2.INTER_LINEAR,
                                              borderMode=cv2.BORDER_REPLICATE
                                              )  # .astype(np.uint8)
                    if return_uncropped:
                        features['bl_uncropped_u8'] = mask
            elif mode == 92:
                print('  Mode: Distance between brick markers')
                im = images['im_pl'].astype(np.float32)
                block.MarkerLineDist(im, features)
            elif mode == 95:
                print('  Mode: Pixels per mm')
                im = images['im_pl'].astype(np.float32)
                resolution.resolution(im, features)
            elif mode == 100:
                print('  Mode: M1')
                if 'im_el' in images:
                    im_el = images['im_el'].astype(np.float32)
                else:
                    im_el = None
                im_pl = images['im_pl'].astype(np.float32)
                m1.feature_extraction(im_pl, im_el, features)
            else:
                print("ERROR: Mode %d not supported" % mode)
                self.send_data(struct.pack('=B', 1))
                return

            if not return_cropped:
                for im_name in [
                        'im_cropped_u16', 'im_cropped_u8', 'bl_cropped_u8',
                        "im_cropped_sp_u8", 'im_cropped_nf_u8',
                        'im_cropped_sp_u16', 'im_cropped_nf_u16',
                        'im_cropped_lp_u16'
                ]:
                    if im_name in features:
                        del features[im_name]

            if return_outline:
                if mode in [40, 70, 90]:
                    binary_struct = ndimage.generate_binary_structure(2, 1)
                    foreground = 1 - mask
                    outline = foreground - ndimage.binary_erosion(
                        foreground, binary_struct)
                    features['bl_crop_outline_u8'] = outline.astype(np.uint8)
                else:
                    features['bl_crop_outline_u8'] = cropping.draw_crop_box(
                        im, features, mode="mask")

        except cropping.WaferMissingException:
            self.send_data(struct.pack('=B', 2))
            return
        except cell.MissingBusbarsException:
            self.send_data(struct.pack('=B', 3))
            return
        except cell.CellFingersException:
            self.send_data(struct.pack('=B', 4))
            return
        except:
            traceback.print_exc(file=sys.stdout)
            self.send_data(struct.pack('=B', 1))
            return

        # success
        msg = struct.pack('=B', 0)
        self.send_data(msg)

        # return images
        image_names = []
        for f in features.keys():
            if f.split('_')[-1] not in ['u8', 'u16', 'f32'] or f[0] == '_':
                continue
            if f[:3] not in ['bl_', 'mk_', 'im_', 'ov_']:
                print "ERROR: invalid image name: %s" % f

            image_names.append(f)
        image_names.sort()

        image_names_send = ','.join(image_names)

        self.send_data(struct.pack('I', len(image_names_send)))
        self.send_data(image_names_send)
        for im_name in image_names:
            fields = im_name.split('_')
            if fields[-1] == "u8":
                bit_depth = 8
            elif fields[-1] == "u16":
                bit_depth = 16
            elif fields[-1] == "f32":
                bit_depth = 32

            # convert binary masks from 0,1 to 0,255
            if fields[0] == 'mk' and bit_depth == 8:
                features[im_name] *= 255

            if ('input_param_im_output_path' in features
                    and len(features['input_param_im_output_path']) > 0
                    and bit_depth in [8, 16]):
                # send back as path.
                msg = struct.pack('=hhBB', 0, 0, 0, 1)
                if bit_depth == 8:
                    ext = '.png'
                else:
                    ext = '.tif'
                fn_out = os.path.join(features['input_param_im_output_path'],
                                      im_name + ext)
                ip.save_image(fn_out, features[im_name], scale=False)
                fn_len = len(fn_out)
                msg += struct.pack('=B', fn_len)
                msg += fn_out
            else:
                # image data
                height, width = features[im_name].shape
                binning = 1
                msg = struct.pack('=hhBB', width, height, bit_depth, binning)

                if fields[-1] == "u8":
                    png = ip.encode_png(features[im_name])
                    msg += struct.pack('=I', len(png))
                    msg += png
                elif fields[-1] in ["u16", "f32"]:
                    msg += features[im_name].tostring()

            self.send_data(msg)

        # numerical features
        feature_names = []
        feature_vals = []
        for k in features.keys():
            if (k in ['cropped', 'corners', 'filename', 'center']
                    or k.startswith("bl_") or k.startswith('_')
                    or k.startswith("mask_") or k.startswith("mk_")
                    or k.startswith("im_") or k.startswith("ov_")):
                continue
            if type(features[k]) is str:
                continue
            feature_names.append(k)
        feature_names.sort()
        for feature in feature_names:
            feature_vals.append(float(features[feature]))
        feature_names = ','.join(feature_names)
        feature_vals = np.array(feature_vals, np.float32)
        bytes_to_send = len(feature_names)
        self.send_data(struct.pack('=I', bytes_to_send))
        self.send_data(feature_names)
        msg = feature_vals.ravel().tostring()
        self.send_data(msg)

        # string features
        feature_names = []
        feature_vals = []
        for k in features.keys():
            if k.startswith('_'):
                continue
            if type(features[k]) is not str:
                continue
            feature_names.append(k)
        feature_names.sort()
        for feature in feature_names:
            feature_vals.append(features[feature])
        feature_names = ','.join(feature_names)
        feature_vals = ','.join(feature_vals)
        bytes_to_send = len(feature_names)
        self.send_data(struct.pack('=I', bytes_to_send))
        if bytes_to_send > 0:
            self.send_data(feature_names)
        bytes_to_send = len(feature_vals)
        self.send_data(struct.pack('=I', bytes_to_send))
        if bytes_to_send > 0:
            self.send_data(feature_vals)

        return
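
handle() above is written as a method of a request handler. A minimal sketch of how such a handler might be hosted with the Python 2 SocketServer module; the class name, the helper methods, and the HOST/PORT constants are assumptions rather than code from the original example:

import SocketServer

class ImageProcessingHandler(SocketServer.BaseRequestHandler):
    def get_data(self, num_bytes):
        # read exactly num_bytes from the client socket, looping over partial reads
        chunks = []
        received = 0
        while received < num_bytes:
            chunk = self.request.recv(num_bytes - received)
            if not chunk:
                raise RuntimeError("client closed the connection")
            chunks.append(chunk)
            received += len(chunk)
        return ''.join(chunks)

    def send_data(self, msg):
        # self.request is the TCP socket connected to the client
        self.request.sendall(msg)

    # the handle() method shown above would be defined here

if __name__ == "__main__":
    # ThreadingTCPServer runs each request in its own thread, so handle()
    # can call self.server.shutdown() (mode 255) without deadlocking
    server = SocketServer.ThreadingTCPServer((HOST, PORT), ImageProcessingHandler)
    server.serve_forever()
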
Example #15
def correct_waffle(im):
    # load the cached FFT of the hash (waffle) pattern; compute and save it if missing
    fn = "fft_hash_pattern.npy"
    if os.path.isfile(fn):
        fft_pattern = np.load(fn)
    else:
        # isolate waffle pattern
        ff = ip.open_image(
            r"C:\Users\Neil\Dropbox (Personal)\BT\Data\R2 FFT\FF Wafer Images\std - ff.tif"
        ).astype(np.float32) / 4.0
        stack = ip.open_image(
            r"C:\Users\Neil\Dropbox (Personal)\BT\Data\R2 FFT\FF Wafer Images\SUM_Stack.tif"
        ).astype(np.float32) / 4.0
        pattern = stack - ff
        pattern -= pattern.mean()
        pattern /= pattern.std()

        # FFT
        fft_pattern = fftshift(cv2.dft(pattern, flags=cv2.DFT_COMPLEX_OUTPUT))

        # smooth
        fft_pattern_mag = cv2.magnitude(fft_pattern[:, :, 0],
                                        fft_pattern[:, :, 1])
        fft_pattern_smooth = cv2.GaussianBlur(cv2.medianBlur(fft_pattern_mag,
                                                             ksize=5),
                                              ksize=(0, 0),
                                              sigmaX=5)
        fft_pattern = cv2.log(fft_pattern_smooth)

        # remove non-peaks
        fft_pattern -= np.mean(fft_pattern)
        fft_pattern[fft_pattern < 0] = 0

        np.save(fn, fft_pattern)

    # mask for the central '+' (zero-frequency row/column) and a low-frequency disc
    mask_edges = np.zeros_like(fft_pattern, np.bool)
    T = 2
    h, w = mask_edges.shape
    mask_edges[h // 2 - T:h // 2 + T + 1, :] = True
    mask_edges[:, w // 2 - T:w // 2 + T + 1] = True
    RADIUS = 50
    ys, xs = draw.circle(h // 2, w // 2, RADIUS)
    mask_edges[ys, xs] = True

    if False:
        # view = ImageViewer(fft_pattern_unmasked)
        view = ImageViewer(fft_pattern)
        view.show()
        sys.exit()

    # fft of wafer image
    fft = fftshift(cv2.dft(im, flags=cv2.DFT_COMPLEX_OUTPUT))
    fft_mag = cv2.magnitude(fft[:, :, 0], fft[:, :, 1])
    fft_phase = cv2.phase(fft[:, :, 0], fft[:, :, 1])
    fft_log = cv2.log(fft_mag)
    fft_wafer_smooth = cv2.GaussianBlur(cv2.medianBlur(fft_log, ksize=5),
                                        ksize=(0, 0),
                                        sigmaX=5)

    if True:
        view = ImageViewer(fft_pattern)
        view = ImageViewer(fft_wafer_smooth)
        view.show()
        sys.exit()

    # find fit between background of waffle FFT and wafer FFT
    # 1. fit background
    background_mask = ((fft_pattern == 0) & (~mask_edges))
    peak_mask = ((fft_pattern > 0.2) & (~mask_edges))

    if False:
        view = ImageViewer(background_mask)
        view = ImageViewer(peak_mask)
        view.show()
        sys.exit()

    # 2. fit peaks
    pattern_vals = fft_pattern[peak_mask]
    wafer_vals = fft_wafer_smooth[peak_mask]

    def dist(params, pattern_vals, wafer_vals):
        shift, scale = params

        pattern_vals_fit = (pattern_vals * scale) + shift

        return ((pattern_vals_fit - wafer_vals)**2).mean()

    from scipy import optimize
    params = (wafer_vals.mean(), 1)
    t1 = timeit.default_timer()
    shift, scale = optimize.fmin(dist, params, args=(pattern_vals, wafer_vals))
    t2 = timeit.default_timer()

    if False:
        print "Optimization time: ", t2 - t1
        print shift, scale
        fft_fit = (fft_pattern * scale) + shift
        vmin = min(fft_fit.min(), fft_wafer_smooth.min())
        vmax = max(fft_fit.max(), fft_wafer_smooth.max())

        view = ImageViewer(fft_fit, vmin=vmin, vmax=vmax)
        view = ImageViewer(fft_wafer_smooth, vmin=vmin, vmax=vmax)
        view.show()
        sys.exit()

    # apply correction
    correction = fft_pattern * -scale
    correction[mask_edges] = 0
    corrected_log = fft_log + correction
    corrected_mag = np.e**corrected_log
    fft_real = np.cos(fft_phase) * corrected_mag
    fft_imag = np.sin(fft_phase) * corrected_mag
    fft_corrected = np.dstack((fft_real, fft_imag))
    im_corrected = cv2.idft(ifftshift(fft_corrected),
                            flags=cv2.DFT_REAL_OUTPUT | cv2.DFT_SCALE)

    if False:
        view = ImageViewer(im)
        view = ImageViewer(im_corrected)
        view.show()
        sys.exit()

    # create a wafer mask (to prevent ringing at the edges); the wafer
    # outline and rotation are found using the cropping routine
    if True:
        try:
            crop_props = cropping.crop_wafer(im, create_mask=True)
            pixel_ops.CopyMaskF32(im, im_corrected, crop_props['mask'], 0)
        except:
            print("WARNING: Crop failed")
            return im

    return im_corrected
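
A possible driver for the function above; the file name is a placeholder, not a path from the original examples:

if __name__ == "__main__":
    # "raw_wafer_pl.tif" is a hypothetical input image
    im = ip.open_image("raw_wafer_pl.tif").astype(np.float32)
    im_corrected = correct_waffle(im)
    view = ImageViewer(im)
    ImageViewer(im_corrected)
    view.show()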