Example #1
def test_invert_bool():
    dtype = 'bool'
    image = np.zeros((3, 3), dtype=dtype)
    image[1, :] = dtype_limits(image)[1]
    expected = np.zeros((3, 3), dtype=dtype) + dtype_limits(image)[1]
    expected[1, :] = 0
    result = invert(image)
    assert_array_equal(expected, result)
Example #2
    def test_area_closing(self):
        "Test for Area Closing (2 thresholds, all types)"

        # original image
        img = np.array(
            [[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
             [240, 200, 200, 240, 200, 240, 200, 200, 240, 240, 200, 240],
             [240, 200, 40, 240, 240, 240, 240, 240, 240, 240, 40, 240],
             [240, 240, 240, 240, 100, 240, 100, 100, 240, 240, 200, 240],
             [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
             [200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],
             [200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 40],
             [200, 200, 200, 100, 200, 200, 200, 240, 255, 255, 255, 255],
             [200, 200, 200, 100, 200, 200, 200, 240, 200, 200, 255, 255],
             [200, 200, 200, 200, 200, 40, 200, 240, 240, 100, 255, 255],
             [200, 40, 255, 255, 255, 40, 200, 255, 200, 200, 255, 255],
             [200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255]],
            dtype=np.uint8)

        # expected area closing with area 2
        expected_2 = np.array(
            [[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
             [240, 200, 200, 240, 240, 240, 200, 200, 240, 240, 200, 240],
             [240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 200, 240],
             [240, 240, 240, 240, 240, 240, 100, 100, 240, 240, 200, 240],
             [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
             [200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],
             [200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 255],
             [200, 200, 200, 100, 200, 200, 200, 240, 255, 255, 255, 255],
             [200, 200, 200, 100, 200, 200, 200, 240, 200, 200, 255, 255],
             [200, 200, 200, 200, 200, 40, 200, 240, 240, 200, 255, 255],
             [200, 200, 255, 255, 255, 40, 200, 255, 200, 200, 255, 255],
             [200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255]],
            dtype=np.uint8)

        # expected area closing with area 4
        expected_4 = np.array(
            [[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
             [240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 240, 240],
             [240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 240, 240],
             [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
             [240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
             [200, 200, 200, 200, 200, 200, 200, 240, 240, 240, 255, 255],
             [200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 255],
             [200, 200, 200, 200, 200, 200, 200, 240, 255, 255, 255, 255],
             [200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],
             [200, 200, 200, 200, 200, 200, 200, 240, 240, 200, 255, 255],
             [200, 200, 255, 255, 255, 200, 200, 255, 200, 200, 255, 255],
             [200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255]],
            dtype=np.uint8)

        # _full_type_test runs the same test over many image dtypes.
        _full_type_test(img, 2, expected_2, area_closing, connectivity=2)
        _full_type_test(img, 4, expected_4, area_closing, connectivity=2)

        P, S = max_tree(invert(img), connectivity=2)
        _full_type_test(img, 4, expected_4, area_closing,
                        parent=P, tree_traverser=S)
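
A minimal direct-call sketch (not part of the original test), assuming the img and expected_2 arrays above and a skimage version that ships area_closing:

from skimage.morphology import area_closing

out = area_closing(img, area_threshold=2, connectivity=2)
# out should match expected_2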
Example #3
def test_invert_bool():
    dtype = 'bool'
    image = np.zeros((3, 3), dtype=dtype)
    upper_dtype_limit = dtype_limits(image, clip_negative=False)[1]
    image[1, :] = upper_dtype_limit
    expected = np.zeros((3, 3), dtype=dtype) + upper_dtype_limit
    expected[1, :] = 0
    result = invert(image)
    assert_array_equal(expected, result)
Example #4
def test_invert_float64_unsigned():
    dtype = 'float64'
    image = np.zeros((3, 3), dtype=dtype)
    lower_dtype_limit, upper_dtype_limit = \
        dtype_limits(image, clip_negative=True)
    image[2, :] = upper_dtype_limit
    expected = np.zeros((3, 3), dtype=dtype)
    expected[0, :] = upper_dtype_limit
    expected[1, :] = upper_dtype_limit
    result = invert(image)
    assert_array_equal(expected, result)
Example #5
def test_invert_int8():
    dtype = 'int8'
    image = np.zeros((3, 3), dtype=dtype)
    lower_dtype_limit, upper_dtype_limit = \
        dtype_limits(image, clip_negative=False)
    image[1, :] = lower_dtype_limit
    image[2, :] = upper_dtype_limit
    expected = np.zeros((3, 3), dtype=dtype)
    expected[2, :] = lower_dtype_limit
    expected[1, :] = upper_dtype_limit
    expected[0, :] = -1
    result = invert(image)
    assert_array_equal(expected, result)
Example #6
    def segment(self, img, include_intermediate_results=False, **kwargs):
        assert img.ndim == 3, 'Expecting 3D image, got shape {}'.format(
            img.shape)
        img = ndi.median_filter(img, size=(1, 3, 3))
        img = img_as_float(img)
        img = util.invert(img)

        img_mz = img.max(axis=0)
        img_mz = exposure.rescale_intensity(img_mz, out_range=(0, 1))
        peaks, img_dog, sigmas = blob_dog(img_mz,
                                          min_sigma=8,
                                          max_sigma=128,
                                          sigma_ratio=1.6,
                                          overlap=.25,
                                          threshold=1.75)

        img_pk = np.zeros(img_mz.shape, dtype=bool)
        img_pk[peaks[:, 0].astype(int), peaks[:, 1].astype(int)] = True
        img_pk = morphology.label(img_pk)

        # Get mask to conduct segmentation over
        img_pm = self.get_primary_object_mask(
            img, morphology.binary_dilation(img_pk > 0, morphology.disk(32)))

        img_dt = ndi.distance_transform_edt(img_pm)

        # Use propagation rather than watershed as it often captures a much more accurate boundary
        img_obj = propagate.propagate(img_mz, img_pk, img_pm,
                                      .01)[0].astype(np.uint16)
        img_bnd = img_obj * segmentation.find_boundaries(
            img_obj, mode='inner', background=0)

        img_seg = [img_obj, img_obj, img_bnd, img_bnd]
        if include_intermediate_results:
            to_uint16 = lambda im: exposure.rescale_intensity(
                im, out_range='uint16').astype(np.uint16)
            img_seg += [
                to_uint16(img_mz),
                to_uint16(img_dog[0]),
                to_uint16(img_dog[1]),
                img_pm.astype(np.uint16),
                img_pk.astype(np.uint16)
            ]

        # Stack and add a new axis to give (z, ch, h, w)
        img_seg = np.stack(img_seg)[np.newaxis]
        assert img_seg.dtype == np.uint16, 'Expecting 16bit result, got type {}'.format(
            img_seg.dtype)
        assert img_seg.ndim == 4, 'Expecting 4D result, got shape {}'.format(
            img_seg.shape)
        return img_seg
Example #7
def lineOrder(img, n_steps, line_thresh=50, len_thresh=60, wsz=10):
    img = invert(img)
    img[img <= 127] = 0
    img[img > 127] = 1
    i2 = img.copy() * 255
    i2 = cv2.cvtColor(i2, cv2.COLOR_GRAY2RGB)
    lr, lc = hough(img)
    skel = skeletonize(img).astype(np.float32)
    skt = skel.copy()
    S = min(line_thresh, len(lr))
    rlist = []
    clist = []
    for sel in trange(S):
        lrs = []
        lcs = []
        resp = []
        for i in range(len(lr)):
            lsz = len(lr[i])
            r = []
            for j in range(lsz):
                x = lr[i][j]
                y = lc[i][j]
                r.append(1 if np.sum(skt[x:x + wsz, y:y + wsz]) > 0 else 0)
            r = np.array(r)
            # dp[j] = length of the run of consecutive skeleton hits ending at j
            dp = np.zeros(r.shape, np.uint8)
            dp[0] = r[0]
            for j in range(1, r.shape[0]):
                dp[j] = r[j] * (dp[j - 1] + 1)
            # keep the longest contiguous segment of this Hough line
            mxi = np.argmax(dp)
            lrs.append(lr[i][mxi - dp[mxi] + 1:mxi + 1])
            lcs.append(lc[i][mxi - dp[mxi] + 1:mxi + 1])
            resp.append(dp[mxi])
        bind = np.argsort(resp)[-1]
        if resp[bind] < len_thresh:
            break
        lre, lce = eline2(lrs[bind], lcs[bind], img, s=wsz)
        lrp, lcp = eline2(lrs[bind], lcs[bind], img, s=10)
        skt[lre, lce] = 0
        rlist.append(lrp)
        clist.append(lcp)
    step = len(rlist) // n_steps
    if step == 0:
        step += 1
    splits = list(range(0, len(rlist), step))
    splits.append(len(rlist))
    rans = []
    cans = []
    for i in range(len(splits) - 1):
        rans.append(rlist[splits[i]:splits[i + 1]])
        cans.append(clist[splits[i]:splits[i + 1]])
    return rans, cans
Example #8
    def test_diameter_closing(self):
        "Test for Diameter Closing (2 thresholds, all types)"
        img = np.array([[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],
                        [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],
                        [93, 63, 63, 63, 63, 86, 86, 86, 87, 43, 43, 91],
                        [92, 89, 88, 86, 85, 85, 84, 85, 85, 43, 43, 89],
                        [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
                        [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
                        [90, 88, 86, 84, 83, 83, 82, 83, 83, 84, 86, 88],
                        [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
                        [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
                        [92, 89, 23, 23, 85, 85, 84, 85, 85, 3, 3, 89],
                        [93, 91, 23, 23, 87, 86, 86, 86, 87, 88, 3, 91],
                        [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93]],
                       dtype=np.uint8)

        ex2 = np.array([[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],
                        [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],
                        [93, 63, 63, 63, 63, 86, 86, 86, 87, 43, 43, 91],
                        [92, 89, 88, 86, 85, 85, 84, 85, 85, 43, 43, 89],
                        [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
                        [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
                        [90, 88, 86, 84, 83, 83, 83, 83, 83, 84, 86, 88],
                        [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
                        [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
                        [92, 89, 23, 23, 85, 85, 84, 85, 85, 3, 3, 89],
                        [93, 91, 23, 23, 87, 86, 86, 86, 87, 88, 3, 91],
                        [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93]],
                       dtype=np.uint8)

        ex4 = np.array([[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],
                        [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],
                        [93, 63, 63, 63, 63, 86, 86, 86, 87, 84, 84, 91],
                        [92, 89, 88, 86, 85, 85, 84, 85, 85, 84, 84, 89],
                        [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
                        [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
                        [90, 88, 86, 84, 83, 83, 83, 83, 83, 84, 86, 88],
                        [90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
                        [91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
                        [92, 89, 84, 84, 85, 85, 84, 85, 85, 84, 84, 89],
                        [93, 91, 84, 84, 87, 86, 86, 86, 87, 88, 84, 91],
                        [95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93]],
                       dtype=np.uint8)

        # _full_type_test runs the same test over many image dtypes.
        _full_type_test(img, 2, ex2, diameter_closing, connectivity=2)
        _full_type_test(img, 4, ex4, diameter_closing, connectivity=2)

        P, S = max_tree(invert(img), connectivity=2)
        _full_type_test(img, 4, ex4, diameter_closing,
                        parent=P, tree_traverser=S)
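
A minimal sketch, assuming the arrays above: the precomputed max-tree (P, S) of the inverted image can be handed to diameter_closing directly, avoiding a rebuild of the tree on every call:

from skimage.morphology import diameter_closing, max_tree
from skimage.util import invert

P, S = max_tree(invert(img), connectivity=2)
out = diameter_closing(img, diameter_threshold=4, connectivity=2,
                       parent=P, tree_traverser=S)
# out should match ex4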
Example #9
def func_images_skeletonize(images, random_state, parents, hooks):
    results = []
    for img in images:
        # make it a binary image
        img = np.squeeze(img)
        thresh = threshold_otsu(img)
        inv_binary = img < thresh  # `<` instead of the usual `>` inverts the binary image
        inv_skeleton = morphology.skeletonize(inv_binary)
        # formatting
        skeleton = util.invert(inv_skeleton) * 255
        skeleton = np.expand_dims(skeleton, axis=2)
        skeleton = skeleton.astype(np.uint8)
        results.append(skeleton)
    return results
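
The (images, random_state, parents, hooks) signature matches imgaug's Lambda augmenter, so the function above can presumably be used like this hedged sketch (assumes imgaug is installed and images is a list of HxWx1 uint8 arrays):

import imgaug.augmenters as iaa

aug = iaa.Lambda(func_images=func_images_skeletonize)
images_skeletonized = aug.augment_images(images)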
Example #10
    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot

        ### input A (label maps)
        if opt.primitive != "seg_edges":
            dir_A = "_" + opt.primitive
            self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A)
            self.A_paths = sorted(make_dataset(self.dir_A))
            self.A = Image.open(self.A_paths[0])

        else:
            self.dir_A = os.path.join(opt.dataroot, opt.phase + "_seg")
            self.A_paths = sorted(make_dataset(self.dir_A))
            # the seg input will be saved as "A"
            self.A = Image.open(self.A_paths[0])
            self.dir_A_edges = os.path.join(opt.dataroot, opt.phase + "_edges")
            if not os.path.exists(self.dir_A_edges):
                os.mkdir(self.dir_A_edges)
            self.A_paths_edges = sorted(make_dataset(self.dir_A_edges))
            self.A_edges = Image.open(
                self.A_paths_edges[0]) if self.A_paths_edges else None

        ### input B (real images)
        if opt.isTrain or opt.use_encoded_image:
            dir_B = '_B' if self.opt.label_nc == 0 else '_img'
            self.dir_B = os.path.join(opt.dataroot, opt.phase + dir_B)
            self.B_paths = sorted(make_dataset(self.dir_B))
            self.B = Image.open(self.B_paths[0]).convert('RGB')
            if opt.primitive == "seg_edges" and not self.A_edges:
                self.A_edges = Image.fromarray(
                    util.invert(
                        feature.canny(rgb2gray(np.array(self.B)), sigma=0.5)))

        self.adjust_input_size(opt)

        ### instance maps
        if not opt.no_instance:
            self.dir_inst = os.path.join(opt.dataroot, opt.phase + '_inst')
            self.inst_paths = sorted(make_dataset(self.dir_inst))

        ### load precomputed instance-wise encoded features
        if opt.load_features:
            self.dir_feat = os.path.join(opt.dataroot, opt.phase + '_feat')
            print('----------- loading features from %s ----------' %
                  self.dir_feat)
            self.feat_paths = sorted(make_dataset(self.dir_feat))
        self.dataset_size = len(self.A_paths)
Example #11
def reverse_img(img):
    """
    Inverts the given image.

    param:
    img - array of the image to be processed

    returns:
    reverse_img - array of the inverted image (first three channels only)
    """
    reverse_img = np.asarray(util.invert(img), dtype='uint8')
    return reverse_img[:, :, 0:3]
Example #12
def fillGreen(districtName):
    global year
    imagenir = imread('../Imgs/LayersNIR/%s.png' % districtName, mode='RGB')
    imagergb = imread('../Imgs/LayersRGB %d/%s.png' % (year, districtName),
                      mode='RGB')

    invnir = imagenir[:, :, 0].astype('float32') / 255.0
    imgred = imagergb[:, :, 0].astype('float32') / 255.0

    ndvi = NDVI(imgred, util.invert(invnir))
    greeniness = Greeniness(imagergb)
    dmap = DMAP(imgred, invnir)
    print(year, districtName, ndvi, greeniness, dmap)
    return (ndvi, greeniness, dmap)
Example #13
def cleanliorimage(alignedpath):
    rescale = MinMaxScaler(feature_range=(0, 1))
    pt = QuantileTransformer(output_distribution='normal')
    img = skio.imread(alignedpath)
    img = pt.fit_transform(img)
    img = rescale.fit_transform(img)
    img = exposure.equalize_adapthist(img, clip_limit=0.05)
    imrescaled = cv2.resize(img, dsize=(1024, 1024),
                            interpolation=cv2.INTER_AREA)
    imrescaled = util.invert(imrescaled)
    imrescaled = imrescaled * (255**2)
    imrescaled = imrescaled.astype('u2')
    # skio.imsave(os.path.join('/'.join(alignedpath.split('/')[:-1]), "inference/") + alignedpath.split('/')[-1], imrescaled)
    return imrescaled
Example #14
def predict_classified_image(encoder_data_file_path=r"D:\encoded_data.npz",
                             encoder_file_path="encoder.h5",
                             rec_input_path=r"D:\img_test",
                             out_put_path=r"D:\img_out"):
    '''
    Classifies data that has already been roughly pre-sorted but not manually
    labeled. Since the model is already trained, this step is optional.
    :param encoder_data_file_path: path to the encoder-encoded training data
    :param encoder_file_path: path to the encoder model file
    :param rec_input_path: directory of image folders to recognize
    :param out_put_path: output directory for the classified images
    :return:
    '''
    labeled_data = np.load("D:/data.npz")
    x_train, y_train = labeled_data['x_train'], labeled_data['y_train']
    import keras
    encoder = keras.models.load_model(encoder_file_path)
    input_folders = os.listdir(rec_input_path)
    x_train_coded = np.load(encoder_data_file_path)['arr_0']
    tibetan_word_code = []
    with open("words_Titan.txt") as f:
        for line in f.readlines():
            tibetan_word_code.append(line.strip())

    for i, folder in enumerate(input_folders):
        img_folder_full_path = rec_input_path + os.sep + folder
        img_name = os.listdir(img_folder_full_path)[0]
        rec_img_path = rec_input_path + os.sep + folder + os.sep + img_name
        img = io.imread(rec_img_path, as_grey=True)
        img_resize = transform.resize(util.invert(img), (60, 60),
                                      mode='reflect')
        img2 = np.reshape(img_resize, (1, 60, 60, 1))
        encoded_img = encoder.predict(img2)
        lbs = []
        for img_t in x_train_coded:
            lbs.append(cosine_similarity_2(encoded_img, img_t))
        mx = np.argmax(lbs)

        if lbs[mx] > 0.9:
            out_classified_path = tibetan_word_code[y_train[mx]]
        else:
            out_classified_path = "unrecognized"

        for img_file in os.listdir(img_folder_full_path):
            img_full_path = os.path.join(img_folder_full_path, img_file)
            img_out_put_path = os.path.join(out_put_path, out_classified_path)
            if not os.path.exists(img_out_put_path):
                os.mkdir(img_out_put_path)
            copyfile(img_full_path, os.path.join(img_out_put_path, img_file))

        print("%d of %d" % (i, len(input_folders)))
Example #15
    def invert_imgs(self, set=[], labels=[]):
        """Inverts each image"""
        print('Inverting images...')
        out_set = []
        out_labels = []
        for i in range(0, len(set)):
            out_set.append(set[i])
            out_labels.append(labels[i])

            inverted_img = util.invert(set[i])
            out_set.append(inverted_img)
            out_labels.append(labels[i])

        return np.asarray(out_set), np.asarray(out_labels)
Example #16
def add_symbol_to_image(img,
                        folder,
                        choices,
                        padding,
                        minsize,
                        maxsize,
                        bpower=False,
                        bsmall=False,
                        bnom=False,
                        bden=False,
                        width=False):
    choice = np.random.randint(len(choices))
    symbol_img = io.imread(read_path + "/" + folder + "/" + choices[choice])
    new_width = np.random.randint(minsize, maxsize + 1)
    new_height = np.random.randint(minsize, maxsize + 1)
    if width is not False:
        new_width = width
    symbol_img_res = resize(symbol_img, (new_height, new_width), cval=1) * 255
    symbol_img_res = crop(symbol_img_res)
    new_height, new_width = symbol_img_res.shape
    shift = np.random.randint(-4 + (60 - new_height) // 2,
                              4 + (60 - new_height) // 2)

    bounding_box = {
        'xmin': padding,
        'xmax': padding + new_width,
        'ymin': 65 + shift - 15 * bpower + 10 * bsmall - 30 * bnom + 30 * bden,
        'ymax': (65 + shift + new_height - 15 * bpower + 10 * bsmall
                 - 30 * bnom + 30 * bden),
        'class_text': folder,
        'class': label_names_dict[folder]
    }

    if folder == "y" or folder == "beta":
        bounding_box['ymin'] += 10
        bounding_box['ymax'] += 10

    xmin, xmax = bounding_box['xmin'], bounding_box['xmax']
    ymin, ymax = bounding_box['ymin'], bounding_box['ymax']

    img[ymin:ymax, xmin:xmax] += invert(symbol_img_res) + 254
    padding += new_width + np.random.randint(2, 5)

    return img, padding, bounding_box
Example #17
def basic_circle_mask(input):
    hsv = bgr2hsv(input)
    msk = cv.inRange(hsv, (260 / 2, 0.1 * 255, 0.4 * 255),
                     (360 / 2, 255, 255))  # purples
    msk += cv.inRange(hsv, (30 / 2, 0.3 * 255, 0),
                      (80 / 2, 255, 0.9 * 255))  # yellows
    msk += cv.inRange(hsv, (0, 0, 0), (360 / 2, 255, 0.2 * 255))  # blacks
    # saturated colors
    window = 0.5
    for s in np.arange(0.4, 1 - window, 0.1):
        msk += cv.inRange(hsv, (0, s * 255, 0),
                          (360 / 2, 255, (s + window) * 255))
    msk = ski2cv(msk > 0)
    return invert(msk)
Example #18
def preprocess_image(image):
    """Function that preprocess image.
    Returns:
        image: Preprocessed image.
    """

    # invert grayscale image
    image = util.invert(image)
    # resize and reshape image for model
    image = transform.resize(image, (28,28), anti_aliasing=True, mode="constant")
    image = np.array(image)
    image = image.reshape((1,28*28))

    return image
Example #19
def plot_image(stage, chains=None, axis_off=True, inverted=False, title=''):
    if inverted:
        stage = invert(stage)
    fig, ax = plt.subplots(dpi=300)
    ax.imshow(stage, cmap=plt.cm.gray)
    if axis_off:
        ax.set_axis_off()
    if title:
        ax.set_title(title)
    if chains:
        for chain in chains:  # add chains to image plot
            yy, xx = np.array(chain).T
            plt.plot(xx, yy, lw=0.2)
    plt.show()
Example #20
def classifiy_chars_by_pages(path_in='/home/lyx2/img_in',
                             path_out=r"/home/lyx2/img_out"):
    encoder_file_path = "./encoder.h5"
    encoder_data_file_path = r"./encoded_data.npz"
    labeled_data = np.load(r"./data.npz")
    temp_folder = r'./temp__'
    x_train, y_train = labeled_data['x_train'], labeled_data['y_train']
    if os.path.exists(temp_folder):
        rmtree(temp_folder)
    os.mkdir(temp_folder)
    batch_extract_char_2_file(char_out_path=temp_folder)
    import keras
    encoder = keras.models.load_model(encoder_file_path)
    x_train_coded = np.load(encoder_data_file_path)['arr_0']

    tibetan_word_code = []
    with open("words_Titan.txt") as f:
        for line in f.readlines():
            tibetan_word_code.append(line.strip())

    image_to_recog_list = os.listdir(temp_folder)
    for i, image_file_name in enumerate(image_to_recog_list):
        page_path = os.path.join(temp_folder, image_file_name)
        t = len(os.listdir(page_path))
        for j, img_name in enumerate(os.listdir(page_path)):
            img_path = os.path.join(page_path, img_name)
            charactor_img = io.imread(img_path, as_grey=True)
            img_resize = transform.resize(util.invert(charactor_img), (60, 60),
                                          mode='reflect')
            img2 = np.reshape(img_resize, (1, 60, 60, 1))
            encoded_img = encoder.predict(img2)
            lbs = []
            for img_t in x_train_coded:
                lbs.append(cosine_similarity_2(encoded_img, img_t))
            mx = np.argmax(lbs)

            if lbs[mx] > 0.9:
                out_classified_path = tibetan_word_code[y_train[mx]]
            else:
                out_classified_path = "unrecognized"

            img_out_put_path = os.path.join(path_out, out_classified_path)
            if not os.path.exists(img_out_put_path):
                os.mkdir(img_out_put_path)
            img_out_put_path = os.path.join(img_out_put_path, img_name)
            copyfile(img_path, img_out_put_path)

            print("Page %d, %d of %d" % (i, j, t))
    rmtree(temp_folder)
Example #21
    def imageReady(self):
        self.sendToConsole('Mouse released')
        predict = self.predict
        x = self.x
        sess = self.sess
        root_dir = self.root_dir

        self.sendToConsole('Image ready')
        image = QPixmap('img.png')
        h = self.label_classifierInputImage.height()
        w = self.label_classifierInputImage.width()
        self.label_classifierInputImage.setPixmap(image.scaled(w, h))

        with sess.as_default():
            #load picture to be classified
            temp = []
            image_path = os.path.join(root_dir, 'img.png')
            img = imread(image_path, flatten=True)
            img = img.astype('float32')
            img_28 = scipy.misc.imresize(img, (28, 28),
                                         interp='bilinear',
                                         mode=None)
            img_inv = util.invert(img_28)  #invert_color
            temp.append(img_inv)
            test_img = np.stack(temp)
            self.sendToConsole('Classifying image...')
            pred = predict.eval({x: test_img.reshape(-1, 784)})
            self.sendToConsole('Classifier output: ' + str(pred) + '\n')

            # save inverse 28x28image
            image_inv28_save_path = os.path.join(root_dir, 'img28_inv.png')
            imsave(image_inv28_save_path, img_inv)

            # Load inverse 28x28image into the GUI
            image = QPixmap('img28_inv.png')
            h = self.label_classifierInputImage.height()
            w = self.label_classifierInputImage.width()
            self.label_classifierInputImage_downSampled.setPixmap(
                image.scaled(w, h))

            # Update the predict in the GUI
            prediction = np.asscalar(pred)
            self.label_classifiedDigit.setText(str(prediction))

            # Update the classifier history stream
            self.historyText = self.historyText + str(prediction)
            self.textEdit_classificationHistory.setText(self.historyText)
            self.textEdit_classificationHistory.setFocus()
            self.textEdit_classificationHistory.moveCursor(QTextCursor.End)
Example #22
def method1(image, gray, debug=0):

    # create mask based upon red values
    # Theory is that dolphins should have more red than the sea...
    imgmask = createMask(image)

    # probably should do something better with imgmask other than just take red channel...
    # setup code such that can drop in different methods to compare effectiveness...

    # estimate background then threshold it to get a mask
    background = estimate_background(gray, sigma=100.)

    bkgMask = invert(get_threshold(background)).astype(int)
    maskArea = 1. - ((np.sum(bkgMask)) / (bkgMask.shape[0] * bkgMask.shape[1]))
    if maskArea > .45:
        bkgFactor = 0.0
        imgmask = imgmask[:, :, 0]
    else:
        bkgFactor = 1.1
        # combine masks
        imgmask = imgmask[:, :, 0] * bkgMask

    # convert to binary
    imgmask = np.where(imgmask > 0, 1, 0)

    # subtract background, apply mask and renormalise
    backgroundSubtracted = gray - (bkgFactor * background)
    if maskArea < 0.45:
        backgroundSubtracted *= bkgMask
    else:
        backgroundSubtracted *= imgmask
    backgroundSubtracted = backgroundSubtracted / np.amax(
        np.abs(backgroundSubtracted))
    backgroundSubtracted = img_as_ubyte(backgroundSubtracted)

    if debug > 2:
        labels = ["gray", "background", "backgroundSubtracted", "imgmask"]
        cmaps = [plt.cm.gray for i in range(0, 4)]
        figd, axs = debug_fig(gray,
                              background,
                              backgroundSubtracted,
                              imgmask,
                              labels,
                              cmaps,
                              pos=1)

        return backgroundSubtracted, imgmask, axs

    return backgroundSubtracted, imgmask, None
Example #23
def charsegment(word, word_no, line_no):
    verticalp = np.sum(word, 0)
    # plt.plot(verticalp)
    iszero = np.equal(verticalp, 0).view(np.int8)
    # print(iszero)
    absdiff = np.abs(np.diff(iszero))
    # print(absdiff)
    # plt.show()
    try:
        # close the final run so the transition indices pair into (start, end)
        absdiff[-1] = 1
        gaps = np.where(absdiff == 1)[0].reshape(-1, 2)
    except ValueError:
        # odd number of transitions: drop the trailing one instead
        absdiff[-1] = 0
        gaps = np.where(absdiff == 1)[0].reshape(-1, 2)
    # print(gaps)
    k = 0

    for i in range(len(gaps)):
        crop_char = word[0:word.shape[0], gaps[i][0]:gaps[i][1] + 2]
        crop_char = cv2.rotate(crop_char, cv2.ROTATE_90_CLOCKWISE)
        ret, labels = cv2.connectedComponents(crop_char)
        # print(labels)
        props = regionprops(labels)
        for prop in props:
            # print(prop['label'])
            cropped_shape = prop['image']
            # print(cropped_shape)
            cropped_shape = 255 * cropped_shape
            # print(cropped_shape)
            labeled_img = cv2.rotate(cropped_shape,
                                     cv2.ROTATE_90_COUNTERCLOCKWISE)

            labeled_img = np.array(labeled_img, dtype='uint8')
            image = rescale(labeled_img)
            image = invert(image)
            images = []
            image_copy = np.reshape(image, (50, 50, 1))
            images.append(image_copy)
            x = np.array(images)
            with graph.as_default():
                pre_class = model_segment.predict_classes(x)
            if pre_class[0] == 1:
                k = seg_connected(image, line_no, word_no, k)
            else:
                cv2.imwrite(
                    os.path.join(
                        img_no, 'cropchar_' + str(line_no) + '_' +
                        str(word_no) + '_' + str(k + 1) + '.png'), image)
                k = k + 1
Example #24
def stara(
    smap,
    circle_radius: u.deg = 100 * u.arcsec,
    median_box: u.deg = 10 * u.arcsec,
    threshold=6000,
    limb_filter: u.percent = None,
):
    """
    A method for automatically detecting sunspots in white-light data using
    morphological operations.

    Parameters
    ----------
    smap : `sunpy.map.GenericMap`
        The map to apply the algorithm to.
    circle_radius : `astropy.units.Quantity`, optional
        The angular size of the structuring element used in the
        `skimage.morphology.white_tophat`. This is the maximum radius of
        detected features.
    median_box : `astropy.units.Quantity`, optional
        The size of the structuring element for the median filter, features
        smaller than this will be averaged out.
    threshold : `int`, optional
        The threshold used for detection, this will be subject to detector
        degradation. The default is a reasonable value for HMI continuum images.
    limb_filter : `astropy.units.Quantity`, optional
        If set, ignore features close to the limb within a percentage of the
        radius of the disk. A value of 10% generally filters out false
        detections around the limb with HMI continuum images.
    """
    data = invert(smap.data)

    # Filter things that are close to limb to reduce false detections
    if limb_filter is not None:
        hpc_coords = sunpy.map.all_coordinates_from_map(smap)
        r = np.sqrt(hpc_coords.Tx**2 + hpc_coords.Ty**2) / (
            smap.rsun_obs - smap.rsun_obs * limb_filter)
        data[r > 1] = np.nan

    # Median filter to remove detections based on hot pixels
    m_pix = int((median_box / smap.scale[0]).to_value(u.pix))
    med = median(data, square(m_pix), behavior="ndimage")

    # Construct the pixel structuring element
    c_pix = int((circle_radius / smap.scale[0]).to_value(u.pix))
    circle = disk(c_pix / 2)

    finite = white_tophat(med, circle)
    finite[np.isnan(finite)] = 0  # Filter out nans

    return finite > threshold
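
A hedged usage sketch for stara (the FITS path is a placeholder; assumes sunpy and astropy are installed and the file holds an HMI continuum image):

import astropy.units as u
import sunpy.map

smap = sunpy.map.Map("hmi_continuum.fits")  # placeholder path
sunspot_mask = stara(smap, circle_radius=100 * u.arcsec,
                     limb_filter=10 * u.percent)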
Example #25
def main(argv):

    # Loads an image
    image = io.imread(argv[0], True)

    binary = image > threshold_otsu(image)
    skel = skeletonize_3d(invert(binary))

    # Copy edges to the images that will display the results in BGR
    lines = probabilistic_hough_line(skel,
                                     threshold=5,
                                     line_length=20,
                                     line_gap=50)

    plot(image, skel, lines)
Example #26
def renumerate(image, max=False, inver=None):
    from skimage import util
    if inver is not None:
        image = util.invert(image)
    image = rgb2gray(image)
    classes = np.unique(image)
    dummy = np.zeros_like(image)
    for idx, value in enumerate(classes):
        mask = np.where(image == value)
        dummy[mask] = idx
    max_class = idx
    if max is True:
        return dummy, max_class
    if max is False:
        return dummy
Example #27
def get_skeleton(img):
    '''
    Takes in the route image on the node network 
    and returns a skeletonized version 

    Parameters:
        input: grayscale image [0,1]
        output: grayscale image [0,1]
    '''
    # img_thresh = np.where(img > .55, 1, 0)
    img_thresh = img > .55
    # skeletonize expects a 0/1 image, so keep the inverted mask binary
    img_thresh = invert(img_thresh)
    skeleton = skeletonize(img_thresh) * 1.

    return skeleton
Example #28
def skeleton(pixels, path=0):
    if type(pixels) == str:
        image = io.imread(pixels, 1)
    else:
        image = pixels
    # invert the image
    image = invert(image)
    for b, f in enumerate(image):
        for a, p in enumerate(f):
            image[b][a] = 0 if image[b][a] == 255 else 1
    # apply skeletonize
    skeleton = skeletonize(image)
    if path != 0:
        misc.imsave(path, skeleton)
    return skeleton
Example #29
    def get_ft_image(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        problem_name = self.df.iloc[idx]['filename']
        problem_image_path = os.path.join(self.image_dir,
                                          problem_name + '-bolded-cs.png')
        image = io.imread(problem_image_path)
        image = util.invert(image)
        image = np.asarray(image)
        image = image.astype('float32')
        image = image / 255.0  # Normalize the data
        transformed_image = fft2(image)
        # transformed_image = transformed_image / transformed_image[0, 0]
        # real_transformed_image = np.real(transformed_image) + np.imag(transformed_image)
        return transformed_image
Example #30
def charImageToArray(characters):
    charArray = []
    for character in characters:
        character = util.invert(character)
        character = character * 255
        
        plt.figure()
        plt.imshow(character, cmap='gray')

        charArray.append(character.flatten())
    # np.savetxt(dateTime+".csv", charArray, delimiter=",", fmt = "%s")
    
    # plt.show()

    return charArray
Example #31
def media_axis_method(grid, grid_start, grid_goal, check_linear=True):
    # check_linear selects the path-pruning method: if True, use collinearity;
    # if False, use Bresenham, which usually yields a much better result.
    skeleton = medial_axis(invert(grid))
    skel_start, skel_goal = find_start_goal(skeleton, grid_start, grid_goal)
    path, _ = a_star(
        invert(skeleton).astype(int), heuristic, tuple(skel_start),
        tuple(skel_goal))
    if len(path) == 0:
        # print("warning, no path is found, please select another point")
        # keep the return shape consistent with the success case below
        return path, skeleton, grid_start, grid_goal

    #insert the start and end point if necessary
    if tuple(skel_start) != grid_start:
        path.insert(0, grid_start)
    if tuple(skel_goal) != grid_goal:
        path.append(grid_goal)

    #prune the path
    print("path point num = {}, path={}".format(len(path), path))
    path = prune_path(path, grid=grid, check_linear=check_linear)
    print("pruned path point num = {}, path={}".format(len(path), path))
    path = np.array(path).astype(int).tolist()
    return path, skeleton, grid_start, grid_goal
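
prune_path is not shown here; with check_linear=True it presumably drops intermediate waypoints using a collinearity test along these lines (the epsilon is an assumption):

def collinear(p1, p2, p3, eps=1e-6):
    # three points are collinear when the triangle they span has ~zero area
    area = (p1[0] * (p2[1] - p3[1]) + p2[0] * (p3[1] - p1[1])
            + p3[0] * (p1[1] - p2[1]))
    return abs(area) < eps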
Example #32
def shear_image(image, slant):
    """ Shear an image to correct its slant
        Args:
            image (matrix): The image to shear
            slant (float): Slant of the image
        Returns:
            matrix: The sheared image
    """
    # Work with inverted colors, otherwise shearing leads to black parts in image
    inverted_img = util.invert(image)

    # Compute shearing angle and transformation matrix
    shear = math.radians(90 - float(slant))
    affine_tf = transform.AffineTransform(shear=shear)

    # Shear the image and crop it
    inverted_img = pad_image_for_shearing(inverted_img, shear)
    sheared_img = transform.warp(inverted_img, inverse_map=affine_tf)
    sheared_img = crop_image_text(sheared_img)

    # Invert back to white background / black text and resize to original height
    sheared_img = util.invert(sheared_img)
    sheared_img = resize_image(sheared_img, new_height=image.shape[0])
    return sheared_img
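
A hedged usage sketch (pad_image_for_shearing, crop_image_text and resize_image are project helpers assumed to be defined elsewhere; the file name is a placeholder):

from skimage import io

word_img = io.imread("word.png", as_gray=True)  # placeholder input
deslanted = shear_image(word_img, slant=75.0)   # slant in degrees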
Example #33
def process(img_name, ifprint):
    src = cv2.imread(img_name, cv2.IMREAD_COLOR)

    if len(src.shape) != 2:
        gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    else:
        gray = src

    # convolve with a kernel that filters out horizontal and vertical lines (the grid on squared paper)
    kernel = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
    gray2 = cv2.filter2D(gray, -1, kernel)
    gray2 = invert(gray2)
    gray2 = binary_dilation(gray2, disk(3))

    return find_words(gray2, img_name, src, ifprint)
Example #34
def predict_image_with_consine_similarity(
        encoder_data_file_path=r"D:\encoded_data.npz",
        encoder_file_path="encoder.h5",
        rec_input_path=r"D:\Users\Riolu\Desktop\新建文件夹 (2)"):
    '''
    Labels the data under rec_input_path using cosine similarity.
    :param encoder_data_file_path: data already encoded with the encoder
    :param encoder_file_path: the encoder model file
    :param rec_input_path: directory of files to recognize
    :return:
    # TODO: sort the recognized files into categories
    '''
    import keras
    unrecognize_picture_dir = "D:\\img_out"
    x_train, y_train = np.load("D:/data.npz")['x_train'], np.load(
        "D:/data.npz")['y_train']
    encoder = keras.models.load_model(encoder_file_path)
    t = len(os.listdir(rec_input_path))
    utfcode_str_list = []
    x_train_coded = np.load(encoder_data_file_path)['arr_0']
    # load the code table once, outside the per-image loop
    tibetan_word_code = []
    with open("words_Titan.txt") as f:
        for line in f.readlines():
            tibetan_word_code.append(line.strip())
    for i, file_name in enumerate(os.listdir(rec_input_path)):
        full_path = rec_input_path + os.path.sep + file_name
        img = io.imread(full_path, as_grey=True)
        img_resize = transform.resize(util.invert(img), (60, 60),
                                      mode='reflect')
        img2 = np.reshape(img_resize, (1, 60, 60, 1))
        encoded_img = encoder.predict(img2)
        lbs = []
        for img_t in x_train_coded:
            lbs.append(cosine_similarity_2(encoded_img, img_t))
        mx = np.argmax(lbs)

        if lbs[mx] > 0.9:
            rec_str = tibetan_word_code[y_train[mx]]

            print(rec_str, file_name)
            utfcode_str_list.append(rec_str)
        else:
            utfcode_str_list.append('*')
            io.imsave(unrecognize_picture_dir + os.path.sep + file_name, img)
        # print("%d / %d" % (i,t))

    print(utfcode_str_list)
Example #35
    def pre_image_processing(resized_image):

        equal_adapt_hist_image = exposure.equalize_adapthist(resized_image)
        rescale_intensity_image = exposure.rescale_intensity(equal_adapt_hist_image)
        adjust_sigmoid_image = exposure.adjust_sigmoid(rescale_intensity_image)
        gray_scale_image = rgb2gray(adjust_sigmoid_image)
        mean_image = mean(gray_scale_image, disk(1))
        mean_image = mean(mean_image, disk(1))
        mean_image = mean(mean_image, disk(1))

        median_image = dilation(median(mean_image, disk(1)), square(2))
        otsu_image = filters.threshold_otsu(median_image)
        closing_image = closing(median_image > otsu_image, square(1))
        #    opening_image = opening(closing_image, square(2))
        opening_image = invert(closing_image)
        return opening_image
Example #36
    def detectCirc(image):
        """
        Inverts the color of the image and detects the center of a circle shape.
        Assumes the circle is the ONLY object in the image, so noise
        needs to be filtered out.
        """
        #staticBg = genFilter(video[0])

        invFrame = image
        bwFrame = gray2binary(rgb2gray(util.invert(invFrame)))[hMin:hMax,
                                                               wMin:wMax]
        bwFrame = bwFrame  # * staticBg
        # Detects shapes in image
        props = regionprops(label_image=bwFrame.astype(int))

        return props, invFrame, bwFrame
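
A hedged sketch of reading the detected circle's center from the returned region properties (assumes the crop bounds hMin/hMax/wMin/wMax are in scope and at least one blob survives filtering):

props, inv_frame, bw_frame = detectCirc(frame)
if props:
    largest = max(props, key=lambda p: p.area)  # skip small noise blobs
    cy, cx = largest.centroid                   # row/col center of the circle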
Example #37
def image_transformation(X, method_type='blur', **kwargs):
    # https://www.kaggle.com/tomahim/image-manipulation-augmentation-with-skimage
    q = kwargs['percentile'] if 'percentile' in kwargs else (0.2, 99.8)
    angle = kwargs['angle'] if 'angle' in kwargs else 60
    transformation_dict = {
        'blur': normalize(ndimage.uniform_filter(X)),
        'invert': normalize(util.invert(X)),
        'rotate': rotate(X, angle=angle),
        'rescale_intensity': _rescale_intensity(X, q=q),
        'gamma_correction': exposure.adjust_gamma(X, gamma=0.4, gain=0.9),
        'log_correction': exposure.adjust_log(X),
        'sigmoid_correction': exposure.adjust_sigmoid(X),
        'horizontal_flip': X[:, ::-1],
        'vertical_flip': X[::-1, :],
        'rgb2gray': skimage.color.rgb2gray(X)
    }
    return transformation_dict[method_type]
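
A hedged usage sketch (assumes the undefined helpers normalize and _rescale_intensity are in scope). Note the dict above eagerly evaluates every transform on each call; wrapping the values in lambdas would avoid the wasted work:

from skimage import data

augmented = image_transformation(data.astronaut(), method_type='invert')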
Example #38
def test_invert_roundtrip():
    for t, limits in dtype_range.items():
        image = np.array(limits, dtype=t)
        expected = invert(invert(image))
        assert_array_equal(image, expected)
Example #39
"""
===========
Skeletonize
===========

Skeletonization reduces binary objects to 1 pixel wide representations. This
can be useful for feature extraction, and/or representing an object's topology.

``skeletonize`` works by making successive passes of the image. On each pass,
border pixels are identified and removed on the condition that they do not
break the connectivity of the corresponding object.
"""
from skimage.morphology import skeletonize
from skimage import data
import matplotlib.pyplot as plt
from skimage.util import invert

# Invert the horse image
image = invert(data.horse())

# perform skeletonization
skeleton = skeletonize(image)

# display results
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4),
                         sharex=True, sharey=True)

ax = axes.ravel()

ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].axis('off')
ax[0].set_title('original', fontsize=20)

ax[1].imshow(skeleton, cmap=plt.cm.gray)
ax[1].axis('off')
ax[1].set_title('skeleton', fontsize=20)

fig.tight_layout()
plt.show()
Example #40
    def setup(self, *args):
        # we stack the horse data 5 times to get an example volume
        self.image = np.stack(5 * [invert(data.horse())])