Example #1
def lbp_test(afile, bfile):

    ahist = lbp_hist(data.load(afile))
    bhist = lbp_hist(data.load(bfile))
    dis = kullback_leibler_divergence(ahist, bhist)

    print(dis)

    return dis
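
Note: skimage.data.load, used throughout these examples, was deprecated in scikit-image 0.16 and later removed; skimage.io.imread(path) is the drop-in replacement. Example #1 also calls two helpers that are not shown. A minimal sketch, assuming they follow the scikit-image LBP texture tutorial (the body of lbp_hist is an assumption, not the original author's code):

import numpy as np
from skimage import io
from skimage.color import rgb2gray
from skimage.feature import local_binary_pattern

RADIUS = 3            # assumed LBP settings, as in the scikit-image tutorial
N_POINTS = 8 * RADIUS

def lbp_hist(image):
    # assumed helper: uniform LBP followed by a normalized histogram
    if image.ndim == 3:
        image = rgb2gray(image)
    lbp = local_binary_pattern(image, N_POINTS, RADIUS, 'uniform')
    n_bins = int(lbp.max() + 1)
    hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))
    return hist

def kullback_leibler_divergence(p, q):
    # as defined in the scikit-image LBP texture-classification example
    p = np.asarray(p)
    q = np.asarray(q)
    filt = np.logical_and(p != 0, q != 0)
    return np.sum(p[filt] * np.log2(p[filt] / q[filt]))
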
Example #2
    def __init__(self,
                 datasetPath,
                 datasetImages,
                 imgHeight=360,
                 imgWidth=480):

        self.np_image = np.array([], dtype=np.float32)
        self.np_deepview = np.array([], dtype=np.float32)
        self.np_mask = np.array([], dtype=np.float32)
        self.height = imgHeight
        self.width = imgWidth
        self.path = datasetPath
        self.images = datasetImages

        def load_data(files):
            selected_files = []
            for file in files:
                picFound = False
                for selected_file in glob.glob(file + "*"):
                    selected_files.append(selected_file)
                    picFound = True
                # Log an error if a picture listed in the pics.txt file was not found in the directory.
                if not picFound:
                    log.error(str(file) + " file wasn't found!")
            return selected_files

        # change the directory to where the dataset is
        current_dir = os.getcwd()
        chdir(self.path)

        # move to the "image" directory first (`folder` is assumed to be a
        # module-level list of subdirectory names, e.g. ['image', 'mask'])
        chdir(folder[0])

        # load the raw images
        self.np_image = [
            np.array(Image.open(i), dtype=np.float32) for i in load_data(
                filter(lambda s: not s.startswith('DeepviewOutput'),
                       self.images))
        ]

        # load the DeepView image
        # data.load function needs the path to the picture, otherwise it always looks at skimage/data/ directory by default
        self.np_deepview = data.load(os.getcwd() + os.sep + load_data(
            filter(lambda s: s.startswith('DeepviewOutput'), self.images))[0])
        # now move to the "mask" directory

        # load the mask
        # data.load function needs the path to the picture, otherwise it always looks at skimage/data/ directory by default
        chdir('../' + folder[1])
        self.np_mask = data.load(os.getcwd() + os.sep + load_data(['mask'])[0])
        chdir(current_dir)
Example #3
def glob_dir_search(fextype, testfile):

    filelist = glob('D:\\abc\\pci\\*.jpg')

    testimg = data.load(testfile)

    testfex = fex_rgbhist(testimg)

    for afile in filelist:

        afex = fex_rgbhist(data.load(afile))

        dis = kullback_leibler_divergence(testfex, afex)

        print(afile, dis)
Example #4
def loop(imgFiles):
	for f in imgFiles:
		img = img_as_float(data.load(os.path.join(inputDir,f)))
		startTime = time.time()
		img = filter.denoise_bilateral(img, sigma_range=0.1, sigma_spatial=3)
		io.imsave(os.path.join(outputDir, f), img)
		print("Took %f seconds for %s" %(time.time() - startTime, f))
Example #5
    def __update_classifier__(self):
        self.classifier = blog.old_weather.learning.NearestNeighbours()

        cursor = self.conn.cursor()
        cursor.execute(
            "select subject_id,region_id,column_id,row_id,digit_index,pixels from cells"
        )

        for subject_id, region_id, column_id, row_id, digit_index, pixels in cursor.fetchall(
        ):
            cursor.execute(
                "select fname from subject_info where subject_id = " +
                str(subject_id))
            fname = cursor.fetchone()[0]
            image = load(fname)

            pixels = json.loads(pixels)

            _, algorithm_digit, prob = self.classifier.__identify_digit__(
                image, pixels, collect_gold_standard=False)
            cursor.execute("update cells set algorithm_classification = " +
                           str(algorithm_digit) + ", probability = " +
                           str(prob) + " where subject_id = " +
                           str(subject_id) + " and region_id = " +
                           str(region_id) + " and column_id = " +
                           str(column_id) + " and row_id = " + str(row_id) +
                           " and digit_index = " + str(digit_index))
        self.conn.commit()
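
The string-concatenated SQL above breaks on values containing quotes and is injection-prone; sqlite3 supports parameterized queries with ? placeholders. A sketch of the same update using placeholders:

cursor.execute(
    "update cells set algorithm_classification = ?, probability = ? "
    "where subject_id = ? and region_id = ? and column_id = ? "
    "and row_id = ? and digit_index = ?",
    (algorithm_digit, prob, subject_id, region_id,
     column_id, row_id, digit_index))
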
Example #6
def random_selection(index_list, batch_size, index_table, unit_table,
                     number_unit, t_dir, t_list, t_file_list):
    """
    random_selection: int int list list list str list list-> array
    saca un batch de los ejemplos selecionados aleatoriamente
    """
    out = []
    if index_list == 0:
        rdm.shuffle(index_table)

    end_index = index_list + batch_size
    while index_list < end_index:
        example = index_table[index_list]
        un_example = unit_table[example]
        nr_example = number_unit[example]

        out.append(
            data.load(t_dir + '/' + t_list[un_example] + '/' +
                      t_file_list[un_example][nr_example],
                      as_grey=True))
        index_list += 1
        if index_list == len(index_table):
            end_index = end_index - index_list
            index_list = 0
            rdm.shuffle(index_table)

    return index_list, np.array(out).reshape(batch_size, 64, 64, 1)
Example #7
def add_bernoulli_noise(folder, distfolder):
    if not os.path.exists(folder):
        raise Exception('folder does not exist!')
    if not os.path.exists(distfolder):
        os.makedirs(distfolder)
    filelist = os.listdir(folder)
    # plist = np.random.uniform(0.35, 1, len(filelist))
    p = 0.95
    for index, file in enumerate(filelist):
        filepath = folder + '\\' + file
        # img = IO.imread(filepath)
        img = data.load(filepath)
        if len(img.shape) < 3:
            os.remove(filepath)
            continue
        width, height = img.shape[0], img.shape[1]
        if width < 64 or height < 64:
            os.remove(filepath)
            continue
        mask = bernoulli_noise((width, height, 1), p)
        mask = np.concatenate((mask, mask, mask), 2)
        noiseimg = mask * img
        # IO.imshow(noiseimg)
        # IO.show()
        # return noiseimg
        outpath = distfolder + '\\' + file
        IO.imsave(outpath, noiseimg)
        print(index)
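
bernoulli_noise is not defined in this snippet; presumably it draws an independent 0/1 keep-or-drop mask. A minimal sketch under that assumption:

import numpy as np

def bernoulli_noise(shape, p):
    # assumed behavior: 1 with probability p, 0 otherwise
    return np.random.binomial(1, p, size=shape)
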
Example #8
def run():
    imagePath = '/home/ggutierrez/RayosX/H3.JPG'
    image = data.load(imagePath)
    rows, cols, dim = image.shape
    pyramid = tuple(
        pyramid_gaussian(image,
                         max_layer=3,
                         sigma=2,
                         downscale=cfg.downScaleFactor))

    fig, ax = plt.subplots(2, 2, figsize=(8, 6))
    fig.subplots_adjust(hspace=.1, wspace=.001)
    ax = ax.ravel()

    for i, p in enumerate(pyramid[::-1]):
        print(p.shape)
        ax[i].imshow(p)
        ax[i].add_patch(
            Rectangle((p.shape[1] / 2 - 20, p.shape[0] / 2 - 20),
                      40,
                      40,
                      fill=False,
                      edgecolor='g',
                      lw=2))
        ax[i].tick_params(labelsize=6)
        # plt.show()
    # plt.savefig(cfg.resultsFolderPath + 'pyramid_gaussian.png', format='png', bbox_inches='tight', pad_inches=0, dpi=500)
    plt.show()
Example #9
def extract_patches(path, numPatchesPerImage, patchSize):

    """
    :param path: path to a RGB fundus image
    :param numPatchesPerImage: number of patches to extract per image
    :param patchSize: patch is nxn size
    :return: patches: matrix with an image patch in each row
    """

    img = load(path)
    img = img[:,:,1]
    # contrast enhancement
    img = equalize_adapthist(img)
    windows = view_as_windows(img, (patchSize,patchSize))
    j = 0
    patches = np.zeros((numPatchesPerImage, patchSize*patchSize))
    while(j < numPatchesPerImage):
        
        sx = np.random.randint(0, windows.shape[0] - 1)
        sy = np.random.randint(0, windows.shape[1] - 1)  # sample the second axis, not the first
        x = (patchSize/2 - 1) + sx
        y = (patchSize/2 - 1) + sy
        r = (img.shape[0]/2) - 1

        if np.sqrt((x - r) ** 2 + (y - r) **2 ) < r:
            patch = windows[sx, sy, :].flatten()            
            patches[j,:] = patch
            j += 1 
        else:
            if j > 0:
                j -= 1 
    
    return patches
Example #10
def loop(imgFiles):
    for f in imgFiles:
        img = data.load(os.path.join(inputDir, f))
        startTime = time.time()
        img = gaussian(img, sigma=1, multichannel=True)
        io.imsave(os.path.join(resultDir, f), img)
        print("Took %f seconds for %s" % (time.time() - startTime, f))
Example #12
def load_image_from_file(filename):
    filename = os.path.abspath(filename)

    image = data.load(filename)
    image_gray = rgb2gray(image)

    return image, image_gray
Example #13
def visualization(path):
    image = color.rgb2gray(
        data.load(path))  #Load the image and convert it to grayscale.

    fd, hog_image = hog(
        image,
        orientations=9,
        pixels_per_cell=(8, 8),  # compute the HOG features
        cells_per_block=(2, 2),
        visualise=True,
        transform_sqrt=True,
        block_norm='L2-Hys')

    print("number of features: ", len(fd))

    fig, (ax1, ax2) = plt.subplots(1,
                                   2,
                                   figsize=(8, 4),
                                   sharex=True,
                                   sharey=True)
    ax1.axis('off')
    ax1.imshow(image, cmap=plt.cm.gray)
    ax1.set_title('Input image')
    ax1.set_adjustable('box-forced')

    # Rescale histogram for better display
    hog_image_rescaled = exposure.rescale_intensity(hog_image,
                                                    in_range=(0, 0.02))

    ax2.axis('off')
    ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
    ax2.set_title('Cell size 8 x 8')
    ax2.set_adjustable('box-forced')
    plt.show()
Example #14
def loop(imgFiles,rank):
    for f in imgFiles:
        img = img_as_float(data.load(os.path.join(noisyDir,f))) 
        psf = np.ones((5, 5)) / 25
        startTime = time.time()
        img = richardson_lucy(img, psf, iterations=30)
        io.imsave(os.path.join(denoisedDir,f), img)
        print ("Process %d: Took %f seconds for %s" %(rank, time.time() - startTime, f))
Example #15
def get_ORB_detector_templates():
    directory = os.getcwd() + "\\templates" + '/'
    img_templates = [[.05, "5gr.png"], [1, "1zl.png"]]
    img_templates = [
        histogram_manipulator.contrast_stretching(
            img.load(directory + name[1], True)) for name in img_templates
    ]
    return img_templates
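
histogram_manipulator.contrast_stretching and img.load come from the project's own modules. A sketch of the stretching step, assuming the common 2nd/98th-percentile approach from the scikit-image exposure documentation:

import numpy as np
from skimage import exposure

def contrast_stretching(image):
    # stretch intensities between the 2nd and 98th percentiles (assumed behavior)
    p2, p98 = np.percentile(image, (2, 98))
    return exposure.rescale_intensity(image, in_range=(p2, p98))
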
Example #16
def test_color_histogram():
    image = skimage.img_as_float(data.load('color.png'))
    viewer = ImageViewer(image)
    ch = ColorHistogram(dock='right')
    viewer += ch

    assert_almost_equal(viewer.image.std(), 0.352, 3)
    ch.ab_selected((0, 100, 0, 100))
    assert_almost_equal(viewer.image.std(), 0.325, 3)
Example #17
def create_mask(filename, n_segments=400, n_cuts=10):
    img = data.load(filename)

    labels1 = segmentation.slic(img, n_segments=n_segments)

    rag = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, rag, num_cuts=n_cuts)

    return labels2, img
Example #18
def loop(imgFiles):
    for f in imgFiles:
        img = img_as_float(data.load(os.path.join(noisyDir,f)))
        startTime = time.time()
        dim = img.shape
        img = denoise_bilateral(img, sigma_color=0.05, sigma_spatial=15,  multichannel=True)
        io.imsave(os.path.join(denoisedDir,f), img)
        #skimage.io.imsave(os.path.join(denoisedDir,f), img)
        print("Took %f seconds for %s" %(time.time() - startTime, f))
Example #19
def loop(imgFiles, rank):
    for f in imgFiles:
        img = img_as_float(data.load(os.path.join(inputDir, f)))
        startTime = time.time()
        img = gaussian(img, sigma=1, multichannel=True)
        io.imsave(os.path.join(resultDir, f), img)
        print("Process %d: Took %f seconds for %s" %
              (rank, time.time() - startTime, f))
Example #20
def mean(img_path):
    img = load(img_path)
    # flatten image to be 2D and compute mean rgb
    mean_rgb_val = mean_helper(img)
    # convert image to hsv scale
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # calculate mean
    mean_hsv_val = mean_helper(hsv)
    return mean_rgb_val, mean_hsv_val
Example #22
def LBP(img_path):
    # settings for LBP
    radius = 3
    n_points = 8 * radius
    METHOD = 'uniform'
    image = data.load(img_path)
    gray_image = skimage.color.rgb2gray(image)
    lbp = local_binary_pattern(gray_image, n_points, radius, METHOD)

    return lbp
Example #23
def send_batch(paths):
    print("Opening image")
    tensor = [None] * len(paths)
    for i, path in enumerate(paths):
        absolute_path = os.path.dirname(__file__)
        img = data.load(os.path.join(absolute_path, path))
        print("appending image %s" % i)
        tensor[i] = img
    # gc.collect()
    return tensor
Example #24
def predict_object(img_path):
    # load vgg16 model
    vgg_model = VGG16()
    img = load(img_path)
    # reshape to size 224 to fit model
    img = resize(img, (224, 224)) * 255
    img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))
    img = preprocess_input(img)
    probabilities = vgg_model.predict(img)
    return probabilities
Example #25
def load_th_image(image_name):
    image = data.load(image_name)
    grey = rgb2grey(image)

    th = threshold_otsu(grey)

    binary = grey >= th

    binary[50:-50, 50:-50] = 0
    return binary
Example #26
def parallelCuda():
    total_start_time = time.time()
    imgFloats = []
    startTime = time.time()
    for y in range(0,20):
        for x in range(1,6):
            imgFloats = np.array(img_as_float(data.load(os.path.join(noisyDir,"%.4d.jpg"%x))), dtype=np.float32)
            dest = imgFloats
            processimage(drv.Out(dest), drv.In(imgFloats), block=(1024,1,1), grid=(128,1))
            io.imsave(os.path.join(denoisedDir,"%.4d.jpg"%(x*y)), dest)
    print("Total time %f seconds" %(time.time() - total_start_time))
Example #27
def generate(path):

    # ####### LBP ######## #
    img = data.load(abspath(path))
    img_gray = rgb2gray(img)
    img_gray *= 255

    img_lbp = local_binary_pattern(
                    img_gray, 8, 1, method='nri_uniform'
                    )

    histogram = np.hstack((img_lbp.flatten(), list(range(59))))
    histogram = scipy.stats.itemfreq(histogram)
    # print histogram.shape

    try:
        a, b, c = img.shape
    except ValueError:
        a, b = img.shape

    # We know they are equal:
    # print a*b
    # print sum(a[1] for a in histogram)

    # lbp histogram values Normalization
    lbp_features = [(element[1]/float(a*b)) for element in histogram]
    # print len(lbp_features)

    # ####### GLCM ######## #

    distances = [2, 3, 4, 5]
    theta = [0,  np.pi/4,  np.pi/2,  3*np.pi/2,  np.pi]

    glcm = greycomatrix(img_gray.astype(np.uint8), distances, theta, normed=True)
    props = {}
    for prop in [
            'contrast', 'dissimilarity', 'homogeneity', 'energy',
            'correlation', 'ASM'
            ]:
        props[prop] = greycoprops(glcm, prop)
    props['contrast'] /= props['contrast'].max()
    props['dissimilarity'] /= props['dissimilarity'].max()

    glcm_feature = []
    for i in props.keys():
        for d in range(len(distances)):
            for t in range(len(theta)):
                glcm_feature.append(props[i][d][t])

    # print len(glcm_feature)

    return np.hstack([lbp_features, glcm_feature])
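
scipy.stats.itemfreq, used above, has since been removed from SciPy; np.unique with return_counts=True yields the same (value, count) pairs. A drop-in sketch:

import numpy as np

def item_frequencies(values):
    # replacement for the removed scipy.stats.itemfreq: one (value, count) row per unique value
    uniq, counts = np.unique(values, return_counts=True)
    return np.column_stack((uniq, counts))
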
Example #28
def generate_all_font_data(font_name):
    points = []

    img_files = os.listdir("data/{}".format(font_name))

    if num_samples is not None:
        img_files = img_files[:num_samples]

    for img_file in img_files:
        img_data = img_as_float(data.load(cwd + '/data/{}/{}'.format(font_name, img_file)))[:, :, 0]
        points.append(compute_feats(img_data, kernels).flatten().tolist())

    return font_name, points
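
compute_feats, kernels, cwd and num_samples are module-level names in the original repository. compute_feats is presumably the function from the scikit-image Gabor filter-bank tutorial, which also appears (undefined) in Example #41; that version is:

import numpy as np
from scipy import ndimage as ndi

def compute_feats(image, kernels):
    # mean and variance of each Gabor kernel's filter response
    feats = np.zeros((len(kernels), 2), dtype=np.double)
    for k, kernel in enumerate(kernels):
        filtered = ndi.convolve(image, kernel, mode='wrap')
        feats[k, 0] = filtered.mean()
        feats[k, 1] = filtered.var()
    return feats
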
Example #29
def load_png_mask(mask_dir, type, img_key, img_shape):
    if type == "mass":
        mask_dir += "MassSegmentationMasks/"
    elif type == "calc":
        mask_dir += "CalcificationSegmentationMasks/"

    mask_path = mask_dir + str(img_key) + "_mask.png"
    if os.path.isfile(mask_path):
        mask = data.load(mask_path, as_gray=True)
        mask = np.expand_dims(mask, 0)
    else:
        mask = np.zeros(img_shape)

    return mask
Example #30
def auto_save(name, f_path, threshold=None):
    all_path = os.path.join(os.getcwd(), f_path)
    sk_array = sk_data.load(f=all_path, as_gray=True)
    sk_array_str = str(sk_array.tolist())
    threshold = 80 if threshold is None else threshold
    db.delete_data(model_name='ImageTemplate',
                   filter_dic={'template_type': name})
    add_d = {
        'template_type': name,
        'data': sk_array_str,
        'threshold': threshold,
        'status': 1
    }
    db.add(model=ImageTemplate, add_dic=add_d)
Example #31
    def __set_image__(self,f_name):
        c = self.conn.cursor()
        c.execute("select subject_id from subject_info where fname = '" + str(f_name)+"'")
        r = c.fetchone()

        if r is None:
            c.execute("select count(*) from subject_info")
            self.subject_id = c.fetchone()[0]

            params = (self.subject_id,f_name,self.template_id)
            c.execute("insert into subject_info values(?,?,?)",params)
            self.conn.commit()
        else:
            self.subject_id = r[0]
        self.image = load(f_name)
Example #32
def loop(imgFiles):
    for y in range(0, 20):
        for f in imgFiles:
            img = img_as_float(data.load(os.path.join(noisyDir, f)))
            startTime = time.time()
            dim = img  # note: alias of img, not a copy; the loop below modifies it in place
            #img = denoise_bilateral(img, sigma_color=0.05, sigma_spatial=15,  multichannel=True)
            temp = 0.0
            for iaa, aa in enumerate(img):
                for ibb, bb in enumerate(aa):
                    temp = (aa[ibb][0] + aa[ibb][1] + aa[ibb][2]) / 3
                    dim[iaa][ibb][0] = temp
                    dim[iaa][ibb][1] = temp
                    dim[iaa][ibb][2] = temp
            io.imsave(os.path.join(denoisedDir, f), dim)
Example #33
def segment_0(path):
    #image = data.binary_blobs()
    image = data.load(path)
    gray=image[:,:,0]
    val = filters.threshold_local(gray,block_size=131)
    val = gray > val
    #mask = gray < val
    #image_show(gray)
    image_show(val)
    return

    # NOTE: everything below is unreachable because of the return above.
    seg.active_contour(gray)
    #segmented = image > (value concluded from histogram i.e 50, 70, 120)
    text_threshold = filters.threshold_local(gray, block_size=51, offset=10)
    image_show(gray < text_threshold)
Example #34
def load_image(file):

    # read the image
    img = data.load(file)

    # binarize it in case it is grayscale rather than already boolean
    if not img.dtype == 'bool':
        thr = filter.threshold_otsu(img)
        img = img > thr

    # if more than half of the pixels are white, invert the image
    if img.sum() > 0.5 * img.size:
        img = np.bitwise_not(img)

    return img
Example #35
    def __update_classifier__(self):
        self.classifier = blog.old_weather.learning.NearestNeighbours()

        cursor = self.conn.cursor()
        cursor.execute("select subject_id,region_id,column_id,row_id,digit_index,pixels from cells")

        for subject_id,region_id,column_id,row_id,digit_index,pixels in cursor.fetchall():
            cursor.execute("select fname from subject_info where subject_id = " + str(subject_id))
            fname = cursor.fetchone()[0]
            image = load(fname)

            pixels = json.loads(pixels)

            _,algorithm_digit,prob = self.classifier.__identify_digit__(image,pixels,collect_gold_standard=False)
            cursor.execute("update cells set algorithm_classification = " + str(algorithm_digit) + ", probability = " + str(prob) + " where subject_id = " + str(subject_id) + " and region_id = " + str(region_id) + " and column_id = " + str(column_id) + " and row_id = " + str(row_id) + " and digit_index = " + str(digit_index))
        self.conn.commit()
Example #36
    def __set_image__(self, f_name):
        c = self.conn.cursor()
        c.execute("select subject_id from subject_info where fname = '" +
                  str(f_name) + "'")
        r = c.fetchone()

        if r is None:
            c.execute("select count(*) from subject_info")
            self.subject_id = c.fetchone()[0]

            params = (self.subject_id, f_name, self.template_id)
            c.execute("insert into subject_info values(?,?,?)", params)
            self.conn.commit()
        else:
            self.subject_id = r[0]
        self.image = load(f_name)
Example #37
def load_data(data_directory):
    directories = [d for d in os.listdir(data_directory)
                   if os.path.isdir(os.path.join(data_directory, d))]
    labels = []
    images = []
    for d in directories:
        label_directory = os.path.join(data_directory, d)
        file_names = [os.path.join(label_directory, f)
                      for f in os.listdir(label_directory)
                      if f.endswith(".jpg")]

        for f in file_names:
            images.append(data.load(f))
            labels.append(__class2id__[d])

    print("{0} images loaded in {1} classes".format(len(images), len(set(labels))))
    return images, labels
Example #38
def cluster_images(path, k, batch_size):
    """
    :param path: path to a folder with image files
    :return:
    """
    dir = os.listdir(path)
    images = np.zeros((len(dir), 3))

    for i, imgname in enumerate(dir):
        img = load(path + imgname)
        images[i, 0] = np.mean(img)
        images[i, 1] = np.var(img)
        images[i, 2] = calc_entropy(img)
        print(str(i) + "/" + str(len(dir)))


    estimator = MiniBatchKMeans(n_clusters=k, verbose=True, batch_size=batch_size)
    estimator.fit(images)
    from sklearn.externals import joblib
    joblib.dump(estimator, 'estimator.pkl')
    np.save('data.npy', images)
Example #39
def compareAll(imageFileName, thumbs):
    im = data.load(imageFileName)
    template_summary = summary(uint8(transform.resize(im, [70,70,3])*255))
    
    count = 0
    comparisons = []
    for (title, thumb) in thumbs.items():
        compressed_thumb = fromSummary(thumb)
        count = count + 1
        comparisons.append((dist(template_summary, compressed_thumb),title))
        
        if count > 1000000:
            break
        if count % 1000==0:
            print "\rAt " + str(count),
    
    s = sorted(comparisons)
    
    cutoff = argmax(gradient([ii[0] for ii in s if ii[0] > 50])[100:40000])
    cutoff = cutoff + 100
    return s, cutoff
Example #40
    def __example_plot__(self,f_name,region_id,row_index,column_index):
        self.image = load(f_name)
        cursor = self.conn.cursor()

        cursor.execute("select template_id from subject_info where fname = \"" + f_name +"\"")
        template_id = cursor.fetchone()[0]

        cursor.execute("select boundary from cell_boundaries where template_id = " + str(template_id) + " and region_id = " + str(region_id) + " and column_id = " + str(column_index) + " and row_id = " + str(row_index))

        boundary_box = json.loads(cursor.fetchone()[0])

        x,y = zip(*boundary_box)
        x_max = int(max(x))
        y_max = int(max(y))
        x_min = int(min(x))
        y_min = int(min(y))


        sub_image = self.image[np.ix_(range(y_min,y_max+1), range(x_min,x_max+1))]
        fig, ax = plt.subplots()
        im = ax.imshow(sub_image)

        plt.tick_params(
                    axis='x',          # changes apply to the x-axis
                    which='both',      # both major and minor ticks are affected
                    bottom='off',      # ticks along the bottom edge are off
                    top='off',         # ticks along the top edge are off
                    labelbottom='off')

        plt.tick_params(
            axis='y',          # changes apply to the y-axis
            which='both',      # both major and minor ticks are affected
            left='off',        # ticks along the left edge are off
            right='off',       # ticks along the right edge are off
            labelleft='off')

        plt.show()
        pixels = self.__pixel_generator__(boundary_box)
        self.__pixels_to_clusters__(pixels,True,y_min,y_max)
Example #41
def match(feats, ref_feats):
    # head of the function reconstructed from the scikit-image Gabor
    # filter-bank tutorial, which this snippet follows
    min_error = np.inf
    min_i = None
    for i in range(ref_feats.shape[0]):
        error = np.sum((feats - ref_feats[i, :]) ** 2)
        if error < min_error:
            min_error = error
            min_i = i
    return min_i


# prepare filter bank kernels
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(gabor_kernel(frequency, theta=theta,
                                          sigma_x=sigma, sigma_y=sigma))
            kernels.append(kernel)


shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)

# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)

print('Rotated images matched against references using Gabor filter banks:')

print('original: brick, rotated: 30deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
Example #42
# settings for LBP
radius = 3
n_points = 8 * radius
METHOD = 'uniform'  # assumed; the scikit-image LBP tutorial uses 'uniform'


def overlay_labels(image, lbp, labels):
    mask = np.logical_or.reduce([lbp == each for each in labels])
    return label2rgb(mask, image=image, bg_label=0, alpha=0.5)


def highlight_bars(bars, indexes):
    for i in indexes:
        bars[i].set_facecolor('r')


image = data.load('brick.png')
lbp = local_binary_pattern(image, n_points, radius, METHOD)


def hist(ax, lbp):
    n_bins = int(lbp.max() + 1)
    return ax.hist(lbp.ravel(), density=True, bins=n_bins, range=(0, n_bins),
                   facecolor='0.5')


# plot histograms of LBP of textures
fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
plt.gray()

titles = ('edge', 'flat', 'corner')
w = width = radius - 1
Example #43
"""
from __future__ import print_function
from skimage import data
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import measure

# from skimage.data import astronaut
from skimage.segmentation import felzenszwalb, slic, quickshift
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float

from sklearn.ensemble import RandomForestClassifier

img = img_as_float(data.load('D:/Sea_Ice_Photo/072610/Examples/072610_00022.jpg'))
#[::2, ::2])
#img = img_as_float(astronaut()[::2, ::2])

#[::2, ::2]
#segments_fz = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
#segments_slic = slic(img, n_segments=1000, compactness=2, sigma=1)
segments_quick = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)

#print("Felzenszwalb's number of segments: %d" % len(np.unique(segments_fz)))
#print("Slic number of segments: %d" % len(np.unique(segments_slic)))
print("Quickshift number of segments: %d" % len(np.unique(segments_quick)))

#fig, ax = plt.subplots(1, 3, sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
#fig.set_size_inches(8, 3, forward=True)
#fig.subplots_adjust(0.05, 0.05, 0.95, 0.95, 0.05, 0.05)
Example #44
import numpy as np
import skimage
import matplotlib.pyplot as plt
import sklearn.preprocessing


# this is a test function for checking different ways of calculating the histogram
def calculate_histogram(image, number_of_bins):
    # different ways to calculate histogram
    histogram_from_np, bins_from_np = np.histogram(image, bins=number_of_bins)
    histogram_from_scimage, bins_from_scimage = skimage.exposure.histogram(image, nbins=number_of_bins)
    # important - the image data has to be processed by ravel method
    histogram_from_matplotlib, bins_from_matplotlib, patches_from_matplotlib = plt.hist(image.ravel(), bins=number_of_bins, density=False)

    # all of these methods should return same result
    print("histogram_from_np: \n" + str(histogram_from_np))
    print("histogram_from_scimage: \n" + str(histogram_from_scimage))
    print("histogram_from_matplotlib: \n" + str(histogram_from_matplotlib))

    # how to normalize
    #normalized_histogram = sklearn.preprocessing.normalize(skimage.img_as_float(histogram_from_np[:, np.newaxis]), axis=0).ravel()
    # this can also be done as a parameter for matplotlib hist method

if __name__ == "__main__":
    from skimage import data
    img = data.load('brick.png')
    calculate_histogram(img, 9)
Example #45
import skimage
from skimage import data, io, filter
import numpy as np
import matplotlib.pyplot as plt

test_image = data.load("/home/larry/VisionCode/images_building.jpg")
image1 = data.moon()


def image_show(image):
    f = plt.figure()
    io.imshow(image)
    f.show()


def image_array_create(x, y):
	a = np.ndarray((x, y), dtype=np.ndarray)
	for i in range(0, x):
		for j in range(0, y):
			a[i][j] = None
	return a

def increase_array_size(array,add_x,add_y):
	a = np.ndarray((array.shape[0]+add_x,array.shape[1]+add_y ),dtype = np.ndarray)
	for x in range(0, array.shape[0]):
		for y in range(0, array.shape[1]):
			a[x][y] = array[x][y]
	return a

def image_cut(image, x_starting_location, y_starting_location, x_image_size, y_image_size):
	a = image[x_starting_location: x_starting_location + x_image_size, y_starting_location:y_starting_location + y_image_size]
	return a
Example #46
def match(refs, img):
    best_score = 10
    best_name = None
    lbp = local_binary_pattern(img, P, R, METHOD)
    hist, _ = np.histogram(lbp, density=True, bins=P + 2, range=(0, P + 2))
    for name, ref in refs.items():
        ref_hist, _ = np.histogram(ref, density=True, bins=P + 2, range=(0, P + 2))
        score = kullback_leibler_divergence(hist, ref_hist)
        if score < best_score:
            best_score = score
            best_name = name
    return best_name


brick = data.load("brick.png")
grass = data.load("grass.png")
wall = data.load("rough-wall.png")

refs = {
    "brick": local_binary_pattern(brick, P, R, METHOD),
    "grass": local_binary_pattern(grass, P, R, METHOD),
    "wall": local_binary_pattern(wall, P, R, METHOD),
}

# classify rotated textures
print "Rotated images matched against references using LBP:"
print "original: brick, rotated: 30deg, match result:",
print match(refs, rotate(brick, angle=30, resize=False))
print "original: brick, rotated: 70deg, match result:",
print match(refs, rotate(brick, angle=70, resize=False))
Example #47
def extract_features_img(path, centroids, rfSize, M, U, stride, normal_pooling=True):
    """
    :param path: path to RGB retina image
    :param centroids: learned centroids
    :param rfSize: receptive field size
    :param M: whitening parameter
    :param U: whitening parameter
    :param stride: parameter that defines the density of windows that are extracted from an image
    :param normal_pooling: if true:
                               divide in 4 regions and pool each one
                           else: divide in 16 regions and pool each one
    :return:feature_vector
    """

    img = load(path)
    try:
        img = img[:, :, 1]
    except IndexError:
        return None

    #contrast enhancing
    img = equalize_adapthist(img)
    numFeats = img.shape[0] * img.shape[1]
    numCentroids = centroids.shape[0]

    #extract dense patches with predefined stride
    #smaller the stride, slower the function
    windows = view_as_windows(img, (rfSize, rfSize), stride)
    patches = np.reshape(windows, (windows.shape[0]*windows.shape[1], rfSize*rfSize))

    #data normalization
    p_mean = np.mean(patches, axis=1, dtype=np.float32, keepdims=True)
    p_var = np.var(patches, axis=1, dtype=np.float32, ddof=1, keepdims=True)
    off_matrix = 10.0 * np.ones(p_var.shape)
    patches = (patches - p_mean) / np.sqrt(p_var + off_matrix)
    
    patches = np.dot((patches - M), U)
    
    #calculate distance from all patches to all centroids
    z = native_cdist(patches, centroids)
    
    #mean distance from each patch to all centroids
    #triangle activation function
    mu = np.tile(np.array([np.mean(z, axis = 1)]).T, (1, centroids.shape[0]))
    patches = np.maximum(mu - z, np.zeros(mu.shape))

    rows = (img.shape[0] - rfSize + stride) // stride
    columns = (img.shape[1] - rfSize + stride) // stride
        
    patches = np.reshape(patches, (rows, columns, numCentroids))

    #starting points
    #central point # of the patches "image"
    halfr = int(round(float(rows) / 2))
    halfc = int(round(float(columns) / 2))

    #pool quadrants
    if normal_pooling:
        q1 = np.array([np.sum(np.sum(patches[0:halfc, 0:halfr, :], axis=1), axis=0)])
        q2 = np.array([np.sum(np.sum(patches[halfc:patches.shape[0], 0:halfr, :], axis=1), axis=0)])
        q3 = np.array([np.sum(np.sum(patches[0:halfc, halfr:patches.shape[1], :], axis=1), axis=0)])
        q4 = np.array([np.sum(np.sum(patches[halfc:patches.shape[0], halfr:patches.shape[1], :], axis=1), axis=0)])
        feature_vector = np.vstack((q1, q2, q3, q4)).flatten()

    else:
        
        quartr = int(round(float(rows) / 4))
        quartc = int(round(float(columns) / 2))
        q1 = pool_quadrant(patches, 0, 0, quartc, quartr, halfc, halfr)        
        q2 = pool_quadrant(patches, halfc, 0, quartc, quartr, patches.shape[0], halfr) 
        q3 = pool_quadrant(patches, 0, halfr, quartc, quartr, halfc, patches.shape[1])
        q4 = pool_quadrant(patches, halfc, halfr, quartc, quartr, patches.shape[0], patches.shape[1])
        feature_vector = np.vstack((q1, q2, q3, q4)).flatten()

               
    return feature_vector
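
native_cdist is the repository's own distance routine; scipy.spatial.distance.cdist computes the same patch-to-centroid distance matrix, assuming Euclidean distance is intended:

from scipy.spatial.distance import cdist

def native_cdist(patches, centroids):
    # assumed behavior: pairwise Euclidean distances, shape (n_patches, n_centroids)
    return cdist(patches, centroids, metric='euclidean')
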
Example #48
#!/usr/bin/env python
__author__ = 'greg'

from skimage import data
from skimage import transform as tf
from skimage.feature import CENSURE,canny
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
from skimage.filters.rank import entropy
from skimage.util import img_as_ubyte
from skimage.morphology import disk

img1 = rgb2gray(data.load("/home/greg/Databases/serengeti/blob/50c210188a607540b9000012_0.jpg"))
edges1 = canny(img1)
fig,ax = plt.subplots(nrows=1, ncols=1)
ax.imshow(edges1, cmap=plt.cm.gray)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.savefig("/home/greg/Databases/serengeti/blob/edges.jpg",bbox_inches='tight', pad_inches=0)

fig, ax = plt.subplots(nrows=1, ncols=1)
detector = CENSURE()

img2 = rgb2gray(data.load("/home/greg/Databases/serengeti/blob/edges.jpg"))

# detector.detect(img2)
# ax.scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],2 ** detector.scales, facecolors='none', edgecolors='r')
# plt.show()

# image = img_as_ubyte(rgb2gray(data.load("/home/greg/Databases/serengeti/blob/50c210188a607540b9000012_0.jpg")))
#

Example #49
References
----------
.. [1] Xie, Yonghong, and Qiang Ji. "A new efficient ellipse detection
       method." Pattern Recognition, 2002. Proceedings. 16th International
       Conference on. Vol. 2. IEEE, 2002
"""
import matplotlib.pyplot as plt

from skimage import data, filter, color
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter

# Load picture, convert to grayscale and detect edges
image_rgb = data.load('coffee.png')[0:220, 100:450]
image_gray = color.rgb2gray(image_rgb)
edges = filter.canny(image_gray, sigma=2.0,
                     low_threshold=0.55, high_threshold=0.8)

# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
accum = hough_ellipse(edges, accuracy=10, threshold=170, min_size=50)
accum.sort(order='accumulator')  # hough_ellipse returns a structured array; sort by accumulator
# Estimated parameters for the ellipse
center_y = int(accum[-1][0])
center_x = int(accum[-1][1])
xradius = int(accum[-1][2])
yradius = int(accum[-1][3])
Example #50
def imageProcess(x):
    return resize(rgb2grey(skimData.load(x)), [150, 150])
Example #51
from skimage.viewer import ImageViewer
from skimage.viewer.plugins.color_histogram import ColorHistogram
from skimage import data


image = data.load('color.png')
viewer = ImageViewer(image)
viewer += ColorHistogram(dock='right')
viewer.show()
Example #52
__author__ = 'junwangcas'
import numpy as np

from scipy import ndimage as nd

from skimage import data
from skimage.util import img_as_float
from skimage.filter import gabor_kernel

brick = img_as_float(data.load('brick.png'))

kernel = np.real(gabor_kernel(0.15, theta=0.5 * np.pi, sigma_x=5, sigma_y=5))

filtered = nd.convolve(brick, kernel, mode='reflect')

mean = filtered.mean()
variance = filtered.var()
Example #53
import matplotlib
import numpy as np
from scipy import ndimage as nd

from skimage import data
from skimage.util import img_as_float
from skimage.filter import gabor_kernel


from skimage.feature import hog
from skimage import data, color, exposure

# Includes code modified from http://scikit-image.org/docs/dev/auto_examples/plot_gabor.html#example-plot-gabor-py

image = img_as_float(data.load('/Users/davidharris/Downloads/untitled.png'))

# prepare filter bank kernels
kernels = []
for theta in (4, 3, 2, 1):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(gabor_kernel(frequency, theta=theta,
                                          sigma_x=sigma, sigma_y=sigma))
            kernels.append(kernel)



brick = img_as_float(data.load('/Users/davidharris/Downloads/untitled.png'))
Example #54
import numpy as np
from matplotlib import pyplot as plt

from skimage import data, img_as_float
from skimage.feature import harris


def plot_harris_points(image, filtered_coords):
    plt.imshow(image)
    y, x = np.transpose(filtered_coords)
    plt.plot(x, y, 'b.')
    plt.axis('off')

# results
plt.figure(figsize=(8, 3))
image = img_as_float(data.load("c.jpeg"))

filtered_coords = harris(image, min_distance=4)

plt.axes([0.2, 0, 0.77, 1])
plot_harris_points(image, filtered_coords)

plt.show()
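
skimage.feature.harris only existed in very old scikit-image releases; current versions expose corner_harris plus corner_peaks instead. A sketch of an equivalent call, keeping min_distance=4 from the example (the input must be grayscale):

from skimage.feature import corner_harris, corner_peaks

def harris(image, min_distance=4):
    # modern replacement for the removed skimage.feature.harris
    return corner_peaks(corner_harris(image), min_distance=min_distance)
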
Example #55
img3 = img3[:, :1392]
img3 = cv2.resize(img3, (256, 256))
img3 = img3 / (2 ** (4))


img4 = cv2.imread(path4, -1)
img4 = img4[:, :1392]
img4 = cv2.resize(img4, (256, 256))
img4 = img4 / (2 ** (4))
# img = cv2.resize(img,(256,256))
# img = img/(2**(4))
# img = np.uint8(img)


shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load("brick.png"))[shrink]
grass = img_as_float(data.load("grass.png"))[shrink]
wall = img_as_float(data.load("rough-wall.png"))[shrink]
image_names = ("F1", "F2", "F3", "F4")
images = (img1, img2, img3, img4)

# prepare reference features


def power(image, kernel):
    # Normalize images for better comparison.
    image = (image - image.mean()) / image.std()
    return np.sqrt(
        nd.convolve(image, np.real(kernel), mode="wrap") ** 2 + nd.convolve(image, np.imag(kernel), mode="wrap") ** 2
    )
Example #56
# settings for LBP
radius = 3
n_points = 8 * radius
METHOD = 'uniform'  # assumed; the scikit-image LBP tutorial uses 'uniform'


def overlay_labels(image, lbp, labels):
    mask = np.logical_or.reduce([lbp == each for each in labels])
    return label2rgb(mask, image=image, bg_label=0, alpha=0.5)


def highlight_bars(bars, indexes):
    for i in indexes:
        bars[i].set_facecolor('r')

print(data.data_dir)
image = data.load('trees.jpg')
lbp = local_binary_pattern(image, n_points, radius, METHOD)

def hist(ax, lbp):
    n_bins = int(lbp.max() + 1)
    return ax.hist(lbp.ravel(), density=True, bins=n_bins, range=(0, n_bins),
                   facecolor='0.5')

# plot histograms of LBP of textures
fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
plt.gray()

titles = ('edge', 'flat', 'corner')
w = width = radius - 1
edge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)
flat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))
Example #57
    def __init__(self):
        NearestNeighbours.__init__(self)

        cursor = self.conn.cursor()

        buckets = {i:[] for i in range(10)}

        for i in range(len(self.training[0])):
            a = self.training[0][i]
            b = self.training[1][i]

            buckets[b].append(a)

        cursor.execute("select subject_id,region_id,column_id,row_id,digit_index from cells")

        probabilities = {}
        correctness = {}

        ids = []


        for id_ in cursor.fetchall():
            # print alg,gold
            # id_ = subject_id,region_id,column_id,row_id

            ids.append(id_)

        training_indices = random.sample(ids,40)

        new_training = {i:[] for i in range(10)}

        cursor.execute("select subject_id,region_id,column_id,row_id,digit_index,algorithm_classification, probability, gold_classification,pixels from cells")
        for (subject_id,region_id,column_id,row_id,digit,alg,p,gold,pixels) in cursor.fetchall():
            # print alg,gold
            id_ = subject_id,region_id,column_id,row_id,digit

            if gold < 0:
                continue

            if id_ in training_indices:
                cursor.execute("select fname from subject_info where subject_id = " + str(subject_id))
                fname = cursor.fetchone()[0]
                image = load(fname)

                pixels,_ = self.__normalize_pixels__(image,json.loads(pixels))
                print(type(pixels))

                new_training[gold].append(list(pixels))

        for i in new_training.keys():
            print(i, len(new_training[i]))

        updated_training = []
        updated_labels = []

        for i in buckets:
            if new_training[i] == []:
                continue
            replacement_indices = random.sample(range(len(buckets[i])),int(0.5*len(buckets[i])))

            for j in replacement_indices:
                buckets[i][j] = random.sample(new_training[i],1)[0]


            updated_training.extend(buckets[i])
            updated_labels.extend([i for k in range(len(buckets[i]))])

        pca = PCA(n_components=50)
        print(updated_training[0])
        print(self.training[0][0])
        # updated_training = np.asarray(updated_training)
        self.T = pca.fit(updated_training)
        reduced_training = self.T.transform(updated_training)
        self.clf.fit(reduced_training, updated_labels)

        correct = 0
        total = 0.

        cursor.execute("select subject_id,region_id,column_id,row_id,digit_index, gold_classification,pixels from cells")
        for (subject_id,region_id,column_id,row_id,digit,gold,pixels) in cursor.fetchall():
            id_ = subject_id,region_id,column_id,row_id,digit

            if gold < 0:
                continue

            if id_ not in training_indices:
                cursor.execute("select fname from subject_info where subject_id = " + str(subject_id))
                fname = cursor.fetchone()[0]
                image = load(fname)

                _,algorithm_digit,_ = self.__identify_digit__(image,json.loads(pixels),False)
                # pixels = self.__normalize_pixels__(image,json.loads(pixels))

                if algorithm_digit == gold:
                    correct += 1
                total += 1
        print(correct / total)
Example #58
def previewThumbs(thumbList, dims, plottitle=""):

    tsize = 50
    clickedList = []
    thumbdir = 'D:\\memeproject\\thumbnails\\'
    pview = uint8(np.zeros((tsize * dims[0], tsize * dims[1], 3)))
    for i in range(0,dims[0]*dims[1]):
        if i >= len(thumbList) :
            break
        # Sometimes images have 4 channels. Crop it to 3
        pthumb = skimage.transform.resize(data.load(thumbdir + thumbList[i])[:,:,0:3], (tsize,tsize))
        pthumb = uint8(255*pthumb);
        x = i // dims[1]
        y = i % dims[1]

        pview[tsize*x:tsize*(x+1), tsize*y:tsize*(y+1),:] = pthumb

    pviewOriginal = copy(pview)
    # Generate Plot and assign callback on click
    thumbPlot = figure()
    ax = thumbPlot.add_subplot(111)
    imageWindow = ax.imshow(pview)
    title(plottitle)

    def coord2ij(coord):
        # Coordinate to (i,j) position on screen
        return (coord // dims[1], coord % dims[1])

    def highlight(coord):
        ij = coord2ij(coord)
        x = (ij[0]*tsize,(ij[0]+1)*tsize)
        y = (ij[1]*tsize,(ij[1]+1)*tsize)

        pview[ x[0]:x[1] , y[0]:y[1] ] = pview[ x[0]:x[1] , y[0]:y[1] ] + 10
        return pview

    def setborder(coord):
        ij = coord2ij(coord)
        x = (ij[0]*tsize,(ij[0]+1)*tsize)
        y = (ij[1]*tsize,(ij[1]+1)*tsize)

        pview[ x[0]:x[1] , y[0]:y[1] ] = 0
        pview[ (x[0] + 3):(x[1] - 3) , (y[0] + 3):(y[1] - 3) ] = 255

        pview[ (x[0] + 6):(x[1] - 6) , (y[0] + 6):(y[1] - 6) ] = pviewOriginal[ (x[0] + 6):(x[1] - 6) , (y[0] + 6):(y[1] - 6) ]

        return pview

    def unhighlight(coord):
        ij = coord2ij(coord)

        x = (ij[0]*tsize,(ij[0]+1)*tsize)
        y = (ij[1]*tsize,(ij[1]+1)*tsize)

        pview[ x[0]:x[1] , y[0]:y[1] ] = pviewOriginal[ x[0]:x[1] , y[0]:y[1] ]
        return pview

    def onclick(event):

        if event.ydata is None or event.xdata is None:
            # User clicked outside the box
            return

        ij = (math.floor(event.ydata/tsize), math.floor(event.xdata/tsize))
        coord = int((ij[0] * dims[1]) + ij[1])

        if event.button == 2:
            # Middle click toggles selection of a thumbnail
            if coord in clickedList:
                unhighlight(coord)
                clickedList.remove(coord)
            else:
                setborder(coord)
                clickedList.append(coord)

        if event.button == 3:
            # Right Click to choose cutoff
            del clickedList[:]
            pview[:] = pviewOriginal[:]

            for i in range(0,coord+1):
                clickedList.append(i)
                setborder(i)

        clickedList.sort()

        imageWindow.set_data(pview)
        thumbPlot.canvas.draw()

    thumbPlot.canvas.mpl_connect('button_press_event', onclick)
    return clickedList