Example no. 1
import cv2
import numpy as np
import otsu  # project-local module exposing otsu(image)


def segment(filename):

    original = cv2.imread(filename, cv2.IMREAD_UNCHANGED)

    kernel = np.ones((5, 5), np.uint8)
    height = original.shape[0]
    width = original.shape[1]

    grayscale = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
    # Smooth a patch near the bottom-right corner with a manual 5x5 box blur
    for i in range(height - 120, height - 6):
        for j in range(width - 120, width - 6):
            avg_val = 0
            for k in range(i - 2, i + 3):
                for l in range(j - 2, j + 3):
                    avg_val = avg_val + grayscale[k, l]
            avg_val = avg_val / 25
            grayscale.itemset((i, j), avg_val)

    # Binarise with Otsu's method, then clean up with a morphological
    # opening (erosion followed by dilation) and take the gradient
    binary = otsu.otsu(grayscale)
    opened = cv2.erode(binary, kernel, iterations=3)
    opened = cv2.dilate(opened, kernel, iterations=3)
    gradient = cv2.morphologyEx(opened, cv2.MORPH_GRADIENT, kernel)
    cv2.imshow('Smoothed grayscale', grayscale)
    cv2.imshow('Morphological gradient', gradient)
    cv2.imwrite('Otsu.jpg', opened)
    cv2.waitKey()
    cv2.destroyAllWindows()
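Most of the examples in this listing call a project-local `otsu.otsu` helper rather than OpenCV's built-in thresholding. As a reference for what such a helper typically computes, here is a minimal sketch of the standard Otsu algorithm on a grayscale array; the function name `otsu_threshold` and the convention of returning an integer threshold (rather than a binarized image) are assumptions for illustration, not the code of any particular example.

import numpy as np

def otsu_threshold(gray):
    # Sketch only: gray is assumed to be a 2-D uint8 array.
    hist = np.bincount(gray.ravel(), minlength=256).astype(np.float64)
    probs = hist / hist.sum()
    bins = np.arange(256)
    best_t, best_var = 0, -1.0
    for t in range(1, 256):
        w0 = probs[:t].sum()                      # background weight
        w1 = 1.0 - w0                             # foreground weight
        if w0 == 0.0 or w1 == 0.0:
            continue
        mu0 = (probs[:t] * bins[:t]).sum() / w0   # background mean
        mu1 = (probs[t:] * bins[t:]).sum() / w1   # foreground mean
        between = w0 * w1 * (mu0 - mu1) ** 2      # between-class variance
        if between > best_var:
            best_var, best_t = between, t
    return best_t

A binary image then follows from `np.where(gray >= otsu_threshold(gray), 255, 0).astype(np.uint8)`.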
Example no. 2
def get_otsu_threshold(image,
                       mask=None,
                       two_class_otsu=True,
                       use_weighted_variance=True,
                       assign_middle_to_foreground=True):
    if mask is not None:
        image = image[mask]
    else:
        image = np.array(image.flat)
    image = image[image >= 0]
    if len(image) == 0:
        return 1
    image, d = log_transform(image)
    if two_class_otsu:
        if use_weighted_variance:
            threshold = otsu(image)
        else:
            threshold = entropy(image)
    else:
        if use_weighted_variance:
            t1, t2 = otsu3(image)
        else:
            t1, t2 = entropy3(image)
        threshold = t1 if assign_middle_to_foreground else t2
    threshold = inverse_log_transform(threshold, d)
    return threshold
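`get_otsu_threshold` relies on `log_transform` / `inverse_log_transform` helpers that are not shown in this listing. The pair below is only a hypothetical sketch of that pattern (offset the data so the logarithm is defined, threshold in log space, then map back with the exponential); the actual helpers in the source project may differ.

import numpy as np

def log_transform(image):
    # Hypothetical sketch: shift values so they are strictly positive before the log.
    offset = 1.0 - min(0.0, float(image.min()))
    return np.log(image + offset), offset

def inverse_log_transform(value, offset):
    # Undo the log and the offset so the threshold is on the original intensity scale.
    return np.exp(value) - offset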
Example no. 3
def get_ridler_calvard_threshold(image, mask=None):
    """Find a threshold using the method of Ridler and Calvard
    
    The reference for this method is:
    "Picture Thresholding Using an Iterative Selection Method" 
    by T. Ridler and S. Calvard, in IEEE Transactions on Systems, Man and
    Cybernetics, vol. 8, no. 8, August 1978.
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    if np.prod(cropped_image.shape) < 3:
        return 0
    if np.min(cropped_image) == np.max(cropped_image):
        return cropped_image[0]

    # We want to limit the dynamic range of the image to 256. Otherwise,
    # an image with almost all values near zero can give a bad result.
    min_val = np.max(cropped_image) / 256
    cropped_image[cropped_image < min_val] = min_val
    im = np.log(cropped_image)
    min_val = np.min(im)
    max_val = np.max(im)
    im = (im - min_val) / (max_val - min_val)
    pre_thresh = 0
    # This method needs an initial value to start iterating. Using
    # graythresh (Otsu's method) is probably not the best, because the
    # Ridler Calvard threshold ends up being too close to this one and in
    # most cases has the same exact value.
    new_thresh = otsu(im)
    delta = 0.00001
    while abs(pre_thresh - new_thresh) > delta:
        pre_thresh = new_thresh
        mean1 = np.mean(im[im < pre_thresh])
        mean2 = np.mean(im[im >= pre_thresh])
        new_thresh = np.mean([mean1, mean2])
    return math.exp(min_val + (max_val - min_val) * new_thresh)
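A quick sanity check is to call the function on a synthetic image with two well-separated intensity populations and confirm that the returned threshold lands between them. The array sizes and distribution parameters below are made up purely for illustration.

import numpy as np

rng = np.random.default_rng(0)
dark = rng.normal(loc=0.1, scale=0.02, size=(64, 64))     # background population
bright = rng.normal(loc=0.8, scale=0.05, size=(64, 64))   # foreground population
image = np.clip(np.concatenate([dark, bright], axis=1), 0.0, 1.0)

t = get_ridler_calvard_threshold(image)
print(t)  # expected to fall somewhere between the two populations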
Example no. 4
def classify(image):
    train_images_gray, train_labels = read_data.read(range(10), 'training')
    if USEBW:
        train_images_bw = convert_bw(train_images_gray)
        test = otsu.otsu(numpy.array(jtov.jtov(image)))
        clf = svm.SVC(kernel="poly", degree=1)
        clf.fit(train_images_bw[:10000], train_labels[:10000])
        print(clf.predict(test))
    else:
        test = numpy.array(jtov.jtov(image))
        clf = svm.SVC(kernel="poly", degree=2)
        clf.fit(train_images_gray[:10000], train_labels[:10000])
        print(clf.predict(test))
Example no. 5
def detect_particles(video):
    '''
    This function detects the positions of particles. Otsu's method is
    used to compute the threshold, and then the coordinates of the
    particles are calculated.
    
    It can be tested with following code:
    import detect_particles as dp
    import numpy as np
    A = np.array([[
                    [1,0,1,0,4,5],
                    [7,1,3,1,5,1],
                    [1,5,1,4,0,1],
                    [0,1,8,1,4,7],
                    [3,2,4,5,2,0],
                    [1,5,1,4,0,1]],
                    [[0,1,3,1,5,9],
                    [1,0,1,4,0,9],
                    [0,2,0,0,2,9],
                    [1,0,1,0,4,5],
                    [1,0,1,4,0,9],
                    [0,1,0,1,4,7]]])
    dp.detect_particles(A)
    '''
    positions = []
    for i in range(video.shape[0]):
        pos = ps.Position(0,0)
        count = 0
        slice = video[i]
        threshold = ot.otsu(video[i])
        for j in range(video.shape[1]):
            for k in range(video.shape[2]):
                if (slice[j,k] < threshold):
                    slice[j,k] = 0
                else:
                    slice[j,k] = 1
                    pos.x += j
                    pos.y += k
                    count += 1

        pos.x /= count
        pos.y /= count
        positions.append(pos)

##        print threshold
##        print slice
##        print pos.x
##        print pos.y
##        print count
    return positions
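The nested per-pixel loops can also be expressed with vectorized NumPy operations: threshold the whole frame at once and average the row and column indices of the foreground pixels. The sketch below is an equivalent reformulation for comparison, not the original code; it assumes `ot.otsu` returns a scalar threshold and `ps.Position` takes (x, y) as in the loop version, and it skips the in-place 0/1 binarization of the frame.

import numpy as np

def detect_particles_vectorized(video):
    positions = []
    for frame in video:
        threshold = ot.otsu(frame)
        mask = frame >= threshold           # foreground pixels, as in the loops above
        rows, cols = np.nonzero(mask)       # indices of the foreground pixels
        positions.append(ps.Position(rows.mean(), cols.mean()))
    return positions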
Example no. 6
        def filter(self, job):
            img = job.img
            
            mode = img.mode
            if mode == '1':
                return
            elif mode == 'RGB':
                img = img.convert('L')
            elif mode != 'L':
                raise SaneError('unexpected image mode: %s' % mode)
            
            hist = img.histogram()
            t = otsu.otsu(hist)
            print "Otsu threshold value", t
            
            img = img.point(lambda x: x > t and 255 or 0, '1')

            job.img = img
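Unlike the other examples, this filter hands `otsu.otsu` a 256-bin histogram (the list returned by PIL's `Image.histogram()` for a mode 'L' image) rather than pixel data. A minimal sketch of an Otsu computation working directly on such a histogram might look like the following; the name `otsu_from_histogram` is an assumption for illustration.

def otsu_from_histogram(hist):
    # hist: 256 pixel counts, e.g. from Image.histogram() on an 'L' image.
    total = sum(hist)
    sum_all = sum(i * h for i, h in enumerate(hist))
    weight_bg = 0
    sum_bg = 0
    best_t, best_var = 0, -1.0
    for t in range(256):
        weight_bg += hist[t]
        if weight_bg == 0:
            continue
        weight_fg = total - weight_bg
        if weight_fg == 0:
            break
        sum_bg += t * hist[t]
        mean_bg = sum_bg / weight_bg
        mean_fg = (sum_all - sum_bg) / weight_fg
        between = weight_bg * weight_fg * (mean_bg - mean_fg) ** 2
        if between > best_var:
            best_var, best_t = between, t
    return best_t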
Example no. 7
def cal_imgs(dir_addr, train_or_test):
    namelist = filelist(dir_addr)
    data = []
    for filename in namelist:
        im = Image.open(dir_addr + '/' + filename)
        imarray = numpy.array(im)
        width = im.size[0]
        height = im.size[1]
        my_im = Image.new("L", (width, height))
        my_im_array = numpy.array(my_im)

        threshold = otsu.otsu(imarray, height, width)

        otsu.scan_image(imarray, my_im_array, height, width, threshold)

        data.append(feature_extract(my_im_array))

    dir_addr = dir_addr[dir_addr.index('/') + 1 : -1]
    write_file("./result/classifier_1/" + train_or_test + '/' + dir_addr + '_dataset.csv', data)
Example no. 8
import matplotlib
import sys

import draw
import extract
import calculation
import iris
import pupil
import otsu

draw = draw.draw()
extract = extract.extract()
calc = calculation.calculation()
iris = iris.iris()
pupil = pupil.pupil()
otsu = otsu.otsu()

matplotlib.rcParams['font.size'] = 8

def convertXPointList(pointList, x):
    """座標点リストをX,Y座標リストへ変換
    @ param1[in] pointList      座標点リスト
    @ param2[in] x              X座標の場合は0、Y座標の場合は1を指定
    @ param[out] xPointList     変換座標
    """
    xPointList = []
    for i in range(len(pointList)):
        if x == 0:
            xPointList.append(pointList[i][x])
        elif x == 1:
            xPointList.append(pointList[i][x])
Example no. 9
import otsu
import cv2
import numpy as np

if __name__ == "__main__":
    image = cv2.imread('7.jpg', cv2.IMREAD_GRAYSCALE)
    arr = np.asarray(image)
    arr2 = cv2.resize(arr, (28, 28))
    np.savetxt('./7_2.txt', arr2, fmt='%f')
    otsu.otsu(arr2)
Example no. 10
                            parity=serial.PARITY_NONE,
                            stopbits=serial.STOPBITS_ONE,
                            bytesize=serial.EIGHTBITS)

    while True:
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        if PREPROCESSING == Preprocessing.ADAPTIVE_THRESHOLD:
            resized = cv2.resize(gray, (28, 28))
            trunc_inv = cv2.adaptiveThreshold(resized, 255,
                                              cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                              cv2.THRESH_BINARY_INV, 11, 9)
        elif PREPROCESSING == Preprocessing.OTSU:
            resized = cv2.resize(gray, (28, 28))
            trunc_inv = otsu(resized)
        elif PREPROCESSING == Preprocessing.OTSU_EMBEDDED:
            resized = cv2.resize(gray, (28, 28))
            trunc_inv = otsu(resized)
        elif PREPROCESSING == Preprocessing.OTSU_ORIGINAL_IMAGE_NO_SERIAL:
            trunc_inv = otsu(gray)

        if PREPROCESSING != Preprocessing.OTSU_ORIGINAL_IMAGE_NO_SERIAL and SERIAL:
            send(ser, trunc_inv, PREPROCESSING)

        final = trunc_inv / 255.0
        cv2.imshow('Video', final)

        if PREPROCESSING == Preprocessing.OTSU_ORIGINAL_IMAGE_NO_SERIAL:  # resize for neural network
            final = cv2.resize(final, (28, 28))
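OpenCV also provides Otsu thresholding directly through `cv2.threshold` by combining `cv2.THRESH_OTSU` with the desired threshold type; this could stand in for the `otsu(resized)` calls above when the project-local implementation is not required. A sketch of that alternative (not the code used in the snippet):

import cv2

# Pass 0 as the threshold value: with THRESH_OTSU set, OpenCV computes the
# threshold from the 8-bit image's histogram and returns it alongside the result.
t, trunc_inv = cv2.threshold(resized, 0, 255,
                             cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)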
Example no. 11
import basic_threshold as bt
import otsu
from PIL import Image
import os, sys

try:
    img = Image.open(sys.argv[1])
    img.load()
    img.show()
    bw = img.convert('L')
    otsu.otsu(bw)
except IOError:
    print("Unable to open file. Please try another format or check spelling.")
Example no. 12
for i in range(len(train_images)):
    np.set_printoptions(precision=3,suppress=True)
    print(train_files[i], 'class:', train_class[i])#, 'data:', train_images[i])
    print('')

# logistic regression
t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
model = LogisticRegression()
print(train_images.shape,train_class.shape)
model.fit(train_images, train_class)
# make predictions
expected = test_class
predicted = model.predict(test_images)
print('expected:', expected)
print('predicted:', predicted)
# summarize the fit of the model
print('logistic regression time:', time.perf_counter() - t0)
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
error = np.zeros(4)
for i in range(len(test_images)):
    if expected[i] != predicted[i]:
        error[expected[i]] += 1
print('errors:',error)

kmeans('.\\img\\segmentation\\mix_ie.png')

mywatershed('.\\img\\segmentation\\mix_ie.png')

otsu('.\\img\\segmentation\\mix_ie.png')
Example no. 13
    whitematter = []
    greymatter = []
    img = cv2.imread(path[0] + arr[j], cv2.IMREAD_GRAYSCALE)
    # create a CLAHE object (Arguments are optional).  To make the image more distinct
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)

    #cv2.imshow('original', img)

    #Cropping the image
    cropped = img[36:187, 38:253]

    #cv2.imshow("cropped", cropped)
    #cv2.waitKey(0)

    otsu_img = otsu(cropped)
    #thres_img = histogramthresholding(cropped)  # Balanced histogram thresholding
    cols, rows = otsu_img.shape
    # get the white-matter and grey-matter patches using Otsu thresholding on the original image
    for y in range(0, cols, 10):
        for x in range(0, rows, 10):
            if otsu_img[y][x] == 255:
                whitematter.append(img[y:y + 10, x:x + 10])
            if otsu_img[y][x] == 0:
                greymatter.append(img[y:y + 10, x:x + 10])
    # Calculate GLCM parameters for the white matter
    for i in range(0, 5):
        glcm = greycomatrix(whitematter[i], [1], [0], 256, symmetric=True, normed=True)
        diss.append(greycoprops(glcm, 'dissimilarity')[0, 0])
        corr.append(greycoprops(glcm, 'contrast')[0, 0])  # 'contrast', despite the variable name
        classes.append(0)
Example no. 14
import basic_threshold as bt
import otsu 
from PIL import Image
import os, sys

try:
  img = Image.open(sys.argv[1])
  img.load()
  img.show()
  bw = img.convert('L')
  otsu.otsu(bw)
except IOError:
  print("Unable to open file. Please try another format or check spelling.")
Example no. 15
def convert_bw(images):
  bw = []
  for i in images:
    bw.append(numpy.array(otsu.otsu(i)))
  return numpy.array(bw)
Example no. 16
def thresholding(file_name, thresholds_count):
    return otsu.otsu(file_name, thresholds_count)
Example no. 17
File: p.py Project: bavaria95/mikro
def binarize(gray):
    threshold = otsu(gray)
    # threshold = 55
    return np.where(gray < threshold, np.uint8(255), np.uint8(0))
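For a quick usage check, `binarize` can be applied to any single-channel image loaded with OpenCV; the file names below are placeholders, and the `otsu` call inside `binarize` is assumed to accept a grayscale array as in the snippet.

import cv2

gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)   # placeholder file name
mask = binarize(gray)                                   # pixels below the threshold become 255
cv2.imwrite('binarized.png', mask)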