def run_simple(self):
        '''Simple prediction'''
        if len(self.datapath) >= 2:
            # Use only two previous images
            af_img = io.imread(self.datapath[0])
            bf_img = io.imread(self.datapath[1])
            
            #af_img = io.imread(r'./viptrafficof_02.png')
            #bf_img = io.imread(r'./viptrafficof_03.png')

            # Convert to gray image
            af_gray = color.rgb2gray(af_img)
            bf_gray = color.rgb2gray(bf_img)

            # Calculate dense optical flow (Farneback)
            # Small -> WHY?
            flow = cv2.calcOpticalFlowFarneback(bf_gray, af_gray, None,
                                                0.5, 6, 20, 10, 5, 1.2, 0)
            print(flow.shape, flow[:, :, 0].min(), flow[:, :, 1].max())
            self.before = bf_gray
            self.after = af_gray
            #self.result = self.current
            self.result = transform(af_img, flow)
            
            # Color code the result for better visualization of optical flow. 
            # Direction corresponds to Hue value of the image. 
            # Magnitude corresponds to Value plane
            
            mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            hsv = np.zeros_like(af_img)
            hsv[...,1] = 255
            hsv[...,0] = ang*180/np.pi/2
            hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            self.optical = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)        
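The HSV trick above (direction maps to hue, magnitude to value) is the standard way to color-code dense flow. A minimal standalone sketch of the same encoding, assuming `flow` is a Farneback field and `like` is any uint8 RGB frame of matching size:

import cv2
import numpy as np

def flow_to_rgb(flow, like):
    # hue encodes flow direction, value encodes flow magnitude
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros_like(like)                   # uint8 RGB-shaped buffer
    hsv[..., 1] = 255                           # full saturation
    hsv[..., 0] = ang * 180 / np.pi / 2         # OpenCV 8-bit hue range is [0, 180)
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)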
Example #2
def motion_trajectories_stack_feature_image(cap_stream,morp_stream):
    kernel = np.ones((5,5),np.uint8)  # structuring element for the morphological filtering
    stack_feature = np.zeros((240,320),dtype=float)
    frame_t1 = np.zeros((240,320),dtype=float)
    start = 0
    if cap_stream.isOpened():
        ret, frame = cap_stream.read()
        morp_frame = cv2.morphologyEx(frame,cv2.MORPH_CLOSE,kernel) # remove outliers using a closing operation
        morp_stream.write(morp_frame)
        while(True):
            frame_t1 = color.rgb2gray(morp_frame)

            if ret:
                # a pixel that differs from the previous frame contributes motion
                ret, frame = cap_stream.read()
                if ret:
                    morp_frame = cv2.morphologyEx(frame,cv2.MORPH_CLOSE,kernel) # remove outliers using a closing operation
                    morp_stream.write(morp_frame)
                    frame_t2 = color.rgb2gray(morp_frame)
                    stack_feature = stack_feature + (frame_t2-frame_t1)*0.1
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                else:
                    break

            else:
                break

    return stack_feature
Example #3
def process_images(d, files, analysis_fxn):
    '''
    Opens images and starts the analysis_fxn

    Inputs:
        d, string, current cell folder
        files, list of strings, names of the files in the
            current cell folder
        analysis_fxn, function, type of analysis to perform

    Returns:
        None
    '''

    nom = os.path.join(d, files[3]) #Nom_crop
    mch = os.path.join(d, files[2])

    nom_im = Image.open(nom)
    gray_nom = color.rgb2gray(nom_im)

    mch_im = Image.open(mch)
    gray_mch = color.rgb2gray(mch_im)

    analysis_fxn(gray_nom, gray_mch)
    return None
Example #4
def count_bubble(image_filename, ref_filename, plot_show = 0):
    
    image = io.imread(gv.__DIR__ + gv.__TrainImageDir__ + \
                      image_filename)
    ref_image = io.imread(gv.__DIR__ + gv.__TrainImageDir__ + \
                      ref_filename)

    image_gray = rgb2gray(image)
    ref_gray = rgb2gray(ref_image)

    # Constants
    Window_Size = 5
           
    pre_image = pre.noise_reduction(image_gray,
                                    ref_gray,
                                    Window_Size,
                                    mode = 0)
    seg_image = segmentation(pre_image,'self_design')
    perimeters = perimeter_exaction(seg_image, image, image_filename)
    if(plot_show == 1):
        fig, ax = plt.subplots(1,3)
        ax[0].imshow(image)
        ax[0].set_title('Original')
        ax[1].imshow(seg_image, cmap=plt.cm.gray)
        ax[1].set_title('Segmentation')
        result = io.imread(gv.__DIR__ + gv.cu__image_dir + image_filename)
        ax[2].imshow(result)
        ax[2].set_title('Result')
        plt.show()
       
    return perimeters
Example #5
def repeated_sales(df, artistname, artname, r2thresh=7000, fftr2thresh=10000, IMAGES_DIR='/home/ryan/asi_images/'):
    """
        Takes a dataframe, artistname and artname and tries to decide, via image matching, if there is a repeat sale. Returns a dict of lot_ids, each entry a list of repeat sales
    """
    artdf = df[(df['artistID']==artistname) & (df['artTitle']==artname)]

    artdf.images = artdf.images.apply(getpath)
    paths = artdf[['_id','images']].dropna()
    id_dict = {}
    img_buffer = {}
    already_ordered = []
    for i, path_i in paths.values:
        id_dict[i] = []
        img_buffer[i] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + path_i), (300,300))))
        for j, path_j in paths[paths._id != i].values:
            if j > i and j not in already_ordered:
                if j not in img_buffer:
                    img_buffer[j] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + path_j), (300,300))))
                if norm(img_buffer[i] - img_buffer[j]) < r2thresh and\
                        norm(fft2(img_buffer[i]) - fft2(img_buffer[j])) < fftr2thresh:
                    id_dict[i].append(j)
                    already_ordered.append(j)
    for key in list(id_dict.keys()):  # copy keys since entries are removed while iterating
        if id_dict[key] == []:
            id_dict.pop(key)
    return id_dict
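The matching criterion boils down to two L2 norms, one in pixel space and one in frequency space. A minimal sketch with the snippet's default thresholds, assuming two equal-size grayscale float arrays:

import numpy as np
from numpy.linalg import norm
from numpy.fft import fft2

def is_repeat_sale(img_a, img_b, r2thresh=7000, fftr2thresh=10000):
    # both the pixel-space and FFT-space differences must be small
    return (norm(img_a - img_b) < r2thresh and
            norm(fft2(img_a) - fft2(img_b)) < fftr2thresh)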
Example #6
def iris_scan_orb(request):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS3.jpg'))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS6.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # print("Matched: ", len(matches12), " of ", len(descriptors1))
    percent = len(matches12) / len(descriptors1) * 100

    # print("Percent Match - ", percent, "%")

    """if percent > 80:
        print("Matched!")
    else:
        print("Not Matched!")"""

    return render(request, 'scan.html', {'percent': percent})
Example #7
def image_compare(df, IMAGES_DIR='/home/ryan/asi_images/'):
    '''
    Takes a dataframe of image ids and returns all n*(n-1)/2 pairwise comparisons:
    r2 difference, r2(fft) difference, and the average standard deviation.
    '''
    img_buffer = {}
    return_list = []
    artdf = df[['_id', 'images']].copy()
    artdf.images = artdf.images.apply(getpath) 
    paths = artdf[['_id','images']].dropna()
    paths.index = paths._id
    paths = paths.images
    if paths.shape[0] < 2:
        return DataFrame([])
    for id_pair in combinations(paths.index, 2):
        if id_pair[0] in img_buffer:
            img1 = img_buffer[id_pair[0]]
        else:
            img_buffer[id_pair[0]] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + paths[id_pair[0]]), (300,300))))
            img1 = img_buffer[id_pair[0]]
        
        if id_pair[1] in img_buffer:
            img2 = img_buffer[id_pair[1]]
        else:
            img_buffer[id_pair[1]] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + paths[id_pair[1]]), (300,300))))
            img2 = img_buffer[id_pair[1]]
        return_list.append(
                [id_pair[0], id_pair[1],
                    norm(img1 - img2),
                    norm(fft2(img1) - fft2(img2)),
                    # mean([sum(img1 > threshold_otsu(img1)), sum(img2 > threshold_otsu(img2))])
                    # mean([sum(img1 > 0.9), sum(img2 > 0.9)])
                    (std(img1) + std(img2)) / 2.]
       )
    return DataFrame(return_list, columns=['id1','id2','r2diff', 'fftdiff', 'stdavg'])
Example #8
def main():
    try:
        if len(sys.argv) <= 1:
            print('Error: Filename Required')
        if len(sys.argv) == 2:
            print('Error: Background Filename Required')
        if len(sys.argv) >= 3:
            # Constants
            Window_Size = 5
            image_name = sys.argv[1]
            ref_name = sys.argv[2]

            image = rgb2gray(io.imread(image_name))
            ref = rgb2gray(io.imread(ref_name))

            part_image, region, angle = pre.interest_region(image, plot_image = 0)
            ref_rotate = rotate(ref, angle)
            part_ref = ref_rotate[region[0]:region[1], region[2]:region[3]]

            pre_image = pre.noise_reduction(part_image, part_ref, Window_Size, mode = 0)
            io.imsave('pre_image.jpg', pre_image)

    except KeyboardInterrupt:
        print("Shutdown requested... exiting")
    except Exception:
        traceback.print_exc(file=sys.stdout)
    sys.exit(0)
Example #9
def iris_scan_orb_android(file_name):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/'+ file_name))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS9.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    percent = len(matches12) / len(descriptors1) * 100

    return percent
Example #10
def test(classifier, pca):
	building = io.imread("http://www.nps.gov/tps/images/briefs/14-commercial-building.jpg")
	building = transform.resize(building, (200, 200, 3))
	building = color.rgb2gray(building)
	building = building.reshape(1, -1)
	# building = pca.transform(building)
	print(building)
	print(classifier.predict(building)[0])
	print(to_cat[str(classifier.predict(building)[0])] + " (expect building)")
	# print(classifier.predict_proba(building))

	snow = io.imread("http://farm4.static.flickr.com/3405/3332148397_92d89db2ab.jpg")
	snow = transform.resize(snow, (200, 200, 3))
	snow = color.rgb2gray(snow)
	snow = snow.reshape(1, -1)
	# snow = pca.transform(snow)
	print(snow)
	print(to_cat[str(classifier.predict(snow)[0])] + " (expect snow)")
	# print(classifier.predict_proba(snow))


	flower = io.imread("https://upload.wikimedia.org/wikipedia/commons/f/fd/Daisy_flower_green_background.jpg")
	flower = transform.resize(flower, (200, 200, 3))
	flower = color.rgb2gray(flower)
	flower = flower.reshape(1, -1)
	# flower = pca.transform(flower)
	print(to_cat[str(classifier.predict(flower)[0])] + " (expect plant)")
Example #11
def compare_images(imageA, imageB):
    # compute the mean squared error and structural similarity
    # index for the images
    m = mse(imageA, imageB)
    s = ssim(color.rgb2gray(imageA), color.rgb2gray(imageB))
    if m > 500:
        print(m)
    if s < 0.85:
        print(s)
Example #12
def image_similarity_index(image_1_path_name, image_2_path_name):
    """Calculates the similarity of two images. A structural similarity index of 1.0 means the images are identical."""
    image_1 = color.rgb2gray(imread(image_1_path_name))  # color-images are not supported in the version for Raspbian
    image_2 = color.rgb2gray(imread(image_2_path_name))

    similarity = compare_ssim(image_1, image_2)

    return similarity
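Usage sketch; note that in current scikit-image releases `compare_ssim` has moved to `skimage.metrics.structural_similarity`:

from skimage import data, color
from skimage.metrics import structural_similarity

img = color.rgb2gray(data.astronaut())
print(structural_similarity(img, img, data_range=1.0))  # identical images -> 1.0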
Example #13
    def precisionRecall(self):
        test_images = []
        test_blobs = []
        X = []
        Y_ground = []

        #Get blobs
        for i in range(500,505):
            file = self.croppedImages[i]
            iter_image = rgb2gray(cv2.imread(file))
            iter_blobs = blob_dog(iter_image, min_sigma=1, max_sigma=25,
                                  sigma_ratio=1.6, threshold=.25, overlap=0.5)
            test_images.append(iter_image)
            test_blobs.append(iter_blobs)
            print("Get blobs: " + str(i) + "/1000")

        #Get X values for testing
        for i in range(0, len(test_images)):
            RadPic = self.RadPIC(10, test_blobs[i], rgb2gray(test_images[i]))
            inputHOG = self.HOG(10, test_blobs[i], rgb2gray(test_images[i]))
            X.extend(self.makeX(RadPic, inputHOG))
            print("X: " + str(i) + "/1000")

        #Get Y ground truth values
        for i in range(500, 505):
            j = i
            tempStr = self.labels[j]

            while tempStr.find("frame" + str(i)) == -1:
                j = j + 1
                tempStr = self.labels[j]

            label_image = cv2.imread((glob.glob(tempStr))[0])
            cropped_image = cv2.imread((glob.glob("cropped_images/frame" + str(i) + ".jpg"))[0])
            cropped_image = rgb2gray(cropped_image)
            blobs = blob_doh(cropped_image, min_sigma=1, max_sigma=25, num_sigma=15, threshold=.001)
            for blob in blobs:
                y, x, r = blob
                y, x = int(y), int(x)  # blob coordinates are floats; cast for indexing
                if label_image[y, x][0] == 255 and label_image[y, x][1] == 255 and label_image[y, x][2] == 255:
                    Y_ground.append(1)
                else:
                    Y_ground.append(0)
                #print("Ground Truth: " + str(i) + "/1000")

        #Open classifier
        with open('croppedImagesV2.pkl', 'rb') as f:
            forest = pickle.load(f)

        # Predictions
        Y_classifier = forest.predict(X)

        #Precision-Recall
        precision, recall, thresholds = precision_recall_curve(Y_ground, Y_classifier)

        plt.plot(recall, precision)
        plt.ylabel('Precision')
        plt.xlabel('Recall')
        plt.show()
Example #14
def get_displacement(image0, image1):
    """
    Gets displacement (in pixels I think) difference between 2 images using scikit-image
    not as accurate as the opencv version i think.

    :param image0: reference image
    :param image1: target image
    :return:
    """
    from skimage.feature import (match_descriptors, ORB, plot_matches)
    from skimage.color import rgb2gray
    from scipy.spatial.distance import hamming
    from scipy import misc
    image0_gray = rgb2gray(image0)
    image1_gray = rgb2gray(image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(image0_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(image1_gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches based on distance.  Least distance
    # is better
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # collect displacement from the first 10 matches
    dx_list = []
    dy_list = []
    for mat in matches12[:10]:
        # Get the matching key points for each of the images
        img1_idx = mat[0]
        img2_idx = mat[1]

        # skimage keypoints are stored as (row, col)
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dx_list.append(abs(x1 - x2))
        dy_list.append(abs(y1 - y2))

    dx_median = np.median(np.asarray(dx_list, dtype=np.double))
    dy_median = np.median(np.asarray(dy_list, dtype=np.double))
    # plot_matches(image0, image1, descriptors1, descriptors2, matches12[:10])
    return dx_median, dy_median
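A quick sanity check, assuming numpy and a bundled sample image: shift a copy by a known offset and compare the reported medians (skimage keypoints are (row, col), so the first value tracks the row shift):

import numpy as np
from skimage import data

image0 = data.astronaut()
image1 = np.roll(image0, shift=(12, 7), axis=(0, 1))  # known (row, col) offset
print(get_displacement(image0, image1))  # expect values near (12.0, 7.0)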
Example #15
    def process(self, img2, image_gray):
        # img2 = warp(img2)
        patch_size = [640]
        img2 = rgb2gray(img2)
        image_gray = rgb2gray(img2)

        blobs_dog = blob_dog(image_gray, min_sigma=0.2, max_sigma=225, sigma_ratio=1.6, threshold=.5)
        blobs_dog[:, 2] = blobs_dog[:, 2]

        blobs = [blobs_dog]
        colors = ['black']
        titles = ['Difference of Gaussian']
        sequence = zip(blobs, colors, titles)

        # plt.imshow(img2)
        # plt.axis("equal")
        # plt.show()

        for blobs, color, title in sequence:
            print(len(blobs))
            for blob in blobs:
                y, x, r = blob
                plotx = x
                ploty = y
                for i in range (3):
                    keypoints1 = corner_peaks(corner_harris(Array.image_arr[i]), min_distance=1)
                    keypoints2 = corner_peaks(corner_harris(img2), min_distance=1)

                    extractor = BRIEF(patch_size=30, mode="uniform")

                    extractor.extract(Array.image_arr[i], keypoints1)
                    keypoints1 = keypoints1[extractor.mask]
                    descriptors1 = extractor.descriptors

                    extractor.extract(img2, keypoints2)
                    keypoints2 = keypoints2[extractor.mask]
                    descriptors2 = extractor.descriptors

                    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
                    
                    # print(keypoints1, keypoints2)
                    # print(matches12)
                    for pizdezh in matches12:
                        X = keypoints2[pizdezh[1]][1]
                        Y = keypoints2[pizdezh[1]][0]

                    if sqrt((plotx - X)**2 + (ploty - Y)**2) < r:
                        seen = [{
                            "type": Array.type_arr[i],
                            "center_shift": (plotx - 160/2) * -0.02,
                            "distance": image_gray[y][x] / 0.08
                        }]
                        print(seen)
                        data.seen.add(seen)
                        break
Example #16
def compare(file1, file2):
    image1 = io.imread(file1)
    image2 = io.imread(file2)
    image1 = color.rgb2gray(image1)
    image2 = color.rgb2gray(image2)
    # h1 = image1.histogram()
    # h2 = image2.histogram()
    # rms = math.sqrt(reduce(operator.add,
                           # map(lambda a,b: (a-b)**2, h1, h2))/len(h1))
    return ssim(image1, image2)
Example #17
def main():
    try:
        data_filename = 'number.txt'
        start_time = time.time()
        number = np.zeros((41,3))
        index = -1
        for det in range(1,8):
            if (det!=6):
                num_n = 6
            else:
                num_n = 5
            for n in range(0,num_n):
                index = index + 1
                temp = np.zeros(3)
                for angle in range(1,4):
                    filename = 'detector_' + str(det) + '_no_' + str(n) \
                                    + '_angle_' + str(angle) + '.jpg'
                    refname = 'detector_' + str(det) + '_no_' + str(n) \
                                    + '_background.jpg'

                    elapse_time = (time.time()-start_time)
                    if(elapse_time >= 1):
                        remain_time = elapse_time/(index*3+angle-1)*41*3-elapse_time
                        print('Processing .. ' + filename
                              + time.strftime(" %H:%M:%S", time.gmtime(elapse_time))
                              + ' has passed. Remaining time: '
                              + time.strftime(" %H:%M:%S", time.gmtime(remain_time)))
                    else:
                        print('Processing .. ' + filename
                              + time.strftime(" %H:%M:%S", time.gmtime(elapse_time))
                              + ' has passed')

                    image = rgb2gray(io.imread(filename))
                    ref = rgb2gray(io.imread(refname))

                    temp[angle-1] = ellipse.count_bubble(image,ref)
                    #temp[angle-1] = ellipse.count_bubble(image)

                number[index,1] = np.mean(temp)
                number[index,2] = np.std(temp)
        
        manual_count = np.array([1,27,40,79,122,160,1,18,28,42,121,223,0,11,24,46,\
                142,173,3,19,23,76,191,197,0,15,24,45,91,152,0,\
                16,27,34,88,0,9,12,69,104,123]) 
        number[:,0] = manual_count.T
        number.tofile(data_filename,sep=" ")
  
    except KeyboardInterrupt:
        print "Shutdown requested... exiting"
    except Exception:
        traceback.print_exc(file=sys.stdout)
        sys.exit(0)
Example #18
 def compute_binary_diff_image(self, new_image):
     """
     Compute an Otsu-thresholded image corresponding to the
     absolute difference between the empty chessboard image and the
     current image.
     """
     adj_start_image = exposure.adjust_gamma(
                     color.rgb2gray(self.empty_chessboard_image), 0.1)
     # gamma values have a strong impact on classification
     adj_image = exposure.adjust_gamma(color.rgb2gray(new_image), 0.1)
     diff_image = exposure.adjust_gamma(np.abs(adj_image - adj_start_image),
                                        0.3)
     return diff_image > threshold_otsu(diff_image)
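The same pipeline as a standalone sketch (gamma-compress both frames, difference, re-stretch, Otsu-threshold), assuming two RGB frames of one scene:

import numpy as np
from skimage import color, exposure
from skimage.filters import threshold_otsu

def binary_diff(empty_rgb, current_rgb, gamma_in=0.1, gamma_out=0.3):
    a = exposure.adjust_gamma(color.rgb2gray(empty_rgb), gamma_in)
    b = exposure.adjust_gamma(color.rgb2gray(current_rgb), gamma_in)
    diff = exposure.adjust_gamma(np.abs(b - a), gamma_out)
    return diff > threshold_otsu(diff)   # boolean change mask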
Example #19
    def computeDiffImgHistogramFromFrame(self, frameA, frameB):
        from skimage.color import rgb2gray
        from skimage import img_as_ubyte
        import cv2

        # Farneback needs 8-bit single-channel input
        frameA_grey = img_as_ubyte(rgb2gray(frameA))
        frameB_grey = img_as_ubyte(rgb2gray(frameB))

        # typical Farneback parameters: flow, pyr_scale, levels, winsize,
        # iterations, poly_n, poly_sigma, flags
        flow = cv2.calcOpticalFlowFarneback(frameA_grey, frameB_grey, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)

        flattenedFrame = flow.reshape((-1, 2))
        H, edges = np.histogramdd(flattenedFrame, bins = (8, 8))
        return H.ravel()
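A self-contained version of the same descriptor, usable outside the class; the 8x8 binning of the (dx, dy) vectors yields a 64-element histogram:

import cv2
import numpy as np
from skimage import img_as_ubyte
from skimage.color import rgb2gray

def flow_histogram(frameA, frameB, bins=(8, 8)):
    a = img_as_ubyte(rgb2gray(frameA))   # Farneback wants 8-bit grayscale
    b = img_as_ubyte(rgb2gray(frameB))
    flow = cv2.calcOpticalFlowFarneback(a, b, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    H, _ = np.histogramdd(flow.reshape(-1, 2), bins=bins)
    return H.ravel()                     # shape (64,) for 8x8 bins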
Example #20
def get_textural_features(img, isMultidirectional=False, distance=1):
    '''Extract GLCM feature vector from image
    Args:
        img: input image.

        isMultidirectional: Controls whether co-occurrence should be calculated
            in other directions (i.e. 45 degrees, 90 degrees and 135 degrees).

        distance: Distance between pixels for co-occurrence.

    Returns:
        features: if isMultidirectional=False, this is a 4 element vector of
        [dissimilarity, correlation,homogeneity, energy]. If not it is a 16
        element vector containing each of the above properties in each direction.
    '''
    if(isMultidirectional):
        img = img_as_ubyte(rgb2gray(img))
        # angles 0, 0.79, 1.57, 2.36 rad ~= 0, 45, 90, 135 degrees
        glcm = greycomatrix(img, [distance], [0, 0.79, 1.57, 2.36], 256, symmetric=True, normed=True)
        # greycoprops returns one value per (distance, angle); flatten the four
        # angles for each property, keeping the original ordering
        feature = np.concatenate([greycoprops(glcm, prop)[0]
                                  for prop in ('dissimilarity', 'correlation',
                                               'homogeneity', 'energy')])
        return feature
    else:
        img = img_as_ubyte(rgb2gray(img))
        glcm = greycomatrix(img, [distance], [0], 256, symmetric=True, normed=True)
        dissimilarity = greycoprops(glcm, 'dissimilarity')[0][0]
        correlation = greycoprops(glcm, 'correlation')[0][0]
        homogeneity = greycoprops(glcm, 'homogeneity')[0][0]
        energy = greycoprops(glcm, 'energy')[0][0]
        feature = np.array([dissimilarity, correlation, homogeneity, energy])
        return feature
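Usage sketch on a bundled RGB image, assuming the module-level imports the excerpt relies on (`rgb2gray`, `img_as_ubyte`, `greycomatrix`, `greycoprops`, `numpy as np`); note that newer scikit-image spells these `graycomatrix`/`graycoprops`:

from skimage import data

features = get_textural_features(data.astronaut(), isMultidirectional=True)
print(features.shape)  # (16,): 4 GLCM properties x 4 directions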
Example #21
def getWords(imageloc,finalBoundingboxesFiltered):
	img=io.imread(imageloc)
	imgray=color.rgb2gray(img)
	horizontaldistances=[]
	verticaldistances=[]
	imgwidth=Image.open(imageloc).size[0]

	for i in range(0,len(finalBoundingboxesFiltered)):
		for j in range (i+1,len(finalBoundingboxesFiltered)):
			item1=finalBoundingboxesFiltered[i]
			item2=finalBoundingboxesFiltered[j]
			h=getDistHorizontal(item1,item2)
			v=getDistVertical(item1,item2)
			if h!=0:
				horizontaldistances.append(h)
			if v!=0:
				verticaldistances.append(v)
	global HORIZONTALTHRESHOLD
	global VERTICALTHRESHOLD

	#print horizontaldistances,verticaldistances
	HORIZONTALTHRESHOLD=sorted(np.unique(horizontaldistances))[2]+1
	VERTICALTHRESHOLD=sorted(np.unique(verticaldistances))[1]+1
	print "using horizontal and vertical thresholds",HORIZONTALTHRESHOLD,VERTICALTHRESHOLD
	nomerges=1
	while(nomerges):
		(finalBoundingboxesFiltered,nomerges)=mergeOnce(imgray,finalBoundingboxesFiltered,imgwidth)	

	finalBoundingboxes=finalBoundingboxesFiltered
	return finalBoundingboxes	
Example #22
def print_hog_image(image):
    """
    image is expected to be in its original (RGB) format

    function prints the HOG visualization
    """
    print(image.shape)
    image = color.rgb2gray(image)

    fd, hog_image = hog(image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True, normalise=True)
    print "finished hog..."
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

    ax1.axis('off')
    ax1.imshow(image, cmap=plt.cm.gray)
    ax1.set_title('Input image')
    ax1.set_adjustable('box-forced')

    # Rescale histogram for better display
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))

    ax2.axis('off')
    ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
    ax2.set_title('Histogram of Oriented Gradients')
    ax1.set_adjustable('box-forced')
    plt.show()
Example #23
def neg_hog_rand(path, num_samples, window_size, num_window_per_image):
	rows = window_size[0]
	cols = window_size[1]
	features = []
	cnt = 0
	for dirpath, dirnames, filenames in walk(path):
		for my_file in filenames:
			
			if cnt < num_samples:
				print(cnt, my_file)
				cnt = cnt + 1
				im = cv2.imread(path + my_file)
				image = color.rgb2gray(im)
				image_rows = image.shape[0]
				image_cols = image.shape[1]
				
				for i in range(0,num_window_per_image):
					x_min = random.randrange(0,image_rows - rows)
					y_min = random.randrange(0,image_cols - cols)

					x_max = x_min + rows
					y_max = y_min + cols
					
					image_hog = image[x_min:x_max , y_min:y_max]
					
					my_feature, _ = hog(image_hog, orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), visualise=True)
					features.append(my_feature)
	return features
Example #24
def main():
    numberOfImages = 11

    # TODO: AUTOMATICALLY GET NUMBER OF IMAGES
    # Get the number of images. Remember to divide by 2 since for every relevant
    # image there is also a comparison image.
    # if ".DS_Store" in os.listdir("Wheat_ROIs"):
    #     numberOfImages = (len(os.listdir("Wheat_ROIs")) - 1)/2;
    # else:
    #     numberOfImages = len(os.listdir("Wheat_ROIs"))/2;

    # For each ROI image in folder
    for i in tqdm.tqdm(range(1, numberOfImages+1)):
        # Load image
        filename = "../Wheat_ROIs/{:03d}_ROI.png".format(i);
        img = misc.imread(filename);
        img_gray = rgb2gray(img);

        # Detect blobs. See http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.blob_doh
        # for function documentation
        blobs = blob_doh(img_gray, min_sigma=1, max_sigma=100, threshold=.01)

        # Display blobs on image and save image
        fig, ax = plt.subplots()
        plt.title("Number of Blobs Detected: {}".format(blobs.shape[0]))
        plt.grid(False)
        ax.imshow(img, interpolation='nearest')
        for blob in blobs:
            y, x, r = blob
            c = plt.Circle((x, y), r, color='red', linewidth=2, fill=False)
            ax.add_patch(c)
        fig.savefig("../Wheat_ROIs/{:03d}_Blob.png".format(i))
Example #25
def load_image_from_file(filename):
   filename = os.path.abspath(filename)
   
   image = data.load(filename)
   image_gray = rgb2gray(image)

   return image, image_gray
Example #26
def create_mask(img, num_circles, lo_thickness, hi_thickness, patch_size):
    im = rgb2gray(img)
    m = np.ones_like(im)

    np.random.seed(31415926)
    for i in range(num_circles):
        im_tmp = np.ones_like(m)
        yy = np.random.randint(0, m.shape[0])
        xx = np.random.randint(0, m.shape[1])
        r = np.random.randint(20, m.shape[0] / 2)
        t = np.random.randint(lo_thickness, hi_thickness)
        rro, cco = circle(yy, xx, r, shape=m.shape)
        rri, cci = circle(yy, xx, r - t, shape=m.shape)
        im_tmp[rro, cco] = 0
        im_tmp[rri, cci] = 1
        m[im_tmp == 0] = 0

    # Fix mask border.
    d = patch_size + 1
    m[:d, :] = 1
    m[-d:, :] = 1
    m[:, :d] = 1
    m[:, -d:] = 1

    return m
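Usage sketch, assuming the excerpt's imports (`rgb2gray`, `numpy as np`, and `circle` from skimage.draw, which newer releases rename to `disk`):

import numpy as np

img = np.random.rand(128, 128, 3)
mask = create_mask(img, num_circles=4, lo_thickness=2, hi_thickness=6, patch_size=7)
print(mask.shape, mask.min(), mask.max())  # ring-shaped holes, border forced to 1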
Example #27
def getFeat(Data, mode, fileNames, positive):
    num = 0

    for image in Data:
        gray = rgb2gray(image)
        fd, hog_image = hog(gray, orientations, pixels_per_cell,
                            cells_per_block, block_norm, visualize, normalize, feature_vector)

        if(visualize):
            visualize(data, hog_image)

        fd_name = str(fileNames[num]) + '.feat'  # set file name

        if mode == 'train':
            if positive == True:
                fd_path = os.path.join('./features/train/positive/', fd_name)
            else:
                fd_path = os.path.join(
                    './features/train/negative/', fd_name)

        else:
            if positive == True:
                fd_path = os.path.join('./features/test/positive/', fd_name)
            else:
                fd_path = os.path.join('./features/test/negative/', fd_name)

        joblib.dump(fd, fd_path, compress=3)  # save data to local
        num += 1
        print("%d saving: ." % (num))
Example #28
    def rgb2gray(self, rgb):

        img = color.rgb2gray(rgb)
        # r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
        #      gray = 0.2989 * r + 0.5870 * g + 0.1140 * b

        return img
Example #29
def get_features(filename):
    feature = []
    raw_image = io.imread(filename)
    for channel in range(0, 4):
        image = get_channel(raw_image, channel)
        image_gray = rgb2gray(image)
        for is_smoothing in range(0, 2):
            image_gray_1 = smooth(image_gray) if is_smoothing == 1 else image_gray
            for is_increase_contrast in range(1, 2):
                image_gray_2 = increase_contrast(image_gray_1) if is_increase_contrast == 1 else image_gray_1

                # Get number of blobs.
                num_blobs = get_num_blobs(image_gray_2)
                feature.append(num_blobs)

                # Get number of edges.
                # for is_otsu in range(0, 2):
                #    image_gray_3 = otsu(image_gray_2) if is_otsu == 1 else image_gray_2

                """
                num_edge = get_num_edges(image_gray_2, 2)
                feature.append(num_edge)
                if num_blobs == 0:
                    feature.append(0)
                else:
                    feature.append(num_edge/num_blobs)
                """

    out = ""
    for i in range(0, len(feature)):
        out = out + " " + str(i + 1) + ":" + str(feature[i])
    return out
Example #30
    def view_U_matrix(self, distance2=4, row_normalized='Yes', show_data='Yes', contooor='Yes', blob='Yes', save='Yes', save_dir=''):
        import scipy
        from pylab import meshgrid, cm, imshow, contour, clabel, colorbar, axis, title, show
        umat = self.U_matrix(distance=distance2, row_normalized=row_normalized)
        data = getattr(self, 'data_raw')
        proj = self.project_data(data)
        msz = getattr(self, 'mapsize')
        coord = self.ind_to_xy(proj)
        # freq = plt.hist2d(coord[:,1], coord[:,0], bins=(msz[1],msz[0]),alpha=1.0,cmap=cm.jet)[0]
        # plt.close()

        # fig, ax = plt.figure()
        fig, ax = plt.subplots(1, 1)
        im = imshow(umat, cmap=cm.RdYlBu_r, alpha=1)  # drawing the function
        # adding the contour lines with labels
        # imshow(freq[0].T,cmap=cm.jet_r,alpha=1)
        if contooor == 'Yes':
            mn = np.min(umat.flatten())
            mx = np.max(umat.flatten())
            std = np.std(umat.flatten())
            md = np.median(umat.flatten())
            mx = md + 0*std
            # mn = md
            # umat[umat<=mn]=mn
            cset = contour(umat, np.linspace(mn, mx, 15), linewidths=0.7, cmap=cm.Blues)

        if show_data == 'Yes':
            plt.scatter(coord[:,1], coord[:,0], s=2, alpha=1., c='Gray', marker='o', cmap='jet', linewidths=3, edgecolor='Gray')
            plt.axis('off')

        ratio = float(msz[0])/(msz[0]+msz[1])
        fig.set_size_inches((1-ratio)*15, ratio*15)
        plt.tight_layout()
        plt.subplots_adjust(hspace=.00, wspace=.000)
        sel_points = list()
        if blob == 'Yes':
            from skimage.feature import blob_dog, blob_log, blob_doh
            from math import sqrt
            from skimage.color import rgb2gray
            image = 1/umat
            image_gray = rgb2gray(image)

            # 'Laplacian of Gaussian'
            blobs = blob_log(image, max_sigma=5, num_sigma=4, threshold=.152)
            blobs[:, 2] = blobs[:, 2] * sqrt(2)
            imshow(umat, cmap=cm.RdYlBu_r, alpha=1)
            sel_points = list()
            for blob in blobs:
                row, col, r = blob
                c = plt.Circle((col, row), r, color='red', linewidth=2, fill=False)
                ax.add_patch(c)
                dist = scipy.spatial.distance_matrix(coord[:,:2], np.array([row,col])[np.newaxis,:])
                sel_point = dist <= r
                plt.plot(coord[:,1][sel_point[:,0]], coord[:,0][sel_point[:,0]], '.r')
                sel_points.append(sel_point[:,0])

        if save == 'Yes':
            fig.savefig(save_dir, transparent=False, dpi=400)
        return sel_points, umat
Example #31
def backward_energy(img, importance_map, mask, old_energy, pool):
    return filters.sobel(color.rgb2gray(img))
Example #32
 def __call__(self, img):
     img_gray = color.rgb2gray(img)
     img_gray = np.expand_dims(img_gray, axis=0)
     return img_gray
Example #33
def preprocessing(image, new_HW, height_range=(35, 195)):
    image = crop_image(image, height_range)  # (210, 160, 3)  --> (160, 160, 3)
    image = resize(rgb2gray(image), new_HW, mode='reflect')
    image = np.expand_dims(image, axis=2)  # (80, 80, 1)
    return image
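Shape check with a synthetic Atari-style frame, assuming the excerpt's `resize`/`rgb2gray` imports; `crop_image` is the excerpt's own helper, stubbed here as a plain row slice:

import numpy as np

def crop_image(image, height_range):   # hypothetical stand-in for the real helper
    lo, hi = height_range
    return image[lo:hi, ...]

frame = np.random.randint(0, 255, (210, 160, 3), dtype=np.uint8)
print(preprocessing(frame, new_HW=(80, 80)).shape)  # (80, 80, 1)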
Example #34
def vertical_merge(imgFolder, imgName):
    NUM_VERTICAL_COMB = 2
    VOL_V_THRESHOLD_RATIO = 1.5
    V_THRESHOLD_RATIO_JUDGE = 0.33
    H_THRESHOLD_RATIO_HELP = 1
    # grayscale
    imgPath = os.path.join(imgFolder, imgName)
    img = rgb2gray(io.imread(imgPath))
    img_size = img.shape
    thresh = threshold_mean(img)
    # img = (img > thresh) * 255
    img = img > thresh * 0.8  #### ATTENTION!
    # connected components
    [label, num_label] = measure.label(img,
                                       neighbors=8,
                                       background=1,
                                       return_num=True)
    CCs = measure.regionprops(label)
    # controids, horizontals, verticals, heights, vols
    centroids = []
    cc_heights = []
    cc_areas = []
    for cc in CCs:
        cc_heights.append(cc.bbox[2] - cc.bbox[0])
        cc_areas.append(cc.area)
        centroids.append(cc.centroid)
    horizontals = np.array([centroid[0] for centroid in centroids])
    verticals = np.array([centroid[1] for centroid in centroids])
    cc_heights = np.array(cc_heights)
    cc_areas = np.array(cc_areas)
    # get character heights, get character areas for later criteria
    mean_char_height = np.mean(cc_heights)
    mean_char_area = np.mean(cc_areas)
    if len(cc_heights) > 2:
        kmeans = KMeans(n_clusters=2).fit([(h, 0) for h in sorted(cc_heights)])
        ch_idxs = np.nonzero(kmeans.labels_ == kmeans.labels_[-1])[0]
        mean_char_height = np.mean(np.array(sorted(cc_heights))[ch_idxs])
        kmeans = KMeans(n_clusters=2).fit([(h, 0) for h in sorted(cc_areas)])
        ch_idxs = np.nonzero(kmeans.labels_ == kmeans.labels_[-1])[0]
        mean_char_area = np.mean(np.array(sorted(cc_areas))[ch_idxs])
    # get mean vertical distance, set H_THRESHOLD_RATIO_HELP
    v_dis = []
    for i in range(num_label):
        for j in range(i + 1, num_label):
            v_dis.append(abs(horizontals[i] - horizontals[j]))
    v_dis = np.array(v_dis)
    if len(v_dis) > 6 and np.amax(v_dis) * 0.8 < mean_char_height * 1.2:
        # more than 4 CCs
        H_THRESHOLD_RATIO_HELP = 2

    v_idx = np.argsort(verticals)
    v_points_sort = verticals[v_idx]
    # get threshold
    diff_v = []
    for i in range(1, num_label):
        diff_v.append(v_points_sort[i] - v_points_sort[i - 1])
    mean_div_v = np.mean(diff_v)
    threshold = mean_div_v * V_THRESHOLD_RATIO_JUDGE
    verti_comb = defaultdict(list)
    for i in range(num_label):
        for j in range(num_label):
            if i != j and abs(verticals[i] - verticals[j]) < threshold:
                verti_comb[i].append(j)
    # modify dict: delete elements (when more then 3) in the list
    #			   based on horizontal distances
    for cc_idx, verti_list in verti_comb.items():
        if len(verti_list) > NUM_VERTICAL_COMB:
            horiz_diff = [abs(horizontals[v_idx]-horizontals[cc_idx]) \
                 for v_idx in verti_list]
            selected_idx = np.argsort(horiz_diff)
            # print selected_idx
            verti_comb[cc_idx] = [verti_list[idx] \
                   for idx in selected_idx[:NUM_VERTICAL_COMB]]
    # groups tuples
    groups = set()
    for cc_idx, verti_list in verti_comb.items():
        delete_list = []
        for bi_idx in verti_list:
            if abs(horizontals[cc_idx] - horizontals[bi_idx]) < mean_char_height * H_THRESHOLD_RATIO_HELP \
            and CCs[cc_idx].area + CCs[bi_idx].area < mean_char_area * VOL_V_THRESHOLD_RATIO:
                groups.add(tuple(sorted([cc_idx, bi_idx])))
            else:
                delete_list.append(bi_idx)
        for idx in delete_list:
            verti_list.remove(idx)
        if len(verti_list) == 2:
            groups.add(tuple(sorted([cc_idx, verti_list[0], verti_list[1]])))
    groups = list(groups)
    # print groups
    return label, CCs, groups
Example #35
import os
import glob
import numpy as np
import csv
from skimage.feature import greycomatrix, greycoprops
from skimage import io, color, img_as_ubyte

#img_dir = "D:\MajorProject\Images" # Enter Directory of all images
img_dir = "D:\\MajorProject\\dataset\\2_cataract"
data_path = os.path.join(img_dir,'*g')
files = glob.glob(data_path)
data = []

for f1 in files:
	image = io.imread(f1)
	#image = io.imread('C:\\Users\\Abhishek Kamal\\Desktop\\image.jpg')
	#image =io.imread('D:\\pic1.png')
	gray = color.rgb2gray(image)
	image = img_as_ubyte(gray)
	data.append(image)

#bins = np.array([0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 255]) #16-bit
#inds = np.digitize(image, bins)

	# GLCM properties
def contrast_feature(matrix_coocurrence):
	contrast = greycoprops(matrix_coocurrence, 'contrast')
	return contrast[0][0]
	#return "Contrast = ", contrast

def dissimilarity_feature(matrix_coocurrence):
	dissimilarity = greycoprops(matrix_coocurrence, 'dissimilarity')
	return dissimilarity[0][0]
Example #36
# img = io.imread(pjoin(root_dir, 'Dataset30/input-frames/frame_0075.png'))[..., :3]
# truth = (io.imread(
#     pjoin(root_dir, 'Dataset30/ground_truth-frames/frame_0075.png'))[..., 0] > 0).astype(float)
# p_x, p_y = 150, 110

# out_size = 22
# p_x, p_y = 9, 9
# rr, cc = draw.ellipse(p_y, p_x, 8, 8, shape=(out_size, out_size), rotation=15)
# img = np.zeros((out_size, out_size, 3))
# truth = np.zeros((out_size, out_size))
# truth[rr, cc] = 1
# img[rr, cc, :] = (0, 1, 0)

img = resize(img, (out_size, out_size))
img_gray = color.rgb2gray(img)
width_bg = 2

truth_ = resize(truth, (out_size, out_size))
truth = np.zeros((truth_.shape[0] + 2*width_bg, truth_.shape[1] + 2*width_bg))
truth[width_bg: -width_bg, width_bg:-width_bg] = truth_
truth_contour = (segmentation.find_boundaries(truth > 0))

import pdb; pdb.set_trace() ## DEBUG ##
f = torch.tensor(np.concatenate((truth[None, ...], 1 - truth[None, ...]))[None, ...])

A = scipy.spatial.distance.pdist(truth, metric='cityblock')
A = scipy.spatial.distance.squareform(A, force='tomatrix')

markers = np.zeros(truth.shape, dtype=np.uint)
rr, cc = draw.circle(p_y, p_x, 4, shape=markers.shape)
Example #37
#kp, des = surf.detectAndCompute(img,None)
#len(kp)

from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray

import matplotlib.pyplot as plt

#image = data.hubble_deep_field()[0:500, 0:500]
image = cv2.imread(
    "/Users/2020shatgiskessell/Desktop/New_Mole_Detector/Test_Images/template.png"
)

image_gray = rgb2gray(image)

blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)

# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)

blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)

blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)

blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = [
    'Laplacian of Gaussian', 'Difference of Gaussian', 'Determinant of Hessian'
]
Example #38
    if (resize is not False):
        image_resized = resize(img, resize_img, anti_aliasing=True)

    with open(txt_out, 'w') as txt:
        for row in image_resized:
            line = ''
            for dot in row:
                value = int(dot*(len(map)-1))
                line += map[value]
            txt.write(line + '\r\n')
    return txt_out

if __name__ == '__main__':
    # TO TEST
    img = imread('./avatar.jpg')
    img = rgb2gray(img)

    image_resized = resize(img, (128, 256),
                        anti_aliasing=True)

    print(is_low_contrast(image_resized))

    with open('./text.txt', 'w') as txt:
        for row in image_resized:
            line = ''
            for dot in row:
                value = int(dot*11)
                line += default_map[value]
            txt.write(line + '\r\n')

def filename_plan_segmentation(imagestack, filename):
    imageStack = imagestack
    muDivision = np.linspace(0, 0.5, 15)
    lambda1Division = np.linspace(0, 4, 15)
    lambda2Division = np.linspace(0, 4, 15)

    zStack, width, length = np.shape(imageStack)
    maxImage = np.max(imageStack, axis=0)
    # maxImage = np.max(np.delete(imageStack, 0, 3), axis=0)

    # noColorPlan = rgb2gray(maxImage)

    mu_0 = 0.25
    delta_mu = 0.05
    lambda1_0 = 2
    lambda2_0 = 4
    tol = 1e-3
    max_iter = 1000
    dt = 0.5

    loopPosition = 0

    for plan in imageStack:
        global falseMatrixActivation
        falseMatrixActivation = 0
        fig, ax = plt.subplots()
        plt.subplots_adjust(left=0.1, bottom=0.3)
        graphTitle = 'Interactive segmentation  plan ' + str(loopPosition+1)
        plt.title(graphTitle)
        try:
            with open('cv_Parameter.txt', 'r') as cvParameter:

                print('cv_Parameter.txt found, loading previous parameters.')
                line = cvParameter.read().splitlines()
                mu_0 = float(line[0])
                lambda1_0 = float(line[1])
                lambda2_0 = float(line[2])
                delta_mu = 0.05
                tol = 1e-3
                max_iter = 1000
                dt = 0.5

        except FileNotFoundError:
            mu_0 = 0.25
            lambda1_0 = 2
            lambda2_0 = 4
            delta_mu = 0.05
            tol = 1e-3
            max_iter = 1000
            dt = 0.5

        noColorPlan = rgb2gray(plan)
        # print(plan, noColorPlan)
        cv = chan_vese(noColorPlan, mu=mu_0, lambda1=lambda1_0, lambda2=lambda2_0, tol=tol, max_iter=max_iter / 10, dt=dt,
                       init_level_set='checkerboard', extended_output=True)

        # if 16 bit image
        # maxImage2 = maxImage*16
        # maxImage3 = skimage.img_as_ubyte(maxImages2)
        # ax.imshow(mark_boundaries(maxImage, cv[0]), vmin=0, vmax=4096)
        ax.imshow(mark_boundaries(noColorPlan, cv[0]))

        # reinitialise value for slider

        # mu_0 = 0.25
        # lambda1_0 = 2
        # lambda2_0 = 4

        colorAxe = 'lightgray'
        muAxe = plt.axes([0.25, 0.2, 0.5, 0.03], facecolor='lightcoral')
        lambda1Axe = plt.axes([0.25, 0.15, 0.5, 0.03], facecolor='yellowgreen')
        lambda2Axe = plt.axes([0.25, 0.1, 0.5, 0.03], facecolor='mediumturquoise')
        muSlider = Slider(muAxe, '$\mu$', -0.05, 2., valinit=mu_0)
        lambda1Slider = Slider(lambda1Axe, '$\lambda_1$', 0., 10.0, valinit=lambda1_0)
        lambda2Slider = Slider(lambda2Axe, '$\lambda_2$', 0., 10.0, valinit=lambda2_0)

        def update(val):
            mu = muSlider.val
            lambda1 = lambda1Slider.val
            lambda2 = lambda2Slider.val
            cv = chan_vese(noColorPlan, mu=mu, lambda1=lambda1, lambda2=lambda2, tol=tol, max_iter=max_iter / 10, dt=dt,
                           init_level_set='checkerboard', extended_output=True)
            ax.imshow(mark_boundaries(noColorPlan, cv[0]), vmin=0, vmax=4096)
            #fig.canvas.draw_idle()

        muSlider.on_changed(update)
        lambda1Slider.on_changed(update)
        lambda2Slider.on_changed(update)

        resetAxe = plt.axes([0.65, 0.025, 0.1, 0.04])
        button = Button(resetAxe, 'Reset', color=colorAxe, hovercolor='0.6')

        def reset(event):
            muSlider.reset()
            lambda1Slider.reset()
            lambda2Slider.reset()

            try:
                with open('cv_Parameter.txt', 'r'):
                    pass
                os.remove('cv_Parameter.txt')

            except FileNotFoundError:
                pass

        button.on_clicked(reset)

        def keep_nothing(event):
            global falseMatrixActivation
            falseMatrixActivation = 1
            # print(f'falseMatrixActivation : {falseMatrixActivation}')
            plt.close()

        keepNothingAxe = plt.axes([0.25, 0.025, 0.2, 0.04])
        keepNothingButton = Button(keepNothingAxe, 'Keep Nothing', color=colorAxe, hovercolor='0.6')
        keepNothingButton.on_clicked(keep_nothing)

        saveAxe = plt.axes([0.5, 0.025, 0.1, 0.04])
        saveButton = Button(saveAxe, 'Save', color=colorAxe, hovercolor='0.6')

        def save(event):
            with open('cv_Parameter.txt', 'w') as cvParameter:
                cvParameter.write(str(muSlider.val) + '\n')
                cvParameter.write(str(lambda1Slider.val) + '\n')
                cvParameter.write(str(lambda2Slider.val) + '\n')
            print('Data saved in cv_Parameter.txt')
            plt.close()

        saveButton.on_clicked(save)

        plt.show()

        # Segmentation with best parameters or default
        if falseMatrixActivation == 1:
            imageStack[loopPosition] = np.zeros(np.shape(plan))
            print(f'no segmentation done')
        else:
            try:
                with open('cv_Parameter.txt', 'r') as cvParameter:

                    print('cv_Parameter.txt found, loading best parameters.')
                    line = cvParameter.read().splitlines()
                    mu = float(line[0])
                    lambda1 = float(line[1])
                    lambda2 = float(line[2])
            except FileNotFoundError:
                print('cv_Parameter.txt not found, loading default parameters.')
                mu = mu_0
                lambda1 = lambda1_0
                lambda2 = lambda2_0

            cv = chan_vese(noColorPlan, mu=mu, lambda1=lambda1, lambda2=lambda2, tol=tol, max_iter=max_iter, dt=dt,
                           init_level_set='checkerboard', extended_output=True)

            print('Segmentation done with parameter $\mu$ : {0}, $\lambda_1$ : {1}, $\lambda_2$ : {2}, '
                  'tolerance : {3:1.2e}, ''max iteration : {4:1.2e}, dt : {5}.'.format(mu, lambda1, lambda2, tol,
                                                                                       max_iter, dt))

            imageStack[loopPosition] = cv[0] * plan

        loopPosition += 1

    # Treatment and export

    segmentedImageName = 'segmented_Image/' + filename + '_segmentedImage.tif'
    with skimage.external.tifffile.TiffWriter(segmentedImageName) as tif:
        for image in range(imageStack.shape[0]):
            tif.save(imageStack[image], compress=0)

    return imageStack
Example #40
import numpy as np
from scipy import ndimage as ndi
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import matplotlib.image as mpimg
from skimage import data
from skimage import color
from skimage.util import view_as_blocks

# get astronaut from skimage.data in grayscale
l = color.rgb2gray(data.astronaut())

# size of blocks
block_shape = (4, 4)

# see astronaut as a matrix of blocks (of shape block_shape)
view = view_as_blocks(l, block_shape)

# collapse the last two dimensions in one
flatten_view = view.reshape(view.shape[0], view.shape[1], -1)

# resampling the image by taking either the `mean`,
# the `max` or the `median` value of each blocks.
mean_view = np.mean(flatten_view, axis=2)
max_view = np.max(flatten_view, axis=2)
median_view = np.median(flatten_view, axis=2)

# display resampled images
fig, axes = plt.subplots(3, 5, figsize=(20, 20), sharex=True, sharey=True)
ax = axes.ravel()
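One constraint worth noting: `view_as_blocks` requires every image dimension to be an exact multiple of `block_shape` (astronaut is 512x512, so 4x4 blocks divide it evenly), and the shapes come out as:

print(l.shape, view.shape)   # (512, 512) (128, 128, 4, 4)
print(mean_view.shape)       # (128, 128)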
Example #41
def Detect_Cellphone_In_Image(split):
    classifier = pickle.load(open("cellphone_image_classifier.sav", 'rb'))
    df = pd.read_csv("find_phone/" + "labels.txt", sep=" ", header=None)
    df.columns = ["file_name", "x_coordinate", "y_coordinate"]

    if split == 0:
        ##The last 1/3
        df = df[86:]
    elif split == 1:
        ##The first 1/3
        df = df[:43]
    else:
        ##The middle 1/3
        df = df[43:86]

    found = 0
    total = 0
    for index, row in df.iterrows():
        image = imageio.imread("find_phone/" + row["file_name"])
        image = img_as_float(image)
        image = rgb2gray(image)

        x_val = range(3, 430, 10)  #43
        y_val = range(5, 260, 10)  #26
        x_coord = [(i, i + 64) for i in x_val]
        y_coord = [(i, i + 64) for i in y_val]
        box_coord = [[i, j] for i in x_coord for j in y_coord]

        box_images = []
        for box_k in box_coord:
            xmin = box_k[0][0]
            xmax = box_k[0][1]
            ymin = box_k[1][0]
            ymax = box_k[1][1]
            img = image[ymin:ymax, xmin:xmax]
            img_prewitt = prewitt(img)
            box_images.append([img, img_prewitt])

        n_box_images = len(box_images)
        box_images = np.array(box_images)
        box_images = box_images.reshape(n_box_images, -1)
        box_images_prediction = classifier.predict_proba(box_images)

        index = 0
        prob = 0

        for idx, prob_m in enumerate(box_images_prediction):
            if prob_m[1] > prob:
                prob = prob_m[1]
                index = idx

        xmin = box_coord[index][0][0]
        xmax = box_coord[index][0][1]
        ymin = box_coord[index][1][0]
        ymax = box_coord[index][1][1]
        x_predicted = float(int(xmax + xmin) / 2) / 490
        y_predicted = float(int(ymax + ymin) / 2) / 326
        x = float(row["x_coordinate"])
        y = float(row["y_coordinate"])

        total += 1
        if np.sqrt(((x_predicted - x)**2) + ((y_predicted - y)**2)) < 0.05:
            found += 1
        else:
            print("NO  ", row["file_name"])

        print(row["file_name"], "x:", row["x_coordinate"], "y:",
              row["y_coordinate"], "p:", prob, x_predicted, y_predicted)
        print(" ")

    print(found)
    print(total)
    print(found / total)
Example #42
 def _step(self, action):
     self.data.append(self.close[0][3])
     self.counter = len(self.data)
     self.close.pop(0)
     self.times.pop(0)
     if len(self.data) > 0:
         self.text1.setPos(0, max(self.data))
         self.text2.setPos(0, min(self.data))
         self.text3.setPos(390, max(self.data))
         self.text4.setPos(390, min(self.data))
     if self.position == 0:
         self.text1.setText(text='LONG ', color='000000')
         self.text2.setText(text='SHORT', color='000000')
     elif self.position == 1:
         self.text1.setText(text='@', color='FFFFFF')
         self.text2.setText(text='SHORT', color='000000')
     elif self.position == -1:
         self.text1.setText(text='LONG ', color='000000')
         self.text2.setText(text='@', color='FFFFFF')
     self.text3.setText(text='BUY  ', color='000000')
     self.text4.setText(text='SELL ', color='000000')
     self.curve1.setData(self.data)
     #self.state = self.init()
     if action == 0:
         self.text3.setText(text='@', color='FFFFFF')
         if self.position == 1:
             self.reward = 0
             self.reward1 = 0
         elif self.position == 0:
             self.text1.setText(text='@', color='FFFFFF')
             self.position = 1
             self.bprice = self.data[-1]
             self.lineitem.setValue(self.counter)
             self.reward = 0
             self.reward1 = 0
     elif action == 1:
         self.text4.setText(text='@', color='FFFFFF')
         if self.position == -1:
             self.reward = 0
             self.reward1 = 0
         elif self.position == 0:
             self.text2.setText(text='@', color='FFFFFF')
             self.position = -1
             self.bprice = self.data[-1]
             self.lineitem.setValue(self.counter)
             self.reward = 0
             self.reward1 = 0
     elif action == 2:
         self.text4.setText(text='@', color='FFFFFF')
         if self.position == 0:
             self.reward = 0
             self.reward1 = 0
         elif self.position == 1:
             self.position = 0
             self.text1.setText(text='LONG ', color='000000')
             self.lineitem.setValue(0)
             self.reward1 = self.data[-1] - self.bprice
             if self.reward1 > 0:
                 self.reward = 1
             else:
                 self.reward = -1
         elif self.position == -1:
             self.position = 0
             self.text2.setText(text='SHORT', color='000000')
             self.lineitem.setValue(0)
             self.reward1 = self.bprice - self.data[-1]
             if self.reward1 > 0:
                 self.reward = 1
             else:
                 self.reward = -1
     else:
         self.reward = 0
         self.reward1 = 0
     if self.times[0].time() == self.dt.time():
         print("Terminal" + str(self.dates[0]))
         self.dates.pop(0)
         self.currentdate = self.dates[0]
         self.times = self.grouped.get_group(
             self.currentdate).index.tolist()
         self.close = self.grouped.get_group(
             self.currentdate).values.tolist()
         self.position = 0
         self.bprice = 0
         self.terminal = True
         action = 2
         if self.position == 0:
             self.reward = 0
         elif self.position == 1:
             self.position = 0
             self.lineitem.setValue(0)
             self.reward1 = self.data[-1] - self.bprice
             if self.reward1 > 0:
                 self.reward = 1
             else:
                 self.reward = -1
         elif self.position == -1:
             self.position = 0
             self.lineitem.setValue(0)
             self.reward1 = self.bprice - self.data[-1]
             if self.reward1 > 0:
                 self.reward = 1
             else:
                 self.reward = -1
         self.data = []
     self.app.processEvents()
     self.aaaa.export('temp.png')
     self.state = color.rgb2gray(io.imread('temp.png'))
     self.state = np.array(self.state)
     return self.state, self.reward, self.terminal, {}
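
# A minimal driver sketch (an assumption: this method is the gym-style step()
# of the environment whose __init__ appears in Example No. 50 below, `env` is
# a hypothetical instance, and actions are 0=buy, 1=sell, 2=close):
#
#     state = env.reset()
#     done = False
#     while not done:
#         action = np.random.randint(3)  # random policy placeholder
#         state, reward, done, info = env.step(action)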
Example No. 43
def main(_):
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    tf_global_step = slim.get_or_create_global_step()
    ##############################
    # Generate the tfRecord data #
    ##############################
    datareader, Volshape, rindex, spmap, labelOrg, imgOrg = EvalSample_Gnerate(
        FLAGS.dataset_dir, 'Glabel.nrrd')
    Patchsize = len(rindex)
    ######################
    # Select the dataset #
    ######################

    dataset = dataset_factory.get_dataset(
        FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir,
        file_pattern=FLAGS.file_name, Datasize=Patchsize)

    ####################
    # Select the model #
    ####################
    #num_classes=2

    with tf.Graph().as_default():
        network_fn = nets_factory.get_network_fn(
                FLAGS.model_name,
                num_classes=(dataset.num_classes - FLAGS.labels_offset),
                is_training=False)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################

    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    [image, label] = provider.get(['image', 'label'])
    # label -= FLAGS.labels_offset

    #####################################
    # Select the preprocessing function #
    #####################################
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
                 preprocessing_name,
                 is_training=False)

    eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size
    image = image_preprocessing_fn(image, eval_image_size, eval_image_size)

    images, labels = tf.train.batch(
             [image, label],
             batch_size=FLAGS.batch_size,
             num_threads=FLAGS.num_preprocessing_threads,
             capacity=5 * FLAGS.batch_size)

    ####################
    # Define the model #
    ####################

    logits, end_points = network_fn(images)
    probabilities = tf.nn.softmax(logits)
    pred = tf.argmax(logits, axis=1)  # 'dimension' was the deprecated alias of 'axis'
    # if FLAGS.moving_average_decay:
    #   variable_averages = tf.train.ExponentialMovingAverage(
    #       FLAGS.moving_average_decay, tf_global_step)
    #   variables_to_restore = variable_averages.variables_to_restore(
    #       slim.get_model_variables())
    #   variables_to_restore[tf_global_step.op.name] = tf_global_step
    # else:
    #   variables_to_restore = slim.get_variables_to_restore()

    # #predictions = tf.argmax(logits, 1)
    # labels = tf.squeeze(labels)
    #
    # # Define the metrics:
    # names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    #     'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
    #     'Recall@5': slim.metrics.streaming_recall_at_k(
    #         logits, labels, 5),
    # })
    #
    # # Print the summaries to screen.
    # summary_ops = []
    # for name, value in names_to_values.items():
    #   summary_name = 'eval/%s' % name
    #   op = tf.scalar_summary(summary_name, value, collections=[])
    #   op = tf.Print(op, [value], summary_name)
    #   tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
    #   summary_ops.append(op)
    # # TODO(sguada) use num_epochs=1
    # if FLAGS.max_num_batches:
    #   num_batches = FLAGS.max_num_batches
    # else:
    #   # This ensures that we make a single pass over all of the data.
    #   num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
        checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)
    init_fn = slim.assign_from_checkpoint_fn(
                os.path.join(checkpoint_path),
                slim.get_model_variables())  # variable names must match the checkpoint

    #Volshape=(50,61,61)
    imgshape=spmap.shape
    segResult=np.zeros(imgshape)
    groundtruth=np.zeros(imgshape)
    segPromap=np.zeros(imgshape)
    segPromapEdge=np.zeros(imgshape)
    PreMap=[]
    labellist=[]
    seglist=[]
    conv1list=[]
    conv2list=[]
    imgorglist=[]
    fclist=[]
    with tf.Session() as sess:
            # Load weights
            init_fn(sess)
           # sess.run(images.initializer, feed_dict)
            # Start input enqueue threads.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            num_iter = int(math.ceil(dataset.num_samples/float(FLAGS.batch_size)))
            step = 0

            try:
                while step < num_iter and not coord.should_stop():
                    # Run evaluation steps or whatever
                    segmentation,log,pmap,labelmap,imgpre,conv1,conv2,fc3= sess.run([pred,logits,probabilities,labels,images,end_points['conv1'],end_points['conv3'],end_points['fc3']])
                    step+=1
                    PreMap.extend(pmap)
                    conv1list.extend(conv1)
                    conv2list.extend(conv2)
                    fclist.extend(fc3)
                    imgorglist.extend(imgpre)
                    seglist.append(segmentation)
                    labellist.append(labelmap)
                    print('The No. %d/%d calculation' % (step, num_iter))
                    #Miaimshow.subplots(Pro_imgs, num=step+2, cols=8)
            except tf.errors.OutOfRangeError:
                print('Done evaluation -- epoch limit reached')
            finally:
                # When done, ask the threads to stop.
                coord.request_stop()
            PreMap = np.array(PreMap)
            np.save(os.path.join(FLAGS.dataset_dir,'liverspProbmap.npy'), PreMap)
            # PreMap=np.squeeze(PreMap,axis=1)

           # PreMap_flat = PreMap.ravel()
           # PreMap_flat=np.divide((PreMap_flat - np.amin(PreMap_flat)) * 255, (np.amax(PreMap_flat) - np.amin(PreMap_flat)))
            m = 0
            for i in range(len(rindex)):
                segResult[spmap==rindex[i]]=np.array(seglist).ravel()[i]
                segPromap[spmap==rindex[i]]=PreMap[i,1]
                segPromapEdge[spmap==rindex[i]]=PreMap[i,2]
                groundtruth[spmap==rindex[i]]=np.array(labellist).ravel()[i]

            coord.join(threads)

            fig,ax= plt.subplots(nrows=2,ncols=3)

            from skimage.segmentation import mark_boundaries
            ax[0,0].set_title('Segmentation with superpixel map')
            ax[0,0].imshow(mark_boundaries(segResult, spmap))
            ax[0,1].set_title('Segmentation with ground truth map')
            ax[0,1].imshow(segResult)
            ax[0,1].imshow(labelOrg,alpha=0.5,cmap='jet')
            ax[0,2].set_title('Reading label')
            ax[0,2].imshow(groundtruth)

            ax[1,0].set_title('liver Probabilities map')
            ax[1,0].imshow(segPromap)
            ax[1,1].set_title('edge Probabilities map')
            ax[1,1].imshow(segPromapEdge)
            ax[1,2].set_title('liver +edge Probabilities map')
            ax[1,2].imshow(segPromapEdge+segPromap)

            segthpro=segPromapEdge+segPromap
            segthpro[segthpro<0.8]=0

    from skimage.segmentation import active_contour
    from skimage.measure import find_contours
    from skimage.filters import gaussian
    from skimage import morphology

    #edg=sobel(segResult.astype(int))
    segmorp=morphology.remove_small_objects(segthpro.astype(bool),5000)
    segopen=morphology.opening(segmorp,morphology.disk(3))
    segclose=morphology.closing(segopen,morphology.disk(15))
    fig,ax=plt.subplots(1,3)
    ax=ax.ravel()
    ax[0].imshow(segmorp)
    ax[0].set_title('Removed the small objects')
    ax[1].imshow(segopen)
    ax[1].set_title('After open operation')
    ax[2].imshow(segclose)
    ax[2].imshow(labelOrg,alpha=0.5,cmap='jet')
    ax[2].set_title('After close operation')
    plt.axis('off')
    from MiaUtils import Miametrics as metric
    mt=metric.MiaMetrics(logger)
    dsc=mt.DSCMetric(segclose,labelOrg.astype(bool))
    print('The dice similarity coefficient score is {}'.format(dsc))

    voe=mt.VOEMetric(segclose,labelOrg.astype(bool))
    print('The Volume overlap Error score is {}'.format(voe))

    rvd=mt.RVDMetric(segclose,labelOrg.astype(bool))
    print('The Relative volume difference score is {}'.format(rvd))

    from medpy.metric.binary import hd
    from medpy.metric.binary import asd
    from medpy.metric.binary import obj_fpr
    from medpy.metric.binary import obj_tpr
    Asd=asd(segclose,labelOrg.astype(bool))
    print('The Asd score is {}'.format(Asd))

    HD=hd(segclose,labelOrg.astype(bool))
    print('The Hausdorff Distance score is {}'.format(HD))
    ###########################################################
    # Superpixel graph-cuts method computation
    ###########################################################
    from skimage import segmentation, color, filters
    from skimage.future import graph
    img = DataNormalize(imgOrg) / 255
    img = np.dstack((np.dstack((img, img)), img))
    labels1 = segmentation.slic(img, compactness=5, n_segments=2000, sigma=1)
    #labels1=spmap
    out1 = color.label2rgb(labels1, img, kind='avg')
    edge_map = filters.sobel(color.rgb2gray(img))
    g = graph.rag_boundary(labels1, edge_map)
    #g = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, g)
    out2 = color.label2rgb(labels2, img, kind='avg')

    fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
    ax[0].imshow(out1)
    ax[1].imshow(out2)
    for a in ax:
        a.axis('off')
    plt.tight_layout()
Example No. 44
plt.semilogx(bandwidths, scores)
plt.xlabel('bandwidth')
plt.ylabel('accuracy')
plt.title('KDE Model Performance')
print(grid.best_params_)
print('accuracy =', grid.best_score_)

from sklearn.naive_bayes import GaussianNB
# sklearn.cross_validation was removed in scikit-learn 0.20; model_selection
# provides the same function.
from sklearn.model_selection import cross_val_score
cross_val_score(GaussianNB(), digits.data, digits.target).mean()

from skimage import data, color, feature

image = color.rgb2gray(data.chelsea())
# Newer scikit-image spells the flag 'visualize'; older releases used 'visualise'.
hog_vec, hog_vis = feature.hog(image, visualize=True)

fig, ax = plt.subplots(1, 2, figsize=(12, 6),
                       subplot_kw=dict(xticks=[], yticks=[]))

ax[0].imshow(image, cmap='gray')
ax[0].set_title('input image')

ax[1].imshow(hog_vis)
ax[1].set_title('visualization of HOG features');

from sklearn.datasets import fetch_lfw_people
faces = fetch_lfw_people()
positive_patches = faces.images
positive_patches.shape
Example No. 45
import os
from sys import argv
from skimage import io
from skimage.color import rgb2gray

# rows x cols = 1006 x 1280
#mondriaan = 'mondriaan.jpg'
pic = argv[1]
# rows x cols = 104, 101
#mondriaan = 'mondriaan-test.png'

# path = '/Users/reinierdevalk/Downloads/'
# path = 'C:/Users/Reinier/Desktop/sonification/sonification/'
#path = argv[1]
path = ''

file = os.path.join(path, 'img/' + pic)

mon_col = io.imread(file)
mon = rgb2gray(mon_col)

dims = mon.shape
print(dims)
y_pix = dims[0]
x_pix = dims[1]

# print len(mon_col[0]) # first row in first 1006 x 1280 panel
# print mon_col[0][0] # RGB values for first element in first row in RGB panels
# print len(mon[0]) # first row in only (grayscale) 1006 x 1280 panel
# print mon[0][0] # RGB value for first element in first row in gray panel

#io.imshow(mon)
#io.show()

# 72 pitches from 24 (C1) to 96 (C7)
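
# A hypothetical continuation sketch: spread the 72 MIDI pitches mentioned
# above across the image rows (assumption: top row -> highest pitch; the
# original mapping may differ).
import numpy as np
pitches = (24 + 72 * (1 - np.arange(y_pix) / float(y_pix))).astype(int)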
Example No. 46
def create_data_pickles(dataset, update=False, cnn_input_size=250,
                        target_size=None):

    image_files = dataset["image_files"]
    pickle_files = dataset["pickle_files"]

    resized_dir = os.path.join(
        os.path.dirname(os.path.dirname(image_files[0])), "resized")
    if not os.path.exists(resized_dir):
        os.makedirs(resized_dir)
    else:
        shutil.rmtree(resized_dir)
        os.makedirs(resized_dir)

    for idx in range(len(image_files)):

        image_file = image_files[idx]
        data_file = pickle_files[idx]

        print "processing image: ", image_file

        basename = os.path.basename(image_file).split(".")[0]
        tmp_file = os.path.join(
            resized_dir, basename + "_resized.png")

        pkl_exists = os.path.isfile(data_file)

        if (not pkl_exists) or update:

            if target_size is not None:

                resize_command = "convert %s -resize %dx%d %s" % (
                    image_file, target_size, target_size, tmp_file)
                os.system(resize_command)

                # scipy.ndimage.imread was removed in SciPy 1.2; on newer
                # stacks imageio.imread is the drop-in replacement.
                imageRGB = ndimage.imread(tmp_file)

            else:
                imageRGB = ndimage.imread(image_file)

            image = color.rgb2gray(imageRGB)

            datum = {"dataset": dataset["name"],
                     "image_file": image_file,
                     "image_shape": image.shape,
                     "image": imageRGB}

            lsd_result = detect_lsd_lines(image)

            line_segments = lsd_result['segments']

            lines = np.zeros((line_segments.shape[0], 3))
            linelen = np.zeros((line_segments.shape[0]))

            for li in range(line_segments.shape[0]):
                ls = line_segments[li, :]
                p1 = np.array([ls[0], ls[1], 1])
                p2 = np.array([ls[2], ls[3], 1])
                linelen[li] = np.linalg.norm(p1-p2, ord=2)

                line = np.cross(p1, p2)
                lines[li, :] = line.copy()

            datum['line_segments'] = line_segments
            datum['lines'] = lines

            if datum['line_segments'] is not None:

                sphere_image = get_sphere_image(
                    datum['lines'], size=cnn_input_size, alpha=0.1)

                pkl_data = {'lines': datum, 'sphere_image': sphere_image}

            else:
                print "SKIPPING: incomplete data %s" % data_file
                pkl_data = {'lines': datum, 'sphere_image': None}

            with open(data_file, 'wb') as pickle_file:
                pickle.dump(pkl_data, pickle_file, -1)

    return
Example No. 47
from skimage.color import rgb2gray
import numpy as np
import cv2
import matplotlib.pyplot as plt
#%matplotlib inline
from scipy import ndimage

image = plt.imread('1.jpeg')
image.shape
plt.imshow(image)

gray = rgb2gray(image)
plt.imshow(gray, cmap='gray')

gray.shape

gray_r = gray.reshape(gray.shape[0]*gray.shape[1])
for i in range(gray_r.shape[0]):
    if gray_r[i] > gray_r.mean():
        gray_r[i] = 1
    else:
        gray_r[i] = 0
gray = gray_r.reshape(gray.shape[0],gray.shape[1])
plt.imshow(gray, cmap='gray')
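
# Aside: the element-wise loop above can be replaced by a single vectorized
# comparison; a minimal equivalent sketch:
binary = (rgb2gray(image) > rgb2gray(image).mean()).astype(float)
plt.imshow(binary, cmap='gray')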

gray = rgb2gray(image)
gray_r = gray.reshape(gray.shape[0]*gray.shape[1])
for i in range(gray_r.shape[0]):
    if gray_r[i] > gray_r.mean():
        gray_r[i] = 3
    elif gray_r[i] > 0.5:
        gray_r[i] = 2
    elif gray_r[i] > 0.25:
        gray_r[i] = 1
    else:
        gray_r[i] = 0
# Plausible reconstruction of the truncated multi-level threshold; the usual
# version of this example uses the four cut points above.
gray = gray_r.reshape(gray.shape[0], gray.shape[1])
plt.imshow(gray, cmap='gray')
Example No. 48
    check = (4 * im[y, x] + im[y + Delta, x] + im[y - Delta, x] +
             im[y, x + Delta] + im[y, x - Delta]) // 8
    return check


def check_edge_overlapp(y, x, r, edge):
    # Shrink the circle to nothing if it would spill over any image border
    # (the original guard only checked the bottom/right edges).
    if y < r or x < r or im.shape[0] - y <= r or im.shape[1] - x <= r:
        r = 0
    overlapp = np.zeros(edge.shape, dtype=bool)
    rr, cc = circle(y, x, r)  # skimage.draw.circle; replaced by draw.disk in newer releases
    overlapp[rr, cc] = edge[rr, cc]
    return np.sum(overlapp)


image = io.imread('dropbox/retinopathy/sample/13_left.jpeg')
img_resc = transform.rescale(rgb2gray(image), 0.25)
img_gray = img_as_ubyte(exposure.equalize_adapthist(img_resc, clip_limit=0.3))
#plt.imshow(img_gray, cmap = plt.get_cmap('gray'))
#io.imsave('dropbox/retinopathy/bw.jpeg',img_gray)

#Invert White<->Black
image_gray = 255 - img_gray

image_gray_rgb = gray2rgb(img_gray)

#Make blurry image for edge detection
im = ndimage.gaussian_filter(img_gray, 4)

# Compute the Canny filter for two values of sigma
edges1 = feature.canny(im, sigma=0.1)
edges2 = feature.canny(im, sigma=3)
Example No. 49
from skimage.restoration import (denoise_wavelet, denoise_tv_chambolle, estimate_sigma, denoise_nl_means)

np.random.seed(1)
# load images and convert them
n = 256
m = 8 # Check other sizes
image_size = (n, n)
patch_size = (m, m)
step = 4

print('Learning the dictionary for recto images...')
patches_recto = []
for pic_set in np.arange(4): # I'm using all the images for the learning
    
    img_train = mpimg.imread('./train_building/set'+ str(pic_set + 1) + '_pic.png')
    img_train_gray = rgb2gray(img_train) # the value is between 0 and 1

#    Extract reference patches from the image

    patches = patchify(img_train_gray, patch_size, step)
    initial_patch_size = patches.shape
    patches = patches.reshape(-1, patch_size[0] * patch_size[1])
    
    patches_recto.append(patches)

# Change the size of patches
patches_recto = np.asarray(patches_recto)
patches_recto = patches_recto.reshape(-1, m*m)
# Normalisation: centre and scale each pixel position across the patch set
patches_recto -= np.mean(patches_recto, axis=0)  # remove the per-feature mean
patches_recto /= np.std(patches_recto, axis=0)   # scale to unit variance
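
# The snippet stops before the learning step it announces; a minimal sketch of
# a plausible continuation with scikit-learn (an assumption -- the original may
# use a different learner or hyperparameters):
from sklearn.decomposition import MiniBatchDictionaryLearning
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1)
V_recto = dico.fit(patches_recto).components_  # learned atoms, shape (100, m*m)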
Example No. 50
 def __init__(self):
     self.viewer = None
     self.dims = (300, 300)
     self.observation_space = spaces.Box(low=0, high=300, shape=(self.dims))
     # Action space omits the Tackle/Catch actions, which are useful on defense
     self.action_space = spaces.Discrete(3)
     self.reward = 0
     self.actions = self.action_space
     self.df = read_hdf('uptodate.h5')
     self.df = self.df.loc['2000-1-1':'2014-1-1']
     self.grouped = self.df.groupby(lambda x: x.date).filter(
         lambda x: len(x) > 389 and len(x) < 391)
     self.grouped = self.grouped.groupby(lambda x: x.date)
      # list() is required on Python 3, where dict views support neither
      # shuffle() nor pop().
      self.dates = list(self.grouped.groups.keys())
      shuffle(self.dates)
     self.epochs = len(
         self.dates
     )  # number of epochs = # of trading days we are training for.
     print(self.epochs)
     self.app = QtGui.QApplication([])
     self.win = pg.GraphicsWindow()
     self.p1 = self.win.addPlot()
     self.p1.setXRange(0, 390)
     self.terminal = False
     self.text1 = pg.TextItem(text='LONG ',
                              anchor=(0, 0),
                              border='w',
                              fill=(255, 255, 255, 255))
     self.text2 = pg.TextItem(text='SHORT',
                              anchor=(0, 1),
                              border='w',
                              fill=(255, 255, 255, 255))
     self.text3 = pg.TextItem(text='BUY  ',
                              anchor=(1, 1),
                              border='w',
                              fill=(255, 255, 255, 255))
     self.text4 = pg.TextItem(text='SELL ',
                              anchor=(1, 0),
                              border='w',
                              fill=(255, 255, 255, 255))
     self.lineitem = pg.InfiniteLine()
     self.lineitem.setValue(0)
     self.p1.addItem(self.text1)
     self.p1.addItem(self.text2)
     self.p1.addItem(self.text3)
     self.p1.addItem(self.text4)
     self.p1.addItem(self.lineitem)
     self.curve1 = self.p1.plot()
     self.app.processEvents()
     self.counter = 0
     self.aaaa = pg.exporters.ImageExporter(self.p1)
     self.aaaa.export('temp.png')
     self.state = color.rgb2gray(io.imread('temp.png'))
     self.state = np.array(self.state)
     self.data = []
     self.count = 0
     self.cumrewards = 0.0
     self.testrewards1 = []
     self.testprofits1 = []
     self.testrewards = 0.0
     self.testprofits = 0.0
     self.b = "16:00:00"
     self.dt = datetime.strptime(self.b, "%H:%M:%S")
     self.currenttime = datetime.strptime('9:29:00', "%H:%M:%S")
     self.currentdate = self.dates[0]
     self.dates.pop(0)
     self.times = self.grouped.get_group(self.currentdate).index.tolist()
     self.close = self.grouped.get_group(self.currentdate).values.tolist()
     self.position = 0
     self.bprice = 0
     print("done")
     self._seed()
     self.reset()
Example No. 51
model = bgs_model.MattNet()
model.load_state_dict(torch.load("model_epoch_10.pth").state_dict())

inputImg = data.getImg(sys.argv[1])
print(inputImg.shape)
input_ = Variable(torch.tensor(np.reshape(inputImg, (1, 640, 480, 3))))
input_ = input_.float().permute(0, 3, 1, 2)

prediction = model(input_)
prediction = torch.cat((prediction, prediction, prediction), 1)
print(prediction.shape)
output = prediction.permute(0, 2, 3, 1)[0].data.numpy()
io.imsave("output.png", output)

grayscale = rgb2gray(output)
io.imsave("grayscale.png", grayscale)

thresh = threshold_otsu(grayscale)
thresholded = (grayscale >= (thresh * 0.95)).astype(np.float64)
io.imsave("thresh.png", thresholded)

product = output * inputImg
io.imsave("product.png", product)

# thresh = threshold_otsu(product)
productThresholded = product * np.reshape(
    thresholded, (640, 480, 1))  # (product >= thresh).astype(np.float64)
io.imsave("productT.png", productThresholded)

io.imsave("idk.png", product * np.reshape(rgb2gray(product), (640, 480, 1)))
Example No. 52
def rgbtogray(imgs):
    imgs_np = imgs.numpy()
    imgs_np = np.transpose(imgs_np, (0, 2, 3, 1))
    imgs_rgb = rgb2gray(imgs_np)
    return torch.from_numpy(imgs_rgb.reshape(-1, 1, 64, 64)).cuda().float()
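
# Hypothetical usage sketch (assumes a CUDA device, since the helper calls
# .cuda(), and a batch of RGB frames in NCHW layout):
#   batch = torch.rand(8, 3, 64, 64)   # 8 RGB frames, 64x64
#   gray = rgbtogray(batch)            # -> torch.Size([8, 1, 64, 64]) on GPU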
Example No. 53
##
from skimage.color import rgb2gray  #pip install scikit-image
import numpy as np
import cv2  #pip install opencv-python
import matplotlib.pyplot as plt
#%matplotlib inline
from scipy import ndimage

image = plt.imread('images/me.jpg')
print("Coloar image, shape height/width/rbg(3)", image.shape)

plt.imshow(image)
plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
plt.show()

gray_image = rgb2gray(image)
print("Gray image, shape height/width", gray_image.shape)
plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
plt.imshow(gray_image, cmap='gray')
plt.show()

###
"""
The height and width of the image are 1632 and 2070 respectively.
We will take the mean of the pixel values and use that as a threshold. 
If the pixel value is more than our threshold, we can say that it belongs to an object. 
If the pixel value is less than the threshold, it will be treated as the background.

The darker region (black) represents the background and the brighter (white) region is the foreground.
"""
gray_r = gray_image.reshape(gray_image.shape[0] * gray_image.shape[1])
# Plausible completion of the truncated snippet, mirroring the earlier
# mean-threshold example in this collection:
for i in range(gray_r.shape[0]):
    if gray_r[i] > gray_r.mean():
        gray_r[i] = 1
    else:
        gray_r[i] = 0
gray = gray_r.reshape(gray_image.shape[0], gray_image.shape[1])
plt.imshow(gray, cmap='gray')
plt.show()
Example No. 54
from skimage.color import rgb2gray
from skimage.transform import resize
import numpy as np


def pre_processing(observe):
    processed_observe = np.uint8(
        resize(rgb2gray(observe), (84, 84), mode='constant') * 255)
    return processed_observe
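
# A quick sanity check (the 210x160 RGB frame shape follows the usual Atari
# convention and is an assumption here):
frame = np.random.randint(0, 256, (210, 160, 3), dtype=np.uint8)
state = pre_processing(frame)
print(state.shape, state.dtype)  # (84, 84) uint8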
Example No. 55
def all_preprocess():
    training_paths = pathlib.Path("../data/stage1_train").glob(
        "*/images/*.png")
    training_sorted = sorted([x for x in training_paths])
    im_path = training_sorted[sample_index]
    im = imageio.imread(str(im_path))

    print("Original image shape: {}".format(im.shape))
    im_gray = rgb2gray(im)
    print("Grayed image shape: {}".format(im_gray.shape))

    # make images drastic
    thresh_val = threshold_otsu(im_gray)
    mask = np.where(im_gray > thresh_val, 1, 0)
    # imageio.imwrite("tmp2.png", mask)
    if np.sum(mask == 0) < np.sum(mask == 1):
        mask = np.where(mask, 0, 1)

    # get separate labels
    # numbers in labels represent feature number
    # [1,1,0,0,0...] <- label of feature1
    labels, nlabels = ndimage.label(mask)
    label_arrays = []
    for label_num in range(1, nlabels + 1):
        label_mask = np.where(labels == label_num, 1, 0)
        label_arrays.append(label_mask)
    imageio.imwrite("tmp3.png", label_arrays[0])

    print(
        "There are {} separate components / objects detected.".format(nlabels))

    rand_cmap = ListedColormap(np.random.rand(256, 3))
    print(rand_cmap)
    labels_for_display = np.where(labels > 0, labels, np.nan)
    # for showing a background picture
    plt.imshow(im_gray, cmap="gray")
    plt.imshow(labels_for_display, cmap=rand_cmap)
    plt.axis("off")
    plt.title("Labeled Cells ({} cells)".format(nlabels))
    # plt.show()
    # find_objects: return location of the each object (separated) given an input
    # square
    for label_ind, label_coords in enumerate(ndimage.find_objects(labels)):
        cell = im_gray[label_coords]
        print(label_coords)
        # remove too small nuclei
        # np.product is a removed alias of np.prod on NumPy >= 2.0
        if np.prod(cell.shape) < 10:
            print('Label {} is too small! Setting to 0.'.format(label_ind))
            # find_objects() is 0-indexed while label numbers start at 1,
            # so the feature number is label_ind + 1.
            mask = np.where(labels == label_ind + 1, 0, mask)

    # regenerate the labels
    labels, nlabels = ndimage.label(mask)
    print("There are now {} separate components / objects detected.".format(
        nlabels))

    # get the object indices, and perform a binary opening procedure
    # that is, separate combined objects
    two_cell_indices = ndimage.find_objects(labels)[1]
    cell_mask = mask[two_cell_indices]
    cell_mask_opened = ndimage.binary_opening(cell_mask, iterations=8)

    # convert each label object to RLE
    print("RLE Encoding for the current mask is: {}".format(
        rle_encoding(label_mask)))
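
# rle_encoding is assumed to be defined elsewhere; a common sketch of the
# Kaggle-style helper it appears to be (1-indexed, column-major run lengths):
def rle_encoding(x):
    dots = np.where(x.T.flatten() == 1)[0]  # transpose makes it column-major
    run_lengths = []
    prev = -2
    for b in dots:
        if b > prev + 1:
            run_lengths.extend((b + 1, 0))  # start a new (position, length) run
        run_lengths[-1] += 1
        prev = b
    return run_lengths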
Example No. 56
def Train_And_Test_Image_Classifier(split):
    phone_images = []
    for image_file in [
            img_f for img_f in os.listdir(".")
            if img_f.startswith("yes") and img_f.endswith("jpg")
    ]:
        image = imageio.imread(image_file)
        image = img_as_float(image)
        image = rgb2gray(image)
        image_prewitt = prewitt(image)
        phone_images.append([image, image_prewitt])

    n_phone_images = len(phone_images)
    #split phone images into training and testing sets
    factor_pi = int(n_phone_images / 3)
    training_phone_images = []
    testing_phone_images = []

    if split == 0:
        ##The last 1/3
        training_phone_images = phone_images[:factor_pi * 2]
        testing_phone_images = phone_images[factor_pi * 2:]
    elif split == 1:
        ##The first 1/3
        training_phone_images = phone_images[factor_pi:]
        testing_phone_images = phone_images[:factor_pi]
    else:
        ##The middle 1/3
        training_phone_images = (phone_images[:factor_pi] +
                                 phone_images[factor_pi * 2:])
        testing_phone_images = phone_images[factor_pi:factor_pi * 2]

    non_phone_images = []
    for image_file in [
            img_f for img_f in os.listdir(".")
            if img_f.startswith("no") and img_f.endswith("jpg")
    ]:
        image = imageio.imread(image_file)
        image = img_as_float(image)
        image = rgb2gray(image)
        image_prewitt = prewitt(image)
        non_phone_images.append([image, image_prewitt])

    n_non_phone_images = len(non_phone_images)
    #split none phone images into training and testing sets
    factor_npi = int(n_non_phone_images / 3)
    training_non_phone_images = []
    testing_non_phone_images = []

    if split == 0:
        ##The last 1/3
        training_non_phone_images = non_phone_images[:factor_npi * 2]
        testing_non_phone_images = non_phone_images[factor_npi * 2:]
    elif split == 1:
        ##The first 1/3
        training_non_phone_images = non_phone_images[factor_npi:]
        testing_non_phone_images = non_phone_images[:factor_npi]
    else:
        ##The middle 1/3
        training_non_phone_images = (non_phone_images[:factor_npi] +
                                     non_phone_images[factor_npi * 2:])
        testing_non_phone_images = non_phone_images[factor_npi:factor_npi * 2]

    training_set = training_phone_images + training_non_phone_images
    training_set_output = [1] * len(training_phone_images) + [0] * len(
        training_non_phone_images)

    testing_set = testing_phone_images + testing_non_phone_images
    testing_set_output = [1] * len(testing_phone_images) + [0] * len(
        testing_non_phone_images)

    n_training_set = len(training_set)
    training_set = np.array(training_set)
    training_set = training_set.reshape(n_training_set, -1)

    n_testing_set = len(testing_set)
    testing_set = np.array(testing_set)
    testing_set = testing_set.reshape(n_testing_set, -1)

    classifier = svm.SVC(C=100, probability=True, random_state=0)
    classifier.fit(training_set, training_set_output)
    pickle.dump(classifier, open("cellphone_image_classifier.sav", "wb"))

    predicted = classifier.predict(testing_set)
    print(classifier.score(testing_set, testing_set_output))

    i = 0
    while i < len(testing_set_output):
        if predicted[i] != testing_set_output[i]:
            print(i, predicted[i], testing_set_output[i])
        i += 1

    print("Classification report for classifier %s:\n%s\n" %
          (classifier,
           metrics.classification_report(testing_set_output, predicted)))

    predict_prob = classifier.predict_proba(testing_set)
    i = 0
    while i < len(testing_set_output):
        if predict_prob[i][0] > predict_prob[i][1]:
            if testing_set_output[i] != 0:
                print(i, "0: ", predict_prob[i][0], "1: ", predict_prob[i][1],
                      " shouldbe:1")
        else:
            if testing_set_output[i] != 1:
                print(i, "0: ", predict_prob[i][0], "1: ", predict_prob[i][1],
                      " shouldbe:0")
        i += 1
    print(" ")
Example No. 57
# Pooling layer (max pooling)
def pooling(featureMaps, size=2, stride=2):
    # np.int was removed in NumPy 1.24; plain int is equivalent here.
    poolResults = np.zeros((int((featureMaps.shape[0] - size)/stride + 1),
                            int((featureMaps.shape[1] - size)/stride + 1),
                            featureMaps.shape[-1]))
    for mapNum in range(featureMaps.shape[-1]):
        for r in range(poolResults.shape[0]):
            for c in range(poolResults.shape[1]):
                rf, cf = r*stride, c*stride  # top-left corner of the pooling window
                poolResults[r, c, mapNum] = np.max(featureMaps[rf: rf+size, cf: cf+size, mapNum])
    return poolResults

if __name__ == "__main__":
    img = data.chelsea()
    img = color.rgb2gray(img)

    layerFilter1 = np.zeros((2, 3, 3))  # 2 initial filters
    layerFilter1[0, :, 0] = layerFilter1[1, 2, :] = -1
    layerFilter1[0, :, 2] = layerFilter1[1, 0, :] = 1
    featureMaps1 = conv(img, layerFilter1)  # convolution
    reluResults1 = relu(featureMaps1)  # apply the activation to the conv output
    poolResults1 = pooling(reluResults1)  # pool the activated feature maps

    layerFilter2 = np.random.rand(3, 5, 5, poolResults1.shape[-1])
    featureMaps2 = conv(poolResults1, layerFilter2)
    reluResults2 = relu(featureMaps2)
    poolResults2 = pooling(reluResults2)

    layerFilter3 = np.random.rand(1, 7, 7, poolResults2.shape[-1])
    featureMaps3 = conv(poolResults2, layerFilter3)
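    # Plausible continuation of the conv -> relu -> pool pattern above (the
    # snippet is cut off at this point):
    reluResults3 = relu(featureMaps3)
    poolResults3 = pooling(reluResults3)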
Example No. 58
Image.MAX_IMAGE_PIXELS = None

# setting paths
paths = {
    'fixed': os.path.abspath(os.path.expanduser(sys.argv[1])),
    'moving': os.path.abspath(os.path.expanduser(sys.argv[2])),
    'lnds': os.path.abspath(os.path.expanduser(sys.argv[3])),
    'out': os.path.abspath(os.path.expanduser(sys.argv[4]))
}
paths['warped'] = os.path.join(paths['out'], 'warped-image.jpg')
paths['pts'] = os.path.join(paths['out'], 'warped-landmarks.csv')
paths['time'] = os.path.join(paths['out'], 'time.txt')

t_start = time.time()
# loading images and landmarks
fixed = ants.from_numpy(rgb2gray(imread(paths['fixed'])))
moving = ants.from_numpy(rgb2gray(imread(paths['moving'])))
lnds = pd.read_csv(paths['lnds'])[['X', 'Y']]
# transform landmarks coordinates
lnds.columns = ['y', 'x']

# perform image registration
mytx = ants.registration(fixed=fixed,
                         moving=moving,
                         initial_transform='AffineFast',
                         type_of_transform='ElasticSyN')
print('Transform: %r' % mytx)
t_elapsed = time.time() - t_start
print('Time: %r seconds' % t_elapsed)
# The call was cut off here; passing the forward transforms as transformlist
# is the standard ANTsPy completion.
warped_moving = ants.apply_transforms(fixed=fixed,
                                      moving=moving,
                                      transformlist=mytx['fwdtransforms'])
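
# A plausible continuation sketch (assumptions: landmarks are warped with the
# inverse transform list, and the save calls mirror the `paths` dict above):
warped_lnds = ants.apply_transforms_to_points(2, lnds, mytx['invtransforms'])
warped_lnds.to_csv(paths['pts'], index=False)
imsave(paths['warped'], warped_moving.numpy())  # assumes imsave imported alongside imread
with open(paths['time'], 'w') as fp:
    fp.write(str(t_elapsed))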
Example No. 59
 def load_edge(self, img, index):
     # np.float was removed in NumPy 1.24; plain float is the equivalent.
     return canny(rgb2gray(img), sigma=self.sigma).astype(float)
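
# Hypothetical call sketch (assumes an instance whose self.sigma is set, e.g. 2):
#   edge_map = loader.load_edge(img, index=0)  # canny returns bool; cast to 0.0/1.0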
Example No. 60

#%%

train_set = load_image_files("C:/Users/louis/Documents/Louis/ecole/2A/Méthodes d'apprentissage/chest_xray/train")
test_set = load_image_files("C:/Users/louis/Documents/Louis/ecole/2A/Méthodes d'apprentissage/chest_xray/test")


#%%
scaler = preprocessing.StandardScaler().fit(train_set.data)
normed_train_set = scaler.transform(train_set.data, copy=True)
normed_test_set = scaler.transform(test_set.data, copy=True)

#%%

gray_train_set = [rgb2gray(img) for img in train_set.images]
gray_test_set =  [rgb2gray(img) for img in test_set.images]

#%%

laplacian_train_set = [convolution2D(img, laplacian_of_gaussian_33).flatten() for img in gray_train_set]
laplacian_test_set = [convolution2D(img, laplacian_of_gaussian_33).flatten() for img in gray_test_set]

#%%

median_cut_train_set = [median_cut(img).flatten() for img in gray_train_set] 
median_cut_test_set = [median_cut(img).flatten() for img in gray_test_set]

#%%
param_grid = [
  {'C': [1, 10, 100, 1000], 'gamma': [0.0001,0.001, 0.01], 'kernel': ['rbf']},